Commit f74c4cc

Updated codegen to remove TensorOptions from generated code
1 parent: d2a3b6e

31 files changed: +1121 / -434 lines
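
The recurring pattern in the diffs below: native functions that previously took a gathered at::TensorOptions argument now receive the individual fields (dtype, layout, device, pin_memory) as separate, mostly optional parameters. A minimal sketch of that calling convention, using a hypothetical op name (my_full is illustrative and not part of this commit):

#include <ATen/ATen.h>
#include <c10/util/Optional.h>

// Hypothetical "scattered" native signature of the kind the codegen now emits:
// every TensorOptions field arrives as its own optional argument.
at::Tensor my_full(at::IntArrayRef size, at::Scalar fill_value,
                   c10::optional<at::ScalarType> dtype,
                   c10::optional<at::Layout> layout,
                   c10::optional<at::Device> device,
                   c10::optional<bool> pin_memory) {
  // Inside the kernel the fields can be gathered back into a TensorOptions;
  // the optional-taking setters leave unset fields at their defaults.
  at::TensorOptions options = at::TensorOptions()
      .dtype(dtype).layout(layout).device(device).pinned_memory(pin_memory);
  return at::full(size, fill_value, options);
}

// Caller side: explicit fields, with c10::nullopt meaning "use the default".
at::Tensor example() {
  return my_full({2, 3}, 1.0, at::kFloat, c10::nullopt, at::kCPU, c10::nullopt);
}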

aten/src/ATen/ScalarOps.h

Lines changed: 4 additions & 4 deletions
@@ -13,14 +13,14 @@ inline at::Tensor scalar_to_tensor(Scalar s, const Device device = at::kCPU) {
   // This is the fast track we have for CPU scalar tensors.
   if (device == at::kCPU) {
     if (s.isFloatingPoint()) {
-      return at::native::scalar_tensor(s, at::device(at::kCPU).dtype(at::kDouble));
+      return at::native::scalar_tensor(s, at::kDouble, c10::nullopt, at::kCPU);
     } else if (s.isBoolean()) {
-      return at::native::scalar_tensor(s, at::device(at::kCPU).dtype(at::kBool));
+      return at::native::scalar_tensor(s, at::kBool, c10::nullopt, at::kCPU);
     } else if (s.isComplex()) {
-      return at::native::scalar_tensor(s, at::device(at::kCPU).dtype(at::kComplexDouble));
+      return at::native::scalar_tensor(s, at::kComplexDouble, c10::nullopt, at::kCPU);
     } else {
       AT_ASSERT(s.isIntegral(false));
-      return at::native::scalar_tensor(s, at::device(at::kCPU).dtype(at::kLong));
+      return at::native::scalar_tensor(s, at::kLong, c10::nullopt, at::kCPU);
     }
   }
   if (s.isFloatingPoint()) {
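
For reference, the dropped builder expression and the new positional arguments carry the same information; a small equivalence sketch (illustrative, not part of the diff):

#include <ATen/ATen.h>

// at::device(...).dtype(...) builds a TensorOptions with only device and dtype set;
// the layout field stays unset, which is why the rewritten calls above pass
// c10::nullopt in the layout position.
void equivalence_sketch() {
  at::TensorOptions opts = at::device(at::kCPU).dtype(at::kDouble);
  AT_ASSERT(!opts.has_layout());
  AT_ASSERT(opts.device() == at::Device(at::kCPU));
}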

aten/src/ATen/function_wrapper.py

Lines changed: 195 additions & 24 deletions
Large diffs are not rendered by default.
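
The codegen changes themselves are not rendered here. Purely as an assumption about the shape of the emitted C++ (not the actual output of function_wrapper.py), a gathered TensorOptions overload can forward to a scattered native function by pulling each field out individually, e.g. for arange as it appears further down:

#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>

// Assumed shape of a generated forwarding wrapper -- illustrative only.
at::Tensor arange_with_options(at::Scalar start, at::Scalar end, at::Scalar step,
                               const at::TensorOptions& options) {
  // options.dtype() is a caffe2::TypeMeta; the scattered signature takes a ScalarType.
  c10::optional<at::ScalarType> dtype;
  if (options.has_dtype()) {
    dtype = c10::typeMetaToScalarType(options.dtype());
  }
  return at::native::arange(start, end, step,
                            dtype,
                            options.layout_opt(),
                            options.device_opt(),
                            options.pinned_memory_opt());
}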

aten/src/ATen/native/QuantizedLinear.cpp

Lines changed: 3 additions & 1 deletion
@@ -236,7 +236,9 @@ std::tuple<Tensor, Tensor, double, int64_t> fbgemm_linear_quantize_weight(
   q_params.precision = kPrecision;
 
   Tensor quantized = at::native::empty_like(
-      weight_contig, weight_contig.options().dtype(at::kChar), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
+      weight_contig, at::kChar, weight_contig.options().layout(),
+      weight_contig.options().device(), weight_contig.options().pinned_memory(),
+      LEGACY_CONTIGUOUS_MEMORY_FORMAT);
   // Tensor quantized = at::native::empty_cpu(
   //     weight_contig.sizes(), weight_contig.options().dtype(at::kChar));
   fbgemm::Quantize<int8_t>(
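
The rewritten call keeps the source tensor's layout, device, and pinned-memory flag and only overrides the dtype (to at::kChar), which is what the old options().dtype(at::kChar) expression did in one step. A short sketch of the per-field accessors involved (illustrative, not from the diff):

#include <ATen/ATen.h>

// Illustrative only: each accessor returns the stored value, or its default when unset.
void inspect_options(const at::Tensor& weight_contig) {
  const at::TensorOptions o = weight_contig.options();
  at::Layout layout = o.layout();        // at::kStrided for a dense tensor
  at::Device device = o.device();        // e.g. at::kCPU
  bool pinned       = o.pinned_memory(); // false unless allocated as pinned host memory
  (void)layout; (void)device; (void)pinned;
}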

aten/src/ATen/native/SobolEngineOps.cpp

Lines changed: 6 additions & 1 deletion
@@ -150,7 +150,12 @@ Tensor& _sobol_engine_initialize_state_(Tensor& sobolstate, int64_t dimension) {
     }
   }
 
-  Tensor pow2s = at::pow(2, at::native::arange((MAXBIT - 1), -1, -1, sobolstate.options()));
+  Tensor pow2s = at::pow(2, at::native::arange(
+      (MAXBIT - 1), -1, -1,
+      typeMetaToScalarType(sobolstate.options().dtype()),
+      sobolstate.options().layout(),
+      sobolstate.options().device(),
+      sobolstate.options().pinned_memory()));
   sobolstate.mul_(pow2s);
   return sobolstate;
 }
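
Note the typeMetaToScalarType call: TensorOptions::dtype() hands back a caffe2::TypeMeta, while the scattered arange signature takes a ScalarType. A small conversion sketch (illustrative only):

#include <ATen/ATen.h>

// Illustrative only: the TypeMeta <-> ScalarType round trip behind the call site above.
void dtype_conversion_sketch(const at::Tensor& sobolstate) {
  caffe2::TypeMeta meta = sobolstate.options().dtype();  // gathered representation
  at::ScalarType st = c10::typeMetaToScalarType(meta);   // what the new signature wants
  AT_ASSERT(c10::scalarTypeToTypeMeta(st) == meta);      // the mapping is invertible
}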

aten/src/ATen/native/SummaryOps.cpp

Lines changed: 5 additions & 1 deletion
@@ -36,7 +36,11 @@ Tensor _bincount_cpu_template(
 
   const input_t* self_p = self.data_ptr<input_t>();
   if (has_weights) {
-    output = native::zeros({nbins}, weights.options());
+    output = native::zeros({nbins},
+        typeMetaToScalarType(weights.options().dtype()),
+        weights.options().layout(),
+        weights.options().device(),
+        weights.options().pinned_memory());
     weights_t* output_p = output.data_ptr<weights_t>();
     const weights_t* weights_p = weights.data_ptr<weights_t>();
     for (int64_t i = 0; i < self.size(0); i++) {

aten/src/ATen/native/TensorConversions.cpp

Lines changed: 11 additions & 17 deletions
@@ -45,29 +45,23 @@ static inline Tensor to_impl(const Tensor& self, const TensorOptions& options, b
   return r;
 }
 
-Tensor to(const Tensor& self, const TensorOptions& options, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
-  TORCH_CHECK(options.requires_grad_opt() == c10::nullopt,
-              "to(options) expects unset requires_grad flag, but got "
-              "options.requires_grad set as ", options.requires_grad());
-
-  const auto & layout_opt = options.layout_opt();
-  TORCH_CHECK(!layout_opt || self.layout() == layout_opt.value(),
+Tensor to(const Tensor& self, c10::optional<ScalarType> dtype, c10::optional<Layout> layout, c10::optional<Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<c10::MemoryFormat> optional_memory_format) {
+  TORCH_CHECK(!layout.has_value() || self.layout() == layout.value(),
     "to(options) doesn't support converting to a different layout, "
     "but got self.layout being ", self.layout(),
-    " and options.layout set as ", options.layout());
-
-  auto device_opt = options.device_opt();
-  if (device_opt) {
-    device_opt = ensure_has_index(device_opt.value());
+    " and options.layout set as ", layout.value());
+
+  if (device.has_value()) {
+    device = ensure_has_index(device.value());
   }
-  const auto & dtype_opt = options.dtype_opt();
   auto specified_options = self.options();
-  if (device_opt) {
-    specified_options = specified_options.device(device_opt.value());
+  if (device.has_value()) {
+    specified_options = specified_options.device(device.value());
   }
-  if (dtype_opt) {
-    specified_options = specified_options.dtype(dtype_opt.value());
+  if (dtype.has_value()) {
+    specified_options = specified_options.dtype(dtype.value());
   }
+
   return to_impl(self, specified_options, non_blocking, copy, optional_memory_format);
 }
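
Caller-facing behaviour should be unchanged (a usage sketch, not part of the diff): the C++ frontend overloads of Tensor::to still accept a dtype, a device, or a TensorOptions, and the generated code forwards the individual fields to the native function above.

#include <ATen/ATen.h>

// Illustrative usage only; not part of the commit.
void to_usage_sketch() {
  at::Tensor x = at::rand({2, 2});
  at::Tensor y = x.to(at::kDouble);                             // dtype only
  at::Tensor z = x.to(at::device(at::kCPU).dtype(at::kFloat));  // via a TensorOptions
  (void)y; (void)z;
}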
