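// C++ API tests for TorchScript: compile script source with torch::jit::compile
// and exercise argument matching for lists, dicts, tuples, and optionals.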
#include <gtest/gtest.h>

#include <torch/jit.h>
#include <torch/types.h>

#include <string>
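// A single torch::jit::compile() call can compile several script functions
// from one source string; each is then invoked by name through run_method().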
TEST(TorchScriptTest, CanCompileMultipleFunctions) {
  auto module = torch::jit::compile(R"JIT(
      def test_mul(a, b):
        return a * b
      def test_relu(a, b):
        return torch.relu(a + b)
      def test_while(a, i):
        while bool(i < 10):
          a += a
          i += 1
        return a
      def test_len(a : List[int]):
        return len(a)
    )JIT");
  auto a = torch::ones(1);
  auto b = torch::ones(1);
  ASSERT_EQ(1, module->run_method("test_mul", a, b).toTensor().item<int64_t>());
  ASSERT_EQ(2, module->run_method("test_relu", a, b).toTensor().item<int64_t>());
  ASSERT_TRUE(
      0x200 == module->run_method("test_while", a, b).toTensor().item<int64_t>());
  at::IValue list = std::vector<int64_t>({3, 4});
  ASSERT_EQ(2, module->run_method("test_len", list).toInt());
}
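// Argument matching for nested list IValues: well-formed List[List[Tensor]]
// inputs are accepted, while over-nested or mixed-element lists are rejected
// with a descriptive error.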
TEST(TorchScriptTest, TestNestedIValueModuleArgMatching) {
  auto module = torch::jit::compile(R"JIT(
      def nested_loop(a: List[List[Tensor]], b: int):
        return torch.tensor(1.0) + b
    )JIT");

  auto b = 3;

  std::vector<torch::Tensor> list = {torch::rand({4, 4})};

  std::vector<torch::jit::IValue> list_of_lists;
  list_of_lists.push_back(list);
  module->run_method("nested_loop", list_of_lists, b);

  std::vector<torch::jit::IValue> generic_list;
  std::vector<torch::jit::IValue> empty_generic_list;
  empty_generic_list.push_back(generic_list);
  module->run_method("nested_loop", empty_generic_list, b);

  std::vector<torch::jit::IValue> too_many_lists;
  too_many_lists.push_back(empty_generic_list);
  try {
    module->run_method("nested_loop", too_many_lists, b);
    AT_ASSERT(false);
  } catch (const c10::Error& error) {
    AT_ASSERT(
        std::string(error.what_without_backtrace())
            .find("nested_loop() expected a value of type 'List[List[Tensor]]'"
                  " for argument 'a' but instead found type "
                  "'List[List[List[t]]]'") == 0);
  };

  std::vector<torch::jit::IValue> gen_list;
  std::vector<int64_t> int_list = {1, 2, 3};

  gen_list.emplace_back(list);
  gen_list.emplace_back(int_list);

  try {
    module->run_method("nested_loop", gen_list, b);
    AT_ASSERT(false);
  } catch (const c10::Error& error) {
    // TODO: currently does not unify types across encountered generic lists,
    // so the error message is not helpful here.
AT_ASSERT(
|
|
|
|
|
std::string(error.what_without_backtrace())
|
2019-06-12 20:10:26 -07:00
|
|
|
.find("nested_loop() expected a value of type "
|
2019-05-30 10:46:52 -07:00
|
|
|
"'List[List[Tensor]]' for argument 'a' but "
|
|
|
|
|
"instead found type 'List[List[Tensor]]'") == 0);
|
2019-01-30 13:48:36 -08:00
|
|
|
};
}
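// A generic dict IValue is matched against a Dict[str, Tensor] parameter and
// the looked-up tensor is returned intact.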
TEST(TorchScriptTest, TestDictArgMatching) {
  auto module = torch::jit::compile(R"JIT(
      def dict_op(a: Dict[str, Tensor], b: str):
        return a[b]
    )JIT");
  c10::impl::GenericDictPtr dict = c10::impl::make_generic_dict();
  dict.insert("hello", torch::ones({2}));
  auto output = module->run_method("dict_op", dict, std::string("hello"));
  ASSERT_EQ(1, output.toTensor()[0].item<int64_t>());
}
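// A tuple IValue containing a list is accepted for a Tuple[List[int]] parameter.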
TEST(TorchScriptTest, TestTupleArgMatching) {
  auto module = torch::jit::compile(R"JIT(
      def tuple_op(a: Tuple[List[int]]):
        return a
    )JIT");

  std::vector<int64_t> int_list = {1};
  auto tuple_generic_list = c10::ivalue::TuplePtr::create({int_list});

  // doesn't fail on arg matching
  module->run_method("tuple_op", tuple_generic_list);
}
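// Both a concrete tuple and a None (an empty IValue) are accepted for an
// Optional[Tuple[int, str]] parameter.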
TEST(TorchScriptTest, TestOptionalArgMatching) {
  auto module = torch::jit::compile(R"JIT(
      def optional_tuple_op(a: Optional[Tuple[int, str]]):
        if a is None:
          return 0
        else:
          return a[0]
    )JIT");

  auto optional_tuple = c10::ivalue::TuplePtr::create({2, std::string("hi")});

  ASSERT_EQ(2, module->run_method("optional_tuple_op", optional_tuple).toInt());
  ASSERT_EQ(
      0, module->run_method("optional_tuple_op", torch::jit::IValue()).toInt());
}