3#include <torch/torch.h>
23void ConvertTensorToString(
const torch::Tensor &from, std::string &to, std::vector<Index> *index =
nullptr);
24void ConvertTensorToString(
const torch::Tensor &from, std::wstring &to, std::vector<Index> *index =
nullptr);
35 && obj.Dict<Obj>::at(
"start").second
36 && obj.Dict<Obj>::at(
"stop").second
37 && obj.Dict<Obj>::at(
"step").second);
39 NL_CHECK(obj.Dict<Obj>::at(
"start").second->
is_integer(),
"Slice value start support integer type only!");
40 NL_CHECK(obj.Dict<Obj>::at(
"stop").second->
is_integer(),
"Slice value stop support integer type only!");
41 NL_CHECK(obj.Dict<Obj>::at(
"step").second->
is_integer(),
"Slice value step support integer type only!");
43 return at::indexing::Slice(
55 LOG_RUNTIME(
"Unsupport torch type %s (%d)!", at::toString(tensor.dtype().toScalarType()), (
int) tensor.dtype().toScalarType());
58 if (tensor.dim() == 0) {
59 if (tensor.is_floating_point()) {
62 ASSERT(!tensor.is_complex());
69 *
result->m_tensor = tensor;
70 result->m_var_is_init =
true;
78 return at::ScalarType::Bool;
82 return at::ScalarType::Char;
85 return at::ScalarType::Short;
88 return at::ScalarType::Int;
90 case ObjType::DWord64:
91 case ObjType::Integer:
92 return at::ScalarType::Long;
93 case ObjType::Float32:
96 return at::ScalarType::Float;
97 case ObjType::Float64:
100 return at::ScalarType::Double;
101 case ObjType::Complex16:
102 return at::ScalarType::ComplexHalf;
103 case ObjType::Complex32:
104 return at::ScalarType::ComplexFloat;
105 case ObjType::Complex64:
106 case ObjType::Complex:
107 return at::ScalarType::ComplexDouble;
114 case at::ScalarType::Bool:
115 return ObjType::Bool;
116 case at::ScalarType::Byte:
117 case at::ScalarType::Char:
118 case at::ScalarType::QInt8:
119 case at::ScalarType::QUInt8:
120 return ObjType::Int8;
121 case at::ScalarType::Short:
122 return ObjType::Int16;
123 case at::ScalarType::Int:
124 case at::ScalarType::QInt32:
125 return ObjType::Int32;
126 case at::ScalarType::Long:
127 return ObjType::Int64;
128 case at::ScalarType::BFloat16:
129 case at::ScalarType::Half:
130 return ObjType::Float16;
131 case at::ScalarType::Float:
132 return ObjType::Float32;
133 case at::ScalarType::Double:
134 return ObjType::Float64;
135 case at::ScalarType::ComplexHalf:
136 return ObjType::Complex16;
137 case at::ScalarType::ComplexFloat:
138 return ObjType::Complex32;
139 case at::ScalarType::ComplexDouble:
140 return ObjType::Complex64;
142 LOG_RUNTIME(
"Can`t convert type '%s' to ObjType!", at::toString(t));
146 std::stringstream ss;
153 std::stringstream ss;
156 for (
int i = 0; i < index.size(); i++) {
171 out << var->toString().c_str();
173 out <<
"<NOT OBJECT>";
179m_var_type_current(type), m_var_name(var_name ? var_name :
""), m_prototype(func_proto) {
187 m_var = std::monostate();
188 m_tensor = std::make_shared<torch::Tensor>();
212 int64_t new_size =
m_tensor->size(0) - 1;
213 if ((from == 0 && to == 1) || (from == 0 && to == 0)) {
215 std::vector<int64_t> sizes(1);
216 sizes[0] = new_size + 1;
218 at::Tensor ind = torch::arange(sizes[0] - new_size - 1, sizes[0] - 1, at::ScalarType::Long);
219 at::Tensor any = torch::zeros(sizes[0] - new_size, at::ScalarType::Long);
222 ind = at::cat({any, ind});
230 m_tensor->resize_(at::IntArrayRef(sizes));
278 new_size = -new_size;
279 if (
static_cast<int64_t
> (
size()) > new_size) {
288 }
else if (
static_cast<int64_t
> (
size()) < new_size) {
290 m_value.insert(0, new_size,
' ');
304 std::vector<int64_t> sizes;
305 for (
int i = 0; i <
m_tensor->dim(); i++) {
313 }
else if (new_size == 0 || sizes[0] == new_size) {
315 }
else if (new_size > 0) {
318 ASSERT(sizes.size() == 1);
321 m_tensor->resize_(at::IntArrayRef(sizes));
325 new_size = -new_size;
326 if (sizes[0] == new_size) {
328 }
else if (sizes[0] > new_size) {
330 ASSERT(sizes.size() == 1);
332 at::Tensor ind = torch::arange(sizes[0] - new_size - 1, sizes[0] - 1, at::ScalarType::Long);
333 at::Tensor any = torch::zeros(sizes[0] - new_size, at::ScalarType::Long);
336 ind = at::cat({any, ind});
344 m_tensor->resize_(at::IntArrayRef(sizes));
348 ASSERT(sizes.size() == 1);
413 if (
index.size() != 1 || !
index[0].is_integer()) {
416 int64_t pos =
index[0].integer().expect_int();
420 if (pos <
static_cast<int64_t
> (
m_value.size())) {
425 if (
index.size() != 1 || !
index[0].is_integer()) {
428 int64_t pos =
index[0].integer().expect_int();
432 if (pos <
static_cast<int64_t
> (
m_string.size())) {
444 if (
index.size() != 1 || !
index[0].is_integer()) {
452 if (
index.size() != 1 || !
index[0].is_integer()) {
455 int64_t pos =
index[0].integer().expect_int();
459 if (pos <
static_cast<int64_t
> (
m_value.size())) {
467 if (
index.size() != 1 || !
index[0].is_integer()) {
470 int64_t pos =
index[0].integer().expect_int();
474 if (pos <
static_cast<int64_t
> (
m_string.size())) {
487 if (temp->is_scalar()) {
488 if (temp->is_integral()) {
491 ASSERT(temp->is_floating());
495 ASSERT(temp->m_tensor->defined());
502 if (
index.size() != 1 || !
index[0].is_integer()) {
505 (*
at(
index[0].integer().expect_int()).second) = value;
536 for (
auto &elem : *
this) {
537 if (strong &&
find->op_accurate(elem.second)) {
539 }
else if (!strong &&
find->op_equal(elem.second)) {
553 for (
int i = 0; i < obj->
size(); i++) {
585 m_var = std::monostate();
593 (*m_tensor) = -(*m_tensor);
616 *obj->m_tensor = torch::zeros_like(*obj->m_tensor) - *obj->m_tensor;
619 LOG_RUNTIME(
"Object '%s' not numeric!", obj->toString().c_str());
685 for (
int i = 0; i < value.
size(); i++) {
741 for (
int i = 0; i < value.
size(); i++) {
743 if (found !=
end()) {
744 ListType::erase(found);
890 m_var =
static_cast<int64_t
> (num / den);
892 ldiv_t res = std::ldiv(num, den);
893 m_var =
static_cast<int64_t
> ((res.rem) ? res.quot - 1 : res.quot);
970 for (
auto &elem : *
this) {
971 if (strong &&
find->op_accurate(elem.second)) {
973 }
else if (!strong &&
find->op_equal(elem.second)) {
984 if (&
clone !=
this) {
993 clone.m_dimensions =
nullptr;
1032 for (
int i = 0; i < Dict<Obj>::size(); i++) {
1040 if (
name(i).empty()) {
1044 clone.Dict<Obj>::push_back(
nullptr,
name(i));
1059 std::stringstream ss;
1103 result =
at(
"start").second->GetValueAsString();
1105 result +=
at(
"stop").second->GetValueAsString();
1107 result +=
at(
"step").second->GetValueAsString();
1172 if (std::holds_alternative<void *>(
m_var)) {
1173 ss << std::get<void *>(
m_var);
1192 result += (*m_dimensions)[i].second->toString();
1208 result =
":Reference to ";
1211 result = ref->toString();
1332void TensorToString_(
const torch::Tensor &tensor, c10::IntArrayRef shape, std::vector<Index> &ind,
const int64_t pos,
1333 std::stringstream & str) {
1335 ASSERT(pos <
static_cast<int64_t
> (ind.size()));
1337 if (shape.size() > 1 && pos + 1 <
static_cast<int64_t
> (ind.size())) {
1339 intend = std::string((pos + 1) * 2,
' ');
1342 if (pos + 1 <
static_cast<int64_t
> (ind.size())) {
1344 for (ind[pos] = 0; ind[pos].integer() < shape[pos]; ind[pos] = ind[pos].integer() + 1) {
1354 for (ind[pos] = 0; ind[pos].integer() < shape[pos]; ind[pos] = ind[pos].integer() + 1) {
1360 if (tensor.is_floating_point()) {
1361 str << tensor.index(ind).item<
double>();
1362 }
else if (tensor.is_complex()) {
1363 ASSERT(!
"Not implemented!");
1365 str << tensor.index(ind).item<int64_t>();
1370 if (!intend.empty()) {
1332void TensorToString_(
const torch::Tensor &tensor, c10::IntArrayRef shape, std::vector<Index> &ind,
const int64_t pos, {
…}
1378 std::stringstream ss;
1380 if (!tensor.dim()) {
1381 LOG_RUNTIME(
"!tensor.dim() %s", tensor.toString().c_str());
1385 c10::IntArrayRef shape = tensor.sizes();
1386 std::vector<Index> ind(shape.size(), 0);
1405 return !at::_is_zerotensor(*
m_tensor);
1416 return std::get<void *>(
m_var);
1427 if (elem->GetValueAsBoolean()) {
1442 return m_iterator->data().second->GetValueAsBoolean();
1459 if (std::holds_alternative<int64_t>(
m_var)) {
1460 return std::get<int64_t>(
m_var);
1461 }
else if (std::holds_alternative<bool *>(
m_var)) {
1462 return *std::get<bool *>(
m_var);
1467 if (std::holds_alternative<int64_t>(
m_var)) {
1468 return std::get<int64_t>(
m_var);
1469 }
else if (std::holds_alternative<int8_t *>(
m_var)) {
1470 return *std::get<int8_t *>(
m_var);
1474 if (std::holds_alternative<int64_t>(
m_var)) {
1475 return std::get<int64_t>(
m_var);
1476 }
else if (std::holds_alternative<int16_t *>(
m_var)) {
1477 return *std::get<int16_t *>(
m_var);
1481 if (std::holds_alternative<int64_t>(
m_var)) {
1482 return std::get<int64_t>(
m_var);
1483 }
else if (std::holds_alternative<int32_t *>(
m_var)) {
1484 return *std::get<int32_t *>(
m_var);
1488 if (std::holds_alternative<int64_t>(
m_var)) {
1489 return std::get<int64_t>(
m_var);
1490 }
else if (std::holds_alternative<int64_t *>(
m_var)) {
1491 return *std::get<int64_t *>(
m_var);
1494 if (std::holds_alternative<int64_t>(
m_var)) {
1495 return std::get<int64_t>(
m_var);
1529 return m_iterator->data().second->GetValueAsInteger();
1534 return reinterpret_cast<int64_t
> (std::get<void *>(
m_var));
1540Obj::operator float()
const {
1541 double result = GetValueAsNumber();
1542 if (
result > (
double) std::numeric_limits<float>::max()) {
1543 LOG_RUNTIME(
"Value1 '%s' %.20f %.20f %.20f is out of range of the casting type float!", GetValueAsString().c_str(),
result, std::numeric_limits<float>::max(), std::numeric_limits<float>::lowest());
1547 if (
result < -__FLT_MAX__) {
1548 LOG_RUNTIME(
"Value2 '%s' %.20f %.20f %.20f is out of range of the casting type float!", GetValueAsString().c_str(),
result, std::numeric_limits<float>::max(), std::numeric_limits<float>::lowest());
1550 LOG_DEBUG(
"operator float() '%s' %.20f", GetValueAsString().c_str(),
result);
1540Obj::operator float()
const {
…}
1554Obj::operator double()
const {
1555 return GetValueAsNumber();
1554Obj::operator double()
const {
…}
1566 if (std::holds_alternative<double>(
m_var)) {
1568 return std::get<double>(
m_var);
1569 }
else if (std::holds_alternative<float *>(
m_var)) {
1571 return *std::get<float *>(
m_var);
1575 if (std::holds_alternative<double>(
m_var)) {
1577 return std::get<double>(
m_var);
1578 }
else if (std::holds_alternative<double *>(
m_var)) {
1580 return *std::get<double *>(
m_var);
1583 if (std::holds_alternative<double>(
m_var)) {
1584 return std::get<double>(
m_var);
1596 return m_iterator->data().second->GetValueAsNumber();
1610 std::stringstream ss;
1613 LOG_RUNTIME(
"Object not initialized name:'%s' type:%s, fix:%s!",
1650 ASSERT(!
"Not implemented!");
1686 ss << std::get<void *>(
m_var);
1717 if (
this == &value) {
1771 if (
this == &value) {
1791 return m_tensor->toType(summary_torch_type).equal(*value.
toType(summary_type)->m_tensor);
1793 }
catch (std::exception e) {
1812 for (int64_t i = 0; i < static_cast<int64_t> (
size()); i++) {
1813 if (
name(i).compare(value.
name(i)) != 0) {
1816 if (!
at(i).second->op_equal(value[i].second)) {
1850 while (pos <
size()) {
1851 if (!obj.
exist(
at(pos).second, strong)) {
1861 while (pos <
size()) {
1862 if (!obj.
exist(
at(pos).second, strong)) {
1875 if (obj->is_string_type()) {
1877 }
else if (!obj->m_class_name.empty()) {
1879 }
else if (obj->is_type_name()) {
1928 for (
int i = 0; i < value->
size(); i++) {
1929 if (value->
name(i).empty()) {
1930 field = (*base)[i].second;
1932 field = (*base)[value->
name(i)].second;
1937 if (strong || !((*value)[i].second->getType() !=
ObjType::None)) {
1938 for (
auto &elem : *value) {
1939 if (!field->op_duck_test(elem.second, strong)) {
1955 if (temp >
static_cast<double> (std::numeric_limits<int64_t>::max())) {
1958 m_var =
static_cast<int64_t
> (llround(temp));
2071 temp->CloneDataTo(*dest);
2072 temp->ClonePropTo(*dest);
2092 for (
int i = 0; i < temp->size(); i++) {
2093 dest->
push_back(temp->at(i).second, temp->at(i).first);
2110 size +=
ConcatData(dest, *(temp.get()), mode);
2123 LOG_RUNTIME(
"Cannot tensor shape from empty dictionary!");
2125 shape.push_back(obj->
size());
2126 if (obj->
at(0).second) {
2133 std::vector<int64_t> shape;
2153 LOG_RUNTIME(
"Fail convert empty string to tensor!");
2155 to = torch::from_blob((
void *) from.c_str(),{(int64_t) from.size()}, at::ScalarType::Char).clone().toType(
toTorchType(type));
2160 LOG_RUNTIME(
"Fail convert empty string to tensor!");
2162 if (
sizeof (
wchar_t) ==
sizeof (int32_t)) {
2163 to = torch::from_blob((
void *) from.c_str(),{(int64_t) from.size()}, torch::Dtype::Int).clone().toType(
toTorchType(type));
2164 }
else if (
sizeof (
wchar_t) ==
sizeof (int16_t)) {
2165 to = torch::from_blob((
void *) from.c_str(),{(int64_t) from.size()}, torch::Dtype::Short).clone().toType(
toTorchType(type));
2167 LOG_RUNTIME(
"Unsupport wchar_t size '%d'!!!", (
int)
sizeof (
wchar_t));
2175 std::vector<Index> dims;
2176 if (index ==
nullptr) {
2178 dims.push_back(
Index(0));
2182 int64_t pos = index->size();
2183 if (pos == from.dim()) {
2184 at::ScalarType torch_type;
2185 switch (
sizeof (to[0])) {
2187 torch_type = at::ScalarType::Char;
2190 torch_type = at::ScalarType::Short;
2193 torch_type = at::ScalarType::Int;
2196 LOG_RUNTIME(
"Unsupported char size! %d", (
int)
sizeof (to[0]));
2198 for (
int i = 0; i < from.size(pos - 1); i++) {
2199 (*index)[pos - 1] = i;
2200 to += from.index(*index).toType(torch_type).item<
int>();
2203 index->push_back(0);
2204 for (int64_t i = 0; i < from.size(pos - 1); i++) {
2205 (*index)[pos - 1] = i;
2229 std::vector<Index> dims;
2230 if (index ==
nullptr) {
2231 dims.push_back(
Index(0));
2235 int64_t pos = index->size();
2236 if (pos == from.dim()) {
2237 for (
int i = 0; i < from.size(pos - 1); i++) {
2238 (*index)[pos - 1] = i;
2242 index->push_back(0);
2243 for (int64_t i = 0; i < from.size(pos - 1); i++) {
2244 (*index)[pos - 1] = i;
2307 ObjPtr temp = iter->IteratorNext(std::numeric_limits<int64_t>::max());
2309 for (
auto &elem : *temp) {
2349 if (
sizeof (
wchar_t) == 4) {
2352 ASSERT(
sizeof (
wchar_t) == 2);
2377 if ((char_val < 0 && char_val < std::numeric_limits<char>::min()) ||
2378 (char_val > std::numeric_limits<uint8_t>::max())) {
2379 LOG_ERROR(
"Single char overflow! %ld", char_val);
2383 m_value.assign(1,
static_cast<char> (char_val));
2386 m_value.assign(1,
static_cast<uint8_t
> (char_val));
2388 m_var = std::monostate();
2404 if ((char_val < std::numeric_limits<wchar_t>::min()) ||
2405 (char_val > std::numeric_limits<wchar_t>::max())) {
2406 LOG_ERROR(
"Single wchar_t overflow! %ld", char_val);
2408 m_string.assign(1,
static_cast<wchar_t> (char_val));
2409 m_var = std::monostate();
2412 ASSERT(
sizeof (
wchar_t) == 2 ||
sizeof (
wchar_t) == 4);
2427 LOG_RUNTIME(
"Convert tensor to rational support for scalar only!");
2431 m_var = std::monostate();
2443 m_var = std::monostate();
2447 m_var = std::monostate();
2469 LOG_RUNTIME(
"Fail convert empty dictionary to tensor!");
2477 for (
int i = 0; i <
size(); i++) {
2478 at(i).second->toType_(type);
2485 at::ScalarType summary_torch_type =
toTorchType(
static_cast<ObjType> (summary_type));
2486 if (
at(0).second->is_scalar()) {
2487 if (
at(0).second->is_integral()) {
2488 *
m_tensor = torch::full({1},
at(0).second->GetValueAsInteger(), summary_torch_type);
2490 if (!
at(0).second->is_floating()) {
2491 ASSERT(
at(0).second->is_floating());
2493 *
m_tensor = torch::full({1},
at(0).second->GetValueAsNumber(), summary_torch_type);
2496 *
m_tensor =
at(0).second->m_tensor->toType(summary_torch_type);
2499 for (
int i = 1; i <
size(); i++) {
2503 LOG_RUNTIME(
"Fail convert nullptr to tensor at index %d!", i);
2506 torch::Tensor temp_tensor;
2507 if (temp->is_scalar()) {
2508 ASSERT(!temp->m_tensor->defined());
2509 if (temp->is_integral()) {
2510 temp_tensor = torch::full({1}, temp->GetValueAsInteger(), summary_torch_type);
2512 if (!temp->is_floating()) {
2513 ASSERT(temp->is_floating());
2515 temp_tensor = torch::full({1}, temp->GetValueAsNumber(), summary_torch_type);
2518 ASSERT(temp->m_tensor->defined());
2519 temp_tensor = temp->m_tensor->toType(summary_torch_type);
2558 result->m_var_type_fixed = type;
2560 std::string func_proto(
result->m_class_name);
2561 func_proto +=
"(...)";
2562 func_proto +=
result->m_class_name;
2563 func_proto +=
":-{ }";
2571 result->m_is_const =
true;
2572 result->m_var_is_init =
true;
2579 if (args.
empty() || !args[0].second) {
2610 for (
int i = 1; i < args.
size(); i++) {
2615 for (
int i = 1; i < args.
size(); i++) {
2618 }
else if (args.
size() == 1) {
2621 result->m_is_const =
false;
2642 if (args.
size() == 1) {
2644 result->m_is_const =
false;
2673 ObjPtr first_dim =
result->m_dimensions &&
result->m_dimensions->size() ?
result->m_dimensions->at(0).second :
nullptr;
2675 bool to_scalar =
false;
2677 if (!first_dim || (first_dim->is_integral() && first_dim->GetValueAsInteger() == 0)) {
2680 if (args.
size() > 2) {
2686 result->m_var =
static_cast<int64_t
> (value);
2687 result->m_var_is_init =
true;
2688 result->m_tensor->reset();
2689 result->m_dimensions =
nullptr;
2696 if (args.
size() == 2) {
2718 for (
int i = 1; i < args.
size(); i++) {
2722 LOG_RUNTIME(
"There is no previous item to repeat!");
2724 if (i + 1 != args.
size()) {
2727 if (!
result->m_dimensions || !
result->m_dimensions->size()) {
2730 int64_t full_size = 1;
2731 for (
int j = 0; j <
result->m_dimensions->size(); j++) {
2732 if (!
result->m_dimensions->at(i).second->is_integral()) {
2735 full_size *=
result->m_dimensions->at(i).second->GetValueAsInteger();
2737 if (full_size <= 0) {
2738 LOG_RUNTIME(
"Items count error for all dimensions!");
2741 for (int64_t j =
result->size(); j < full_size; j++) {
2748 prev = args[i].second;
2763 if (
result->is_scalar()) {
2765 }
else if (
result->size() == 1) {
2767 if (
result->is_arithmetic_type()) {
2768 if (
result->is_integral()) {
2769 result->m_var =
static_cast<int64_t
> (
result->at(0).second->GetValueAsInteger());
2770 }
else if (
result->is_floating()) {
2771 result->m_var =
static_cast<double> (
result->at(0).second->GetValueAsNumber());
2782 result->m_dimensions.reset();
2783 result->m_tensor->reset();
2786 LOG_RUNTIME(
"Conversion to scalar is not possible!");
2794 if (
result->m_dimensions->size() != 1) {
2797 if (!first_dim->is_any_size()) {
2798 result->resize_(first_dim->GetValueAsInteger(),
nullptr);
2802 if (
result->m_dimensions->size() != 1 || !
result->m_dimensions->at(0).second->is_any_size()) {
2803 std::vector<int64_t> dims;
2804 for (
int i = 0; i <
result->m_dimensions->size(); i++) {
2806 if (ind.is_integer()) {
2807 dims.push_back(ind.integer().expect_int());
2808 }
else if (ind.is_boolean()) {
2809 dims.push_back(ind.boolean());
2834 for (
int i = 1; i < args.
size(); i++) {
2835 result->push_back(args[i].second, args.
name(i));
2837 result->m_var_is_init =
true;
2922 for (
int i = 0; i <
result->size(); i++) {
2923 if (!(*
result)[i].second) {
2942 int64_t val_int = 0;
2944 std::string enum_name;
2946 for (
int i = 1; i < args.
size(); i++) {
2947 if (args.
name(i).empty()) {
2954 enum_name = args.
name(i);
2956 if (args[i].second && (args[i].second->
is_integral())) {
2958 }
else if (!args[i].second || !args[i].second->
is_none_type()) {
2959 LOG_RUNTIME(
"Field value '%s' %d must integer type!", args.
name(i).c_str(), i);
2964 LOG_RUNTIME(
"Field value '%s' at index %d already exists!", enum_name.c_str(), i);
2969 enum_value->m_var_type_fixed = enum_value->m_var_type_current;
2970 enum_value->m_is_const =
true;
2971 result->push_back(enum_value, enum_name);
2975 result->m_is_const =
true;
2976 result->m_var_is_init =
true;
3049 ASSERT(m_iter_obj->m_iter_range_value);
3051 ObjPtr value = m_iter_obj->m_iter_range_value;
3052 ObjPtr stop = m_iter_obj->at(
"stop").second;
3059 int up_direction =
step->op_compare(*
zero);
3062 if (up_direction < 0) {
3063 if (value->op_compare(*stop) > 0) {
3070 if (value->op_compare(*stop) < 0) {
3079 }
else if (m_iter_obj->is_indexing()) {
3080 result = (*(*this)).second;
3085 }
else if (count < 0) {
3088 result->m_var_is_init =
true;
3092 ASSERT(m_iter_obj->m_iter_range_value);
3094 ObjPtr value = m_iter_obj->m_iter_range_value;
3095 ObjPtr stop = m_iter_obj->at(
"stop").second;
3102 int up_direction =
step->op_compare(*
zero);
3105 bool next_value =
true;
3106 for (int64_t i = 0; i < -count; i++) {
3109 if (up_direction < 0) {
3110 if (value->op_compare(*stop) < 0) {
3113 result->push_back(value->Clone());
3117 if (value->op_compare(*stop) > 0) {
3120 result->push_back(value->Clone());
3130 }
else if (m_iter_obj->is_indexing()) {
3132 for (int64_t i = 0; i < -count; i++) {
3133 if (*
this != this->end()) {
3134 result->push_back(*(*
this));
3148 result->m_var_is_init =
true;
3152 ASSERT(m_iter_obj->m_iter_range_value);
3154 ObjPtr value = m_iter_obj->m_iter_range_value;
3155 ObjPtr stop = m_iter_obj->at(
"stop").second;
3162 int up_direction =
step->op_compare(*
zero);
3165 if (up_direction < 0) {
3166 while (value->op_compare(*stop) > 0) {
3167 result->push_back(value->Clone());
3171 while (value->op_compare(*stop) < 0) {
3172 result->push_back(value->Clone());
3178 }
else if (m_iter_obj->is_indexing()) {
3179 while (*
this != this->end() &&
result->size() < count) {
3180 result->push_back(*(*
this));
3199 result->m_iterator = std::make_shared<Iterator < Obj >> (
shared(), filter);
3201 ASSERT(!
result->m_iterator->m_iter_obj->m_iter_range_value);
3204 ASSERT(
result->m_iterator->m_iter_obj->m_iter_range_value);
3218 if (!args || args->
size() == 0) {
3219 result->m_iterator = std::make_shared<Iterator < Obj >> (
shared());
3220 }
else if (args->
size() == 1 && args->
at(0).second && args->
at(0).second->is_string_type()) {
3222 }
else if (args->
size() >= 1 && args->
at(0).second && args->
at(0).second->is_function_type()) {
3232 ASSERT(!
result->m_iterator->m_iter_obj->m_iter_range_value);
3235 ASSERT(
result->m_iterator->m_iter_obj->m_iter_range_value);
3268 int up_direction =
step->op_compare(*
zero);
3271 if (up_direction < 0) {
3272 if (value->op_compare(*stop) > 0) {
3273 return value->Clone();
3276 if (value->op_compare(*stop) < 0) {
3277 return value->Clone();
3283 }
else if (
m_iterator->m_iter_obj->is_indexing()) {
3291 LOG_RUNTIME(
"Method available an iterator only!");
3297 m_iterator->m_iter_obj->m_iter_range_value =
m_iterator->m_iter_obj->at(
"start").second->toType(summary_type);
3298 }
else if (
m_iterator->m_iter_obj->is_indexing()) {
3308 LOG_RUNTIME(
"Method available an iterator only!");
3319 static const char * SYS__NAME__ =
"__name__";
3320 static const char * SYS__FULL_NAME__ =
"__full_name__";
3321 static const char * SYS__TYPE__ =
"__type__";
3322 static const char * SYS__TYPE_FIXED__ =
"__type_fixed__";
3323 static const char * SYS__MOULE__ =
"__module__";
3324 static const char * SYS__CLASS__ =
"__class__";
3325 static const char * SYS__BASE__ =
"__base__";
3326 static const char * SYS__SIZE__ =
"__size__";
3328 static const char * SYS__DOC__ =
"__doc__";
3329 static const char * SYS__STR__ =
"__str__";
3330 static const char * SYS__SOURCE__ =
"__source__";
3332 static const char * MODULE__MD5__ =
"__md5__";
3333 static const char * MODULE__FILE__ =
"__file__";
3334 static const char * MODULE__TIMESTAMP__ =
"__timestamp__";
3335 static const char * MODULE__MAIN__ =
"__main__";
3336 static const char * MODULE__VERSION__ =
"__version__";
3340 }
else if (!obj || !obj->
is_init()) {
3342 }
else if (name.compare(SYS__FULL_NAME__) == 0) {
3344 }
else if (name.compare(SYS__NAME__) == 0) {
3346 }
else if (name.compare(SYS__MOULE__) == 0) {
3348 }
else if (name.compare(SYS__TYPE__) == 0) {
3350 }
else if (name.compare(SYS__TYPE_FIXED__) == 0) {
3352 }
else if (name.compare(SYS__CLASS__) == 0) {
3354 }
else if (name.compare(SYS__BASE__) == 0) {
3356 }
else if (name.compare(SYS__MOULE__) == 0) {
3358 }
else if (name.compare(SYS__SIZE__) == 0) {
3362 }
else if (name.compare(SYS__STR__) == 0) {
3387 std::string message(
"Internal field '");
3389 message +=
"' not exist!";
3415 new_type =
static_cast<ObjType> (
static_cast<uint8_t
> (type) + 1);
3444 switch (val.dtype().toScalarType()) {
3445 case at::ScalarType::Bool:
3447 case at::ScalarType::Half:
3448 case at::ScalarType::BFloat16:
3450 case at::ScalarType::Float:
3452 case at::ScalarType::Double:
3454 case at::ScalarType::Byte:
3455 case at::ScalarType::Char:
3456 case at::ScalarType::QInt8:
3457 case at::ScalarType::QUInt8:
3458 case at::ScalarType::QUInt4x2:
3460 case at::ScalarType::Short:
3462 case at::ScalarType::Int:
3463 case at::ScalarType::QInt32:
3465 case at::ScalarType::Long:
3467 case at::ScalarType::ComplexHalf:
3469 case at::ScalarType::ComplexFloat:
3471 case at::ScalarType::ComplexDouble:
3474 LOG_RUNTIME(
"Fail tensor type %s", val.toString().c_str());
3502 return Index(c10::nullopt);
3504 std::vector<int64_t> temp = obj.
toIntVector(
true);
3506 torch::Tensor tensor = torch::from_blob(temp.data(), temp.size(), torch::Dtype::Long);
3507 return Index(tensor.clone());
3509 return Index(c10::nullopt);
3528 return Index(at::indexing::Ellipsis);
3538 if (value->is_none_type()) {
3543 ASSERT(!value->m_class_name.empty());
3548 value->CloneDataTo(*
this);
3549 value->ClonePropTo(*
this);
3556 if (value->empty()) {
3557 m_var = std::monostate();
3570 ASSERT(std::holds_alternative<std::monostate>(
m_var));
3573 if (value->is_scalar()) {
3574 m_var = value->m_var;
3583 *
m_tensor = value->m_tensor->clone();
3591 if (
is_scalar() && value->is_scalar()) {
3595 if (std::holds_alternative<int64_t>(
m_var)) {
3596 m_var = value->GetValueAsInteger();
3597 }
else if (std::holds_alternative<bool *>(
m_var)) {
3599 *std::get<bool *>(
m_var) = value->GetValueAsInteger();
3605 if (std::holds_alternative<int64_t>(
m_var)) {
3606 m_var = value->GetValueAsInteger();
3607 }
else if (std::holds_alternative<int8_t *>(
m_var)) {
3609 *std::get<int8_t *>(
m_var) =
static_cast<int8_t
> (value->GetValueAsInteger());
3614 if (std::holds_alternative<int64_t>(
m_var)) {
3615 m_var = value->GetValueAsInteger();
3616 }
else if (std::holds_alternative<int16_t *>(
m_var)) {
3618 *std::get<int16_t *>(
m_var) =
static_cast<int16_t
> (value->GetValueAsInteger());
3623 if (std::holds_alternative<int64_t>(
m_var)) {
3624 m_var = value->GetValueAsInteger();
3625 }
else if (std::holds_alternative<int32_t *>(
m_var)) {
3627 *std::get<int32_t *>(
m_var) =
static_cast<int32_t
> (value->GetValueAsInteger());
3632 if (std::holds_alternative<int64_t>(
m_var)) {
3633 m_var = value->GetValueAsInteger();
3634 }
else if (std::holds_alternative<int64_t *>(
m_var)) {
3636 *std::get<int64_t *>(
m_var) = value->GetValueAsInteger();
3641 if (std::holds_alternative<double>(
m_var)) {
3642 m_var = value->GetValueAsNumber();
3643 }
else if (std::holds_alternative<float *>(
m_var)) {
3645 *std::get<float *>(
m_var) =
static_cast<float> (value->GetValueAsNumber());
3650 if (std::holds_alternative<double>(
m_var)) {
3651 m_var = value->GetValueAsNumber();
3652 }
else if (std::holds_alternative<double *>(
m_var)) {
3654 *std::get<double *>(
m_var) = value->GetValueAsNumber();
3661 }
else if (
is_scalar() && !value->is_scalar()) {
3663 m_var = std::monostate();
3666 *
m_tensor = value->m_tensor->clone();
3668 }
else if (!
is_scalar() && value->is_scalar()) {
3671 if (value->is_integral()) {
3672 m_tensor->set_(torch::scalar_tensor(value->GetValueAsInteger(),
m_tensor->scalar_type()));
3675 m_tensor->set_(torch::scalar_tensor(value->GetValueAsNumber(),
m_tensor->scalar_type()));
3681 if (
m_tensor->sizes().equals(value->m_tensor->sizes())) {
3701 SetValue_(value->GetValueAsStringWide());
3709 value->CloneDataTo(*
this);
3710 value->ClonePropTo(*
this);
3730 value->CloneDataTo(*
this);
3731 value->ClonePropTo(*
this);
3744 m_var = std::monostate();
3768 ASSERT(std::holds_alternative<void *>(value->m_var));
3769 m_var = std::get<void *>(value->m_var);
3782 if (value >= std::numeric_limits<float>::min() && value < std::numeric_limits<float>::max()) {
3789 if (value == 1 || value == 0) {
3791 }
else if (value < std::numeric_limits<int32_t>::min() || value > std::numeric_limits<int32_t>::max()) {
3792 ASSERT(value > std::numeric_limits<int64_t>::min());
3793 ASSERT(value < std::numeric_limits<int64_t>::max());
3795 }
else if (value < std::numeric_limits<int16_t>::min() || value > std::numeric_limits<int16_t>::max()) {
3797 }
else if (value < std::numeric_limits<int8_t>::min() ||
3798 value > std::numeric_limits<int8_t>::max()) {
3803 return type_default;
3807#pragma message "Переделать сравнение"
3817 if (elem->op_class_test(
name, ctx)) {
3822 bool has_error =
false;
3839 return !class_name.empty() && class_name.compare(
name) == 0;
static ObjPtr Call(Context *runner, Obj &obj, TermPtr &term)
std::pair< std::string, Type > PairType
virtual int64_t index(const std::string_view field_name)
virtual int64_t resize(int64_t new_size, const Type fill, const std::string &name="")
PairType & push_back(const PairType &p)
std::enable_if< std::is_integral< I >::value &&!std::is_pointer< I >::value, constPairType & >::type operator[](I index)
static PairType pair(const Type value, const std::string name="")
virtual PairType & at(const int64_t index)
virtual void erase(const int64_t index)
IntAny(const ObjPtr value, ObjType type)
const IterPairType & data()
Dict< T >::PairType IterPairType
ObjPtr read_and_next(int64_t count)
static ObjPtr ConstructorSimpleType_(Context *ctx, Obj &args)
bool is_string_char_type() const
ObjPtr op_set_index(ObjPtr index, ObjPtr value)
ObjPtr op_bit_and_set(Obj &obj, bool strong)
bool is_function_type() const
virtual ObjPtr IteratorReset()
const ObjPtr index_get(const std::vector< Index > &index) const
Obj::iterator find(const std::string name)
bool is_simple_type() const
static ObjPtr ConstructorStub_(Context *ctx, Obj &args)
ObjPtr op_call(ObjPtr args)
virtual ObjPtr IteratorMake(const char *filter=nullptr, bool check_create=true)
ObjPtr operator%=(ObjPtr obj)
virtual int64_t size() const
int op_compare(Obj &value)
void ClonePropTo(Obj &clone) const
virtual bool empty() const
static ObjPtr CreateString(const std::string_view str, Sync *sync=nullptr)
std::variant< std::monostate, int64_t, double, void *, bool *, int8_t *, int16_t *, int32_t *, int64_t *, float *, double *, NativeData, std::string, TermPtr, Iterator< Obj > > m_var
static ObjPtr ConstructorDictionary_(Context *ctx, Obj &args)
ObjPtr toType(ObjType type) const
size_t ItemValueCount(ObjPtr &find, bool strong)
Obj(ObjType type=ObjType::None, const char *var_name=nullptr, TermPtr func_proto=nullptr, ObjType fixed=ObjType::None, bool init=false, Sync *sync=nullptr)
std::vector< ObjPtr > m_class_parents
Родительские классы (типы)
virtual ObjPtr IteratorData()
ObjType m_var_type_current
Текущий тип значения объекта
Rational m_rational
Содержит дробь из длинных чисел
bool is_none_type() const
ObjPtr op_div_ceil_(ObjPtr obj)
std::shared_ptr< Rational > GetValueAsRational() const
bool m_var_is_init
Содержит ли объект корректное значение ???
bool is_string_type() const
double GetValueAsNumber() const
std::string GetValueAsString() const
void erase(const size_t from, const size_t to) override
std::enable_if< std::is_same< T, bool >::value, void >::type SetValue_(bool value)
ObjPtr index_set_(const std::vector< Index > &index, const ObjPtr value)
std::string m_var_name
Имя переменной, в которой хранится объект
ObjPtr m_dimensions
Размерности для ObjType::Type.
static ObjPtr CreateBaseType(ObjType type)
bool GetValueAsBoolean() const
std::vector< int64_t > toIntVector(bool raise=true) const
std::shared_ptr< Iterator< Obj > > m_iterator
Итератор для данных
bool is_tensor_type() const
ObjPtr operator/=(ObjPtr obj)
bool is_arithmetic_type() const
ObjPtr Clone(const char *new_name=nullptr) const
void testResultIntegralType(ObjType type, bool upscalint)
bool is_class_type() const
std::string toString(bool deep=true) const
static ObjPtr CreateDict(Sync *sync=nullptr)
static ObjPtr ConstructorClass_(Context *ctx, Obj &args)
void testConvertType(ObjType type)
ObjPtr operator+=(ObjPtr obj)
std::string m_class_name
Имя класса объекта (у базовых типов отсуствует)
static ObjPtr ConstructorStruct_(Context *ctx, Obj &args)
bool exist(ObjPtr &find, bool strong)
ObjPtr operator-=(ObjPtr obj)
std::wstring GetValueAsStringWide() const
const TermPtr m_prototype
Описание прототипа функции (или данных)
void toType_(ObjType type)
static ObjPtr ConstructorEnum_(Context *ctx, Obj &args)
static ObjPtr CreateClass(std::string name)
bool op_equal(ObjPtr value)
static std::enable_if< std::is_same< T, std::string >::value||std::is_same< T, const char * >::value, ObjPtr >::type CreateValue(T value, Sync *sync=nullptr)
static ObjPtr ConstructorNative_(Context *ctx, Obj &args)
const std::string & name(const int64_t index) const override
int64_t resize_(int64_t size, ObjPtr fill, const std::string="")
std::shared_ptr< torch::Tensor > m_tensor
Содержит только размерные тензоры (скаляры хранятся в поле m_pointer и не создают m_tensor->defined())
bool is_dictionary_type() const
ObjPtr operator*=(ObjPtr obj)
std::wstring m_string
Содержит строку широких символов
bool op_accurate(ObjPtr obj)
ObjPtr m_iter_range_value
std::string m_value
Содержит байтовую строку или байтовый массив с данными для представления в нативном виде (Struct,...
bool op_duck_test(ObjPtr obj, bool strong)
static bool op_duck_test_prop(Obj *base, Obj *value, bool strong)
int64_t GetValueAsInteger() const
TermPtr m_sequence
Последовательно распарсенных команд для выполнения
Dict< Obj >::PairType & at(const std::string name) const
PairType & push_back(const PairType &p)
ObjType m_var_type_fixed
Максимальный размер для арифметических типов, который задается разработчиком
static ObjPtr BaseTypeConstructor(Context *ctx, Obj &in)
static ObjPtr CreateNone(Sync *sync=nullptr)
static ObjPtr CreateType(ObjType type, ObjType fixed=ObjType::None, bool is_init=false, Sync *sync=nullptr)
void CloneDataTo(Obj &clone) const
bool is_string_wide_type() const
void dump_dict_(std::string &str, bool deep=true) const
bool is_bool_type() const
bool op_class_test(ObjPtr obj, Context *ctx) const
ObjPtr op_pow_(ObjPtr obj)
virtual ObjPtr IteratorNext(int64_t count)
std::string GetAsString() const
Rational & op_pow_(const Rational &rational)
Rational & op_div_ceil_(Rational &rational)
bool op_equal(const Rational &rational) const
std::shared_ptr< Rational > clone() const
int64_t GetAsBoolean() const
int op_compare(const Rational &rational) const
Rational & set_(const int64_t value)
int64_t GetAsInteger() const
double GetAsNumber() const
static ObjType BaseTypeFromString(RunTime *rt, const std::string_view text, bool *has_error=nullptr)
ObjType getSummaryTensorType(Obj *obj, ObjType start=ObjType::None)
#define LOG_RUNTIME(format,...)
#define ASSERT(condition)
bool isFloatingType(ObjType t)
bool isStringChar(ObjType t)
bool isContainsType(ObjType generic, ObjType type)
std::wstring utf8_decode(const std::string str)
bool isStringWide(ObjType t)
bool isArithmeticType(ObjType t)
std::string ExtractName(std::string name)
newlang::ObjPtr clone(newlang::Context *ctx, newlang::Obj &in)
bool isIntegralType(ObjType t, bool includeBool)
bool isGenericType(ObjType t)
int64_t ConcatData(Obj *dest, Obj &src, ConcatMode mode=ConcatMode::Error)
bool isDictionary(ObjType t)
bool isLocalType(ObjType)
std::shared_ptr< Term > TermPtr
std::shared_ptr< Obj > ObjPtr
bool isSystemName(const std::string_view name)
std::string ExtractModuleName(const std::string_view name)
T repeat(T str, const std::size_t n)
bool isSimpleType(ObjType t)
ObjPtr CheckSystemField(const Obj *obj, const std::string name)
ObjType typeFromLimit(int64_t value, ObjType type_default=ObjType::Int64)
std::string NormalizeName(const std::string_view name)
const char * toString(TermID type)
std::vector< int64_t > TensorShapeFromDict(const Obj *obj)
std::string utf8_encode(const std::wstring wstr)
std::string & trim(std::string &s, const char *t=ws)
bool canCast(const ObjType from, const ObjType to)
ObjType fromTorchType(at::ScalarType t)
void ConvertStringToTensor(const std::string &from, torch::Tensor &to, ObjType type=ObjType::None)
at::indexing::Slice toSlice(Obj &obj)
ObjPtr CreateTensor(torch::Tensor tensor)
std::string DimToString(const Dimension dim)
void TensorToString_(const torch::Tensor &tensor, c10::IntArrayRef shape, std::vector< Index > &ind, const int64_t pos, std::stringstream &str)
void ConvertTensorToStringTemplate(const torch::Tensor &from, T &to, std::vector< Index > *index)
void ShapeFromDict(const Obj *obj, std::vector< int64_t > &shape)
torch::ScalarType toTorchType(ObjType t)
void ConvertTensorToString(const torch::Tensor &from, std::string &to, std::vector< Index > *index=nullptr)
std::string IndexToString(const std::vector< Index > &index)
std::ostream & operator<<(std::ostream &out, newlang::Obj &var)
void ConvertTensorToDict(const torch::Tensor &from, Obj &to, std::vector< Index > *index=nullptr)
std::string TensorToString(const torch::Tensor &tensor)
ObjType getSummaryTensorType(Obj *obj, ObjType start=ObjType::None)
ObjType GetTensorType(torch::Tensor &val)
at::indexing::TensorIndex Index
at::IntArrayRef Dimension
#define NL_CHECK(cond, format,...)