NewLang Project
Yet another programming language
object.cpp
1
2#include "warning_push.h"
3#include <torch/torch.h>
4#include <ATen/ATen.h>
5#include "warning_pop.h"
6
7#include <codecvt>
8
9#include "object.h"
10#include "context.h"
11#include "runtime.h"
12
13using namespace newlang;
14
15
16std::string TensorToString(const torch::Tensor &tensor);
17ObjType fromTorchType(at::ScalarType t);
18
19/* Helper for converting a dictionary into a tensor: derives the common data type of all elements */
20ObjType getSummaryTensorType(Obj *obj, ObjType start = ObjType::None);
21void ConvertStringToTensor(const std::string &from, torch::Tensor &to, ObjType type = ObjType::None);
22void ConvertStringToTensor(const std::wstring &from, torch::Tensor &to, ObjType type = ObjType::None);
23void ConvertTensorToString(const torch::Tensor &from, std::string &to, std::vector<Index> *index = nullptr);
24void ConvertTensorToString(const torch::Tensor &from, std::wstring &to, std::vector<Index> *index = nullptr);
25void ConvertTensorToDict(const torch::Tensor &from, Obj &to, std::vector<Index> *index = nullptr);
26
27ObjType GetTensorType(torch::Tensor & val);
28
29at::indexing::Slice toSlice(Obj &obj);
30
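// Converts a NewLang range object with "start", "stop" and "step" members into a torch
// indexing Slice; all three fields must hold integer values.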
31at::indexing::Slice toSlice(Obj &obj) {
32 NL_CHECK(obj.is_range(), "Convert to slice supported for range only!");
33
34 ASSERT(obj.size() == 3
35 && obj.Dict<Obj>::at("start").second
36 && obj.Dict<Obj>::at("stop").second
37 && obj.Dict<Obj>::at("step").second);
38
39 NL_CHECK(obj.Dict<Obj>::at("start").second->is_integer(), "Slice value start support integer type only!");
40 NL_CHECK(obj.Dict<Obj>::at("stop").second->is_integer(), "Slice value stop support integer type only!");
41 NL_CHECK(obj.Dict<Obj>::at("step").second->is_integer(), "Slice value step support integer type only!");
42
43 return at::indexing::Slice(
44 obj.Dict<Obj>::at("start").second->GetValueAsInteger(),
45 obj.Dict<Obj>::at("stop").second->GetValueAsInteger(),
46 obj.Dict<Obj>::at("step").second->GetValueAsInteger());
47}
48
49
50Index toIndex(Obj &obj);
51
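// Wraps a torch::Tensor in an Obj: a zero-dimensional tensor is stored as a scalar value,
// anything larger keeps the tensor storage and is marked as initialized.
// For example (a sketch, not from this file): CreateTensor(torch::zeros({3})) would yield
// a three-element Float32 tensor object.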
52ObjPtr CreateTensor(torch::Tensor tensor) {
53 ObjType check_type = GetTensorType(tensor);
54 if (!isTensor(check_type)) {
 55 LOG_RUNTIME("Unsupported torch type %s (%d)!", at::toString(tensor.dtype().toScalarType()), (int) tensor.dtype().toScalarType());
 56 }
 57 ObjPtr result;
 58 if (tensor.dim() == 0) {
59 if (tensor.is_floating_point()) {
60 result = Obj::CreateValue(tensor.item<double>(), check_type);
61 } else {
62 ASSERT(!tensor.is_complex());
63 result = Obj::CreateValue(tensor.item<int64_t>(), check_type);
64 }
65 } else {
66
67 result = Obj::CreateType(check_type);
68 ASSERT(result->m_tensor);
69 *result->m_tensor = tensor;
70 result->m_var_is_init = true;
71 }
72 return result;
73}
74
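// Maps a NewLang object type onto the matching torch scalar type; unsupported types raise a runtime error.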
75torch::ScalarType toTorchType(ObjType t) {
76 switch (t) {
77 case ObjType::Bool:
78 return at::ScalarType::Bool;
79 case ObjType::Int8:
80 case ObjType::Byte:
81 case ObjType::Char:
82 return at::ScalarType::Char;
83 case ObjType::Int16:
84 case ObjType::Word:
85 return at::ScalarType::Short;
86 case ObjType::Int32:
87 case ObjType::DWord:
88 return at::ScalarType::Int;
89 case ObjType::Int64:
90 case ObjType::DWord64:
91 case ObjType::Integer:
92 return at::ScalarType::Long;
93 case ObjType::Float32:
94 case ObjType::Single:
95 case ObjType::Tensor:
96 return at::ScalarType::Float;
97 case ObjType::Float64:
98 case ObjType::Double:
99 case ObjType::Number:
100 return at::ScalarType::Double;
101 case ObjType::Complex16:
102 return at::ScalarType::ComplexHalf;
103 case ObjType::Complex32:
104 return at::ScalarType::ComplexFloat;
105 case ObjType::Complex64:
106 case ObjType::Complex:
107 return at::ScalarType::ComplexDouble;
108 }
109 LOG_RUNTIME("Can`t convert type '%s' to torch scalar type!", toString(t));
110}
111
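// Reverse mapping: converts a torch scalar type into the closest NewLang object type.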
112ObjType fromTorchType(torch::Dtype t) {
113 switch (t) {
114 case at::ScalarType::Bool:
115 return ObjType::Bool;
116 case at::ScalarType::Byte:
117 case at::ScalarType::Char:
118 case at::ScalarType::QInt8:
119 case at::ScalarType::QUInt8:
120 return ObjType::Int8;
121 case at::ScalarType::Short:
122 return ObjType::Int16;
123 case at::ScalarType::Int:
124 case at::ScalarType::QInt32:
125 return ObjType::Int32;
126 case at::ScalarType::Long:
127 return ObjType::Int64;
128 case at::ScalarType::BFloat16:
129 case at::ScalarType::Half:
130 return ObjType::Float16;
131 case at::ScalarType::Float:
132 return ObjType::Float32;
133 case at::ScalarType::Double:
134 return ObjType::Float64;
135 case at::ScalarType::ComplexHalf:
136 return ObjType::Complex16;
137 case at::ScalarType::ComplexFloat:
138 return ObjType::Complex32;
139 case at::ScalarType::ComplexDouble:
140 return ObjType::Complex64;
141 }
142 LOG_RUNTIME("Can`t convert type '%s' to ObjType!", at::toString(t));
143}
144
145std::string DimToString(const Dimension dim) {
146 std::stringstream ss;
147 ss << dim;
148
149 return ss.str();
150}
151
152std::string IndexToString(const std::vector<Index> &index) {
153 std::stringstream ss;
154
155 ss << "[";
156 for (int i = 0; i < index.size(); i++) {
157 ss << index[i];
158 }
159 ss << "]";
160
161 return ss.str();
162}
163
164std::ostream &operator<<(std::ostream &out, newlang::Obj &var) {
165 out << var.toString().c_str();
166 return out;
167}
168
169std::ostream &operator<<(std::ostream &out, newlang::ObjPtr var) {
170 if (var) {
171 out << var->toString().c_str();
172 } else {
173 out << "<NOT OBJECT>";
174 }
175 return out;
176}
177
178Obj::Obj(ObjType type, const char *var_name, TermPtr func_proto, ObjType fixed, bool init, Sync *sync) :
179m_var_type_current(type), m_var_name(var_name ? var_name : ""), m_prototype(func_proto) {
180 m_is_const = false;
181 m_check_args = false;
182 m_dimensions = nullptr;
183 m_is_reference = false;
184 m_var_type_fixed = fixed;
185 m_var_is_init = init;
186 m_is_const = false;
187 m_var = std::monostate();
188 m_tensor = std::make_shared<torch::Tensor>();
189 m_sync = sync;
190 m_ctx = nullptr;
191}
192
193bool Obj::empty() const {
194 if (is_none_type()) {
195 return true;
196 } else if (m_var_type_current == ObjType::StrChar) {
197 return !m_var_is_init || m_value.empty();
198 } else if (m_var_type_current == ObjType::StrWide) {
199 return !m_var_is_init || m_string.empty();
200 } else if (is_tensor_type()) {
201 return !m_var_is_init || at::_is_zerotensor(*m_tensor);
202 }
203 return Dict<Obj>::empty();
204}
205
206void Obj::erase(const size_t from, const size_t to) {
207 if (!is_indexing()) {
208 LOG_RUNTIME("Operator erase(from, to) for object type %s not implemented!", newlang::toString(m_var_type_current));
209 }
210 if (is_tensor_type()) {
211 // For expand operator (val, tensor := ... tensor)
212 int64_t new_size = m_tensor->size(0) - 1;
213 if ((from == 0 && to == 1) || (from == 0 && to == 0)) {
214 if (new_size > 0) {
215 std::vector<int64_t> sizes(1);
216 sizes[0] = new_size + 1;
217
218 at::Tensor ind = torch::arange(sizes[0] - new_size - 1, sizes[0] - 1, at::ScalarType::Long);
219 at::Tensor any = torch::zeros(sizes[0] - new_size, at::ScalarType::Long);
220 // LOG_DEBUG("arange %s %s", TensorToString(ind).c_str(), TensorToString(any).c_str());
221
222 ind = at::cat({any, ind});
223 // LOG_DEBUG("cat %s", TensorToString(ind).c_str());
224
225 // LOG_DEBUG("m_value %s", TensorToString(m_value).c_str());
226 m_tensor->index_copy_(0, ind, m_tensor->clone());
227 // LOG_DEBUG("index_copy_ %s", TensorToString(m_value).c_str());
228
229 sizes[0] = new_size;
230 m_tensor->resize_(at::IntArrayRef(sizes));
231
232 } else {
233 m_tensor->reset();
235 }
236 } else {
237 LOG_RUNTIME("Operator erase(%ld, %ld) for object type %s not implemented!", from, to, newlang::toString(m_var_type_current));
238 }
239 } else {
240 Dict<Obj>::erase(from, to);
241 }
242}
243
244int64_t Obj::size(int64_t dim) const {
245 if (is_tensor_type()) {
246 if (is_scalar()) {
247 if (dim != 0) {
248 LOG_RUNTIME("Scalar has zero dimension!");
249 }
250 return 0;
251 }
252 return m_tensor->size(dim);
253 }
 254 ASSERT(dim == 0);
 255 if (m_var_type_current == ObjType::StrChar) {
 256 return m_value.size();
257 } else if (m_var_type_current == ObjType::StrWide) {
258 return m_string.size();
259 }
260 return Dict::size();
261}
262
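// Resizes strings, dictionaries and one-dimensional tensors. A positive new_size adds or removes
// elements at the end; a negative new_size adds or removes them at the beginning.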
263int64_t Obj::resize_(int64_t new_size, ObjPtr fill, const std::string name) {
264
265 if (is_string_type()) {
266
 267 if (new_size >= 0) {
 268 // Positive size: just change the number of elements by adding to or trimming from the end
 269 if (m_var_type_current == ObjType::StrChar) {
 270 m_value.resize(new_size, ' ');
 271 return m_value.size();
 272 } else if (m_var_type_current == ObjType::StrWide) {
 273 m_string.resize(new_size, L' ');
 274 return m_string.size();
 275 }
 276 } else {
 277 // Negative size: add or remove elements at the beginning
 278 new_size = -new_size;
 279 if (static_cast<int64_t> (size()) > new_size) {
 280 if (m_var_type_current == ObjType::StrChar) {
 281 m_value.erase(0, new_size);
 282 return m_value.size();
 283
 284 } else if (m_var_type_current == ObjType::StrWide) {
 285 m_string.erase(0, new_size);
 286 return m_string.size();
 287 }
 288 } else if (static_cast<int64_t> (size()) < new_size) {
 289 if (m_var_type_current == ObjType::StrChar) {
 290 m_value.insert(0, new_size, ' ');
 291 return m_value.size();
 292
 293 } else if (m_var_type_current == ObjType::StrWide) {
 294 m_string.insert(0, new_size, L' ');
 295 return m_string.size();
 296
 297 }
 298 }
 299 }
300
301 } else if (is_dictionary_type()) {
302 return Dict::resize(new_size, fill ? fill : Obj::CreateNone(), name);
303 } else if (is_tensor_type()) {
304 std::vector<int64_t> sizes;
305 for (int i = 0; i < m_tensor->dim(); i++) {
306 sizes.push_back(m_tensor->size(i));
307 }
308
309 if (sizes.empty()) { // Scalar
310
311 LOG_RUNTIME("Method resize for SCALAR type '%s' not implemented!", newlang::toString(m_var_type_current));
312
313 } else if (new_size == 0 || sizes[0] == new_size) {
314 // Tensor size OK - do nothing
315 } else if (new_size > 0) { // Increase tensor size
316
317 // The size is positive, just change the number of elements by adding or removing the last
318 ASSERT(sizes.size() == 1);
319
320 sizes[0] = new_size;
321 m_tensor->resize_(at::IntArrayRef(sizes));
322
323 } else { // Decrease tensor size
324 // If the size is negative - add or remove elements first
325 new_size = -new_size;
326 if (sizes[0] == new_size) {
327 // Tensor size OK - do nothing
328 } else if (sizes[0] > new_size) {
329
330 ASSERT(sizes.size() == 1);
331
332 at::Tensor ind = torch::arange(sizes[0] - new_size - 1, sizes[0] - 1, at::ScalarType::Long);
333 at::Tensor any = torch::zeros(sizes[0] - new_size, at::ScalarType::Long);
334 // LOG_DEBUG("arange %s %s", TensorToString(ind).c_str(), TensorToString(any).c_str());
335
336 ind = at::cat({any, ind});
337 // LOG_DEBUG("cat %s", TensorToString(ind).c_str());
338
339 // LOG_DEBUG("m_value %s", TensorToString(m_value).c_str());
340 m_tensor->index_copy_(0, ind, m_tensor->clone());
341 // LOG_DEBUG("index_copy_ %s", TensorToString(m_value).c_str());
342
343 sizes[0] = new_size;
344 m_tensor->resize_(at::IntArrayRef(sizes));
345 // LOG_DEBUG("resize_ %s", TensorToString(m_value).c_str());
346
347 } else { // sizes[0] < size
348 ASSERT(sizes.size() == 1);
349
350 *m_tensor = at::cat({torch::zeros(new_size - sizes[0], m_tensor->scalar_type()), *m_tensor});
351 }
352 }
353
354 if (new_size == 0) {
355 m_tensor->reset();
357 }
358 return new_size;
359 }
360 LOG_RUNTIME("Method resize for type '%s' not implemented!", newlang::toString(m_var_type_current));
361}
362
363const Dict<Obj>::PairType & Obj::at(int64_t index) const {
 364 if (m_var_type_current == ObjType::StrChar) {
 365 if (index < static_cast<int64_t> (m_value.size())) {
366 m_str_pair = pair(CreateString(std::string(1, m_value[index])));
367 return m_str_pair;
368 }
369 LOG_RUNTIME("Index '%ld' not exists in byte string '%s'!", index, m_value.c_str());
370 } else if (m_var_type_current == ObjType::StrWide) {
371 if (index < static_cast<int64_t> (m_string.size())) {
372 m_str_pair = pair(CreateString(std::wstring(1, m_string[index])));
373 return m_str_pair;
374 }
375 LOG_RUNTIME("Index '%ld' not exists in byte string '%s'!", index, "WIDE");
376
377 } else if (is_tensor_type()) {
378 ASSERT(!is_scalar());
 379 torch::Tensor t = m_tensor->index({index});
 380 m_str_pair = pair(CreateTensor(t));
 381 return m_str_pair;
382 }
383 return Dict::at(index);
384}
385
386Dict<Obj>::PairType & Obj::at(int64_t index) {
 387 if (m_var_type_current == ObjType::StrChar) {
 388 if (index < static_cast<int64_t> (m_value.size())) {
389 m_str_pair = pair(CreateString(std::string(1, m_value[index])));
390 return m_str_pair;
391 }
392 LOG_RUNTIME("Index '%ld' not exists in byte string '%s'!", index, m_value.c_str());
393 } else if (m_var_type_current == ObjType::StrWide) {
394 if (index < static_cast<int64_t> (m_string.size())) {
395 m_str_pair = pair(CreateString(std::wstring(1, m_string[index])));
396 return m_str_pair;
397 }
398 LOG_RUNTIME("Index '%ld' not exists in byte string '%s'!", index, "WIDE");
399
400 } else if (is_tensor_type()) {
401 ASSERT(!is_scalar());
402 ASSERT(m_tensor->defined());
 403 torch::Tensor t = m_tensor->index({(int) index});
 404 m_str_pair = pair(CreateTensor(t));
 405 return m_str_pair;
406 // }
407 }
408 return Dict::at(index);
409}
410
411const ObjPtr Obj::index_get(const std::vector<Index> &index) const {
 412 if (m_var_type_current == ObjType::StrChar) {
 413 if (index.size() != 1 || !index[0].is_integer()) {
414 LOG_RUNTIME("The index must be an integer value '%s'!", IndexToString(index).c_str());
415 }
416 int64_t pos = index[0].integer().expect_int();
417 if (pos < 0) {
 418 pos = m_value.size() + pos; // Position counted from the end of the string
419 }
420 if (pos < static_cast<int64_t> (m_value.size())) {
421 return CreateString(std::string(1, m_value[pos]));
422 }
 423 LOG_RUNTIME("Index '%s' not exists in byte string '%s'!", IndexToString(index).c_str(), m_value.c_str());
 424 } else if (m_var_type_current == ObjType::StrWide) {
 425 if (index.size() != 1 || !index[0].is_integer()) {
426 LOG_RUNTIME("The index must be an integer value '%s'!", IndexToString(index).c_str());
427 }
428 int64_t pos = index[0].integer().expect_int();
429 if (pos < 0) {
 430 pos = m_string.size() + pos; // Position counted from the end of the string
431 }
432 if (pos < static_cast<int64_t> (m_string.size())) {
433 return CreateString(std::wstring(1, m_string[pos]));
434 }
435 LOG_RUNTIME("Index '%s' not exists in WIDE string '%s'!", IndexToString(index).c_str(), utf8_encode(m_string).c_str());
436
437 } else if (is_tensor_type()) {
438 ASSERT(!is_scalar());
439 ASSERT(m_tensor->defined());
440 torch::Tensor t = m_tensor->index(index);
441 return CreateTensor(t);
442 }
443
444 if (index.size() != 1 || !index[0].is_integer()) {
445 LOG_RUNTIME("The index must be an integer value '%s'!", IndexToString(index).c_str());
446 }
447 return Dict::at(index[0].integer().expect_int()).second;
448}
449
450ObjPtr Obj::index_set_(const std::vector<Index> &index, const ObjPtr value) {
 451 if (m_var_type_current == ObjType::StrChar) {
 452 if (index.size() != 1 || !index[0].is_integer()) {
453 LOG_RUNTIME("The index must be an integer value '%s'!", IndexToString(index).c_str());
454 }
455 int64_t pos = index[0].integer().expect_int();
456 if (pos < 0) {
 457 pos = m_value.size() + pos; // Position counted from the end of the string
458 }
459 if (pos < static_cast<int64_t> (m_value.size())) {
460 m_value.erase(pos, 1);
461 m_value.insert(pos, value->toType(ObjType::StrChar)->m_value);
462 m_var_is_init = true;
463 return shared();
464 }
465 LOG_RUNTIME("Index '%s' not exists in byte string '%s'!", IndexToString(index).c_str(), m_value.c_str());
466 } else if (m_var_type_current == ObjType::StrWide) {
467 if (index.size() != 1 || !index[0].is_integer()) {
468 LOG_RUNTIME("The index must be an integer value '%s'!", IndexToString(index).c_str());
469 }
470 int64_t pos = index[0].integer().expect_int();
471 if (pos < 0) {
 472 pos = m_string.size() + pos; // Position counted from the end of the string
473 }
474 if (pos < static_cast<int64_t> (m_string.size())) {
475 m_string.erase(pos, 1);
476 m_string.insert(pos, value->toType(ObjType::StrWide)->m_string);
477 m_var_is_init = true;
478 return shared();
479 }
480 LOG_RUNTIME("Index '%s' not exists in byte string '%s'!", IndexToString(index).c_str(), "WIDE");
481
482 } else if (is_tensor_type()) {
483 ASSERT(!is_scalar());
484 ASSERT(m_tensor->defined());
485
486 ObjPtr temp = value->toType(fromTorchType(m_tensor->scalar_type()));
487 if (temp->is_scalar()) {
488 if (temp->is_integral()) {
489 m_tensor->index_put_(index, temp->GetValueAsInteger());
490 } else {
491 ASSERT(temp->is_floating());
492 m_tensor->index_put_(index, temp->GetValueAsNumber());
493 }
494 } else {
495 ASSERT(temp->m_tensor->defined());
496 m_tensor->index_put_(index, *temp->m_tensor);
497 }
498 m_var_is_init = true;
499 return shared();
500
501 } else if (is_dictionary_type()) {
502 if (index.size() != 1 || !index[0].is_integer()) {
503 LOG_RUNTIME("The index must be an integer value '%s'!", IndexToString(index).c_str());
504 }
505 (*at(index[0].integer().expect_int()).second) = value;
506 return shared();
507 LOG_RUNTIME("Index '%s' not exists in object '%s'!", IndexToString(index).c_str(), toString().c_str());
508 }
509 LOG_RUNTIME("Don`t set index '%s' in object '%s'!", IndexToString(index).c_str(), toString().c_str());
510}
511
512ObjPtr Obj::op_set_index(int64_t index, std::string value) {
 513 if (m_var_type_current == ObjType::StrChar) {
 514 if (index < static_cast<int64_t> (m_value.size())) {
515 m_value.erase(index, 1);
516 m_value.insert(index, value);
517 m_var_is_init = true;
518 return shared();
519 }
520 LOG_RUNTIME("Index '%ld' not exists in byte string '%s'!", index, m_value.c_str());
521 } else if (m_var_type_current == ObjType::StrWide) {
522 if (index < static_cast<int64_t> (m_string.size())) {
523 m_string.erase(index, 1);
524 m_string.insert(index, utf8_decode(value));
525 m_var_is_init = true;
526 return shared();
527 }
528 LOG_RUNTIME("Index '%ld' not exists in byte string '%s'!", index, "WIDE");
529 }
530 // at(index).second.set_(value);
531 (*at(index).second) = value;
532 return Dict::operator[](index).second;
533}
534
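// Membership test: with strong=true elements are matched with op_accurate (exact),
// otherwise with op_equal (fuzzy comparison).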
535bool Obj::exist(ObjPtr &find, bool strong) {
536 for (auto &elem : * this) {
537 if (strong && find->op_accurate(elem.second)) {
538 return true;
539 } else if (!strong && find->op_equal(elem.second)) {
540 return true;
541 }
542 }
543 return false;
544}
 545
 546ObjType getSummaryTensorType(Obj *obj, ObjType start) {
 547
548 ObjType result = start;
549 if (!obj) {
550 return result;
551 }
552 if (obj->is_dictionary_type() || obj->is_range()) {
553 for (int i = 0; i < obj->size(); i++) {
554 result = getSummaryTensorType(obj->at(i).second.get(), result);
555 }
556 return result;
557 } else if (obj->is_arithmetic_type()) {
559 return std::max(obj->m_var_type_current, start);
561 return ObjType::Rational;
562 } else {
563 if (start >= std::max(obj->m_var_type_current, obj->m_var_type_fixed)) {
564 return start;
565 } else {
566 return std::max(obj->m_var_type_current, obj->m_var_type_fixed);
567 }
568 }
569 }
570 LOG_RUNTIME("Tensor support arithmetic data type only '%s'!", obj->toString().c_str());
571}
572
573void Obj::clear_(bool clear_iterator_name) {
574
575 m_value.clear();
576 m_string.clear();
578
579 m_class_parents.clear();
580 m_var_is_init = false;
581 if (m_tensor) {
582 m_tensor->reset();
583 }
584 m_rational.set_(0);
585 m_var = std::monostate();
586 // m_value.reset(); //????????????????
587 // m_items.clear();
588}
589
591 if (is_arithmetic_type()) {
592 if (is_tensor_type()) {
593 (*m_tensor) = -(*m_tensor);
594 } else if (is_integer()) {
598 } else {
599 LOG_RUNTIME("Unary minus for object '%s' not supported!", toString().c_str());
600 }
601 return shared();
602 }
603 LOG_RUNTIME("Unary minus for object '%s' not supported!", toString().c_str());
604}
605
607 if (is_tensor_type()) {
608 m_tensor->sub_(torch::ones_like(*m_tensor));
609 return shared();
610 }
611 LOG_RUNTIME("Object '%s' not numeric!", toString().c_str());
612}
613
615 if (is_tensor_type()) {
616 *obj->m_tensor = torch::zeros_like(*obj->m_tensor) - *obj->m_tensor;
617 return obj;
618 }
619 LOG_RUNTIME("Object '%s' not numeric!", obj->toString().c_str());
620}
621
622
623// The prefix version returns the value after the increment
624
626 if (is_tensor_type()) {
627 m_tensor->add_(torch::ones_like(*m_tensor));
628
629 return shared();
630 }
631 LOG_RUNTIME("Object '%s' not numeric!", toString().c_str());
632}
633
635 if (is_tensor_type()) {
636 if (value.is_tensor_type()) {
638 if (is_scalar() && value.is_scalar()) {
639 if (is_floating()) {
640 ASSERT(std::holds_alternative<double>(m_var));
643 } else if (is_integral() && value.is_integral()) {
644 ASSERT(std::holds_alternative<int64_t>(m_var));
647 } else {
648 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
649 }
650 } else if (value.is_scalar()) {
651 if (value.is_floating()) {
652 m_tensor->add_(value.GetValueAsNumber());
653 } else if (value.is_integral()) {
654 m_tensor->add_(value.GetValueAsInteger());
655 } else {
656 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
657 }
658 } else {
659 ASSERT(!is_scalar() && !value.is_scalar());
660 m_tensor->add_(*value.m_tensor);
661 }
662 return shared();
663 }
664 }
665 switch (m_var_type_current) {
666 case ObjType::StrChar:
667 case ObjType::StrWide:
668 switch (value.m_var_type_current) {
669 case ObjType::None:
670 return shared();
671 case ObjType::StrChar:
672 m_value += value.m_value;
673 return shared();
674 case ObjType::StrWide:
675 m_string += value.m_string;
676 return shared();
677 }
678 break;
679
680 case ObjType::Class:
682 if (value.m_var_type_current == ObjType::None) {
683 return shared();
685 for (int i = 0; i < value.size(); i++) {
686 push_back(value.at(i).second);
687 }
688 return shared();
689 }
690 break;
691
694 m_rational.operator+=(value.m_rational);
695 } else {
696 m_rational.operator+=(value.toType(ObjType::Rational)->m_rational);
697 }
698 return shared();
699
700 }
701 LOG_RUNTIME("Operator '+' fail for '%s' and '%s'", toString().c_str(), value.toString().c_str());
702}
703
707 } else if (is_tensor_type() && value.is_tensor_type()) {
709 if (is_scalar() && value.is_scalar()) {
710 if (is_floating()) {
711 ASSERT(std::holds_alternative<double>(m_var));
714 } else if (is_integral() && value.is_integral()) {
715 ASSERT(std::holds_alternative<int64_t>(m_var));
718 } else {
719 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
720 }
721 } else if (value.is_scalar()) {
722 if (value.is_floating()) {
723 m_tensor->sub_(value.GetValueAsNumber());
724 } else if (value.is_integral()) {
725 m_tensor->sub_(value.GetValueAsInteger());
726 } else {
727 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
728 }
729 } else {
730 ASSERT(!is_scalar() && !value.is_scalar());
731 m_tensor->sub_(*value.m_tensor);
732 }
733 return shared();
734 }
735 switch (m_var_type_current) {
736 case ObjType::Class:
738 if (value.m_var_type_current == ObjType::None) {
739 return shared();
741 for (int i = 0; i < value.size(); i++) {
742 auto found = find(value.name(i));
743 if (found != end()) {
744 ListType::erase(found);
745 }
746 }
747 return shared();
748 }
749 break;
752 m_rational.operator-=(value.m_rational);
753 } else {
754 m_rational.operator-=(value.toType(ObjType::Rational)->m_rational);
755 }
756 return shared();
757 }
758 LOG_RUNTIME("Operator '-' fail for '%s' and '%s'", toString().c_str(), value.toString().c_str());
759}
760
764 } else if (is_tensor_type() && value.is_tensor_type()) {
766 if (is_scalar() && value.is_scalar()) {
767 if (is_floating()) {
768 ASSERT(std::holds_alternative<double>(m_var));
771 } else if (is_integral() && value.is_integral()) {
772 ASSERT(std::holds_alternative<int64_t>(m_var));
775 } else {
776 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
777 }
778 } else if (value.is_scalar()) {
779 if (value.is_floating()) {
780 m_tensor->mul_(value.GetValueAsNumber());
781 } else if (value.is_integral()) {
782 m_tensor->mul_(value.GetValueAsInteger());
783 } else {
784 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
785 }
786 } else {
787 ASSERT(!is_scalar() && !value.is_scalar());
788 m_tensor->mul_(*value.m_tensor);
789 }
790 return shared();
791 }
792
793 switch (m_var_type_current) {
794
795 case ObjType::Class:
797 if (value.m_var_type_current == ObjType::None) {
798 Dict::clear_();
799 return shared();
801 op_bit_and_set(value, false);
802 return shared();
803 }
804 break;
805
806 case ObjType::StrChar:
807 if (value.is_integral()) {
809 return shared();
810 } else if (value.is_string_type()) {
811 m_value += value.GetValueAsString();
812 return shared();
813 }
814 case ObjType::StrWide:
815 if (value.is_integral()) {
817 return shared();
818 } else if (value.is_string_type()) {
820 return shared();
821 }
822
825 m_rational.operator*=(value.m_rational);
826 } else {
827 m_rational.operator*=(value.toType(ObjType::Rational)->m_rational);
828 }
829 return shared();
830
831 }
832 LOG_RUNTIME("Operator '*' fail for '%s' and '%s'", toString().c_str(), value.toString().c_str());
833}
834
836 if (is_tensor_type() && value.is_tensor_type()) {
838 if (is_scalar() && value.is_scalar()) {
839 if (is_floating()) {
840 ASSERT(std::holds_alternative<double>(m_var));
843 } else if (is_integral() && value.is_integral()) {
844 ASSERT(std::holds_alternative<int64_t>(m_var));
847 } else {
848 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
849 }
850 } else if (value.is_scalar()) {
851 if (value.is_floating()) {
852 m_tensor->div_(value.GetValueAsNumber());
853 } else if (value.is_integral()) {
854 m_tensor->div_(value.GetValueAsInteger());
855 } else {
856 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
857 }
858 } else {
859 ASSERT(!is_scalar() && !value.is_scalar());
860 ASSERT(value.m_tensor);
861 m_tensor->div_(*value.m_tensor);
862 }
863 return shared();
866 m_rational.operator/=(value.m_rational);
867 } else {
868 m_rational.operator/=(value.toType(ObjType::Rational)->m_rational);
869 }
870 return shared();
871 }
872 LOG_RUNTIME("Operator '/' fail for '%s' and '%s'", toString().c_str(), value.toString().c_str());
873}
874
 876 if (is_tensor_type() && value.is_tensor_type()) {
 877 ObjType type = m_var_type_current;
 878 // testResultIntegralType(ObjType::Float32, false);
879 if (is_scalar() && value.is_scalar()) {
880 if (is_floating()) {
881 ASSERT(std::holds_alternative<double>(m_var));
882 m_var = floor(GetValueAsNumber() / value.GetValueAsNumber());
884 } else if (is_integral() && value.is_integral()) {
885 ASSERT(std::holds_alternative<int64_t>(m_var));
886
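 // Integer floor division: when both operands have the same sign plain truncation already
 // floors the result; otherwise round toward negative infinity (e.g. -7 // 2 == -4).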
887 int64_t num = GetValueAsInteger();
888 int64_t den = value.GetValueAsInteger();
889 if (0 < (num^den)) {
890 m_var = static_cast<int64_t> (num / den);
891 } else {
892 ldiv_t res = std::ldiv(num, den);
893 m_var = static_cast<int64_t> ((res.rem) ? res.quot - 1 : res.quot);
894 }
896 } else {
897 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
898 }
899 } else if (value.is_scalar()) {
900 if (value.is_floating()) {
901 m_tensor->div_(value.GetValueAsNumber(), "floor");
902 } else if (value.is_integral()) {
903 m_tensor->div_(value.GetValueAsInteger(), "floor");
904 } else {
905 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
906 }
907 } else {
908 ASSERT(!is_scalar() && !value.is_scalar());
909 ASSERT(value);
910 m_tensor->div_(*value.m_tensor, "floor");
911 (*m_tensor) = m_tensor->toType(toTorchType(type));
912 }
913 // ObjType type = m_var_type_current;
914 // testResultIntegralType(ObjType::Float32, false);
915 // m_tensor->div_(value.m_tensor, "floor");
916 // m_tensor = m_tensor->toType(toTorchType(type));
917 return shared();
918
922 } else {
924 }
925 return shared();
926 }
927 LOG_RUNTIME("Operator '//' fail for '%s' and '%s'", toString().c_str(), value.toString().c_str());
928}
929
931 if (is_tensor_type() && value.is_tensor_type()) {
933 if (is_scalar() && value.is_scalar()) {
934 if (is_floating()) {
935 ASSERT(std::holds_alternative<double>(m_var));
936 m_var = fmod(GetValueAsNumber(), value.GetValueAsNumber());
937 } else if (is_integral() && value.is_integral()) {
938 ASSERT(std::holds_alternative<int64_t>(m_var));
940 } else {
941 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
942 }
943 } else if (value.is_scalar()) {
944 if (value.is_floating()) {
945 m_tensor->fmod_(value.GetValueAsNumber());
946 } else if (value.is_integral()) {
947 m_tensor->fmod_(value.GetValueAsInteger());
948 } else {
949 LOG_RUNTIME("Fail convert '%s' to type %s!", value.toString().c_str(), newlang::toString(m_var_type_current));
950 }
951 } else {
952 ASSERT(!is_scalar() && !value.is_scalar());
953 ASSERT(value.m_tensor);
954 m_tensor->fmod_(*value.m_tensor);
955 }
956 return shared();
959 m_rational.operator%=(value.m_rational);
960 } else {
961 m_rational.operator%=(value.toType(ObjType::Rational)->m_rational);
962 }
963 return shared();
964 }
965 LOG_RUNTIME("Operator '%%' fail for '%s' and '%s'", toString().c_str(), value.toString().c_str());
966}
967
968size_t Obj::ItemValueCount(ObjPtr &find, bool strong) {
969 size_t result = 0;
970 for (auto &elem : * this) {
971 if (strong && find->op_accurate(elem.second)) {
972 result++;
973 } else if (!strong && find->op_equal(elem.second)) {
974 result++;
975 }
976 }
977 return result;
978}
 979
 980void Obj::CloneDataTo(Obj & clone) const {
 981
982 NL_CHECK(!isLocalType(m_var_type_current), "Local object not clonable!");
983
 984 if (&clone != this) { // Do not clone the object into itself
985 clone.m_var_type_current = m_var_type_current;
986 clone.m_var_type_fixed = m_var_type_fixed;
987 clone.m_var_is_init = m_var_is_init;
988 // clone.m_var_type_name = m_var_type_name;
989
990 if (m_dimensions) {
991 clone.m_dimensions = m_dimensions->Clone();
992 } else {
993 clone.m_dimensions = nullptr;
994 }
995
996 clone.m_var_name = m_var_name;
997 clone.m_ctx = m_ctx;
998 clone.m_value = m_value;
999 clone.m_string = m_string;
1000 clone.m_return_obj = m_return_obj;
1001
1002 clone.m_rational = *m_rational.clone();
1003 clone.m_iterator = m_iterator;
1004 if (m_iter_range_value) {
1005 clone.m_iter_range_value = m_iter_range_value->Clone();
1006 }
1007
1008 clone.m_class_parents = m_class_parents;
1009 clone.m_class_name = m_class_name;
1010 clone.m_is_const = m_is_const;
1011
1012 clone.m_var = m_var;
1013 if (m_prototype) {
1014 *const_cast<TermPtr *> (&clone.m_prototype) = m_prototype;
1015 }
1016 if (m_sequence) {
1017 clone.m_sequence = m_sequence->Clone();
1018 }
1020 if (m_tensor->defined()) {
1021 *clone.m_tensor = m_tensor->clone();
1022 }
1023 clone.m_sync = m_sync;
1024 clone.m_reference = m_reference;
1025 }
1026}
 1027
 1028void Obj::ClonePropTo(Obj & clone) const {
 1029
1030 NL_CHECK(!isLocalType(m_var_type_current), "Local object not clonable!");
1031
1032 for (int i = 0; i < Dict<Obj>::size(); i++) {
1033 if (Dict<Obj>::at(i).second) {
1034 // if (Dict<Obj>::at(i).second->m_is_reference || Dict<Obj>::at(i).second->m_is_reference) {
1035 // clone.Dict<Obj>::push_back(Dict<Obj>::at(i));
1036 // } else {
1037 clone.Dict<Obj>::push_back(Dict<Obj>::at(i).second->Clone(nullptr), name(i));
1038 // }
1039 } else {
1040 if (name(i).empty()) {
1041 LOG_RUNTIME("Null arg %d without name! %s", i, toString().c_str());
1042 }
 1043 // Mandatory function parameters may have no object attached
1044 clone.Dict<Obj>::push_back(nullptr, name(i));
1045 }
1046 }
1047}
1048
1049std::string Obj::toString(bool deep) const {
1050 std::string result(m_is_reference ? "&" : "");
1051 result += m_var_name;
1052 if (!result.empty() && m_var_type_current == ObjType::Class && !deep && m_is_reference) {
1053 return result;
1054 }
1055 if (!m_var_name.empty()) {
1056 result.append("=");
1057 }
1058
1059 std::stringstream ss;
1060
1062 // if (m_prototype && m_prototype->GetType()) {
1063 // result += m_prototype->GetType()->toString();
1064 // } else if (m_var_type_fixed != ObjType::None) {
1065 // result += newlang::toString(m_var_type_fixed);
1066 // }
1067 result += "_";
1068 return result;
1069 } else if (is_tensor_type()) {
1070 if (is_scalar()) {
1072 } else {
1074 ASSERT(m_tensor->defined());
1076 }
1077 return result;
1078 } else if (isSimpleType(m_var_type_current)) {
1080 return result;
1081 } else {
1082 switch (m_var_type_current) {
1083
1084 case ObjType::None: // name:=<EMPTY>
1085 result = "_";
1086 return result;
1087
1088 case ObjType::StrChar:
1089 case ObjType::FmtChar:
1090 result += "'";
1091 result += m_value;
1092 result.append("'");
1093 return result;
1094
1095 case ObjType::StrWide: // name:='string' or name:="string"
1096 case ObjType::FmtWide:
1097 result += "\"";
1099 result.append("\"");
1100 return result;
1101
1102 case ObjType::Range: // name:=(1,second="two",3,<EMPTY>,5)
1103 result = at("start").second->GetValueAsString();
1104 result += "..";
1105 result += at("stop").second->GetValueAsString();
1106 result += "..";
1107 result += at("step").second->GetValueAsString();
1108 return result;
1109
1110 // case ObjType::Return:
1111 case ObjType::RetPlus:
1112 case ObjType::RetMinus:
1113 case ObjType::RetRepeat:
1114
1115 if (!m_value.empty()) {
1116 result += m_value;
1117 result += " ";
1118 }
1120 result += "++";
1121 } else if (m_var_type_current == ObjType::RetMinus) {
1122 result += "--";
1123 } else {
1125 result += "-+";
1126 }
1127
1128 if (m_return_obj) {
1129 result += m_return_obj->toString();
1131 result += "++";
1132 } else if (m_var_type_current == ObjType::RetMinus) {
1133 result += "--";
1134 } else {
1136 result += "+-";
1137 }
1138 }
1139 return result;
1140
1141
1142 case ObjType::Struct:
1143 case ObjType::Union:
1144 case ObjType::Enum:
1145
1146 case ObjType::Error:
1150 case ObjType::Continue:
1151 case ObjType::Break:
1152 if (m_class_name.empty()) {
1154 } else {
1156 }
1157 result += "(";
1159 result += ")";
1160 return result;
1161
1162 case ObjType::Module:
1163 case ObjType::Dictionary: // name:=(1,second="two",3,<EMPTY>,5)
1164 result += "(";
1166 result += ",";
1167 result += ")";
1169 return result;
1170
1171 case ObjType::Pointer:
1172 if (std::holds_alternative<void *>(m_var)) {
1173 ss << std::get<void *>(m_var);
1174 // } else if (m_var_type_fixed == ObjType::None || m_var_type_current == ObjType::None) {
1175 // ss << "nullptr";
1176 } else {
1177 LOG_RUNTIME("Fail convert object to pointer!");
1178 }
1179 result += ss.str();
1180 result += ":Pointer";
1181 return result;
1182
1183
1184 case ObjType::Type:
1186 if (m_dimensions && m_dimensions->size()) {
1187 result += "[";
1188 for (int i = 0; i < m_dimensions->size(); i++) {
1189 if (i) {
1190 result += ",";
1191 }
1192 result += (*m_dimensions)[i].second->toString();
1193 }
1194 result += "]";
1195 }
1196
1197
1198 if (size()) {
1199 result += "(";
1201 result += ")";
1202 }
1203 return result;
1204
1205 case ObjType::Reference:
1206
1207 {
1208 result = ":Reference to ";
1209 ObjPtr ref = m_reference.lock();
1210 if (ref) {
1211 result = ref->toString();
1212 } else {
1213 result = "nullptr";
1214 }
1215 return result;
1216 }
1217
1219 case ObjType::Function: // name:={function code}
1220 case ObjType::PureFunc: // name=>{function code}
1222 result += m_prototype->m_text;
1223 result += "(";
1224 m_prototype->dump_items_(result);
1225 result += ")";
1226 if (m_prototype->m_type) {
1227 result += m_prototype->m_type->asTypeString();
1228 }
1229
1230 case ObjType::BLOCK:
1231 result += "{ }";
1232 return result;
1233
1234 case ObjType::BLOCK_TRY:
1235 result += "{* *}";
1236 return result;
1237
1239 result += "{+ +}";
1240 return result;
1241
1243 result += "{- -}";
1244 return result;
1245
1246 case ObjType::EVAL_FUNCTION: // name=>{function code}
1247 // ASSERT(m_prototype);
1248 // result += m_prototype->m_text;
1249 // result += "(";
1250 // m_prototype->dump_items_(result);
1251 // result += ")";
1252 // if (m_prototype->m_type) {
1253 // result += m_prototype->m_type->asTypeString();
1254 // }
1255 //
1256 // if (m_var_type_current == ObjType::EVAL_FUNCTION) {
1257 // result += ":=";
1258 // } else if (m_var_type_current == ObjType::PureFunc) {
1259 // result += ":-";
1260 // } else {
1261 // LOG_RUNTIME("Fail function type");
1262 // }
1263 //
1264 // if (m_sequence->getTermID() != TermID::BLOCK) {
1265 // result += "{";
1266 // }
1267 // if (m_sequence) {
1268 // result += m_sequence->toString();
1269 // if (m_sequence->getTermID() != TermID::BLOCK) {
1270 // result += ";";
1271 // }
1272 // }
1273 // if (m_sequence->getTermID() != TermID::BLOCK) {
1274 // result.append("}");
1275 // }
1276 return result;
1277
1278 case ObjType::Class: // name:=@term(id=123, ...) name=base(id=123, ... )
1280 result += "(";
1281
1282 if (!empty()) {
1284 }
1285 result += ")";
1286 return result;
1287
1288 case ObjType::Ellipsis:
1289 result += "...";
1290 return result;
1291
1292 case ObjType::Rational:
1294 return result;
1295
1296 case ObjType::Iterator:
1297
1298 // LOG_TEST("%s..%s..%s(%s)",
1299 // this->at("start").second->toString().c_str(),
1300 // this->at("stop").second->toString().c_str(),
1301 // this->at("step").second->toString().c_str(),
1302 // m_iterator->m_iter_obj->m_iter_range_value->toString().c_str());
1303
1304
1307
1308 // case ObjType::Context:
1309 // {
1310 // Context * ctx = (Context *)this;
1311 //
1312 // ObjPtr temp = Obj::CreateType(ObjType::Dictionary, ObjType::Dictionary, true);
1313 // temp->m_var_name = "$$";
1314 //
1315 // auto iter = ctx->begin();
1316 // while (iter != ctx->end()) {
1317 // if (!iter->second.expired()) {
1318 // temp->push_back(Obj::CreateString(iter->first));
1319 // iter++;
1320 // } else {
1321 // iter = ctx->ListType::erase(iter);
1322 // }
1323 // }
1324 //
1325 // return temp->toString();
1326 // }
1327 }
1328 }
1329 LOG_RUNTIME("Unknown type '%s' (%d)", newlang::toString(m_var_type_current), (int) m_var_type_current);
1330}
1331
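// Recursive helper: walks every dimension of the tensor and prints its elements as nested,
// comma-separated bracketed lists into the output stream.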
1332void TensorToString_(const torch::Tensor &tensor, c10::IntArrayRef shape, std::vector<Index> &ind, const int64_t pos,
1333 std::stringstream & str) {
1334 std::string intend;
1335 ASSERT(pos < static_cast<int64_t> (ind.size()));
1336 str << "[";
1337 if (shape.size() > 1 && pos + 1 < static_cast<int64_t> (ind.size())) {
1338 str << "\n";
1339 intend = std::string((pos + 1) * 2, ' ');
1340 str << intend;
1341 }
1342 if (pos + 1 < static_cast<int64_t> (ind.size())) {
1343 bool comma = false;
1344 for (ind[pos] = 0; ind[pos].integer() < shape[pos]; ind[pos] = ind[pos].integer() + 1) {
1345 if (comma) {
1346 str << ", ";
1347 } else {
1348 comma = true;
1349 }
1350 TensorToString_(tensor, shape, ind, pos + 1, str);
1351 }
1352 } else {
1353 bool comma = false;
1354 for (ind[pos] = 0; ind[pos].integer() < shape[pos]; ind[pos] = ind[pos].integer() + 1) {
1355 if (comma) {
1356 str << ", ";
1357 } else {
1358 comma = true;
1359 }
1360 if (tensor.is_floating_point()) {
1361 str << tensor.index(ind).item<double>();
1362 } else if (tensor.is_complex()) {
1363 ASSERT(!"Not implemented!");
1364 } else {
1365 str << tensor.index(ind).item<int64_t>();
1366 }
1367 }
1368 }
1369 str << ",";
1370 if (!intend.empty()) {
1371 str << "\n";
1372 }
1373 str << "]";
1374}
1375
1376std::string TensorToString(const torch::Tensor & tensor) {
1377 std::string result;
1378 std::stringstream ss;
1379
1380 if (!tensor.dim()) {
1381 LOG_RUNTIME("!tensor.dim() %s", tensor.toString().c_str());
1382 // ASSERT(tensor.dim());
1383 }
1384
 1385 c10::IntArrayRef shape = tensor.sizes(); // Number of elements in each dimension
 1386 std::vector<Index> ind(shape.size(), 0); // Counter for visiting every element of the tensor
1387 TensorToString_(tensor, shape, ind, 0, ss);
1388 result = ss.str();
1389 result += newlang::toString(fromTorchType(tensor.scalar_type()));
1390
1391 return result;
1392}
1393
1394bool Obj::GetValueAsBoolean() const {
 1395 TEST_INIT_();
1397 return false;
1398 }
1399 if (is_scalar()) {
1400 return GetValueAsInteger();
1401 //return m_tensor->toType(at::ScalarType::Bool).item<double>();
1402 } else if (isSimpleType(m_var_type_current)) {
1403 // Error: Boolean value of Tensor with more than one value is ambiguous
1405 return !at::_is_zerotensor(*m_tensor);
1406 } else {
1407 switch (m_var_type_current) {
1408 case ObjType::StrWide:
1409 return !m_string.empty();
1410 case ObjType::StrChar:
1411 return !m_value.empty();
1412 case ObjType::None:
1413 return false;
1414 case ObjType::Pointer:
1415 ASSERT(std::holds_alternative<void *>(m_var));
1416 return std::get<void *>(m_var);
1417
1418 case ObjType::Rational:
1419 return m_rational.GetAsBoolean();
1420
1422 case ObjType::Class:
1423 if (size()) {
1424 return true;
1425 }
1426 for (auto &elem : m_class_parents) {
1427 if (elem->GetValueAsBoolean()) {
1428 return true;
1429 }
1430 }
1431 return false;
1432
1433 case ObjType::Iterator:
1435 ASSERT(m_iterator->m_iter_obj);
1436 if (m_iterator->m_iter_obj->getType() == ObjType::Range) {
1437 if (m_iterator->m_iter_obj->m_iter_range_value && m_iterator->m_iter_obj->m_iter_range_value->m_var_type_current != ObjType::IteratorEnd) {
1438 return true;
1439 }
1440 } else {
1441 if ((*m_iterator) != m_iterator->end()) {
1442 return m_iterator->data().second->GetValueAsBoolean();
1443 }
1444 }
1445 return false;
1446
1447 default:
1448 LOG_RUNTIME("Type cast to bool %s", toString().c_str());
1449 }
1450 return true;
1451 }
1452}
1453
1454int64_t Obj::GetValueAsInteger() const {
1455 TEST_INIT_();
1456
1457 switch (m_var_type_current) {
1458 case ObjType::Bool:
1459 if (std::holds_alternative<int64_t>(m_var)) {
1460 return std::get<int64_t>(m_var);
1461 } else if (std::holds_alternative<bool *>(m_var)) {
1462 return *std::get<bool *>(m_var);
1463 }
1464 case ObjType::Int8:
1465 case ObjType::Char:
1466 case ObjType::Byte:
1467 if (std::holds_alternative<int64_t>(m_var)) {
1468 return std::get<int64_t>(m_var);
1469 } else if (std::holds_alternative<int8_t *>(m_var)) {
1470 return *std::get<int8_t *>(m_var);
1471 }
1472 case ObjType::Int16:
1473 case ObjType::Word:
1474 if (std::holds_alternative<int64_t>(m_var)) {
1475 return std::get<int64_t>(m_var);
1476 } else if (std::holds_alternative<int16_t *>(m_var)) {
1477 return *std::get<int16_t *>(m_var);
1478 }
1479 case ObjType::Int32:
1480 case ObjType::DWord:
1481 if (std::holds_alternative<int64_t>(m_var)) {
1482 return std::get<int64_t>(m_var);
1483 } else if (std::holds_alternative<int32_t *>(m_var)) {
1484 return *std::get<int32_t *>(m_var);
1485 }
1486 case ObjType::Int64:
1487 case ObjType::DWord64:
1488 if (std::holds_alternative<int64_t>(m_var)) {
1489 return std::get<int64_t>(m_var);
1490 } else if (std::holds_alternative<int64_t *>(m_var)) {
1491 return *std::get<int64_t *>(m_var);
1492 }
1493 case ObjType::Integer:
1494 if (std::holds_alternative<int64_t>(m_var)) {
1495 return std::get<int64_t>(m_var);
1496 // } else if (m_tensor->dim()==0) {
1497 // return m_tensor->item<int64_t>();
1498 }
1499 ASSERT(!is_scalar());
1500 LOG_RUNTIME("Can`t convert tensor to scalar!");
1501
1502 case ObjType::Float16:
1503 case ObjType::Float32:
1504 case ObjType::Float64:
1505 case ObjType::Single:
1506 case ObjType::Double:
1507 case ObjType::Number:
1508 return static_cast<int64_t> (GetValueAsNumber());
1509
1510 case ObjType::Rational:
1511 return m_rational.GetAsInteger();
1512
1513 case ObjType::StrWide:
1514 case ObjType::FmtWide:
1515 if (m_string.size() == 1) {
1516 return m_string[0];
1517 }
1518 break;
1519
1520 case ObjType::StrChar:
1521 case ObjType::FmtChar:
1522 if (m_value.size() == 1) {
1523 return m_value[0];
1524 }
1525 break;
1526
1527 case ObjType::Iterator:
1529 return m_iterator->data().second->GetValueAsInteger();
1530
1531 default:
1533 ASSERT(std::holds_alternative<void *>(m_var));
1534 return reinterpret_cast<int64_t> (std::get<void *>(m_var));
1535 }
1536 }
1537 LOG_RUNTIME("Data type incompatible %s", toString().c_str());
1538}
1539
1540Obj::operator float() const {
1541 double result = GetValueAsNumber();
1542 if (result > (double) std::numeric_limits<float>::max()) {
1543 LOG_RUNTIME("Value1 '%s' %.20f %.20f %.20f is out of range of the casting type float!", GetValueAsString().c_str(), result, std::numeric_limits<float>::max(), std::numeric_limits<float>::lowest());
1544 }
1545
1546 // __asm__ volatile ( "; //SOURCE: __FLT_MAX__ ");
1547 if (result < -__FLT_MAX__) {//(double) std::numeric_limits<float>::lowest()) {
1548 LOG_RUNTIME("Value2 '%s' %.20f %.20f %.20f is out of range of the casting type float!", GetValueAsString().c_str(), result, std::numeric_limits<float>::max(), std::numeric_limits<float>::lowest());
1549 }
1550 LOG_DEBUG("operator float() '%s' %.20f", GetValueAsString().c_str(), result);
1551 return result;
1552}
1553
1554Obj::operator double() const {
1555 return GetValueAsNumber();
1556}
1557
1558double Obj::GetValueAsNumber() const {
 1559 TEST_INIT_();
1560
1561 switch (m_var_type_current) {
1562
1563 case ObjType::Single:
1564 case ObjType::Float16:
1565 case ObjType::Float32:
1566 if (std::holds_alternative<double>(m_var)) {
1567 // LOG_DEBUG("1std::get<double>(m_var) %.20f", std::get<double>(m_var));
1568 return std::get<double>(m_var);
1569 } else if (std::holds_alternative<float *>(m_var)) {
1570 // LOG_DEBUG("1std::get<float *>(m_var) %.20f", *std::get<float *>(m_var));
1571 return *std::get<float *>(m_var);
1572 }
1573 case ObjType::Double:
1574 case ObjType::Float64:
1575 if (std::holds_alternative<double>(m_var)) {
1576 // LOG_DEBUG("2std::get<double>(m_var) %.20f", std::get<double>(m_var));
1577 return std::get<double>(m_var);
1578 } else if (std::holds_alternative<double *>(m_var)) {
1579 // LOG_DEBUG("2std::get<float *>(m_var) %.20f", *std::get<float *>(m_var));
1580 return *std::get<double *>(m_var);
1581 }
1582 case ObjType::Number:
1583 if (std::holds_alternative<double>(m_var)) {
1584 return std::get<double>(m_var);
1585 }
1586 if (is_scalar()) {
1587 ASSERT(!is_scalar());
1588 }
1589 LOG_RUNTIME("Can`t convert tensor to scalar!");
1590
1591 case ObjType::Rational:
1592 return m_rational.GetAsNumber();
1593
1594 case ObjType::Iterator:
1596 return m_iterator->data().second->GetValueAsNumber();
1597
1598 default:
1599 if (is_simple_type() || is_string_type()) {
1600 // LOG_DEBUG("3is_simple_type() || is_string_type() %.20f", static_cast<double> (GetValueAsInteger()));
1601 return static_cast<double> (GetValueAsInteger());
1602 }
1603 }
1604 LOG_RUNTIME("Data type incompatible %s", toString().c_str());
1605}
1606
1607std::string Obj::GetValueAsString() const {
1608 std::string result;
1609 std::string temp;
1610 std::stringstream ss;
1611
1612 if (!m_var_is_init) {
 1613 LOG_RUNTIME("Object not initialized name:'%s' type:%s, fix:%s!",
 1614 m_var_name.c_str(), newlang::toString(m_var_type_current), newlang::toString(m_var_type_fixed));
 1615 }
1616
1617 switch (m_var_type_current) {
1618 case ObjType::None:
1619 return result;
1620
1621 case ObjType::Tensor:
1622 case ObjType::Bool:
1623 case ObjType::Int8:
1624 case ObjType::Int16:
1625 case ObjType::Int32:
1626 case ObjType::Int64:
1627 case ObjType::Char:
1628 case ObjType::Byte:
1629 case ObjType::Word:
1630 case ObjType::DWord:
1631 case ObjType::DWord64:
1632 case ObjType::Integer:
1633 case ObjType::Float16:
1634 case ObjType::Float32:
1635 case ObjType::Float64:
1636 case ObjType::Single:
1637 case ObjType::Double:
1638 case ObjType::Number:
1639 case ObjType::Complex:
1640 case ObjType::Complex16:
1641 case ObjType::Complex32:
1642 case ObjType::Complex64:
1643 if (is_scalar()) {
1644 if (is_integral()) {
1645 return std::to_string(GetValueAsInteger());
1646 } else if (is_floating()) {
1647 ss << GetValueAsNumber();
1648 return ss.str();
1649 } else {
1650 ASSERT(!"Not implemented!");
1651 }
1652 } else {
1653 return TensorToString(*m_tensor);
1654 }
1655
1656 case ObjType::StrChar:
1657 case ObjType::FmtChar:
1658 return m_value;
1659
1660 case ObjType::StrWide:
1661 case ObjType::FmtWide:
1662 return utf8_encode(m_string);
1663
1665 case ObjType::Function:
1666 case ObjType::PureFunc:
1668 return m_var_name + "={ }";
1669
1670 case ObjType::Class:
1672 return toString();
1673
1674 case ObjType::Error:
1676 if (!m_var_name.empty()) {
1677 result += ": ";
1678 }
1679 temp = m_value;
1680 trim(temp, "\n");
1681 result += temp;
1682 return result;
1683
1684 case ObjType::Pointer:
1685 ASSERT(std::holds_alternative<void *>(m_var));
1686 ss << std::get<void *>(m_var);
1687 result += ss.str();
1688 if (m_class_name.empty()) {
1689 result += ":Pointer";
1690 } else {
1692 }
1693 return result;
1694
1695 case ObjType::Range:
1696 result += toString();
1697 return result;
1698
1699 case ObjType::Rational:
1701 return result;
1702
1703 case ObjType::Iterator:
1706 }
1707 LOG_RUNTIME("Data type '%s' %d incompatible to string!", newlang::toString(m_var_type_current), (int) m_var_type_current);
1708}
1709
1710/*
1711 *
1712 *
1713 *
1714 *
1715 */
1716int Obj::op_compare(Obj & value) {
1717 if (this == &value) {
1718 return 0;
1719 }
1720 if (is_scalar() && value.is_scalar()) {
1721 if (is_floating() || value.is_floating()) {
1722 if (GetValueAsNumber() < value.GetValueAsNumber()) {
1723 return -1;
1724 } else if (GetValueAsNumber() > value.GetValueAsNumber()) {
1725 return 1;
1726 };
1727 return 0;
1728 } else if (is_complex() || value.is_complex()) {
 1729 // Comparing complex values would raise an error here
1730 } else {
1731 if (GetValueAsInteger() < value.GetValueAsInteger()) {
1732 return -1;
1733 } else if (GetValueAsInteger() > value.GetValueAsInteger()) {
1734 return 1;
1735 };
1736 return 0;
1737 }
1738 } else if (is_rational()) {
1739
1740 if (value.getType() == ObjType::Rational) {
1741 return m_rational.op_compare(value.m_rational);
1742 } else {
1743 return m_rational.op_compare(*value.GetValueAsRational());
1744 }
1745
1746 } else if ((is_string_type() && value.is_string_type())) {
1747 switch (m_var_type_current) {
1748 case ObjType::StrChar:
1749 return m_value.compare(value.GetValueAsString());
1750
1751 case ObjType::StrWide:
1752 return m_string.compare(value.GetValueAsStringWide());
1753 }
1754 }
1756}
1757
1759// 0 == false // true
 1760// 0 === false // false, since the types differ
 1761// 1 == "1" // true, automatic conversion takes place
 1762// 1 === "1" // false, since the types differ
1763// null == undefined // true
1764// null === undefined // false
1765// '0' == false // true
1766// '0' === false // false
1767// */
1768//
1769
1770bool Obj::op_equal(Obj & value) {
1771 if (this == &value) {
1772 return true;
1773 } else if (is_tensor_type()) {
1774 ObjType summary_type = static_cast<ObjType> (std::max(
1775 static_cast<uint8_t> (m_var_type_current),
1776 static_cast<uint8_t> (value.m_var_type_current)));
1777 try {
1778 if (is_scalar() || value.is_scalar()) {
1779 if (is_scalar() && value.is_scalar()) {
1780 if (isIntegralType(summary_type, true)) {
1781 return GetValueAsInteger() == value.GetValueAsInteger();
1782 } else if (isFloatingType(summary_type)) {
1783 return GetValueAsNumber() == value.GetValueAsNumber();
1784 } else {
1785 LOG_RUNTIME("Fail compare type '%s'!", newlang::toString(summary_type));
1786 }
1787 }
1788 return false;
1789 }
1790 torch::Dtype summary_torch_type = toTorchType(static_cast<ObjType> (summary_type));
1791 return m_tensor->toType(summary_torch_type).equal(*value.toType(summary_type)->m_tensor);
1792
1793 } catch (std::exception e) {
1794 LOG_RUNTIME("Fail compare"); //, e.what());
1795 }
1796 } else if (is_bool_type()) {
1797 return GetValueAsBoolean() == value.GetValueAsBoolean();
1798 } else if (is_string_type()) {
1799 return GetValueAsString().compare(value.GetValueAsString()) == 0;
1800 } else if (is_rational()) {
1801
1802 if (value.getType() == ObjType::Rational) {
1803 return m_rational.op_equal(value.m_rational);
1804 } else {
1805 return m_rational.op_equal(*value.GetValueAsRational());
1806 }
1807
1808 } else if (is_dictionary_type() && value.is_dictionary_type()) {
1809 if (size() != value.size()) {
1810 return false;
1811 }
1812 for (int64_t i = 0; i < static_cast<int64_t> (size()); i++) {
1813 if (name(i).compare(value.name(i)) != 0) {
1814 return false;
1815 }
1816 if (!at(i).second->op_equal(value[i].second)) {
1817
1818 return false;
1819 }
1820 }
1821 return true;
1822 }
1823 return false; // оставшиеся типы равны только если идентичны сами объекты (первое условие)
1824}
1825
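// Strict comparison: only objects from the same type family (bool, arithmetic, string or
// dictionary) are compared via op_equal; everything else is considered unequal.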
1826bool Obj::op_accurate(Obj & value) {
1827 if (this == &value || (is_none_type() && value.is_none_type())) {
1828 return true;
1829 } else if ((is_bool_type() && value.is_bool_type()) || (is_arithmetic_type() && value.is_arithmetic_type()) ||
1830 (is_string_type() && value.is_string_type()) || (is_dictionary_type() && value.is_dictionary_type())) {
1831
1832 return op_equal(value);
1833 }
1834 return false;
1835}
1836
1837ObjPtr Obj::op_bit_and_set(Obj &obj, bool strong) {
1840 m_tensor->bitwise_and_(*obj.m_tensor);
1841 // m_values.integer &= obj.m_values.integer;
1842 return shared();
1843 }
1845 Dict::clear_();
1846 return shared();
1849 int pos = 0;
1850 while (pos < size()) {
1851 if (!obj.exist(at(pos).second, strong)) {
1852 erase(pos);
1853 } else {
1854 pos++;
1855 }
1856 }
1857 return shared();
1858 }
1859 } else if (is_tensor_type() && obj.is_tensor_type()) {
1860 int pos = 0;
1861 while (pos < size()) {
1862 if (!obj.exist(at(pos).second, strong)) {
1863 erase(pos);
1864 } else {
1865
1866 pos++;
1867 }
1868 }
1869 return shared();
1870 }
1871 LOG_RUNTIME("Incompatible types %d and %d for '&' operator!", (int) m_var_type_current, (int) obj.m_var_type_current);
1872}
1873
1874bool Obj::op_class_test(ObjPtr obj, Context * ctx) const {
1875 if (obj->is_string_type()) {
1876 return op_class_test(obj->GetValueAsString().c_str(), ctx);
1877 } else if (!obj->m_class_name.empty()) {
1878 return op_class_test(obj->m_class_name.c_str(), ctx);
1879 } else if (obj->is_type_name()) {
1880 return op_class_test(newlang::toString(obj->m_var_type_fixed), ctx);
1881 } else {
1882 return op_class_test(newlang::toString(obj->m_var_type_current), ctx);
1883 }
1884}
1885
1886//bool Obj::op_class_test(const char *name, Context * ctx) const {
1887//
1888// ASSERT(name || *name);
1889//
1890// if (!m_class_name.empty() && m_class_name.compare(name) == 0) {
1891// return true;
1892// }
1893// for (auto &elem : m_class_parents) {
1894// if (elem->op_class_test(name, ctx)) {
1895// return true;
1896// }
1897// }
1898//
1899// bool has_error = false;
1900// ObjType type = RunTime::BaseTypeFromString(m_ctx ? m_ctx->m_runtime.get() : nullptr, name, &has_error);
1901// if (has_error) {
1902// LOG_DEBUG("Type name %s not found!", name);
1903// return false;
1904// }
1905//
1906// ObjType check_type = m_var_type_current;
1907// if (m_var_type_current == ObjType::Type || (!m_var_is_init && m_var_type_current == ObjType::None)) {
1908// check_type = m_var_type_fixed;
1909// }
1910//
1911// if (isContainsType(type, check_type)) {
1912// return true;
1913// }
1914//
1915// std::string class_name = newlang::toString(check_type);
1916// return !class_name.empty() && class_name.compare(name) == 0;
1917//}
1918
1919bool Obj::op_duck_test_prop(Obj *base, Obj *value, bool strong) {
1920 if (!value) {
 1921 return !strong; // An empty object matches any object under non-strict comparison
1922 }
1923 if (!base || base->m_var_type_current == ObjType::None) {
 1924 // With an empty current object the result can be true only if the sample object is empty as well
1925 return value->m_var_type_current == ObjType::None;
1926 }
1927 ObjPtr field;
1928 for (int i = 0; i < value->size(); i++) {
1929 if (value->name(i).empty()) {
1930 field = (*base)[i].second;
1931 } else {
1932 field = (*base)[value->name(i)].second;
1933 }
1934 if (!field) {
1935 return false;
1936 }
1937 if (strong || !((*value)[i].second->getType() != ObjType::None)) {
1938 for (auto &elem : *value) {
1939 if (!field->op_duck_test(elem.second, strong)) {
1940
1941 return false;
1942 }
1943 }
1944 }
1945 }
1946 return true;
1947}
1948
1950 if (is_tensor_type()) {
1952 if (is_scalar()) {
1953 double temp = pow(GetValueAsNumber(), obj.GetValueAsNumber());
1954 if (is_integral()) {
1955 if (temp > static_cast<double> (std::numeric_limits<int64_t>::max())) {
1956 LOG_ERROR("Integer overflow!");
1957 }
1958 m_var = static_cast<int64_t> (llround(temp));
1959 } else {
1960 m_var = temp;
1961 }
1962 } else {
1963 ASSERT(m_tensor->defined());
1964 m_tensor->pow_(obj.GetValueAsNumber());
1965 }
1966 return shared();
1967 } else if (is_rational()) {
1968
1969 if (obj.getType() == ObjType::Rational) {
1971 } else {
1973 }
1974 return shared();
1975
1976 } else if (m_var_type_current == ObjType::StrChar && obj.is_integral()) {
1978 return shared();
1979 } else if (m_var_type_current == ObjType::StrWide && obj.is_integral()) {
1981 return shared();
1982 } else if (m_var_type_current == ObjType::Rational) {
1983 // if(value.m_var_type_current == ObjType::Rational) {
1984 // m_rational->op_pow_(value.m_rational);
1985 // } else {
1986 // m_rational->op_pow_(value.toType(ObjPtr::Rational)->m_rational);
1987 // }
1988 // return shared();
1989 }
1990 LOG_RUNTIME("Unsupported power operator for '%s' and '%s'!", toString().c_str(), obj.toString().c_str());
1991}
1992
1993bool Obj::op_duck_test(Obj *value, bool strong) {
1994
1995 if (!value || value->m_var_type_current == ObjType::None) {
1996        // An empty object is compatible with any object,
1997        // while in strict comparison it is only compatible with an equally empty one
1998 return strong ? m_var_type_current == value->m_var_type_current : true;
1999 }
2000
2001 if (strong) {
2002 if (value->is_simple_type()) {
2003 if (m_var_type_current == value->m_var_type_current || (is_string_type() && value->is_string_type())) {
2004 return true;
2005 }
2006 return false;
2007 }
2008 return op_duck_test_prop(this, value, strong);
2009 }
2012 } else if (is_string_type() && value->is_string_type()) {
2013 return true;
2014 } else if (is_function_type() && value->is_function_type()) {
2015 return true;
2016 } else if (value->m_var_type_current == ObjType::Dictionary || value->m_var_type_current == ObjType::Class) {
2018
2019 return op_duck_test_prop(this, value, strong);
2020 }
2021 return false;
2022 }
2023 return m_var_type_current == value->m_var_type_current;
2024}
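/* A minimal usage sketch of the duck-typing checks above (documentation-only
 * addition, not part of the original object.cpp; the expected results assume that
 * strings and simple scalars follow the branches handled above):
 *
 *   ObjPtr a = Obj::CreateString("abc");   // StrChar
 *   ObjPtr b = Obj::CreateString("xyz");   // StrChar
 *   a->op_duck_test(b.get(), false);       // true: any two string objects are compatible
 *
 *   ObjPtr x = Obj::CreateValue(1);
 *   ObjPtr y = Obj::CreateValue(0);
 *   x->op_duck_test(y.get(), true);        // true: strict mode requires matching simple types
 */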
2025
2026//std::string Obj::format(std::string format, Obj * args) {
2027// if (args && !args->empty()) {
2028// std::string name;
2029// std::string place;
2030// std::wstring wname;
2031// for (int i = 0; i < args->size(); i++) {
2032//
2033// if (isSystemName(args->name(i))) {
2034// continue;
2035// }
2036//
2037//            // Replace the argument number placeholder
2038// name = "\\$" + std::to_string(i + 1);
2039// place = (*args)[i].second->GetValueAsString();
2040// format = std::regex_replace(format, std::regex(name), place);
2041//
2042// if (!args->name(i).empty()) {
2043//
2044// std::wstring wplace = utf8_decode(place);
2045// std::wstring temp = utf8_decode(format);
2046//
2047// wname = L"\\$\\{" + utf8_decode(args->name(i)) + L"\\}";
2048// temp = std::regex_replace(temp, std::wregex(wname), wplace);
2049//
2050//                wname = L"\\$" + utf8_decode(args->name(i)); // + L"\\b"; // Otherwise UTF-8 processing breaks
2051// temp = std::regex_replace(temp, std::wregex(wname), wplace);
2052//
2053// format = utf8_encode(temp);
2054//
2055// }
2056// }
2057// }
2058// return format;
2059//}
2060
2061int64_t newlang::ConcatData(Obj *dest, Obj &src, ConcatMode mode) {
2062 int64_t size = 0;
2063 ASSERT(dest);
2064
2065 if (!dest->m_var_is_init) {
2066
2067 ObjPtr temp = src.toType(dest->m_var_type_current);
2068 temp->m_var_type_fixed = dest->m_var_type_current;
2069
2070 dest->clear_();
2071 temp->CloneDataTo(*dest);
2072 temp->ClonePropTo(*dest);
2073 dest->m_var_is_init = true;
2074
2075 } else if (dest->is_string_type()) {
2076
2077 if (dest->m_var_type_current == ObjType::StrChar) {
2078 std::string add = src.GetValueAsString();
2079 dest->m_value.append(add);
2080 size = add.size();
2081 } else if (dest->m_var_type_current == ObjType::StrWide) {
2082 std::wstring add = src.GetValueAsStringWide();
2083 dest->m_string.append(add);
2084 size = add.size();
2085 } else {
2086 LOG_RUNTIME("Unknown string type %s!", dest->toString().c_str());
2087 }
2088
2089 } else if (dest->is_dictionary_type()) {
2090
2091 ObjPtr temp = src.toType(ObjType::Dictionary);
2092 for (int i = 0; i < temp->size(); i++) {
2093 dest->push_back(temp->at(i).second, temp->at(i).first);
2094 size++;
2095 }
2096
2097 } else if (dest->is_tensor_type()) {
2098
2099 if (dest->m_var_type_current == src.m_var_type_current) {
2100 if (dest->m_tensor->dim() == 0) {
2101 dest->m_tensor->resize_(1);
2102 }
2103 if (src.m_tensor->dim() == 0) {
2104 src.m_tensor->resize_(1);
2105 }
2106 *dest->m_tensor = torch::cat({*(dest->m_tensor), *src.m_tensor});
2107 // size += src.m_value.si
2108 } else {
2109 ObjPtr temp = src.toType(dest->m_var_type_current);
2110 size += ConcatData(dest, *(temp.get()), mode);
2111 }
2112
2113 } else {
2114 LOG_RUNTIME("Unknown data type %s!", dest->toString().c_str());
2115 }
2116
2117 return size;
2118}
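
/* Illustrative usage sketch for ConcatData (documentation-only addition, not part of
 * the original object.cpp; the Obj factory helpers are assumed from this project). */
static void ConcatDataUsageSketch() {
    ObjPtr dest = Obj::CreateString("Hello, ");
    ObjPtr tail = Obj::CreateString("world!");
    // Appends the source string to the destination and returns the number of appended characters
    int64_t added = ConcatData(dest.get(), *tail, ConcatMode::Append);
    (void) added; // dest now holds "Hello, world!" and added == 6
}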
2119
2120void ShapeFromDict(const Obj *obj, std::vector<int64_t> &shape) {
2121 if (obj && (obj->is_dictionary_type() || (obj->is_tensor_type() && !obj->is_scalar()))) {
2122 if (!obj->size()) {
2123 LOG_RUNTIME("Cannot tensor shape from empty dictionary!");
2124 }
2125 shape.push_back(obj->size());
2126 if (obj->at(0).second) {
2127 ShapeFromDict(obj->at(0).second.get(), shape);
2128 }
2129 }
2130}
2131
2132std::vector<int64_t> newlang::TensorShapeFromDict(const Obj * obj) {
2133 std::vector<int64_t> shape;
2134 ShapeFromDict(obj, shape);
2135 return shape;
2136}
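
/* Illustrative sketch (documentation-only addition): a dictionary holding two rows of
 * three scalars each yields the shape {2, 3}. Obj::CreateDict() without arguments is
 * assumed to create an empty dictionary. */
static void TensorShapeFromDictSketch() {
    ObjPtr row = Obj::CreateDict();
    row->push_back(Obj::CreateValue(1));
    row->push_back(Obj::CreateValue(2));
    row->push_back(Obj::CreateValue(3));
    ObjPtr matrix = Obj::CreateDict();
    matrix->push_back(row);
    matrix->push_back(row->Clone());
    std::vector<int64_t> shape = TensorShapeFromDict(matrix.get()); // {2, 3}
    (void) shape;
}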
2137
2138/*
2139 * Since passing arguments to functions via LLVMRunFunction does not work under Windows, libffi was brought back.
2140 */
2141
2142bool Obj::is_scalar() const {
2143 return is_tensor_type() && m_tensor && !m_tensor->defined();
2144}
2145
2146/*
2147 *
2148 *
2149 */
2150
2151void ConvertStringToTensor(const std::string &from, torch::Tensor &to, ObjType type) {
2152 if (from.empty()) {
2153 LOG_RUNTIME("Fail convert empty string to tensor!");
2154 }
2155 to = torch::from_blob((void *) from.c_str(),{(int64_t) from.size()}, at::ScalarType::Char).clone().toType(toTorchType(type));
2156}
2157
2158void ConvertStringToTensor(const std::wstring &from, torch::Tensor &to, ObjType type) {
2159 if (from.empty()) {
2160 LOG_RUNTIME("Fail convert empty string to tensor!");
2161 }
2162 if (sizeof (wchar_t) == sizeof (int32_t)) {
2163 to = torch::from_blob((void *) from.c_str(),{(int64_t) from.size()}, torch::Dtype::Int).clone().toType(toTorchType(type));
2164 } else if (sizeof (wchar_t) == sizeof (int16_t)) {
2165 to = torch::from_blob((void *) from.c_str(),{(int64_t) from.size()}, torch::Dtype::Short).clone().toType(toTorchType(type));
2166 } else {
2167 LOG_RUNTIME("Unsupport wchar_t size '%d'!!!", (int) sizeof (wchar_t));
2168 }
2169}
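
/* Illustrative sketch (documentation-only addition): converting byte and wide strings
 * into one-dimensional tensors with an explicitly requested element type. */
static void ConvertStringToTensorSketch() {
    torch::Tensor bytes;
    ConvertStringToTensor(std::string("abc"), bytes, ObjType::Int8);   // three int8 elements
    torch::Tensor wide;
    ConvertStringToTensor(std::wstring(L"abc"), wide, ObjType::Int32); // three int32 elements
    (void) bytes;
    (void) wide;
}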
2170
2171template <typename T> void ConvertTensorToStringTemplate(const torch::Tensor &from, T &to, std::vector<Index> *index) {
2172
2173    ASSERT(from.dim()); // Scalars are stored as native types, not as tensors
2174
2175 std::vector<Index> dims;
2176 if (index == nullptr) {
2177 to.clear();
2178 dims.push_back(Index(0));
2179 index = &dims;
2180 }
2181
2182 int64_t pos = index->size();
2183 if (pos == from.dim()) {
2184 at::ScalarType torch_type;
2185 switch (sizeof (to[0])) {
2186 case 1:
2187 torch_type = at::ScalarType::Char;
2188 break;
2189 case 2:
2190 torch_type = at::ScalarType::Short;
2191 break;
2192 case 4:
2193 torch_type = at::ScalarType::Int;
2194 break;
2195 default:
2196 LOG_RUNTIME("Unsupported char size! %d", (int) sizeof (to[0]));
2197 }
2198 for (int i = 0; i < from.size(pos - 1); i++) {
2199 (*index)[pos - 1] = i;
2200 to += from.index(*index).toType(torch_type).item<int>();
2201 }
2202 } else {
2203 index->push_back(0);
2204 for (int64_t i = 0; i < from.size(pos - 1); i++) {
2205 (*index)[pos - 1] = i;
2206 ConvertTensorToString(from, to, index);
2207 }
2208 }
2209}
2210
2211void ConvertTensorToString(const torch::Tensor &from, std::string &to, std::vector<Index> *index) {
2213}
2214
2215void ConvertTensorToString(const torch::Tensor &from, std::wstring &to, std::vector<Index> *index) {
2217}
2218
2219void ConvertTensorToDict(const torch::Tensor &from, Obj &to, std::vector<Index> *index) {
2220
2221 to.m_var_is_init = false;
2223 if (!to.is_dictionary_type()) {
2225 }
2226
2227    ASSERT(from.dim()); // Scalars are stored as native types, not as tensors
2228
2229 std::vector<Index> dims;
2230 if (index == nullptr) {
2231 dims.push_back(Index(0));
2232 index = &dims;
2233 }
2234
2235 int64_t pos = index->size();
2236 if (pos == from.dim()) {
2237 for (int i = 0; i < from.size(pos - 1); i++) {
2238 (*index)[pos - 1] = i;
2239 to.push_back(CreateTensor(from.index(*index)));
2240 }
2241 } else {
2242 index->push_back(0);
2243 for (int64_t i = 0; i < from.size(pos - 1); i++) {
2244 (*index)[pos - 1] = i;
2245 ConvertTensorToDict(from, to, index);
2246 }
2247 }
2248
2249 to.m_var_is_init = true;
2250}
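
/* Illustrative sketch (documentation-only addition): unpacking a one-dimensional tensor
 * into a dictionary of scalar objects. Obj::CreateDict() without arguments is assumed
 * to create an empty dictionary. */
static void ConvertTensorToDictSketch() {
    torch::Tensor source = torch::tensor({1, 2, 3}, torch::kInt);
    ObjPtr dict = Obj::CreateDict();
    ConvertTensorToDict(source, *dict); // dict now holds three integer scalars
}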
2251
2252/*
2253 * Supported type conversions
2254 * Range -> Dict
2255 * String -> Tensor
2256 * Tensor -> String
2257 * Tensor -> Dict
2258 * Dict -> Tensor
2259 */
2261 if (m_var_type_current == type || type == ObjType::Any || (is_string_char_type() && isString(type))) {
2262 return;
2263 } else if (type == ObjType::None) {
2264 clear_();
2265 return;
2267
2268 m_var_type_current = type;
2269 return;
2270
2271 } else if (is_tensor_type() && isTensor(type)) {
2272
2273        // Change the tensor type
2274 if (isGenericType(type)) {
2275 m_var_type_fixed = type;
2276 }
2277
2278 if (is_scalar()) {
2279 if ((isGenericType(type) && isContainsType(type, m_var_type_current)) ||
2280 (is_integral() && isIntegralType(type, true)) || (is_floating() && isFloatingType(type))) {
2281                // Nothing needs to be changed
2282 } else if (isIntegralType(type, true)) {
2284 } else if (isFloatingType(type)) {
2286 } else {
2287 LOG_RUNTIME("Unknown convert value!");
2288 }
2289 if (!isGenericType(type)) {
2290 m_var_type_current = type;
2291 }
2292
2293 } else {
2294 ASSERT(m_tensor->defined());
2295 if (!isGenericType(type)) {
2296 *m_tensor = m_tensor->toType(toTorchType(type));
2297 m_var_type_current = fromTorchType(m_tensor->scalar_type());
2298 }
2299 }
2300
2301 return;
2302
2303 } else if (is_range() && isDictionary(type)) {
2304
2305
2306 ObjPtr iter = IteratorMake();
2307 ObjPtr temp = iter->IteratorNext(std::numeric_limits<int64_t>::max());
2308 Dict::clear_();
2309 for (auto &elem : *temp) {
2310 Dict::push_back(elem);
2311 }
2312 m_var_type_current = type;
2314 return;
2315
2316 } else if (is_range() && isTensor(type)) {
2317
2319 toType_(type);
2320 return;
2321
2322 } else if (is_string_char_type() && isTensor(type)) {
2323
2324 if (isGenericType(type)) {
2325 m_var_type_fixed = type;
2327 } else {
2328 m_var_type_current = type;
2329 }
2330
2331 if (m_value.size() == 1) {
2332            // A scalar is stored as a native type
2334 m_var = static_cast<int64_t> (m_value[0]);
2335 } else {
2337 m_var = static_cast<double> (m_value[0]);
2338 }
2339 } else {
2341 }
2342 m_value.clear();
2343 return;
2344
2345 } else if (is_string_wide_type() && isTensor(type)) {
2346
2347 if (isGenericType(type)) {
2348 m_var_type_fixed = type;
2349 if (sizeof (wchar_t) == 4) {
2351 } else {
2352 ASSERT(sizeof (wchar_t) == 2);
2354 }
2355 } else {
2356 m_var_type_current = type;
2357 }
2358
2359 if (m_string.size() == 1) {
2360            // A scalar is stored as a native type
2362 m_var = static_cast<int64_t> (m_string[0]);
2363 } else {
2365 m_var = static_cast<double> (m_string[0]);
2366 }
2367 } else {
2369 }
2370 m_string.clear();
2371 return;
2372
2373 } else if (is_tensor_type() && isStringChar(type)) {
2374
2375 if (is_scalar()) {
2376 int64_t char_val = GetValueAsInteger();
2377 if ((char_val < 0 && char_val < std::numeric_limits<char>::min()) ||
2378 (char_val > std::numeric_limits<uint8_t>::max())) {
2379 LOG_ERROR("Single char overflow! %ld", char_val);
2380 }
2381 if (char_val < 0) {
2382                // According to the standard, char is treated as a signed type
2383 m_value.assign(1, static_cast<char> (char_val));
2384 } else {
2385                // But this is often forgotten and ignored
2386 m_value.assign(1, static_cast<uint8_t> (char_val));
2387 }
2388 m_var = std::monostate();
2389 } else {
2390 ASSERT(!is_scalar());
2391 if (static_cast<uint8_t> (m_var_type_current) > static_cast<uint8_t> (ObjType::Int8)) {
2392 LOG_ERROR("Possible data loss when converting tensor %s to a byte string!", newlang::toString(m_var_type_current));
2393 }
2395 m_tensor->reset();
2396 }
2397 m_var_type_current = type;
2398 return;
2399
2400 } else if (is_tensor_type() && isStringWide(type)) {
2401
2402 if (is_scalar()) {
2403 int64_t char_val = GetValueAsInteger();
2404 if ((char_val < std::numeric_limits<wchar_t>::min()) ||
2405 (char_val > std::numeric_limits<wchar_t>::max())) {
2406 LOG_ERROR("Single wchar_t overflow! %ld", char_val);
2407 }
2408 m_string.assign(1, static_cast<wchar_t> (char_val));
2409 m_var = std::monostate();
2410 } else {
2411 ASSERT(!is_scalar());
2412 ASSERT(sizeof (wchar_t) == 2 || sizeof (wchar_t) == 4);
2413 if ((sizeof (wchar_t) == 2 && static_cast<uint8_t> (m_var_type_current) > static_cast<uint8_t> (ObjType::Int16)) ||
2414 (sizeof (wchar_t) == 4 && static_cast<uint8_t> (m_var_type_current) > static_cast<uint8_t> (ObjType::Int32))) {
2415 LOG_ERROR("Possible data loss when converting tensor %s to a wide string!", newlang::toString(m_var_type_current));
2416 }
2419 m_tensor->reset();
2420 }
2421 m_var_type_current = type;
2422 return;
2423
2424 } else if (is_tensor_type() && type == ObjType::Rational) {
2425
2426 if (!is_scalar()) {
2427 LOG_RUNTIME("Convert tensor to rational support for scalar only!");
2428 }
2429 if (is_integral()) {
2431 m_var = std::monostate();
2432 } else {
2433 LOG_RUNTIME("Convert value '%s' to rational not implemented!", toString().c_str());
2434 }
2435 m_var_type_current = type;
2436 return;
2437
2438 } else if (is_tensor_type() && isDictionary(type)) {
2439
2440 if (is_scalar() && is_integral()) {
2441 ASSERT(std::holds_alternative<int64_t>(m_var));
2442 push_back(Obj::CreateValue(std::get<int64_t>(m_var)));
2443 m_var = std::monostate();
2444 } else if (is_scalar() && is_floating()) {
2445 ASSERT(std::holds_alternative<double>(m_var));
2446 push_back(Obj::CreateValue(std::get<double>(m_var)));
2447 m_var = std::monostate();
2448 } else {
2449 ASSERT(!is_scalar());
2451 if (!m_tensor->defined()) {
2452 ASSERT(m_tensor->defined());
2453 }
2455 m_tensor->reset();
2456 }
2457 m_var_type_current = type;
2458 return;
2459
2460 } else if (is_dictionary_type() && isTensor(type)) {
2461
2462 ASSERT(!m_tensor->defined());
2463
2464 if (isGenericType(type)) {
2465 m_var_type_fixed = type;
2466 }
2467
2468 if (!size()) {
2469 LOG_RUNTIME("Fail convert empty dictionary to tensor!");
2470 }
2471
2472        /*
2473         * All dictionary elements are converted to the required type, and the smallest
2474         * type able to hold every converted element is selected as the common type.
2475         */
2476
2477 for (int i = 0; i < size(); i++) {
2478 at(i).second->toType_(type);
2479 }
2480 ObjType summary_type = getSummaryTensorType(this, ObjType::None);
2481
2482        /*
2483         * A resulting tensor of the required type is created and all dictionary elements are appended to it one by one.
2484         */
2485 at::ScalarType summary_torch_type = toTorchType(static_cast<ObjType> (summary_type));
2486 if (at(0).second->is_scalar()) {
2487 if (at(0).second->is_integral()) {
2488 *m_tensor = torch::full({1}, at(0).second->GetValueAsInteger(), summary_torch_type);
2489 } else {
2490 if (!at(0).second->is_floating()) {
2491 ASSERT(at(0).second->is_floating());
2492 }
2493 *m_tensor = torch::full({1}, at(0).second->GetValueAsNumber(), summary_torch_type);
2494 }
2495 } else {
2496 *m_tensor = at(0).second->m_tensor->toType(summary_torch_type);
2497 }
2498
2499 for (int i = 1; i < size(); i++) {
2500
2501 ObjPtr temp = at(i).second;
2502 if (!temp) {
2503 LOG_RUNTIME("Fail convert nullptr to tensor at index %d!", i);
2504 }
2505
2506 torch::Tensor temp_tensor;
2507 if (temp->is_scalar()) {
2508 ASSERT(!temp->m_tensor->defined());
2509 if (temp->is_integral()) {
2510 temp_tensor = torch::full({1}, temp->GetValueAsInteger(), summary_torch_type);
2511 } else {
2512 if (!temp->is_floating()) {
2513 ASSERT(temp->is_floating());
2514 }
2515 temp_tensor = torch::full({1}, temp->GetValueAsNumber(), summary_torch_type);
2516 }
2517 } else {
2518 ASSERT(temp->m_tensor->defined());
2519 temp_tensor = temp->m_tensor->toType(summary_torch_type);
2520 }
2521
2522 *m_tensor = torch::cat({*m_tensor, temp_tensor});
2523 }
2524
2525 Dict::clear_();
2526
2527 m_var_type_current = summary_type;
2528
2529 return;
2530
2531 }
2532    LOG_RUNTIME("Can't convert object type %s to %s!", newlang::toString(m_var_type_current), newlang::toString(type));
2533}
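
/* Illustrative sketch of the in-place conversions handled above (documentation-only
 * addition; toType_() and the factory helpers are taken from this file, everything
 * else is an assumption): */
static void ToTypeUsageSketch() {
    ObjPtr str = Obj::CreateString("abc");
    str->toType_(ObjType::Int8);         // String -> Tensor: three int8 elements
    ObjPtr dict = Obj::CreateDict();     // assumed empty-dictionary factory
    dict->push_back(Obj::CreateValue(1));
    dict->push_back(Obj::CreateValue(2));
    dict->toType_(ObjType::Int32);       // Dict -> Tensor with two int32 elements
}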
2534
2535/*
2536 * DATA TYPES (without arguments)
2537 *
2538 * :type_int := :Int32; # Compile-time alias for the Int32 type (the type cannot be changed)
2539 * :type_int := :Int32(); # Run-time copy of the Int32 type (the type can be changed after Mutable)
2540 * var_type := :Int32; # A type stored in a variable, which can be passed to functions as an argument
2541 *
2542 *
2543 * VALUES OF THE SPECIFIED TYPES (when arguments are present)
2544 *
2545 * scalar_int := :Int32(0); # Run-time conversion with automatic dimensionality (scalar)
2546 * scalar_int := :Int32[0](0); # Run-time conversion with explicit dimensionality (scalar)
2547 * scalar_int := :Int32[0]([0,]); # Run-time conversion with explicit dimensionality (scalar)
2548 *
2549 * tensor_int := :Int32([0,]); # Run-time conversion with automatic dimensionality (tensor)
2550 * tensor_int := :Int32[1](0); # Run-time conversion with explicit dimensionality (tensor)
2551 * tensor_int := :Int32[...](0); # Run-time conversion with arbitrary dimensionality (tensor)
2552 */
2553
2555
2556 ObjPtr result = std::make_shared<Obj>(ObjType::Type);
2557 result->m_class_name = newlang::toString(type);
2558 result->m_var_type_fixed = type;
2559
2560 std::string func_proto(result->m_class_name);
2561 func_proto += "(...)";
2562 func_proto += result->m_class_name;
2563 func_proto += ":-{ }";
2564 result->m_class_name = newlang::NormalizeName(newlang::toString(type));
2565
2566 // TermPtr proto = Parser::ParseString(func_proto, nullptr);
2567 // ASSERT(proto->Left());
2568 // * const_cast<TermPtr *> (&result->m_prototype) = proto->Left();
2569
2570 result->m_var = (void *) BaseTypeConstructor;
2571 result->m_is_const = true;
2572 result->m_var_is_init = true;
2573
2574 return result;
2575}
2576
2578
2579 if (args.empty() || !args[0].second) {
2580 LOG_RUNTIME("Self simple type not defined!");
2581 }
2582 ASSERT(args[0].second->getType() == ObjType::Type);
2583
2584 ObjPtr result = nullptr;
2585 if (isArithmeticType(args[0].second->m_var_type_fixed)) {
2586 result = ConstructorSimpleType_(ctx, args);
2587 } else if (args[0].second->m_var_type_fixed == ObjType::Dictionary) {
2588 result = ConstructorDictionary_(ctx, args);
2589 } else if (args[0].second->m_var_type_fixed == ObjType::Pointer && args.size() > 1) {
2590 result = ConstructorNative_(ctx, args);
2591 } else if (args[0].second->m_var_type_fixed == ObjType::Class) {
2592 result = ConstructorClass_(ctx, args);
2593 } else if (args[0].second->m_var_type_fixed == ObjType::Struct || args[0].second->m_var_type_fixed == ObjType::Union) {
2594 result = ConstructorStruct_(ctx, args);
2595 } else if (args[0].second->m_var_type_fixed == ObjType::Enum) {
2596 result = ConstructorEnum_(ctx, args);
2597 // } else if (args[0].second->m_var_type_fixed == ObjType::Return) {
2598 // result = ConstructorReturn_(ctx, args);
2599 // } else if (args[0].second->m_var_type_fixed == ObjType::Thread) {
2600 // result = ConstructorThread_(ctx, args);
2601 // } else if (args[0].second->m_var_type_fixed == ObjType::Sys) {
2602 // result = ConstructorSystem_(ctx, args);
2603 // } else if (args[0].second->m_var_type_fixed == ObjType::Break || args[0].second->m_var_type_fixed == ObjType::Continue) {
2604 // result = ConstructorInterraption_(ctx, args, args[0].second->m_var_type_fixed);
2605 // } else if (args[0].second->m_var_type_fixed == ObjType::Error || args[0].second->m_var_type_fixed == ObjType::ErrorParser
2606 // || args[0].second->m_var_type_fixed == ObjType::ErrorRunTime || args[0].second->m_var_type_fixed == ObjType::ErrorSignal) {
2607 // result = ConstructorError_(ctx, args);
2608 } else if (args[0].second->m_var_type_fixed == ObjType::StrChar && args.size() > 1) {
2610 for (int i = 1; i < args.size(); i++) {
2611 result->op_concat_(args[i].second, ConcatMode::Append);
2612 }
2613 } else if (args[0].second->m_var_type_fixed == ObjType::StrWide && args.size() > 1) {
2615 for (int i = 1; i < args.size(); i++) {
2616 result->op_concat_(args[i].second, ConcatMode::Append);
2617 }
2618 } else if (args.size() == 1) { // Клонировать тип
2619 result = args[0].second->Clone();
2620 if (result) {
2621 result->m_is_const = false;
2622 }
2623 }
2624
2625 if (!result) {
2626 LOG_RUNTIME("Create type '%s' error or not implemented!", newlang::toString(args[0].second->m_var_type_fixed));
2627 }
2628
2629 // result->m_class_name = args[0].second->m_class_name;
2630
2631 return result;
2632}
2633
2635
2636 ASSERT(!args.empty() && args[0].second);
2637 ASSERT(args[0].second->getType() == ObjType::Type);
2638
2639    // Values for the type conversion were passed,
2640    // but the value itself has not been set yet
2641 ObjPtr result = args[0].second->Clone();
2642 if (args.size() == 1) {
2643        // An editable copy of the existing type
2644 result->m_is_const = false;
2645 return result;
2646 }
2647
2648 // std::vector<int64_t> dims;
2649 //
2650 // bool dim_auto = false;
2651 // if (result->m_dimensions) {
2652 // // Размерность указана
2653 // for (int i = 0; i < result->m_dimensions->size(); i++) {
2654 // ObjPtr ind = result->m_dimensions->at(i).second;
2655 // ASSERT(ind);
2656 // if (ind->GetType())->isind.is_integer()) {
2657 // dims.push_back(ind.integer());
2658 // } else if (ind.is_boolean()) {
2659 // dims.push_back(ind.boolean());
2660 // } else if (ind.is_none()) {
2661 //
2662 // } else if (isEllipsis(ind->GetType())) {
2663 // if (dim_auto) {
2664 // LOG_RUNTIME("Only one dimension of any size is possible!");
2665 // }
2666 // dim_auto = true;
2667 // } else {
2668 // LOG_RUNTIME("Non fixed dimension not implemented!");
2669 // }
2670 // }
2671 // }
2672
2673 ObjPtr first_dim = result->m_dimensions && result->m_dimensions->size() ? result->m_dimensions->at(0).second : nullptr;
2674
2675 bool to_scalar = false;
2676
2677 if (!first_dim || (first_dim->is_integral() && first_dim->GetValueAsInteger() == 0)) {
2678 if (result->m_var_type_fixed == ObjType::Bool) {
2679 bool value = false;
2680 if (args.size() > 2) {
2681 value = true;
2682 } else {
2683 value = args[1].second->GetValueAsBoolean();
2684 }
2685 result->m_var_type_current = result->m_var_type_fixed;
2686 result->m_var = static_cast<int64_t> (value);
2687 result->m_var_is_init = true;
2688 result->m_tensor->reset();
2689 result->m_dimensions = nullptr;
2690 return result;
2691 }
2692 to_scalar = true;
2693 }
2694
2695
2696 if (args.size() == 2) {
2697        // A single value was passed (argument zero is the object itself, i.e. :Type(Value))
2698
2700
2701        // If a generic data type is requested and the data itself already belongs to that generic type
2702 if (isGenericType(result->m_var_type_fixed) && isContainsType(result->m_var_type_fixed, args[1].second->getType())) {
2703 convert = args[1].second->Clone();
2704 } else {
2705 convert = args[1].second->toType(result->m_var_type_fixed);
2706 }
2707 convert->m_var_type_fixed = result->m_var_type_fixed;
2708 convert.swap(result);
2709 convert->m_dimensions.swap(result->m_dimensions);
2710
2711 } else {
2712
2713        // For a list of values a dictionary is built first and then converted to the required data type
2714
2715 result->m_var_type_current = ObjType::Dictionary;
2716
2717 ObjPtr prev = nullptr;
2718 for (int i = 1; i < args.size(); i++) {
2719
2720 if (args[i].second->getType() == ObjType::Ellipsis) {
2721 if (!prev) {
2722 LOG_RUNTIME("There is no previous item to repeat!");
2723 }
2724 if (i + 1 != args.size()) {
2725 LOG_RUNTIME("Ellipsis is not the last element!");
2726 }
2727 if (!result->m_dimensions || !result->m_dimensions->size()) {
2728 LOG_RUNTIME("Object has no dimensions!");
2729 }
2730 int64_t full_size = 1;
2731 for (int j = 0; j < result->m_dimensions->size(); j++) {
2732                    if (!result->m_dimensions->at(j).second->is_integral()) {
2733                        LOG_RUNTIME("Type '%s' error for dimensions!", newlang::toString(result->m_dimensions->at(j).second->getType()));
2734                    }
2735                    full_size *= result->m_dimensions->at(j).second->GetValueAsInteger();
2736 }
2737 if (full_size <= 0) {
2738 LOG_RUNTIME("Items count error for all dimensions!");
2739 }
2740
2741 for (int64_t j = result->size(); j < full_size; j++) {
2742 result->op_concat_(prev, ConcatMode::Append);
2743 }
2744
2745 break;
2746
2747 } else {
2748 prev = args[i].second;
2749 }
2750 prev->m_dimensions = result->m_dimensions;
2751 result->op_concat_(prev, ConcatMode::Append);
2752 }
2753
2754 if (args[0].second->m_var_type_fixed != ObjType::Dictionary) {
2755 result = result->toType(args[0].second->m_var_type_fixed);
2756 result->m_var_type_fixed = result->m_var_type_current;
2757 }
2758 }
2759
2760
2761 if (to_scalar) {
2762 // To Scalar
2763 if (result->is_scalar()) {
2764 // All ok
2765 } else if (result->size() == 1) {
2766
2767 if (result->is_arithmetic_type()) {
2768 if (result->is_integral()) {
2769 result->m_var = static_cast<int64_t> (result->at(0).second->GetValueAsInteger());
2770 } else if (result->is_floating()) {
2771 result->m_var = static_cast<double> (result->at(0).second->GetValueAsNumber());
2772 } else {
2773 LOG_RUNTIME("Convert type '%s' to scalar not implemented!", newlang::toString(result->getType()));
2774 }
2775 } else {
2776 LOG_RUNTIME("Convert type '%s' to scalar not implemented!", newlang::toString(result->getType()));
2777 }
2778
2779 if (result->is_dictionary_type() && result->size()) {
2780 result->erase(0);
2781 }
2782 result->m_dimensions.reset();
2783 result->m_tensor->reset();
2784
2785 } else {
2786 LOG_RUNTIME("Conversion to scalar is not possible!");
2787 }
2788
2789 // result->m_var_type_fixed = result->getType();
2790
2791 } else {
2792 ASSERT(first_dim);
2793 if (isString(result->getType()) || isDictionary(result->getType())) {
2794 if (result->m_dimensions->size() != 1) {
2795 LOG_RUNTIME("Fail dimension size for type '%s'!", newlang::toString(result->getType()));
2796 }
2797 if (!first_dim->is_any_size()) {
2798 result->resize_(first_dim->GetValueAsInteger(), nullptr);
2799 }
2800 } else if (isTensor(result->getType())) {
2801
2802 if (result->m_dimensions->size() != 1 || !result->m_dimensions->at(0).second->is_any_size()) {
2803 std::vector<int64_t> dims;
2804 for (int i = 0; i < result->m_dimensions->size(); i++) {
2805 Index ind = toIndex(*(*result->m_dimensions)[i].second);
2806 if (ind.is_integer()) {
2807 dims.push_back(ind.integer().expect_int());
2808 } else if (ind.is_boolean()) {
2809 dims.push_back(ind.boolean());
2810 } else {
2811 LOG_RUNTIME("Resize dimension '%s' not implemented!", newlang::toString(result->m_dimensions->at(i).second->getType()));
2812 }
2813 }
2814 *result->m_tensor = result->m_tensor->reshape(dims);
2815 }
2816
2817 } else {
2818 LOG_RUNTIME("Fail use dimensions for type '%s'!", newlang::toString(result->getType()));
2819 }
2820 }
2821 return result;
2822}
2823
2824/*
2825 * :Class(One=0, Two=_, Three=3); # All arguments have names
2826 */
2827
2829
2830 ASSERT(!args.empty() && args[0].second);
2831 ASSERT(args[0].second->getType() == ObjType::Type);
2832
2834 for (int i = 1; i < args.size(); i++) {
2835 result->push_back(args[i].second, args.name(i));
2836 }
2837 result->m_var_is_init = true;
2838
2839 return result;
2840}
2841
2843 // if (args.size() < 2) {
2844 LOG_RUNTIME("Empty argument list!");
2845 // }
2846 // if (!args.at(1).second->is_string_type()) {
2847 // LOG_RUNTIME("First argument not a string!");
2848 // }
2849//    //@todo Pass additional arguments? args["module"]->GetValueAsString().c_str(), args["lazzy"]->GetValueAsBoolean()
2850 // Context *ctx = const_cast<Context *> (ctx_const);
2851 // return ctx->m_runtime->CreateNative(args.at(1).second->GetValueAsString().c_str());
2852}
2853
2855 return Obj::CreateClass(":Class");
2856}
2857
2858/*
2859 * <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
2860 * The difference between a class and a dictionary is that dictionary elements can be added and removed dynamically,
2861 * whereas the set of class fields is fixed when the class is defined and fields cannot be added or removed later.
2862 * This is required so that the parser can do its work at program compile time.
2863 * >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
2864 *
2865 * This constructor is used to create literal classes without additional
2866 * calls to base class constructors and/or destructors
2867 *
2868 */
2870
2871 // bool is_check = false;
2872 // ObjPtr result = nullptr;
2873 // ObjPtr constructor = nullptr;
2874 // if (args.size() && !args.at(0).first.empty() && args.at(0).second) {
2875 // //LOG_DEBUG("'%s' %s", args.at(0).first.c_str(), args.at(0).second->toString().c_str());
2876 // result = args.at(0).second;
2877 // is_check = true;
2878 //
2879 // std::string name = MakeConstructorName(args.at(0).second->m_class_name);
2880 // constructor = const_cast<Context *> (ctx)->FindTerm(name);
2881 //
2882 // } else {
2883 // result = Obj::CreateType(ObjType::Class, ObjType::Class, true);
2884 //
2885 // ASSERT(args.size() == 0);
2886 // args.push_back(result);
2887 // }
2888 //
2889 // for (int i = 1; i < args.size(); i++) {
2890 // if (args.name(i).empty()) {
2891 // LOG_RUNTIME("Field pos %d has no name!", i);
2892 // }
2893 // for (int pos = 0; pos < i; pos++) {
2894 // if (args.name(pos).compare(args.name(i)) == 0) {
2895 // LOG_RUNTIME("Field name '%s' at index %d already exists!", args.name(i).c_str(), i);
2896 // }
2897 // }
2898 // if (result->find(args.name(i)) != result->end()) {
2899 // result->find(args.name(i))->second->SetValue_(args.at(i).second);
2900 // } else if (is_check) {
2901 // LOG_RUNTIME("Property '%s' not found!", args.name(i).c_str());
2902 // } else {
2903 // result->push_back(args.at(i).second, args.name(i));
2904 // }
2905 // }
2906 //
2907 // if (constructor) {
2908 // // result = constructor->Call(const_cast<Context *> (ctx), &args, true, result);
2909 // }
2910 ASSERT(0);
2911 return nullptr;
2912}
2913
2915 ObjPtr result = ConstructorClass_(ctx, args);
2916 result->m_var_type_fixed = ObjType::Struct;
2917
2918 if (!result->size()) {
2919 LOG_RUNTIME("Empty Struct not allowed!");
2920 }
2921
2922 for (int i = 0; i < result->size(); i++) {
2923 if (!(*result)[i].second) {
2924 LOG_RUNTIME("Field '%s' at pos %d not defined!", result->name(i).c_str(), i);
2925 }
2926 if (!(*result)[i].second || !isSimpleType((*result)[i].second->getType()) || isGenericType((*result)[i].second->getType())) {
2927
2928 LOG_RUNTIME("Field '%s' at pos %d not simple type! (%s)", result->name(i).c_str(), i, newlang::toString((*result)[i].second->getType()));
2929 }
2930 }
2931 return result;
2932}
2933
2934/*
2935 * :Enum(One=0, Two=_, "Three", Ten=10);
2936 */
2937
2940 result->m_var_type_fixed = ObjType::Enum;
2941
2942 int64_t val_int = 0;
2943 ObjPtr enum_value;
2944 std::string enum_name;
2945
2946 for (int i = 1; i < args.size(); i++) {
2947 if (args.name(i).empty()) {
2948 if (args[i].second && args[i].second->is_string_type()) {
2949 enum_name = args[i].second->GetValueAsString();
2950 } else {
2951 LOG_RUNTIME("Field pos %d has no name!", i);
2952 }
2953 } else {
2954 enum_name = args.name(i);
2955
2956 if (args[i].second && (args[i].second->is_integral())) {
2957 val_int = args[i].second->GetValueAsInteger();
2958 } else if (!args[i].second || !args[i].second->is_none_type()) {
2959                LOG_RUNTIME("Field value '%s' at position %d must be an integer type!", args.name(i).c_str(), i);
2960 }
2961 }
2962
2963 if (result->find(enum_name) != result->end()) {
2964 LOG_RUNTIME("Field value '%s' at index %d already exists!", enum_name.c_str(), i);
2965 }
2966
2967
2968 enum_value = Obj::CreateValue(val_int, ObjType::None); // , type
2969 enum_value->m_var_type_fixed = enum_value->m_var_type_current;
2970 enum_value->m_is_const = true;
2971 result->push_back(enum_value, enum_name);
2972 val_int += 1;
2973 }
2974
2975 result->m_is_const = true;
2976 result->m_var_is_init = true;
2977
2978 return result;
2979}
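
/* Documentation-only note: for the example above, :Enum(One=0, Two=_, "Three", Ten=10),
 * the resulting constants would be One=0, Two=1, Three=2 and Ten=10, since every
 * omitted value continues counting from the previous one. */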
2980
2981//ObjPtr Obj::ConstructorError_(Context *ctx, Obj & args) {
2982// ObjPtr result = ConstructorDictionary_(ctx, args);
2983// result->m_var_type_current = ObjType::Error;
2984// result->m_var_type_fixed = ObjType::Error;
2985// return result;
2986//}
2987
2988//ObjPtr Obj::ConstructorReturn_(Context *ctx, Obj & args) {
2989// ObjPtr result = ConstructorDictionary_(ctx, args);
2990// result->m_var_type_current = ObjType::Return;
2991// result->m_var_type_fixed = ObjType::Return;
2992// if (result->size() == 0) {
2993// result->push_back(Obj::Arg(Obj::CreateNone()));
2994// }
2995// if (result->size() != 1) {
2996//
2997// LOG_RUNTIME("Multiple argument for type ':Return'!");
2998// }
2999// return result;
3000//}
3001//
3002//ObjPtr Obj::ConstructorThread_(Context *ctx, Obj & args) {
3003// if (args.size() == 0) {
3004// LOG_RUNTIME("Function for thread not defined!");
3005// }
3006//
3007// ObjPtr result = ConstructorDictionary_(ctx, args);
3008// result->m_var_type_current = ObjType::Thread;
3009// result->m_var_type_fixed = ObjType::Thread;
3010//
3011//
3012// return result;
3013//}
3014//
3015//ObjPtr Obj::ConstructorSystem_(Context *ctx, Obj & args) {
3016// // if (args.size() == 0) {
3017// LOG_RUNTIME("Function for System not defined!");
3018// // }
3019//
3020// ObjPtr result = ConstructorDictionary_(ctx, args);
3021// result->m_var_type_current = ObjType::Thread;
3022// result->m_var_type_fixed = ObjType::Thread;
3023//
3024//
3025// return result;
3026//}
3027//
3028//ObjPtr Obj::ConstructorInterraption_(Context* ctx, Obj& args, ObjType type) {
3029// ObjPtr result = ConstructorDictionary_(ctx, args);
3030// result->m_var_type_current = type;
3031// result->m_var_type_fixed = type;
3032// if (result->size()) {
3033// LOG_RUNTIME("Argument for type %s not allowed!", newlang::toString(type));
3034// }
3035// return result;
3036//}
3037
3038template<>
3040static const ObjPtr zero = Obj::CreateValue(0);
3041
3042template<>
3043ObjPtr Iterator<Obj>::read_and_next(int64_t count) {
3044 ObjPtr result;
3045
3046 if (count == 0) {
3047 if (m_iter_obj->m_var_type_current == ObjType::Range) {
3048
3049 ASSERT(m_iter_obj->m_iter_range_value);
3050
3051 ObjPtr value = m_iter_obj->m_iter_range_value; //->Clone();
3052 ObjPtr stop = m_iter_obj->at("stop").second;
3053 ObjPtr step = m_iter_obj->at("step").second;
3054
3055 ASSERT(value);
3056 ASSERT(stop);
3057 ASSERT(step);
3058
3059 int up_direction = step->op_compare(*zero);
3060 ASSERT(up_direction);
3061
3062 if (up_direction < 0) {
3063 if (value->op_compare(*stop) > 0) {
3064 result = value->Clone();
3065 (*value) += step;
3066 } else {
3068 }
3069 } else {
3070 if (value->op_compare(*stop) < 0) {
3071 result = value->Clone();
3072 (*value) += step;
3073 } else {
3075 }
3076 }
3077 // m_iter_obj->m_iter_range_value = value;
3078
3079 } else if (m_iter_obj->is_indexing()) {
3080 result = (*(*this)).second;
3081 (*this)++;
3082 } else {
3083 LOG_RUNTIME("Iterator to type %s not implemented!", newlang::toString(m_iter_obj->m_var_type_current));
3084 }
3085 } else if (count < 0) {
3086
3088 result->m_var_is_init = true;
3089
3090 if (m_iter_obj->m_var_type_current == ObjType::Range) {
3091
3092 ASSERT(m_iter_obj->m_iter_range_value);
3093
3094 ObjPtr value = m_iter_obj->m_iter_range_value; //->Clone();
3095 ObjPtr stop = m_iter_obj->at("stop").second;
3096 ObjPtr step = m_iter_obj->at("step").second;
3097
3098 ASSERT(value);
3099 ASSERT(stop);
3100 ASSERT(step);
3101
3102 int up_direction = step->op_compare(*zero);
3103 ASSERT(up_direction);
3104
3105 bool next_value = true;
3106 for (int64_t i = 0; i < -count; i++) {
3107
3108 if (next_value) {
3109 if (up_direction < 0) {
3110 if (value->op_compare(*stop) < 0) {
3111 next_value = false;
3112 } else {
3113 result->push_back(value->Clone());
3114 (*value) += step;
3115 }
3116 } else {
3117 if (value->op_compare(*stop) > 0) {
3118 next_value = false;
3119 } else {
3120 result->push_back(value->Clone());
3121 (*value) += step;
3122 }
3123 }
3124 } else {
3126 }
3127 }
3128 //m_iter_obj->m_iter_range_value = value;
3129
3130 } else if (m_iter_obj->is_indexing()) {
3131
3132 for (int64_t i = 0; i < -count; i++) {
3133 if (*this != this->end()) {
3134 result->push_back(*(*this));
3135 (*this)++;
3136 } else {
3138 }
3139 }
3140
3141 } else {
3142 LOG_RUNTIME("Iterator to type %s not implemented!", newlang::toString(m_iter_obj->m_var_type_current));
3143 }
3144
3145 } else {
3146
3148 result->m_var_is_init = true;
3149
3150 if (m_iter_obj->m_var_type_current == ObjType::Range) {
3151
3152 ASSERT(m_iter_obj->m_iter_range_value);
3153
3154 ObjPtr value = m_iter_obj->m_iter_range_value; //->Clone();
3155 ObjPtr stop = m_iter_obj->at("stop").second;
3156 ObjPtr step = m_iter_obj->at("step").second;
3157
3158 ASSERT(value);
3159 ASSERT(stop);
3160 ASSERT(step);
3161
3162 int up_direction = step->op_compare(*zero);
3163 ASSERT(up_direction);
3164
3165 if (up_direction < 0) {
3166 while (value->op_compare(*stop) > 0) {
3167 result->push_back(value->Clone());
3168 (*value) += step;
3169 }
3170 } else {
3171 while (value->op_compare(*stop) < 0) {
3172 result->push_back(value->Clone());
3173 (*value) += step;
3174 }
3175 }
3176 // m_iter_obj->m_iter_range_value.swap(value);
3177
3178 } else if (m_iter_obj->is_indexing()) {
3179 while (*this != this->end() && result->size() < count) {
3180 result->push_back(*(*this));
3181 (*this)++;
3182 }
3183 } else {
3184 LOG_RUNTIME("Iterator to type %s not implemented!", newlang::toString(m_iter_obj->m_var_type_current));
3185 }
3186 }
3187
3188 return result;
3189}
3190
3191ObjPtr Obj::IteratorMake(const char * filter, bool check_create) {
3194 if (getType() == ObjType::Iterator && !check_create) {
3195 return shared();
3196 }
3197 LOG_RUNTIME("Can't create iterator from '%s'!", toString().c_str());
3198 }
3199 result->m_iterator = std::make_shared<Iterator < Obj >> (shared(), filter);
3200 ASSERT(result->m_iterator->m_iter_obj.get() == this);
3201 ASSERT(!result->m_iterator->m_iter_obj->m_iter_range_value);
3202 result->IteratorReset();
3203 if (is_range()) {
3204 ASSERT(result->m_iterator->m_iter_obj->m_iter_range_value);
3205 }
3206 return result;
3207}
3208
3212 // if(getType() == ObjType::Iterator) {
3213 // return shared();
3214 // }
3215 LOG_RUNTIME("Can't create iterator from '%s'!", toString().c_str());
3216 }
3217
3218 if (!args || args->size() == 0) {
3219 result->m_iterator = std::make_shared<Iterator < Obj >> (shared());
3220 } else if (args->size() == 1 && args->at(0).second && args->at(0).second->is_string_type()) {
3221 result->m_iterator = std::make_shared<Iterator < Obj >> (shared(), args->GetValueAsString().c_str());
3222 } else if (args->size() >= 1 && args->at(0).second && args->at(0).second->is_function_type()) {
3223 ASSERT(false);
3224 // ObjPtr func = args->at(0).second;
3225 // ObjPtr func_arg = args->Clone();
3226 // func_arg->resize_(-(func_arg->size() - 1)); // Удалить первый элемент
3227 // result->m_iterator = std::make_shared<Iterator < Obj >> (shared(), func.get(), func_arg.get());
3228 } else {
3229 LOG_RUNTIME("Invalid arguments for iterator create! '%s'", args->toString().c_str());
3230 }
3231 ASSERT(result->m_iterator->m_iter_obj.get() == this);
3232 ASSERT(!result->m_iterator->m_iter_obj->m_iter_range_value);
3233 result->IteratorReset();
3234 if (is_range()) {
3235 ASSERT(result->m_iterator->m_iter_obj->m_iter_range_value);
3236 }
3237 return result;
3238}
3239
3242 LOG_RUNTIME("Object '%s' not iterator!", toString().c_str());
3243 }
3244
3246 return Iterator<Obj>::m_Iterator_end.second;
3247 }
3248
3250 ASSERT(m_iterator->m_iter_obj);
3251
3252 if (m_iterator->m_iter_obj->is_range()) {
3253
3254 ASSERT(m_iterator->m_iter_obj->m_iter_range_value);
3255
3256 ObjPtr value = m_iterator->m_iter_obj->m_iter_range_value; //->Clone();
3257 ObjPtr stop = m_iterator->m_iter_obj->at("stop").second;
3258 ObjPtr step = m_iterator->m_iter_obj->at("step").second;
3259
3260 ASSERT(value);
3261 ASSERT(stop);
3262 ASSERT(step);
3263
3264 // LOG_TEST("value: %s", value->toString().c_str());
3265 // LOG_TEST("stop: %s", stop->toString().c_str());
3266 // LOG_TEST("step: %s", step->toString().c_str());
3267
3268 int up_direction = step->op_compare(*zero);
3269 ASSERT(up_direction);
3270
3271 if (up_direction < 0) {
3272 if (value->op_compare(*stop) > 0) {
3273 return value->Clone();
3274 }
3275 } else {
3276 if (value->op_compare(*stop) < 0) {
3277 return value->Clone();
3278 }
3279 }
3280
3281 return Iterator<Obj>::m_Iterator_end.second;
3282
3283 } else if (m_iterator->m_iter_obj->is_indexing()) {
3284 return m_iterator->data().second;
3285 }
3286 LOG_RUNTIME("IteratorData not implemented for object type %s!", newlang::toString(m_iterator->m_iter_obj->m_var_type_current));
3287}
3288
3291 LOG_RUNTIME("Method available an iterator only!");
3292 }
3294
3295 if (m_iterator->m_iter_obj->is_range()) {
3296 ObjType summary_type = getSummaryTensorType(m_iterator->m_iter_obj.get(), ObjType::None);
3297 m_iterator->m_iter_obj->m_iter_range_value = m_iterator->m_iter_obj->at("start").second->toType(summary_type);
3298 } else if (m_iterator->m_iter_obj->is_indexing()) {
3299 m_iterator->reset();
3300 } else {
3301 LOG_RUNTIME("IteratorReset not implemented for object type %s!", newlang::toString(m_iterator->m_iter_obj->m_var_type_current));
3302 }
3303 return shared();
3304}
3305
3308 LOG_RUNTIME("Method available an iterator only!");
3309 }
3311 return m_iterator->read_and_next(count);
3312}
3313
3314ObjPtr newlang::CheckSystemField(const Obj *obj, std::string name) {
3315    /*
3316     Built-in attributes available on every object
3317     */
3318
3319 static const char * SYS__NAME__ = "__name__";
3320 static const char * SYS__FULL_NAME__ = "__full_name__";
3321 static const char * SYS__TYPE__ = "__type__";
3322 static const char * SYS__TYPE_FIXED__ = "__type_fixed__";
3323 static const char * SYS__MOULE__ = "__module__";
3324 static const char * SYS__CLASS__ = "__class__";
3325 static const char * SYS__BASE__ = "__base__";
3326 static const char * SYS__SIZE__ = "__size__";
3327
3328 static const char * SYS__DOC__ = "__doc__";
3329 static const char * SYS__STR__ = "__str__";
3330 static const char * SYS__SOURCE__ = "__source__";
3331
3332 static const char * MODULE__MD5__ = "__md5__";
3333 static const char * MODULE__FILE__ = "__file__";
3334 static const char * MODULE__TIMESTAMP__ = "__timestamp__";
3335 static const char * MODULE__MAIN__ = "__main__";
3336 static const char * MODULE__VERSION__ = "__version__";
3337
3338 if (!isSystemName(name)) {
3339 return nullptr;
3340 } else if (!obj || !obj->is_init()) {
3341 return Obj::CreateString(":Undefined");
3342 } else if (name.compare(SYS__FULL_NAME__) == 0) {
3343 return Obj::CreateString(obj->m_var_name);
3344 } else if (name.compare(SYS__NAME__) == 0) {
3346 } else if (name.compare(SYS__MOULE__) == 0) {
3347 return Obj::CreateString(ExtractModuleName(obj->m_var_name.c_str()));
3348 } else if (name.compare(SYS__TYPE__) == 0) {
3350 } else if (name.compare(SYS__TYPE_FIXED__) == 0) {
3352 } else if (name.compare(SYS__CLASS__) == 0) {
3353 return Obj::CreateString(obj->m_class_name);
3354 } else if (name.compare(SYS__BASE__) == 0) {
3355 return Obj::CreateDict(obj->m_class_parents);
3356 } else if (name.compare(SYS__MOULE__) == 0) {
3357 return Obj::CreateString(obj->m_var_name);
3358 } else if (name.compare(SYS__SIZE__) == 0) {
3359 return Obj::CreateValue(obj->size());
3360 // } else if (name.compare(SYS__DOC__) == 0) {
3361 // return Obj::CreateString(GetDoc(obj->m_var_name));
3362 } else if (name.compare(SYS__STR__) == 0) {
3363 return Obj::CreateString(obj->toString());
3364 } else if (name.compare(SYS__SOURCE__) == 0 && obj->m_var_type_current != ObjType::Module) {
3365        // @todo This should return source code that recreates an equivalent object
3366 return Obj::CreateString(obj->toString());
3367 } else {
3368
3369 if (obj->m_var_type_current == ObjType::Module) {
3370 // const Module *mod = static_cast<const Module *> (obj);
3371 //
3372 // if (name.compare(MODULE__MD5__) == 0) {
3373 // return Obj::CreateString(mod->m_md5);
3374 // } else if (name.compare(MODULE__FILE__) == 0) {
3375 // return Obj::CreateString(mod->m_file);
3376 // } else if (name.compare(MODULE__TIMESTAMP__) == 0) {
3377 // return Obj::CreateString(mod->m_timestamp);
3378 // } else if (name.compare(MODULE__VERSION__) == 0) {
3379 // return Obj::CreateString(mod->m_version);
3380 // } else if (name.compare(MODULE__MAIN__) == 0) {
3381 // return Obj::CreateBool(mod->m_is_main);
3382 // } else if (name.compare(SYS__SOURCE__) == 0) {
3383 // return Obj::CreateString(mod->m_source);
3384 // }
3385 }
3386
3387 std::string message("Internal field '");
3388 message += name;
3389        message += "' does not exist!";
3390 return Obj::CreateString(message);
3391 }
3392 return nullptr;
3393}
3394
3396 return Context::Call(m_ctx, *this, args);
3397}
3398
3399void Obj::testResultIntegralType(ObjType type, bool upscalint) {
3400 ObjType new_type = m_var_type_current;
3401
3402 if (((is_integral() && isIntegralType(type, true)) || (is_floating() && isFloatingType(type)))
3403 && static_cast<uint8_t> (type) <= static_cast<uint8_t> (m_var_type_current)) {
3404 // type already including to current type
3405
3406 } else {
3407
3408 if (!canCast(type, m_var_type_current)) {
3409 testConvertType(type);
3410 new_type = type;
3411 }
3412 bool check = false;
3413 if (upscalint && isIntegralType(m_var_type_current, true) && isIntegralType(type, true)) {
3414 if (type < ObjType::Int64) {
3415 new_type = static_cast<ObjType> (static_cast<uint8_t> (type) + 1);
3416 check = true;
3417 }
3418 if (check && m_var_type_fixed != ObjType::None && !canCast(new_type, m_var_type_fixed)) {
3419                new_type = type; // The data type cannot be changed, but the operation itself is still possible
3420 LOG_WARNING("Data type '%s' cannot be changed to '%s', loss of precision is possible!", newlang::toString(type), newlang::toString(m_var_type_fixed));
3421 }
3422 }
3423 }
3424 if (new_type != m_var_type_current) {
3425 if (is_scalar()) {
3426 if (isFloatingType(new_type) && isIntegralType(m_var_type_current, true)) {
3427                // For scalars, promote the type from integer to floating point
3428 m_var = static_cast<double> (GetValueAsInteger());
3429 } else {
3430                // Only the size (not the type) changes - nothing needs to be done
3432 (isIntegralType(new_type, true) && isIntegralType(m_var_type_current, true)));
3433 }
3434 } else {
3436 ASSERT(m_tensor->defined());
3437 *m_tensor = m_tensor->toType(toTorchType(new_type));
3438 }
3439 m_var_type_current = new_type;
3440 }
3441}
3442
3443ObjType GetTensorType(torch::Tensor &val) {
3444 switch (val.dtype().toScalarType()) {
3445 case at::ScalarType::Bool:
3446 return ObjType::Bool;
3447 case at::ScalarType::Half:
3448 case at::ScalarType::BFloat16:
3449 return ObjType::Float16;
3450 case at::ScalarType::Float:
3451 return ObjType::Float32;
3452 case at::ScalarType::Double:
3453 return ObjType::Float64;
3454 case at::ScalarType::Byte:
3455 case at::ScalarType::Char:
3456 case at::ScalarType::QInt8:
3457 case at::ScalarType::QUInt8:
3458 case at::ScalarType::QUInt4x2:
3459 return ObjType::Int8;
3460 case at::ScalarType::Short:
3461 return ObjType::Int16;
3462 case at::ScalarType::Int:
3463 case at::ScalarType::QInt32:
3464 return ObjType::Int32;
3465 case at::ScalarType::Long:
3466 return ObjType::Int64;
3467 case at::ScalarType::ComplexHalf:
3468 return ObjType::Complex16;
3469 case at::ScalarType::ComplexFloat:
3470 return ObjType::Complex32;
3471 case at::ScalarType::ComplexDouble:
3472 return ObjType::Complex64;
3473 }
3474 LOG_RUNTIME("Fail tensor type %s", val.toString().c_str());
3475}
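
/* Illustrative sketch (documentation-only addition): mapping a torch dtype back to
 * the corresponding ObjType. */
static void GetTensorTypeSketch() {
    torch::Tensor t = torch::zeros({2, 2}, torch::kFloat);
    ObjType type = GetTensorType(t); // ObjType::Float32
    (void) type;
}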
3476
3477/*
3478 * From TensorIndexing.h
3479// There is one-to-one correspondence between Python and C++ tensor index types:
3480// Python | C++
3481// -----------------------------------------------------
3482// `None` | `at::indexing::None`
3483// `Ellipsis` | `at::indexing::Ellipsis`
3484// `...` | `"..."`
3485// `123` | `123`
3486// `True` / `False` | `true` / `false`
3487// `:` | `Slice()` / `Slice(None, None)`
3488// `::` | `Slice()` / `Slice(None, None, None)`
3489// `1:` | `Slice(1, None)`
3490// `1::` | `Slice(1, None, None)`
3491// `:3` | `Slice(None, 3)`
3492// `:3:` | `Slice(None, 3, None)`
3493// `::2` | `Slice(None, None, 2)`
3494// `1:3` | `Slice(1, 3)`
3495// `1::2` | `Slice(1, None, 2)`
3496// `:3:2` | `Slice(None, 3, 2)`
3497// `1:3:2` | `Slice(1, 3, 2)`
3498// `torch.tensor([1, 2])`) | `torch::tensor({1, 2})`
3499 */
3501 if (obj.is_none_type()) {
3502 return Index(c10::nullopt);
3503 } else if (obj.is_dictionary_type()) {
3504 std::vector<int64_t> temp = obj.toIntVector(true);
3505 if (temp.size()) {
3506 torch::Tensor tensor = torch::from_blob(temp.data(), temp.size(), torch::Dtype::Long);
3507 return Index(tensor.clone());
3508 } else {
3509 return Index(c10::nullopt);
3510 }
3511
3512 } else if (obj.is_scalar()) {
3513 switch (obj.m_var_type_current) {
3514 case ObjType::Bool:
3515 return Index(obj.GetValueAsBoolean());
3516
3517 case ObjType::Int8:
3518 case ObjType::Int16:
3519 case ObjType::Int32:
3520 case ObjType::Int64:
3521 return Index(obj.GetValueAsInteger());
3522 default:
3523 LOG_RUNTIME("Fail convert scalar type '%s' to Index!", newlang::toString(obj.m_var_type_current));
3524 }
3525 } else if (obj.is_tensor_type()) {
3526 return Index(*obj.m_tensor);
3527 } else if (obj.is_ellipsis()) {
3528 return Index(at::indexing::Ellipsis);
3529 } else if (obj.is_range()) {
3530
3531 return Index(toSlice(obj));
3532 }
3533 LOG_RUNTIME("Fail convert object '%s' to Index!", obj.toString().c_str());
3534}
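
/* Illustrative sketch (documentation-only addition): building tensor indexes from scalar
 * and dictionary objects, mirroring the correspondence table above. Obj::CreateDict()
 * without arguments is assumed to create an empty dictionary. */
static void ToIndexSketch() {
    ObjPtr pos = Obj::CreateValue(int64_t(2));
    Index single = toIndex(*pos);        // plain integer index 2
    ObjPtr dims = Obj::CreateDict();
    dims->push_back(Obj::CreateValue(int64_t(0)));
    dims->push_back(Obj::CreateValue(int64_t(2)));
    Index gather = toIndex(*dims);       // tensor index built from {0, 2}
    (void) single;
    (void) gather;
}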
3535
3537 TEST_CONST_();
3538 if (value->is_none_type()) {
3539 clear_();
3540 return;
3541 } else if ((is_none_type() || is_class_type()) && (value->is_class_type() || (value->is_type_name() && isClass(value->m_var_type_fixed)))) {
3542 if (is_class_type() && m_class_name.compare(value->m_class_name) != 0) {
3543 ASSERT(!value->m_class_name.empty());
3544 LOG_RUNTIME("Fail set value class '%s' as class '%s'!", m_class_name.c_str(), value->m_class_name.c_str());
3545 }
3546
3547 std::string old_name = m_var_name;
3548 value->CloneDataTo(*this);
3549 value->ClonePropTo(*this);
3550 m_var_name.swap(old_name);
3551 m_var_is_init = true;
3552 return;
3553
3554 } else if ((is_none_type() || is_tensor_type()) && value->is_tensor_type()) {
3555
3556 if (value->empty()) {
3557 m_var = std::monostate();
3558 m_tensor->reset();
3559 m_var_is_init = false;
3560 return;
3561 }
3562
3563 if (!canCast(value->m_var_type_current, m_var_type_current)) {
3564 testConvertType(value->m_var_type_current);
3565 }
3566
3567 if (is_none_type()) {
3568
3569            // Assign data to an empty value
3570 ASSERT(std::holds_alternative<std::monostate>(m_var));
3571 ASSERT(!m_tensor->defined());
3572
3573 if (value->is_scalar()) {
3574 m_var = value->m_var;
3575 // if (value->is_integral()) {
3576 // m_var = value->GetValueAsInteger(); // Нужно считывать значение, т.к. может быть ссылка
3577 // } else {
3578 // ASSERT(value->is_floating());
3579 // m_var = value->GetValueAsNumber(); // Нужно считывать значение, т.к. может быть ссылка
3580 // }
3581 } else {
3583 *m_tensor = value->m_tensor->clone();
3584 }
3585 m_var_type_current = value->m_var_type_current;
3586
3587 } else {
3588
3589            // The current variable already contains data
3590
3591 if (is_scalar() && value->is_scalar()) {
3592                // Two scalars
3593 switch (m_var_type_current) {
3594 case ObjType::Bool:
3595 if (std::holds_alternative<int64_t>(m_var)) {
3596 m_var = value->GetValueAsInteger();
3597 } else if (std::holds_alternative<bool *>(m_var)) {
3598 ASSERT(std::get<bool *>(m_var));
3599 *std::get<bool *>(m_var) = value->GetValueAsInteger();
3600 }
3601 break;
3602 case ObjType::Int8:
3603 case ObjType::Char:
3604 case ObjType::Byte:
3605 if (std::holds_alternative<int64_t>(m_var)) {
3606 m_var = value->GetValueAsInteger();
3607 } else if (std::holds_alternative<int8_t *>(m_var)) {
3608 ASSERT(std::get<int8_t *>(m_var));
3609 *std::get<int8_t *>(m_var) = static_cast<int8_t> (value->GetValueAsInteger());
3610 }
3611 break;
3612 case ObjType::Int16:
3613 case ObjType::Word:
3614 if (std::holds_alternative<int64_t>(m_var)) {
3615 m_var = value->GetValueAsInteger();
3616 } else if (std::holds_alternative<int16_t *>(m_var)) {
3617 ASSERT(std::get<int16_t *>(m_var));
3618 *std::get<int16_t *>(m_var) = static_cast<int16_t> (value->GetValueAsInteger());
3619 }
3620 break;
3621 case ObjType::Int32:
3622 case ObjType::DWord:
3623 if (std::holds_alternative<int64_t>(m_var)) {
3624 m_var = value->GetValueAsInteger();
3625 } else if (std::holds_alternative<int32_t *>(m_var)) {
3626 ASSERT(std::get<int32_t *>(m_var));
3627 *std::get<int32_t *>(m_var) = static_cast<int32_t> (value->GetValueAsInteger());
3628 }
3629 break;
3630 case ObjType::Int64:
3631 case ObjType::DWord64:
3632 if (std::holds_alternative<int64_t>(m_var)) {
3633 m_var = value->GetValueAsInteger();
3634 } else if (std::holds_alternative<int64_t *>(m_var)) {
3635 ASSERT(std::get<int64_t *>(m_var));
3636 *std::get<int64_t *>(m_var) = value->GetValueAsInteger();
3637 }
3638 break;
3639 case ObjType::Float32:
3640 case ObjType::Single:
3641 if (std::holds_alternative<double>(m_var)) {
3642 m_var = value->GetValueAsNumber();
3643 } else if (std::holds_alternative<float *>(m_var)) {
3644 ASSERT(std::get<float *>(m_var));
3645 *std::get<float *>(m_var) = static_cast<float> (value->GetValueAsNumber());
3646 }
3647 break;
3648 case ObjType::Float64:
3649 case ObjType::Double:
3650 if (std::holds_alternative<double>(m_var)) {
3651 m_var = value->GetValueAsNumber();
3652 } else if (std::holds_alternative<double *>(m_var)) {
3653 ASSERT(std::get<double *>(m_var));
3654 *std::get<double *>(m_var) = value->GetValueAsNumber();
3655 }
3656 break;
3657 default:
3658 LOG_RUNTIME("Fail set value type '%s'!", newlang::toString(m_var_type_current));
3659 }
3660
3661 } else if (is_scalar() && !value->is_scalar()) {
3662
3663 m_var = std::monostate();
3665 ASSERT(!m_tensor->defined());
3666 *m_tensor = value->m_tensor->clone();
3667
3668 } else if (!is_scalar() && value->is_scalar()) {
3669
3670                // Set a single value for all tensor elements
3671 if (value->is_integral()) {
3672 m_tensor->set_(torch::scalar_tensor(value->GetValueAsInteger(), m_tensor->scalar_type()));
3673 } else {
3675 m_tensor->set_(torch::scalar_tensor(value->GetValueAsNumber(), m_tensor->scalar_type()));
3676 }
3677
3678 } else {
3679                // Duplicate the tensor values if the tensors have the same size
3681 if (m_tensor->sizes().equals(value->m_tensor->sizes())) {
3682 *m_tensor = value->m_tensor->toType(m_tensor->scalar_type()).clone();
3683 } else {
3684 LOG_RUNTIME("Different sizes of tensors!");
3685 }
3686 }
3687 }
3688 m_var_is_init = true;
3689 return;
3690
3691 } else if ((is_none_type() || is_string_type()) && value->is_string_type()) {
3692
3693 switch (m_var_type_current) {
3694            case ObjType::None: // @todo Which string type should be the default? Byte strings for now
3695 case ObjType::StrChar:
3696 case ObjType::FmtChar:
3697 SetValue_(value->GetValueAsString());
3698 return;
3699 case ObjType::StrWide:
3700 case ObjType::FmtWide:
3701 SetValue_(value->GetValueAsStringWide());
3702 return;
3703 }
3704
3705 } else if ((is_none_type() || is_dictionary_type()) && (value->is_dictionary_type() || value->getType() == ObjType::Iterator)) {
3706
3707 std::string old_name = m_var_name;
3708 clear_();
3709 value->CloneDataTo(*this);
3710 value->ClonePropTo(*this);
3711 m_var_name.swap(old_name);
3712 m_var_is_init = true;
3713 return;
3714
3715 // } else if ((is_none_type() || m_var_type_current == ObjType::Pointer) && value->m_var_type_current == ObjType::Pointer) {
3716 // //@todo Check tree type !!!
3717 //
3718 // std::string old_name = m_var_name;
3719 // value->CloneDataTo(*this);
3720 // value->ClonePropTo(*this);
3721 // m_var_name.swap(old_name);
3722 // m_var_is_init = true;
3723 // return;
3724
3725 } else if (((is_none_type() || m_var_type_current == ObjType::Function) && value->is_function_type()) ||
3726 ((is_none_type() || m_var_type_current == ObjType::Pointer) && value->m_var_type_current == ObjType::Pointer)) {
3727 //@todo Check function type args !!!
3728
3729 std::string old_name = m_var_name;
3730 value->CloneDataTo(*this);
3731 value->ClonePropTo(*this);
3732 m_var_name.swap(old_name);
3733 m_var_is_init = true;
3734 return;
3735
3736 } else if ((is_none_type() && value->getType() == ObjType::Rational) || ((m_var_type_current == ObjType::Rational) && value->is_arithmetic_type())) {
3737
3738 if (is_none_type()) {
3739 m_rational = *value->GetValueAsRational();
3740 m_var_is_init = true;
3741 } else {
3742 m_rational.set_(*value->GetValueAsRational());
3743 }
3744 m_var = std::monostate();
3746 return;
3747
3748 } else if ((is_none_type() || m_var_type_current == ObjType::Function || m_var_type_current == ObjType::EVAL_FUNCTION) && value->is_block()) {
3749 //@todo Check function type args !!!
3750
3751 // std::string old_name = m_var_name;
3752 // TermPtr save_proto = m_func_proto;
3753 // TermPtr save_block = m_block_source;
3754 // ObjType save_type = m_var_type_current;
3755 // value->CloneDataTo(*this);
3756 // value->ClonePropTo(*this);
3757 // m_var_name.swap(old_name);
3758 // *const_cast<TermPtr *> (&m_func_proto) = save_proto;
3759 m_sequence = value->m_sequence;
3760 m_var_is_init = value->m_var_is_init;
3761 // m_var_type_current = save_type;
3762
3763 return;
3764
3765 } else if ((is_none_type() || m_var_type_current == ObjType::Pointer || m_var_type_current == ObjType::NativeFunc) && (value->m_var_type_current == ObjType::Pointer || value->m_var_type_fixed == ObjType::Pointer)) {
3766
3767 m_var_is_init = value->m_var_is_init;
3768 ASSERT(std::holds_alternative<void *>(value->m_var));
3769 m_var = std::get<void *>(value->m_var);
3770 return;
3771
3772 }
3773
3774 LOG_RUNTIME("Set value type '%s' as '%s' not implemented!", newlang::toString(m_var_type_current), value->toString().c_str());
3775}
3776
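For readability, a compressed map of the assignment branches implemented above; this is a descriptive summary of the code and adds no behavior of its own:

// tensor   <- tensor        : sizes must match, the source is converted to the destination scalar type and cloned
// tensor   <- scalar        : the value is written through m_tensor->set_(torch::scalar_tensor(...))
// string   <- string        : byte strings via GetValueAsString(), wide strings via GetValueAsStringWide()
// dict     <- dict/iterator : CloneDataTo()/ClonePropTo(), the original variable name is preserved
// function/pointer <- function/pointer : CloneDataTo()/ClonePropTo() as well
// rational <- arithmetic    : the value lands in m_rational and m_var is reset to std::monostate
// native pointer <- pointer : the raw void* stored in m_var is copied, the init flag follows the source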
3777IntAny::IntAny(const ObjPtr obj, ObjType type) : Obj(type, nullptr, nullptr, type, true) {
3778 m_return_obj = obj;
3779}
3780
3781ObjType newlang::typeFromLimit(double value, ObjType type_default) {
3782 if (value >= std::numeric_limits<float>::min() && value < std::numeric_limits<float>::max()) {
3783 return ObjType::Float32;
3784 }
3785 return ObjType::Float64;
3786}
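As written, only values between std::numeric_limits&lt;float&gt;::min() and std::numeric_limits&lt;float&gt;::max() are classified as Float32 (the second argument never affects the result), so a few sample inputs derived directly from the condition above:

// value 1.5    -> ObjType::Float32
// value 1e300  -> ObjType::Float64  (above the float range)
// value -1.5   -> ObjType::Float64  (fails the lower bound, which is the smallest positive float)
// value 0.0    -> ObjType::Float64  (same reason)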
3787
3788ObjType newlang::typeFromLimit(int64_t value, ObjType type_default) {
3789 if (value == 1 || value == 0) {
3790 return ObjType::Bool;
3791 } else if (value < std::numeric_limits<int32_t>::min() || value > std::numeric_limits<int32_t>::max()) {
3792 ASSERT(value > std::numeric_limits<int64_t>::min());
3793 ASSERT(value < std::numeric_limits<int64_t>::max());
3794 return ObjType::Int64;
3795 } else if (value < std::numeric_limits<int16_t>::min() || value > std::numeric_limits<int16_t>::max()) {
3796 return ObjType::Int32;
3797 } else if (value < std::numeric_limits<int8_t>::min() ||
3798 value > std::numeric_limits<int8_t>::max()) { // outside the int8_t range [-128..127]
3799 return ObjType::Int16;
3800 } else {
3801 return ObjType::Int8;
3802 }
3803 return type_default;
3804}
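The integer overload picks the narrowest type that can hold the value, with 0 and 1 treated as Bool; sample inputs derived directly from the limits checked above:

// value 0           -> ObjType::Bool
// value 100         -> ObjType::Int8
// value 1000        -> ObjType::Int16
// value 100000      -> ObjType::Int32
// value 5000000000  -> ObjType::Int64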
3805
3806
3807 #pragma message "Rework the comparison"
3808
3809bool Obj::op_class_test(const char *name, Context * ctx) const {
3810
3811 ASSERT(name && *name);
3812
3813 if (!m_class_name.empty() && m_class_name.compare(name) == 0) {
3814 return true;
3815 }
3816 for (auto &elem : m_class_parents) {
3817 if (elem->op_class_test(name, ctx)) {
3818 return true;
3819 }
3820 }
3821
3822 bool has_error = false;
3823 ObjType type = RunTime::BaseTypeFromString(m_ctx ? m_ctx->m_runtime : nullptr, name, &has_error);
3824 if (has_error) {
3825 LOG_DEBUG("Type name %s not found!", name);
3826 return false;
3827 }
3828
3829 ObjType check_type = m_var_type_current;
3831 check_type = m_var_type_fixed;
3832 }
3833
3834 if (isContainsType(type, check_type)) {
3835 return true;
3836 }
3837
3838 std::string class_name = newlang::toString(check_type);
3839 return !class_name.empty() && class_name.compare(name) == 0;
3840}
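In short, the class test above resolves in three steps (a summary of the code, not additional behavior):

// 1. compare the requested name with the object's own m_class_name;
// 2. recursively ask every parent listed in m_class_parents;
// 3. otherwise resolve the name to a base ObjType via RunTime::BaseTypeFromString and
//    match it against the current (or fixed) type with isContainsType().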