diff --git a/.gitignore b/.gitignore
index 2f836aacf..36e183858 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
*~
*.pyc
+flycheck_*
\ No newline at end of file
diff --git a/MODULEINFO b/MODULEINFO
index 90295af9c..aa9c8fdbe 100644
--- a/MODULEINFO
+++ b/MODULEINFO
@@ -89,7 +89,8 @@ Group: dmlc-lib/6
Require-tokens: public
Make: dmlc
$(HOST)/bin/dml/include/simics/LICENSE
- $(HOST)/bin/dml/include/simics/dmllib.h
+ $(HOST)/bin/dml/include/simics/dml-lib.h
+ $(HOST)/bin/dml/include/simics/dml-raii-types.h
$(HOST)/bin/dml-old-4.8/1.2/LICENSE
$(HOST)/bin/dml-old-4.8/1.4/LICENSE
$(HOST)/bin/dml/1.2/LICENSE
@@ -146,7 +147,8 @@ Group: dmlc-lib/7
Require-tokens: public
Make: dmlc
$(HOST)/bin/dml/include/simics/LICENSE
- $(HOST)/bin/dml/include/simics/dmllib.h
+ $(HOST)/bin/dml/include/simics/dml-lib.h
+ $(HOST)/bin/dml/include/simics/dml-raii-types.h
$(HOST)/bin/dml/1.2/LICENSE
$(HOST)/bin/dml/1.4/LICENSE
# dml files available in all APIs
diff --git a/Makefile b/Makefile
index a4079e7eb..d1491520c 100644
--- a/Makefile
+++ b/Makefile
@@ -81,7 +81,7 @@ SCRIPTS := $(PYTHONPATH)/port_dml.py
MPL_LICENSE := $(PYTHONPATH)/LICENSE
BSD0_LICENSES := $(addsuffix /LICENSE,$(DMLLIB_DESTDIRS) $(OLD_DMLLIB_DESTDIRS_4_8) $(DMLLIB_DEST)/include/simics)
-HFILES := $(DMLLIB_DEST)/include/simics/dmllib.h
+HFILES := $(DMLLIB_DEST)/include/simics/dml-lib.h $(DMLLIB_DEST)/include/simics/dml-raii-types.h
DMLC_BIN := $(OUT_PYFILES) $(OUT_GEN_PYFILES) $(HFILES)
diff --git a/RELEASENOTES.docu b/RELEASENOTES.docu
index f671560ed..9e9e49fec 100644
--- a/RELEASENOTES.docu
+++ b/RELEASENOTES.docu
@@ -1730,4 +1730,10 @@ extern typedef struct { } my_type_t;
This warning is only enabled by default with Simics API version 7 or
above. With version 6 and below it must be explicitly enabled by passing
--warn=WLOGMIXUP to DMLC.
+ The file dmllib.h, needed
+ to compile DMLC-generated C files, has been renamed to dml-lib.h.
+ In addition, a new header file called dml-raii-types.h has been
+ introduced which is also needed to compile DMLC-generated C files. Both
+ of these are located in host/bin/dml/include/simics.
+
diff --git a/doc/1.4/language.md b/doc/1.4/language.md
index 7c9923baa..8efd961ae 100644
--- a/doc/1.4/language.md
+++ b/doc/1.4/language.md
@@ -1481,9 +1481,11 @@ deserialize for the purposes of checkpointing. This is important for the use of
statement](#after-statements).
All primitive non-pointer data types (integers, floating-point types, booleans,
-etc.) are considered serializable, as is any struct, layout, or array type
-consisting entirely of serializable types. [Template types](#templates-as-types)
-and [hook reference types](#hook-declarations) are also considered serializable.
+etc.) are considered serializable, as is any struct, layout, [vector](#vectors),
+or array type consisting entirely of serializable types. The
+[`string`](#strings) type (not to be confused with `char *`) is also considered
+serializable, as are [template types](#templates-as-types) and [hook reference
+types](#hook-declarations).
Any type not fitting the above criteria is not considered serializable:
in particular, any pointer type is not considered serializable, nor is any
@@ -1491,6 +1493,312 @@ in particular, any pointer type is not considered serializable, nor is any
impossible for the compiler to ensure it's aware of all members of the struct
type.
+### Resource-enriched (RAII) types
+
+_Resource-enriched types_ are types for which any value of such a type has
+_associated resources beyond the simple storage needed for the value_, with
+these resources bound to the lifetime of the variable.
+The DML compiler automatically manages the integrity of resources bound
+to a value: they are duplicated if the value is copied to another location,
+and are freed once the lifetime of the value expires (such as a variable going
+out of scope, or a pointer allocated via [`new`](#new-expressions) being
+[`delete`](#delete-statements)d.)
+
+Resource-enriched types are DML's application of Resource Acquisition Is
+Initialization (RAII), a concept present in several other languages such as C++
+and Rust.
+
+As the integrity of values of resource-enriched types — and thus, their
+safe usage — relies heavily on the DML compiler, usage of resource-enriched
+types is more restricted compared to other types:
+
+* `sizeof` or `sizeoftype` may not be used to acquire the size of a
+ resource-enriched type. This is because the size of the underlying C type does
+ not reflect the associated resources of the type, and so any primitive memory
+ operation such as `memset` or `memcpy` are unsafe with values of
+ resource-enriched types.
+* Values of resource-enriched types may not be passed as variadic arguments.
+ Variadic functions are never defined within DML, and so cannot manage
+ arguments of resource-enriched type correctly.
+* Resource-enriched types may not be present in the signature of any [exported
+ method](#exported-methods).
+ Methods are exported for external use, but DML methods have an internal
+ calling convention when resource-enriched types are involved, and so it does
+ not make sense to export such methods.
+
+Any `struct` or array type containing a resource-enriched type is itself
+considered resource-enriched.
+
+
+**Note:** For the time being, DML's support for resource-enriched types is
+rather minimal. DML currently only supports two kinds of resource-enriched
+types: strings, and vectors, with no way to define your own. In addition,
+DML does not provide any operation to cheaply move resources from one value
+of resource-enriched type to another. Further extensions to resource-enriched
+types are currently under evaluation and experimentation.
+
+
+#### Strings
+```
+string
+```
+DML strings are simultaneously length-counted and `NUL`-terminated dynamically
+allocated strings. They have support for efficient mutation, making them
+suitable for use as string builders. In particular, using `+=` to add *N*
+characters to the end of a particular string is an O(*N*) (amortized) operation,
+no matter the previous length of the string.
+
+`string` supports the use of string literals as initializers:
+```
+local string s = "Hello world!";
+```
+
+String values are copied by contents:
+```
+local string s1 = "Hello world!";
+local string s2 = s1;
+s2 += " Goodbye world"; // Does not affect s1.
+assert s1 == "Hello world!" && s2 == "Hello world! Goodbye world";
+```
+
+To construct a string from any arbitrary C string (rather than a string
+literal), `mk_string` and `mk_string_f` may be used:
+```
+method construct_guarded_string(const char *s) -> (string) throws {
+ if (strlen(s) > 512)
+ throw;
+ return mk_string(s);
+}
+
+method stringify_int(int i) -> (string) {
+ return mk_string_f("%d", i);
+}
+```
+
+Strings may be concatenated using `+`, and support all comparison operators
+(which are based on alphabetical ordering). In addition, `+` and the comparison
+operators support either operand being a string literal.
+
+The [`log` statement](#log-statements) has built-in support for values of string
+type; the following is allowed:
+```
+local string s = "Hello world";
+log info: "%s", s;
+```
+
+In addition to the operations described above, a string s also
+supports the following operations:
+
+* _Indexing_: s[idx]
+
+ Retrieves the character at index idx within s.
+ idx must be non-negative, and less than the length of the string
+ (which does *not* include the final NUL character). Failure to uphold
+ this will result in a failed assertion at run-time.
+
+ s[idx] is a writable expression as long as _`s`_
+ is writable and not of const-qualified type. However,
+ s[idx] is never _addressable_, i.e. its address
+ cannot be taken using `&`. In order to retrieve a pointer within the buffer
+ of a string, use `.c_str()`.
+
+* s.c_str()
+ Retrieves a `NUL`-terminated C string corresponding to _`s`_.
+
+ The pointer retrieved from `.c_str()` will only remain valid as long as the
+ value referenced by s *remains alive* and *is not modified beyond
+ the mutation of preexisting elements*. In particular, for the pointer to be
+ valid it is necessary (but not sufficient) that the length of the string
+ remains unchanged. Any attempt to use the pointer past the point it is
+ invalidated is undefined behavior, and so `.c_str()` must be called again
+ to retrieve a new pointer. For example:
+ ```
+ local string s = "I am a dtring";
+ local char *s_ptr = s.c_str();
+ assert s_ptr[7] == 'd'; // OK
+ s[7] = 's';
+ assert s_ptr[7] == 's'; // OK
+ s += "ly string";
+ assert s_ptr[7] == 's'; // BAD: undefined behavior
+ s_ptr = s.c_str();
+ assert s_ptr[7] == 's'; // OK
+ s = mk_string_f("%sly string", s_ptr); // OK!
+ assert s_ptr[7] == 's'; // BAD: undefined behavior
+ ```
+
+ The type of the retrieved pointer will be `char *` if _`s`_ is both
+ writable and not of const-qualified type. Otherwise, the type of the
+ pointer will be `const char *`. If the former, then it is allowed to use the
+ pointer to mutate characters at indices less than the length of the string (it
+ is *not* allowed to mutate the final `NUL` character) as long as the pointer
+ remains valid as described above. Any character mutated this way will be
+ reflected in the DML string the pointer was retrieved from.
+
+* s.len
+ Evaluates to the length of the string (which does *not* include the final
+ `NUL` character.)
+
+ s.len is a writable expression as long as _`s`_ is writable
+ and not of const-qualified type. Writing to s.len will cause
+ _`s`_ to be resized to the specified length; truncating the string if the new
+ length is less than the previous one, and padding it with `NUL` characters
+ if the new length is greater than the previous one (these `NUL` characters
+  are at indices less than the length of the string, and so are allowed to be
+ indexed and/or overwritten.)
+* mk_vect_from_string(s)
+ Creates a `vect(char)` [vector](#vectors) from a string; its length is the
+  same as `s`, and its elements are the characters of `s` in order (excluding
+ final `NUL` character).
+
+ Note that this does not consume or otherwise mutate _`s`_; it remains valid
+ and unchanged.
+
+Strings have a maximum length of 2147483647 characters. Exceeding this limit
+is considered undefined behavior, but will with all likelihood result in a
+failed run-time assertion.
+
+#### Vectors
+```
+vect(elem-type)
+```
+DML vectors are dynamic arrays with support for efficient double-ended queue
+operations — adding or removing an element from either end of a vector
+is an O(1) (amortized) operation. These properties make DML vectors suitable
+for a large number of applications.
+
+`vect` supports the use of compound initializers:
+```
+local vect(int) s = {1, 2, 3};
+s += {4, 5};
+```
+
+Vector values are copied by contents:
+```
+local vect(int) v1 = {1, 2, 3};
+local vect(int) v2 = v1;
+v2 += {4, 5}; // Does not affect v1.
+assert v1.len == 3 && v2.len == 5;
+```
+
+Vectors may be concatenated using `+` as long as they share the same
+_`elem-type`_. Unlike [`string`s](#strings), vectors do not support any
+comparison operator; element-by-element comparisons must be done by iterating
+through the vector. Note also that `v + {6, 7}` is not valid, as compound
+initializers are not valid expressions; instead, a cast must be used to create a
+vector out of the initializer: `v + cast({6, 7}, vect(int))`.
+
+The [`foreach`](#foreach-statements) supports iteration on vectors:
+```
+local vect(int) v = {1, 2, 3};
+foreach elem in (v) {
+    // The iterated element is considered writable (but not addressable)
+ // as long as the vector or element type is not const-qualified
+ elem += 1;
+}
+assert v[0] == 2 && v[1] == 3 && v[2] == 4;
+```
+`foreach` on vectors are restricted to addressable vector expressions —
+i.e. those which may have their address taken. This restriction allows for safe
+and predictable semantics while making as few assumptions as possible; if this
+prevents the use of `foreach` for a particular use-case, then consider binding
+the vector expression to a variable. If all else fails, consider using a standard
+`for`-loop instead.
+
+In addition to the operations described above, a vector v also
+supports the following operations:
+
+* _Indexing_: v[idx]
+
+  Retrieves the element at index idx within v.
+  idx must be non-negative, and less than the length of the vector.
+  Failure to uphold this will result in a failed assertion at run-time.
+
+  v[idx] is a writable expression as long as _`v`_
+ is writable and not of const-qualified type. However,
+ v[idx] is never _addressable_, i.e. its address
+ cannot be taken using `&`. In order to retrieve a pointer within the buffer
+ of a vector, use `.c_buf()`.
+
+* v.c_buf()
+ Retrieves a pointer to a continuous array of the vector's elements. Unlike
+ `string`'s `.c_str()`, `.c_buf()` may only be used if _`v`_ is writable and
+ not of const-qualified type. (This is because the underlying buffer of _`v`_
+ may need to be reordered to make the elements continuous in memory.)
+ This also means that the type of the retrieved pointer will always be
+    `elem-type *`.
+
+  The pointer retrieved from `.c_buf()` will only remain valid as long as the
+  value referenced by v *remains alive* and *has not been modified
+  beyond the mutation of preexisting elements*. In particular, for the pointer
+  to be valid it is necessary (but not sufficient) that the length of the vector
+ remains unchanged. Any attempt to use the pointer past the point it is
+ invalidated is undefined behavior, and so `.c_buf()` must be called again to
+ retrieve a new pointer.
+
+ Note that although `.c_buf()` may mutate the vector it's used with, a call to
+ `.c_buf()` is guaranteed not to invalidate any pointer retrieved from a
+ previous call to `c_buf()` (not already invalidated).
+ ```
+ local vect(int) v = {1, 2, 7};
+ local int *v_ptr = v.c_buf();
+ assert v_ptr[2] == 7; // OK
+ v.c_buf()[2] = 3;
+ assert v_ptr[2] == 3; // OK
+ v += {4, 5};
+ assert v_ptr[2] == 3; // BAD: undefined behavior
+ v_ptr = v.c_buf();
+ assert v_ptr[2] == 3; // OK
+  v = {1, 2, 3};
+ assert v_ptr[2] == 3; // BAD: undefined behavior
+ ```
+
+  It is allowed to use the pointer to mutate elements at indices less than the
+  length of the vector as long as the pointer remains valid as described above.
+  Any element mutated this way will be reflected in the DML vector the pointer
+  was retrieved from. (This mirrors the mutation-through-pointer semantics of
+  `string`'s `.c_str()`.)
+
+* v.push_back(item)
+ Adds _`item`_ to _`v`_ as its last element.
+* v.push_front(item)
+ Adds _`item`_ to _`v`_ as its first element.
+* v.pop_back()
+ Remove and return the last element of _`v`_. This requires the vector to be
+ non-empty. If that is not upheld, then an assertion will be failed at
+ run-time.
+* v.pop_front()
+ Remove and return the first element of _`v`_. This requires the vector to be
+ non-empty. If that is not upheld, then an assertion will be failed at
+ run-time.
+* v.insert(index, item)
+ Inserts _`item`_ at index _`index`_ within _`v`_. This operation is O(n)
+ unless _`index`_ is up to one element away from either end of the vector, in
+ which case it's O(1) amortized.
+* v.remove(index)
+ Remove and return the element at index _`index`_ within _`v`_. This requires
+ the vector to be non-empty. If that is not upheld, then an assertion will be
+ failed at run-time.
+
+ This operation is O(n) unless _`index`_ is up to one element away from either
+ end of the vector, in which case it's O(1) amortized.
+* v.len
+ Evaluates to the length of the vector.
+
+ v.len is a writable expression as long as _`v`_ is writable
+ and not of const-qualified type. Writing to v.len will cause
+ _`v`_ to be resized to the specified length; truncating the vector if the new
+ length is less than the previous one, and padding it with zero-initialized
+ elements if the new length is greater than the previous one.
+* mk_string_from_vect(v)
+ Creates a [`string`](#strings) from a `vect(char)`; its length will be
+ the same as `v`, and its characters will be the same as the elements of `v`.
+ Note that this does not consume or otherwise mutate _`v`_; it remains valid
+ and unchanged.
+
+Vectors have a maximum length of 2147483648 elements. Exceeding this limit is
+considered undefined behavior, but will with all likelihood result in a failed
+run-time assertion.
+
## Methods
@@ -3040,6 +3348,7 @@ DML adds the following statements:
target1 [= target2 = ...] = initializer;
(target1, target2, ...) = initializer;
+target assignop initializer;
Assign values to targets according to an initializer. Unlike C, assignments are
@@ -3071,6 +3380,25 @@ contents of variables through the following:
(a, b) = (b, a)
```
+The third form constitutes what is known in C as "compound assignment"; mutation
+of the target according to a specified operator, i.e.:
+```
+a += 1;
+a -= 1;
+a |= 1;
+...
+```
+Though the right-hand side permits arbitrary initializer syntax, the types that
+compound assignment supports typically restrict the initializer to be a simple
+expression. The exceptions to this are [`string`](#strings) and [vector
+types](#vectors), which support the use of `+=` together with a string or
+compound initializer, respectively:
+```
+local (string s, vect(int) v) = ("I am", {1, 2});
+s += " a string";
+v += {3, 4};
+```
+
### Local Statements
local type identifier [= initializer];
@@ -3150,13 +3478,25 @@ method n() -> (bool, int) {
delete expr;
+delete\<spec\> expr;
-Deallocates the memory pointed to by the result of evaluating
-*`expr`*. The memory must have been allocated with the
-`new` operator, and must not have been deallocated previously.
-Equivalent to `delete` in C++; however, in DML, `delete`
-can only be used as a statement, not as an expression.
+Deallocates the memory pointed to by the result of evaluating *`expr`*.
+
+*spec* specifies an *allocation format*, and can either be `enriched` or
+`extern`. If not explicitly specified, *spec* will default to `extern`. This is
+for backwards compatibility reasons — in the future the default will be
+changed to be `enriched`.
+
+The *enriched* format uses an allocation format specific to the device model,
+and so can *only* be used in order to deallocate storage previously allocated
+via [`new`](#new-expressions) by the same device model.
+
+The *extern* format compiles the `delete` statement to a use of `MM_FREE`,
+meaning it may be used to deallocate storage previously allocated by any use of
+Simics's memory allocation functions/macros (such as `MM_MALLOC`.) This includes
+storage allocated via [`new`](#new-expressions) (which `new` without
+allocation format specifier is equivalent to).
### Try Statements
@@ -3587,8 +3927,9 @@ The `foreach` statement repeats its body (the
The *`identifier`* is used to refer to the current element
within the body.
-DML currently only supports `foreach` iteration on values of `sequence` types
-— which are created through [Each-In expressions](#each-in-expressions).
+DML currently only supports `foreach` iteration on values of [`vect`
+types](#vectors) or `sequence` types — the latter of which are created
+through [Each-In expressions](#each-in-expressions).
The `break` statement can be used within a `foreach` loop to exit it.
@@ -3778,10 +4119,12 @@ will give a compile error unless it appears in one of the following contexts:
### Method References as Function Pointers
It is possible to retrieve a function pointer for a method by using the prefix
operator `&` with a reference to that method. The methods this is possible with
-are subject to the same restrictions as with the [`export` object
+are subject to most of the same restrictions as with the [`export` object
statement](#export-declarations): it's not possible to retrieve a function
pointer to any inline method, shared method, method that throws, method with
-more than one return argument, or method declared inside an object array.
+more than one return argument, or method declared inside an object array. Unlike
+`export`, `&` may be used with methods that have input parameters or return
+values of [resource-enriched type](#resource-enriched-raii-types).
For example, with the following method in DML:
```
@@ -3814,16 +4157,39 @@ independent method callback(int i, void *aux) {
new type
new type[count]
+
+new\<spec\> type
+
+new\<spec\> type[count]
Allocates a chunk of memory large enough for a value of the specified
-type. If the second form is used, memory for *count* values will
+type. If a form specifying *count* is used, then memory for *count* values will
be allocated. The result is a pointer to the allocated memory. (The
pointer is never null; if allocation should fail, the Simics
application will be terminated.)
+*spec* specifies an *allocation format*, and can either be `enriched` or
+`extern`. If not explicitly specified, *spec* will default to `extern`. This is
+for backwards compatibility reasons — in the future the default will be
+changed to be `enriched`.
+
+The *enriched* format uses an allocation format specific to the device model,
+and *must* be used in order to allocate storage for values of [resource-enriched
+(RAII) type](#resource-enriched-raii-types). The fact that the allocation format is model-specific
+comes with the drawback that a pointer created with `new` *cannot be
+freed* using `MM_FREE`/`free`: only code from the same device model can free it,
+and only by using [`delete`](#delete-statements).
+
+The *extern* format compiles `new` to a use of `MM_ZALLOC`, meaning a pointer
+allocated this way may be freed using `MM_FREE` outside of the device model.
+However, this format does not support allocating storage for values of
+resource-enriched type.
+
When the memory is no longer needed, it should be deallocated using a
-`delete` statement.
+[`delete` statement](#delete-statements). The allocation format specified for the
+`delete` statement *must* match that of the `new` expression used to allocate
+the pointer.
### Cast Expressions
diff --git a/include/simics/dmllib.h b/include/simics/dml-lib.h
similarity index 84%
rename from include/simics/dmllib.h
rename to include/simics/dml-lib.h
index 96f027454..4266875ff 100644
--- a/include/simics/dmllib.h
+++ b/include/simics/dml-lib.h
@@ -5,8 +5,8 @@
/* DML runtime utilities needed by the C code generated by dmlc */
-#ifndef SIMICS_DMLLIB_H
-#define SIMICS_DMLLIB_H
+#ifndef SIMICS_DML_LIB_H
+#define SIMICS_DML_LIB_H
#include
@@ -24,6 +24,8 @@
#include
#include
+#include
+
// Copy bits from y given by mask into corresponding bits in x and return the
// result
static inline uint64 DML_combine_bits(uint64 x, uint64 y, uint64 mask)
@@ -150,6 +152,11 @@ DML_eq(uint64 a, uint64 b)
return ((a ^ b) | ((b | a) >> 63)) == 0;
}
+#define DML_SAFE_ASSIGN(dest, src) (({ \
+ typeof(dest) __tmp = src; \
+ dest = __tmp; \
+ }))
+
typedef struct {
uint32 id;
@@ -1091,8 +1098,9 @@ _deserialize_simple_event_arguments(
*out_args = NULL;
return Sim_Set_Ok;
} else {
+ // ZALLOC is critical for deserialization of values of RAII type
void *temp_out_args =
- args_size ? MM_MALLOC(args_size, uint8) : NULL;
+ args_size ? MM_ZALLOC(args_size, uint8) : NULL;
set_error_t error = args_deserializer(*arguments_attr,
temp_out_args);
if (error != Sim_Set_Ok) {
@@ -1219,14 +1227,16 @@ _deserialize_simple_event_data(
dimsizes, dimensions, indices_attr, &temp_out.indices);
if (error != Sim_Set_Ok) goto error;
- error = _deserialize_simple_event_arguments(
- args_size, args_deserializer, arguments_attr, &temp_out.args);
- if (error != Sim_Set_Ok) goto error;
-
error = _deserialize_simple_event_domains(
id_info_ht, domains_attr, &temp_out.domains, &temp_out.no_domains);
if (error != Sim_Set_Ok) goto error;
+ // Deserializing arguments last means we need not deal with destroying it
+ // in case of a later error
+ error = _deserialize_simple_event_arguments(
+ args_size, args_deserializer, arguments_attr, &temp_out.args);
+ if (error != Sim_Set_Ok) goto error;
+
_simple_event_data_t *out_ptr = MM_MALLOC(1, _simple_event_data_t);
*out_ptr = temp_out;
*out = out_ptr;
@@ -1265,6 +1275,7 @@ _DML_create_simple_event_data(
typedef struct {
void (*callback)(conf_object_t *dev, const uint32 *indices,
const void *args);
+ void (*args_destructor)(void *args);
_simple_event_data_t data;
} _dml_immediate_after_queue_elem_t;
@@ -1321,11 +1332,13 @@ _DML_post_immediate_after(
conf_object_t *dev,
_dml_immediate_after_state_t *state,
void (*callback)(conf_object_t *, const uint32 *, const void *),
+ void (*args_destructor)(void *),
const uint32 *indices, uint32 dimensions, const void *args,
size_t args_size, const _identity_t *domains, uint32 no_domains) {
ASSERT(!state->deleted);
_dml_immediate_after_queue_elem_t elem = {
.callback = callback,
+ .args_destructor = args_destructor,
.data = _DML_create_simple_event_data(
indices, dimensions, args, args_size, domains, no_domains)
};
@@ -1353,6 +1366,8 @@ _DML_cancel_immediate_afters(_dml_immediate_after_state_t *state,
}
}
if (found) {
+ if (elem.args_destructor)
+ elem.args_destructor(elem.data.args);
_free_simple_event_data(elem.data);
} else {
QADD(new_queue, elem);
@@ -1378,24 +1393,23 @@ _DML_cancel_immediate_afters(_dml_immediate_after_state_t *state,
// Any after-on-hooks that are identical in respect to these three things
// share the same set of after-on-hook info.
typedef struct {
- const char *callback_key;
void (*callback)(conf_object_t *dev, const uint32 *indices,
const void *args, const void *msg);
- _serializer_t args_serializer;
- _deserializer_t args_deserializer;
+ void (*args_destructor)(void *);
// Size of the data args_de/serializer works with
uint32 args_size;
// id of the object parent of the target callback method
uint32 method_parent_id;
+ const char *callback_key;
+ _serializer_t args_serializer;
+ _deserializer_t args_deserializer;
} _dml_after_on_hook_info_t;
// An element of a hook queue. Currently, the only elements of a hook queue
// are calls suspended from an after-on-hook, so that's what this represents.
typedef struct {
- void (*callback)(conf_object_t *dev, const uint32 *indices,
- const void *args, const void *msg);
+ const _dml_after_on_hook_info_t *info;
_simple_event_data_t data;
- const char *callback_key;
} _dml_hook_queue_elem_t;
// A hook queue, which may or may not have been detached.
@@ -1492,6 +1506,8 @@ _DML_resolve_hookref(void *dev, const _dml_hook_aux_info_t *hook_aux_infos,
UNUSED static void
_DML_free_hook_queue(_dml_hook_queue_t *q) {
VFORT(*q, _dml_hook_queue_elem_t, elem) {
+ if (elem.info->args_destructor)
+ elem.info->args_destructor(elem.data.args);
_free_simple_event_data(elem.data);
}
VFREE(*q);
@@ -1504,13 +1520,24 @@ _DML_attach_callback_to_hook(
const _identity_t *domains, uint32 no_domains) {
uint32 args_size = info->args_size;
_dml_hook_queue_elem_t elem = {
- .callback = info->callback,
- .callback_key = info->callback_key,
+ .info = info,
.data = _DML_create_simple_event_data(
- indices, dimensions, args, args_size, domains, no_domains)};
+ indices, dimensions, args, args_size, domains, no_domains)
+ };
VADD(hook->queue, elem);
}
+
+#define _DML_SEND_HOOK(hookref, msg) \
+ (({ _DML_send_hook(&_dev->obj, &_dev->_detached_hook_queue_stack, \
+ hookref, msg); }))
+
+#define _DML_SEND_HOOK_RAII(hookref, msg, destructor) (({ \
+ void *__msg = (void *)(msg); \
+ uint64 __resumed = _DML_SEND_HOOK(hookref, __msg); \
+ destructor(__msg); \
+ __resumed; }))
+
UNUSED static uint64
_DML_send_hook(conf_object_t *dev,
_dml_detached_hook_queue_t **detached_queue_stack,
@@ -1533,7 +1560,7 @@ _DML_send_hook(conf_object_t *dev,
VFORI(detached_queue, i) {
_dml_hook_queue_elem_t elem = VGET(detached_queue, i);
++detached_queue_ref.processed_elems;
- elem.callback(dev, elem.data.indices, elem.data.args, msg);
+ elem.info->callback(dev, elem.data.indices, elem.data.args, msg);
_free_simple_event_data(elem.data);
}
if (detached_queue_ref.next) {
@@ -1582,6 +1609,8 @@ _DML_cancel_afters_in_hook_queue(
}
}
if (found) {
+ if (elem.info->args_destructor)
+ elem.info->args_destructor(elem.data.args);
_free_simple_event_data(elem.data);
} else {
VADD(new_queue, elem);
@@ -1664,22 +1693,22 @@ _DML_deserialize_hook_queue_elem_after(ht_str_table_t *callback_ht,
goto error;
}
- error = _deserialize_simple_event_arguments(
- callback_info->args_size, callback_info->args_deserializer,
- &arguments_attr, &data.args);
+ error = _deserialize_simple_event_domains(
+ id_info_ht, &domains_attr, &data.domains, &data.no_domains);
if (unlikely(error != Sim_Set_Ok)) {
goto error;
}
- error = _deserialize_simple_event_domains(
- id_info_ht, &domains_attr, &data.domains, &data.no_domains);
+ error = _deserialize_simple_event_arguments(
+ callback_info->args_size, callback_info->args_deserializer,
+ &arguments_attr, &data.args);
if (unlikely(error != Sim_Set_Ok)) {
goto error;
}
- e->callback = callback_info->callback;
+
+ e->info = callback_info;
e->data = data;
- e->callback_key = callback_info->callback_key;
return Sim_Set_Ok;
error:
_free_simple_event_data(data);
@@ -1691,12 +1720,9 @@ UNUSED static attr_value_t
_DML_serialize_hook_queue_elem_after(ht_str_table_t *callback_ht,
const _id_info_t *id_infos,
_dml_hook_queue_elem_t elem) {
- _dml_after_on_hook_info_t *callback_info =
- ht_lookup_str(callback_ht, elem.callback_key);
- ASSERT(callback_info);
- _id_info_t id_info = id_infos[callback_info->method_parent_id - 1];
+ _id_info_t id_info = id_infos[elem.info->method_parent_id - 1];
attr_value_t callback_key_attr
- = SIM_make_attr_string(callback_info->callback_key);
+ = SIM_make_attr_string(elem.info->callback_key);
attr_value_t indices_attr = SIM_alloc_attr_list(id_info.dimensions);
attr_value_t *indices_attr_list = SIM_attr_list(indices_attr);
@@ -1704,8 +1730,8 @@ _DML_serialize_hook_queue_elem_after(ht_str_table_t *callback_ht,
indices_attr_list[i] = SIM_make_attr_uint64(elem.data.indices[i]);
}
attr_value_t arguments_attr =
- callback_info->args_serializer
- ? callback_info->args_serializer(elem.data.args)
+ elem.info->args_serializer
+ ? elem.info->args_serializer(elem.data.args)
: SIM_make_attr_list(0);
attr_value_t domains_attr = SIM_alloc_attr_list(elem.data.no_domains);
@@ -2149,7 +2175,8 @@ _DML_get_qname(_identity_t id, const _id_info_t *id_infos,
uint32 indices[info.dimensions];
uint32 index = id.encoded_index;
- for (int32 i = info.dimensions - 1; i >= 0; --i) {
+ for (uint32 i_ = 0; i_ < info.dimensions; ++i_) {
+ uint32 i = info.dimensions - 1 - i_;
indices[i] = index % info.dimsizes[i];
index /= info.dimsizes[i];
}
@@ -2937,6 +2964,356 @@ _callback_after_write(conf_object_t *bank,
Callback_After_Write);
}
+
+typedef void (*_raii_copier_t)(void *tgt, const void *src);
+typedef void (*_raii_destructor_t)(void *data);
+
+typedef struct {
+ const _raii_destructor_t *destructor_ref;
+ char data[] __attribute__((aligned));
+} _loose_allocation_t;
+
+typedef struct {
+ _raii_destructor_t destructor;
+ char data[] __attribute__((aligned));
+} _orphan_allocation_t;
+
+typedef struct {
+ _raii_destructor_t destructor;
+ void *data;
+} _scope_allocation_t;
+
+#define DML_NEW(t, count, destroy_ref) ({ \
+ _loose_allocation_t *__alloc = \
+ mm_zalloc(sizeof(_loose_allocation_t) \
+ + sizeof(t)*(size_t)(count), \
+ sizeof(t), \
+ #t " (DML allocated)", \
+ __FILE__, __LINE__); \
+ __alloc->destructor_ref = destroy_ref; \
+ (typeof(t) *)__alloc->data; })
+
+#define DML_NEW_ORPHAN(t, destroy) (({ \
+ _orphan_allocation_t *__alloc = \
+ mm_zalloc(sizeof(_orphan_allocation_t) \
+ + sizeof(t), \
+ sizeof(t), \
+ "orphan temporary (DML allocated)", \
+ __FILE__, __LINE__); \
+ __alloc->destructor = destroy; \
+ (typeof(t) *)__alloc->data; }))
+
+#define DML_RAII_SCOPE_LVAL(n, destructor, x) do { \
+ ASSERT_MSG(_scope_allocs_lens[n] \
+ < sizeof(_scope_ ## n ## _allocs) \
+ /sizeof(_scope_allocation_t), \
+ "RAII allocation buffer overrun. " \
+ "Report this as a DMLC bug."); \
+ _scope_ ## n ## _allocs[_scope_allocs_lens[n]++] = \
+ (_scope_allocation_t) { destructor, (void *)&x }; \
+ } while (0)
+
+
+#define DML_RAII_SCOPE_ORPHAN(n, destructor, x) (*({ \
+ typeof(x) *__alloc = DML_NEW_ORPHAN(typeof(x), destructor); \
+ memcpy(__alloc, (typeof(x)[1]) { x }, sizeof(*__alloc)); \
+ DML_RAII_SCOPE_LVAL(n, _DML_delete_orphan, *__alloc); \
+ __alloc; }))
+
+#define DML_RAII_SCOPE_ARRAY_ORPHAN(n, destructor, x) (*({ \
+ typeof(x) *__alloc = DML_NEW_ORPHAN(typeof(x), destructor); \
+ memcpy(__alloc, x, sizeof(*__alloc)); \
+ DML_RAII_SCOPE_LVAL(n, _DML_delete_orphan, *__alloc); \
+ __alloc; }))
+
+#define DML_RAII_SESSION_ORPHAN(lval, destructor, x) (*({ \
+ typeof(x) *__alloc = DML_NEW_ORPHAN(typeof(x), destructor); \
+ memcpy(__alloc, (typeof(x)[1]) { x }, sizeof(*__alloc)); \
+ lval = __alloc; \
+ __alloc; }))
+
+#define DML_RAII_SESSION_ARRAY_ORPHAN(lval, destructor, x) (*({      \
+        typeof(x) *__alloc = DML_NEW_ORPHAN(typeof(x), destructor);  \
+        memcpy(__alloc, x, sizeof(*__alloc));                        \
+        lval = (void *)__alloc;                                      \
+        __alloc; }))
+
+#define DML_STATIC_ARRAY(t, x) (*({ \
+ static typeof(t) __array; \
+ memcpy(__array, x, sizeof(t)); \
+ &__array;}))
+
+#define DML_STATIC_ARRAY_CONSTSAFE(x) (*({ \
+ typeof(x) *__array = MM_MALLOC(1, typeof(x)); \
+ memcpy(*__array, x, sizeof(x)); \
+ __array; }))
+
+#define DML_RAII_SCOPE_CLEANUP(n) do { \
+ _DML_cleanup_raii_scope(_scope_ ## n ## _allocs, \
+ _scope_allocs_lens[n]); \
+ _scope_allocs_lens[n] = 0; \
+ } while (0)
+
+UNUSED static void _DML_cleanup_raii_scope(_scope_allocation_t *allocs,
+ uint16 len) {
+ for (uint16 i = 0; i < len; ++i) {
+ allocs[i].destructor(allocs[i].data);
+ }
+}
+
+
+#define DML_DELETE(alloc) ( \
+ _DML_delete(alloc, \
+ (uintptr_t)_dml_raii_destructors, \
+ (uintptr_t)_dml_raii_destructor_ref_none, \
+ __FILE__, \
+ __LINE__))
+
+UNUSED static void _DML_delete(void *_alloc,
+ uintptr_t _dml_raii_destructors,
+ uintptr_t _dml_raii_destructor_ref_none,
+ const char *filename,
+ int lineno) {
+ if (!_alloc)
+ return;
+ _loose_allocation_t *alloc = (_loose_allocation_t *)(
+ (char *)_alloc - offsetof(_loose_allocation_t, data));
+ if ((uintptr_t)alloc->destructor_ref != _dml_raii_destructor_ref_none) {
+ if (likely((uintptr_t)alloc->destructor_ref >= _dml_raii_destructors
+ && (uintptr_t)alloc->destructor_ref
+ < _dml_raii_destructor_ref_none)) {
+ (*alloc->destructor_ref)(alloc->data);
+ } else {
+ _signal_critical_error("%s:%d: DML 'delete' statement used on "
+ "pointer %p, which has not been allocated "
+ "with 'new'! "
+ "Any pointer external to a particular DML "
+ "device model should be deallocated using "
+ "'MM_FREE'.",
+ filename, lineno, _alloc);
+ return;
+ }
+ }
+ MM_FREE(alloc);
+}
+
+UNUSED static void _DML_delete_orphan(void *_alloc) {
+ if (!_alloc)
+ return;
+ _orphan_allocation_t *alloc = (_orphan_allocation_t *)(
+ (char *)_alloc - offsetof(_orphan_allocation_t, data));
+ if (alloc->destructor) {
+ alloc->destructor(alloc->data);
+ }
+ MM_FREE(alloc);
+}
+
+UNUSED static void _dml_vect_free_raii(
+ size_t elem_size, _raii_destructor_t elem_raii_destructor,
+ _dml_vect_t v) {
+ if (elem_raii_destructor) {
+ for (uint32 i = 0; i < v.len; ++i) {
+ elem_raii_destructor(
+ v.elements + DML_VECT_INDEX(v, i)*elem_size);
+ }
+ }
+ _dml_vect_free(v);
+}
+
+UNUSED static void _dml_vect_resize_raii(
+ size_t elem_size, _raii_destructor_t elem_raii_destructor,
+ _dml_vect_t *v, uint32 new_len) {
+ ASSERT(new_len >= v->len || elem_raii_destructor);
+ for (uint32 i = new_len; i < v->len; ++i) {
+ elem_raii_destructor(
+ v->elements + DML_VECT_INDEX(*v, i)*elem_size);
+ }
+ _dml_vect_resize(elem_size, v, new_len, true);
+}
+
+UNUSED static void _dml_vect_clear_raii(
+ size_t elem_size, _raii_destructor_t elem_raii_destructor,
+ _dml_vect_t *v) {
+ for (uint32 i = 0; i < v->len; ++i) {
+ elem_raii_destructor(
+ v->elements + DML_VECT_INDEX(*v, i)*elem_size);
+ }
+ _dml_vect_clear(elem_size, v);
+}
+
+UNUSED static void _dml_vect_resize_destructive_raii(
+ size_t elem_size, _raii_destructor_t elem_raii_destructor,
+ _dml_vect_t *v, uint32 new_len) {
+ for (uint32 i = 0; i < v->len; ++i) {
+ elem_raii_destructor(
+ v->elements + DML_VECT_INDEX(*v, i)*elem_size);
+ }
+ _dml_vect_resize_destructive(elem_size, v, new_len);
+}
+
+
+// TODO consider moving elements to the start
+UNUSED static void _dml_vect_copy_raii(
+ size_t elem_size, _raii_copier_t elem_raii_copier,
+ _raii_destructor_t elem_raii_destructor,
+ _dml_vect_t *tgt, _dml_vect_t src) {
+ if (unlikely((uintptr_t)tgt->elements == (uintptr_t)src.elements)) {
+ ASSERT(tgt->size == src.size && tgt->len == src.len
+ && tgt->start == src.start);
+ return;
+ }
+ _dml_vect_resize_raii(elem_size, elem_raii_destructor, tgt, src.len);
+ for (uint32 i = 0; i < src.len; ++i) {
+ elem_raii_copier(
+ tgt->elements + DML_VECT_INDEX(*tgt, i)*elem_size,
+ src.elements + DML_VECT_INDEX(src, i)*elem_size);
+ }
+}
+
+UNUSED static _dml_vect_t
+_dml_vect_add(size_t elem_size, _dml_vect_t a, _dml_vect_t b) {
+ _dml_vect_append(elem_size, &a, b);
+ return a;
+}
+
+UNUSED static void _dml_vect_append_raii(
+ size_t elem_size, _raii_copier_t elem_raii_copier, _dml_vect_t *tgt,
+ _dml_vect_t src) {
+ uint32 tgt_start = tgt->len, add_len = src.len;
+ if (!add_len) return;
+ bool alias = tgt->elements == src.elements;
+ _dml_vect_resize(elem_size, tgt, tgt->len + src.len, true);
+ if (unlikely(alias))
+ src = *tgt;
+ for (uint32 i = 0; i < add_len; ++i) {
+ elem_raii_copier(tgt->elements
+ + DML_VECT_INDEX(*tgt, tgt_start + i)*elem_size,
+ src.elements + DML_VECT_INDEX(src, i)*elem_size);
+ }
+}
+
+UNUSED static _dml_vect_t
+_dml_vect_add_raii(size_t elem_size, _raii_copier_t elem_raii_copier,
+ _dml_vect_t a, _dml_vect_t b) {
+ _dml_vect_append_raii(elem_size, elem_raii_copier, &a, b);
+ return a;
+}
+
+UNUSED static void _dml_vect_set_compound_init_raii(
+    size_t elem_size, _raii_destructor_t elem_raii_destructor,
+    _dml_vect_t *tgt, const void *src, uint32 no_elements) {
+    if (!no_elements) {
+        _dml_vect_clear_raii(elem_size, elem_raii_destructor, tgt);
+        return;  // avoid memcpy on possibly-NULL tgt->elements after clear
+    }
+    _dml_vect_resize_destructive_raii(elem_size, elem_raii_destructor,
+                                      tgt, no_elements);
+    memcpy(tgt->elements, src, no_elements*elem_size);
+}
+
+// TODO(RAII): Not currently used.
+#define _DML_RAII_MOVED(x) (({ \
+ typeof(x) *__val = &(x); \
+ typeof(x) __ret = *__val; \
+ memset((void *)__val, 0, sizeof(x)); \
+ __ret; \
+ }))
+
+#define _DML_RAII_DUPE(t, copier, x) (({ \
+ typeof(t) __ret = {0}, __val = x; \
+ copier(&__ret, &__val); \
+ __ret; \
+ }))
+
+#define _DML_RAII_ZERO_OUT(destroy, lval) (({ \
+ void *__pointer = (void *)&(lval); \
+ destroy(__pointer); \
+ memset(__pointer, 0, sizeof(lval)); \
+ }))
+
+#define _DML_RAII_COPY_RVAL(copier, dest, src) (({ \
+ copier((void *)&(dest), (typeof(dest)[1]){(src)}); \
+ }))
+
+#define _DML_RAII_DESTROY_RVAL(destroy, expr) (({ \
+ destroy((void *)(typeof(expr)[1]){(expr)}); \
+ }))
+
+// No aliasing checks; not needed.
+// Note that src must be evaluated before tgt is destroyed
+#define _DML_RAII_LINEAR_MOVE_SIMPLE_RVAL(t, destroy, tgt, src) (({ \
+ typeof(t) __src = src; \
+ typeof(t) *__dest = (typeof(t) *)&(tgt); \
+ destroy(__dest); \
+ *__dest = __src; \
+ }))
+
+// No aliasing checks; not needed.
+// Note that src must be evaluated before tgt is destroyed
+#define _DML_RAII_LINEAR_MOVE_SIMPLE(t, destroy, tgt, src) (({ \
+ typeof(t) *__src = (typeof(t) *)&(src); \
+ typeof(t) *__dest = (typeof(t) *)&(tgt); \
+ destroy(__dest); \
+ *__dest = *__src; \
+ }))
+
+
+#define _DML_RAII_LINEAR_MOVE_MEMCPY(destroy, tgt, src) (({ \
+ const void *__src = &(src); \
+ void *__dest = (void *)&(tgt); \
+ destroy(__dest); \
+ memcpy(__dest, __src, sizeof(tgt)); \
+ }))
+
+#define _DML_RAII_LINEAR_MOVE_MEMCPY_RVAL(destroy, tgt, src) (({    \
+        typeof(tgt) __src = src;                                    \
+        void *__dest = (void *)&(tgt);                              \
+        destroy(__dest);                                            \
+        memcpy(__dest, &__src, sizeof(tgt));                        \
+    }))
+
+UNUSED static void
+_DML_vector_nonraii_elems_destructor(void *data) {
+ _dml_vect_free(*(_dml_vect_t *)data);
+}
+
+UNUSED static void
+_DML_string_destructor(void *data) {
+ _dml_string_free(*(_dml_string_t *)data);
+}
+
+UNUSED static void
+_DML_string_copier(void *tgt, const void *src) {
+ _dml_string_copy((_dml_string_t *)tgt, *(const _dml_string_t *)src);
+}
+
+UNUSED static _dml_string_t
+_dml_string_add(_dml_string_t a, _dml_string_t b) {
+ _dml_string_cat(&a, b);
+ return a;
+}
+
+UNUSED static _dml_string_t
+_dml_string_add_cstr_before(const char *a, _dml_string_t b) {
+ _dml_string_addstr_before(a, &b);
+ return b;
+}
+
+UNUSED static _dml_string_t
+_dml_string_add_cstr(_dml_string_t a, const char *b) {
+ _dml_string_addstr(&a, b);
+ return a;
+}
+
+UNUSED static int
+_dml_string_cmp(_dml_string_t a, _dml_string_t b) {
+ return strcmp(_dml_string_str(a), _dml_string_str(b));
+}
+
+UNUSED static int
+_dml_string_cmp_c_str(_dml_string_t a, const char *b) {
+ return strcmp(_dml_string_str(a), b);
+}
+
UNUSED static set_error_t
_set_device_member(attr_value_t val,
char *ptr,
@@ -3175,83 +3552,173 @@ _serialize_array(const void *data, size_t elem_size,
total_elem_count, serialize_elem);
}
-
-static set_error_t
-_deserialize_array_aux(attr_value_t val, uint8 *data, size_t elem_size,
- const uint32 *dimsizes, uint32 dims,
- size_t total_elem_count,
- _deserializer_t deserialize_elem,
- bool elems_are_bytes) {
- uint32 len = *dimsizes;
- ASSERT(len != 0);
-
- // Allow the final dimension to be represented as data if elems_are_bytes
- bool data_allowed = elems_are_bytes && dims == 1;
- if (data_allowed && SIM_attr_is_data(val)) {
- if (unlikely(SIM_attr_data_size(val) != len)) {
- SIM_c_attribute_error(
- "Invalid serialized value of byte array: expected data of %u "
- "bytes, got %u", len, SIM_attr_data_size(val));
- return Sim_Set_Illegal_Value;
- }
- memcpy(data, SIM_attr_data(val), len);
- return Sim_Set_Ok;
- } else if (unlikely(!SIM_attr_is_list(val))) {
- if (data_allowed) {
- SIM_attribute_error("Invalid serialized representation of byte "
- "array: not a list or data");
- } else {
- SIM_attribute_error("Invalid serialized representation of array: "
- "not a list");
+UNUSED static set_error_t
+_deserialize_array(attr_value_t in_val, void *out_data, size_t elem_size,
+ const uint32 *dimsizes, uint32 dims,
+ _deserializer_t deserialize_elem,
+ _raii_destructor_t elem_raii_destructor,
+ bool elems_are_bytes) {
+ uint32 outer_dims_lists = 1;
+ for (uint32 i = 0; i < dims - 1; ++i) {
+ outer_dims_lists *= dimsizes[i];
+ }
+ uint32 total_elem_count = outer_dims_lists*dimsizes[dims-1];
+ attr_value_t *lists = MM_MALLOC(outer_dims_lists, attr_value_t);
+ attr_value_t *lists_buf = MM_MALLOC(outer_dims_lists, attr_value_t);
+ *lists = in_val;
+ size_t no_lists = 1;
+ for (uint32 i = 0; i < dims - 1; ++i) {
+ uint32 frag_size = dimsizes[i];
+ size_t new_no_lists = no_lists * frag_size;
+ for (uint32 j = 0; j < no_lists; ++j) {
+ if (unlikely(!SIM_attr_is_list(lists[j]))) {
+ SIM_attribute_error("Invalid serialized representation of "
+ "array: not a list");
+ goto flatten_failure;
+ } else if (unlikely(SIM_attr_list_size(lists[j]) != frag_size)) {
+ SIM_c_attribute_error(
+ "Invalid serialized representation of array: expected "
+ "list of %u elements, got %u",
+ frag_size, SIM_attr_list_size(lists[j]));
+ goto flatten_failure;
+ }
+ attr_value_t *subfrags = SIM_attr_list(lists[j]);
+ for (int k = 0; k < frag_size; ++k) {
+ lists_buf[j * frag_size + k] = subfrags[k];
+ }
}
- return Sim_Set_Illegal_Type;
- } else if (unlikely(SIM_attr_list_size(val) != len)) {
- SIM_c_attribute_error(
- "Invalid serialized representation of %sarray: expected list of "
- "%u elements, got %u", data_allowed ? "byte " : "", len,
- SIM_attr_list_size(val));
+ attr_value_t *prev_lists = lists;
+ lists = lists_buf;
+ lists_buf = prev_lists;
+ no_lists = new_no_lists;
+ }
+ if (0) {
+ flatten_failure:
+ MM_FREE(lists);
+ MM_FREE(lists_buf);
return Sim_Set_Illegal_Type;
}
- attr_value_t *items = SIM_attr_list(val);
- size_t children_elem_count = total_elem_count/len;
- for (uint32 i = 0; i < len; ++i) {
- set_error_t error;
- if (dims == 1) {
- error = deserialize_elem(items[i], &data[i*elem_size]);
+ MM_FREE(lists_buf);
+ uint8 *tmp_out = elem_raii_destructor
+ ? MM_MALLOC(total_elem_count*elem_size, uint8)
+ : MM_ZALLOC(total_elem_count*elem_size, uint8);
+ set_error_t error = Sim_Set_Ok;
+ uint32 processed = 0;
+ uint32 last_dimsize = dimsizes[dims-1];
+ for (uint32 i = 0; i < no_lists; ++i) {
+ attr_value_t list = lists[i];
+ if (elems_are_bytes && SIM_attr_is_data(list)) {
+ if (unlikely(SIM_attr_data_size(list) != last_dimsize)) {
+ SIM_c_attribute_error(
+ "Invalid serialized value of byte array: expected "
+ "data of %u bytes, got %u", last_dimsize,
+ SIM_attr_data_size(list));
+ error = Sim_Set_Illegal_Value;
+ goto elem_deserialization_failure;
+ }
+ memcpy(tmp_out + last_dimsize*i, SIM_attr_data(list),
+ last_dimsize);
+ processed += last_dimsize;
+ } else if (likely(SIM_attr_is_list(list))) {
+ if (unlikely(SIM_attr_list_size(list) != last_dimsize)) {
+ SIM_c_attribute_error(
+ "Invalid serialized representation of %sarray: "
+ "expected list of %u elements, got %u",
+ elems_are_bytes ? "byte " : "", last_dimsize,
+ SIM_attr_list_size(list));
+ error = Sim_Set_Illegal_Type;
+ goto elem_deserialization_failure;
+ }
+ attr_value_t *items = SIM_attr_list(list);
+ for (uint32 j = 0; j < last_dimsize; ++j) {
+ error = deserialize_elem(
+ items[j], &tmp_out[(last_dimsize*i + j)*elem_size]);
+ if (unlikely(error != Sim_Set_Ok))
+ goto elem_deserialization_failure;
+ ++processed;
+ }
} else {
- error = _deserialize_array_aux(
- items[i], &data[i*children_elem_count*elem_size], elem_size,
- dimsizes + 1, dims - 1, children_elem_count, deserialize_elem,
- elems_are_bytes);
+ SIM_attribute_error(
+ elems_are_bytes
+ ? "Invalid serialized representation of byte array: not a "
+ "list or data"
+ : "Invalid serialized representation of array: not a list");
+ error = Sim_Set_Illegal_Type;
+ goto elem_deserialization_failure;
}
- if (error != Sim_Set_Ok) {
- return error;
+ }
+ elem_deserialization_failure:
+ MM_FREE(lists);
+ if (unlikely(error != Sim_Set_Ok)) {
+ if (elem_raii_destructor) {
+ for (uint32 i = 0; i < processed; ++i)
+ elem_raii_destructor(tmp_out + i*elem_size);
+ }
+ } else {
+ if (elem_raii_destructor) {
+ for (uint32 i = 0; i < total_elem_count; ++i)
+ elem_raii_destructor((char *)out_data + i*elem_size);
}
+ memcpy(out_data, tmp_out, total_elem_count*elem_size);
}
- return Sim_Set_Ok;
+ MM_FREE(tmp_out);
+ return error;
}
-UNUSED static set_error_t
-_deserialize_array(attr_value_t in_val, void *out_data, size_t elem_size,
- const uint32 *dimsizes, uint32 dims,
- _deserializer_t deserialize_elem, bool elems_are_bytes) {
- ASSERT(dims > 0);
- size_t total_elem_count = 1;
- for (uint32 i = 0; i < dims; ++i) {
- total_elem_count *= dimsizes[i];
+UNUSED static void _dml_vect_move_raii(
+ size_t elem_size, _raii_destructor_t elem_raii_destructor,
+ _dml_vect_t *tgt, _dml_vect_t src) {
+ if (unlikely((uintptr_t)tgt->elements == (uintptr_t)src.elements)) {
+ ASSERT(tgt->size == src.size && tgt->len == src.len
+ && tgt->start == src.start);
+ return;
}
+ _dml_vect_free_raii(elem_size, elem_raii_destructor, *tgt);
+ *tgt = src;
+}
- uint8 *temp_out = MM_MALLOC(total_elem_count * elem_size, uint8);
- set_error_t error = _deserialize_array_aux(
- in_val, temp_out, elem_size, dimsizes, dims, total_elem_count,
- deserialize_elem, elems_are_bytes);
- if (error == Sim_Set_Ok) {
- memcpy(out_data, temp_out, total_elem_count*elem_size);
+UNUSED static attr_value_t
+_serialize_vector(_dml_vect_t v, size_t elem_size,
+ _serializer_t serialize_elem) {
+ attr_value_t val = SIM_alloc_attr_list(v.len);
+ attr_value_t *items = SIM_attr_list(val);
+ for (uint32 i = 0; i < v.len; ++i) {
+ items[i] = serialize_elem(v.elements + DML_VECT_INDEX(v, i)*elem_size);
}
- MM_FREE(temp_out);
- return error;
+ return val;
}
+UNUSED static set_error_t
+_deserialize_vector(attr_value_t val, _dml_vect_t *tgt, size_t elem_size,
+ _deserializer_t deserialize_elem,
+ _raii_destructor_t elem_raii_destructor) {
+
+ if (unlikely(!SIM_attr_is_list(val))) {
+ SIM_attribute_error("Invalid serialized representation of vector: "
+ "not a list");
+ return Sim_Set_Illegal_Type;
+ }
+ _dml_vect_t tmp_vect = {0};
+ _dml_vect_resize(elem_size, &tmp_vect, SIM_attr_list_size(val),
+ elem_raii_destructor ? true : false);
+ ASSERT(tmp_vect.start == 0);
+ attr_value_t *items = SIM_attr_list(val);
+ for (uint32 i = 0; i < tmp_vect.len; ++i) {
+ set_error_t error = deserialize_elem(items[i],
+ tmp_vect.elements + i*elem_size);
+ if (error != Sim_Set_Ok) {
+ if (elem_raii_destructor) {
+ for (uint32 j = 0; j < i; ++j) {
+ elem_raii_destructor(tmp_vect.elements + j*elem_size);
+ }
+ }
+ _dml_vect_free(tmp_vect);
+ return error;
+ }
+ }
+ _dml_vect_move_raii(elem_size, elem_raii_destructor, tgt, tmp_vect);
+ return Sim_Set_Ok;
+}
// The internal format for ht is:
// dict(trait_identifier -> dict(statement_idx -> bool))
@@ -3402,5 +3869,4 @@ UNUSED static void _DML_register_attributes(
sb_free(&type);
}
}
-
#endif
diff --git a/include/simics/dml-raii-types.h b/include/simics/dml-raii-types.h
new file mode 100644
index 000000000..9ee869600
--- /dev/null
+++ b/include/simics/dml-raii-types.h
@@ -0,0 +1,1138 @@
+/*
+ © 2010-2023 Intel Corporation
+ SPDX-License-Identifier: 0BSD
+*/
+
+/* DML runtime utilities needed by the C code generated by dmlc */
+
+#ifndef SIMICS_DML_RAII_TYPES_H
+#define SIMICS_DML_RAII_TYPES_H
+
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include <simics/base-types.h>
+#include <simics/util/alloc.h> /* NOTE(review): include targets were stripped during extraction; reconstructed from usage — verify against upstream */
+
+// Smallest power of two >= x, except _DML_BITCEIL(0) == 0
+#define _DML_BITCEIL(x) \
+    ((uint32)(x) <= 1 ? (uint32)(x) : ((uint32)1 << (32 - __builtin_clz((x) - 1))))
+// Largest power of two <= x, except _DML_BITFLOOR(0) == 0
+#define _DML_BITFLOOR(x) \
+    ((uint32)(x) <= 1 ? (uint32)(x) : ((uint32)1 << (31 - __builtin_clz(x))))
+
+// Variant of strbuf, but where 0-initialization is valid.
+// Invariants:
+// - size > len OR size == len == 0
+// - size > 0 implies s != NULL and s[len] == '\0'
+typedef struct {
+ char *s;
+ uint32 size;
+ uint32 len;
+} _dml_string_t;
+
+/* minimum allocation size */
+#define _DML_STRING_INITIAL_SIZE 32
+
+/* return string as C-string, always 0-terminated.
+ Only guaranteed valid as long as the argument is alive and
+ unmodified.
+ May be written to, but only at indices less than len
+ (the final NUL character may not be written to.) */
+UNUSED static inline char *
+_dml_string_str(_dml_string_t s) {
+ return s.s ? s.s : (char *)"";
+}
+
+#define DML_STRING_INBOUNDS(v, i) \
+ ASSERT_FMT((i) >= 0 && (i) < (v).len, \
+ "OOB index %u for DML string of length %u", \
+ (unsigned) i, (v).len)
+
+#define DML_STRING_CHAR(str, i) (*({ \
+ _dml_string_t __string = (str); \
+ uint32 __index = (i); \
+ DML_STRING_INBOUNDS(__string, __index); \
+ &__string.s[__index]; \
+ }))
+
+UNUSED static inline void
+_dml_string_init(_dml_string_t *s) {
+ s->s = NULL;
+ s->size = 0;
+ s->len = 0;
+}
+
+UNUSED static void
+_dml_string_realloc(_dml_string_t *s, uint32 new_size) {
+ // new_size is a power of two or 0
+ ASSERT((new_size & (new_size - 1)) == 0);
+ if (unlikely(new_size == s->size)) {
+ return;
+ }
+ // TODO(RAII) This path is unused. Remove?
+ if (unlikely(new_size == 0)) {
+ MM_FREE(s->s);
+ s->s = NULL;
+ s->size = s->len = 0;
+ return;
+ }
+ s->s = MM_REALLOC_SZ(s->s, new_size, char);
+ s->size = new_size;
+}
+
+UNUSED static void
+_dml_string_realloc_for_len(_dml_string_t *s, uint32 new_len) {
+ ASSERT(s->len < s->size || (s->len == 0 && s->size == 0));
+ if (s->len == new_len) {
+ return;
+ }
+ uint32 new_size = s->size;
+ if ((s->size > _DML_STRING_INITIAL_SIZE && new_len + 1 < s->size/4)) {
+ new_size = MAX(_DML_STRING_INITIAL_SIZE, _DML_BITCEIL(new_len + 1)*2);
+ } else if (new_len + 1 > s->size) {
+ new_size = MAX(_DML_STRING_INITIAL_SIZE, _DML_BITCEIL(new_len + 1));
+ }
+ // This assert also catches if the buf grows to be larger than
+ // 2^32-1 elements
+ ASSERT(new_size >= new_len);
+ if (new_size != s->size)
+ _dml_string_realloc(s, new_size);
+}
+
+UNUSED static void
+_dml_string_resize(_dml_string_t *s, uint32 new_len) {
+ uint32 prev_len = s->len;
+ uint32 prev_size = s->size;
+ if (prev_len == new_len) return;
+ _dml_string_realloc_for_len(s, new_len);
+ s->len = new_len;
+ if (s->size) {
+ if (prev_size == 0 || prev_len > new_len) {
+ s->s[new_len] = '\0';
+ } else {
+ memset(s->s + prev_len + 1, 0, new_len - prev_len);
+ }
+ }
+}
+
+UNUSED __attribute__ ((const)) static inline bool
+_dml_pointers_overlap(const void *const orig, const void *const possible_deriv,
+ const size_t orig_size) {
+ return (uintptr_t)(orig) + orig_size > (uintptr_t)possible_deriv
+ && (uintptr_t)possible_deriv >= (uintptr_t)orig;
+}
+
+/* set a string to the contents of a C-string */
+UNUSED static void
+_dml_string_set(_dml_string_t *s, const char *str) {
+ if (unlikely(_dml_pointers_overlap(s->s, str, s->size))) {
+ uint32 new_len = s->len - (uint32)((uintptr_t)str - (uintptr_t)s->s);
+ if (new_len == s->len) return;
+ memmove(s->s, str, new_len + 1);
+ _dml_string_realloc_for_len(s, new_len);
+ s->len = new_len;
+ return;
+ }
+ uint32 len = strlen(str);
+ _dml_string_realloc_for_len(s, len);
+ if (s->size)
+ memcpy(s->s, str, len + 1);
+ s->len = len;
+}
+
+/* make a string empty */
+UNUSED static inline void
+_dml_string_clear(_dml_string_t *s)
+{
+ _dml_string_realloc_for_len(s, 0);
+ if (s->size)
+ s->s[0] = '\0';
+ s->len = 0;
+}
+
+/* set a string to the contents of another string */
+UNUSED static void
+_dml_string_copy(_dml_string_t *dst, _dml_string_t src) {
+ if (src.s == dst->s) {
+ ASSERT(dst->len == src.len && dst->size == src.size);
+ return;
+ }
+ if (src.len) {
+ _dml_string_realloc_for_len(dst, src.len);
+ memcpy(dst->s, src.s, src.len + 1);
+ dst->len = src.len;
+ } else {
+ _dml_string_clear(dst);
+ }
+}
+
+/* Free storage associated with a string, making it empty.
+ The string should not be used after calling this function. */
+UNUSED static void
+_dml_string_free(_dml_string_t s)
+{
+ MM_FREE(s.s);
+}
+
+UNUSED static inline void
+_dml_string_move(_dml_string_t *dst, _dml_string_t src) {
+ _dml_string_free(*dst);
+ *dst = src;
+}
+
+/* append s2 to s1 */
+UNUSED static void
+_dml_string_cat(_dml_string_t *s1, _dml_string_t s2)
+{
+ if (s2.len == 0) return;
+ if (unlikely(s1->s == s2.s)) {
+ ASSERT(s1->len == s2.len && s1->size == s2.size);
+ _dml_string_realloc_for_len(s1, 2*s2.len);
+ memcpy(s1->s + s2.len, s1->s, s2.len);
+ s1->len = 2*s2.len;
+ s1->s[s1->len] = '\0';
+ } else {
+ _dml_string_realloc_for_len(s1, s1->len + s2.len);
+ memcpy(s1->s + s1->len, s2.s, s2.len + 1);
+ s1->len += s2.len;
+ }
+}
+
+/* append a C-string to a strbuf */
+UNUSED static void
+_dml_string_addstr(_dml_string_t *s, const char *str)
+{
+ if (unlikely(_dml_pointers_overlap(s->s, str, s->size))) {
+ uint32 offset = (uintptr_t)str - (uintptr_t)s->s;
+ uint32 new_len = 2*s->len - offset;
+ if (new_len == s->len) return;
+ _dml_string_realloc_for_len(s, new_len);
+ memcpy(s->s + s->len, s->s + offset, s->len - offset);
+ s->s[new_len] = '\0';
+ s->len = new_len;
+ return;
+ }
+ uint32 len = strlen(str);
+ if (!len)
+ return;
+ _dml_string_realloc_for_len(s, s->len + len);
+ memcpy(s->s + s->len, str, len + 1);
+ s->len += len;
+}
+
+/* append a C-string to a strbuf */
+UNUSED static void
+_dml_string_addstr_before(const char *str, _dml_string_t *s)
+{
+ if (unlikely(_dml_pointers_overlap(s->s, str, s->size))) {
+ uint32 offset = (uintptr_t)str - (uintptr_t)s->s;
+ uint32 len = s->len - offset;
+ if (!len) return;
+ _dml_string_realloc_for_len(s, s->len + len);
+ memmove(s->s + len, s->s, s->len + 1);
+ memcpy(s->s, s->s + len + offset, len);
+ s->len += len;
+ return;
+ }
+ uint32 len = strlen(str);
+ if (!len) return;
+ _dml_string_realloc_for_len(s, s->len + len);
+ memmove(s->s + len, s->s, s->len + 1);
+ memcpy(s->s, str, len);
+ s->len += len;
+}
+
+/* append a counted string to a strbuf */
+UNUSED static void
+_dml_string_addmem(_dml_string_t *s, const char *str, uint32 len)
+{
+ if (!len)
+ return;
+ if (unlikely(_dml_pointers_overlap(s->s, str, s->size))) {
+ uint32 offset = (uintptr_t)str - (uintptr_t)s->s;
+ // + 1 as including the NUL byte is allowed
+ ASSERT(len <= s->len - offset + 1);
+ _dml_string_realloc_for_len(s, s->len + len);
+ memcpy(s->s + s->len, s->s + offset, len);
+ s->len += len;
+ s->s[s->len] = '\0';
+ return;
+ }
+ _dml_string_realloc_for_len(s, s->len + len);
+ memcpy(s->s + s->len, str, len);
+ s->len += len;
+ s->s[s->len] = '\0';
+}
+
+/* add a character to a string */
+UNUSED static void
+_dml_string_addc(_dml_string_t *s, char c)
+{
+ _dml_string_realloc_for_len(s, s->len + 1);
+ s->s[s->len++] = c;
+ s->s[s->len] = '\0';
+}
+
+/* add a character repeated a given number of times to a string */
+UNUSED static void
+_dml_string_addchars(_dml_string_t *s, char c, unsigned n)
+{
+ _dml_string_realloc_for_len(s, s->len + n);
+ memset(s->s + s->len, c, n);
+ s->len += n;
+ s->s[s->len] = '\0';
+}
+
+/* return a copy of a string */
+UNUSED static inline _dml_string_t
+_dml_string_dupe(_dml_string_t s)
+{
+ _dml_string_t new_s = {0};
+ _dml_string_cat(&new_s, s);
+ return new_s;
+}
+
+/* Delete at most n characters from position start.
+ If start is negative, count from the end. */
+UNUSED static void
+_dml_string_delete(_dml_string_t *s, int start, unsigned n)
+{
+ if (start < 0)
+ start = s->len + start;
+
+ /* Avoid deleting too many characters. */
+ if (start + n > s->len)
+ n = s->len - start;
+
+ if (s->len)
+ memmove(s->s + start, s->s + start + n,
+ s->len - (start + n) + 1);
+ _dml_string_realloc_for_len(s, s->len - n);
+ s->len -= n;
+}
+
+/* Insert n characters from str at index start (start <= s->len) */
+UNUSED static void
+_dml_string_insertmem(_dml_string_t *s, uint32 start, const char *str, uint32 n)
+{
+ ASSERT(start <= s->len);
+ if (!n) return;
+ bool overlap = _dml_pointers_overlap(s->s, str, s->size);
+ if (unlikely(overlap)) {
+ char *duped_str = MM_MALLOC(n, char);
+ memcpy(duped_str, str, n);
+ str = duped_str;
+ }
+ _dml_string_realloc_for_len(s, s->len + n);
+ memmove(s->s + start + n, s->s + start, s->len - start + 1);
+ memcpy(s->s + start, str, n);
+ s->len += n;
+ if (unlikely(overlap)) {
+ MM_FREE((char *)str);
+ }
+}
+
+/* Insert a zero-terminated string at index start (start <= s->len) */
+UNUSED static inline void
+_dml_string_insertstr(_dml_string_t *s, unsigned start, const char *str)
+{
+ _dml_string_insertmem(s, start, str, strlen(str));
+}
+
+/* return a fresh strbuf initialised to a copy of a C string */
+UNUSED static _dml_string_t
+_dml_string_new(const char *init)
+{
+ _dml_string_t s = {0};
+ _dml_string_set(&s, init);
+ return s;
+}
+
+
+/* return a fresh strbuf initialised to a copy of a counted C string */
+UNUSED static _dml_string_t
+_dml_string_new_counted(const char *init, uint32 size)
+{
+ _dml_string_t s = {0};
+ _dml_string_addmem(&s, init, size);
+ return s;
+}
+
+/* return allocated C-string, and clear strbuf */
+UNUSED static char *
+_dml_string_detach(_dml_string_t *s)
+{
+ char *detached = s->s;
+ if (s->size == 0) {
+ /* special case: returned malloced zero-sized string */
+ detached = MM_MALLOC(1, char);
+ *detached = 0;
+ }
+ _dml_string_init(s);
+ return detached;
+}
+
+/* output a string to a file. return number of characters written */
+UNUSED static int
+_dml_string_write(_dml_string_t s, FILE *f)
+{
+ return fwrite(_dml_string_str(s), 1, s.len, f);
+}
+
+/* append formatted text to string */
+UNUSED static void
+_dml_string_vaddfmt(_dml_string_t *s, const char *format, va_list va)
+{
+ va_list va2;
+ va_copy(va2, va);
+ int need = vsnprintf(NULL, 0, format, va2);
+ va_end(va2);
+ ASSERT(need >= 0);
+ // Inefficient, but avoids aliasing issues
+ char *tmp = MM_MALLOC(need + 1, char);
+ int need2 = vsnprintf(tmp, need + 1, format, va);
+ ASSERT(need == need2);
+ _dml_string_realloc_for_len(s, s->len + need);
+ if (s->size)
+ memcpy(s->s + s->len, tmp, need + 1);
+ s->len += need;
+ MM_FREE(tmp);
+}
+
+/* append formatted text to string */
+UNUSED PRINTF_FORMAT(2, 3) static void
+_dml_string_addfmt(_dml_string_t *s, const char *format, ...)
+{
+ va_list va;
+
+ va_start(va, format);
+ _dml_string_vaddfmt(s, format, va);
+ va_end(va);
+}
+
+/* return a fresh DML string initialised to a formatted string */
+UNUSED static _dml_string_t
+_dml_string_vnewf(const char *format, va_list va)
+{
+ va_list va2;
+ va_copy(va2, va);
+ int need = vsnprintf(NULL, 0, format, va2);
+ va_end(va2);
+ ASSERT(need >= 0);
+ _dml_string_t s = {0};
+ if (!need)
+ return s;
+ _dml_string_realloc_for_len(&s, need);
+ int need2 = vsnprintf(s.s, need + 1, format, va);
+ ASSERT(need == need2);
+ s.len = need;
+ return s;
+}
+
+/* return a fresh DML string initialised to a formatted string */
+UNUSED PRINTF_FORMAT(1, 2) static _dml_string_t
+_dml_string_newf(const char *format, ...)
+{
+ va_list va;
+ va_start(va, format);
+ _dml_string_t s = _dml_string_vnewf(format, va);
+ va_end(va);
+ return s;
+}
+
+
+/* assign formatted text to string */
+UNUSED static void
+_dml_string_vfmt(_dml_string_t *s, const char *format, va_list va)
+{
+ _dml_string_move(s, _dml_string_vnewf(format, va));
+}
+
+/* assign formatted text to string */
+UNUSED static void
+PRINTF_FORMAT(2, 3)
+_dml_string_fmt(_dml_string_t *s, const char *format, ...)
+{
+ va_list va;
+ va_start(va, format);
+ _dml_string_vfmt(s, format, va);
+ va_end(va);
+}
+
+/* Add character, escaped if not printable or if delimiter */
+UNUSED static void
+_dml_string_addesc(_dml_string_t *s, char c, char delim)
+{
+ static const char ctrlchars[] = {
+ 0, 0, 0, 0, 0, 0, 0, 'a', 'b', 't', 'n', 'v', 'f', 'r'
+ };
+ unsigned char uc = c;
+ _dml_string_realloc_for_len(s, s->len + 4); /* room for "\377" */
+ /* Don't escape values in 0x80..0xff - it would severely reduce
+ readability of UTF-8 strings */
+ if (uc < 0x20 || uc == 0x7f) {
+ /* control char */
+ s->s[s->len++] = '\\';
+ if (uc < sizeof ctrlchars && ctrlchars[uc])
+ s->s[s->len++] = ctrlchars[uc];
+ else {
+ sprintf(s->s + s->len, "%03o", uc);
+ s->len += 3;
+ }
+ } else {
+ /* printable */
+ if (uc == delim || uc == '\\')
+ s->s[s->len++] = '\\';
+ s->s[s->len++] = uc;
+ }
+ s->s[s->len] = '\0';
+}
+
+/* Read a line from f into s, replacing previous contents if any, and
+ return true. If EOF was encountered before anything could be read,
+ return false. The contents of s are then undefined.
+ The buffer will not contain the terminating newline. The last line of
+ a stream will be read in even if not newline-terminated. */
+UNUSED static bool
+_dml_string_readline(_dml_string_t *s, FILE *f)
+{
+ /* BUG: This routine cannot read lines containing null bytes,
+ because fgets() does not tell us how many bytes it reads.
+
+ We could pad the buffer with nonzero bytes before calling
+ fgets() and detect the last null byte, but that would be
+ slow and complicate the code. Or we could do a simpler
+ solution based on getc_unlocked(), but we would have to
+ use plain getc() on Windows and that is quite slow. */
+
+ unsigned min_room = 128;
+ unsigned got = 0;
+ for (;;) {
+ unsigned room = s->len - got;
+ if (room < min_room) {
+ room = min_room;
+ _dml_string_resize(s, got + room);
+ }
+ if (!fgets(s->s + got, room, f)) {
+ if (got > 0)
+ break;
+ else
+ return false;
+ }
+ got += strlen(s->s + got);
+ if (got > 0 && s->s[got - 1] == '\n') {
+ got--;
+ break;
+ }
+ }
+ _dml_string_resize(s, got);
+ return true;
+}
+
+
+
+// Invariants:
+// - size is a power of two, or 0
+// - elements == NULL if and only if size == 0
+// - len <= size
+// - if size != 0 then start < size else start == 0
+typedef struct {
+ char *elements;
+ uint32 size;
+ uint32 start;
+ uint32 len;
+} _dml_vect_t;
+
+#define DML_VECT_INDEX(v, i) \
+ (((v).start + (i)) & ((v).size - 1))
+
+
+#define DML_VECT_ELEMENTS(t, v) \
+    ((typeof(t) *)_dml_vect_elements(sizeof(t), &(v)))
+
+// TODO(RAII): consider removal
+#define DML_VECT_ELEMENTS_FALLBACK(t, v, duped) (({ \
+ _dml_vect_t __vect = v; \
+ (typeof(t) *)(__vect.start + __vect.len <= __vect.size \
+ ? __vect.elements + __vect.start*sizeof(t) \
+ : _dml_vect_elements(sizeof(t), &duped)); \
+ }))
+
+#define DML_VECT_INBOUNDS(v, i) \
+ ASSERT_FMT((i) >= 0 && (i) < (v).len, \
+ "OOB index %u for DML vector of length %u", \
+ (unsigned) i, (v).len)
+
+#define DML_VECT_ELEM_UNSAFE(t, v, i) \
+ (((typeof(t) *)(v).elements)[DML_VECT_INDEX((v), (i))])
+
+#define DML_VECT_ELEM(t, v, i) (*({ \
+ _dml_vect_t __vect = v; \
+ uint32 __index = i; \
+ DML_VECT_INBOUNDS(__vect, __index); \
+ &DML_VECT_ELEM_UNSAFE(t, __vect, __index); \
+ }))
+
+#define DML_VECT_REMOVE(t, v, i) (({ \
+ _dml_vect_t *__vect = &(v); \
+ uint32 __index = i; \
+ DML_VECT_INBOUNDS(*__vect, __index); \
+ typeof(t) __popped = DML_VECT_ELEM_UNSAFE(t, *__vect, __index); \
+ _dml_vect_remove(sizeof(t), __vect, __index); \
+ __popped; \
+ }))
+
+#define DML_VECT_NEW_ELEM_AT(t, v, i) (*(typeof(t) *)({ \
+ _dml_vect_t *__vect = &(v); \
+ uint32 __index = i; \
+ ASSERT_FMT(__index >= 0 && __index <= __vect->len, \
+ "OOB insertion index %u for DML vector of length %u", \
+ (unsigned) __index, __vect->len); \
+ _dml_vect_new_at(sizeof(t), __vect, __index); \
+ }))
+
+#define DML_VECT_NEW_ELEM_AT_BACK(t, v) (*({ \
+ _dml_vect_t *__vect = &(v); \
+ _dml_vect_resize(sizeof(t), __vect, __vect->len + 1, false); \
+ &DML_VECT_ELEM_UNSAFE(t, *__vect, __vect->len - 1); \
+ }))
+
+#define DML_VECT_NEW_ELEM_AT_FRONT(t, v) \
+    (DML_VECT_NEW_ELEM_AT(t, v, 0))
+
+#define DML_VECT_POP_BACK(t, v) (({                                    \
+    _dml_vect_t *__vect = &(v);                                        \
+    ASSERT_MSG(__vect->len > 0, ".pop_back(): DML vector is empty");   \
+    typeof(t) __popped =                                               \
+        DML_VECT_ELEM_UNSAFE(t, *__vect, __vect->len - 1);             \
+    _dml_vect_remove(sizeof(t), __vect, __vect->len - 1);              \
+    __popped;                                                          \
+    }))
+
+#define DML_VECT_POP_FRONT(t, v) (({ \
+ _dml_vect_t *__vect = &(v); \
+ ASSERT_MSG(__vect->len > 0, ".pop_front(): DML vector is empty"); \
+ typeof(t) __popped = DML_VECT_ELEM_UNSAFE(t, *__vect, 0); \
+ _dml_vect_remove(sizeof(t), __vect, 0); \
+ __popped; \
+ }))
+
+#define DML_VECT_INITIAL_ELEMS(elem_size) \
+ MAX(1, _DML_BITFLOOR(32 / (elem_size)))
+
+
+UNUSED static void
+_dml_vect_realloc(size_t elem_size, _dml_vect_t *v, uint32 new_size) {
+ // new_size is a power of two or 0
+ ASSERT((new_size & (new_size - 1)) == 0);
+ // This assert also catches if the buf grows to be larger than
+ // 2^32-1 elements
+ ASSERT(new_size >= v->len);
+
+ if (unlikely(new_size == v->size)) return;
+
+ // TODO(RAII) This path is unused. Remove?
+ if (unlikely(new_size == 0)) {
+ MM_FREE(v->elements);
+ v->elements = NULL;
+ v->size = v->start = v->len = 0;
+ return;
+ }
+ if (new_size > v->size) {
+ ASSERT(new_size >= v->start + v->len);
+ v->elements = MM_REALLOC_SZ(v->elements, new_size*elem_size, char);
+ if (v->size && v->start + v->len > v->size)
+ memcpy(v->elements + v->size*elem_size,
+ v->elements,
+ (v->len - (v->size - v->start))*elem_size);
+ } else {
+ // If moving elements is necessary...
+ if (v->start + v->len > new_size) {
+ if (v->start < new_size) {
+ // No preexisting wraparound. Induce wraparound
+ memcpy(v->elements,
+ v->elements + new_size*elem_size,
+ (v->len - (new_size - v->start))*elem_size);
+ } else {
+ // Preexisting wraparound. Move start segment back.
+ uint32 start_segment_len = v->size - v->start;
+ memcpy(v->elements + (new_size - start_segment_len)*elem_size,
+ v->elements + v->start*elem_size,
+ start_segment_len*elem_size);
+ v->start = new_size - start_segment_len;
+ }
+ }
+ v->elements = MM_REALLOC_SZ(v->elements, new_size*elem_size, char);
+ }
+ v->size = new_size;
+}
+
+// Internal. Realtime O(n), amortized O(1)
+UNUSED static void
+_dml_vect_force(size_t elem_size, _dml_vect_t *v) {
+ if (v->len == 0) {
+ v->start = 0;
+ }
+ if (v->len < v->size/4 && v->size > DML_VECT_INITIAL_ELEMS(elem_size)) {
+ uint32 new_size;
+ if (v->len == 0) {
+ new_size = MAX(DML_VECT_INITIAL_ELEMS(elem_size), 2);
+ } else {
+ new_size = _DML_BITCEIL(v->len);
+ new_size = MAX(DML_VECT_INITIAL_ELEMS(elem_size), new_size*2);
+ }
+ _dml_vect_realloc(elem_size, v, new_size);
+ }
+}
+
+UNUSED static void
+_dml_vect_clear(size_t elem_size, _dml_vect_t *v) {
+ v->len = 0;
+ _dml_vect_force(elem_size, v);
+}
+
+// Realtime O(max(new_len, n)), amortized O(|new_len - n|)
+UNUSED static void
+_dml_vect_resize(size_t elem_size, _dml_vect_t *v, uint32 new_len,
+ bool zero_init_new) {
+ uint32 prev_len = v->len;
+ ASSERT(prev_len <= v->size);
+ if (new_len == prev_len)
+ return;
+ v->len = new_len;
+ if (new_len <= v->size) {
+ _dml_vect_force(elem_size, v);
+ } else {
+ uint32 new_size = MAX(DML_VECT_INITIAL_ELEMS(elem_size),
+ _DML_BITCEIL(new_len));
+ if (new_size != v->size)
+ _dml_vect_realloc(elem_size, v, new_size);
+ }
+ if (zero_init_new && new_len > prev_len) {
+ if (v->start + prev_len < v->size) {
+ uint32 mid_segment_len = MIN(new_len - prev_len,
+ v->size - v->start - prev_len);
+ memset(v->elements + (v->start + prev_len)*elem_size,
+ 0,
+ mid_segment_len*elem_size);
+ if (mid_segment_len < new_len - prev_len) {
+ memset(v->elements,
+ 0,
+ (new_len - prev_len - mid_segment_len)*elem_size);
+ }
+ } else {
+ memset(v->elements + (v->start + prev_len - v->size)*elem_size,
+ 0,
+ (new_len - prev_len)*elem_size);
+ }
+ }
+}
+
+UNUSED static void
+_dml_vect_resize_destructive(
+ size_t elem_size, _dml_vect_t *v, uint32 new_len) {
+ v->start = 0;
+ _dml_vect_resize(elem_size, v, new_len, false);
+}
+
+// O(1) if i is 0 or 1 elements from either end, O(n) otherwise
+UNUSED static void
+_dml_vect_remove(size_t elem_size, _dml_vect_t *v, uint32 i) {
+ ASSERT(i < v->len);
+
+ // Special cases
+ if (i == v->len - 1) {
+ goto done;
+ } else if (i == v->len - 2) {
+ memcpy(v->elements + DML_VECT_INDEX(*v, i)*elem_size,
+ v->elements + DML_VECT_INDEX(*v, v->len - 1)*elem_size,
+ elem_size);
+ goto done;
+ }
+ switch (i) {
+ case 0:
+ v->start = (v->start + 1) & (v->size - 1);
+ goto done;
+ case 1:
+ memcpy(v->elements + DML_VECT_INDEX(*v, 1)*elem_size,
+ v->elements + v->start*elem_size,
+ elem_size);
+ v->start = (v->start + 1) & (v->size - 1);
+ goto done;
+ }
+
+ if (v->start + v->len > v->size) {
+ // If the vector is discontinuous, then whatever segment the index is
+ // in is moved.
+ if (v->start + i < v->size) {
+ memmove(v->elements + (v->start + 1)*elem_size,
+ v->elements + v->start*elem_size,
+ i*elem_size);
+ ++v->start;
+ } else {
+ uint32 index = v->start + i - v->size;
+ memmove(v->elements + index*elem_size,
+ v->elements + (index + 1)*elem_size,
+ (v->len - 1 - i)*elem_size);
+ }
+ } else {
+ // Otherwise, whatever end is closest to the index is moved
+ // TODO(RAII): consider always moving the back end in order to avoid
+ // contributing to future discontinuity
+ if (i < v->len/2) {
+ memmove(v->elements + (v->start + 1)*elem_size,
+ v->elements + v->start*elem_size,
+ i*elem_size);
+ v->start = (v->start + 1) & (v->size - 1);
+ } else {
+ memmove(v->elements + (v->start + i)*elem_size,
+ v->elements + (v->start + i + 1)*elem_size,
+ (v->len - 1 - i)*elem_size);
+ }
+ }
+ done:
+ --v->len;
+ _dml_vect_force(elem_size, v);
+}
+
+// O(1) amortized if i is 0 or 1 elements from either end, O(n) otherwise
+UNUSED static void *
+_dml_vect_new_at(size_t elem_size, _dml_vect_t *v, uint32 i) {
+ ASSERT(i <= v->len);
+ _dml_vect_resize(elem_size, v, v->len + 1, false);
+
+ // Special cases
+ if (i == v->len - 1) {
+ goto done;
+ } else if (i == v->len - 2) {
+ memcpy(v->elements + DML_VECT_INDEX(*v, v->len - 1)*elem_size,
+ v->elements + DML_VECT_INDEX(*v, i)*elem_size,
+ elem_size);
+ goto done;
+ }
+ switch (i) {
+ case 0:
+ v->start = (v->start - 1) & (v->size - 1);
+ goto done;
+    case 1:
+        memcpy(v->elements + ((v->start - 1) & (v->size - 1))*elem_size,
+               v->elements + v->start*elem_size, elem_size);
+        v->start = (v->start - 1) & (v->size - 1);
+        goto done;
+ }
+
+ if (v->start + v->len > v->size) {
+ // If the vector is discontinuous, then whatever segment the index is
+ // in is moved.
+ if (v->start + i < v->size) {
+ memmove(v->elements + (v->start - 1)*elem_size,
+ v->elements + v->start*elem_size,
+ i*elem_size);
+ --v->start;
+ } else {
+ uint32 index = v->start + i - v->size;
+ memmove(v->elements + (index + 1)*elem_size,
+ v->elements + index*elem_size,
+ (v->len - i - 1)*elem_size);
+ }
+ } else {
+ // Otherwise, whatever end is closest to the index is moved, if
+ // it can be done without incurring wrap-around
+ // TODO(RAII): consider always moving the back end in order to avoid
+ // contributing to future discontinuity
+ if (v->start && i < v->len/2) {
+ memmove(v->elements + (v->start - 1)*elem_size,
+ v->elements + v->start*elem_size,
+ i*elem_size);
+ --v->start;
+ } else {
+ memmove(v->elements + (v->start + i + 1)*elem_size,
+ v->elements + (v->start + i)*elem_size,
+ (v->len - 1 - i)*elem_size);
+ }
+ }
+ done:
+ return v->elements + DML_VECT_INDEX(*v, i)*elem_size;
+}
+
+// Amortized O(1)
+UNUSED static void
+_dml_vect_replace_with_back(size_t elem_size, _dml_vect_t *v, uint32 i) {
+ ASSERT(i < v->len);
+ if (i != v->len - 1) {
+ memcpy(v->elements + DML_VECT_INDEX(*v, i)*elem_size,
+ v->elements + DML_VECT_INDEX(*v, v->len - 1)*elem_size,
+ elem_size);
+ }
+ --v->len;
+ _dml_vect_force(elem_size, v);
+}
+
+// Amortized O(1)
+UNUSED static void
+_dml_vect_replace_with_front(size_t elem_size, _dml_vect_t *v, uint32 i) {
+ ASSERT(i < v->len);
+ if (i != 0) {
+ memcpy(v->elements + DML_VECT_INDEX(*v, i)*elem_size,
+ v->elements + v->start*elem_size,
+ elem_size);
+ }
+ ++v->start;
+ --v->len;
+ _dml_vect_force(elem_size, v);
+}
+
+// Realtime O(n), hopefully fast
+UNUSED static void
+_dml_vect_reinit_size(size_t elem_size, _dml_vect_t *v, size_t size) {
+ if (!v->size && !size)
+ return;
+ v->size = MAX(DML_VECT_INITIAL_ELEMS(elem_size), size);
+ v->elements = MM_REALLOC_SZ(v->elements, v->size*elem_size, char);
+ v->len = v->start = 0;
+}
+
+UNUSED static void *
+_dml_vect_elements(size_t elem_size, _dml_vect_t *v) {
+ // Fast path, O(1) realtime -- the vect is already continuous
+ if (v->start + v->len <= v->size) {
+ return v->elements + v->start*elem_size;
+ }
+
+ // Slow path, O(n) realtime. Reorder the vect into one continuous segment,
+ // offset 1/4 into the buf, thus leaving room for at least n/4 future
+ // elements at either end.
+ // This guarantees O(1) amortized behavior -- the slow path can only be
+ // encountered again if Omega(n) elements have been removed or added since.
+
+ // Grow the buffer if we're starting to run out of elements
+ if (v->len*2 > v->size) {
+ _dml_vect_realloc(elem_size, v, v->size*2);
+ }
+ uint32 offset = v->size/4;
+ if (v->start + v->len <= v->size) {
+ // The vect is continuous as a result of realloc.
+ // Move it to 1/4 through the buffer if not there already.
+ if (v->start != offset) {
+ // In this path overlap between the current vect segment and
+ // the location of the desired vect segment is guaranteed.
+ memmove(v->elements + offset*elem_size,
+ v->elements + v->start*elem_size,
+ v->len*elem_size);
+ }
+ } else {
+ // The vect is discontinuous. Only possible if realloc didn't happen,
+ // meaning v->len*2 <= v->size. This also implies v->start > v->len
+ uint32 start_segment_len = v->size - v->start;
+ uint32 end_segment_len = v->len - start_segment_len;
+
+ if (offset < end_segment_len) {
+ // Current end segment overlaps with desired vect segment.
+ // This implies (proof omitted): offset + v->len <= v->start
+ // SO: may memmove the end segment, then copy the start segment
+ memmove(v->elements + (offset + start_segment_len)*elem_size,
+ v->elements,
+ end_segment_len*elem_size);
+ memcpy(v->elements + offset*elem_size,
+ v->elements + v->start*elem_size,
+ start_segment_len*elem_size);
+ } else {
+ // Either no overlap, or current start segment overlaps with
+ // desired vect segment.
+ // We have that offset >= end_segment_len
+ // SO: may memmove the start segment, then copy the end segment
+ memmove(v->elements + offset*elem_size,
+ v->elements + v->start*elem_size,
+                    start_segment_len*elem_size);
+ memcpy(v->elements + (offset + start_segment_len)*elem_size,
+ v->elements,
+ end_segment_len*elem_size);
+ }
+ }
+ v->start = offset;
+ return v->elements + offset*elem_size;
+}
+
+
+// O(n)
+UNUSED static void
+_dml_vect_copy_to_index(size_t elem_size, _dml_vect_t *tgt, _dml_vect_t src,
+ uint32 i) {
+ // Aliasing must be dealt with before this call!
+ ASSERT(src.elements != tgt->elements);
+ ASSERT(i + src.len <= tgt->len);
+ if (src.len == 0) return;
+
+ uint32 tgt_start = DML_VECT_INDEX(*tgt, i);
+ uint32 src_start_len = MIN(src.len, src.size - src.start);
+ uint32 tgt_start_len = MIN(src.len, tgt->size - tgt_start);
+
+ memcpy(tgt->elements + tgt_start*elem_size,
+ src.elements + src.start*elem_size,
+ MIN(src_start_len, tgt_start_len)*elem_size);
+ if (src_start_len < tgt_start_len) {
+ memcpy(tgt->elements + (tgt_start + src_start_len)*elem_size,
+ src.elements,
+ (tgt_start_len - src_start_len)*elem_size);
+ } else if (tgt_start_len < src_start_len) {
+ memcpy(tgt->elements,
+ src.elements + (src.start + tgt_start_len)*elem_size,
+ (src_start_len - tgt_start_len)*elem_size);
+ }
+
+ uint32 remainder_offset = MAX(src_start_len, tgt_start_len);
+ if (remainder_offset != src.len) {
+ memcpy(tgt->elements
+ + DML_VECT_INDEX(*tgt, i + remainder_offset)*elem_size,
+ src.elements + DML_VECT_INDEX(src, remainder_offset)*elem_size,
+               (src.len - remainder_offset)*elem_size);
+ }
+}
+
+UNUSED static void _dml_vect_free(_dml_vect_t v) {
+ MM_FREE(v.elements);
+}
+
+// O(n)
+UNUSED static void
+_dml_vect_copy(size_t elem_size, _dml_vect_t *tgt, _dml_vect_t src) {
+ if (unlikely((uintptr_t)tgt->elements == (uintptr_t)src.elements)) {
+ ASSERT(tgt->size == src.size && tgt->len == src.len
+ && tgt->start == src.start);
+ return;
+ }
+ _dml_vect_reinit_size(elem_size, tgt, src.size);
+ tgt->len = src.len;
+ _dml_vect_copy_to_index(elem_size, tgt, src, 0);
+}
+
+// O(n)
+UNUSED static _dml_vect_t
+_dml_vect_dupe(size_t elem_size, _dml_vect_t src) {
+ _dml_vect_t v = {0};
+ _dml_vect_copy(elem_size, &v, src);
+ return v;
+}
+
+// O(n)
+UNUSED static void
+_dml_vect_append(size_t elem_size, _dml_vect_t *tgt, _dml_vect_t src) {
+ if (src.len == 0) return;
+ uint32 appendat_index = tgt->len;
+ if (unlikely((uintptr_t)tgt->elements == (uintptr_t)src.elements)) {
+ ASSERT(tgt->size == src.size && tgt->len == src.len
+ && tgt->start == src.start);
+ _dml_vect_resize(elem_size, tgt, tgt->len*2, false);
+ // TODO(RAII) Rewrite. I believe the logic to be correct, but it is
+ // horribly headache inducing
+ if (appendat_index*2 <= tgt->size - tgt->start) {
+ memcpy(tgt->elements + (tgt->start + appendat_index)*elem_size,
+ tgt->elements + tgt->start*elem_size,
+ appendat_index*elem_size);
+ } else if (appendat_index <= tgt->size - tgt->start) {
+ uint32 tgt_start_len = tgt->size - tgt->start - appendat_index;
+ if (tgt_start_len) {
+ memcpy(tgt->elements + (tgt->start + appendat_index)*elem_size,
+ tgt->elements + tgt->start*elem_size,
+ tgt_start_len*elem_size);
+ }
+ memcpy(tgt->elements,
+ tgt->elements + (tgt->start + tgt_start_len)*elem_size,
+ (appendat_index - tgt_start_len)*elem_size);
+ } else {
+ uint32 tgt_start_index = DML_VECT_INDEX(*tgt, appendat_index);
+ uint32 src_start_len = tgt->size - tgt->start;
+ memcpy(tgt->elements + tgt_start_index*elem_size,
+ tgt->elements + tgt->start*elem_size,
+ src_start_len*elem_size);
+ memcpy(tgt->elements + (tgt_start_index + src_start_len)*elem_size,
+ tgt->elements,
+ (appendat_index - src_start_len)*elem_size);
+ }
+ } else {
+ _dml_vect_resize(elem_size, tgt, tgt->len + src.len, false);
+ _dml_vect_copy_to_index(elem_size, tgt, src, appendat_index);
+ }
+}
+
+UNUSED static _dml_vect_t
+_dml_vect_from_array(size_t elem_size, const void *src, uint32 no_elements) {
+ _dml_vect_t v = {0};
+ _dml_vect_resize(elem_size, &v, no_elements, false);
+ if (v.size)
+ memcpy(v.elements, src, no_elements*elem_size);
+ return v;
+}
+
+UNUSED static void
+_dml_vect_set_array(size_t elem_size, _dml_vect_t *tgt, const void *src,
+ uint32 no_elements) {
+ if (!no_elements) {
+ _dml_vect_clear(elem_size, tgt);
+ return;
+ }
+
+ bool overlap = _dml_pointers_overlap(tgt->elements, src,
+ tgt->size*elem_size);
+
+ if (unlikely(overlap)) {
+ uint8 *duped_src = MM_MALLOC(no_elements*elem_size, uint8);
+ memcpy(duped_src, src, no_elements*elem_size);
+ src = duped_src;
+ }
+
+ _dml_vect_resize_destructive(elem_size, tgt, no_elements);
+ memcpy(tgt->elements, src, no_elements*elem_size);
+
+ if (unlikely(overlap))
+ MM_FREE((uint8 *)src);
+}
+
+UNUSED static void
+_dml_vect_append_array(size_t elem_size, _dml_vect_t *tgt, const void *src,
+ uint32 no_elements) {
+ if (!no_elements)
+ return;
+ bool overlap = _dml_pointers_overlap(tgt->elements, src,
+ tgt->size*elem_size);
+ if (unlikely(overlap)) {
+ uint8 *duped_src = MM_MALLOC(no_elements*elem_size, uint8);
+ memcpy(duped_src, src, no_elements*elem_size);
+ src = duped_src;
+ }
+ uint32 prev_len = tgt->len;
+ _dml_vect_resize(elem_size, tgt, tgt->len + no_elements, false);
+ uint32 copyat_start = DML_VECT_INDEX(*tgt, prev_len);
+ uint32 tgt_start_len = tgt->size - copyat_start;
+
+ memcpy(tgt->elements + copyat_start*elem_size, src,
+ MIN(no_elements, tgt_start_len)*elem_size);
+ if (tgt_start_len < no_elements) {
+ memcpy(tgt->elements, (uint8 *)src + tgt_start_len*elem_size,
+ (no_elements - tgt_start_len)*elem_size);
+ }
+
+ if (unlikely(overlap))
+ MM_FREE((void *)src);
+}
+
+UNUSED static _dml_vect_t
+_dml_vect_from_string(_dml_string_t s) {
+ if (!s.size) return (_dml_vect_t) {0};
+ uint32 new_size = _DML_BITCEIL(s.size);
+ if (s.size != new_size)
+ s.s = MM_REALLOC_SZ(s.s, new_size, char);
+ return (_dml_vect_t) {.elements = s.s, .size = new_size, .len = s.len};
+}
+
+UNUSED static _dml_string_t
+_dml_string_from_vect(_dml_vect_t v) {
+ if (!v.size) return (_dml_string_t) {0};
+ if (v.start) {
+ (void)_dml_vect_elements(sizeof(char), &v);
+ memmove(v.elements, v.elements + v.start, v.len);
+ }
+ return (_dml_string_t) { .s = v.elements, .size = v.size, .len = v.len };
+}
+
+#endif
diff --git a/lib/1.2/dml-builtins.dml b/lib/1.2/dml-builtins.dml
index c7766e97b..35d5aa670 100644
--- a/lib/1.2/dml-builtins.dml
+++ b/lib/1.2/dml-builtins.dml
@@ -11,7 +11,7 @@ loggroup Register_Read;
loggroup Register_Write;
header %{
- #include <simics/dmllib.h>
+ #include <simics/dml-lib.h>
%}
import "simics-api.dml";
diff --git a/lib/1.2/simics-api.dml b/lib/1.2/simics-api.dml
index bc80d5baa..d8a8080c3 100644
--- a/lib/1.2/simics-api.dml
+++ b/lib/1.2/simics-api.dml
@@ -48,15 +48,15 @@ extern uint16 CONVERT_BE16(uint16 val);
extern uint32 CONVERT_BE32(uint32 val);
extern uint64 CONVERT_BE64(uint64 val);
-extern uint8 LOAD_LE8 (void *src);
-extern uint16 LOAD_LE16(void *src);
-extern uint32 LOAD_LE32(void *src);
-extern uint64 LOAD_LE64(void *src);
+extern uint8 LOAD_LE8 (const void *src);
+extern uint16 LOAD_LE16(const void *src);
+extern uint32 LOAD_LE32(const void *src);
+extern uint64 LOAD_LE64(const void *src);
-extern uint8 LOAD_BE8 (void *src);
-extern uint16 LOAD_BE16(void *src);
-extern uint32 LOAD_BE32(void *src);
-extern uint64 LOAD_BE64(void *src);
+extern uint8 LOAD_BE8 (const void *src);
+extern uint16 LOAD_BE16(const void *src);
+extern uint32 LOAD_BE32(const void *src);
+extern uint64 LOAD_BE64(const void *src);
extern void STORE_LE8 (void *dst, uint8 val);
extern void STORE_LE16(void *dst, uint16 val);
@@ -68,15 +68,15 @@ extern void STORE_BE16(void *dst, uint16 val);
extern void STORE_BE32(void *dst, uint32 val);
extern void STORE_BE64(void *dst, uint64 val);
-extern uint8 UNALIGNED_LOAD_LE8 (void *src);
-extern uint16 UNALIGNED_LOAD_LE16(void *src);
-extern uint32 UNALIGNED_LOAD_LE32(void *src);
-extern uint64 UNALIGNED_LOAD_LE64(void *src);
+extern uint8 UNALIGNED_LOAD_LE8 (const void *src);
+extern uint16 UNALIGNED_LOAD_LE16(const void *src);
+extern uint32 UNALIGNED_LOAD_LE32(const void *src);
+extern uint64 UNALIGNED_LOAD_LE64(const void *src);
-extern uint8 UNALIGNED_LOAD_BE8 (void *src);
-extern uint16 UNALIGNED_LOAD_BE16(void *src);
-extern uint32 UNALIGNED_LOAD_BE32(void *src);
-extern uint64 UNALIGNED_LOAD_BE64(void *src);
+extern uint8 UNALIGNED_LOAD_BE8 (const void *src);
+extern uint16 UNALIGNED_LOAD_BE16(const void *src);
+extern uint32 UNALIGNED_LOAD_BE32(const void *src);
+extern uint64 UNALIGNED_LOAD_BE64(const void *src);
extern void UNALIGNED_STORE_LE8 (void *dst, uint8 val);
extern void UNALIGNED_STORE_LE16(void *dst, uint16 val);
diff --git a/lib/1.4/dml-builtins.dml b/lib/1.4/dml-builtins.dml
index 4725dfa23..00eb8fb3d 100644
--- a/lib/1.4/dml-builtins.dml
+++ b/lib/1.4/dml-builtins.dml
@@ -10,7 +10,7 @@ bitorder le;
loggroup Register_Read;
loggroup Register_Write;
header %{
- #include <simics/dmllib.h>
+ #include <simics/dml-lib.h>
%}
import "simics/C.dml";
@@ -161,6 +161,13 @@ extern void _callback_after_write(conf_object_t *bank,
extern void _cancel_simple_events(conf_object_t *obj, _identity_t id);
extern void _register_attributes(conf_object_t *obj, _identity_t id);
+
+// Operations on RAII types
+extern string _dml_string_new(const char *) as mk_string;
+extern string _dml_string_newf(const char *, ...) as mk_string_f;
+extern string _dml_string_from_vect(vect(char)) as mk_string_from_vect;
+extern vect(char) _dml_vect_from_string(string) as mk_vect_from_string;
+
/**
# Libraries and Built-ins
@@ -3615,6 +3622,7 @@ template init_val is init {
extern const char *_DML_get_qname(_identity_t, const _id_info_t *,
dml_qname_cache_t *, const char *);
+
template _qname {
shared method _qname() -> (const char *) {
local _qname ref = this;
diff --git a/lib/1.4/internal.dml b/lib/1.4/internal.dml
index 5a9051694..a436b507d 100644
--- a/lib/1.4/internal.dml
+++ b/lib/1.4/internal.dml
@@ -39,15 +39,15 @@ extern uint16 CONVERT_BE16(uint16 val);
extern uint32 CONVERT_BE32(uint32 val);
extern uint64 CONVERT_BE64(uint64 val);
-extern uint8 LOAD_LE8 (void *src);
-extern uint16 LOAD_LE16(void *src);
-extern uint32 LOAD_LE32(void *src);
-extern uint64 LOAD_LE64(void *src);
+extern uint8 LOAD_LE8 (const void *src);
+extern uint16 LOAD_LE16(const void *src);
+extern uint32 LOAD_LE32(const void *src);
+extern uint64 LOAD_LE64(const void *src);
-extern uint8 LOAD_BE8 (void *src);
-extern uint16 LOAD_BE16(void *src);
-extern uint32 LOAD_BE32(void *src);
-extern uint64 LOAD_BE64(void *src);
+extern uint8 LOAD_BE8 (const void *src);
+extern uint16 LOAD_BE16(const void *src);
+extern uint32 LOAD_BE32(const void *src);
+extern uint64 LOAD_BE64(const void *src);
extern void STORE_LE8 (void *dst, uint8 val);
extern void STORE_LE16(void *dst, uint16 val);
@@ -59,15 +59,15 @@ extern void STORE_BE16(void *dst, uint16 val);
extern void STORE_BE32(void *dst, uint32 val);
extern void STORE_BE64(void *dst, uint64 val);
-extern uint8 UNALIGNED_LOAD_LE8 (void *src);
-extern uint16 UNALIGNED_LOAD_LE16(void *src);
-extern uint32 UNALIGNED_LOAD_LE32(void *src);
-extern uint64 UNALIGNED_LOAD_LE64(void *src);
+extern uint8 UNALIGNED_LOAD_LE8 (const void *src);
+extern uint16 UNALIGNED_LOAD_LE16(const void *src);
+extern uint32 UNALIGNED_LOAD_LE32(const void *src);
+extern uint64 UNALIGNED_LOAD_LE64(const void *src);
-extern uint8 UNALIGNED_LOAD_BE8 (void *src);
-extern uint16 UNALIGNED_LOAD_BE16(void *src);
-extern uint32 UNALIGNED_LOAD_BE32(void *src);
-extern uint64 UNALIGNED_LOAD_BE64(void *src);
+extern uint8 UNALIGNED_LOAD_BE8 (const void *src);
+extern uint16 UNALIGNED_LOAD_BE16(const void *src);
+extern uint32 UNALIGNED_LOAD_BE32(const void *src);
+extern uint64 UNALIGNED_LOAD_BE64(const void *src);
extern void UNALIGNED_STORE_LE8 (void *dst, uint8 val);
extern void UNALIGNED_STORE_LE16(void *dst, uint16 val);
diff --git a/py/dml/c_backend.py b/py/dml/c_backend.py
index 42416697e..1ccec5fe5 100644
--- a/py/dml/c_backend.py
+++ b/py/dml/c_backend.py
@@ -28,6 +28,7 @@
from .set import Set
prototypes = []
+constants = []
c_split_threshold = None
log_object_t = TNamed("log_object_t")
@@ -142,6 +143,11 @@ def composite_ctype(node, unfiltered_members, label=None):
members.append((v.value, v.type))
members.append(('_immediate_after_state',
TPtr(TNamed('_dml_immediate_after_state_t'))))
+ if dml.globals.session_orphan_allocs:
+ t = TArray(TPtr(void),
+ ctree.mkIntegerLiteral(
+ node.site, dml.globals.session_orphan_allocs))
+ members.append(('_orphan_allocs', t))
return composite_ctype(node,
members + [(crep.cname(sub), print_device_substruct(sub))
for sub in node.get_components()],
@@ -365,9 +371,12 @@ def generate_hfile(device, headers, filename):
def generate_protofile(device):
linkage = 'extern' if c_split_threshold else 'static'
- out('\n/* generated function prototypes */\n')
+ out('\n/* generated function and variable prototypes */\n')
for proto in prototypes:
out("%s %s UNUSED;\n" % (linkage, proto))
+ out('\n/* generated internal constants */\n')
+ for (proto, init) in constants:
+ out("static %s UNUSED = %s;\n" % (proto, init))
def get_attr_fname(node, port, group_prefix):
port_prefix = port.attrname() + '_' if port else ''
@@ -416,19 +425,19 @@ def generate_attr_setter(fname, node, port, dimsizes, cprefix, loopvars,
out('attr_value_t attr%d = %s;\n' % (dim, list_item))
valuevar = 'attr%d' % (dim,)
- with NoFailure(node.site), crep.DeviceInstanceContext():
- setcode = [
- codegen_inline_byname(
- node, port_indices + loopvars,
- '_set_attribute' if dml.globals.dml_version == (1, 2)
- else 'set_attribute',
- [mkLit(node.site, valuevar, TNamed('attr_value_t'))],
- [mkLit(node.site, '_status', TNamed('set_error_t'))],
- node.site,
- inhibit_copyin = not loopvars)]
-
- code = mkCompound(None, declarations(fscope) + setcode)
- code.toc_inline()
+ with MethodRAIIScope() as raii_scope, NoFailure(node.site), \
+ crep.DeviceInstanceContext():
+ setcode = codegen_inline_byname(
+ node, port_indices + loopvars,
+ '_set_attribute' if dml.globals.dml_version == (1, 2)
+ else 'set_attribute',
+ [mkLit(node.site, valuevar, TNamed('attr_value_t'))],
+ [mkLit(node.site, '_status', TNamed('set_error_t'))],
+ node.site,
+ inhibit_copyin = not loopvars)
+ code = declarations(fscope) + [setcode]
+
+ mkCompoundRAII(None, code, raii_scope).toc_inline()
reset_line_directive()
if dimsizes:
# abort on first bad value
@@ -477,15 +486,16 @@ def generate_attr_getter(fname, node, port, dimsizes, cprefix, loopvars):
out('attr_value_t %s;\n' % (next_valuevar.read()))
valuevar = next_valuevar
- with NoFailure(node.site), crep.DeviceInstanceContext():
+ with MethodRAIIScope() as raii_scope, NoFailure(node.site), \
+ crep.DeviceInstanceContext():
getcode = codegen_inline_byname(
node, port_indices + loopvars,
'_get_attribute' if dml.globals.dml_version == (1, 2)
else 'get_attribute',
[], [valuevar], node.site)
- code = mkCompound(node.site, declarations(fscope) + [getcode])
- code.toc_inline()
- reset_line_directive()
+ code = declarations(fscope) + [getcode]
+ mkCompoundRAII(node.site, code, raii_scope).toc_inline()
+ reset_line_directive()
for depth, loopvar in reversed(list(enumerate(loopvars))):
out('SIM_attr_list_set_item(&_val%d, %s, _val%d);\n'
@@ -740,7 +750,7 @@ def wrap_method(meth, wrapper_name, indices=()):
assert meth.dimensions == len(indices)
out(devstruct+' *_dev UNUSED = ('+devstruct+'*)_obj;\n')
indices = tuple(mkIntegerLiteral(meth.site, i) for i in indices)
- with crep.DeviceInstanceContext():
+ with UnusedMethodRAIIScope(), crep.DeviceInstanceContext():
if retvar:
decl = mkDeclaration(meth.site, retvar, rettype,
init = get_initializer(meth.site, rettype,
@@ -801,13 +811,13 @@ def generate_implement_method(device, ifacestruct, meth, indices):
# method in DML
raise EMETH(meth.site, None, 'interface method is variadic')
for ((mp, mt), it) in zip(meth.inp, iface_input_types):
- if safe_realtype(mt).cmp(safe_realtype(it)) != 0:
+ if safe_realtype_unconst(mt).cmp(safe_realtype_unconst(it)) != 0:
raise EARGT(meth.site, 'implement', meth.name,
mt, mp, it, 'method')
if iface_num_outputs and dml.globals.dml_version != (1, 2):
[(_, mt)] = meth.outp
- if safe_realtype(mt).cmp(
- safe_realtype(ifacemethtype.output_type)) != 0:
+ if safe_realtype_unconst(mt).cmp(
+ safe_realtype_unconst(ifacemethtype.output_type)) != 0:
raise EARGT(meth.site, 'implement', meth.name,
mt, '', ifacemethtype.output_type,
'method')
@@ -1036,11 +1046,11 @@ def generate_simple_events(device):
# If data is NULL, report it and use emergency indices/args
if info.dimensions or info.args_type:
- out('if (!data) {', postindent=1)
+ out('if (!data) {\n', postindent=1)
out('const char *msg = "Failed deserialization of after event '
+ 'data. Using emergency indices/arguments.";\n')
out('VT_critical_error(msg, msg);\n')
- out('}', preindent=-1)
+ out('}\n', preindent=-1)
if info.dimensions > 0:
out('const uint32 *_indices = data ? data->indices '
@@ -1067,6 +1077,25 @@ def generate_simple_events(device):
out('}\n\n', preindent = -1)
splitting_point()
+ if info.args_type and info.args_type.is_raii:
+ start_function_definition(
+ f'void {info.cident_destroy}'
+ + '(conf_object_t *_obj, lang_void *_data)')
+ out('{\n', postindent = 1)
+ out('_simple_event_data_t *data = '
+ + '(_simple_event_data_t *)_data;\n')
+ out('if (data) {\n', postindent=1)
+ args_decl = TPtr(info.args_type).declaration('_args')
+ out(f'{args_decl} = data->args;\n')
+ raii_info = get_raii_type_info(info.args_type)
+ out(f'{raii_info.read_destroy_lval("*_args")};\n')
+ out('_free_simple_event_data(*data);\n')
+ out('MM_FREE(data);\n')
+ out('}\n', preindent=-1)
+ out('}\n\n', preindent = -1)
+ splitting_point()
+
+
if info.dimensions or info.args_type:
start_function_definition(
f'attr_value_t {info.cident_get_value}'
@@ -1131,6 +1160,19 @@ def generate_after_on_hooks_artifacts(device):
splitting_point()
if info.has_serialized_args:
+ if info.args_type and info.args_type.is_raii:
+ start_function_definition(
+ f'void {info.cident_args_destructor}('
+ + 'void *_args)')
+ out('{\n', postindent = 1)
+ args_type_ptr = TPtr(info.args_type)
+ out(f'{args_type_ptr.declaration("args")} = _args;\n')
+ args_expr = mkDereference(site, mkLit(site, 'args',
+ args_type_ptr))
+ info.generate_args_destructor(site, args_expr)
+ out('}\n\n', preindent = -1)
+ splitting_point()
+
start_function_definition(
f'attr_value_t {info.cident_args_serializer}('
+ 'const void *_args)')
@@ -1155,12 +1197,28 @@ def generate_after_on_hooks_artifacts(device):
out('{\n', postindent = 1)
out('set_error_t _success UNUSED = Sim_Set_Ok;\n')
+ cleanup = []
+ cleanup_on_failure = []
if info.args_type:
- out(f'{TPtr(info.args_type).declaration("out")} = _out;\n')
- out_expr = mkDereference(site, mkLit(site, 'out',
- TPtr(info.args_type)))
+ raii_info = (get_raii_type_info(info.args_type)
+ if info.args_type.is_raii else None)
+ malloc = mkLit(site,
+ f'MM_{("Z" if raii_info is not None else "M")}'
+ + f'ALLOC(1, {info.args_type.declaration("")})',
+ TPtr(info.args_type))
+ (tmp_out_decl, tmp_out_ref) = serialize.declare_variable(
+ site, "_tmp_out", TPtr(info.args_type), malloc)
+ tmp_out_decl.toc()
+ tmp_out_expr = mkDereference(site, tmp_out_ref)
+ cleanup_ref = ('(void *)'*deep_const(info.args_type)
+ + '_tmp_out')
+ cleanup.append(ctree.mkInline(site,
+ f'MM_FREE({cleanup_ref});'))
+ if raii_info:
+ cleanup_on_failure.append(ctree.mkInline(
+ site, raii_info.read_destroy_lval('*_tmp_out') + ';'))
else:
- out_expr = None
+ tmp_out_expr = None
def error_out(exc, msg):
stmts = []
stmts.append(mkInline(site, f'_success = {exc};'))
@@ -1171,10 +1229,28 @@ def error_out(exc, msg):
return stmts
val_expr = mkLit(site, 'val', attr_value_t)
- info.generate_args_deserializer(site, val_expr, out_expr,
+ info.generate_args_deserializer(site, val_expr, tmp_out_expr,
error_out)
- out('_exit:\n')
+ if info.args_type:
+ out_expr = ctree.mkDereference(
+ site,
+ ctree.mkCast(site, mkLit(site, '_out', TPtr(void)),
+ TPtr(info.args_type)))
+ src = OrphanWrap(site, tmp_out_expr)
+ ctree.AssignStatement(site, out_expr,
+ ctree.ExpressionInitializer(src)).toc()
+ if cleanup_on_failure:
+ out('if (false) {\n', postindent=1)
+ out('_exit:\n')
+ for stmt in cleanup_on_failure:
+ stmt.toc()
+ out('}\n', preindent=-1)
+ else:
+ out('_exit:\n')
+
+ for stmt in cleanup:
+ stmt.toc()
out('return _success;\n')
out('}\n\n', preindent = -1)
splitting_point()
@@ -1183,12 +1259,16 @@ def error_out(exc, msg):
if dml.globals.after_on_hook_infos:
init = '{\n%s\n}' % (',\n'.join(
- ' {%s, %s, %s, %s, %s, %d}'
- % ((string_literal(info.string_key), info.cident_callback)
+ ' {%s, %s, %s, %d, %s, %s, %s}'
+ % ((info.cident_callback,
+ info.cident_args_destructor
+ if info.args_type and info.args_type.is_raii else 'NULL',
+ f'sizeof({info.args_type.declaration("")})'
+ if info.args_type else '0',
+ info.parent.uniq,
+ string_literal(info.string_key))
+ ((info.cident_args_serializer, info.cident_args_deserializer)
- if info.has_serialized_args else ('NULL', 'NULL'))
- + (f'sizeof({info.args_type.declaration("")})'
- if info.args_type else '0', info.parent.uniq))
+ if info.has_serialized_args else ('NULL', 'NULL')))
for info in dml.globals.after_on_hook_infos),)
add_variable_declaration(
'const _dml_after_on_hook_info_t _after_on_hook_infos[]', init)
@@ -1318,7 +1398,7 @@ def generate_register_events(device):
out(('%s = SIM_register_event(%s, class, 0, %s, %s, %s, %s, '
+ 'NULL);\n')
% (crep.get_evclass(key), string_literal(info.string_key),
- info.cident_callback, '_destroy_simple_event_data',
+ info.cident_callback, info.cident_destroy,
info.cident_get_value, info.cident_set_value))
out('}\n\n', preindent = -1)
splitting_point()
@@ -1346,17 +1426,20 @@ def generate_reg_callback(meth, name):
out('{\n', postindent = 1)
out('%s *_dev = _obj;\n' % dev_t)
scope = Symtab(global_scope)
- fail = ReturnFailure(meth.site)
- with fail, crep.DeviceInstanceContext():
- inargs = [mkLit(meth.site, n, t) for n, t in meth.inp]
- outargs = [mkLit(meth.site, "*" + n, t) for n, t in meth.outp]
- code = [codegen_call(
- meth.site, meth,
- tuple(mkLit(meth.site, 'indices[%d]' % i, TInt(32, False))
- for i in range(meth.dimensions)),
- inargs, outargs)]
-
- code = mkCompound(meth.site, declarations(scope) + code + [fail.nofail()])
+ with UnusedMethodRAIIScope():
+ fail = ReturnFailure(meth.site)
+ with fail, crep.DeviceInstanceContext():
+ inargs = [mkLit(meth.site, n, t) for n, t in meth.inp]
+ outargs = [mkLit(meth.site, "*" + n, t) for n, t in meth.outp]
+ code = [codegen_call(
+ meth.site, meth,
+ tuple(mkLit(meth.site, 'indices[%d]' % i, TInt(32, False))
+ for i in range(meth.dimensions)),
+ inargs, outargs)]
+
+ code = mkCompound(meth.site,
+ declarations(scope) + code + [fail.nofail()])
+
code.toc()
out('}\n', preindent = -1)
out('\n')
@@ -1571,7 +1654,7 @@ def generate_initialize(device):
# changed such that zero-initialization would not be valid.
out('QINIT(_dev->_immediate_after_state->queue);\n')
- with crep.DeviceInstanceContext():
+ with MethodRAIIScope() as raii_scope, crep.DeviceInstanceContext():
if dml.globals.dml_version == (1, 2):
# Functions called from init_object shouldn't throw any
# exceptions. But we don't want to force them to insert try-catch
@@ -1584,9 +1667,11 @@ def generate_initialize(device):
hard_reset = codegen_call_byname(device.site, device, (),
'hard_reset', [], [])
- mkCompound(device.site, [init, hard_reset]).toc()
+ code = [init, hard_reset]
else:
- codegen_inline_byname(device, (), '_init', [], [], device.site).toc()
+ code = [codegen_inline_byname(device, (), '_init', [], [],
+ device.site)]
+ mkCompoundRAII(device.site, code, raii_scope).toc()
reset_line_directive()
if dml.globals.api_version <= '6':
@@ -1598,9 +1683,47 @@ def generate_initialize(device):
def generate_dealloc(device):
start_function_definition(
- f'void {crep.cname(device)}_dealloc(conf_object_t *dev)')
+ f'void {crep.cname(device)}_dealloc(conf_object_t *_obj)')
out('{\n', postindent = 1)
- out('MM_FREE(dev);\n')
+ out(crep.structtype(device) + ' *_dev = ('
+ + crep.structtype(device) + ' *)_obj;\n')
+ site = SimpleSite('generated dealloc function for device')
+ by_dims = {}
+ for node in device.get_recursive_components('session', 'saved'):
+ if node._type.is_raii:
+ by_dims.setdefault(node.dimsizes, []).append(node)
+ for dims in by_dims:
+ for i in range(len(dims)):
+ idxvar = '_i%d' % (i,)
+ out('for (uint32 %s = 0; %s < %d; %s++) {\n'
+ % (idxvar, idxvar, dims[i], idxvar), postindent=1)
+
+ indices = tuple(mkLit(site, f'_i{i}', TInt(32, False))
+ for i in range(len(dims)))
+
+ for node in by_dims[dims]:
+ info = get_raii_type_info(node._type)
+ assert info
+ out(info.read_destroy_lval('_dev->'
+ + crep.cref_session(node, indices))
+ + ';\n')
+
+ for i in range(len(dims)):
+ out('}\n', preindent=-1)
+
+ for (sym, _) in dml.globals.static_vars:
+ if not sym.type.is_raii:
+ continue
+ info = get_raii_type_info(sym.type)
+ out(info.read_destroy_lval('_dev->'+sym.value) + ';\n')
+
+ allocs = dml.globals.session_orphan_allocs
+ if allocs:
+ out(f'for (uint32 i = 0; i < {allocs}; ++i)\n', postindent=1)
+ out('_DML_delete_orphan(_dev->_orphan_allocs[i]);\n',
+ postindent=-1)
+
+ out('MM_FREE(_dev);\n')
out('}\n\n', preindent = -1)
def generate_finalize(device):
@@ -1610,7 +1733,7 @@ def generate_finalize(device):
out(crep.structtype(device) + ' *_dev UNUSED = ('
+ crep.structtype(device) + ' *)_obj;\n')
- with crep.DeviceInstanceContext():
+ with MethodRAIIScope() as raii_scope, crep.DeviceInstanceContext():
if dml.globals.dml_version == (1, 2):
# Functions called from new_instance shouldn't throw any
# exceptions. But we don't want to force them to insert try-catch
@@ -1621,8 +1744,7 @@ def generate_finalize(device):
else:
code = codegen_inline_byname(device, (), '_post_init', [], [],
device.site)
- if not code.is_empty:
- code.toc()
+ mkCompoundRAII(device.site, [code], raii_scope).toc_inline()
out('}\n\n', preindent = -1)
reset_line_directive()
@@ -1636,66 +1758,66 @@ def generate_deinit(device):
out('_DML_execute_immediate_afters_now(_obj, '
+ '_dev->_immediate_after_state);\n')
- with crep.DeviceInstanceContext():
- # Cancel all events
- events = device.get_recursive_components('event')
+ # Cancel all events
+ events = device.get_recursive_components('event')
- by_dims = {}
- for event in events:
- by_dims.setdefault(event.dimsizes, []).append(event)
+ by_dims = {}
+ for event in events:
+ by_dims.setdefault(event.dimsizes, []).append(event)
- for (dims, events) in by_dims.items():
- for i in range(len(dims)):
- out(f'for (uint32 _i{i} = 0; _i{i} < {dims[i]}; _i{i}++) {{\n',
- postindent=1)
+ for (dims, events) in by_dims.items():
+ for i in range(len(dims)):
+ out(f'for (uint32 _i{i} = 0; _i{i} < {dims[i]}; _i{i}++) {{\n',
+ postindent=1)
- indices = tuple(mkLit(device.site, f'_i{i}', TInt(32, False))
- for i in range(len(dims)))
- for event in events:
- method = event.get_component('_cancel_all', 'method')
- # Functions called from pre_delete_instance shouldn't throw
- # any exceptions. But we don't want to force them to insert
- # try-catch in the init method.
- with LogFailure(device.site, event, indices):
- codegen_inline(device.site, method, indices, [], []).toc()
- for i in range(len(dims)):
- out('}\n', preindent=-1)
+ indices = tuple(mkLit(device.site, f'_i{i}', TInt(32, False))
+ for i in range(len(dims)))
+ for event in events:
+ method = event.get_component('_cancel_all', 'method')
+ # Functions called from pre_delete_instance shouldn't throw
+ # any exceptions. But we don't want to force them to insert
+ # try-catch in the init method.
+ with crep.DeviceInstanceContext(), UnusedMethodRAIIScope(), \
+ LogFailure(device.site, event, indices):
+ codegen_inline(device.site, method, indices, [], []).toc()
+ for i in range(len(dims)):
+ out('}\n', preindent=-1)
- for key in dml.globals.after_delay_infos:
- out(f'SIM_event_cancel_time(_obj, {crep.get_evclass(key)}, _obj, '
- + '0, NULL);\n')
+ for key in dml.globals.after_delay_infos:
+ out(f'SIM_event_cancel_time(_obj, {crep.get_evclass(key)}, _obj, '
+ + '0, NULL);\n')
- with LogFailure(device.site, device, ()):
- code = codegen_inline_byname(device, (), 'destroy', [], [],
- device.site)
- if not code.is_empty:
- code.toc()
+ # Cancel all pending afters on hooks
+ by_dims = {}
+ for hook in dml.globals.hooks:
+ by_dims.setdefault(hook.dimsizes, []).append(hook)
- # Cancel all pending afters on hooks
- by_dims = {}
- for hook in dml.globals.hooks:
- by_dims.setdefault(hook.dimsizes, []).append(hook)
+ for (dims, hooks) in by_dims.items():
+ for i in range(len(dims)):
+ out(f'for (uint32 _i{i} = 0; _i{i} < {dims[i]}; _i{i}++) {{\n',
+ postindent=1)
- for (dims, hooks) in by_dims.items():
- for i in range(len(dims)):
- out(f'for (uint32 _i{i} = 0; _i{i} < {dims[i]}; _i{i}++) {{\n',
- postindent=1)
+ indices = tuple(mkLit(device.site, f'_i{i}', TInt(32, False))
+ for i in range(len(dims)))
+ for hook in hooks:
+ out('_DML_free_hook_queue('
+ + f'&_dev->{crep.cref_hook(hook, indices)}.queue);\n')
+ for i in range(len(dims)):
+ out('}\n', preindent=-1)
- indices = tuple(mkLit(device.site, f'_i{i}', TInt(32, False))
- for i in range(len(dims)))
- for hook in hooks:
- out('_DML_free_hook_queue('
- + f'&_dev->{crep.cref_hook(hook, indices)}.queue);\n')
- for i in range(len(dims)):
- out('}\n', preindent=-1)
+ with MethodRAIIScope() as raii_scope, crep.DeviceInstanceContext(), \
+ LogFailure(device.site, device, ()):
+ code = codegen_inline_byname(device, (), 'destroy', [], [],
+ device.site)
+ mkCompoundRAII(device.site, [code], raii_scope).toc()
- # Execute all immediate afters posted by destruction code
- out('_DML_execute_immediate_afters_now(_obj, '
- + '_dev->_immediate_after_state);\n')
+ # Execute all immediate afters posted by destruction code
+ out('_DML_execute_immediate_afters_now(_obj, '
+ + '_dev->_immediate_after_state);\n')
- # Free the tables used for log_once after all calls into device code
- # are done
- out('_free_table(&_dev->_subsequent_log_ht);\n')
+ # Free the tables used for log_once after all calls into device code
+ # are done
+ out('_free_table(&_dev->_subsequent_log_ht);\n')
out('QFREE(_dev->_immediate_after_state->queue);\n')
out('if (likely(!_dev->_immediate_after_state->posted)) {\n', postindent=1)
@@ -1717,11 +1839,12 @@ def generate_reset(device, hardness):
out(crep.structtype(device) + ' *_dev UNUSED = ('
+ crep.structtype(device) + ' *)_obj;\n\n')
scope = Symtab(global_scope)
- with LogFailure(device.site, device, ()), crep.DeviceInstanceContext():
+ with MethodRAIIScope() as raii_scope, \
+ LogFailure(device.site, device, ()), crep.DeviceInstanceContext():
code = codegen_call_byname(device.site, device, (),
hardness+'_reset', [], [])
- code = mkCompound(device.site, declarations(scope) + [code])
- code.toc()
+ mkCompoundRAII(device.site, declarations(scope) + [code],
+ raii_scope).toc_inline()
out('}\n\n', preindent = -1)
reset_line_directive()
@@ -1863,10 +1986,12 @@ def generate_init_data_objs(device):
try:
# only data/method obj
assert not node.isindexed()
- init = eval_initializer(
- node.site, node._type, node.astinit,
- Location(node.parent, static_indices(node)),
- global_scope, True)
+ with ctree.SessionRAIIScope() as raii_scope:
+ init = eval_initializer(
+ node.site, node._type, node.astinit,
+ Location(node.parent, static_indices(node)),
+ global_scope, True)
+ dml.globals.session_orphan_allocs += raii_scope.allocs
# mainly meant to capture EIDXVAR; for other errors, the error will
# normally re-appear when evaluating per instance
except DMLError:
@@ -1875,10 +2000,12 @@ def generate_init_data_objs(device):
for i in indices)
nref = mkNodeRef(node.site, node, index_exprs)
try:
- init = eval_initializer(
- node.site, node._type, node.astinit,
- Location(node.parent, index_exprs), global_scope,
- True)
+ with ctree.SessionRAIIScope() as raii_scope:
+ init = eval_initializer(
+ node.site, node._type, node.astinit,
+ Location(node.parent, index_exprs),
+ global_scope, True)
+ dml.globals.session_orphan_allocs += raii_scope.allocs
except DMLError as e:
report(e)
else:
@@ -1886,7 +2013,7 @@ def generate_init_data_objs(device):
coverity_marker('store_writes_const_field',
'FALSE',
node.site)
- init.assign_to(nref, node._type)
+ out(init.assign_to(nref.read(), node._type) + ';\n')
else:
index_exprs = ()
for (i, sz) in enumerate(node.dimsizes):
@@ -1898,7 +2025,7 @@ def generate_init_data_objs(device):
nref = mkNodeRef(node.site, node, index_exprs)
if deep_const(node._type):
coverity_marker('store_writes_const_field', 'FALSE', node.site)
- init.assign_to(nref, node._type)
+ out(init.assign_to(nref.read(), node._type) + ';\n')
for _ in range(node.dimensions):
out('}\n', postindent=-1)
out('}\n\n', preindent = -1)
@@ -1997,27 +2124,38 @@ def generate_init(device, initcode, outprefix):
def generate_static_trampoline(func):
# static trampolines never need to be generated for independent methods
- assert not func.independent
- params = [("_obj", TPtr(TNamed("conf_object_t")))] + func.cparams[1:]
+ # with the niche exception of when
+ # 1. they are startup memoized
+ # 2. have a return value of RAII Type
+ assert not func.independent or (func.memoized and func.rettype.is_raii)
+ params = ([("_obj", TPtr(TNamed("conf_object_t")))] + func.cparams[1:]
+ if not func.independent else func.cparams)
params_string = ('void' if not params
else ", ".join(t.declaration(n) for (n, t) in params))
start_function_definition(func.rettype.declaration(
"%s(%s)" % ("_trampoline" + func.get_cname(), params_string)))
out("{\n", postindent=1)
- out('ASSERT(_obj);\n')
- out('ASSERT(SIM_object_class(_obj) == _dev_class);\n')
- (name, typ) = func.cparams[0]
- out("%s = (%s)_obj;\n" % (typ.declaration(name), typ.declaration("")))
+ if not func.independent:
+ out('ASSERT(_obj);\n')
+ out('ASSERT(SIM_object_class(_obj) == _dev_class);\n')
+ (name, typ) = func.cparams[0]
+ out("%s = (%s)_obj;\n" % (typ.declaration(name), typ.declaration("")))
out("%s%s(%s);\n" % ("" if func.rettype.void
else func.rettype.declaration("result") + " = ",
func.get_cname(),
", ".join(n for (n, t) in func.cparams)))
- output_dml_state_change(name)
+ if not func.independent:
+ output_dml_state_change(name)
if not func.rettype.void:
- out("return result;\n")
+ ret = 'result'
+ if func.memoized and func.rettype.is_raii:
+ info = get_raii_type_info(func.rettype)
+ ret = info.read_dupe('result')
+ out(f'return {ret};\n')
out("}\n", preindent=-1)
def generate_extern_trampoline(exported_name, func):
+ assert not (func.independent and func.memoized and func.rettype.is_raii)
params = (func.cparams if func.independent else
[("_obj", TPtr(TNamed("conf_object_t")))] + func.cparams[1:])
params_string = ('void' if not params
@@ -3104,34 +3242,24 @@ def generate_startup_trait_calls(data, idxvars):
ref = ObjTraitRef(site, node, trait, indices)
out(f'_tref = {ref.read()};\n')
for method in trait_methods:
- outargs = [mkLit(site,
- ('*((%s) {0})'
- % ((TArray(t, mkIntegerLiteral(site, 1))
- .declaration('')),)),
- t)
- for (_, t) in method.outp]
+ outargs = [mkDiscardRef(site) for _ in method.outp]
method_ref = TraitMethodDirect(
site, mkLit(site, '_tref', TTrait(trait)), method)
- with IgnoreFailure(site):
+ with UnusedMethodRAIIScope(), IgnoreFailure(site):
codegen_call_traitmethod(site, method_ref, [],
- outargs) .toc()
+ outargs).toc()
out('}\n', preindent=-1)
def generate_startup_regular_call(method, idxvars):
site = method.site
indices = tuple(mkLit(site, idx, TInt(32, False)) for idx in idxvars)
- outargs = [mkLit(site,
- ('*((%s) {0})'
- % ((TArray(t, mkIntegerLiteral(site, 1))
- .declaration('')),)),
- t)
- for (_, t) in method.outp]
+ outargs = [mkDiscardRef(site) for _ in method.outp]
# startup memoized methods can throw, which is ignored during startup.
# Memoization of the throw then allows for the user to check whether
# or not the method did throw during startup by calling the method
# again.
- with IgnoreFailure(site):
+ with UnusedMethodRAIIScope(), IgnoreFailure(site):
codegen_call(method.site, method, indices, [], outargs).toc()
def generate_startup_calls_entry_function(devnode):
@@ -3177,6 +3305,48 @@ def generate_startup_calls_entry_function(devnode):
generate_startup_call_loops(startups)
out('}\n', preindent=-1)
+def generate_raii_artifacts():
+ destructor_array_items = []
+
+ def add_destructor_array_item(ref_name, destructor):
+ constants.append((
+ f'const _raii_destructor_t *const {ref_name}',
+ f'&_dml_raii_destructors[{len(destructor_array_items)}]'))
+ destructor_array_items.append(destructor)
+
+ add_destructor_array_item(StringTypeInfo.cident_destructor_array_item,
+ StringTypeInfo.cident_destructor)
+ add_destructor_array_item(
+ VectorRAIITypeInfo.cident_destructor_array_item_nonraii_elems,
+ VectorRAIITypeInfo.cident_destructor_nonraii_elems)
+ for info in dml.globals.generated_raii_types.values():
+ if info.should_generate_destructor:
+ start_function_definition(
+ f'void {info.cident_destructor}(void *_data)')
+ out('{\n', postindent=1)
+ TPtr(info.type).print_declaration('data', '_data')
+ info.generate_destructor('data')
+ out('}\n', preindent=-1)
+ add_destructor_array_item(info.cident_destructor_array_item,
+ info.cident_destructor)
+
+ if info.should_generate_copier:
+ start_function_definition(
+ f'void {info.cident_copier}(void *_dest, const void *_src)')
+ out('{\n', postindent=1)
+ TPtr(info.type).print_declaration('dest', '_dest')
+ TPtr(conv_const(True, info.type)).print_declaration('src', '_src')
+ info.generate_copier('dest', 'src')
+ out('}\n', preindent=-1)
+
+ constants.append(('const _raii_destructor_t *const '
+ + '_dml_raii_destructor_ref_none',
+ f'_dml_raii_destructors + {len(destructor_array_items)}')
+ )
+ add_variable_declaration('const _raii_destructor_t _dml_raii_destructors'
+ + f'[{len(destructor_array_items)}]',
+ '{%s}' % (', '.join(destructor_array_items),))
+
class MultiFileOutput(FileOutput):
def __init__(self, stem, header):
@@ -3284,20 +3454,20 @@ def generate_cfile_body(device, footers, full_module, filename_prefix):
generate_initialize(device)
generate_finalize(device)
generate_deinit(device)
- generate_dealloc(device)
generate_events(device)
generate_identity_data_decls()
generate_object_vtables_array()
generate_class_var_decl()
generate_startup_calls_entry_function(device)
generate_init_data_objs(device)
+ generate_dealloc(device)
if dml.globals.dml_version == (1, 2):
generate_reset(device, 'hard')
generate_reset(device, 'soft')
# These parameter values are output into static context, so make sure
# the expressions do not use _dev
- with crep.TypedParamContext():
+ with crep.TypedParamContext(), ctree.StaticRAIIScope():
trait_param_values = {
node: resolve_trait_param_values(node)
for node in flatten_object_subtree(device)
@@ -3407,6 +3577,7 @@ def generate_cfile_body(device, footers, full_module, filename_prefix):
generate_index_enumerations()
generate_tuple_table()
+ generate_raii_artifacts()
for c in footers:
c.toc()
diff --git a/py/dml/clone_test.py b/py/dml/clone_test.py
index 0ef9a63f6..4e04973c8 100644
--- a/py/dml/clone_test.py
+++ b/py/dml/clone_test.py
@@ -21,13 +21,14 @@ def test(self):
dt.TFloat("a"),
dt.TArray(typ0, ctree.mkIntegerLiteral(0, 2)),
dt.TPtr(typ0),
- dt.TVector(typ0),
+ dt.TVectorLegacy(typ0),
dt.TTrait(object()),
dt.TStruct({"name": types.TInt(32, False)}),
dt.TLayout("big-endian", {}),
dt.TFunction([], dt.TVoid()),
dt.TDevice("a")):
typ_clone = typ.clone()
+
self.assertEqual(
types.realtype(typ_clone).cmp(types.realtype(typ)), 0)
self.assertEqual(
diff --git a/py/dml/codegen.py b/py/dml/codegen.py
index fbc25cf3a..ff1789114 100644
--- a/py/dml/codegen.py
+++ b/py/dml/codegen.py
@@ -2,7 +2,7 @@
# SPDX-License-Identifier: MPL-2.0
import re
-from abc import ABC, abstractmethod, abstractproperty
+from abc import ABC, ABCMeta, abstractmethod, abstractproperty
import operator
import contextlib
from functools import reduce
@@ -48,6 +48,9 @@
'CatchFailure',
'ReturnFailure',
'IgnoreFailure',
+ 'get_raii_type_info',
+ 'StringTypeInfo',
+ 'VectorRAIITypeInfo',
'c_rettype',
'c_inargs',
@@ -100,14 +103,26 @@ class LoopContext:
def __enter__(self):
self.prev = LoopContext.current
LoopContext.current = self
+ assert RAIIScope.scope_stack()
+ self.outermost_raii_scope = RAIIScope.scope_stack()[-1]
+ return self
def __exit__(self, exc_type, exc_val, exc_tb):
assert LoopContext.current is self
LoopContext.current = self.prev
+ @abstractmethod
+ def break_(self, site): pass
+ @abstractmethod
+ def continue_(self, site): pass
class CLoopContext(LoopContext):
'''DML loop context corresponding to a C loop'''
def break_(self, site):
- return [mkBreak(site)]
+ return [codegen_raii_clear_up_to(site, self.outermost_raii_scope),
+ mkBreak(site)]
+
+ def continue_(self, site):
+ return [codegen_raii_clear_up_to(site, self.outermost_raii_scope),
+ mkContinue(site)]
class NoLoopContext(LoopContext):
'''DML loop context corresponding to an inlined method call.
@@ -116,6 +131,9 @@ class NoLoopContext(LoopContext):
def break_(self, site):
raise EBREAK(site)
+ def continue_(self, site):
+ raise ECONT(site)
+
class GotoLoopContext(LoopContext):
'''DML loop context not directly corresponding to a single C loop
statement. Uses of `break` is codegen:d as a goto past the loop.'''
@@ -128,7 +146,11 @@ def __init__(self):
def break_(self, site):
self.used = True
- return [mkGotoBreak(site, self.label)]
+ return [codegen_raii_clear_up_to(site, self.outermost_raii_scope),
+ mkGotoBreak(site, self.label)]
+
+ def continue_(self, site):
+ raise ECONTU(site)
class Failure(ABC):
'''Handle exceptions failure handling is supposed to handle the various kind of
@@ -138,6 +160,10 @@ class Failure(ABC):
fail_stack = []
def __init__(self, site):
self.site = site
+ if RAIIScope.scope_stack():
+ self.outermost_raii_scope = RAIIScope.scope_stack()[-1]
+ else:
+ self.outermost_raii_scope = None
def __enter__(self):
self.fail_stack.append(self)
def __exit__(self, exc_type, exc_val, exc_tb):
@@ -174,7 +200,12 @@ class ReturnFailure(Failure):
'''Generate boolean return statements to signal success. False
means success.'''
def fail(self, site):
- return mkReturn(site, mkBoolConstant(site, True))
+ assert self.outermost_raii_scope is not None
+ return mkCompound(site,
+ [codegen_raii_clear_up_to(
+ site,
+ self.outermost_raii_scope),
+ mkReturn(site, mkBoolConstant(site, True))])
def nofail(self):
'''Return code that is used to leave the method successfully'''
return mkReturn(self.site, mkBoolConstant(self.site, False))
@@ -187,9 +218,13 @@ def __init__(self, site, method_node):
self.label = None
self.method = method_node
def fail(self, site):
+ assert self.outermost_raii_scope is not None
if not self.label:
self.label = gensym('throw')
- return mkThrow(site, self.label)
+ return mkCompound(site,
+ [codegen_raii_clear_up_to(site,
+ self.outermost_raii_scope),
+ mkThrow(site, self.label)])
class IgnoreFailure(Failure):
'''Ignore exceptions'''
@@ -199,9 +234,14 @@ def fail(self, site):
class ExitHandler(ABC):
current = None
+ def __init__(self):
+ assert RAIIScope.scope_stack()
+ self.outermost_raii_scope = RAIIScope.scope_stack()[-1]
+
def __enter__(self):
self.prev = ExitHandler.current
ExitHandler.current = self
+
def __exit__(self, exc_type, exc_val, exc_tb):
assert ExitHandler.current is self
ExitHandler.current = self.prev
@@ -221,12 +261,24 @@ def __init__(self):
self.used = False
GotoExit.count += 1
self.label = 'exit%d' % (self.count,)
+ super(GotoExit, self).__init__()
+
+def codegen_raii_clear_up_to(site, scope):
+ return mkRAIIScopeClears(site,
+ RAIIScope.scope_stack()[scope.scope_stack_ix+1:])
+def codegen_raii_clear_up_to_and_including(site, scope):
+ return mkRAIIScopeClears(site,
+ RAIIScope.scope_stack()[scope.scope_stack_ix:])
+
class GotoExit_dml12(GotoExit):
def codegen_exit(self, site, retvals):
assert retvals is None
self.used = True
- return mkGoto(site, self.label)
+ return mkCompound(
+ site,
+ [codegen_raii_clear_up_to(site, self.outermost_raii_scope)]
+ + [mkGoto(site, self.label)])
class GotoExit_dml14(GotoExit):
def __init__(self, outvars):
@@ -240,12 +292,14 @@ def codegen_exit(self, site, retvals):
site,
[mkCopyData(site, val, out)
for (out, val) in zip(self.outvars, retvals)]
- + [mkReturnFromInline(site, self.label)])
+ + [codegen_raii_clear_up_to(site, self.outermost_raii_scope),
+ mkReturnFromInline(site, self.label)])
class ReturnExit(ExitHandler):
def __init__(self, outp, throws):
self.outp = outp
self.throws = throws
+ super(ReturnExit, self).__init__()
def codegen_exit(self, site, retvals):
assert retvals is not None, 'dml 1.2/1.4 mixup'
return codegen_return(site, self.outp, self.throws, retvals)
@@ -253,10 +307,10 @@ def codegen_exit(self, site, retvals):
def memoized_return_failure_leave(site, make_ref, failed):
ran = make_ref('ran', TInt(8, True))
threw = make_ref('threw', TBool())
- stmts = [mkCopyData(site, mkIntegerLiteral(site, 1), ran),
- mkCopyData(site, mkBoolConstant(site, failed), threw),
- mkReturn(site, mkBoolConstant(site, failed))]
- return stmts
+ return [mkCopyData(site, mkIntegerLiteral(site, 1), ran),
+ mkCopyData(site, mkBoolConstant(site, failed), threw),
+ mkRAIIScopeClears(site, RAIIScope.scope_stack()),
+ mkReturn(site, mkBoolConstant(site, failed))]
class MemoizedReturnFailure(Failure):
'''Generate boolean return statements to signal success. False
@@ -293,7 +347,7 @@ def codegen_exit(self, site, retvals):
for ((name, typ), val) in zip(self.outp, retvals):
target = self.make_ref(f'p_{name}', typ)
stmts.append(mkCopyData(site, val, target))
- targets.append(target)
+ targets.append(OrphanWrap(target.site, target))
stmts.append(codegen_return(site, self.outp, self.throws, targets))
return mkCompound(site, stmts)
@@ -364,20 +418,22 @@ def fail_handler(self):
if self.method.throws else NoFailure(self.method.site))
def memoization_common_prelude(name, site, outp, throws, make_ref):
- has_run_stmts = []
- # Throwing is treated as a special kind of output parameter, stored through
- # 'threw'. When 'ran' indicates the method has been called before to
- # completion, 'threw' is retrieved to check whether the method threw or
- # not. If it did, then the call completes by throwing again; otherwise, the
- # cached return values are retrieved and returned.
- if throws:
+ with UnusedMethodRAIIScope():
+ has_run_stmts = []
+ # Throwing is treated as a special kind of output parameter, stored
+ # through 'threw'. When 'ran' indicates the method has been called
+ # before to completion, 'threw' is retrieved to check whether the
+ # method threw or not. If it did, then the call completes by throwing
+ # again; otherwise, the cached return values are retrieved and
+ # returned.
+ if throws:
+ has_run_stmts.append(
+ mkIf(site, make_ref('threw', TBool()),
+ mkReturn(site, mkBoolConstant(site, True))))
has_run_stmts.append(
- mkIf(site, make_ref('threw', TBool()),
- mkReturn(site, mkBoolConstant(site, True))))
- has_run_stmts.append(
- codegen_return(site, outp, throws,
- [make_ref(f'p_{pname}', ptype)
- for (pname, ptype) in outp]))
+ codegen_return(site, outp, throws,
+ [OrphanWrap(site, make_ref(f'p_{pname}', ptype))
+ for (pname, ptype) in outp]))
# 'ran' is used to check whether the function has been called or not:
# - 0: never been called before. Set to -1, and execute the body.
# Before any return, cache the results, and set 'ran' to 1.
@@ -506,6 +562,12 @@ def cident_set_value(self):
else:
return '_simple_event_only_domains_set_value'
+ @property
+ def cident_destroy(self):
+ return (self.cident_prefix + 'destroy'
+ if self.args_type and self.args_type.is_raii
+ else '_destroy_simple_event_data')
+
class AfterOnHookInfo(CheckpointedAfterInfo):
def __init__(self, dimsizes, parent, typeseq_info, prim_key,
@@ -531,6 +593,10 @@ def generate_args_serializer(self, site, args_expr, out_expr): pass
def generate_args_deserializer(self, site, val_expr, out_expr, error_out):
pass
+ @abstractmethod
+ def generate_args_destructor(self, site, args_expr):
+ pass
+
@abstractproperty
def string_prim_key(self):
'''The AfterOnHookInfo key for the primary component -- the target
@@ -559,10 +625,19 @@ def cident_args_deserializer(self):
assert self.has_serialized_args
return self.cident_prefix + 'args_deserializer'
+ @property
+ def cident_args_destructor(self):
+ assert self.has_serialized_args and self.args_type.is_raii
+ return self.cident_prefix + 'args_destructor'
+
class ImmediateAfterInfo(AfterInfo):
def __init__(self, key, dimsizes, uniq):
self.uniq = uniq
super().__init__(key, dimsizes)
+ self.args_raii_info = (get_raii_type_info(self.args_type)
+ if (self.args_type
+ and self.args_type.is_raii)
+ else None)
@abstractmethod
def generate_callback_call(self, indices_lit, args_lit): pass
@@ -597,7 +672,7 @@ def generate_callback_call(self, indices_lit, args_lit):
for i in range(self.method.dimensions))
args = tuple(mkLit(site, f'{args_lit}->{pname}', ptype)
for (pname, ptype) in self.method.inp)
- with LogFailure(site, self.method, indices), \
+ with UnusedMethodRAIIScope(), LogFailure(site, self.method, indices), \
crep.DeviceInstanceContext():
code = codegen_call(site, self.method, indices, args, ())
code = mkCompound(site, [code])
@@ -606,13 +681,16 @@ def generate_callback_call(self, indices_lit, args_lit):
class AfterDelayIntoSendNowInfo(AfterDelayInfo):
def __init__(self, typeseq_info, uniq):
super().__init__(typeseq_info, [], uniq)
- self .typeseq_info = typeseq_info
+ self.typeseq_info = typeseq_info
hookref_type = THook(typeseq_info.types, validated=True)
self._args_type = (
TStruct({'hookref': hookref_type,
'args': typeseq_info.struct},
label=f'_simple_event_{self.uniq}_args')
if typeseq_info.types else hookref_type)
+ self.typeseq_info_struct_raii_info = (
+ get_raii_type_info(typeseq_info.struct)
+ if typeseq_info.types and typeseq_info.struct.is_raii else None)
@property
def args_type(self):
@@ -630,10 +708,13 @@ def generate_callback_call(self, indices_lit, args_lit):
assert indices_lit is None
has_args = bool(self.typeseq_info.types)
hookref = f'{args_lit}->hookref' if has_args else f'*{args_lit}'
+ resolved_hookref = ('_DML_resolve_hookref(_dev, _hook_aux_infos, '
+ + f'{hookref})')
args = f'&{args_lit}->args' if has_args else 'NULL'
- out('_DML_send_hook(&_dev->obj, &_dev->_detached_hook_queue_stack, '
- + f'_DML_resolve_hookref(_dev, _hook_aux_infos, {hookref}), '
- + f'{args});\n')
+ out(f'_DML_SEND_HOOK({resolved_hookref}, {args});\n')
+ if self.typeseq_info_struct_raii_info:
+ out(self.typeseq_info_struct_raii_info.read_destroy_lval(
+ f'{args_lit}->args') + ';\n')
def get_after_delay(key):
try:
@@ -644,7 +725,6 @@ def get_after_delay(key):
dml.globals.after_delay_infos[key] = info
return info
-
class AfterOnHookIntoMethodInfo(AfterOnHookInfo):
def __init__(self, typeseq_info, method, param_to_msg_comp):
self.method = method
@@ -656,23 +736,36 @@ def __init__(self, typeseq_info, method, param_to_msg_comp):
if i not in param_to_msg_comp},
label=f'_after_on_hook_{self.uniq}_args')
if len(self.method.inp) > len(param_to_msg_comp) else None)
+ self.args_raii_info = (get_raii_type_info(self._args_type)
+ if self._args_type and self._args_type.is_raii
+ else None)
def generate_callback_call(self, indices_lit, args_lit, msg_lit):
site = self.method.site
indices = tuple(mkLit(site, f'{indices_lit}[{i}]', TInt(32, False))
for i in range(self.method.dimensions))
args = tuple(
- mkLit(site,
- f'{msg_lit}->comp{self.param_to_msg_comp[i]}'
- if i in self.param_to_msg_comp else f'{args_lit}->{pname}',
- ptype)
+ (lambda e: RAIIDupe(site, e) if ptype.is_raii else e)(
+ mkLit(site, f'{msg_lit}->comp{self.param_to_msg_comp[i]}',
+ ptype))
+ if i in self.param_to_msg_comp else
+ mkLit(site, f'{args_lit}->{pname}', ptype)
for (i, (pname, ptype)) in enumerate(self.method.inp))
- with LogFailure(site, self.method, indices), \
+ with UnusedMethodRAIIScope(), LogFailure(site, self.method, indices), \
crep.DeviceInstanceContext():
code = codegen_call(site, self.method, indices, args, ())
code = mkCompound(site, [code])
code.toc()
+ def generate_args_destructor(self, site, args_expr):
+ for (i, (name, typ)) in enumerate(self.method.inp):
+ if i in self.param_to_msg_comp or not typ.is_raii:
+ continue
+ info = get_raii_type_info(typ)
+ out(info.read_destroy_lval(ctree.mkSubRef(site, args_expr, name,
+ '.').read())
+ + ';\n')
+
def generate_args_serializer(self, site, args_expr, out_expr):
sources = tuple((ctree.mkSubRef(site, args_expr, name, "."),
safe_realtype(typ))
@@ -681,13 +774,7 @@ def generate_args_serializer(self, site, args_expr, out_expr):
serialize.serialize_sources_to_list(site, sources, out_expr)
def generate_args_deserializer(self, site, val_expr, out_expr, error_out):
- if self.args_type:
- tmp_out_decl, tmp_out_ref = serialize.declare_variable(
- site, '_tmp_out', self.args_type)
- tmp_out_decl.toc()
- else:
- tmp_out_ref = None
- targets = tuple((ctree.mkSubRef(site, tmp_out_ref, name, "."),
+ targets = tuple((ctree.mkSubRef(site, out_expr, name, "."),
safe_realtype(typ))
if i not in self.param_to_msg_comp else None
for (i, (name, typ)) in enumerate(self.method.inp))
@@ -698,10 +785,6 @@ def error_out_at_index(_i, exc, msg):
serialize.deserialize_list_to_targets(
site, val_expr, targets, error_out_at_index,
f'deserialization of arguments to {self.method.name}')
- if self.args_type:
- ctree.mkAssignStatement(site, out_expr,
- ctree.ExpressionInitializer(
- tmp_out_ref)).toc()
@property
def args_type(self):
@@ -719,6 +802,11 @@ def string_prim_key(self):
class AfterOnHookIntoSendNowInfo(AfterOnHookInfo):
def __init__(self, typeseq_info, sendnow_typeseq_info, param_to_msg_comp):
self.sendnow_typeseq_info = sendnow_typeseq_info
+ self.sendnow_typeseq_info_raii_info = (
+ get_raii_type_info(sendnow_typeseq_info.struct)
+ if (self.sendnow_typeseq_info.types
+ and self.sendnow_typeseq_info.struct.is_raii) else None)
+
inp = [(f'comp{i}', typ)
for (i, typ) in enumerate(sendnow_typeseq_info.types)]
has_inner_args = len(inp) > len(param_to_msg_comp)
@@ -743,20 +831,41 @@ def generate_callback_call(self, indices_lit, args_lit, msg_lit):
has_inner_args = (len(self.sendnow_typeseq_info.types)
> len(self.param_to_msg_comp))
hookref = f'{args_lit}->hookref' if has_inner_args else f'*{args_lit}'
+ resolved_hookref = ('_DML_resolve_hookref(_dev, _hook_aux_infos, '
+ + f'{hookref})')
+
+ msg_comps = ', '.join(
+ (lambda x: (get_raii_type_info(typ).read_dupe(x)
+ if typ.is_raii else x))(
+ f'{msg_lit}->comp{self.param_to_msg_comp[i]}')
+ if i in self.param_to_msg_comp else
+ f'{args_lit}->args.comp{i}'
+ for (i, typ) in enumerate(self.sendnow_typeseq_info.types))
+ msg = (('&((%s_t){%s})'
+ % (self.sendnow_typeseq_info.struct.label, msg_comps))
+ if msg_comps else 'NULL')
+ if self.sendnow_typeseq_info_raii_info:
+ destructor = self.sendnow_typeseq_info_raii_info.cident_destructor
+ out(f'_DML_SEND_HOOK_RAII({resolved_hookref}, {msg}, '
+ + f'{destructor});\n')
+ else:
+ out(f'_DML_SEND_HOOK({resolved_hookref}, {msg});\n')
- sendnow_msg_struct = self.sendnow_typeseq_info.struct
- args = (('&(%s_t) {%s}'
- % (sendnow_msg_struct.label,
- ', '.join(
- f'{msg_lit}->comp{self.param_to_msg_comp[i]}'
- if i in self.param_to_msg_comp else
- f'{args_lit}->args.comp{i}'
- for i in range(len(self.sendnow_typeseq_info.types)))))
- if self.sendnow_typeseq_info.types else 'NULL')
+ def generate_args_destructor(self, site, args_expr):
+ has_inner_args = (len(self.sendnow_typeseq_info.types)
+ > len(self.param_to_msg_comp))
+ if not has_inner_args:
+ return
+
+ inner_args = ctree.mkSubRef(site, args_expr, 'args', '.')
+ for (i, typ) in enumerate(self.sendnow_typeseq_info.types):
+ if i in self.param_to_msg_comp or not typ.is_raii:
+ continue
+ info = get_raii_type_info(typ)
+ out(info.read_destroy_lval(ctree.mkSubRef(site, inner_args,
+ f'comp{i}', '.').read())
+ + ';\n')
- out('_DML_send_hook(&_dev->obj, &_dev->_detached_hook_queue_stack, '
- + f'_DML_resolve_hookref(_dev, _hook_aux_infos, {hookref}), '
- + f'{args});\n')
def generate_args_serializer(self, site, args_expr, out_expr):
has_inner_args = (len(self.sendnow_typeseq_info.types)
@@ -783,11 +892,8 @@ def generate_args_serializer(self, site, args_expr, out_expr):
def generate_args_deserializer(self, site, val_expr, out_expr, error_out):
has_inner_args = (len(self.sendnow_typeseq_info.types)
> len(self.param_to_msg_comp))
- tmp_out_decl, tmp_out_ref = serialize.declare_variable(
- site, '_tmp_out', self.args_type)
- tmp_out_decl.toc()
- hookref = (ctree.mkSubRef(site, tmp_out_ref, 'hookref', '.')
- if has_inner_args else tmp_out_ref)
+ hookref = (ctree.mkSubRef(site, out_expr, 'hookref', '.')
+ if has_inner_args else out_expr)
targets = [(hookref,
safe_realtype(THook(self.sendnow_typeseq_info.types,
validated=True)))]
@@ -806,7 +912,7 @@ def error_out_at_index(_i, exc, msg):
'deserialization of arguments to a send_now')
if has_inner_args:
- inner_args = ctree.mkSubRef(site, tmp_out_ref, 'args', '.')
+ inner_args = ctree.mkSubRef(site, out_expr, 'args', '.')
inner_args_targets = (
(ctree.mkSubRef(site, inner_args, f'comp{i}', '.'),
safe_realtype(typ))
@@ -816,10 +922,6 @@ def error_out_at_index(_i, exc, msg):
site, inner_args_val, inner_args_targets, error_out_at_index,
'deserialization of arguments to a send_now')
-
- ctree.mkAssignStatement(site, out_expr,
- ctree.ExpressionInitializer(tmp_out_ref)).toc()
-
@property
def args_type(self):
return self._args_type
@@ -835,10 +937,10 @@ def string_prim_key(self):
class ImmediateAfterIntoMethodInfo(ImmediateAfterInfo):
def __init__(self, method, uniq):
self.method = method
- super().__init__(method, method.dimsizes, uniq)
self._args_type = (TStruct(dict(method.inp),
- label=f'_immediate_after_{self.uniq}_args')
+ label=f'_immediate_after_{uniq}_args')
if method.inp else None)
+ super().__init__(method, method.dimsizes, uniq)
@property
def args_type(self):
@@ -854,22 +956,25 @@ def generate_callback_call(self, indices_lit, args_lit):
for i in range(self.method.dimensions))
args = tuple(mkLit(site, f'{args_lit}->{pname}', ptype)
for (pname, ptype) in self.method.inp)
- with LogFailure(site, self.method, indices), \
+ with UnusedMethodRAIIScope(), LogFailure(site, self.method, indices), \
crep.DeviceInstanceContext():
code = codegen_call(site, self.method, indices, args, ())
- code = mkCompound(site, [code])
- code.toc()
+ mkCompound(site, [code]).toc()
class ImmediateAfterIntoSendNowInfo(ImmediateAfterInfo):
def __init__(self, typeseq_info, uniq):
- super().__init__(typeseq_info, [], uniq)
self.typeseq_info = typeseq_info
hookref_type = THook(typeseq_info.types, validated=True)
self._args_type = (
TStruct({'hookref': hookref_type,
'args': typeseq_info.struct},
- label=f'_immediate_after_{self.uniq}_args')
+ label=f'_immediate_after_{uniq}_args')
if typeseq_info.types else hookref_type)
+ self.typeseq_info_raii_info = (get_raii_type_info(typeseq_info.struct)
+ if (typeseq_info.types
+ and typeseq_info.struct.is_raii)
+ else None)
+ super().__init__(typeseq_info, [], uniq)
@property
def args_type(self):
@@ -884,9 +989,12 @@ def generate_callback_call(self, indices_lit, args_lit):
has_args = bool(self.typeseq_info.types)
hookref = f'{args_lit}->hookref' if has_args else f'*{args_lit}'
args = f'&{args_lit}->args' if has_args else 'NULL'
- out('_DML_send_hook(&_dev->obj, &_dev->_detached_hook_queue_stack, '
- + f'_DML_resolve_hookref(_dev, _hook_aux_infos, {hookref}), '
- + f'{args});\n')
+ resolved_hookref = ('_DML_resolve_hookref(_dev, _hook_aux_infos, '
+ + f'{hookref})')
+ out(f'_DML_SEND_HOOK({resolved_hookref}, {args});\n')
+ if self.typeseq_info_raii_info:
+ out(self.typeseq_info_raii_info.read_destroy_lval(
+ f'{args_lit}->args') + ';\n')
def get_immediate_after(key):
try:
@@ -937,7 +1045,329 @@ def args_init(self):
assert self.inargs
return f'{{ {", ".join(inarg.read() for inarg in self.inargs)} }}'
-def declarations(scope):
+class RAIITypeInfo(metaclass=ABCMeta):
+    '''Information used to generate artifacts corresponding to a unique RAII
+    type'''
+
+ @abstractproperty
+ def type(self): pass
+
+ @abstractproperty
+ def cident_destructor(self): pass
+
+ @abstractproperty
+ def cident_destructor_array_item(self): pass
+
+ @abstractproperty
+ def cident_copier(self): pass
+
+ # Return a void-typed C expression that consumes all resources
+ # associated with the expression's value
+ @abstractmethod
+ def read_destroy(self, expr): pass
+
+ # A variant of read_destroy specialized for when expr is an lvalue
+ # This can be important for when the RAII type this RAIITypeInfo is
+    # for can be arbitrarily large
+ def read_destroy_lval(self, expr):
+ return self.read_destroy(expr)
+
+ # Return a void-typed C expression that copies src into tgt, replacing
+ # tgt's resources with a duplication of those of src.
+ @abstractmethod
+ def read_copy(self, tgt, src): pass
+
+ # A variant of read_copy specialized for when src is an lvalue
+ # This can be important for when the RAII type this RAIITypeInfo is
+ # for can be arbitrarily large
+ def read_copy_lval(self, tgt, src):
+ return self.read_copy(tgt, src)
+
+ # Return a void-typed C expression that moves src into tgt.
+    # tgt must be a C lval. src is consumed by this and must never be accessed
+    # again, not even to destroy it. This means src must be an orphan.
+ def read_linear_move(self, tgt, src):
+ return (f'_DML_RAII_LINEAR_MOVE_SIMPLE_RVAL('
+ + f'{self.type.declaration("")}, '
+ + f'{self.cident_destructor}, {tgt}, {src})')
+
+    # A specialization of read_linear_move to optimize the case when the src is
+ # a C lvalue.
+ # This can be important if the type this RAIITypeInfo is for can be
+ # arbitrarily large (like structs)
+ def read_linear_move_lval(self, tgt, src):
+ return (f'_DML_RAII_LINEAR_MOVE_SIMPLE({self.type.declaration("")}, '
+ + f'{self.cident_destructor}, {tgt}, {src})')
+
+ # Return a C expression that evaluates to a copy of expr with duplicated
+ # associated resources.
+ def read_dupe(self, expr):
+ t = self.type.declaration('')
+ return f'_DML_RAII_DUPE({t}, {self.cident_copier}, {expr})'
+
+class StringTypeInfo(RAIITypeInfo):
+ type = TString()
+ cident_destructor = '_DML_string_destructor'
+ cident_destructor_array_item = '_DML_string_destructor_ref'
+ cident_copier = '_DML_string_copier'
+
+ def read_destroy(self, expr):
+ return f'_dml_string_free({expr})'
+
+ def read_copy(self, tgt, src):
+ return f'_dml_string_copy((void *)&({tgt}), {src})'
+
+ def read_dupe(self, expr):
+ return f'_dml_string_dupe({expr})'
+
+class GeneratedRAIITypeInfo(RAIITypeInfo):
+ @abstractproperty
+ def cident_prefix(self):
+ '''A prefix for C identifiers used for the naming of artifacts
+ generated for the RAII type'''
+
+ @property
+ def cident_destructor(self):
+ return self.cident_prefix + 'destructor'
+
+ @property
+ def cident_destructor_array_item(self):
+ return self.cident_prefix + 'destructor_ref'
+
+ @property
+ def cident_copier(self):
+ return self.cident_prefix + 'copier'
+
+ @property
+ def cident_dupe(self):
+ return self.cident_prefix + 'dupe'
+
+ def read_destroy(self, expr):
+ return f'_DML_RAII_DESTROY_RVAL({self.cident_destructor}, {expr})'
+
+ def read_destroy_lval(self, expr):
+ return f'{self.cident_destructor}((void *)&({expr}))'
+
+ def read_copy(self, tgt, src):
+ return f'_DML_RAII_COPY_RVAL({self.cident_copier}, {tgt}, {src})'
+
+ def read_copy_lval(self, tgt, src):
+ return f'{self.cident_copier}((void *)&({tgt}), &({src}))'
+
+ should_generate_destructor = True
+ def generate_destructor(self, expr):
+ raise ICE(None, f"generate_destructor of {type(self).__name__} is abstract")
+
+ should_generate_copier = True
+ def generate_copier(self, tgt, src):
+ raise ICE(None, f"generate_copier of {type(self).__name__} is abstract")
+
+class StructRAIITypeInfo(GeneratedRAIITypeInfo):
+ def __init__(self, type):
+ assert isinstance(type, TStruct)
+ self._type = type
+ self.raii_members = [
+ (name, typ, info)
+ for (name, typ) in self.type.members.items()
+ if typ.is_raii
+ for info in (get_raii_type_info(typ),)]
+
+
+ @property
+ def type(self):
+ return self._type
+
+ @property
+ def cident_prefix(self):
+ return '_dml_raii_' + self.type.label + '_'
+
+ def read_linear_move_lval(self, tgt, src):
+ if deep_const(self.type):
+ return (f'_DML_RAII_LINEAR_MOVE_MEMCPY({self.cident_destructor}'
+ + f', {tgt}, {src})')
+ return GeneratedRAIITypeInfo.read_linear_move_lval(self, tgt, src)
+
+ def read_linear_move(self, tgt, src):
+ if deep_const(self.type):
+ return ('_DML_RAII_LINEAR_MOVE_MEMCPY_RVAL('
+ + f'{self.cident_destructor}, {tgt}, {src})')
+ return GeneratedRAIITypeInfo.read_linear_move(self, tgt, src)
+
+ def generate_destructor(self, expr):
+ for (name, _, info) in self.raii_members:
+ out(info.read_destroy_lval(f'{expr}->{name}') + ';\n')
+
+ def generate_copier(self, tgt, src):
+ site = logging.SimpleSite(self.cident_copier)
+ for (name, typ) in self.type.members.items():
+ out(ctree.ExpressionInitializer(
+ mkLit(site, f'{src}->{name}', typ)).assign_to(f'{tgt}->{name}',
+ typ)
+ + ';\n')
+
+class ArrayRAIITypeInfo(GeneratedRAIITypeInfo):
+ count = 0
+ def __init__(self, type):
+ assert isinstance(type, TArray) and type.size.constant
+ self._type = type
+ self.base_info = get_raii_type_info(type.base)
+ self.uniq = ArrayRAIITypeInfo.count
+ ArrayRAIITypeInfo.count += 1
+
+ @property
+ def type(self):
+ return self._type
+
+ @property
+ def cident_prefix(self):
+ return f'_dml_raii_array_{self.uniq}_'
+
+ def generate_destructor(self, expr):
+ if not self.type.size.value:
+ return
+ out(f'for (uint32 _i = 0; _i < {self.type.size.read()}; ++_i)\n',
+ postindent=1)
+ out(self.base_info.read_destroy_lval(f'(*{expr})[_i]') + ';\n',
+ postindent=-1)
+
+ def generate_copier(self, tgt, src):
+ site = logging.SimpleSite(self.cident_copier)
+ out(f'for (uint32 _i = 0; _i < {self.type.size.read()}; ++_i) {{\n',
+ postindent=1)
+ out(ctree.ExpressionInitializer(
+ mkLit(site, f'(*{src})[_i]', self.type.base)).assign_to(
+ f'(*({tgt}))[_i]', self.type.base)
+ + ';\n')
+ out('}\n',preindent=-1)
+
+ def read_linear_move_lval(self, tgt, src):
+ return (f'_DML_RAII_LINEAR_MOVE_MEMCPY({self.cident_destructor}'
+ + f', {tgt}, {src})')
+
+ def read_linear_move(self, tgt, src):
+ # Any array expression should be an lvalue
+ raise ICE(None,
+ 'ArrayTypeInfo.read_linear_move called instead of '
+ + 'read_linear_move_lval')
+
+ def read_dupe(self, expr):
+ raise ICE(None,
+ 'ArrayTypeInfo.read_dupe called. '
+ + 'Arrays should never be duped!')
+
+class VectorRAIITypeInfo(GeneratedRAIITypeInfo):
+ cident_destructor_nonraii_elems = '_DML_vector_nonraii_elems_destructor'
+ cident_destructor_array_item_nonraii_elems = (
+ '_DML_vector_nonraii_elems_destructor_ref')
+ count = 0
+ def __init__(self, type):
+ assert isinstance(type, TVector)
+ self._type = type
+ self.base_info = (get_raii_type_info(type.base)
+ if type.base.is_raii else None)
+ self.uniq = VectorRAIITypeInfo.count
+ VectorRAIITypeInfo.count += 1
+
+ @property
+ def type(self):
+ return self._type
+
+ @property
+ def cident_prefix(self):
+ return f'_dml_raii_vector_{self.uniq}_'
+
+ @property
+ def cident_destructor(self):
+ if self.base_info:
+ return super().cident_destructor
+ return self.cident_destructor_nonraii_elems
+
+ @property
+ def cident_destructor_array_item(self):
+ if self.base_info:
+ return super().cident_destructor_array_item
+ return self.cident_destructor_array_item_nonraii_elems
+
+ def read_destroy(self, expr):
+ if self.base_info:
+ return ('_dml_vect_free_raii(sizeof(%s), %s, %s)'
+ % (self.type.base.declaration(""),
+ self.base_info.cident_destructor,
+ expr))
+ else:
+ return f'_dml_vect_free({expr})'
+
+ def read_destroy_lval(self, expr):
+ return self.read_destroy(expr)
+
+ def read_copy(self, tgt, src):
+ if not self.base_info:
+ return ('_dml_vect_copy(sizeof(%s), (_dml_vect_t *)&(%s), %s)'
+ % (self.type.base.declaration(""), tgt, src))
+ return GeneratedRAIITypeInfo.read_copy(self, tgt, src)
+
+ def read_copy_lval(self, tgt, src):
+ if not self.base_info:
+ return ('_dml_vect_copy(sizeof(%s), (_dml_vect_t *)&(%s), %s)'
+ % (self.type.base.declaration(""), tgt, src))
+ return GeneratedRAIITypeInfo.read_copy_lval(self, tgt, src)
+
+ @property
+ def should_generate_destructor(self):
+ return self.base_info is not None
+
+ def generate_destructor(self, expr):
+ assert self.should_generate_destructor
+ out(self.read_destroy_lval(f'*{expr}') + ';\n')
+
+ def generate_copier(self, tgt, src):
+ if self.base_info:
+ out(f'if (unlikely({tgt}->elements == {src}->elements)) return;\n')
+ out('_dml_vect_resize_raii(sizeof(%s), %s, %s, %s->len);\n'
+ % (self.type.base.declaration(''),
+ self.base_info.cident_destructor,
+ tgt,
+ src))
+ def elem_of_vec(vec):
+ return ('DML_VECT_ELEM_UNSAFE(%s, %s, _i)'
+ % (self.type.base.declaration(""), vec))
+ out(f'for (uint32 _i = 0; _i < {src}->len; ++_i)\n',
+ postindent=1)
+ out(self.base_info.read_copy_lval(elem_of_vec(f'*({tgt})'),
+ elem_of_vec(f'*({src})'))
+ + ';\n',
+ postindent=-1)
+ else:
+ out('_dml_vect_copy(sizeof(%s), %s, *%s);\n'
+ % (self.type.base.declaration(""), tgt, src))
+
+def get_raii_type_info(t):
+ uct = safe_realtype_unconst(t)
+ assert uct.is_raii
+ if isinstance(uct, TString):
+ return StringTypeInfo()
+ try:
+ return dml.globals.generated_raii_types[TypeKey(uct)]
+ except KeyError:
+ if isinstance(uct, TStruct):
+ if deep_const(uct):
+ raise ICE(t.declaration_site or None,
+ ('Structs that simultaneously have const-qualified '
+ + 'members and resource-enriched (RAII) members '
+ + 'are not supported'))
+ info = StructRAIITypeInfo(uct)
+ elif isinstance(uct, TArray):
+ if not uct.size.constant:
+ raise EVLARAII(t.declaration_site, t.describe())
+ info = ArrayRAIITypeInfo(uct)
+ elif isinstance(uct, TVector):
+ info = VectorRAIITypeInfo(uct)
+ else:
+ assert False
+ dml.globals.generated_raii_types[TypeKey(uct)] = info
+ return info
+
+def declarations(scope, unscoped_raii=False):
"Get all local declarations in a scope as a list of Declaration objects"
decls = []
for sym in scope.symbols():
@@ -946,7 +1376,7 @@ def declarations(scope):
continue
if sym.stmt:
continue
- decl = sym_declaration(sym)
+ decl = sym_declaration(sym, unscoped_raii=unscoped_raii)
if decl:
decls.append(decl)
@@ -1061,7 +1491,7 @@ def codegen_sizeof(site, expr):
fun = mkLit(site, 'sizeof',
TFunction([], TNamed('size_t'),
varargs = True))
- return Apply(site, fun, [expr], fun.ctype())
+ return Apply(site, fun, [expr], fun.ctype(), True)
def flatten(x):
'''Recursively flatten lists and tuples'''
@@ -1084,6 +1514,8 @@ def expr_unop(tree, location, scope):
var = rh_ast.args[0]
if var in typedefs and scope.lookup(var) is None:
report(WSIZEOFTYPE(tree.site))
+ if safe_realtype_shallow(typedefs[var]).is_raii:
+ report(ESIZEOFRAII(tree.site))
return codegen_sizeof(
tree.site, mkLit(tree.site, cident(var), None))
try:
@@ -1142,8 +1574,10 @@ def expr_unop(tree, location, scope):
elif op == 'post++': return mkPostInc(tree.site, rh)
elif op == 'post--': return mkPostDec(tree.site, rh)
elif op == 'sizeof':
- if not dml.globals.compat_dml12 and not isinstance(rh, ctree.LValue):
+ if not dml.globals.compat_dml12 and not rh.addressable:
raise ERVAL(rh.site, 'sizeof')
+ if safe_realtype_shallow(rh.ctype()).is_raii:
+ report(ESIZEOFRAII(tree.site))
return codegen_sizeof(tree.site, rh)
elif op == 'defined': return mkBoolConstant(tree.site, True)
elif op == 'stringify':
@@ -1160,17 +1594,28 @@ def expr_typeop(tree, location, scope):
(struct_defs, t) = eval_type(t, tree.site, location, scope)
for (site, _) in struct_defs:
report(EANONSTRUCT(site, "'sizeoftype' expression"))
+ if safe_realtype_shallow(t).is_raii:
+ report(ESIZEOFRAII(tree.site))
return codegen_sizeof(tree.site, mkLit(tree.site, t.declaration(''), None))
@expression_dispatcher
def expr_new(tree, location, scope):
- [t, count] = tree.args
+ [spec, t, count] = tree.args
(struct_defs, t) = eval_type(t, tree.site, location, scope)
+ if t.is_raii and spec != 'enriched':
+ report(ENEWRAII(tree.site, t.describe(),
+ 'new' + f'<{spec}>'*(spec is not None)))
+ spec = 'enriched'
+
for (site, _) in struct_defs:
report(EANONSTRUCT(site, "'new' expression"))
if count:
count = codegen_expression(count, location, scope)
- return mkNew(tree.site, t, count)
+ if spec == 'enriched':
+ return mkNew(tree.site, t, count)
+ else:
+ assert spec is None or spec == 'extern'
+ return mkNewExtern(tree.site, t, count)
@expression_dispatcher
def expr_apply(tree, location, scope):
@@ -1312,24 +1757,14 @@ def expr_list(tree, location, scope):
@expression_dispatcher
def expr_cast(tree, location, scope):
- [expr_ast, casttype] = tree.args
- expr = codegen_expression_maybe_nonvalue(expr_ast, location, scope)
+ [init_ast, casttype] = tree.args
(struct_defs, type) = eval_type(casttype, tree.site, location, scope)
for (site, _) in struct_defs:
report(EANONSTRUCT(site, "'cast' expression"))
- if (dml.globals.compat_dml12 and dml.globals.api_version <= "6"
- and isinstance(expr, InterfaceMethodRef)):
- # Workaround for bug 24144
- return mkLit(tree.site, "%s->%s" % (
- expr.node_expr.read(), expr.method_name), type)
- if isinstance(expr, NonValue) and (
- not isinstance(expr, NodeRef)
- or not isinstance(safe_realtype(type), TTrait)):
- raise expr.exc()
- else:
- return mkCast(tree.site, expr, type)
+ return eval_initializer(tree.site, type, init_ast, location, scope, False,
+ cast_init=True).as_expr(type)
@expression_dispatcher
def expr_undefined(tree, location, scope):
@@ -1429,6 +1864,8 @@ def fix_printf(fmt, args, argsites, site):
TPtr(TNamed('char',
const=True)))),
[mkAddressOf(site, arg)])
+ elif isinstance(argtype, TString):
+ arg = mkStringCStrApply(site, arg)
filtered_fmt += "%" + flags + width + precision + length + conversion
filtered_args.append(arg)
@@ -1551,6 +1988,12 @@ def eval_type(asttype, site, location, scope, extern=False, typename=None,
msg_comp_types.append(msg_comp_type)
struct_defs.extend(msg_comp_struct_defs)
etype = THook(msg_comp_types)
+ elif tag == 'vect':
+ (bsite, btype_ast) = info
+ (local_struct_defs, btype) = eval_type(
+ btype_ast, bsite, location, scope, extern)
+ struct_defs.extend(local_struct_defs)
+ etype = TVector(btype)
else:
raise ICE(site, "Strange type")
elif isinstance(asttype[0], str):
@@ -1575,7 +2018,7 @@ def eval_type(asttype, site, location, scope, extern=False, typename=None,
elif asttype[0] == 'vect':
if etype.void:
raise EVOID(site)
- etype = TVector(etype)
+ etype = TVectorLegacy(etype)
asttype = asttype[1:]
elif asttype[0] == 'array':
if etype.void:
@@ -1780,7 +2223,8 @@ def mk_bitfield_compound_initializer_expr(site, etype, inits, location, scope,
return source_for_assignment(site, etype, bitfields_expr)
-def eval_initializer(site, etype, astinit, location, scope, static):
+def eval_initializer(site, etype, astinit, location, scope, static,
+ cast_init=False):
"""Deconstruct an AST for an initializer, and return a
corresponding initializer object. Report EDATAINIT errors upon
invalid initializers.
@@ -1798,12 +2242,22 @@ def eval_initializer(site, etype, astinit, location, scope, static):
def do_eval(etype, astinit):
shallow_real_etype = safe_realtype_shallow(etype)
if astinit.kind == 'initializer_scalar':
- expr = codegen_expression(astinit.args[0], location, scope)
- if static and not expr.constant:
+ expr = codegen_expression_maybe_nonvalue(astinit.args[0], location,
+ scope)
+ if (isinstance(expr, StringConstant)
+ and isinstance(shallow_real_etype, TString)):
+ expr = FromCString(astinit.site, expr)
+ elif cast_init:
+ assert not static
+ expr = mkCast(astinit.site, expr, etype)
+ elif isinstance(expr, NonValue):
+ raise expr.exc()
+ elif static and not expr.constant:
raise EDATAINIT(astinit.site, 'non-constant expression')
+ else:
+ expr = source_for_assignment(astinit.site, etype, expr)
- return ExpressionInitializer(
- source_for_assignment(astinit.site, etype, expr))
+ return ExpressionInitializer(expr)
elif astinit.kind == 'initializer_designated_struct':
(init_asts, allow_partial) = astinit.args
if isinstance(shallow_real_etype, StructType):
@@ -1854,6 +2308,12 @@ def do_eval(etype, astinit):
for ((mn, mt), e) in zip(etype.members_qualified,
init_asts)}
return DesignatedStructInitializer(site, init)
+ elif isinstance(etype, TVector):
+ return ExpressionInitializer(
+ VectorCompoundLiteral(
+ site,
+ tuple(do_eval(etype.base, e) for e in init_asts),
+ etype.base))
elif etype.is_int and etype.is_bitfields:
if len(etype.members) != len(init_asts):
raise EDATAINIT(site, 'mismatched number of fields')
@@ -1896,11 +2356,12 @@ def get_initializer(site, etype, astinit, location, scope):
return ExpressionInitializer(mkBoolConstant(site, False))
elif typ.is_float:
return ExpressionInitializer(mkFloatConstant(site, 0.0))
- elif isinstance(typ, (TStruct, TExternStruct, TArray, TTrait, THook)):
- return MemsetInitializer(site)
+ elif isinstance(typ, (TStruct, TExternStruct, TArray, TTrait, THook,
+ TString, TVector)):
+ return MemsetInitializer(site, ignore_raii=True)
elif isinstance(typ, TPtr):
return ExpressionInitializer(mkLit(site, 'NULL', typ))
- elif isinstance(typ, TVector):
+ elif isinstance(typ, TVectorLegacy):
return ExpressionInitializer(mkLit(site, 'VNULL', typ))
elif isinstance(typ, TFunction):
raise EVARTYPE(site, etype.describe())
@@ -1919,8 +2380,12 @@ def codegen_statements(trees, *args):
report(e)
return stmts
+
def codegen_statement(tree, *args):
- return mkCompound(tree.site, codegen_statements([tree], *args))
+ with RAIIScope() as raii_scope:
+ stmts = codegen_statements([tree], *args)
+
+ return mkCompoundRAII(tree.site, stmts, raii_scope)
@statement_dispatcher
def stmt_compound(stmt, location, scope):
@@ -1940,8 +2405,10 @@ def stmt_compound(stmt, location, scope):
dmlparse.start_site(rh.site),
ret.site))
lscope = Symtab(scope)
- statements = codegen_statements(stmt_asts, location, lscope)
- return [mkCompound(stmt.site, declarations(lscope) + statements)]
+ with RAIIScope() as raii_scope:
+ statements = codegen_statements(stmt_asts, location, lscope)
+ return [mkCompoundRAII(stmt.site, declarations(lscope) + statements,
+ raii_scope)]
def check_shadowing(scope, name, site):
if (dml.globals.dml_version == (1, 2)
@@ -1983,6 +2450,10 @@ def convert_decl(decl_ast):
check_varname(stmt.site, name)
(struct_decls, etype) = eval_type(asttype, stmt.site, location, scope)
stmts.extend(mkStructDefinition(site, t) for (site, t) in struct_decls)
+ for (site, t) in struct_decls:
+ if t.is_raii:
+ raise EANONRAIISTRUCT(site)
+
etype = etype.resolve()
rt = safe_realtype_shallow(etype)
if isinstance(rt, TArray) and not rt.size.constant and deep_const(rt):
@@ -2004,14 +2475,16 @@ def mk_sym(name, typ, mkunique=not dml.globals.debuggable):
for (name, typ) in decls:
sym = mk_sym(name, typ)
tgt_typ = safe_realtype_shallow(typ)
- if tgt_typ.const:
- nonconst_typ = tgt_typ.clone()
- nonconst_typ.const = False
+ if shallow_const(tgt_typ):
+ nonconst_typ = safe_realtype_unconst(tgt_typ)
tgt_sym = mk_sym('_tmp_' + name, nonconst_typ, True)
sym.init = ExpressionInitializer(mkLocalVariable(stmt.site,
tgt_sym))
late_declared_syms.append(sym)
else:
+ if tgt_typ.is_raii:
+ sym.init = get_initializer(stmt.site, typ, None, location,
+ scope)
tgt_sym = sym
syms_to_add.append(sym)
tgt_syms.append(tgt_sym)
@@ -2023,9 +2496,9 @@ def mk_sym(name, typ, mkunique=not dml.globals.debuggable):
if method_invocation is not None and stmt.site.dml_version != (1, 2):
for sym in syms_to_add:
scope.add(sym)
- stmts.extend(sym_declaration(sym, True) for sym in tgt_syms)
+ stmts.extend(sym_declaration(sym, unused=True) for sym in tgt_syms)
stmts.append(method_invocation)
- stmts.extend(sym_declaration(sym, True)
+ stmts.extend(sym_declaration(sym, unused=True)
for sym in late_declared_syms)
else:
if len(tgts) != 1:
@@ -2035,7 +2508,7 @@ def mk_sym(name, typ, mkunique=not dml.globals.debuggable):
sym.init = ExpressionInitializer(
codegen_expression(inits[0].args[0], location, scope))
scope.add(sym)
- stmts.append(sym_declaration(sym, True))
+ stmts.append(sym_declaration(sym, unused=True))
else:
# Initializer evaluation and variable declarations are done in separate
# passes in order to prevent the newly declared variables from being in
@@ -2080,8 +2553,9 @@ def stmt_session(stmt, location, scope):
add_late_global_struct_defs(struct_decls)
if init:
try:
- init = eval_initializer(
- stmt.site, etype, init, location, global_scope, True)
+ with ctree.SessionRAIIScope():
+ init = eval_initializer(
+ stmt.site, etype, init, location, global_scope, True)
except DMLError as e:
report(e)
init = None
@@ -2122,8 +2596,8 @@ def make_static_var(site, location, static_sym_type, name, init=None,
with init_code:
if deep_const(static_sym_type):
coverity_marker('store_writes_const_field', 'FALSE')
- init.assign_to(mkStaticVariable(site, static_sym),
- static_sym_type)
+ out(init.assign_to(mkStaticVariable(site, static_sym).read(),
+ static_sym_type) + ';\n')
c_init = init_code.buf
else:
c_init = None
@@ -2264,8 +2738,7 @@ def try_codegen_invocation(site, init_ast, outargs, location, scope):
meth_expr = codegen_expression_maybe_nonvalue(meth_ast, location, scope)
if (isinstance(meth_expr, NonValue)
and not isinstance(meth_expr, (
- TraitMethodRef, NodeRef, InterfaceMethodRef, HookSendNowRef,
- HookSendRef))):
+ TraitMethodRef, NodeRef, InterfaceMethodRef, PseudoMethodRef))):
raise meth_expr.exc()
if isinstance(meth_expr, TraitMethodRef):
if not meth_expr.throws and len(meth_expr.outp) <= 1:
@@ -2373,8 +2846,8 @@ def stmt_assign(stmt, location, scope):
name, type=tgt.ctype(), site=tgt.site, init=init, stmt=True)
init = ExpressionInitializer(mkLocalVariable(tgt.site, sym))
stmts.extend([sym_declaration(sym),
- mkAssignStatement(tgt.site, tgt, init)])
- return stmts + [mkAssignStatement(tgts[0].site, tgts[0], init)]
+ AssignStatement(tgt.site, tgt, init)])
+ return stmts + [AssignStatement(tgts[0].site, tgts[0], init)]
else:
# Guaranteed by grammar
assert tgt_ast.kind == 'assign_target_tuple' and len(tgts) > 1
@@ -2403,7 +2876,7 @@ def stmt_assign(stmt, location, scope):
stmts.extend(map(sym_declaration, syms))
stmts.extend(
- mkAssignStatement(
+ AssignStatement(
tgt.site, tgt, ExpressionInitializer(mkLocalVariable(tgt.site,
sym)))
for (tgt, sym) in zip(tgts, syms))
@@ -2411,32 +2884,98 @@ def stmt_assign(stmt, location, scope):
@statement_dispatcher
def stmt_assignop(stmt, location, scope):
- (kind, site, tgt_ast, op, src_ast) = stmt
+ (_, site, tgt_ast, op, src_ast) = stmt
tgt = codegen_expression(tgt_ast, location, scope)
- if deep_const(tgt.ctype()):
- raise ECONST(tgt.site)
- if isinstance(tgt, ctree.BitSlice):
- # destructive hack
- return stmt_assign(
- ast.assign(site, ast.assign_target_chain(site, [tgt_ast]),
- [ast.initializer_scalar(
- site,
- ast.binop(site, tgt_ast, op[:-1], src_ast))]),
- location, scope)
- src = codegen_expression(src_ast, location, scope)
+ if isinstance(tgt, ctree.InlinedParam):
+ raise EASSINL(tgt.site, tgt.name)
+ if not tgt.writable:
+ raise EASSIGN(site, tgt)
+
ttype = tgt.ctype()
+ if deep_const(ttype):
+ raise ECONST(tgt.site)
+
lscope = Symtab(scope)
- sym = lscope.add_variable(
- 'tmp', type = TPtr(ttype), site = tgt.site,
- init = ExpressionInitializer(mkAddressOf(tgt.site, tgt)), stmt=True)
- # Side-Effect Free representation of the tgt lvalue
- tgt_sef = mkDereference(site, mkLocalVariable(tgt.site, sym))
- return [
- sym_declaration(sym), mkExpressionStatement(
- site,
- mkAssignOp(site, tgt_sef, arith_binops[op[:-1]](
- site, tgt_sef, src)))]
+ tmp_ret_sym = lscope.add_variable(
+ '_tmp_ret', type = ttype, site = src_ast.site,
+ stmt=True)
+
+ if ttype.is_raii:
+ tmp_ret_sym.init = get_initializer(site, ttype, None, location, scope)
+
+ # TODO(RAII): Not ideal... 'p += m();' where m is a throwing method
+ # returning an integer and p is a pointer will fail.
+ method_invocation = try_codegen_invocation(
+ site, [src_ast], [mkLocalVariable(tgt.site, tmp_ret_sym)], location,
+ scope)
+ if method_invocation:
+ src = OrphanWrap(site, mkLocalVariable(tmp_ret_sym.site, tmp_ret_sym))
+ else:
+ # Only use eval_initializer if we have to. This is because the RHS
+ # of a binary operator need not be compatible with the LHS, such as
+ # 'p += 4', where 'p' is a pointer.
+ # HACK/TODO(RAII): scalar initializers may receive special treatment
+ # by eval_initializer. Currently, that only applies to string literals,
+ # which are valid initializers for TString. This one case gets handled
+ # by mkStringAppend. However, one could imagine additional cases, with
+ # which one may use a binary operator with, could be added in the
+ # future.
+ if src_ast.kind != 'initializer_scalar':
+ src = (eval_initializer(site, ttype, src_ast, location, scope,
+ False).as_expr(ttype))
+ else:
+ src = codegen_expression(src_ast.args[0], location, scope)
+
+ operation = None
+ # TODO(RAII) this is somewhat hacky.
+ if op == '+=':
+ real_ttype = safe_realtype_shallow(ttype)
+ if isinstance(real_ttype, TString):
+ operation = lambda: [mkStringAppend(site, tgt, src)]
+ if isinstance(real_ttype, TVector):
+ operation = lambda: [mkVectorAppend(site, tgt, src)]
+
+ if operation is None:
+ if tgt.addressable:
+ tmp_tgt_sym = lscope.add_variable(
+ '_tmp_tgt', type = TPtr(ttype), site = tgt.site,
+ init = ExpressionInitializer(mkAddressOf(tgt.site, tgt)),
+ stmt=True)
+ # Side-Effect Free representation of the tgt lvalue
+ tgt = mkDereference(site, mkLocalVariable(tgt.site, tmp_tgt_sym))
+ else:
+ # TODO(RAII) Not ideal. This path is needed to deal with writable
+ # expressions that do not correspond to C lvalues; such as bit
+ # slices or .len of vectors and stirngs.
+ # But the potentially repeated evaluation is painful, (though
+ # that's not a regression compared to how bit slices used to be
+ # managed).
+ # TODO(RAII) Also consider if tgt.adressable should be relaxed to
+ # tgt.c_lval. Other than the considerations of whether that would
+ # be safe, it would also mean mean mkAddressOf can't be used,
+ # which has some ramifications as it has a few layers of expression
+ # adjustment logic to it.
+ tmp_tgt_sym = None
+ def operation():
+ tmp_tgt_decl = ([sym_declaration(tmp_tgt_sym)]
+ if tmp_tgt_sym is not None else [])
+ assign_src = source_for_assignment(site, ttype,
+ arith_binops[op[:-1]](site,
+ tgt, src))
+
+ return (tmp_tgt_decl
+ + [mkExpressionStatement(site,
+ ctree.AssignOp(site, tgt,
+ assign_src))])
+
+
+ with RAIIScope() as raii_scope:
+ method_invocation = ([sym_declaration(tmp_ret_sym, unscoped_raii=True),
+ method_invocation]
+ if method_invocation is not None else [])
+ body = method_invocation + operation()
+ return [mkCompoundRAII(site, body, raii_scope)]
@statement_dispatcher
def stmt_expression(stmt, location, scope):
@@ -2577,9 +3116,22 @@ def stmt_default(stmt, location, scope):
@statement_dispatcher
def stmt_delete(stmt, location, scope):
- [expr] = stmt.args
+ [spec, expr] = stmt.args
expr = codegen_expression(expr, location, scope)
- return [mkDelete(stmt.site, expr)]
+ etype = safe_realtype_shallow(expr.ctype())
+ if not isinstance(etype, TPtr):
+ raise ENOPTR(stmt.site, expr)
+ if etype.base.is_raii and spec != 'enriched':
+ report(EDELETERAII(stmt.site,
+ etype.base.describe(),
+ 'delete' + f'<{spec}>'*(spec is not None)))
+ spec = 'enriched'
+
+ if spec == 'enriched':
+ return [mkDelete(stmt.site, expr)]
+ else:
+ assert spec is None or spec == 'extern'
+ return [mkDeleteExtern(stmt.site, expr)]
def probable_loggroups_specification(expr):
subexprs = [expr]
@@ -3043,11 +3595,11 @@ def stmt_select(stmt, location, scope):
if_chain = mkIf(cond.site, cond, stmt, if_chain)
return [if_chain]
raise lst.exc()
- elif dml.globals.compat_dml12 and isinstance(lst.ctype(), TVector):
+ elif dml.globals.compat_dml12 and isinstance(lst.ctype(), TVectorLegacy):
itervar = lookup_var(stmt.site, scope, itername)
if not itervar:
raise EIDENT(stmt.site, itername)
- return [mkVectorForeach(stmt.site,
+ return [mkLegacyVectorForeach(stmt.site,
lst, itervar,
codegen_statement(stmt_ast, location, scope))]
else:
@@ -3060,14 +3612,32 @@ def foreach_each_in(site, itername, trait, each_in,
inner_scope.add_variable(
itername, type=trait_type, site=site,
init=ForeachSequence.itervar_initializer(site, trait))
- context = GotoLoopContext()
- with context:
- inner_body = mkCompound(site, declarations(inner_scope)
- + codegen_statements([body_ast], location, inner_scope))
+ with GotoLoopContext() as loopcontext, RAIIScope() as raii_scope:
+ body = (declarations(inner_scope)
+ + codegen_statements([body_ast], location, inner_scope))
+ inner_body = mkCompoundRAII(site, body, raii_scope)
- break_label = context.label if context.used else None
+ break_label = loopcontext.label if loopcontext.used else None
return [mkForeachSequence(site, trait, each_in, inner_body, break_label)]
+def foreach_vector(site, itername, vect_realtype, vect, body_ast, location,
+ scope):
+ if not vect.addressable:
+ raise ERVAL(vect, 'foreach')
+
+ uniq = ForeachVector.count
+ ForeachVector.count += 1
+ inner_scope = Symtab(scope)
+ iter_typ = conv_const(vect_realtype.const, vect_realtype.base)
+ inner_scope.add(
+ ExpressionSymbol(itername,
+ ForeachVectorIterRef(site, itername, uniq,
+ vect.writable, iter_typ),
+ site))
+ with CLoopContext():
+ body = codegen_statement(body_ast, location, inner_scope)
+ return [mkForeachVector(site, vect, uniq, body)]
+
@expression_dispatcher
def expr_each_in(ast, location, scope):
(traitname, node_ast) = ast.args
@@ -3091,13 +3661,14 @@ def stmt_foreach_dml12(stmt, location, scope):
statement, location, scope)
list_type = safe_realtype(lst.ctype())
- if isinstance(list_type, TVector):
+ if isinstance(list_type, TVectorLegacy):
itervar = lookup_var(stmt.site, scope, itername)
if not itervar:
raise EIDENT(lst, itername)
with CLoopContext():
- res = mkVectorForeach(stmt.site, lst, itervar,
- codegen_statement(statement, location, scope))
+ res = mkLegacyVectorForeach(stmt.site, lst, itervar,
+ codegen_statement(statement, location,
+ scope))
return [res]
else:
raise ENLST(stmt.site, lst)
@@ -3113,6 +3684,9 @@ def stmt_foreach(stmt, location, scope):
# .traitname was validated by safe_realtype()
dml.globals.traits[list_type.traitname],
lst, statement, location, scope)
+ elif isinstance(list_type, TVector):
+ return foreach_vector(
+ stmt.site, itername, list_type, lst, statement, location, scope)
else:
raise ENLST(stmt.site, lst)
@@ -3211,10 +3785,8 @@ def stmt_switch(stmt, location, scope):
assert body_ast.kind == 'compound'
[stmt_asts] = body_ast.args
stmts = codegen_statements(stmt_asts, location, scope)
- if (not stmts
- or not isinstance(stmts[0], (ctree.Case, ctree.Default))):
- raise ESWITCH(
- body_ast.site, "statement before first case label")
+ if not stmts:
+ raise ESWITCH(body_ast.site, "empty switch statement")
defaults = [i for (i, sub) in enumerate(stmts)
if isinstance(sub, ctree.Default)]
if len(defaults) > 1:
@@ -3256,13 +3828,10 @@ def stmt_switch(stmt, location, scope):
@statement_dispatcher
def stmt_continue(stmt, location, scope):
- if (LoopContext.current is None
- or isinstance(LoopContext.current, NoLoopContext)):
+ if LoopContext.current is None:
raise ECONT(stmt.site)
- elif isinstance(LoopContext.current, CLoopContext):
- return [mkContinue(stmt.site)]
else:
- raise ECONTU(stmt.site)
+ return LoopContext.current.continue_(stmt.site)
@statement_dispatcher
def stmt_break(stmt, location, scope):
@@ -3308,7 +3877,8 @@ def mkcall_method(site, func, indices):
else [mkLit(site, dev(site),
TDevice(crep.structtype(dml.globals.device)))])
return lambda args: mkApply(
- site, func.cfunc_expr(site), devarg + list(indices) + args)
+ site, func.cfunc_expr(site), devarg + list(indices) + args,
+ orphan=not func.memoized)
def common_inline(site, method, indices, inargs, outargs):
if not verify_args(site, method.inp, method.outp, inargs, outargs):
@@ -3341,7 +3911,8 @@ def common_inline(site, method, indices, inargs, outargs):
return codegen_call_stmt(site, func.method.name,
mkcall_method(site, func, indices),
- inp, func.outp, func.throws, inargs, outargs)
+ inp, func.outp, func.throws, inargs, outargs,
+ move_rets=not func.memoized)
if not method.independent:
crep.require_dev(site)
@@ -3514,7 +4085,8 @@ def codegen_inline(site, meth_node, indices, inargs, outargs,
# TODO: in python 3.10 we can use parentheses instead of \
with RecursiveInlineGuard(site, meth_node), \
ErrorContext(meth_node, site), \
- contextlib.nullcontext() if meth_node.throws else NoFailure(site):
+ contextlib.nullcontext() if meth_node.throws else NoFailure(site), \
+ RAIIScope() as raii_scope:
param_scope = MethodParamScope(global_scope)
param_scope.add(meth_node.default_method.default_sym(indices))
@@ -3582,7 +4154,7 @@ def codegen_inline(site, meth_node, indices, inargs, outargs,
parmtype if parmtype else arg.ctype(),
meth_node.name)
for (arg, var, (parmname, parmtype)) in zip(
- outargs, outvars, meth_node.outp)]
+ outargs, outvars, meth_node.outp)]
exit_handler = GotoExit_dml12()
with exit_handler:
code = [codegen_statement(meth_node.astcode,
@@ -3591,8 +4163,7 @@ def codegen_inline(site, meth_node, indices, inargs, outargs,
if exit_handler.used:
code.append(mkLabel(site, exit_handler.label))
code.extend(copyout)
- body = mkCompound(site, declarations(param_scope) + code)
- return mkInlinedMethod(site, meth_node, body)
+ code = declarations(param_scope) + code
else:
assert meth_node.astcode.kind == 'compound'
[subs] = meth_node.astcode.args
@@ -3602,10 +4173,13 @@ def codegen_inline(site, meth_node, indices, inargs, outargs,
code = codegen_statements(subs, location, param_scope)
if exit_handler.used:
code.append(mkLabel(site, exit_handler.label))
- body = mkCompound(site, declarations(param_scope) + code)
- if meth_node.outp and body.control_flow().fallthrough:
+ code = declarations(param_scope) + code
+ if (meth_node.outp
+ and mkCompound(site, code).control_flow().fallthrough):
report(ENORET(meth_node.astcode.site))
- return mkInlinedMethod(site, meth_node, body)
+
+ return mkInlinedMethod(site, meth_node, mkCompoundRAII(site, code,
+ raii_scope))
def c_rettype(outp, throws):
if throws:
@@ -3753,7 +4327,7 @@ def codegen_method_func(func):
intercepted = intercepted_method(method)
if intercepted:
assert method.throws
- with crep.DeviceInstanceContext():
+ with UnusedMethodRAIIScope(), crep.DeviceInstanceContext():
return intercepted(
method.parent, indices,
[mkLit(method.site, n, t) for (n, t) in func.inp],
@@ -3793,12 +4367,31 @@ def codegen_return(site, outp, throws, retvals):
# no fall-through
return mkAssert(site, mkBoolConstant(site, False))
if throws:
- ret = mkReturn(site, mkBoolConstant(site, False))
+ def mk_ret_with_prelude(stmts):
+ return mkCompound(site,
+ stmts
+ + [mkReturn(site, mkBoolConstant(site, False))])
elif outp:
(_, t) = outp[0]
- ret = mkReturn(site, retvals[0], t)
+ if t.is_raii:
+ def mk_ret_with_prelude(stmts):
+ scope = Symtab(global_scope)
+ sym = scope.add_variable(
+ 'ret', t, ExpressionInitializer(retvals[0]), site, True,
+ True)
+ return mkCompound(
+ site,
+ [sym_declaration(sym, unscoped_raii=True)]
+ + stmts
+ + [mkReturn(site, mkLocalVariable(site, sym), t)])
+ else:
+ def mk_ret_with_prelude(stmts):
+ return mkCompound(site,
+ stmts + [mkReturn(site, retvals[0], t)])
else:
- ret = mkReturn(site, None)
+ def mk_ret_with_prelude(stmts):
+ return mkCompound(site,
+ stmts + [mkReturn(site, None)])
stmts = []
return_first_outarg = bool(not throws and outp)
for ((name, typ), val) in itertools.islice(
@@ -3810,13 +4403,14 @@ def codegen_return(site, outp, throws, retvals):
assert val.rh.str == name
continue
stmts.append(mkCopyData(site, val, mkLit(site, "*%s" % (name,), typ)))
- stmts.append(ret)
- return mkCompound(site, stmts)
+ stmts.append(mkRAIIScopeClears(site, RAIIScope.scope_stack()))
+ return mk_ret_with_prelude(stmts)
def codegen_method(site, inp, outp, throws, independent, memoization, ast,
default, location, fnscope, rbrace_site):
with (crep.DeviceInstanceContext() if not independent
- else contextlib.nullcontext()):
+ else contextlib.nullcontext()), \
+ MethodRAIIScope() as raii_scope:
for (arg, etype) in inp:
fnscope.add_variable(arg, type=etype, site=site, make_unique=False)
initializers = [get_initializer(site, parmtype, None, None, None)
@@ -3838,18 +4432,18 @@ def prelude():
else ReturnExit(outp, throws))
if ast.site.dml_version() == (1, 2):
+ body = []
if throws:
# Declare and initialize one variable for each output
# parameter. We cannot write to the output parameters
# directly, because they should be left untouched if an
# exception is thrown.
- code = []
for ((varname, parmtype), init) in zip(outp, initializers):
sym = fnscope.add_variable(
varname, type=parmtype, init=init, make_unique=True,
site=ast.site)
sym.incref()
- code.append(sym_declaration(sym))
+ body.append(sym_declaration(sym))
else:
if outp:
# pass first out argument as return value
@@ -3858,23 +4452,27 @@ def prelude():
init=initializers[0],
make_unique=False)
sym.incref()
- code = [sym_declaration(sym)]
+ body.append(sym_declaration(sym))
for ((name, typ), init) in zip(outp[1:], initializers[1:]):
# remaining output arguments pass-by-pointer
param = mkDereference(site,
mkLit(site, name, TPtr(typ)))
fnscope.add(ExpressionSymbol(name, param, site))
- code.append(mkAssignStatement(site, param, init))
+ body.append(AssignStatement(site, param, init))
else:
- code = []
+ body = []
+ body.append(mkRAIIScopeDeclarations(site, raii_scope))
+ body.append(mkRAIIScopeBindLVals(
+ site, raii_scope,
+ tuple(lookup_var(site, fnscope, varname)
+ for (varname, t) in inp if t.is_raii)))
with fail_handler, exit_handler:
- code.append(codegen_statement(ast, location, fnscope))
+ body.append(codegen_statement(ast, location, fnscope))
if exit_handler.used:
- code.append(mkLabel(site, exit_handler.label))
- code.append(codegen_return(site, outp, throws, [
+ body.append(mkLabel(site, exit_handler.label))
+ body.append(codegen_return(site, outp, throws, [
lookup_var(site, fnscope, varname) for (varname, _) in outp]))
- to_return = mkCompound(site, code)
else:
# manually deconstruct compound AST node, to make sure
# top-level locals share scope with parameters
@@ -3882,16 +4480,18 @@ def prelude():
[subs] = ast.args
with fail_handler, exit_handler:
body = prelude()
+ body.append(mkRAIIScopeDeclarations(site, raii_scope))
+ body.append(mkRAIIScopeBindLVals(
+ site, raii_scope,
+ tuple(lookup_var(site, fnscope, varname)
+ for (varname, t) in inp if t.is_raii)))
body.extend(codegen_statements(subs, location, fnscope))
- code = mkCompound(site, body)
- if code.control_flow().fallthrough:
+ if mkCompound(site, body).control_flow().fallthrough:
if outp:
report(ENORET(site))
else:
- code = mkCompound(site, body + [codegen_exit(site,
- [])])
- to_return = code
- return to_return
+ body.append(codegen_exit(site, []))
+ return mkCompound(site, body)
# Keep track of methods that we need to generate code for
def mark_method_referenced(func):
@@ -3960,7 +4560,8 @@ def mkcall(args):
args = [coerce_if_eint(arg) for arg in args]
return expr.call_expr(args, rettype)
return codegen_call_stmt(site, str(expr), mkcall, expr.inp, expr.outp,
- expr.throws, inargs, outargs)
+ expr.throws, inargs, outargs,
+ move_rets=not expr.memoized)
def codegen_call(site, meth_node, indices, inargs, outargs):
'''Generate a call using a direct reference to the method node'''
@@ -3978,7 +4579,8 @@ def codegen_call(site, meth_node, indices, inargs, outargs):
mark_method_referenced(func)
return codegen_call_stmt(site, func.method.name,
mkcall_method(site, func, indices),
- func.inp, func.outp, func.throws, inargs, outargs)
+ func.inp, func.outp, func.throws, inargs, outargs,
+ move_rets=not func.memoized)
def codegen_call_byname(site, node, indices, meth_name, inargs, outargs):
'''Generate a call using the parent node and indices, plus the method
@@ -3991,7 +4593,7 @@ def codegen_call_byname(site, node, indices, meth_name, inargs, outargs):
raise UnknownMethod(node, meth_name)
return codegen_call(site, meth_node, indices, inargs, outargs)
-def copy_outarg(arg, var, parmname, parmtype, method_name):
+def copy_outarg(arg, var, parmname, parmtype, method_name, move_outvar=True):
'''Type-check the output argument 'arg', and create a local
variable with that type in scope 'callscope'. The address of this
variable will be passed as an output argument to the C function
@@ -4002,17 +4604,22 @@ def copy_outarg(arg, var, parmname, parmtype, method_name):
an exception. We would be able to skip the proxy variable for
calls to non-throwing methods when arg.ctype() and parmtype are
equivalent types, but we don't do this today.'''
- argtype = arg.ctype()
-
- if not argtype:
- raise ICE(arg.site, "unknown expression type")
+ if isinstance(arg, NonValue):
+ if not arg.writable:
+ raise arg.exc()
else:
- ok, trunc, constviol = realtype(parmtype).canstore(realtype(argtype))
- if not ok:
- raise EARGT(arg.site, 'call', method_name,
- arg.ctype(), parmname, parmtype, 'output')
+ argtype = arg.ctype()
- return mkCopyData(var.site, var, arg)
+ if not argtype:
+ raise ICE(arg.site, "unknown expression type")
+ else:
+ ok, trunc, constviol = realtype(parmtype).canstore(realtype(argtype))
+ if not ok:
+ raise EARGT(arg.site, 'call', method_name,
+ arg.ctype(), parmname, parmtype, 'output')
+ return mkCopyData(var.site,
+ OrphanWrap(var.site, var) if move_outvar else var,
+ arg)
def add_proxy_outvar(site, parmname, parmtype, callscope):
varname = parmname
@@ -4020,7 +4627,8 @@ def add_proxy_outvar(site, parmname, parmtype, callscope):
sym = callscope.add_variable(varname, type=parmtype, init=varinit, site=site)
return mkLocalVariable(site, sym)
-def codegen_call_stmt(site, name, mkcall, inp, outp, throws, inargs, outargs):
+def codegen_call_stmt(site, name, mkcall, inp, outp, throws, inargs, outargs,
+ move_rets=True):
'''Generate a statement for calling a method'''
if len(outargs) != len(outp):
raise ICE(site, "wrong number of outargs")
@@ -4033,23 +4641,28 @@ def codegen_call_stmt(site, name, mkcall, inp, outp, throws, inargs, outargs):
# an uint8 variable is passed in an uint32 output parameter.
postcode = []
outargs_conv = []
- for (arg, (parmname, parmtype)) in zip(
- outargs[return_first_outarg:], outp[return_first_outarg:]):
- # It would make sense to pass output arguments directly, but
- # the mechanisms to detect whether this is safe are
- # broken. See bug 21900.
- # if (isinstance(arg, (
- # Variable, ctree.Dereference, ctree.ArrayRef, ctree.SubRef))
- # and TPtr(parmtype).canstore(TPtr(arg.ctype()))):
- # outargs_conv.append(mkAddressOf(arg.site, arg))
- # else:
- var = add_proxy_outvar(site, '_ret_' + parmname, parmtype,
- callscope)
- outargs_conv.append(mkAddressOf(var.site, var))
- postcode.append(copy_outarg(arg, var, parmname, parmtype, name))
+ # RAII Scope should never need to be leveraged
+ with UnusedMethodRAIIScope():
+ for (arg, (parmname, parmtype)) in zip(
+ outargs[return_first_outarg:], outp[return_first_outarg:]):
+ # It would make sense to pass output arguments directly, but
+ # the mechanisms to detect whether this is safe are
+ # broken. See bug 21900.
+ # if (isinstance(arg, (
+ # Variable, ctree.Dereference, ctree.ArrayRef,
+ # ctree.SubRef))
+ # and TPtr(parmtype).canstore(TPtr(arg.ctype()))):
+ # outargs_conv.append(mkAddressOf(arg.site, arg))
+ # else:
+ var = add_proxy_outvar(site, '_ret_' + parmname, parmtype,
+ callscope)
+ outargs_conv.append(mkAddressOf(var.site, var))
+ postcode.append(copy_outarg(arg, var, parmname, parmtype, name,
+ move_outvar=move_rets))
typecheck_inargs(site, inargs, inp, 'method')
call_expr = mkcall(list(inargs) + outargs_conv)
+ assert call_expr.orphan == move_rets
if throws:
if not Failure.fail_stack[-1].allowed:
@@ -4061,6 +4674,8 @@ def codegen_call_stmt(site, name, mkcall, inp, outp, throws, inargs, outargs):
else:
call_stmt = mkExpressionStatement(site, call_expr)
- return mkCompound(site, declarations(callscope) + [call_stmt] + postcode)
+ return mkCompound(site,
+ declarations(callscope, unscoped_raii=True)
+ + [call_stmt] + postcode)
ctree.codegen_call_expr = codegen_call_expr
diff --git a/py/dml/ctree-test.h b/py/dml/ctree-test.h
index 1f6918c64..95598ddd2 100644
--- a/py/dml/ctree-test.h
+++ b/py/dml/ctree-test.h
@@ -15,7 +15,7 @@ static void capture_assert_error(int line, const char *file,
static void VT_critical_error(const char *short_msg, const char *long_msg);
#include
-#include
+#include
api_function_t SIM_get_api_function(const char *name) { return NULL; }
diff --git a/py/dml/ctree.py b/py/dml/ctree.py
index 5d9fad091..1e70d3642 100644
--- a/py/dml/ctree.py
+++ b/py/dml/ctree.py
@@ -20,7 +20,8 @@
from .types import *
from .expr import *
from .expr_util import *
-from .slotsmeta import auto_init
+from .slotsmeta import auto_init, SlotsMeta
+from .set import Set
from . import dmlparse, output
import dml.globals
# set from codegen.py
@@ -31,11 +32,11 @@
'source_for_assignment',
'Location',
-
+ 'RAIIScope', 'MethodRAIIScope', 'UnusedMethodRAIIScope',
'ExpressionSymbol',
'LiteralSymbol',
- 'mkCompound',
+ 'mkCompound', 'mkCompoundRAII',
'mkNull', 'Null',
'mkLabel',
'mkUnrolledLoop',
@@ -50,6 +51,7 @@
'mkAssert',
'mkReturn',
'mkDelete',
+ 'mkDeleteExtern',
'mkExpressionStatement',
'mkAfter',
'mkAfterOnHook',
@@ -59,15 +61,26 @@
'mkDoWhile',
'mkFor',
'mkForeachSequence', 'ForeachSequence',
+ 'mkForeachVector', 'ForeachVector', 'ForeachVectorIterRef',
'mkSwitch',
'mkSubsequentCases',
'mkCase',
'mkDefault',
- 'mkVectorForeach',
+ 'mkLegacyVectorForeach',
'mkBreak',
'mkContinue',
- 'mkAssignStatement',
+ 'mkAssignStatement', 'AssignStatement',
'mkCopyData',
+ 'mkRAIIScopeClears',
+ 'mkRAIIScopeDeclarations',
+ 'mkRAIIScopeBindLVals',
+ 'RAIIDupe',
+ 'mkDiscardRef', 'DiscardRef',
+ 'mkStringAppend',
+ 'mkVectorAppend',
+ 'mkStringCStrRef', 'mkStringCStrApply',
+ 'FromCString',
+ 'PseudoMethodRef',
'mkIfExpr', 'IfExpr',
#'BinOp',
#'Test',
@@ -114,6 +127,7 @@
'mkHookSendRef', 'HookSendRef',
'mkHookSendApply', 'HookSendApply',
'mkNew',
+ 'mkNewExtern',
#'Constant',
'mkIntegerConstant', 'IntegerConstant',
'mkIntegerLiteral',
@@ -157,6 +171,7 @@
'mkStructDefinition',
'mkDeclaration',
'mkCText',
+ 'CompoundLiteral', 'VectorCompoundLiteral',
'Initializer', 'ExpressionInitializer', 'CompoundInitializer',
'DesignatedStructInitializer', 'MemsetInitializer',
@@ -290,9 +305,15 @@ class Statement(Code):
def __init__(self, site):
self.site = site
self.context = ErrorContext.current()
+ # Emit a single C statement
@abc.abstractmethod
- def toc(self): pass
- def toc_inline(self): return self.toc()
+ def toc_stmt(self): pass
+ # Emit any number of C statements, but without any guarantee that they are
+ # in a dedicated C block
+ def toc(self): self.toc_stmt()
+ # Emit any number of C statements, with the guarantee that they are in a
+ # dedicated C block
+ def toc_inline(self): self.toc()
def control_flow(self):
'''Rudimentary control flow analysis: Return a ControlFlow object
@@ -314,19 +335,33 @@ def control_flow(self):
example, see test/1.4/errors/T_ENORET_throw_after_break.dml.'''
return ControlFlow(fallthrough=True)
+
class Compound(Statement):
@auto_init
def __init__(self, site, substatements):
assert isinstance(substatements, list)
+ def toc_stmt(self):
+ if all(sub.is_empty for sub in self.substatements):
+ if self.site:
+ self.linemark()
+ out(';\n')
+ else:
+ out('{\n', postindent = 1)
+ self.toc_inline()
+ out('}\n', preindent = -1)
+
def toc(self):
- out('{\n', postindent = 1)
- self.toc_inline()
- out('}\n', preindent = -1)
+ if any(sub.is_declaration for sub in self.substatements):
+ self.toc_stmt()
+ else:
+ self.toc_inline()
def toc_inline(self):
for substatement in self.substatements:
- substatement.toc()
+ if not substatement.is_empty:
+ substatement.toc()
+
def control_flow(self):
acc = ControlFlow(fallthrough=True)
@@ -337,6 +372,18 @@ def control_flow(self):
return acc
return acc
+ @property
+ def is_empty(self):
+ return all(sub.is_empty for sub in self.substatements)
+
+def mkCompoundRAII(site, statements, raii_scope):
+ assert raii_scope.completed
+ body = (([mkRAIIScopeDeclarations(site, raii_scope)]
+ if isinstance(raii_scope, MethodRAIIScope) else [])
+ + statements
+ + [mkRAIIScopeClears(site, [raii_scope])])
+ return mkCompound(site, body)
+
def mkCompound(site, statements):
"Create a simplified Compound() from a list of ctree.Statement"
collapsed = []
@@ -356,9 +403,9 @@ def mkCompound(site, statements):
class Null(Statement):
is_empty = True
- def toc_inline(self):
- pass
def toc(self):
+ pass
+ def toc_stmt(self):
if self.site:
self.linemark()
out(';\n')
@@ -370,7 +417,7 @@ def __init__(self, site, label, unused=False):
Statement.__init__(self, site)
self.label = label
self.unused = unused
- def toc(self):
+ def toc_stmt(self):
out('%s: %s;\n' % (self.label, 'UNUSED'*self.unused), preindent = -1,
postindent = 1)
@@ -380,14 +427,15 @@ class UnrolledLoop(Statement):
@auto_init
def __init__(self, site, substatements, break_label):
assert isinstance(substatements, list)
+ assert all(not sub.is_declaration for sub in substatements)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
out('{\n', postindent = 1)
self.toc_inline()
out('}\n', preindent = -1)
- def toc_inline(self):
+ def toc(self):
for substatement in self.substatements:
substatement.toc()
if self.break_label is not None:
@@ -404,7 +452,7 @@ def control_flow(self):
class Goto(Statement):
@auto_init
def __init__(self, site, label): pass
- def toc(self):
+ def toc_stmt(self):
out('goto %s;\n' % (self.label,))
def control_flow(self):
@@ -435,7 +483,11 @@ class TryCatch(Statement):
block with a catch label, to which Throw statements inside will go.'''
@auto_init
def __init__(self, site, label, tryblock, catchblock): pass
- def toc_inline(self):
+ def toc_stmt(self):
+ out('{\n', postindent = 1)
+ self.toc()
+ out('}\n', preindent = -1)
+ def toc(self):
self.tryblock.toc()
if (dml.globals.dml_version != (1, 2)
@@ -449,10 +501,6 @@ def toc_inline(self):
out('%s: ;\n' % (self.label,), postindent=1)
self.catchblock.toc_inline()
out('}\n', preindent=-1)
- def toc(self):
- out('{\n', postindent = 1)
- self.toc_inline()
- out('}\n', preindent = -1)
def control_flow(self):
tryflow = self.tryblock.control_flow()
if not tryflow.throw:
@@ -469,7 +517,7 @@ def mkTryCatch(site, label, tryblock, catchblock):
class Inline(Statement):
@auto_init
def __init__(self, site, str): pass
- def toc(self):
+ def toc_stmt(self):
out(self.str + '\n')
mkInline = Inline
@@ -478,6 +526,8 @@ class InlinedMethod(Statement):
'''Wraps the body of an inlined method, to protect it from analysis'''
@auto_init
def __init__(self, site, method, body): pass
+ def toc_stmt(self):
+ self.body.toc_stmt()
def toc(self):
self.body.toc()
def toc_inline(self):
@@ -490,7 +540,7 @@ def control_flow(self):
class Comment(Statement):
@auto_init
def __init__(self, site, str): pass
- def toc(self):
+ def toc_stmt(self):
# self.linemark()
out('/* %s */\n' % self.str)
@@ -499,7 +549,7 @@ def toc(self):
class Assert(Statement):
@auto_init
def __init__(self, site, expr): pass
- def toc(self):
+ def toc_stmt(self):
out('DML_ASSERT("%s", %d, %s);\n'
% (quote_filename(self.site.filename()),
self.site.lineno, self.expr.read()))
@@ -513,7 +563,7 @@ def mkAssert(site, expr):
class Return(Statement):
@auto_init
def __init__(self, site, expr): pass
- def toc(self):
+ def toc_stmt(self):
self.linemark()
if self.expr is None:
out('return;\n')
@@ -530,16 +580,25 @@ def mkReturn(site, expr, rettype=None):
class Delete(Statement):
@auto_init
def __init__(self, site, expr): pass
- def toc(self):
- out('MM_FREE(%s);\n' % self.expr.read())
+ def toc_stmt(self):
+ self.linemark()
+ out(f'DML_DELETE({self.expr.read()});\n')
+
+mkDelete = Delete
+
+class DeleteExtern(Statement):
+ @auto_init
+ def __init__(self, site, expr): pass
+ def toc_stmt(self):
+ self.linemark()
+ out(f'MM_FREE({self.expr.read()});\n')
-def mkDelete(site, expr):
- return Delete(site, expr)
+mkDeleteExtern = DeleteExtern
class ExpressionStatement(Statement):
@auto_init
def __init__(self, site, expr): pass
- def toc(self):
+ def toc_stmt(self):
#if not self.site:
# print 'NOSITE', str(self), repr(self)
self.linemark()
@@ -556,15 +615,19 @@ def mkExpressionStatement(site, expr):
def toc_constsafe_pointer_assignment(site, source, target, typ):
target_val = mkDereference(site,
Cast(site, mkLit(site, target, TPtr(void)), TPtr(typ)))
- mkAssignStatement(site, target_val,
- ExpressionInitializer(mkLit(site, source, typ))).toc()
+
+ init = ExpressionInitializer(
+ source_for_assignment(site, typ, mkLit(site, source, typ)),
+ ignore_raii=True)
+
+ return AssignStatement(site, target_val, init).toc()
class After(Statement):
@auto_init
def __init__(self, site, unit, delay, domains, info, indices,
args_init):
crep.require_dev(site)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
objarg = '&_dev->obj'
out(f'if (SIM_object_clock({objarg}) == NULL)\n', postindent=1)
@@ -622,7 +685,7 @@ class AfterOnHook(Statement):
def __init__(self, site, domains, hookref_expr, info, indices,
args_init):
crep.require_dev(site)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
hookref = resolve_hookref(self.hookref_expr)
indices = ('(const uint32 []) {%s}'
@@ -636,18 +699,20 @@ def toc(self):
domains = ('(const _identity_t []) {%s}'
% (', '.join(domain.read() for domain in self.domains),)
if self.domains else 'NULL')
- out(f'_DML_attach_callback_to_hook({hookref}, '
+ out(f'{{ _DML_attach_callback_to_hook({hookref}, '
+ f'&_after_on_hook_infos[{self.info.uniq}], {indices}, '
+ f'{len(self.indices)}, {args}, {domains}, '
- + f'{len(self.domains)});\n')
+ + f'{len(self.domains)}); }}\n')
mkAfterOnHook = AfterOnHook
class ImmediateAfter(Statement):
+ slots = ('args_raii_info',)
@auto_init
def __init__(self, site, domains, info, indices, args_init):
crep.require_dev(site)
- def toc(self):
+
+ def toc_stmt(self):
self.linemark()
indices = ('(const uint32 []) {%s}'
% (', '.join(i.read() for i in self.indices),)
@@ -660,13 +725,17 @@ def toc(self):
args_size = f'sizeof({self.info.args_type.declaration("")})'
else:
(args, args_size) = ('NULL', '0')
+
+ args_destructor = (self.info.args_raii_info.cident_destructor
+ if self.info.args_raii_info else 'NULL')
domains = ('(const _identity_t []) {%s}'
% (', '.join(domain.read() for domain in self.domains),)
if self.domains else 'NULL')
- out('_DML_post_immediate_after('
+ out('{ _DML_post_immediate_after('
+ '&_dev->obj, _dev->_immediate_after_state, '
- + f'{self.info.cident_callback}, {indices}, {len(self.indices)}, '
- + f'{args}, {args_size}, {domains}, {len(self.domains)});\n')
+ + f'{self.info.cident_callback}, {args_destructor}, {indices}, '
+ + f'{len(self.indices)}, {args}, {args_size}, {domains}, '
+ + f'{len(self.domains)}); }}\n')
mkImmediateAfter = ImmediateAfter
@@ -676,7 +745,7 @@ def __init__(self, site, cond, truebranch, falsebranch):
assert_type(site, cond.ctype(), TBool)
assert_type(site, truebranch, Statement)
assert_type(site, falsebranch, (Statement, type(None)))
- def toc(self):
+ def toc_stmt(self):
self.linemark()
out('if ('+self.cond.read()+') {\n', postindent = 1)
self.truebranch.toc_inline()
@@ -684,7 +753,10 @@ def toc(self):
out('} else ', preindent = -1)
if dml.globals.linemarks:
out('\n')
- self.falsebranch.toc()
+ # TODO(RAII): from what I (lwaern) can tell, this is the ONE USE of
+ # toc_stmt that is needed across dmlc. If we can work around it, we
+ # may scrap toc_stmt.
+ self.falsebranch.toc_stmt()
elif self.falsebranch:
out('} else {\n', preindent = -1, postindent = 1)
self.falsebranch.toc_inline()
@@ -714,7 +786,7 @@ class While(Statement):
def __init__(self, site, cond, stmt):
assert_type(site, cond.ctype(), TBool)
assert_type(site, stmt, Statement)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
# out('/* %s */\n' % repr(self))
out('while ('+self.cond.read()+') {\n', postindent = 1)
@@ -742,7 +814,7 @@ class DoWhile(Statement):
def __init__(self, site, cond, stmt):
assert_type(site, cond.ctype(), TBool)
assert_type(site, stmt, Statement)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
# out('/* %s */\n' % repr(self))
out('do {\n', postindent = 1)
@@ -765,7 +837,7 @@ class For(Statement):
def __init__(self, site, pres, cond, posts, stmt):
assert_type(site, cond.ctype(), TBool)
assert_type(site, stmt, Statement)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
out('for (%s; %s; ' % (", ".join(pre.discard()
@@ -821,7 +893,7 @@ def itervar_initializer(site, trait):
@auto_init
def __init__(self, site, trait, each_in_expr, body, break_label): pass
- def toc(self):
+ def toc_stmt(self):
self.linemark()
out('{\n', postindent=1)
self.linemark()
@@ -851,12 +923,55 @@ def control_flow(self):
mkForeachSequence = ForeachSequence
+
+class ForeachVectorIterRef(Expression):
+ explicit_type = True
+ priority = dml.expr.Apply.priority
+ c_lval = True
+ @auto_init
+ def __init__(self, site, itername, uniq, writable, type): pass
+
+ def __str__(self):
+ return self.itername
+
+ def read(self):
+ t = self.type.declaration('')
+ return f'DML_VECT_ELEM({t}, *_{self.uniq}_vect, _{self.uniq}_vect_idx)'
+
+class ForeachVector(Statement):
+ count = 0
+
+ @auto_init
+ def __init__(self, site, vect, uniq, body):
+ assert self.vect.c_lval
+ def toc_stmt(self):
+ self.linemark()
+ out('{\n', postindent=1)
+ self.linemark()
+ const = ' const'*safe_realtype_shallow(self.vect.ctype()).const
+ out(f'_dml_vect_t{const} *_{self.uniq}_vect = '
+ + f'&({self.vect.read()});\n')
+ self.linemark()
+ out(f'for (uint32 _{self.uniq}_vect_idx = 0; '
+ + f'_{self.uniq}_vect_idx < _{self.uniq}_vect->len; '
+ + f'++_{self.uniq}_vect_idx) {{\n', postindent=1)
+ self.body.toc_inline()
+ out('}\n', preindent=-1)
+ out('}\n', preindent=-1)
+
+ def control_flow(self):
+ bodyflow = self.body.control_flow()
+ # fallthrough is possible if the vector is empty
+ return bodyflow.replace(fallthrough=True, br=False)
+
+mkForeachVector = ForeachVector
+
class Switch(Statement):
@auto_init
def __init__(self, site, expr, stmt):
assert_type(site, expr, Expression)
assert_type(site, stmt, Statement)
- def toc(self):
+ def toc_stmt(self):
self.linemark()
# out('/* %s */\n' % repr(self))
out('switch ('+self.expr.read()+') {\n', postindent = 1)
@@ -892,7 +1007,7 @@ class SubsequentCases(Statement):
@auto_init
def __init__(self, site, cases, has_default):
assert len(self.cases) > 0
- def toc(self):
+ def toc_stmt(self):
for (i, case) in enumerate(self.cases):
assert isinstance(case, (Case, Default))
site_linemark(case.site)
@@ -908,7 +1023,7 @@ def toc(self):
class Case(Statement):
@auto_init
def __init__(self, site, expr): pass
- def toc(self):
+ def toc_stmt(self):
self.linemark()
out('case %s: ;\n' % self.expr.read(), preindent = -1, postindent = 1)
@@ -917,17 +1032,17 @@ def toc(self):
class Default(Statement):
@auto_init
def __init__(self, site): pass
- def toc(self):
+ def toc_stmt(self):
self.linemark()
out('default: ;\n', preindent = -1, postindent = 1)
mkDefault = Default
-class VectorForeach(Statement):
+class LegacyVectorForeach(Statement):
@auto_init
def __init__(self, site, vect, var, stmt): pass
- def toc(self):
+ def toc_stmt(self):
out('VFOREACH(%s, %s) {\n' % (self.vect.read(), self.var.read()),
postindent = 1)
self.stmt.toc_inline()
@@ -937,11 +1052,11 @@ def control_flow(self):
flow = self.stmt.control_flow()
return flow.replace(fallthrough=flow.fallthrough or flow.br, br=False)
-def mkVectorForeach(site, vect, var, stmt):
- return VectorForeach(site, vect, var, stmt)
+def mkLegacyVectorForeach(site, vect, var, stmt):
+ return LegacyVectorForeach(site, vect, var, stmt)
class Break(Statement):
- def toc(self):
+ def toc_stmt(self):
out('break;\n')
def control_flow(self):
return ControlFlow(br=True)
@@ -949,7 +1064,7 @@ def control_flow(self):
mkBreak = Break
class Continue(Statement):
- def toc(self):
+ def toc_stmt(self):
out('continue;\n')
def control_flow(self):
return ControlFlow()
@@ -960,19 +1075,39 @@ class AssignStatement(Statement):
@auto_init
def __init__(self, site, target, initializer):
assert isinstance(initializer, Initializer)
- def toc(self):
- out('{\n', postindent=1)
- self.toc_inline()
- out('}\n', preindent=-1)
- def toc_inline(self):
- self.initializer.assign_to(self.target, self.target.ctype())
+ def toc_stmt(self):
+ self.linemark()
+ out(self.target.write(self.initializer) + ';\n')
+
+def mkAssignStatement(site, target, init):
+ if isinstance(target, InlinedParam):
+ raise EASSINL(target.site, target.name)
+ if not target.writable:
+ raise EASSIGN(site, target)
-mkAssignStatement = AssignStatement
+ if isinstance(target, NonValue):
+ if not isinstance(init, ExpressionInitializer):
+ raise ICE(target.site,
+ f'{target} can only be used as the target of an '
+ + 'assignment if its initializer is a simple expression '
+ + 'or a return value of a method call')
+ else:
+ target_type = target.ctype()
+
+ if deep_const(target_type):
+ raise ECONST(site)
+
+ if isinstance(init, ExpressionInitializer):
+ init = ExpressionInitializer(
+ source_for_assignment(site, target_type, init.expr),
+ init.ignore_raii)
+
+ return AssignStatement(site, target, init)
-def mkCopyData(site, source, target):
+def mkCopyData(site, source, target, ignore_raii=False):
"Convert a copy statement to intermediate representation"
- assignexpr = mkAssignOp(site, target, source)
- return mkExpressionStatement(site, assignexpr)
+ return mkAssignStatement(site, target,
+ ExpressionInitializer(source, ignore_raii))
#
# Expressions
@@ -1015,7 +1150,8 @@ def as_int(e):
target_type = TInt(64, True)
if t.is_endian:
(fun, funtype) = t.get_load_fun()
- e = dml.expr.Apply(e.site, mkLit(e.site, fun, funtype), (e,), funtype)
+ e = dml.expr.Apply(e.site, mkLit(e.site, fun, funtype), (e,), funtype,
+ True)
if not compatible_types(realtype(e.ctype()), target_type):
e = mkCast(e.site, e, target_type)
return e
@@ -1030,27 +1166,28 @@ def truncate_int_bits(value, signed, bits=64):
else:
return value & mask
+class PseudoMethodRef(NonValue):
+ '''Nonvalue expressions with 'apply' that are not method references'''
+
+ def apply(self, inits, location, scope):
+ raise ICE(self.site, f'apply not implemented for {type(self)}')
+
class LValue(Expression):
"Somewhere to read or write data"
writable = True
-
- def write(self, source):
- rt = realtype(self.ctype())
- if isinstance(rt, TEndianInt):
- return (f'{rt.dmllib_fun("copy")}(&{self.read()},'
- + f' {source.read()})')
- return '%s = %s' % (self.read(), source.read())
-
- @property
- def is_stack_allocated(self):
- '''Returns true only if it's known that writing to the lvalue will
- write to stack-allocated data'''
- return False
+ addressable = True
+ c_lval = True
class IfExpr(Expression):
priority = 30
+ slots = ('orphan',)
@auto_init
- def __init__(self, site, cond, texpr, fexpr, type): pass
+ def __init__(self, site, cond, texpr, fexpr, type):
+ self.orphan = texpr.orphan and fexpr.orphan
+ if texpr.orphan != fexpr.orphan:
+ self.texpr = mkAdoptedOrphan(texpr.site, texpr)
+ self.fexpr = mkAdoptedOrphan(fexpr.site, fexpr)
+
def __str__(self):
return '%s ? %s : %s' % (self.cond, self.texpr, self.fexpr)
def read(self):
@@ -1103,7 +1240,7 @@ def mkIfExpr(site, cond, texpr, fexpr):
(texpr, fexpr, utype) = usual_int_conv(
texpr, ttype, fexpr, ftype)
else:
- if not compatible_types(ttype, ftype):
+ if not compatible_types_fuzzy(ttype, ftype):
raise EBINOP(site, ':', texpr, fexpr)
# TODO: in C, the rules are more complex,
# but our type system is too primitive to cover that
@@ -1226,7 +1363,7 @@ class Compare(BinOp):
@abc.abstractproperty
def cmp_functions(self):
- '''pair of dmllib.h functions for comparison between signed and
+ '''pair of dml-lib.h functions for comparison between signed and
unsigned integer, with (int, uint) and (uint, int) args,
respectively'''
@@ -1240,8 +1377,10 @@ def make(cls, site, lh, rh):
lhtype = realtype(lh.ctype())
rhtype = realtype(rh.ctype())
- if (lhtype.is_arith and rhtype.is_arith
- and lh.constant and rh.constant):
+ if ((lhtype.is_arith and rhtype.is_arith
+ and lh.constant and rh.constant)
+ or (isinstance(lh, StringConstant)
+ and isinstance(rh, StringConstant))):
return mkBoolConstant(site, cls.eval_const(lh.value, rh.value))
if lhtype.is_int:
lh = as_int(lh)
@@ -1272,8 +1411,13 @@ def make(cls, site, lh, rh):
if ((lhtype.is_arith and rhtype.is_arith)
or (isinstance(lhtype, (TPtr, TArray))
and isinstance(rhtype, (TPtr, TArray))
- and compatible_types(lhtype.base, rhtype.base))):
+ and compatible_types_fuzzy(lhtype.base, rhtype.base))):
return cls.make_simple(site, lh, rh)
+ if ((isinstance(lh, StringConstant)
+ or isinstance(lhtype, TString))
+ and (isinstance(rh, StringConstant)
+ or isinstance (rhtype, TString))):
+ return mkStringCmp(site, lh, rh, cls.op)
raise EILLCOMP(site, lh, lhtype, rh, rhtype)
@classmethod
@@ -1405,6 +1549,58 @@ def make_simple(site, lh, rh):
return mkBoolConstant(site, lh.value == rh.value)
return Equals_dml12(site, lh, rh)
+class StringCmp(Expression):
+ priority = 70
+ type = TBool()
+
+ @auto_init
+ def __init__(self, site, lh, rh, op): pass
+
+ def __str__(self):
+ lh = str(self.lh)
+ rh = str(self.rh)
+ if self.lh.priority <= self.priority:
+ lh = '('+lh+')'
+ if self.rh.priority <= self.priority:
+ rh = '('+rh+')'
+ return lh + ' ' + self.op + ' ' + rh
+
+ def read(self):
+ return (f'_dml_string_cmp({self.lh.read()}, {self.rh.read()}) '
+ + f'{self.op} 0')
+
+class StringCmpC(Expression):
+ priority = 70
+ type = TBool()
+
+ @auto_init
+ def __init__(self, site, lh, rh, op): pass
+
+ def __str__(self):
+ lh = str(self.lh)
+ rh = str(self.rh)
+ if self.lh.priority <= self.priority:
+ lh = '('+lh+')'
+ if self.rh.priority <= self.priority:
+ rh = '('+rh+')'
+ return lh + ' ' + self.op + ' ' + rh
+
+ def read(self):
+ return (f'_dml_string_cmp_c_str({self.lh.read()}, {self.rh.read()}) '
+ + f'{self.op} 0')
+
+def mkStringCmp(site, lh, rh, op):
+    if isinstance(lh, StringConstant):
+        op = {'<': '>', '>': '<'}.get(op[0], op[0]) + op[1:]
+        rh_conv = mkAdoptedOrphan(rh.site, rh)
+        return StringCmpC(site, rh_conv, lh, op)
+    elif isinstance(rh, StringConstant):
+        lh_conv = mkAdoptedOrphan(lh.site, lh)
+        return StringCmpC(site, lh_conv, rh, op)
+    lh_conv = mkAdoptedOrphan(lh.site, lh)
+    rh_conv = mkAdoptedOrphan(rh.site, rh)
+    return StringCmp(site, lh_conv, rh_conv, op)
+
class Equals(BinOp):
priority = 70
type = TBool()
@@ -1477,7 +1673,7 @@ def make(cls, site, lh, rh):
if ((lhtype.is_arith and rhtype.is_arith)
or (isinstance(lhtype, (TPtr, TArray))
and isinstance(rhtype, (TPtr, TArray))
- and compatible_types(lhtype, rhtype))
+ and compatible_types_fuzzy(lhtype, rhtype))
or (isinstance(lhtype, TBool) and isinstance(rhtype, TBool))):
return Equals(site, lh, rh)
@@ -1488,6 +1684,11 @@ def make(cls, site, lh, rh):
if (isinstance(lhtype, THook) and isinstance(rhtype, THook)
and lhtype.cmp(rhtype) == 0):
return IdentityEq(site, lh, rh)
+ if ((isinstance(lh, StringConstant)
+ or isinstance(lhtype, TString))
+ and (isinstance(rh, StringConstant)
+ or isinstance (rhtype, TString))):
+ return mkStringCmp(site, lh, rh, '==')
raise EILLCOMP(site, lh, lhtype, rh, rhtype)
@@ -2202,6 +2403,143 @@ def make_simple(site, lh, rh):
return Add_dml12(site, lh, rh)
+class StringAdd(Orphan):
+ priority = dml.expr.Apply.priority
+
+ type = TString()
+
+ # Implemented by INHERITING ownership of first argument, but only BORROWING
+ # second argument. Meaning the second argument is never duped, and, if
+ # orphan, must be adopted.
+ def __init__(self, site, lh, rh):
+ self.site = site
+ self.lh = lh
+ self.rh = mkAdoptedOrphan(rh.site, rh)
+
+ def __str__(self):
+ lh = str(self.lh)
+ rh = str(self.rh)
+ if self.lh.priority <= Add.priority:
+ lh = '('+lh+')'
+ if self.rh.priority <= Add.priority:
+ rh = '('+rh+')'
+ return lh + ' + ' + rh
+
+ def read(self):
+ lh = self.lh.read()
+ if not self.lh.orphan:
+ lh = f'_dml_string_dupe({lh})'
+ return f'_dml_string_add({lh}, {self.rh.read()})'
+
+class StringAddCStrAfter(Orphan):
+ priority = dml.expr.Apply.priority
+
+ type = TString()
+
+ @auto_init
+ def __init__(self, site, lh, rh): pass
+
+ def __str__(self):
+ lh = str(self.lh)
+ rh = str(self.rh)
+ if self.lh.priority <= Add.priority:
+ lh = '('+lh+')'
+ if self.rh.priority <= Add.priority:
+ rh = '('+rh+')'
+ return lh + ' + ' + rh
+
+ def read(self):
+ lh = self.lh.read()
+ if not self.lh.orphan:
+ lh = f'_dml_string_dupe({lh})'
+ return f'_dml_string_add_cstr({lh}, {self.rh.read()})'
+
+class StringAddCStrBefore(Orphan):
+ priority = dml.expr.Apply.priority
+
+ type = TString()
+
+ @auto_init
+ def __init__(self, site, lh, rh): pass
+
+ def __str__(self):
+ lh = str(self.lh)
+ rh = str(self.rh)
+ if self.lh.priority <= Add.priority:
+ lh = '('+lh+')'
+ if self.rh.priority <= Add.priority:
+ rh = '('+rh+')'
+ return lh + ' + ' + rh
+
+ def read(self):
+ rh = self.rh.read()
+ if not self.rh.orphan:
+ rh = f'_dml_string_dupe({rh})'
+ return f'_dml_string_add_cstr_before({self.lh.read()}, {rh})'
+
+class StringCStrRef(PseudoMethodRef):
+ @auto_init
+ def __init__(self, site, expr): pass
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, 'c_str')
+
+ def apply(self, inits, location, scope):
+ if inits:
+ raise EARG(self.site, '.c_str')
+ return mkStringCStrApply(self.site, self.expr)
+
+mkStringCStrRef = StringCStrRef
+
+class StringCStrApply(Expression):
+ priority = dml.expr.Apply.priority
+
+ slots = ('type',)
+
+ @auto_init
+ def __init__(self, site, expr):
+ assert not expr.orphan
+ self.type = TPtr(
+ TNamed('char',
+ const=(not expr.writable
+ or safe_realtype_shallow(expr.ctype()).const)))
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, 'c_str()')
+
+ def read(self):
+ return f'_dml_string_str({self.expr.read()})'
+
+ @property
+ def is_pointer_to_stack_allocation(self):
+ return self.expr.is_stack_allocated
+
+def mkStringCStrApply(site, expr):
+ if expr.constant and isinstance(expr, FromCString):
+ return mkStringConstant(site, expr.value)
+
+ if expr.orphan:
+ expr = mkAdoptedOrphan(expr.site, expr)
+ return StringCStrApply(site, expr)
+
+class FromCString(Orphan):
+ priority = dml.expr.Apply.priority
+
+ type = TString()
+
+ slots = ('constant', 'value')
+
+ @auto_init
+ def __init__(self, site, expr):
+ self.constant = expr.constant
+ self.value = expr.value if expr.constant else None
+
+ def __str__(self):
+ return f'cast({self.expr}, string)'
+
+ def read(self):
+ return f'_dml_string_new({self.expr.read()})'
+
class Add(ArithBinOp):
priority = 110
op = '+'
@@ -2216,6 +2554,29 @@ def make_simple(site, lh, rh):
return StringConstant(site, lh.value + rh.value)
lhtype = realtype(lh.ctype())
rhtype = realtype(rh.ctype())
+ if isinstance(lhtype, TString) and isinstance(rh, StringConstant):
+ if (isinstance(lh, StringAddCStrAfter)
+ and isinstance(lh.rh, StringConstant)):
+ (lh, rh) = (lh.lh,
+ mkStringConstant(rh.site, lh.rh.value + rh.value))
+
+ return StringAddCStrAfter(site, lh, rh)
+ elif isinstance(lh, StringConstant) and isinstance(rhtype, TString):
+ if (isinstance(rh, StringAddCStrBefore)
+ and isinstance(rh.lh, StringConstant)):
+ (lh, rh) = (mkStringConstant(lh.site, lh.value + rh.lh.value),
+ rh.rh)
+ return StringAddCStrBefore(site, lh, rh)
+
+ if isinstance(lhtype, TString) and isinstance(rhtype, TString):
+ return StringAdd(site, lh, rh)
+
+ if (isinstance(lhtype, TVector)
+ and (safe_realtype_unconst(lhtype).cmp(
+ safe_realtype_unconst(rhtype))
+ == 0)):
+ return VectorAdd(site, lh, rh)
+
# ECSADD should always be emitted when the operand types are equivalent
# to char pointers/arrays -- even including when the operands are
# explicitly typed as int8 pointers/arrays
@@ -2377,7 +2738,7 @@ def __str__(self):
return "%s = %s" % (self.lh, self.rh)
def discard(self):
- return self.lh.write(self.rh)
+ return self.lh.write(ExpressionInitializer(self.rh))
def read(self):
return '((%s), (%s))' % (self.discard(), self.lh.read())
@@ -2394,7 +2755,6 @@ def mkAssignOp(site, target, source):
raise EASSINL(target.site, target.name)
if not target.writable:
raise EASSIGN(site, target)
-
target_type = target.ctype()
source = source_for_assignment(site, target_type, source)
@@ -2455,13 +2815,14 @@ def make_simple(cls, site, rh):
TFunction([TPtr(TNamed('conf_object_t')),
TPtr(TVoid())],
TVoid())))
- if not dml.globals.compat_dml12 and not isinstance(rh, LValue):
+ if not dml.globals.compat_dml12 and not rh.addressable:
raise ERVAL(rh.site, '&')
+
return AddressOf(site, rh)
@property
def is_pointer_to_stack_allocation(self):
- return isinstance(self.rh, LValue) and self.rh.is_stack_allocated
+ return self.rh.is_stack_allocated
def mkAddressOf(site, rh):
if dml.globals.compat_dml12_int(site):
@@ -2621,7 +2982,7 @@ def mkUnaryPlus(site, rh):
rh, _ = promote_integer(rh, rhtype)
else:
raise ICE(site, "Unexpected arith argument to unary +")
- if isinstance(rh, LValue):
+ if rh.addressable or rh.writable:
# +x is a rvalue
rh = mkRValue(rh)
return rh
@@ -2647,7 +3008,7 @@ def make_simple(cls, site, rh):
rhtype = safe_realtype(rh.ctype())
if not isinstance(rhtype, (IntegerType, TPtr)):
raise EINCTYPE(site, cls.op)
- if not isinstance(rh, LValue):
+ if not rh.addressable:
if isinstance(rh, BitSlice):
hint = 'try %s= 1' % (cls.base_op[0],)
else:
@@ -2660,9 +3021,10 @@ def make_simple(cls, site, rh):
(result, signed) = promote_integer(result, rhtype)
return result
+ @classmethod
@property
- def op(self):
- return self.base_op
+ def op(cls):
+ return cls.base_op
@property
def is_pointer_to_stack_allocation(self):
@@ -2808,8 +3170,8 @@ def mkInterfaceMethodRef(site, iface_node, indices, method_name):
if (not isinstance(ftype, TFunction)
or not ftype.input_types
- or TPtr(safe_realtype(TNamed('conf_object_t'))).cmp(
- safe_realtype(ftype.input_types[0])) != 0):
+ or TPtr(safe_realtype_unconst(TNamed('conf_object_t'))).cmp(
+ safe_realtype_unconst(ftype.input_types[0])) != 0):
# non-method members are not accessible
raise EMEMBER(site, struct_name, method_name)
@@ -2853,7 +3215,8 @@ def writable(self):
return self.expr.writable
def write(self, source):
- source_expr = source
+ assert isinstance(source, ExpressionInitializer)
+ source_expr = source.expr
# if not self.size.constant or source.ctype() > self.type:
# source = mkBitAnd(source, self.mask)
@@ -2875,7 +3238,7 @@ def write(self, source):
target_type = realtype(self.expr.ctype())
if target_type.is_int and target_type.is_endian:
expr = mkCast(self.site, expr, target_type)
- return self.expr.write(expr)
+ return self.expr.write(ExpressionInitializer(expr))
def mkBitSlice(site, expr, msb, lsb, bitorder):
# lsb == None means that only one bit number was given (expr[i]
@@ -2923,7 +3286,8 @@ def mkBitSlice(site, expr, msb, lsb, bitorder):
class TraitMethodApplyIndirect(Expression):
'''The C expression of a trait method call'''
@auto_init
- def __init__(self, site, traitref, methname, independent, inargs, type):
+ def __init__(self, site, traitref, methname, independent, memoized,
+ inargs, type):
if not independent:
crep.require_dev(site)
@@ -2931,6 +3295,10 @@ def __str__(self):
return '%s.%s(%s)' % (self.traitref, self.methname,
', '.join(map(str, self.inargs)))
+ @property
+ def orphan(self):
+ return not self.memoized
+
def read(self):
infix_independent = 'INDEPENDENT_' if self.independent else ''
suffix_noarg = '' if self.inargs else '0'
@@ -2953,6 +3321,10 @@ def __init__(self, site, traitref, methodref, inargs, type):
def __str__(self):
return '%s(%s)' % (self.methodref, ', '.join(map(str, self.inargs)))
+ @property
+ def orphan(self):
+ return not self.methodref.memoized
+
def read(self):
return "%s(%s)" % (
self.methodref.cname(),
@@ -2960,6 +3332,34 @@ def read(self):
+ [arg.read() for arg in [self.traitref] + self.inargs]))
class New(Expression):
+ priority = 160 # f()
+ slots = ('type',)
+ @auto_init
+ def __init__(self, site, newtype, count, raii_info):
+ self.type = TPtr(newtype)
+ def __str__(self):
+ if self.count:
+ return 'new %s[%s]' % (self.newtype, self.count)
+ else:
+ return 'new %s' % self.newtype
+ def read(self):
+ destructor = (self.raii_info.cident_destructor_array_item
+ if self.raii_info else '_dml_raii_destructor_ref_none')
+ count = self.count.read() if self.count else '1'
+ return (f'DML_NEW({self.newtype.declaration("")}, {count}, '
+ +f'{destructor})')
+
+def mkNew(site, newtype, count = None):
+ if count:
+ count = as_int(count)
+ if newtype.is_raii:
+ from .codegen import get_raii_type_info # TODO(RAII) imports...
+ info = get_raii_type_info(newtype)
+ else:
+ info = None
+ return New(site, newtype, count, info)
+
+class NewExtern(Expression):
priority = 160 # f()
slots = ('type',)
@auto_init
@@ -2967,9 +3367,9 @@ def __init__(self, site, newtype, count):
self.type = TPtr(newtype)
def __str__(self):
if self.count:
- return 'new %s[%s]' % (self.newtype, self.count)
+ return 'new %s[%s]' % (self.newtype, self.count)
else:
- return 'new %s' % self.newtype
+ return 'new %s' % self.newtype
def read(self):
t = self.newtype.declaration('')
if self.count:
@@ -2977,10 +3377,12 @@ def read(self):
else:
return 'MM_ZALLOC(1, %s)' % (t)
-def mkNew(site, newtype, count = None):
+
+def mkNewExtern(site, newtype, count = None):
+ assert not newtype.is_raii
if count:
count = as_int(count)
- return New(site, newtype, count)
+ return NewExtern(site, newtype, count)
class ListItems(metaclass=abc.ABCMeta):
'''A series of consecutive list elements, where each list element
@@ -3337,7 +3739,9 @@ def ctype(self):
self.value.rettype))
def read(self):
- prefix = '_trampoline' * (not self.value.independent)
+ prefix = '_trampoline' * (not self.value.independent
+ or (self.value.memoized
+ and self.value.rettype.is_raii))
return f'(&{prefix}{self.value.get_cname()})'
@@ -3398,6 +3802,18 @@ def exc(self):
mkUndefined = Undefined
+class DiscardRef(NonValue):
+ writable = True
+
+ def __str__(self):
+ return '_'
+
+ def write(self, source):
+ assert isinstance(source, ExpressionInitializer)
+ return source.expr.discard()
+
+mkDiscardRef = DiscardRef
+
def endian_convert_expr(site, idx, endian, size):
"""Convert a bit index to little-endian (lsb=0) numbering.
@@ -3570,8 +3986,12 @@ def mkTraitUpcast(site, sub, parent):
if typ.trait is parent:
return sub
elif parent in typ.trait.ancestors:
+ if isinstance(sub, ObjTraitRef):
+ return ObjTraitRef(site, sub.node, parent, sub.indices)
return TraitUpcast(site, sub, parent)
elif parent is dml.globals.object_trait:
+ if isinstance(sub, ObjTraitRef):
+ return ObjTraitRef(site, sub.node, parent, sub.indices)
return TraitObjectCast(site, sub)
raise ETEMPLATEUPCAST(site, typ, parent.type())
@@ -3646,6 +4066,8 @@ def outp(self): pass
def throws(self): pass
@abc.abstractproperty
def independent(self): pass
+ @abc.abstractproperty
+ def memoized(self): pass
def apply(self, inits, location, scope):
'''Return expression for application as a function'''
@@ -3688,6 +4110,10 @@ def throws(self):
def independent(self):
return self.methodref.independent
+ @property
+ def memoized(self):
+ return self.methodref.memoized
+
def call_expr(self, inargs, rettype):
return TraitMethodApplyDirect(self.site, self.traitref,
self.methodref, inargs, rettype)
@@ -3699,7 +4125,7 @@ class TraitMethodIndirect(TraitMethodRef):
it needs to be called.'''
@auto_init
def __init__(self, site, traitref, methname, inp, outp, throws,
- independent): pass
+ independent, memoized): pass
def __str__(self):
return "%s.%s" % (str(self.traitref), self.methname)
@@ -3707,7 +4133,7 @@ def __str__(self):
def call_expr(self, inargs, rettype):
return TraitMethodApplyIndirect(self.site, self.traitref,
self.methname, self.independent,
- inargs, rettype)
+ self.memoized, inargs, rettype)
class TraitHookArrayRef(NonValueArrayRef):
@auto_init
@@ -4067,7 +4493,7 @@ def read(self):
mkHookSuspended = HookSuspended
-class HookSendNowRef(NonValue):
+class HookSendNowRef(PseudoMethodRef):
'''Reference to the send_now pseudomethod of a hook'''
@auto_init
def __init__(self, site, hookref_expr): pass
@@ -4087,33 +4513,39 @@ def apply(self, inits, location, scope):
class HookSendNowApply(Expression):
'''Application of the send_now pseudomethod with valid arguments'''
- slots = ('msg_struct',)
+ slots = ('msg_struct', 'msg_struct_info')
type = TInt(64, False)
priority = dml.expr.Apply.priority
@auto_init
def __init__(self, site, hookref_expr, args):
crep.require_dev(site)
msg_types = safe_realtype(hookref_expr.ctype()).msg_types
- from .codegen import get_type_sequence_info
+ from .codegen import get_type_sequence_info, get_raii_type_info
self.msg_struct = get_type_sequence_info(msg_types,
create_new=True).struct
+ self.msg_struct_info = (get_raii_type_info(self.msg_struct)
+ if self.msg_struct and self.msg_struct.is_raii
+ else None)
+
def __str__(self):
return '%s.send_now(%s)' % (self.hookref_expr,
', '.join(str(e) for e in self.args))
def read(self):
- msg = (('&(%s_t) {%s}'
+ hookref = resolve_hookref(self.hookref_expr)
+ msg = (('&((%s_t) {%s})'
% (self.msg_struct.label,
', '.join(arg.read() for arg in self.args)))
if self.args else 'NULL')
- hookref = resolve_hookref(self.hookref_expr)
-
- return ('_DML_send_hook(&_dev->obj, '
- + f'&_dev->_detached_hook_queue_stack, {hookref}, {msg})')
+ if self.msg_struct_info:
+ return (f'_DML_SEND_HOOK_RAII({hookref}, {msg}, '
+ + f'{self.msg_struct_info.cident_destructor})')
+ else:
+ return f'_DML_SEND_HOOK({hookref}, {msg})'
mkHookSendNowApply = HookSendNowApply
-class HookSendRef(NonValue):
+class HookSendRef(PseudoMethodRef):
'''Reference to the send pseudomethod of a hook'''
@auto_init
def __init__(self, site, hookref_expr): pass
@@ -4157,11 +4589,15 @@ def read(self):
% (TArray(self.info.args_type,
mkIntegerLiteral(self.site, 1)).declaration(''),
self.hookref_expr.read()))
+
+ args_destructor = (self.info.args_raii_info.cident_destructor
+ if self.info.args_raii_info else 'NULL')
+
args_size = f'sizeof({self.info.args_type.declaration("")})'
- return ('_DML_post_immediate_after('
+ return ('(({ _DML_post_immediate_after('
+ '&_dev->obj, _dev->_immediate_after_state, '
- + f'{self.info.cident_callback}, NULL, 0, {args}, '
- + f'{args_size}, NULL, 0)')
+ + f'{self.info.cident_callback}, {args_destructor}, NULL, 0, '
+ + f'{args}, {args_size}, NULL, 0); }}))')
mkHookSendApply = HookSendApply
@@ -4182,6 +4618,8 @@ def read(self):
else:
s = self.sym.name
return s
+ def discard(self):
+ return f'(void){self.read()}'
def incref(self):
self.sym.incref()
def decref(self):
@@ -4210,13 +4648,30 @@ def read(self):
mkStaticVariable = StaticVariable
-class StructMember(LValue):
+class StructMember(Expression):
priority = 160
explicit_type = True
+
+ slots = ('orphan',)
+
@auto_init
def __init__(self, site, expr, sub, type, op):
+ assert not expr.writable or expr.c_lval
assert_type(site, expr, Expression)
assert_type(site, sub, str)
+ self.orphan = expr.orphan and op == '.'
+
+ @property
+ def writable(self):
+ return self.expr.writable
+
+ @property
+ def addressable(self):
+ return self.expr.addressable
+
+ @property
+ def c_lval(self):
+ return self.expr.c_lval
def __str__(self):
s = str(self.expr)
@@ -4231,7 +4686,9 @@ def read(self):
@property
def is_stack_allocated(self):
- return isinstance(self.expr, LValue) and self.expr.is_stack_allocated
+ return (self.expr.is_stack_allocated
+ if self.op == '.' else
+ self.expr.is_pointer_to_stack_allocation)
@property
def is_pointer_to_stack_allocation(self):
@@ -4270,12 +4727,14 @@ def mkSubRef(site, expr, sub, op):
basetype = real_etype.base
real_basetype = safe_realtype(basetype)
baseexpr = mkDereference(site, expr)
+ def structmember_expr(): return expr
else:
if op == '->':
raise ENOPTR(site, expr)
basetype = etype
real_basetype = safe_realtype(etype)
baseexpr = expr
+ def structmember_expr(): return mkAdoptedOrphan(expr.site, expr)
real_basetype = real_basetype.resolve()
@@ -4283,7 +4742,12 @@ def mkSubRef(site, expr, sub, op):
typ = real_basetype.get_member_qualified(sub)
if not typ:
raise EMEMBER(site, baseexpr, sub)
- return StructMember(site, expr, sub, typ, op)
+ subref = StructMember(site, structmember_expr(), sub, typ, op)
+ return (AdoptedOrphan(site, subref)
+ if (subref.orphan
+ and isinstance(safe_realtype_shallow(typ), TArray))
+ else subref)
+
elif real_basetype.is_int and real_basetype.is_bitfields:
member = real_basetype.members.get(sub)
if member is None:
@@ -4318,16 +4782,121 @@ def mkSubRef(site, expr, sub, op):
return mkHookSendNowRef(site, baseexpr)
elif sub == 'suspended':
return mkHookSuspended(site, baseexpr)
-
+ elif isinstance(basetype, TString):
+ if sub == 'c_str':
+ return mkStringCStrRef(site, baseexpr)
+ if sub == 'len':
+ return mkStringLen(site, baseexpr)
+ elif isinstance(basetype, TVector):
+ if sub == 'c_buf':
+ return mkVectorCBuf(site, baseexpr)
+ if sub == 'len':
+ return mkVectorLen(site, baseexpr)
+ if sub == 'push_back':
+ return mkVectorPushBack(site, baseexpr)
+ if sub == 'pop_back':
+ return mkVectorPopBack(site, baseexpr)
+ if sub == 'push_front':
+ return mkVectorPushFront(site, baseexpr)
+ if sub == 'pop_front':
+ return mkVectorPopFront(site, baseexpr)
+ if sub == 'insert':
+ return mkVectorInsert(site, baseexpr)
+ if sub == 'remove':
+ return mkVectorRemove(site, baseexpr)
raise ENOSTRUCT(site, expr)
+class StringLen(Expression):
+ priority = StructMember.priority
+
+ slots = ('type',)
+
+ @auto_init
+ def __init__(self, site, expr):
+ assert not expr.orphan
+ assert not expr.writable or expr.c_lval
+ self.type = TInt(32, False,
+ const=safe_realtype_shallow(self.expr.ctype()).const)
+
+
+ def __str__(self):
+ expr = (f'({self.expr})'
+ if self.expr.priority < StructMember.priority
+ else str(self.expr))
+ return f'{expr}.len'
+
+ @property
+ def writable(self):
+ return self.expr.writable
+
+ def read(self):
+ s = self.expr.read()
+ if self.expr.priority < self.priority:
+ s = '(' + s + ')'
+ return s + '.len'
+
+ def write(self, source):
+ assert isinstance(source, ExpressionInitializer)
+ return (f'_dml_string_resize(&({self.expr.read()}), '
+ + f'{source.expr.read()})')
+
+def mkStringLen(site, expr):
+ return StringLen(site, mkAdoptedOrphan(expr.site, expr))
+
+class VectorLen(Expression):
+ priority = StructMember.priority
+
+ slots = ('type',)
+
+ @auto_init
+ def __init__(self, site, expr):
+ assert not expr.orphan
+ assert not expr.writable or expr.c_lval
+ self.type = TInt(32, False, const=deep_const(self.expr.ctype()))
+
+ def __str__(self):
+ expr = (f'({self.expr})'
+ if self.expr.priority < StructMember.priority
+ else str(self.expr))
+ return f'{expr}.len'
+
+ def read(self):
+ s = self.expr.read()
+ if self.expr.priority < self.priority:
+ s = '(' + s + ')'
+ return s + '.len'
+
+ @property
+ def writable(self):
+ return self.expr.writable
+
+ def write(self, source):
+ assert isinstance(source, ExpressionInitializer)
+ base = realtype_shallow(self.expr.ctype()).base
+ if base.is_raii:
+ from .codegen import get_raii_type_info
+ info = get_raii_type_info(base)
+ return ('_dml_vect_resize_raii(sizeof(%s), %s, (void *)&(%s), %s)'
+ % (base.declaration(''), info.cident_destructor,
+ self.expr.read(), source.expr.read()))
+ else:
+ return ('_dml_vect_resize(sizeof(%s), (void *)&(%s), %s, true)'
+ % (base.declaration(''), self.expr.read(),
+ source.expr.read()))
+
+def mkVectorLen(site, expr):
+ return VectorLen(site, mkAdoptedOrphan(expr.site, expr))
+
class ArrayRef(LValue):
slots = ('type',)
priority = 160
explicit_type = True
@auto_init
def __init__(self, site, expr, idx):
- self.type = realtype_shallow(expr.ctype()).base
+ expr_type = realtype_shallow(expr.ctype())
+ self.type = conv_const(expr_type.const
+ and isinstance(expr_type, TArray),
+ expr_type.base)
def __str__(self):
return '%s[%s]' % (self.expr, self.idx)
def read(self):
@@ -4344,16 +4913,75 @@ def is_stack_allocated(self):
def is_pointer_to_stack_allocation(self):
return isinstance(self.type, TArray) and self.is_stack_allocated
-class VectorRef(LValue):
+class LegacyVectorRef(LValue):
slots = ('type',)
@auto_init
def __init__(self, site, expr, idx):
self.type = realtype(self.expr.ctype()).base
def read(self):
return 'VGET(%s, %s)' % (self.expr.read(), self.idx.read())
- def write(self, source):
- return "VSET(%s, %s, %s)" % (self.expr.read(), self.idx.read(),
- source.read())
+ # No need for write, VGET results in an lvalue
+
+class StringCharRef(Expression):
+ slots = ('type',)
+ priority = dml.expr.Apply.priority
+ c_lval = True
+ @auto_init
+ def __init__(self, site, expr, idx):
+ self.type = TNamed(
+ 'char', const=(safe_realtype(self.expr.ctype()).const))
+
+ @property
+ def writable(self):
+ return self.expr.writable
+
+ def read(self):
+ return f'DML_STRING_CHAR({self.expr.read()}, {self.idx.read()})'
+
+def mkStringCharRef(site, expr, idx):
+ return StringCharRef(site, mkAdoptedOrphan(expr.site, expr), idx)
+
+
+# Not considered addressable, as the address of an elem is very easily
+# invalidated.
+# Users have to use .c_buf() instead to acknowledge that possibility.
+# TODO(RAII): users may shoot themselves in the foot anyway, if the basetype
+# is an array or a struct with array member. What do we do about that?
+class VectorRef(Expression):
+ slots = ('type',)
+ priority = dml.expr.Apply.priority
+ explicit_type=True
+ c_lval = True
+
+ @auto_init
+ def __init__(self, site, expr, idx):
+ assert not expr.orphan
+ typ = safe_realtype_shallow(self.expr.ctype())
+ self.type = conv_const(typ.const, typ.base)
+
+ @property
+ def writable(self):
+ return self.expr.writable
+
+ def read(self):
+ base_typ = safe_realtype_shallow(self.expr.ctype()).base
+ return ('DML_VECT_ELEM(%s, %s, %s)'
+ % (base_typ.declaration(''), self.expr.read(),
+ self.idx.read()))
+
+ @property
+ def is_stack_allocated(self):
+ return self.expr.is_stack_allocated
+
+ @property
+ def is_pointer_to_stack_allocation(self):
+ return (isinstance(safe_realtype_shallow(self.type), TArray)
+ and self.is_stack_allocated)
+
+def mkVectorRef(site, expr, idx):
+ if idx.constant and idx.value < 0:
+ raise EOOB(expr)
+ return VectorRef(site, mkAdoptedOrphan(expr.site, expr), idx)
def mkIndex(site, expr, idx):
if isinstance(idx, NonValue):
@@ -4403,20 +5031,30 @@ def mkIndex(site, expr, idx):
if typ.is_int:
return mkBitSlice(site, expr, idx, None, None)
+ expr = mkAdoptedOrphan(expr.site, expr)
+
if isinstance(typ, (TArray, TPtr)):
return ArrayRef(site, expr, idx)
if isinstance(typ, (TVector)):
return VectorRef(site, expr, idx)
+ if isinstance(typ, (TVectorLegacy)):
+ return LegacyVectorRef(site, expr, idx)
+
+ if isinstance(typ, TString):
+ return mkStringCharRef(site, expr, idx)
+
raise ENARRAY(expr)
class Cast(Expression):
"A C type cast"
priority = 140
explicit_type = True
+ slots = ('orphan',)
@auto_init
- def __init__(self, site, expr, type): pass
+ def __init__(self, site, expr, type):
+ self.orphan = expr.orphan
def __str__(self):
return 'cast(%s, %s)' % (self.expr, self.type.declaration(""))
def read(self):
@@ -4440,22 +5078,36 @@ def mkCast(site, expr, new_type):
raise ETEMPLATEUPCAST(site, "object", new_type)
else:
return mkTraitUpcast(site, expr, real.trait)
+
+ if (dml.globals.compat_dml12 and dml.globals.api_version <= "6"
+ and isinstance(expr, InterfaceMethodRef)):
+ # Workaround for bug 24144
+ return mkLit(site, "%s->%s" % (
+ expr.node_expr.read(), expr.method_name), new_type)
+ if isinstance(expr, NonValue):
+ raise expr.exc()
old_type = safe_realtype(expr.ctype())
if (dml.globals.compat_dml12_int(site)
- and (isinstance(old_type, (TStruct, TVector))
- or isinstance(real, (TStruct, TVector)))):
+ and (isinstance(old_type, (TStruct, TVectorLegacy))
+ or isinstance(real, (TStruct, TVectorLegacy)))):
# these casts are permitted by C only if old and new are
# the same type, which is useless
return Cast(site, expr, new_type)
+ if isinstance(real, (TVoid, TArray, TFunction)):
+ raise ECAST(site, expr, new_type)
+ if old_type.cmp(real) == 0:
+ if (old_type.is_int
+ and not old_type.is_endian
+ and dml.globals.compat_dml12_int(expr.site)):
+ # 1.2 integer expressions often lie about their actual type,
+ # and require a "redundant" cast! Why yes, this IS horrid!
+ return Cast(site, expr, new_type)
+ return mkRValue(expr)
if isinstance(real, TStruct):
- if isinstance(old_type, TStruct) and old_type.label == real.label:
- return expr
raise ECAST(site, expr, new_type)
if isinstance(real, TExternStruct):
- if isinstance(old_type, TExternStruct) and old_type.id == real.id:
- return expr
raise ECAST(site, expr, new_type)
- if isinstance(real, (TVoid, TArray, TVector, TTraitList, TFunction)):
+ if isinstance(real, (TVector, TTraitList)):
raise ECAST(site, expr, new_type)
if isinstance(old_type, (TVoid, TStruct, TVector, TTraitList, TTrait)):
raise ECAST(site, expr, new_type)
@@ -4463,7 +5115,7 @@ def mkCast(site, expr, new_type):
expr = as_int(expr)
old_type = safe_realtype(expr.ctype())
if real.is_int and not real.is_endian:
- if isinstance(expr, IntegerConstant):
+ if old_type.is_int and expr.constant:
value = truncate_int_bits(expr.value, real.signed, real.bits)
if dml.globals.compat_dml12_int(site):
return IntegerConstant_dml12(site, value, real)
@@ -4474,8 +5126,8 @@ def mkCast(site, expr, new_type):
# Shorten redundant chains of integer casts. Avoids insane C
# output for expressions like a+b+c+d.
if (isinstance(expr, Cast)
- and isinstance(expr.type, TInt)
- and expr.type.bits >= real.bits):
+ and isinstance(old_type, TInt)
+ and old_type.bits >= real.bits):
# (uint64)(int64)x -> (uint64)x
expr = expr.expr
old_type = safe_realtype(expr.ctype())
@@ -4511,9 +5163,7 @@ def mkCast(site, expr, new_type):
return expr
elif real.is_int and real.is_endian:
old_type = safe_realtype(expr.ctype())
- if real.cmp(old_type) == 0:
- return expr
- elif old_type.is_arith or isinstance(old_type, TPtr):
+ if old_type.is_arith or isinstance(old_type, TPtr):
return mkApply(
expr.site,
mkLit(expr.site, *real.get_store_fun()),
@@ -4570,7 +5220,6 @@ def mkCast(site, expr, new_type):
class RValue(Expression):
'''Wraps an lvalue to prohibit write. Useful when a composite
expression is reduced down to a single variable.'''
- writable = False
@auto_init
def __init__(self, site, expr): pass
def __str__(self):
@@ -4579,10 +5228,29 @@ def ctype(self):
return self.expr.ctype()
def read(self):
return self.expr.read()
- def discard(self): pass
+    @property
+    def c_lval(self):
+        return self.expr.c_lval
+ @property
+ def explicit_type(self):
+ return self.expr.explicit_type
+ @property
+ def type(self):
+ assert self.explicit_type
+ return self.expr.type
+ @property
+ def orphan(self):
+ return self.expr.orphan
+ # TODO(RAII) This used to be simply be `pass`, which SCREAMS incorrect.
+ # But it makes me wonder why it was defined like that to begin with.
+ def discard(self):
+ return self.expr.discard()
+ @property
+ def is_pointer_to_stack_allocation(self):
+ return self.expr.is_pointer_to_stack_allocation
def mkRValue(expr):
- if isinstance(expr, LValue) or expr.writable:
+ if expr.addressable or expr.writable:
return RValue(expr.site, expr)
return expr
@@ -4733,23 +5401,44 @@ class Initializer(object):
being initialized."""
__slots__ = ()
+ @abc.abstractproperty
+ def site(self): pass
+
class ExpressionInitializer(Initializer):
- __slots__ = ('expr',)
- def __init__(self, expr):
+ __slots__ = ('expr', 'ignore_raii')
+ def __init__(self, expr, ignore_raii=False):
assert isinstance(expr, Expression)
self.expr = expr
+ self.ignore_raii = ignore_raii
def __str__(self):
return "%s" % self.expr
def __repr__(self):
- return "ExpressionInitializer(%r)" % self.expr
+ return f'ExpressionInitializer({self.expr!r}, {self.ignore_raii})'
+ @property
+ def site(self):
+ return self.expr.site
def incref(self):
self.expr.incref()
def decref(self):
self.expr.decref()
def read(self):
+ realt = safe_realtype_shallow(self.expr.ctype())
+ if (not self.ignore_raii
+ and realt.is_raii
+ and not isinstance(realt, TArray)
+ and not self.expr.orphan):
+ from .codegen import get_raii_type_info
+ return get_raii_type_info(self.expr.ctype()).read_dupe(
+ self.expr.read())
+
return self.expr.read()
def as_expr(self, typ):
- return source_for_assignment(self.expr.site, typ, self.expr)
+ expr = source_for_assignment(self.expr.site, typ, self.expr)
+ return (RAIIDupe(expr.site, expr)
+ if (not self.ignore_raii and not self.expr.orphan
+ and typ.is_raii)
+ else expr)
+
def assign_to(self, dest, typ):
# Assigns to (partially) const-qualified targets can happen as part of
# initializing (partially) const-qualified session variables. To allow
@@ -4758,15 +5447,59 @@ def assign_to(self, dest, typ):
# Since session variables are allocated on the heap, this should *not*
# be UB as long as the session variable hasn't been initialized
# previously.
+ typ = realtype(typ)
site = self.expr.site
- if deep_const(typ):
- out('memcpy((void *)&%s, (%s){%s}, sizeof %s);\n'
- % (dest.read(),
- TArray(typ, mkIntegerLiteral(site, 1)).declaration(''),
- mkCast(site, self.expr, typ).read(),
- dest.read()))
+ if not self.ignore_raii and typ.is_raii:
+ # TODO(RAII) this logic should likely be decentralized
+ from .codegen import get_raii_type_info, VectorRAIITypeInfo
+ info = get_raii_type_info(typ)
+ if isinstance(self.expr, FromCString):
+ return (f'_dml_string_set((void *)&({dest}), '
+ + f'{self.expr.expr.read()})')
+ if isinstance(self.expr, VectorCompoundLiteral):
+ assert isinstance(info, VectorRAIITypeInfo)
+ sizeof = f'sizeof({info.type.base.declaration("")})'
+ tgt = f'(void *)&({dest})'
+ src = self.expr.as_array_literal().read()
+ length = f'{len(self.expr.inits)}ULL'
+ if not info.base_info:
+ return (f'(({{ _dml_vect_set_array({sizeof}, {tgt}, {src}, '
+ + f'{length}); }}))')
+ destructor = info.base_info.cident_destructor
+ return (f'(({{ _dml_vect_set_compound_init_raii({sizeof}, '
+ + f'{destructor}, {tgt}, {src}, {length}); }}))')
+ if self.expr.orphan:
+ if isinstance(self.expr, RAIIDupe):
+ return (info.read_copy_lval if self.expr.expr.c_lval else
+ info.read_copy)(dest, self.expr.expr.read())
+ outp = (info.read_linear_move_lval if self.expr.c_lval else
+ info.read_linear_move)(dest, self.expr.read())
+ if (isinstance(self.expr, CompoundLiteral)
+ and isinstance(safe_realtype_shallow(self.expr.ctype()),
+ TArray)):
+ outp = f'(({{ {outp}; }}))'
+ return outp
+ return (info.read_copy_lval
+ if self.expr.c_lval else info.read_copy)(dest,
+ self.expr.read())
+
+ elif isinstance(typ, TEndianInt):
+ return (f'{typ.dmllib_fun("copy")}((void *)&{dest},'
+ + f' {self.expr.read()})')
+ elif deep_const(typ):
+ shallow_deconst_typ = safe_realtype_unconst(typ)
+ if (deep_const(shallow_deconst_typ)
+ or isinstance(typ, (TExternStruct, TArray))):
+ return ('memcpy((void *)&%s, (%s){%s}, sizeof %s)'
+ % (dest,
+ TArray(typ, mkIntegerLiteral(site, 1)).declaration(''),
+ mkCast(site, self.expr, typ).read(),
+ dest))
+ else:
+ return (f'*({TPtr(shallow_deconst_typ).declaration("")})'
+ + f'&{dest} = {self.expr.read()}')
else:
- mkCopyData(site, self.expr, dest).toc()
+ return f'{dest} = {self.expr.read()}'
class CompoundInitializer(Initializer):
'''Initializer for a variable of struct or array type, using the
@@ -4789,24 +5522,33 @@ def decref(self):
def read(self):
return '{' + ", ".join(i.read() for i in self.init) + '}'
def as_expr(self, typ):
+ if isinstance(safe_realtype_shallow(typ), TArray):
+ return ArrayCompoundLiteral(self.site, self, typ)
return CompoundLiteral(self.site, self, typ)
def assign_to(self, dest, typ):
'''output C statements to assign an lvalue'''
# (void *) cast to avoid GCC erroring if the target type is (partially)
# const-qualified. See ExpressionInitializer.assign_to
- if isinstance(typ, TNamed):
- out('memcpy((void *)&%s, &(%s)%s, sizeof %s);\n' %
- (dest.read(), typ.declaration(''), self.read(),
- dest.read()))
+ if typ.is_raii:
+ from .codegen import get_raii_type_info
+ info = get_raii_type_info(typ)
+ outp = info.read_linear_move_lval(
+ dest, f'(({typ.declaration("")}){self.read()})')
+ if isinstance(safe_realtype_shallow(typ), TArray):
+ outp = f'(({{ {outp}; }}))'
+ return outp
+ elif isinstance(typ, TNamed):
+ return ('memcpy((void *)&%s, &(%s)%s, sizeof %s)' %
+ (dest, typ.declaration(''), self.read(),
+ dest))
elif isinstance(typ, TArray):
- out('memcpy((void *)%s, (%s)%s, sizeof %s);\n'
- % (dest.read(), typ.declaration(''),
- self.read(), dest.read()))
+ return ('memcpy((void *)%s, (%s)%s, sizeof %s)'
+ % (dest, typ.declaration(''), self.read(), dest))
elif isinstance(typ, TStruct):
- out('memcpy((void *)&%s, (%s){%s}, sizeof %s);\n' % (
- dest.read(),
+ return 'memcpy((void *)&%s, (%s){%s}, sizeof %s)' % (
+ dest,
TArray(typ, mkIntegerLiteral(self.site, 1)).declaration(''),
- self.read(), dest.read()))
+ self.read(), dest)
else:
raise ICE(self.site, 'strange type %s' % typ)
@@ -4843,15 +5585,25 @@ def read(self):
def as_expr(self, typ):
return CompoundLiteral(self.site, self, typ)
def assign_to(self, dest, typ):
- '''output C statements to assign an lvalue'''
+ '''return a C string for a void expression to assign an lvalue'''
typ = safe_realtype(typ)
if isinstance(typ, StructType):
- # (void *) cast to avoid GCC erroring if the target type is
- # (partially) const-qualified. See ExpressionInitializer.assign_to
- out('memcpy((void *)&%s, (%s){%s}, sizeof %s);\n' % (
- dest.read(),
- TArray(typ, mkIntegerLiteral(self.site, 1)).declaration(''),
- self.read(), dest.read()))
+ if typ.is_raii:
+ from .codegen import get_raii_type_info
+ info = get_raii_type_info(typ)
+                return info.read_linear_move_lval(
+                    dest,
+                    f'(({typ.declaration("")}){self.read()})')
+ else:
+ # (void *) cast to avoid GCC erroring if the target type is
+ # (partially) const-qualified.
+ # See ExpressionInitializer.assign_to
+ return 'memcpy((void *)&%s, (%s){%s}, sizeof %s)' % (
+ dest,
+ TArray(typ,
+ mkIntegerLiteral(self.site, 1)).declaration(''),
+ self.read(), dest)
+
else:
raise ICE(self.site, f'unexpected type for initializer: {typ}')
@@ -4868,9 +5620,10 @@ class MemsetInitializer(Initializer):
This initializer may only be used for struct or array initializers.
'''
- __slots__ = ('site',)
- def __init__(self, site):
+ __slots__ = ('site','ignore_raii')
+ def __init__(self, site, ignore_raii=False):
self.site = site
+ self.ignore_raii = ignore_raii
def __str__(self):
return self.read()
def __repr__(self):
@@ -4882,18 +5635,25 @@ def decref(self):
def read(self):
return '{0}'
def as_expr(self, typ):
+ if isinstance(safe_realtype_shallow(typ), TArray):
+ return ArrayCompoundLiteral(self.site, self, typ)
return CompoundLiteral(self.site, self, typ)
def assign_to(self, dest, typ):
'''output C statements to assign an lvalue'''
assert isinstance(safe_realtype(typ),
(TExternStruct, TStruct, TArray, TEndianInt, TTrait,
- THook))
+ THook, TString))
+
+ if not self.ignore_raii and typ.is_raii:
+ from .codegen import get_raii_type_info
+ info = get_raii_type_info(typ)
+ return f'_DML_RAII_ZERO_OUT({info.cident_destructor}, {dest})'
+
# (void *) cast to avoid GCC erroring if the target type is
# (partially) const-qualified. See ExpressionInitializer.assign_to
- out('memset((void *)&%s, 0, sizeof(%s));\n'
- % (dest.read(), typ.declaration('')))
+ return f'memset((void *)&{dest}, 0, sizeof({typ.declaration("")}))'
-class CompoundLiteral(Expression):
+class CompoundLiteral(Orphan):
@auto_init
def __init__(self, site, init, type):
assert isinstance(init, (CompoundInitializer,
@@ -4902,7 +5662,109 @@ def __init__(self, site, init, type):
def __str__(self):
return 'cast(%s, %s)' % (self.init, self.type)
def read(self):
- return f'({self.type.declaration("")}) {self.init.read()}'
+ return f'(({self.type.declaration("")}){self.init.read()})'
+
+class ArrayCompoundLiteral(Expression):
+ slots = ('read_adopted', 'is_stack_allocated')
+ # TODO(RAII) writable?
+ addressable = True
+ c_lval = True
+ @auto_init
+ def __init__(self, site, init, type):
+ assert isinstance(init, (CompoundInitializer, MemsetInitializer))
+ self.is_stack_allocated = isinstance(TopRAIIScope.active,
+ MethodRAIIScope)
+ self.read_adopted = RAIIScope.reserve_orphan_adoption(
+ site, CompoundLiteral(site, init, type))
+
+ def __str__(self):
+ return 'cast(%s, %s)' % (self.init, self.type)
+ def read(self):
+ return self.read_adopted()
+
+ @property
+ def is_pointer_to_stack_allocation(self):
+ return self.is_stack_allocated
+
+class VectorCompoundLiteral(Orphan):
+ '''Initializer for a variable of vector type, using the
+ {value1, value2, ...} syntax as in C'''
+ priority = Cast.priority
+ slots = ('type',)
+ @auto_init
+ def __init__(self, site, inits, basetype):
+ self.type = TVector(basetype)
+
+ def __str__(self):
+ return ('cast({%s}, vect(%s))'
+ % (','.join(str(e) for e in self.inits),
+ str(self.basetype)))
+
+ def read(self):
+ if self.inits:
+ # The statement expression limits the lifetime of the compound
+ # literal, cause the compiler won't be able to tell otherwise
+ # that it needn't consume the stack space.
+ return ('(({ _dml_vect_from_array(sizeof(%s), %s, %s); }))'
+ % (self.basetype.declaration(''),
+ self.as_array_literal().read(),
+ len(self.inits)))
+ else:
+ return '((const _dml_vect_t){0})'
+
+ def as_array_literal(self):
+ if self.inits:
+ count = mkIntegerLiteral(self.site, len(self.inits))
+ const_basetype = conv_const(True, self.basetype)
+ return CompoundLiteral(
+ self.site, CompoundInitializer(self.site, self.inits),
+ TArray(const_basetype, count))
+ else:
+ return NullConstant(self.site)
+
+class RAIIDupe(Orphan):
+ priority = dml.expr.Apply.priority
+ slots = ('info',)
+ @auto_init
+ def __init__(self, site, expr):
+ assert expr.ctype().is_raii
+ assert not isinstance(safe_realtype_shallow(expr.ctype()), TArray)
+ from .codegen import get_raii_type_info
+ self.info = get_raii_type_info(expr.ctype())
+
+ def read(self):
+ return self.info.read_dupe(self.expr.read())
+
+ def ctype(self):
+ return self.expr.ctype()
+
+ def discard(self):
+ return self.expr.discard()
+
+class AdoptedOrphan(Expression):
+ priority = dml.expr.Apply.priority
+ slots = ('read_adopted', 'is_stack_allocated')
+ c_lval = True
+ @auto_init
+ def __init__(self, site, expr):
+ assert expr.orphan
+ self.is_stack_allocated = isinstance(TopRAIIScope.active,
+ MethodRAIIScope)
+ self.read_adopted = RAIIScope.reserve_orphan_adoption(site, expr)
+
+ def __str__(self):
+ return str(self.expr)
+
+ def ctype(self):
+ return self.expr.ctype()
+
+ def read(self):
+ return self.read_adopted()
+
+def mkAdoptedOrphan(site, expr):
+ if expr.orphan and expr.ctype().is_raii:
+ return AdoptedOrphan(site, expr)
+ return expr
class StructDefinition(Statement):
"""A C struct definition appearing in a local scope, like
@@ -4911,14 +5773,16 @@ class StructDefinition(Statement):
is preceded by a StructDefinition."""
@auto_init
def __init__(self, site, structtype): pass
- def toc(self):
+ def toc_stmt(self):
self.structtype.resolve().print_struct_definition()
mkStructDefinition = StructDefinition
class Declaration(Statement):
"A variable declaration"
is_declaration = True
- def __init__(self, site, name, type, init = None, unused = False):
+ slots = ('toc_raii_bind',)
+ def __init__(self, site, name, type, init = None, unused = False,
+ unscoped_raii = False):
assert site
self.site = site
self.name = name
@@ -4930,24 +5794,693 @@ def __init__(self, site, name, type, init = None, unused = False):
if name.startswith("__"):
assert unused == True
self.unused = unused
+ self.unscoped_raii = unscoped_raii
+ if type.is_raii and not unscoped_raii:
+ self.toc_raii_bind = RAIIScope.reserve_bind_lval(
+ site, mkLit(site, self.name, type))
+ else:
+ self.toc_raii_bind = None
- def toc(self):
+ def toc_stmt(self):
self.linemark()
if (isinstance(self.init, MemsetInitializer)
- and not deep_const(self.type)):
+ and not deep_const(self.type)
+ and not self.type.is_raii):
# ducks a potential GCC warning, and also serves to
# zero-initialize VLAs
self.type.print_declaration(self.name, unused = self.unused)
- self.init.assign_to(mkLit(self.site, self.name, self.type),
- self.type)
+ self.linemark()
+ out(self.init.assign_to(self.name, self.type) + ';\n')
else:
self.type.print_declaration(
self.name, init=self.init.read() if self.init else None,
unused=self.unused)
+ if self.toc_raii_bind is not None:
+ self.toc_raii_bind()
mkDeclaration = Declaration
+class RAIIScope(metaclass=SlotsMeta):
+ slots = ('top_scope', 'allocs', 'scope_stack_ix',)
+ def __init__(self):
+ self.top_scope = None
+ self.scope_stack_ix = None
+ self.allocs = 0
+
+ @staticmethod
+ def scope_stack():
+ return TopRAIIScope.active.scope_stack
+
+ def __enter__(self):
+ assert TopRAIIScope.active is not None
+ self.top_scope = TopRAIIScope.active
+ TopRAIIScope.active.push_scope(self)
+ return self
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ assert TopRAIIScope.active is self.top_scope
+ top = self.top_scope.pop_scope()
+ assert top is self
+
+ @staticmethod
+ def reserve_orphan_adoption(site, expr):
+ assert TopRAIIScope.active is not None
+ return TopRAIIScope.active.reserve_orphan_adoption(
+ site, expr, RAIIScope.scope_stack()[-1])
+
+ @staticmethod
+ def reserve_bind_lval(site, expr):
+ return RAIIScope.reserve_bind_lvals(site, (expr,))
+
+ @staticmethod
+ def reserve_bind_lvals(site, exprs):
+ assert TopRAIIScope.active
+ return TopRAIIScope.active.reserve_bind_lvals(
+ site, exprs, RAIIScope.scope_stack()[-1])
+
+ def clear_scope(self, site):
+ assert self.top_scope is not None
+ self.top_scope.clear_subscope(site, self)
+
+ @property
+ def used(self):
+ return self.allocs > 0
+
+ @property
+ def completed(self):
+ return self.top_scope is not None and self.scope_stack_ix is None
+
+class TopRAIIScope(RAIIScope):
+ active = None
+
+ slots = ('prev_active',)
+
+ @auto_init
+ def __init__(self):
+ self.prev_active = None
+
+ def __enter__(self):
+ self.prev_active = TopRAIIScope.active
+ TopRAIIScope.active = self
+ return RAIIScope.__enter__(self)
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ RAIIScope.__exit__(self, exc_type, exc_val, exc_tb)
+ assert TopRAIIScope.active is self
+ TopRAIIScope.active = self.prev_active
+
+ @property
+ def scope_stack(self):
+ raise ICE(None,
+ f'{type(self).__name__}.scope_stack not supported')
+
+ def clear_subscope(self, site, scope):
+ raise ICE(site,
+ f'{type(self).__name__}.clear_subscope() not supported')
+
+ def reserve_bind_lval(self, site, lval, scope):
+ raise ICE(site,
+ f'{type(self).__name__}.reserve_bind_lval() not supported')
+
+ def reserve_bind_lvals(self, site, exprs, scope):
+ raise ICE(site,
+ f'{type(self).__name__}.reserve_bind_lvals() not supported')
+
+ def reserve_orphan_adoption(self, site, expr, scope):
+ raise ICE(site,
+ (f'{type(self).__name__}.reserve_orphan_adoption() '
+ + 'not supported'))
+
+ def push_scope(self, scope):
+ if scope is not self:
+ raise ICE(None, f'{type(self).__name__}: subscopes not supported')
+ self.scope_stack_ix = 0
+
+ def pop_scope(self):
+ assert self.scope_stack_ix == 0
+ self.scope_stack_ix = None
+ return self
+
+
+class MethodRAIIScope(TopRAIIScope):
+ slots = ('scopes', 'scope_stack', 'used_scopes')
+
+ @auto_init
+ def __init__(self):
+ self.scopes = Set()
+ self.scope_stack = []
+ self.used_scopes = None
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ TopRAIIScope.__exit__(self, exc_type, exc_val, exc_tb)
+ self.used_scopes = {scope: i
+ for (i, scope) in enumerate(filter(
+ lambda x: x.used, self.scopes))}
+
+ def reserve_bind_lvals(self, site, exprs, subscope):
+ assert self.used_scopes is None and subscope in self.scopes
+ from .codegen import get_raii_type_info
+ binds = [(expr, get_raii_type_info(expr.ctype()).cident_destructor)
+ for expr in exprs]
+ subscope.allocs += len(binds)
+ def toc_stmt():
+ assert self.used_scopes is not None
+ for (expr, destructor) in binds:
+ site_linemark(site)
+ out(f'DML_RAII_SCOPE_LVAL({self.used_scopes[subscope]}, '
+ + f'{destructor}, {expr.read()});\n')
+ return toc_stmt
+
+    def clear_subscope(self, site, scope):
+        assert self.used_scopes is not None and scope in self.used_scopes
+        site_linemark(site)
+        out(f'DML_RAII_SCOPE_CLEANUP({self.used_scopes[scope]});\n')
+
+ def reserve_orphan_adoption(self, site, expr, subscope):
+ assert self.used_scopes is None and subscope in self.scopes
+ subscope.allocs += 1
+ typ = safe_realtype_shallow(expr.ctype())
+ if typ.is_raii:
+ from .codegen import get_raii_type_info
+ destructor = get_raii_type_info(typ).cident_destructor
+ else:
+ assert isinstance(typ, TArray)
+ destructor = 'NULL'
+
+ array_prefix = 'ARRAY_'*(isinstance(typ, TArray))
+
+ def read():
+ assert self.used_scopes is not None
+ return (f'DML_RAII_SCOPE_{array_prefix}ORPHAN('
+ + f'{self.used_scopes[subscope]}, {destructor}, '
+ + f'{expr.read()})')
+ return read
+
+ def push_scope(self, scope):
+ assert self.used_scopes is None
+ assert scope.scope_stack_ix is None
+ scope.scope_stack_ix = len(self.scope_stack)
+ self.scope_stack.append(scope)
+ self.scopes.add(scope)
+
+ def pop_scope(self):
+ assert self.used_scopes is None
+ scope = self.scope_stack.pop()
+ scope.scope_stack_ix = None
+ return scope
+
+class UnusedMethodRAIIScope(MethodRAIIScope):
+ def reserve_orphan_adoption(self, site, expr, subscope):
+ raise ICE(site, 'orphan adopted in UnusedMethodRAIIScope')
+
+ def reserve_bind_lvals(self, site, exprs, subscope):
+ raise ICE(site, 'lval bound in UnusedMethodRAIIScope')
+
+class StaticRAIIScope(TopRAIIScope):
+ def reserve_orphan_adoption(self, site, expr, subscope):
+ assert subscope is self
+ self.allocs += 1
+ typ = safe_realtype_shallow(expr.ctype())
+ assert typ.is_raii or isinstance(typ, TArray)
+ def read():
+ if isinstance(typ, TArray):
+ unconst_typ = safe_realtype_unconst(typ)
+ if deep_const(unconst_typ):
+ return f'DML_STATIC_ARRAY_CONSTSAFE({expr.read()})'
+ else:
+ return (f'DML_STATIC_ARRAY({unconst_typ.declaration("")}, '
+ + f'{expr.read()})')
+
+ return expr.read()
+ return read
+
+
+ @property
+ def scope_stack(self):
+ return [self]
+
+# TODO(RAII): Very niche, and currently unleveraged! This is for orphans
+# adopted in the constant initializers of sessions/saveds. For example...
+#
+# session int *p = cast({0, 1, 2, 3}, int[4]);
+#
+# But no expression using RAIIScope.reserve_orphan_adoption is currently
+# considered constant.
+class SessionRAIIScope(TopRAIIScope):
+ def reserve_orphan_adoption(self, site, expr, subscope):
+ assert subscope is self
+ ix = self.allocs
+ self.allocs += 1
+ typ = safe_realtype_shallow(expr.ctype())
+ if typ.is_raii:
+ from .codegen import get_raii_type_info
+ destructor = get_raii_type_info(typ).cident_destructor
+ else:
+ assert isinstance(typ, TArray)
+ destructor = 'NULL'
+ array_prefix = 'ARRAY_'*isinstance(typ, TArray)
+ def read():
+ return (f'DML_RAII_SESSION_{array_prefix}ORPHAN('
+ + f'_dev->_orphan_allocs[{ix}], {destructor}, '
+ + f'{expr.read()})')
+
+ return read
+
+ @property
+ def scope_stack(self):
+ return [self]
+
+class RAIIScopeDeclarations(Statement):
+ @auto_init
+ def __init__(self, site, methodscope):
+ assert isinstance(methodscope, MethodRAIIScope)
+
+ @property
+ def is_declaration(self):
+ return bool(self.methodscope.used_scopes is None
+ or self.methodscope.used_scopes)
+
+ @property
+ def is_empty(self):
+ return bool(self.methodscope.used_scopes is not None
+ and not self.methodscope.used_scopes)
+
+ def toc_stmt(self):
+ raise ICE(self.site, 'RAIIScopeDeclarations.toc_stmt: nonsensical')
+
+ def toc(self):
+ for (scope, c_uniq) in self.methodscope.used_scopes.items():
+ self.linemark()
+ out(f'_scope_allocation_t _scope_{c_uniq}_allocs'
+ + f'[{scope.allocs}] UNUSED;\n')
+ if self.methodscope.used_scopes:
+ self.linemark()
+ out('uint16 _scope_allocs_lens'
+ + f'[{len(self.methodscope.used_scopes)}] UNUSED = {{0}};\n')
+
+mkRAIIScopeDeclarations = RAIIScopeDeclarations
+
+class RAIIScopeBindLVals(Statement):
+ slots = ('toc_bind_lvals',)
+ @auto_init
+ def __init__(self, site, scope, lvals):
+ self.toc_bind_lvals = TopRAIIScope.active.reserve_bind_lvals(
+ site, lvals, scope)
+
+ def toc_stmt(self):
+ out('{\n', postindent = 1)
+ self.toc_inline()
+ out('}\n', preindent = -1)
+
+ def toc(self):
+ self.toc_bind_lvals()
+
+ @property
+ def is_empty(self):
+ return not self.lvals
+
+mkRAIIScopeBindLVals = RAIIScopeBindLVals
+
+class RAIIScopeClears(Statement):
+ @auto_init
+ def __init__(self, site, scopes): pass
+
+ def toc_stmt(self):
+ out('{\n', postindent = 1)
+ self.toc_inline()
+ out('}\n', preindent = -1)
+
+ def toc(self):
+ for scope in self.scopes:
+ if scope.used:
+ scope.clear_scope(self.site)
+
+ @property
+ def is_empty(self):
+ return all(scope.completed and not scope.used
+ for scope in self.scopes)
+
+def mkRAIIScopeClears(site, scopes):
+ return RAIIScopeClears(site, [scope for scope in scopes
+ if scope.used or not scope.completed])
+
+class StringAppend(Statement):
+ @auto_init
+ def __init__(self, site, tgt, src):
+ assert not src.orphan
+ assert tgt.c_lval
+
+ def toc_stmt(self):
+ self.linemark()
+ out(f'_dml_string_cat(&({self.tgt.read()}), {self.src.read()});\n')
+
+class StringCAppend(Statement):
+ @auto_init
+ def __init__(self, site, tgt, src):
+ assert tgt.c_lval
+
+ def toc_stmt(self):
+ self.linemark()
+ out(f'_dml_string_addstr(&({self.tgt.read()}), {self.src.read()});\n')
+
+def mkStringAppend(site, tgt, src):
+ if not tgt.writable:
+ raise ERVAL(tgt, '+=')
+ elif deep_const(tgt.ctype()):
+ raise ENCONST(site)
+ if isinstance(src, StringConstant):
+ return StringCAppend(site, tgt, src)
+ return StringAppend(site, tgt, mkAdoptedOrphan(site, src))
+
+class VectorAppend(Statement):
+ slots = ('basetyp_info',)
+ @auto_init
+ def __init__(self, site, basetyp, tgt, src):
+ assert not src.orphan
+ assert tgt.c_lval
+ from .codegen import get_raii_type_info
+ self.basetyp_info = (get_raii_type_info(basetyp)
+ if basetyp.is_raii else None)
+
+    def toc_stmt(self):
+        self.linemark()
+        if self.basetyp_info:
+            out('_dml_vect_append_raii(sizeof(%s), %s, &(%s), %s);\n'
+                % (self.basetyp.declaration(''),
+                   self.basetyp_info.cident_copier, self.tgt.read(),
+                   self.src.read()))
+        else:
+            out('_dml_vect_append(sizeof(%s), &(%s), %s);\n'
+                % (self.basetyp.declaration(''), self.tgt.read(),
+                   self.src.read()))
+
+class VectorCompoundInitAppend(Statement):
+ @auto_init
+ def __init__(self, site, basetype, tgt, src, length):
+ assert tgt.c_lval
+
+ def toc_stmt(self):
+ self.linemark()
+ # Limit the lifetime of the compound literal
+ out('{ _dml_vect_append_array(sizeof(%s), &(%s), %s, %dULL); }\n'
+ % (self.basetype.declaration(''), self.tgt.read(), self.src.read(),
+ self.length))
+
+def mkVectorAppend(site, tgt, src):
+ basetype = safe_realtype_shallow(tgt.ctype()).base
+ if not tgt.writable:
+ raise ERVAL(tgt, '+=')
+ elif deep_const(tgt.ctype()):
+ raise ENCONST(site)
+
+ if isinstance(src, VectorCompoundLiteral):
+ if len(src.inits) == 0:
+ return mkNull(site)
+ elif len(src.inits) == 1:
+ return VectorPushApply(site, 'back', tgt,
+ src.inits[0].as_expr(basetype))
+ else:
+ return VectorCompoundInitAppend(site, basetype, tgt,
+ src.as_array_literal(),
+ len(src.inits))
+
+ return VectorAppend(site, basetype, tgt, mkAdoptedOrphan(site, src))
+
+def str_expr_pseudomethod(expr, sub):
+ expr = (f'({expr})'
+ if expr.priority < StructMember.priority
+ else str(expr))
+ return f'{expr}.{sub}'
+
+class VectorCBufRef(PseudoMethodRef):
+ @auto_init
+ def __init__(self, site, expr):
+ pass
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, 'c_buf')
+ def apply(self, inits, location, scope):
+ expr = self.expr
+ if inits:
+ raise EARG(self.site, '.c_buf')
+ # No deep_const is intentional, as it's only shallow const that results
+ # in invalid generated C.
+ # TODO(RAII): Though perhaps deep_const should be used anyway...
+ if safe_realtype_shallow(expr.ctype()).const:
+ raise ECONST(self.site)
+ if not expr.writable:
+ raise ERVAL(self.expr.site, '.c_buf()')
+
+ base_const = deep_const(expr.ctype())
+
+ return VectorCBufApply(self.site, expr, base_const)
+
+class VectorCBufApply(Expression):
+ priority = dml.expr.Apply.priority
+
+ slots = ('basetype',)
+
+ @auto_init
+ def __init__(self, site, expr, base_const):
+ typ = safe_realtype_shallow(expr.ctype())
+ assert not expr.orphan and expr.c_lval and not typ.const
+ self.basetype = conv_const(base_const, typ.base)
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, 'c_buf()')
+
+ def read(self):
+ return (f'DML_VECT_ELEMENTS({self.basetype.declaration("")}, '
+ + f'{self.expr.read()})')
+
+ def ctype(self):
+ return TPtr(self.basetype)
+
+ @property
+ def is_pointer_to_stack_allocation(self):
+ return self.expr.is_stack_allocated
+
+
+mkVectorCBuf = VectorCBufRef
+
+class VectorPopRef(PseudoMethodRef):
+ @auto_init
+ def __init__(self, site, direction, expr):
+ pass
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, f'pop_{self.direction}')
+ def apply(self, inits, location, scope):
+ if inits:
+ raise EARG(self.site, f'.pop_{self.direction}')
+ if not self.expr.writable:
+ raise ERVAL(self.site, f'.pop_{self.direction}()')
+ if deep_const(self.expr.ctype()):
+ raise ECONST(self.site)
+ # Writable orphans are not impossible in principle
+ expr = mkAdoptedOrphan(self.site, self.expr)
+ return VectorPopApply(self.site, self.direction, expr)
+
+class VectorPopApply(Orphan):
+ priority = StructMember.priority
+
+ slots = ('basetype',)
+
+ @auto_init
+ def __init__(self, site, direction, expr):
+ assert not expr.orphan
+ typ = safe_realtype_shallow(expr.ctype())
+ self.basetype = typ.base
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, f'pop_{self.direction}()')
+
+ def read(self):
+ t = self.basetype.declaration('')
+ return (f'DML_VECT_POP_{self.direction.upper()}({t}, '
+ + f'{self.expr.read()})')
+
+ def ctype(self):
+ return self.basetype
+
+def mkVectorPopBack(site, expr):
+ return VectorPopRef(site, 'back', expr)
+
+def mkVectorPopFront(site, expr):
+ return VectorPopRef(site, 'front', expr)
+
+class VectorPushRef(PseudoMethodRef):
+ @auto_init
+ def __init__(self, site, direction, expr):
+ pass
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, f'push_{self.direction}')
+ def apply(self, inits, location, scope):
+ if not self.expr.writable:
+ raise ERVAL(self.site, f'.push_{self.direction}(...)')
+ if deep_const(self.expr.ctype()):
+ raise ECONST(self.site)
+ basetype = safe_realtype_shallow(self.expr.ctype()).base
+ [item] = typecheck_inarg_inits(
+ self.site, inits, [('item', basetype)], location, scope,
+ f'push_{self.direction}')
+ # Writable orphans are not impossible in principle
+ expr = mkAdoptedOrphan(self.site, self.expr)
+ return VectorPushApply(self.site, self.direction, expr, item)
+
+class VectorPushApply(Expression):
+ priority = AssignOp.priority
+
+ type = void
+
+ slots = ('basetype',)
+
+ @auto_init
+ def __init__(self, site, direction, expr, item):
+ assert not expr.orphan
+ typ = safe_realtype_shallow(expr.ctype())
+ self.basetype = typ.base
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr,
+ f'push_{self.direction}({self.item})')
+
+ def read(self):
+ t = self.basetype.declaration('')
+ return ('DML_SAFE_ASSIGN('
+ + f'DML_VECT_NEW_ELEM_AT_{self.direction.upper()}({t}, '
+ + f'{self.expr.read()}), {self.item.read()})')
+
+def mkVectorPushBack(site, expr):
+ return VectorPushRef(site, 'back', expr)
+
+def mkVectorPushFront(site, expr):
+ return VectorPushRef(site, 'front', expr)
+
+class VectorInsertRef(PseudoMethodRef):
+ @auto_init
+ def __init__(self, site, expr):
+ pass
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, 'insert')
+ def apply(self, inits, location, scope):
+ if not self.expr.writable:
+            raise ERVAL(self.site, '.insert(...)')
+ if deep_const(self.expr.ctype()):
+ raise ECONST(self.site)
+ basetype = safe_realtype_shallow(self.expr.ctype()).base
+ [index, item] = typecheck_inarg_inits(
+ self.site, inits, [('index', TInt(32, False)),
+ ('item', basetype)],
+ location, scope, 'insert')
+ if index.constant and index.value < 0:
+ raise EOOB(index)
+ # Writable orphans are not impossible in principle
+ expr = mkAdoptedOrphan(self.expr.site, self.expr)
+ return VectorInsertApply(self.site, basetype, expr, index, item)
+
+mkVectorInsert = VectorInsertRef
+
+class VectorInsertApply(Expression):
+ priority = AssignOp.priority
+
+ type = void
+
+ @auto_init
+ def __init__(self, site, basetype, expr, index, item):
+ assert not expr.orphan
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr,
+ f'insert({self.index}, {self.item})')
+ def read(self):
+ t = self.basetype.declaration('')
+ return (f'DML_SAFE_ASSIGN(DML_VECT_NEW_ELEM_AT({t}, '
+ + f'{self.expr.read()}, {self.index.read()}), '
+ + f'{self.item.read()})')
+
+class VectorRemoveRef(PseudoMethodRef):
+ @auto_init
+ def __init__(self, site, expr):
+ pass
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr, 'remove')
+ def apply(self, inits, location, scope):
+ if not self.expr.writable:
+ raise ERVAL(self.site, '.remove(...)')
+ if deep_const(self.expr.ctype()):
+ raise ECONST(self.site)
+ basetype = safe_realtype_shallow(self.expr.ctype()).base
+ [index] = typecheck_inarg_inits(
+ self.site, inits, [('index', TInt(32, False))],
+ location, scope, 'remove')
+ if index.constant and index.value < 0:
+ raise EOOB(index)
+ # Writable orphans are not impossible in principle
+ expr = mkAdoptedOrphan(self.expr.site, self.expr)
+ return VectorRemoveApply(self.site, basetype, expr, index)
+
+mkVectorRemove = VectorRemoveRef
+
+class VectorRemoveApply(Orphan):
+ priority = dml.expr.Apply.priority
+
+ @auto_init
+ def __init__(self, site, basetype, expr, index):
+ assert not expr.orphan
+
+ def __str__(self):
+ return str_expr_pseudomethod(self.expr,
+ f'remove({self.index})')
+ def read(self):
+ t = self.basetype.declaration('')
+        return (f'DML_VECT_REMOVE({t}, {self.expr.read()}, '
+ + f'{self.index.read()})')
+
+ def ctype(self):
+ return self.basetype
+
+class VectorAdd(Orphan):
+ priority = dml.expr.Apply.priority
+
+ slots = ('base_type', 'info')
+
+ # Implemented by INHERITING ownership of first argument, but only BORROWING
+ # second argument. Meaning the second argument is never duped, and, if
+ # orphan, must be adopted.
+ def __init__(self, site, lh, rh):
+ self.site = site
+ self.lh = lh
+ self.rh = mkAdoptedOrphan(rh.site, rh)
+ # TODO(RAII) imports...
+ from .codegen import get_raii_type_info, VectorRAIITypeInfo
+ self.base_type = safe_realtype_unconst(lh.ctype()).base
+ self.info = get_raii_type_info(self.ctype())
+ assert isinstance(self.info, VectorRAIITypeInfo)
+
+ def __str__(self):
+ lh = str(self.lh)
+ rh = str(self.rh)
+ if self.lh.priority <= Add.priority:
+ lh = '('+lh+')'
+ if self.rh.priority <= Add.priority:
+ rh = '('+rh+')'
+ return lh + ' + ' + rh
+
+ def ctype(self):
+ return TVector(self.base_type)
+
+ def read(self):
+ lh = self.lh.read()
+ if not self.lh.orphan:
+ lh = self.info.read_dupe(lh)
+ sizeof = f'sizeof({self.base_type.declaration("")})'
+ if self.info.base_info is None:
+ return f'_dml_vect_add({sizeof}, {lh}, {self.rh.read()})'
+ else:
+ copier = self.info.base_info.cident_copier
+ return (f'_dml_vect_add_raii({sizeof}, {copier}, {lh}, '
+ + f'{self.rh.read()})')
+
def possible_side_effect(init):
"""Return True if this expression might have some side effect
This means that the expression has to be evaluated exactly once."""
@@ -4967,7 +6500,7 @@ def possible_side_effect(init):
return False
return True
-def sym_declaration(sym, unused=False):
+def sym_declaration(sym, unused=False, unscoped_raii=False):
assert not isinstance(sym, symtab.StaticSymbol)
refcount = sym.refcount()
if not sym.stmt and refcount == 0 and not possible_side_effect(sym.init):
@@ -4980,7 +6513,7 @@ def sym_declaration(sym, unused=False):
unused = unused or (refcount == 0) or sym.value.startswith("__")
return mkDeclaration(sym.site, sym.value, sym.type,
- sym.init, unused)
+ sym.init, unused, unscoped_raii)
###
@@ -5049,5 +6582,6 @@ def log_statement(site, node, indices, logtype, level, groups, fmt, *args):
groups,
mkStringConstant(site, fmt) ] +
list(args),
- fun.ctype())
+ fun.ctype(),
+ True)
return mkExpressionStatement(site, x)
diff --git a/py/dml/ctree_test.py b/py/dml/ctree_test.py
index f5200a979..39d09b4ed 100644
--- a/py/dml/ctree_test.py
+++ b/py/dml/ctree_test.py
@@ -40,13 +40,13 @@ def test(self):
class DummyStatement(ctree.Statement):
def __repr__(self):
return 'D'
- def toc(self): pass
+ def toc_stmt(self): pass
class DummyDecl(ctree.Statement):
is_declaration = 1
def __repr__(self):
return 'decl'
- def toc(self): pass
+ def toc_stmt(self): pass
class Test_mkcompound(unittest.TestCase):
def test(self):
diff --git a/py/dml/dmlparse.py b/py/dml/dmlparse.py
index 8ce2a9643..005bea610 100644
--- a/py/dml/dmlparse.py
+++ b/py/dml/dmlparse.py
@@ -826,12 +826,23 @@ def constant(t):
@prod_dml12
def extern(t):
'toplevel : EXTERN cdecl_or_ident SEMI'
- t[0] = ast.extern(site(t), t[2])
+ t[0] = ast.extern(site(t), t[2], None)
@prod_dml14
def extern(t):
- 'toplevel : EXTERN cdecl SEMI'
- t[0] = ast.extern(site(t), t[2])
+ 'toplevel : EXTERN cdecl maybe_extern_as SEMI'
+ t[0] = ast.extern(site(t), t[2], t[3])
+
+
+@prod_dml14
+def maybe_extern_as_no(t):
+ 'maybe_extern_as : '
+ t[0] = None
+
+@prod_dml14
+def maybe_extern_as_yes(t):
+ 'maybe_extern_as : AS ident'
+ t[0] = t[2]
@prod
def typedef(t):
@@ -1269,6 +1280,14 @@ def basetype_hook(t):
cdecl_list_enforce_unnamed(t[3])
t[0] = ('hook', t[3])
+@prod_dml14
+def basetype_vect(t):
+ '''basetype : VECT LPAREN cdecl RPAREN'''
+ (_, site, name, typ) = t[3]
+ if name:
+ report(ESYNTAX(site, name, None))
+ t[0] = ('vect', (site, typ))
+
@prod
def cdecl2(t):
'cdecl2 : cdecl3'
@@ -1285,12 +1304,13 @@ def cdecl2_ptr(t):
t[0] = ['pointer'] + t[2]
@prod_dml14
-def cdecl2_vect(t):
+def cdecl2_legacy_vect(t):
'cdecl2 : VECT cdecl2'
# vect is actually experimental in 1.2 as well, but we will probably
# defensively keep it the way it is, because it's used a lot.
# TODO: improve how vect works in 1.4, and make it public (US325)
- report(WEXPERIMENTAL(site(t), 'vect types'))
+ report(WDEPRECATED(site(t),
+ "vect.h vector type. Use 'vect(BASETYPE)' instead."))
t[0] = ['vect'] + t[2]
@prod_dml12
@@ -1586,25 +1606,34 @@ def expression_assign(t):
'expression : expression EQUALS expression'
t[0] = ast.set(site(t, 2), t[1], t[3])
-@prod
-def assignop(t):
- '''assignop : expression PLUSEQUAL expression
- | expression MINUSEQUAL expression
- | expression TIMESEQUAL expression
- | expression DIVEQUAL expression
- | expression MODEQUAL expression
- | expression BOREQUAL expression
- | expression BANDEQUAL expression
- | expression BXOREQUAL expression
- | expression LSHIFTEQUAL expression
- | expression RSHIFTEQUAL expression'''
- t[0] = ast.assignop(site(t, 2), t[1], t[2], t[3])
-
@prod_dml12
def expression_assignop(t):
- '''expression : assignop'''
- (tgt, op, src) = t[1].args
- t[0] = ast.set(t[1].site, tgt, ast.binop(t[1].site, tgt, op[:-1], src))
+ '''expression : expression PLUSEQUAL expression
+ | expression MINUSEQUAL expression
+ | expression TIMESEQUAL expression
+ | expression DIVEQUAL expression
+ | expression MODEQUAL expression
+ | expression BOREQUAL expression
+ | expression BANDEQUAL expression
+ | expression BXOREQUAL expression
+ | expression LSHIFTEQUAL expression
+ | expression RSHIFTEQUAL expression'''
+ t[0] = ast.set(site(t, 2), t[1],
+ ast.binop(t[1].site, t[1], t[2][:-1], t[3]))
+
+@prod_dml14
+def assignop(t):
+ '''assignop : expression PLUSEQUAL single_initializer
+ | expression MINUSEQUAL single_initializer
+ | expression TIMESEQUAL single_initializer
+ | expression DIVEQUAL single_initializer
+ | expression MODEQUAL single_initializer
+ | expression BOREQUAL single_initializer
+ | expression BANDEQUAL single_initializer
+ | expression BXOREQUAL single_initializer
+ | expression LSHIFTEQUAL single_initializer
+ | expression RSHIFTEQUAL single_initializer'''
+ t[0] = ast.assignop(site(t, 2), t[1], t[2], t[3])
@prod
def expression_conditional(t):
@@ -1640,10 +1669,21 @@ def expression_binary_operator(t):
# TODO: proper C-cast
-@prod
+@prod_dml12
def expression_cast(t):
- 'expression : CAST LPAREN expression COMMA ctypedecl RPAREN'
- t[0] = ast.cast(site(t), t[3], t[5])
+ 'expression : CAST LPAREN expression COMMA cdecl RPAREN'
+ (_, psite, name, typ) = t[5]
+ if name:
+ report(ESYNTAX(psite, name, ''))
+ t[0] = ast.cast(site(t), ast.initializer_scalar(site(t, 3), t[3]), typ)
+
+@prod_dml14
+def expression_cast(t):
+ 'expression : CAST LPAREN single_initializer COMMA cdecl RPAREN'
+ (_, psite, name, typ) = t[5]
+ if name:
+ report(ESYNTAX(psite, name, ''))
+ t[0] = ast.cast(site(t), t[3], typ)
# TODO: proper C-sizeof
@@ -1784,15 +1824,33 @@ def typeop_arg_par(t):
'''typeoparg : LPAREN ctypedecl RPAREN'''
t[0] = t[2]
+@prod_dml14
+def maybe_newdelete_spec_yes(t):
+ '''maybe_newdelete_spec : LT ID GT
+ | LT EXTERN GT'''
+ supported_specs = ('extern', 'enriched')
+ spec = t[2]
+ if spec not in supported_specs:
+ suggestions = ' or '.join(f"'{spec}'" for spec in supported_specs)
+ report(ESYNTAX(site(t, 1), spec,
+ f"expected new/delete specification ({suggestions})"))
+ spec = 'enriched'
+ t[0] = spec
+
+@prod
+def maybe_newdelete_spec_no(t):
+ '''maybe_newdelete_spec : '''
+ t[0] = None
+
@prod
def expression_new(t):
- '''expression : NEW ctypedecl'''
- t[0] = ast.new(site(t), t[2], None)
+ '''expression : NEW maybe_newdelete_spec ctypedecl'''
+ t[0] = ast.new(site(t), t[2], t[3], None)
@prod
def expression_new_array(t):
- '''expression : NEW ctypedecl LBRACKET expression RBRACKET'''
- t[0] = ast.new(site(t), t[2], t[4])
+ '''expression : NEW maybe_newdelete_spec ctypedecl LBRACKET expression RBRACKET'''
+ t[0] = ast.new(site(t), t[2], t[3], t[5])
@prod
def expression_paren(t):
@@ -2130,43 +2188,38 @@ def statement_switch(t):
@prod_dml14
def statement_switch(t):
- 'statement_except_hashif : SWITCH LPAREN expression RPAREN LBRACE stmt_or_case_list RBRACE'
+ 'statement_except_hashif : SWITCH LPAREN expression RPAREN LBRACE case_blocks_list RBRACE'
stmts = t[6]
t[0] = ast.switch(site(t), t[3], ast.compound(site(t, 5), stmts))
@prod_dml14
-def stmt_or_case(t):
- '''stmt_or_case : statement_except_hashif
- | cond_case_statement
- | case_statement'''
- t[0] = t[1]
-
-@prod_dml14
-def switch_hashif(t):
- 'cond_case_statement : HASHIF LPAREN expression RPAREN LBRACE stmt_or_case_list RBRACE %prec LOWEST_PREC'
- t[0] = ast.hashif(site(t), t[3], ast.compound(site(t, 5), t[6]), None)
+def case_blocks_list_empty(t):
+ 'case_blocks_list : '
+ t[0] = []
@prod_dml14
-def switch_hashifelse(t):
- 'cond_case_statement : HASHIF LPAREN expression RPAREN LBRACE stmt_or_case_list RBRACE HASHELSE LBRACE stmt_or_case_list RBRACE'
- t[0] = ast.hashif(site(t), t[3], ast.compound(site(t, 5), t[6]),
- ast.compound(site(t, 9), t[10]))
+def case_blocks_list_case(t):
+ 'case_blocks_list : case_statement statement_except_hashif_list case_blocks_list'
+ t[0] = ([t[1]] + ([ast.compound(site(t, 2), t[2])] if t[2] else [])) + t[3]
@prod_dml14
-def stmt_or_case_list_empty(t):
- 'stmt_or_case_list : '
- t[0] = []
+def case_blocks_list_hashif(t):
+ 'case_blocks_list : HASHIF LPAREN expression RPAREN LBRACE case_blocks_list RBRACE case_blocks_list %prec LOWEST_PREC'
+ stmt = ast.hashif(site(t), t[3], ast.compound(site(t, 5), t[6]), None)
+ t[0] = [stmt] + t[8]
@prod_dml14
-def stmt_or_case_list_stmt(t):
- 'stmt_or_case_list : stmt_or_case_list stmt_or_case'
- t[0] = t[1] + [t[2]]
+def case_blocks_list_hashifelse(t):
+ 'case_blocks_list : HASHIF LPAREN expression RPAREN LBRACE case_blocks_list RBRACE HASHELSE LBRACE case_blocks_list RBRACE case_blocks_list'
+ stmt = ast.hashif(site(t), t[3], ast.compound(site(t, 5), t[6]),
+ ast.compound(site(t, 9), t[10]))
+ t[0] = [stmt] + t[12]
# Delete is an expression in C++, not a statement, but we don't care.
@prod
def statement_delete(t):
- 'statement_except_hashif : DELETE expression SEMI'
- t[0] = ast.delete(site(t), t[2])
+ 'statement_except_hashif : DELETE maybe_newdelete_spec expression SEMI'
+ t[0] = ast.delete(site(t), t[2], t[3])
@prod
def statent_try(t):
@@ -2482,6 +2535,16 @@ def statement_list_2(t):
'statement_list : statement_list statement'
t[0] = t[1] + [t[2]]
+@prod_dml14
+def statement_except_hashif_list_1(t):
+ 'statement_except_hashif_list : '
+ t[0] = []
+
+@prod_dml14
+def statement_except_hashif_list_2(t):
+ 'statement_except_hashif_list : statement_except_hashif_list statement_except_hashif'
+ t[0] = t[1] + [t[2]]
+
# local
@prod_dml12
diff --git a/py/dml/expr.py b/py/dml/expr.py
index 4f0ed704a..2bc3145d1 100644
--- a/py/dml/expr.py
+++ b/py/dml/expr.py
@@ -18,6 +18,7 @@
'NonValueArrayRef',
'mkLit', 'Lit',
'mkApply', 'mkApplyInits', 'Apply',
+ 'Orphan', 'OrphanWrap',
'mkNullConstant', 'NullConstant',
'StaticIndex',
'typecheck_inargs',
@@ -173,11 +174,29 @@ class Expression(Code):
# bitslicing.
explicit_type = False
- # Can the expression be assigned to?
+ # An expression is considered orphan if it evaluates to a value of an
+ # object that is never accessed again past the particular evaluation of the
+ # expression. This is typically known by virtue of the C representation of
+ # the expression not being an lvalue; for example, the return value of a
+ # function call can never be accessed save by the function call itself.
+ # Orphanhood is only important for the RAII architecture. This means that
+ # there are some expressions that *could* be considered orphans, and yet
+ # are not, as they cannot be of RAII type and so orphanhood status is
+ # irrelevant. Integer literals are an example of this.
+ orphan = False
+
+ # Can the expression be assigned to in DML?
# If writable is True, there is a method write() which returns a C
# expression to make the assignment.
writable = False
+ # Can the address of the expression be taken safely in DML?
+ # This implies c_lval, and typically implies writable.
+ addressable = False
+
+ # Is the C representation of the expression an lvalue?
+ c_lval = False
+
def __init__(self, site):
assert not site or isinstance(site, Site)
self.site = site
@@ -193,7 +212,14 @@ def read(self):
# Produce a C expression but don't worry about the value.
def discard(self):
- return self.read()
+ if self.orphan and self.ctype().is_raii:
+ from .codegen import get_raii_type_info
+ # TODO(RAII) oh i dislike this. I'd rather discard() produced a statement
+ return get_raii_type_info(self.ctype()).read_destroy(self.read())
+ elif self.constant:
+ return '(void)0'
+ else:
+ return self.read()
def ctype(self):
'''The corresponding DML type of this expression'''
@@ -203,10 +229,16 @@ def apply(self, inits, location, scope):
'Apply this expression as a function'
return mkApplyInits(self.site, self, inits, location, scope)
+ @property
+ def is_stack_allocated(self):
+ '''Returns true only if it's known that the storage for the value that
+ this expression evaluates to is temporary to a method scope'''
+ return self.orphan
+
@property
def is_pointer_to_stack_allocation(self):
'''Returns True only if it's known that the expression is a pointer
- to stack-allocated data'''
+ to storage that is temporary to a method scope'''
return False
def incref(self):
@@ -220,6 +252,11 @@ def copy(self, site):
return type(self)(
site, *(getattr(self, name) for name in self.init_args[2:]))
+ def write(self, source):
+ assert self.c_lval
+ return source.assign_to(self.read(), self.ctype())
+
+
class NonValue(Expression):
'''An expression that is not really a value, but which may validly
appear as a subexpression of certain expressions.
@@ -266,11 +303,14 @@ def __str__(self):
return self.str or self.cexpr
def read(self):
return self.cexpr
- def write(self, source):
- assert self.writable
- return "%s = %s" % (self.cexpr, source.read())
@property
def writable(self):
+ return self.c_lval
+ @property
+ def addressable(self):
+ return self.c_lval
+ @property
+ def c_lval(self):
return self.type is not None
mkLit = Lit
@@ -292,6 +332,32 @@ def copy(self, site):
mkNullConstant = NullConstant
+class Orphan(Expression):
+ """Expressions that evaluate to a value that is allocated on the stack, but
+ not belonging to any local variable. Archetypical example are function
+ applications."""
+ orphan = True
+
+class OrphanWrap(Orphan):
+ @auto_init
+ def __init__(self, site, expr): pass
+
+ def ctype(self):
+ return self.expr.ctype()
+
+ @property
+ def c_lval(self):
+ return self.expr.c_lval
+
+ def __str__(self):
+ return str(self.expr)
+
+ def read(self):
+ return self.expr.read()
+
+ def discard(self):
+ return self.expr.discard()
+
def typecheck_inargs(site, args, inp, kind="function", known_arglen=None):
arglen = len(args) if known_arglen is None else known_arglen
if arglen != len(inp):
@@ -399,16 +465,22 @@ def typecheck_inarg_inits(site, inits, inp, location, scope,
if init.kind != 'initializer_scalar':
raise ESYNTAX(init.site, '{',
'variadic arguments must be simple expressions')
- args.append(coerce_if_eint(codegen_expression(init.args[0],
- location, scope)))
+ arg = codegen_expression(init.args[0], location, scope)
+ if arg.ctype().is_raii:
+ is_string = isinstance(safe_realtype_shallow(arg.ctype()),
+ TString)
+ raise ERAIIVARARG(arg.site, is_string)
+ args.append(coerce_if_eint(arg))
return args
class Apply(Expression):
+ # An Apply expression is always orphan except for the application of
+ # memoized methods.
priority = 160
explicit_type = True
@auto_init
- def __init__(self, site, fun, args, funtype):
+ def __init__(self, site, fun, args, funtype, orphan):
pass
def ctype(self):
return self.funtype.output_type
@@ -419,7 +491,7 @@ def read(self):
return (self.fun.read() +
'(' + ", ".join(e.read() for e in self.args) + ')')
-def mkApplyInits(site, fun, inits, location, scope):
+def mkApplyInits(site, fun, inits, location, scope, orphan=True):
'''Apply a C function to initializers'''
funtype = fun.ctype()
@@ -442,9 +514,9 @@ def mkApplyInits(site, fun, inits, location, scope):
[(str(i + 1), t) for (i, t) in enumerate(funtype.input_types)],
location, scope, 'function', funtype.varargs)
- return Apply(site, fun, args, funtype)
+ return Apply(site, fun, args, funtype, orphan)
-def mkApply(site, fun, args):
+def mkApply(site, fun, args, orphan=True):
'''Apply a C function'''
funtype = fun.ctype()
@@ -485,7 +557,7 @@ def mkApply(site, fun, args):
else:
args = [coerce_if_eint(arg) for arg in args[:known_arglen]]
args.extend(coerced_varargs)
- return Apply(site, fun, args, funtype)
+ return Apply(site, fun, args, funtype, orphan)
class StaticIndex(NonValue):
"""A reference to the index variable of a containing object array,
diff --git a/py/dml/g_backend.py b/py/dml/g_backend.py
index e1ce7b783..fb6e3cd40 100644
--- a/py/dml/g_backend.py
+++ b/py/dml/g_backend.py
@@ -48,7 +48,7 @@ def enc(expr):
return (str(expr),)
try:
- with crep.DeviceInstanceContext():
+ with crep.DeviceInstanceContext(), ctree.StaticRAIIScope():
expr = node.get_expr(tuple(
mkLit(node.site, dollar(node.site) + idxvar, types.TInt(32, False))
for idxvar in node.parent.idxvars()))
diff --git a/py/dml/globals.py b/py/dml/globals.py
index 8fd5592c1..3711934c3 100644
--- a/py/dml/globals.py
+++ b/py/dml/globals.py
@@ -58,6 +58,9 @@
# types.TypeSequence -> codegen.TypeSequenceInfo
type_sequence_infos = {}
+# TypeKey -> GeneratedRAIITypeInfo
+generated_raii_types = {}
+
# 1.4 style integer operations in 1.2, --strict-dml12-int
strict_int_flag = None
def compat_dml12_int(site):
@@ -101,3 +104,5 @@ def compat_dml12_int(site):
build_confidentiality = 0
linemarks = False
+
+session_orphan_allocs = 0
diff --git a/py/dml/io_memory.py b/py/dml/io_memory.py
index 391c75c6e..81d682d91 100644
--- a/py/dml/io_memory.py
+++ b/py/dml/io_memory.py
@@ -221,7 +221,8 @@ def dim_sort_key(data):
regvar, size.read())])
lines.append(
' %s;' % (
- size2.write(mkLit(site, 'bytes', TInt(64, False)))))
+ size2.write(ExpressionInitializer(mkLit(site, 'bytes',
+ TInt(64, False))))))
if partial:
if bigendian:
lines.extend([
@@ -246,7 +247,8 @@ def dim_sort_key(data):
regvar, indices, memop.read(), bytepos_args),
' if (ret) return true;',
' %s;' % (
- value2.write(mkLit(site, 'val', TInt(64, False)))),
+ value2.write(ExpressionInitializer(
+ mkLit(site, 'val', TInt(64, False))))),
' return false;'])
else:
# Shifting/masking can normally be skipped in banks with
@@ -272,7 +274,8 @@ def dim_sort_key(data):
' if (offset >= %s[last].offset' % (regvar,)
+ ' && offset < %s[last].offset + %s[last].size) {'
% (regvar, regvar),
- ' %s;' % (size2.write(mkIntegerLiteral(site, 0)),),
+ ' %s;' % (size2.write(ExpressionInitializer(
+ mkIntegerLiteral(site, 0))),),
' return false;',
' }'])
lines.extend([
diff --git a/py/dml/messages.py b/py/dml/messages.py
index 22ad1c369..3a0f934fa 100644
--- a/py/dml/messages.py
+++ b/py/dml/messages.py
@@ -410,7 +410,8 @@ class EEXTERN(DMLError):
class EEXPORT(DMLError):
"""Can only export non-inline, non-shared, non-throwing methods declared
- outside object arrays."""
+ outside object arrays, that do not have any input parameter or return value
+ of resource-enriched (RAII) type."""
fmt = "cannot export this method"
version = "1.4"
@@ -1369,10 +1370,10 @@ class ECONSTP(DMLError):
class ECONST(DMLError):
"""
- The lvalue that is assigned to is declared as a `const` and
- thus can't be assigned to.
+    Attempted modification (e.g. assignment) of an lvalue that is declared
+ as a `const` and thus can't be modified.
"""
- fmt = "assignment to constant"
+ fmt = "modification of constant"
def __init__(self, site):
DMLError.__init__(self, site);
@@ -1585,6 +1586,52 @@ class EVLACONST(DMLError):
fmt = ("variable length array declared with (partially) const-qualified "
+ "type")
+class EVLARAII(DMLError):
+ """
+ Variable length arrays may not have a resource-enriched (RAII) type as
+ base type. Use `vect` instead.
+ """
+ fmt = ("variable length array declared with resource-enriched (RAII) type:"
+ + " %s. Workaround: use a vector ('vect') instead, and resize it "
+ + "to the desired length.")
+
+class EANONRAIISTRUCT(DMLError):
+ """
+ Anonymous structs declared within methods may not have any member of
+ resource-enriched (RAII) type.
+ """
+ fmt = ("method-local anonymous struct declared that has a member of "
+ + "resource-enriched (RAII) type")
+
+class ENEWRAII(DMLError):
+ """
+    A `new` expression not specified as `enriched` can't be used to allocate
+    storage for values of resource-enriched (RAII) type.
+    To address this, use `new<enriched>` instead of `new` or `new<extern>`.
+    """
+    fmt = ("'new' expression not specified as 'enriched' used to create "
+           + "pointer with resource-enriched (RAII) basetype '%s'. To address "
+           + "this, use 'new<enriched>' instead of '%s', and ensure any "
+           + "pointer allocated this way is only deallocated using "
+           + "'delete<enriched>'!")
+
+class EDELETERAII(DMLError):
+ """
+ A `delete` statement not specified as `enriched` was used on a pointer with
+ resource-enriched (RAII) basetype. Except for extremely niche cases, this
+ is incorrect: an allocated pointer of resource-enriched basetype can only
+ be validly created through a `new` expression specified as `enriched`.
+
+    To address this, use `delete<enriched>` instead of `delete` or
+    `delete<extern>`.
+    """
+    version = "1.4"
+    fmt = ("'delete' statement not specified as 'enriched' used on pointer "
+           + "with resource-enriched (RAII) basetype '%s'. "
+           + "To address this, use 'delete<enriched>' instead of '%s', and "
+           + "ensure any pointer deallocated through this 'delete<enriched>' "
+           + "is only allocated using 'new<enriched>'!")
+
class EIDENTSIZEOF(DMLError):
"""
A variant of the EIDENT message exclusive to usages of `sizeof`: it is
@@ -1607,6 +1654,37 @@ class ELOGGROUPS(DMLError):
fmt = ("Too many loggroup declarations. A maximum of 63 log groups (61 "
+ "excluding builtins) may be declared per device.")
+class ERAIIVARARG(DMLError):
+ """
+ Values of resource-enriched (RAII) type cannot be passed as variadic
+ arguments.
+
+ This error typically occurs as the result of trying to pass a value of
+ type `string` to a `printf`-like function, which is a mistake to begin
+ with: `.c_str()` should be used to convert the `string` to `char *`.
+ """
+ fmt = ("values of resource-enriched (RAII) type cannot be passed as "
+ + "variadic arguments%s")
+ def __init__(self, site, is_string):
+ hint = ("\nif you are trying to pass a string to a 'printf'-like "
+ + "function, use '.c_str()' to convert the string to 'char *'"
+ )*is_string
+
+ DMLError.__init__(self, site, hint)
+
+class ESIZEOFRAII(DMLError):
+ """
+ The 'sizeof'/'sizeoftype' operator cannot be used to retrieve the size of
+ a resource-enriched (RAII) type. This is to prevent primitive memory
+ manipulation with values of such types, as such manipulation is always
+ unsafe.
+ """
+ fmt = ("'sizeof'/'sizeoftype' cannot be used with resource-enriched "
+ + "(RAII) types.\n"
+ + "Don't attempt to work around this by using an integer literal "
+ + "instead: any primitive memory manipulation like memcpy is "
+ + "unsafe when involving values of resource-enriched types.")
+
#
# WARNINGS (keep these as few as possible)
#
diff --git a/py/dml/serialize.py b/py/dml/serialize.py
index 17de64f02..397a580cf 100644
--- a/py/dml/serialize.py
+++ b/py/dml/serialize.py
@@ -90,7 +90,6 @@ def prepare_array_de_serialization(site, t):
while isinstance(base, TArray):
dims.append(base.size)
base = base.base
-
sizeof_base = expr.mkLit(site, f"sizeof({base.declaration('')})",
TNamed('size_t'))
dimsizes_lit = ('(const uint32 []) { %s }'
@@ -113,10 +112,12 @@ def call_c_fun(site, fun, args):
# to target_expr
def serialize(real_type, current_expr, target_expr):
current_site = current_expr.site
- def construct_assign_apply(funname, intype):
- apply_expr = apply_c_fun(current_site, funname,
- [current_expr], attr_value_t)
- return ctree.mkAssignStatement(current_site, target_expr,
+ def construct_assign_apply(funname):
+ return construct_assign_apply_on_expr(funname, current_expr)
+ def construct_assign_apply_on_expr(funname, custom_expr):
+ apply_expr = apply_c_fun(current_site, funname, [custom_expr],
+ attr_value_t)
+ return ctree.AssignStatement(current_site, target_expr,
ctree.ExpressionInitializer(apply_expr))
if real_type.is_int:
if real_type.signed:
@@ -131,18 +132,25 @@ def construct_assign_apply(funname, intype):
funname,
function_type),
[converted_arg],
- function_type)
+ function_type,
+ True)
return ctree.mkCompound(current_site,
- [ctree.mkAssignStatement(
+ [ctree.AssignStatement(
current_site, target_expr,
ctree.ExpressionInitializer(
apply_expr))])
else:
- return construct_assign_apply(funname, real_type)
+ return construct_assign_apply(funname)
elif isinstance(real_type, TBool):
- return construct_assign_apply("SIM_make_attr_boolean", real_type)
+ return construct_assign_apply("SIM_make_attr_boolean")
elif isinstance(real_type, TFloat):
- return construct_assign_apply("SIM_make_attr_floating", real_type)
+ return construct_assign_apply("SIM_make_attr_floating")
+ elif isinstance(real_type, TString):
+ c_string = apply_c_fun(current_site, "_dml_string_str", [current_expr],
+ TPtr(TNamed('char')))
+ return construct_assign_apply_on_expr("SIM_make_attr_string",
+ c_string)
+
elif isinstance(real_type, TArray):
(base, dimsizes, sizeof_base,
dimsizes_expr) = prepare_array_de_serialization(current_site,
@@ -164,15 +172,28 @@ def construct_assign_apply(funname, intype):
len(dimsizes)),
elem_serializer],
attr_value_t)
- return ctree.mkAssignStatement(current_site, target_expr,
+ return ctree.AssignStatement(current_site, target_expr,
ctree.ExpressionInitializer(apply_expr))
-
- elif isinstance(real_type, (TStruct, TVector)):
+ elif isinstance(real_type, TVector):
+ base = real_type.base
+ sizeof_base = expr.mkLit(current_site,
+ f"sizeof({base.declaration('')})",
+ TNamed('size_t'))
+ elem_serializer = expr.mkLit(current_site, lookup_serialize(base),
+ TPtr(serializer_t))
+ apply_expr = apply_c_fun(current_site, '_serialize_vector',
+ [expr.OrphanWrap(current_site, current_expr),
+ sizeof_base,
+ elem_serializer],
+ attr_value_t)
+ return ctree.AssignStatement(current_site, target_expr,
+ ctree.ExpressionInitializer(apply_expr))
+ elif isinstance(real_type, TStruct):
apply_expr = apply_c_fun(
current_site, lookup_serialize(real_type),
- [ctree.mkAddressOf(current_site, current_expr)], attr_value_t)
- return ctree.mkAssignStatement(current_site, target_expr,
- ctree.ExpressionInitializer(apply_expr))
+ [ctree.AddressOf(current_site, current_expr)], attr_value_t)
+ return ctree.AssignStatement(current_site, target_expr,
+ ctree.ExpressionInitializer(apply_expr))
elif isinstance(real_type, TTrait):
id_infos = expr.mkLit(current_site, '_id_infos',
TPtr(TNamed('_id_info_t', const = True)))
@@ -180,7 +201,7 @@ def construct_assign_apply(funname, intype):
TNamed("_identity_t"), ".")
apply_expr = apply_c_fun(current_site, "_serialize_identity",
[id_infos, identity_expr], attr_value_t)
- return ctree.mkAssignStatement(current_site, target_expr,
+ return ctree.AssignStatement(current_site, target_expr,
ctree.ExpressionInitializer(apply_expr))
elif isinstance(real_type, THook):
id_infos = expr.mkLit(current_site,
@@ -189,7 +210,7 @@ def construct_assign_apply(funname, intype):
TPtr(TNamed('_id_info_t', const = True)))
apply_expr = apply_c_fun(current_site, "_serialize_identity",
[id_infos, current_expr], attr_value_t)
- return ctree.mkAssignStatement(current_site, target_expr,
+ return ctree.AssignStatement(current_site, target_expr,
ctree.ExpressionInitializer(apply_expr))
else:
# Callers are responsible for checking that the type is serializeable,
@@ -202,11 +223,12 @@ def construct_assign_apply(funname, intype):
# with a given set_error_t and message.
def deserialize(real_type, current_expr, target_expr, error_out):
current_site = current_expr.site
- def construct_assign_apply(attr_typ, intype):
+ def construct_assign_apply(attr_typ, intype, mod_apply_expr=lambda x:x):
check_expr = apply_c_fun(current_site, 'SIM_attr_is_' + attr_typ,
[current_expr], TBool())
- apply_expr = apply_c_fun(current_site, 'SIM_attr_' + attr_typ,
- [current_expr], intype)
+ apply_expr = mod_apply_expr(apply_c_fun(current_site,
+ 'SIM_attr_' + attr_typ,
+ [current_expr], intype))
error_stmts = error_out('Sim_Set_Illegal_Type', 'expected ' + attr_typ)
target = target_expr
@@ -223,7 +245,7 @@ def construct_assign_apply(attr_typ, intype):
return ctree.mkIf(current_site,
check_expr,
- ctree.mkAssignStatement(
+ ctree.AssignStatement(
current_site, target,
ctree.ExpressionInitializer(apply_expr)),
ctree.mkCompound(current_site, error_stmts))
@@ -237,7 +259,7 @@ def addressof_target_unconst():
def construct_subcall(apply_expr):
(sub_success_decl, sub_success_arg) = \
declare_variable(current_site, "_sub_success", set_error_t)
- assign_stmt = ctree.mkAssignStatement(
+ assign_stmt = ctree.AssignStatement(
current_site, sub_success_arg,
ctree.ExpressionInitializer(apply_expr))
check_expr = ctree.mkLit(current_site,
@@ -253,18 +275,38 @@ def construct_subcall(apply_expr):
if real_type.is_int:
if real_type.is_endian:
- real_type = TInt(real_type.bits, real_type.signed)
- return construct_assign_apply("integer", real_type)
+ def mod_apply_expr(expr):
+ return ctree.source_for_assignment(expr.site, real_type, expr)
+ else:
+ def mod_apply_expr(expr):
+ return expr
+ return construct_assign_apply("integer", TInt(64, True),
+ mod_apply_expr)
elif isinstance(real_type, TBool):
return construct_assign_apply("boolean", real_type)
elif isinstance(real_type, TFloat):
return construct_assign_apply("floating", real_type)
+ elif isinstance(real_type, TString):
+ def mod_apply_expr(expr):
+ return apply_c_fun(expr.site, "_dml_string_new", [expr], TString())
+ return construct_assign_apply("string",
+ TPtr(TNamed('char', const=True)),
+ mod_apply_expr)
elif isinstance(real_type, TArray):
(base, dimsizes, sizeof_base,
dimsizes_expr) = prepare_array_de_serialization(current_site,
real_type)
elem_deserializer = expr.mkLit(current_site, lookup_deserialize(base),
TPtr(deserializer_t))
+
+ if base.is_raii:
+ from .codegen import get_raii_type_info
+ raii_destructor = get_raii_type_info(base).cident_destructor
+ else:
+ raii_destructor = "NULL"
+ raii_destructor = expr.mkLit(current_site, raii_destructor,
+ TPtr(TFunction([TPtr(void)], void)))
+
# elems_are_bytes informs if the final dimension may either be
# deserialized as a list or a data attribute value.
# This is true for all integer types of width 8 bits
@@ -275,9 +317,29 @@ def construct_subcall(apply_expr):
[current_expr, addressof_target_unconst(),
sizeof_base, dimsizes_expr,
ctree.mkIntegerLiteral(current_site, len(dimsizes)),
- elem_deserializer, elems_are_bytes], set_error_t)
+ elem_deserializer, raii_destructor, elems_are_bytes], set_error_t)
return construct_subcall(apply_expr)
- elif isinstance(real_type, (TStruct, TVector)):
+ elif isinstance(real_type, TVector):
+ base = real_type.base
+ sizeof_base = expr.mkLit(current_site,
+ f"sizeof({base.declaration('')})",
+ TNamed('size_t'))
+ elem_deserializer = expr.mkLit(current_site, lookup_deserialize(base),
+ TPtr(deserializer_t))
+ if base.is_raii:
+ from .codegen import get_raii_type_info
+ raii_destructor = get_raii_type_info(base).cident_destructor
+ else:
+ raii_destructor = "NULL"
+ raii_destructor = expr.mkLit(current_site, raii_destructor,
+ TPtr(TFunction([TPtr(void)], void)))
+
+ apply_expr = apply_c_fun(
+ current_site, '_deserialize_vector',
+ [current_expr, addressof_target_unconst(),
+ sizeof_base, elem_deserializer, raii_destructor], set_error_t)
+ return construct_subcall(apply_expr)
+ elif isinstance(real_type, TStruct):
apply_expr = apply_c_fun(
current_site, lookup_deserialize(real_type),
[current_expr, addressof_target_unconst()],
@@ -345,6 +407,8 @@ def map_dmltype_to_attrtype(site, dmltype):
return 'b'
if isinstance(real_type, TFloat):
return 'f'
+ if isinstance(real_type, TString):
+ return 's'
if isinstance(real_type, TStruct):
return '[%s]' % "".join([map_dmltype_to_attrtype(site, mt)
for mt in real_type.members.values()])
@@ -357,9 +421,8 @@ def map_dmltype_to_attrtype(site, dmltype):
return '[%s{%s}]' % (arr_attr_type, arr_length) + or_data
if isinstance(real_type, (TTrait, THook)):
return '[s[i*]]'
- # TODO should be implemented
- #if isinstance(real_type, TVector):
- # return '[%s*]' % (map_dmltype_to_attrtype(site, real_type.base))
+ if isinstance(real_type, TVector):
+ return f'[{map_dmltype_to_attrtype(site, real_type.base)}*]'
raise ICE(site, 'unserializable type: %r' % (dmltype,))
def mark_for_serialization(site, dmltype):
@@ -375,13 +438,15 @@ def mark_for_serialization(site, dmltype):
if not real_type.size.constant:
raise messages.ESERIALIZE(site, dmltype)
mark_for_serialization(site, real_type.base)
+ elif isinstance(real_type, TVector):
+ mark_for_serialization(site, real_type.base)
elif isinstance(real_type, TTrait):
dml.globals.serialized_traits.add(real_type.trait)
elif isinstance(real_type, THook):
real_type.validate(dmltype.declaration_site or site)
from .codegen import get_type_sequence_info
get_type_sequence_info(real_type.msg_types, create_new=True)
- elif not isinstance(real_type, (IntegerType, TBool, TFloat)):
+ elif not isinstance(real_type, (IntegerType, TBool, TFloat, TString)):
raise messages.ESERIALIZE(site, dmltype)
# generate a part of the function name from a description of the dmltype
@@ -416,6 +481,8 @@ def type_signature(dmltype, is_for_serialization):
return 'B'
if isinstance(dmltype, TFloat):
return {'double': 'Fd', 'float': 'Fs'}[dmltype.name]
+ if isinstance(dmltype, TString):
+ return 'STR'
if isinstance(dmltype, TStruct):
return 'S' + dmltype.label
if isinstance(dmltype, TArray):
@@ -442,7 +509,7 @@ def serialize_sources_to_list(site, sources, out_attr):
site, "SIM_alloc_attr_list",
[ctree.mkIntegerConstant(site, size, False)],
attr_value_t)
- attr_assign_statement = ctree.mkAssignStatement(
+ attr_assign_statement = ctree.AssignStatement(
site, out_attr, ctree.ExpressionInitializer(attr_alloc_expr))
imm_attr_decl, imm_attr_ref = declare_variable(
site, "_imm_attr", attr_value_t)
@@ -457,7 +524,7 @@ def serialize_sources_to_list(site, sources, out_attr):
if typ is not None:
sub_serialize = serialize(typ, source, imm_attr_ref)
else:
- sub_serialize = ctree.mkAssignStatement(
+ sub_serialize = ctree.AssignStatement(
site, imm_attr_ref, ctree.ExpressionInitializer(source))
sim_attr_list_set_statement = call_c_fun(
site, "SIM_attr_list_set_item", [ctree.mkAddressOf(site, out_attr),
@@ -493,10 +560,8 @@ def generate_serialize(real_type):
safe_realtype(typ))
for (name, typ) in real_type.members.items())
serialize_sources_to_list(site, sources, out_arg)
- elif isinstance(real_type, TVector):
- raise ICE(site, "TODO: serialize vector")
elif isinstance(real_type, (IntegerType, TBool, TFloat, TTrait,
- TArray, THook)):
+ TArray, THook, TString, TVector)):
serialize(real_type,
ctree.mkDereference(site, in_arg),
out_arg).toc()
@@ -517,7 +582,7 @@ def deserialize_list_to_targets(site, val_attr, targets, error_out_at_index,
index = ctree.mkIntegerConstant(site, i, False)
sim_attr_list_item = apply_c_fun(site, "SIM_attr_list_item",
[val_attr, index], attr_value_t)
- imm_set = ctree.mkAssignStatement(
+ imm_set = ctree.AssignStatement(
site, imm_attr_ref,
ctree.ExpressionInitializer(sim_attr_list_item))
statements.append(imm_set)
@@ -535,7 +600,7 @@ def sub_error_out(exc, msg):
sub_deserialize = deserialize(typ, imm_attr_ref, target,
sub_error_out)
else:
- sub_deserialize = ctree.mkAssignStatement(
+ sub_deserialize = ctree.AssignStatement(
site, target, ctree.ExpressionInitializer(imm_attr_ref))
statements.append(sub_deserialize)
else:
@@ -592,6 +657,7 @@ def generate_deserialize(real_type):
func_code = output.StrOutput()
with func_code:
+ cleanup_on_failure = []
cleanup = []
output.out(function_decl + " {\n", postindent = 1)
out_arg_decl.toc()
@@ -605,13 +671,22 @@ def error_out(exc, msg):
stmts.append(ctree.mkInline(site, 'goto _exit;'))
return stmts
if isinstance(real_type, TStruct):
+ from .codegen import get_raii_type_info
+ raii_info = (get_raii_type_info(real_type)
+ if real_type.is_raii else None)
+ malloc = expr.mkLit(site,
+ f'MM_{("Z" if raii_info is not None else "M")}ALLOC'
+ + f'(1, {real_type.declaration("")})',
+ TPtr(real_type))
(tmp_out_decl, tmp_out_ref) = declare_variable(
- site, "_tmp_out", TPtr(real_type),
- ctree.mkNew(site, real_type))
- cleanup_ref = (tmp_out_ref if not deep_const(real_type)
- else ctree.mkCast(site, tmp_out_ref, TPtr(void)))
- cleanup.append(ctree.mkDelete(site, cleanup_ref))
+ site, "_tmp_out", TPtr(real_type), malloc)
tmp_out_decl.toc()
+ cleanup_ref = '(void *)'*deep_const(real_type) + '_tmp_out'
+ if raii_info is not None:
+ cleanup_on_failure.append(ctree.mkInline(
+ site, raii_info.read_destroy_lval('*_tmp_out') + ';'))
+
+ cleanup.append(ctree.mkInline(site, f'MM_FREE({cleanup_ref});'))
targets = tuple((ctree.mkSubRef(site, tmp_out_ref, name, "->"),
conv_const(real_type.const, safe_realtype(typ)))
for (name, typ) in real_type.members.items())
@@ -620,23 +695,26 @@ def error_out_at_index(_i, exc, msg):
deserialize_list_to_targets(site, in_arg, targets,
error_out_at_index,
f'deserialization of {real_type}')
- ctree.mkAssignStatement(site,
- ctree.mkDereference(site, out_arg),
- ctree.ExpressionInitializer(
- ctree.mkDereference(
- site, tmp_out_ref))).toc()
-
- elif isinstance(real_type, TVector):
- raise ICE(site, "TODO: serialize vector")
- elif isinstance(real_type, (IntegerType, TBool, TFloat, TTrait,
- TArray, THook)):
+ dest = ctree.mkDereference(site, out_arg)
+ src = expr.OrphanWrap(site, ctree.mkDereference(site, tmp_out_ref))
+ ctree.AssignStatement(site, dest, ctree.ExpressionInitializer(src)
+ ).toc()
+
+ elif isinstance(real_type, (IntegerType, TBool, TFloat, TTrait, TArray,
+ THook, TString, TVector)):
deserialize(real_type,
in_arg,
ctree.mkDereference(site, out_arg),
error_out).toc()
else:
assert False
+ if cleanup_on_failure:
+ output.out("if (false) {\n", postindent=1)
output.out("_exit:\n")
+ for stmt in cleanup_on_failure:
+ stmt.toc()
+ if cleanup_on_failure:
+ output.out("}\n", preindent=-1)
for stmt in cleanup:
stmt.toc()
output.out("return _success;\n")
diff --git a/py/dml/structure.py b/py/dml/structure.py
index 81364662d..f72f2a313 100644
--- a/py/dml/structure.py
+++ b/py/dml/structure.py
@@ -73,7 +73,11 @@ def mkglobals(stmts):
by_name = {}
assert not global_scope.symbols()
for stmt in stmts:
- if stmt.kind in ['extern', 'extern_typedef', 'dml_typedef']:
+ if stmt.kind == 'extern':
+ ((_, _, cname, _), name) = stmt.args
+ if name is None:
+ name = cname
+ elif stmt.kind in {'extern_typedef', 'dml_typedef'}:
((_, _, name, _),) = stmt.args
else:
name = stmt.args[0]
@@ -150,7 +154,9 @@ def mkglobals(stmts):
trait_body = None
templates[name] = (stmt.site, template_body, trait_body)
elif stmt[0] == 'extern':
- (_, esite, (_, site, name, typ)) = stmt
+ (_, esite, (_, site, cname, typ), dmlname) = stmt
+ if dmlname is None:
+ dmlname = cname
if typ is None:
# guaranteed by grammar
assert dml.globals.dml_version == (1, 2)
@@ -164,8 +170,8 @@ def mkglobals(stmts):
allow_void=site.dml_version() == (1, 2))
# any substructs are converted to anonymous extern structs
assert not struct_defs
- new_symbols.append(LiteralSymbol(name, typ, site))
- externs.append((name, site, typ))
+ new_symbols.append(LiteralSymbol(dmlname, typ, site, cname))
+ externs.append((cname, site, typ))
elif stmt[0] == 'extern_typedef':
(_, site, (_, _, name, typ)) = stmt
assert not typedefs.get(name, None)
@@ -240,7 +246,7 @@ def check_named_types(t):
t.resolve()
for (mn, mt) in t.members.items():
check_named_types(mt)
- elif isinstance(t, (TPtr, TVector, TArray)):
+ elif isinstance(t, (TPtr, TVectorLegacy, TVector, TArray)):
check_named_types(t.base)
elif isinstance(t, TFunction):
for pt in t.input_types:
@@ -252,7 +258,7 @@ def check_named_types(t):
elif isinstance(t, THook):
for msg_t in t.msg_types:
check_named_types(msg_t)
- elif isinstance(t, (TVoid, IntegerType, TBool, TFloat, TTrait)):
+ elif isinstance(t, (TVoid, IntegerType, TBool, TFloat, TTrait, TString)):
pass
else:
raise ICE(t.declaration_site, "unknown type %r" % t)
@@ -302,13 +308,13 @@ def type_deps(t, include_structs, expanded_typedefs):
return deps
elif isinstance(t, TArray):
return type_deps(t.base, True, expanded_typedefs)
- elif isinstance(t, (TPtr, TVector)):
+ elif isinstance(t, (TPtr, TVectorLegacy, TVector)):
return type_deps(t.base, False, expanded_typedefs)
elif isinstance(t, TFunction):
return ([dep for pt in t.input_types
for dep in type_deps(pt, False, expanded_typedefs)]
+ type_deps(t.output_type, False, expanded_typedefs))
- elif isinstance(t, (IntegerType, TVoid, TBool, TFloat, TTrait)):
+ elif isinstance(t, (IntegerType, TVoid, TBool, TFloat, TTrait, TString)):
return []
elif isinstance(t, TExternStruct):
# extern structs are assumed to be self-contained
@@ -626,6 +632,8 @@ def is_default(decl):
def typecheck_method_override(m1, m2):
'''check that m1 can override m2'''
+ # TODO(RAII) the usage of cmp instead of cmp_fuzzy may break old code,
+ # though I doubt it
assert m1.kind == m2.kind == 'method'
(_, (inp1, outp1, throws1, qualifiers1, _), _, _, _) \
= m1.args
@@ -666,7 +674,8 @@ def typecheck_method_override(m1, m2):
# TODO move to caller
(_, type1) = eval_type(t1, a1.site, None, global_scope)
(_, type2) = eval_type(t2, a2.site, None, global_scope)
- if safe_realtype(type1).cmp(safe_realtype(type2)) != 0:
+ if safe_realtype_unconst(type1).cmp(
+ safe_realtype_unconst(type2)) != 0:
raise EMETH(a1.site, a2.site,
f"mismatching types in input argument {n1}")
@@ -675,7 +684,8 @@ def typecheck_method_override(m1, m2):
((n1, t1), (n2, t2)) = (a1.args, a2.args)
(_, type1) = eval_type(t1, a1.site, None, global_scope)
(_, type2) = eval_type(t2, a2.site, None, global_scope)
- if safe_realtype(type1).cmp(safe_realtype(type2)) != 0:
+ if safe_realtype_unconst(type1).cmp(
+ safe_realtype_unconst(type2)) != 0:
msg = "mismatching types in return value"
if len(outp1) > 1:
msg += f" {i + 1}"
@@ -1864,6 +1874,7 @@ def mkobj2(obj, obj_specs, params, each_stmts):
with ExitStack() as stack:
stack.enter_context(ErrorContext(param, None))
stack.enter_context(crep.DeviceInstanceContext())
+ stack.enter_context(ctree.StaticRAIIScope())
try:
try:
# Evaluate statically, because it triggers caching
@@ -1932,6 +1943,10 @@ def mkobj2(obj, obj_specs, params, each_stmts):
if method.throws or len(method.outp) > 1:
report(EEXPORT(method.site, export.site))
continue
+ if any(t.is_raii
+ for (_, t) in itertools.chain(method.inp, method.outp)):
+ report(EEXPORT(method.site, export.site))
+ continue
func = method_instance(method)
mark_method_referenced(func)
mark_method_exported(func, name, export.site)
diff --git a/py/dml/traits.py b/py/dml/traits.py
index cb5695a45..60e636954 100644
--- a/py/dml/traits.py
+++ b/py/dml/traits.py
@@ -394,11 +394,11 @@ def typecheck_method_override(left, right):
if throws0 != throws1:
raise EMETH(site0, site1, "different nothrow annotations")
for ((n, t0), (_, t1)) in zip(inp0, inp1):
- if realtype(t0).cmp(realtype(t1)) != 0:
+ if safe_realtype_unconst(t0).cmp(safe_realtype_unconst(t1)) != 0:
raise EMETH(site0, site1,
"mismatching types in input argument %s" % (n,))
for (i, ((_, t0), (_, t1))) in enumerate(zip(outp0, outp1)):
- if realtype(t0).cmp(realtype(t1)) != 0:
+ if safe_realtype_unconst(t0).cmp(safe_realtype_unconst(t1)) != 0:
raise EMETH(site0, site1,
"mismatching types in output argument %d" % (i + 1,))
@@ -783,10 +783,10 @@ def lookup(self, name, expr, site):
expr = TraitUpcast(site, expr, impl.vtable_trait)
return TraitMethodDirect(site, expr, impl)
if name in self.vtable_methods:
- (_, inp, outp, throws, independent, _, _) = \
+ (_, inp, outp, throws, independent, _, memoized) = \
self.vtable_methods[name]
return TraitMethodIndirect(site, expr, name, inp, outp, throws,
- independent)
+ independent, memoized)
if name in self.vtable_params:
(_, ptype) = self.vtable_params[name]
return TraitParameter(site, expr, name, ptype)
diff --git a/py/dml/types.py b/py/dml/types.py
index 481b751e2..2708f9195 100644
--- a/py/dml/types.py
+++ b/py/dml/types.py
@@ -12,10 +12,14 @@
'realtype',
'safe_realtype_shallow',
'safe_realtype',
+ 'safe_realtype_unconst',
'conv_const',
+ 'shallow_const',
'deep_const',
+ 'TypeKey',
'type_union',
'compatible_types',
+ 'compatible_types_fuzzy',
'typedefs',
'global_type_declaration_order',
'global_anonymous_structs',
@@ -35,7 +39,7 @@
'TFloat',
'TArray',
'TPtr',
- 'TVector',
+ 'TVectorLegacy',
'TTrait',
'TTraitList',
'StructType',
@@ -44,6 +48,8 @@
'TLayout',
'TFunction',
'THook',
+ 'TString',
+ 'TVector',
'cident',
'void',
)
@@ -51,6 +57,7 @@
import sys
import re
from itertools import *
+from enum import Enum
from .env import is_windows
from .output import out
@@ -126,6 +133,10 @@ def realtype(t):
t2 = realtype(t.base)
if t2 != t:
return TArray(t2, t.size, t.const)
+ elif isinstance(t, TVectorLegacy):
+ t2 = realtype(t.base)
+ if t2 != t:
+ return TVectorLegacy(t2, t.const)
elif isinstance(t, TVector):
t2 = realtype(t.base)
if t2 != t:
@@ -160,13 +171,34 @@ def conv_const(const, t):
t.const = True
return t
+def safe_realtype_unconst(t0):
+ def sub(t):
+ if isinstance(t, (TArray, TVector, TVectorLegacy)):
+ base = sub(t.base)
+ if t.const or base is not t.base:
+ t = t.clone()
+ t.const = False
+ t.base = base
+ elif t.const:
+ t = t.clone()
+ t.const = False
+ return t
+ return sub(safe_realtype(t0))
+
+def shallow_const(t):
+ t = safe_realtype_shallow(t)
+ while not t.const and isinstance(t, (TArray, TVector, TVectorLegacy)):
+ t = safe_realtype_shallow(t.base)
+
+ return t.const
+
def deep_const(origt):
subtypes = [origt]
while subtypes:
st = safe_realtype_shallow(subtypes.pop())
if st.const:
return True
- if isinstance(st, (TArray, TVector)):
+ if isinstance(st, (TArray, TVector, TVectorLegacy)):
subtypes.append(st.base)
elif isinstance(st, StructType):
subtypes.extend(st.members.values())
@@ -192,7 +224,26 @@ def __eq__(self, other):
in zip(self.types, other.types)))
def __hash__(self):
- return hash(tuple(type(elem) for elem in self.types))
+ return hash(tuple(elem.hashed() for elem in self.types))
+
+class TypeKey:
+ '''A wrapper around a DML type with equality and hashing based around cmp.
+ Meant to be used as keys for dictionaries.
+ '''
+ def __init__(self, type, consider_quals=True):
+ self.type = type
+ self.consider_quals=consider_quals
+
+
+ def __eq__(self, other):
+ if not (isinstance(other, TypeKey)
+ and self.consider_quals == other.consider_quals):
+ return NotImplemented
+
+ return self.type.cmp(other.type, self.consider_quals) == 0
+
+ def __hash__(self):
+ return self.type.hashed(self.consider_quals)
class DMLType(metaclass=abc.ABCMeta):
'''The type of a value (expression or declaration) in DML. One DML
@@ -222,26 +273,60 @@ def sizeof(self):
'''Return size, or None if not known'''
return None
- def cmp(self, other):
- """Compare this type to another.
+ def hashed(self, consider_quals=True):
+ '''Hash the DML type in a way compatible with cmp. I.e.
+ a.cmp(b, consider_quals) == 0
+ implies a.hashed(consider_quals) == b.hashed(consider_quals)'''
+ return hash((type(self), self.const if consider_quals else None))
- Return 0 if the types are equivalent,
- Return NotImplemented otherwise.
+ # TODO(RAII) I think consider_quals is useless, but it's a tricky question.
+ # Existing usages of cmp, cmp_fuzzy, and cmp + safe_realtype_unconst should
+ # be reevaluated
+ def cmp(self, other, consider_quals=True):
+ """Strict type compatibility.
+
+ Return 0 if the types are run-time compatible,
+ Return NotImplemented otherwise
+
+ "Run-time compatibility" has two minimal criteria:
+ 1. The C representation of the type MUST be compatible, in a C sense
+ (modulo const qualifiers when consider_quals is False.)
+ 2. A value of one type can be treated by DMLC as though it were of the
+ other type without any additional risk of undefined behavior or invalid
+ generated C.
+ For example, all trait reference types share the same C representation,
+ and so satisfy (1), but trait reference types for different traits do
+ not share vtables; trying to use a vtable for one trait with an
+ incompatible reference would result in undefined behavior, and so do
+ not satisfy (2).
- The exact meaning of this is somewhat fuzzy. The
- method is used for three purposes:
+
+ The method is used for three purposes:
1. in TPtr.canstore(), to judge whether pointer target types
are compatible.
2. in ctree, to compare how large values different numerical
types can hold
- 3. when judging whether a method override is allowed, as an inaccurate
- replacement of TPtr(self).canstore(TPtr(other))[0]
+ 3. when judging whether a method override is allowed
See bug 21900 for further discussions.
"""
- return NotImplemented
+ return (0 if (isinstance(other, type(self))
+ and (not consider_quals or self.const == other.const))
+ else NotImplemented)
+
+ def cmp_fuzzy(self, other, consider_quals=False):
+ """Compare this type to another.
+
+ Return 0 if the types are pretty much equivalent,
+ Return NotImplemented otherwise.
+
+ As implied, the exact meaning of this is fuzzy. It mostly relaxes
+ criterion (1) of 'cmp'; for example, TPtr(void).cmp_fuzzy(TPtr(TBool())) is
+ allowed to return 0, as is TPtr(TBool()).cmp_fuzzy(TArray(TBool())).
+ """
+ return self.cmp(other, consider_quals)
def canstore(self, other):
"""Can a variable of this type store a value of another type.
@@ -254,7 +339,7 @@ def canstore(self, other):
The correctness of the return value can not be trusted; see
bug 21900 for further discussions.
"""
- return (self.cmp(other) == 0, False, False)
+ return (self.cmp(other, False) == 0, False, False)
@abc.abstractmethod
def clone(self):
@@ -288,6 +373,21 @@ def resolve(self):
return self"""
return self
+ @property
+ def is_raii(self):
+ return False
+
+class DMLTypeRAII(DMLType):
+ __slots__ = ()
+ @property
+ def is_raii(self):
+ return True
+
+ def resolve(self):
+ from .codegen import get_raii_type_info
+ _ = get_raii_type_info(self)
+ return self
+
class TVoid(DMLType):
__slots__ = ()
void = True
@@ -301,8 +401,6 @@ def declaration(self, var):
return 'void ' + self.const_str + ' ' + var
def clone(self):
return TVoid()
- def cmp(self, other):
- return 0 if isinstance(realtype(other), TVoid) else NotImplemented
class TUnknown(DMLType):
'''A type unknown to DML. Typically used for a generic C macro
@@ -335,8 +433,6 @@ def describe(self):
return 'pointer to %s' % self.name
def key(self):
return 'device'
- def cmp(self, other):
- return 0 if isinstance(realtype(other), TDevice) else NotImplemented
def canstore(self, other):
constviol = False
if not self.const and other.const:
@@ -389,8 +485,10 @@ def describe(self):
return self.c
def key(self):
raise ICE(self.declaration_site, 'need realtype before key')
- def cmp(self, other):
+ def cmp(self, other, consider_quals=True):
assert False, 'need realtype before cmp'
+ def hashed(self, consider_quals=True):
+ assert False, 'need realtype before hashed'
def clone(self):
return TNamed(self.c, self.const)
@@ -398,6 +496,10 @@ def clone(self):
def declaration(self, var):
return cident(self.c) + ' ' + self.const_str + var
+ @property
+ def is_raii(self):
+ return safe_realtype_shallow(self).is_raii
+
class TBool(DMLType):
__slots__ = ()
def __init__(self):
@@ -409,10 +511,6 @@ def describe(self):
return 'bool'
def declaration(self, var):
return 'bool ' + self.const_str + var
- def cmp(self, other):
- if isinstance(other, TBool):
- return 0
- return NotImplemented
def canstore(self, other):
constviol = False
@@ -467,9 +565,19 @@ def get_member_qualified(self, member):
t = (conv_const(self.const, t[0]),) + t[1:]
return t
- def cmp(self, other):
+ def hashed(self, consider_quals=True):
+ byte_order = self.byte_order if self.is_endian else None
+ return hash((IntegerType, self.const if consider_quals else None,
+ self.bits, self.signed, byte_order))
+
+ def cmp(self, other, consider_quals=True):
+ if consider_quals and (self.const != other.const):
+ return NotImplemented
if not other.is_int:
return NotImplemented
+ if (isinstance(self, TLong) != isinstance(other, TLong)
+ or isinstance(self, TSize) != isinstance(other, TSize)):
+ return NotImplemented
if self.is_endian:
if not other.is_endian:
return NotImplemented
@@ -477,9 +585,20 @@ def cmp(self, other):
return NotImplemented
elif other.is_endian:
return NotImplemented
- if isinstance(self, TLong) != isinstance(other, TLong):
+ return (0 if (self.bits, self.signed) == (other.bits, other.signed)
+ else NotImplemented)
+
+ def cmp_fuzzy(self, other, consider_quals=False):
+ if consider_quals and (self.const != other.const):
+ return NotImplemented
+ if not other.is_int:
return NotImplemented
- if isinstance(self, TSize) != isinstance(other, TSize):
+ if self.is_endian:
+ if not other.is_endian:
+ return NotImplemented
+ if self.byte_order != other.byte_order:
+ return NotImplemented
+ elif other.is_endian:
return NotImplemented
if (dml.globals.dml_version == (1, 2)
and not dml.globals.strict_int_flag):
@@ -488,6 +607,7 @@ def cmp(self, other):
else:
return (0 if (self.bits, self.signed) == (other.bits, other.signed)
else NotImplemented)
+
# This is the most restrictive canstore definition for
# IntegerTypes, if this is overridden then it should be
# because we want to be less restrictive
@@ -552,8 +672,6 @@ def canstore(self, other):
other = realtype(other)
if other.is_int:
trunc = (other.bits > self.bits)
- if dml.globals.compat_dml12 and isinstance(other, TBool):
- return (False, False, constviol)
return (True, trunc, constviol)
if other.is_float and not self.is_bitfields:
return (True, True, constviol)
@@ -586,6 +704,10 @@ def __repr__(self):
def clone(self):
return TLong(self.signed, self.const)
+ def hashed(self, consider_quals=True):
+ return hash((TLong, self.const if consider_quals else None,
+ self.signed))
+
def declaration(self, var):
decl = 'long ' + var
return decl if self.signed else 'unsigned ' + decl
@@ -605,15 +727,19 @@ def __repr__(self):
def clone(self):
return TSize(self.signed, self.const)
+ def hashed(self, consider_quals=True):
+ return hash((TSize, self.const if consider_quals else None,
+ self.signed))
+
def declaration(self, var):
return ('ssize_t ' if self.signed else 'size_t ') + var
class TEndianInt(IntegerType):
'''An integer where the byte storage order is defined.
Corresponds to the (u)?intX_[be|le] family of types defined in
- dmllib.h
+ dml-lib.h
'''
- __slots__ = ('byte_order')
+ __slots__ = ('byte_order',)
def __init__(self, bits, signed, byte_order, members = None, const = False):
IntegerType.__init__(self, bits, signed, members, const)
if (bits % 8 != 0):
@@ -682,7 +808,9 @@ def __repr__(self):
return '%s(%r,%r)' % (self.__class__.__name__, self.name, self.const)
def describe(self):
return self.name
- def cmp(self, other):
+ def cmp(self, other, consider_quals=True):
+ if consider_quals and (self.const != other.const):
+ return NotImplemented
if other.is_float and self.name == other.name:
return 0
return NotImplemented
@@ -717,7 +845,7 @@ def key(self):
% (conv_const(self.const, self.base).key(),
self.size.value))
def describe(self):
- return 'array of size %s of %s' % (self.size.read(),
+ return 'array of size %s of %s' % (str(self.size),
self.base.describe())
def declaration(self, var):
return self.base.declaration(self.const_str + var
@@ -728,6 +856,13 @@ def print_declaration(self, var, init = None, unused = False):
assert not init or self.size.constant
DMLType.print_declaration(self, var, init, unused)
+ def hashed(self, consider_quals=True):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ size = self.size.value if self.size.constant else self.size
+ return hash((TArray,
+ size,
+ cconst(self.const, self.base).hashed(consider_quals)))
+
def sizeof(self):
if not self.size.constant:
# variable-sized array, sizeof is not known
@@ -736,14 +871,31 @@ def sizeof(self):
if elt_size == None:
return None
return self.size.value * elt_size
- def cmp(self, other):
- if dml.globals.compat_dml12:
+ def cmp(self, other, consider_quals=True):
+ if not isinstance(other, TArray):
+ return NotImplemented
+ if not (self.size is other.size
+ or (self.size.constant and other.size.constant
+ and self.size.value == other.size.value)):
+ return NotImplemented
+ cconst = conv_const if consider_quals else lambda c, t: t
+ return cconst(self.const, self.base).cmp(
+ cconst(other.const, other.base), consider_quals)
+
+ def cmp_fuzzy(self, other, consider_quals=False):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ if not dml.globals.compat_dml12:
if isinstance(other, (TArray, TPtr)):
- return self.base.cmp(other.base)
+ return cconst(self.const, self.base).cmp_fuzzy(
+ cconst(other.const and isinstance(other, TArray),
+ other.base),
+ consider_quals)
elif isinstance(other, (TPtr, TArray)):
if self.base.void or other.base.void:
return 0
- if self.base.cmp(other.base) == 0:
+ if (cconst(self.const, self.base).cmp_fuzzy(
+ cconst(other.const and isinstance(other, TArray),
+ other.base), consider_quals) == 0):
return 0
return NotImplemented
def canstore(self, other):
@@ -754,6 +906,10 @@ def resolve(self):
self.base.resolve()
return self
+ @property
+ def is_raii(self):
+ return self.base.is_raii
+
class TPtr(DMLType):
__slots__ = ('base',)
def __init__(self, base, const = False):
@@ -767,34 +923,54 @@ def key(self):
return f'{self.const_str}pointer({self.base.key()})'
def describe(self):
return 'pointer to %s' % (self.base.describe())
- def cmp(self, other):
- if dml.globals.compat_dml12:
- if isinstance(other, TPtr):
+ def cmp_fuzzy(self, other, consider_quals=False):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ if not dml.globals.compat_dml12:
+ if isinstance(other, (TArray, TPtr)):
# Can only compare for voidness or equality
if self.base.void or other.base.void:
return 0
- if self.base.cmp(other.base) == 0:
- return 0
+
+ return self.base.cmp(
+ cconst(other.const and isinstance(other, TArray),
+ other.base),
+ consider_quals)
elif isinstance(other, (TPtr, TArray)):
if self.base.void or other.base.void:
return 0
- if self.base.cmp(other.base) == 0:
+ if (self.base.cmp(
+ cconst(other.const and isinstance(other, TArray),
+ other.base), consider_quals) == 0):
return 0
return NotImplemented
+ def cmp(self, other, consider_quals=True):
+ if DMLType.cmp(self, other, consider_quals) != 0:
+ return NotImplemented
+ return self.base.cmp(other.base, consider_quals)
+
def canstore(self, other):
ok = False
trunc = False
constviol = False
if isinstance(other, (TPtr, TArray)):
+ constviol = (not shallow_const(self.base)
+ and shallow_const(other.base))
if self.base.void or other.base.void:
ok = True
else:
- if not self.base.const and other.base.const:
- constviol = True
- ok = (self.base.cmp(other.base) == 0)
+ # TODO(RAII) this means that in 1.4 you can't do things like
+ # assign int* to unsigned*
+ unconst_self_base = safe_realtype_unconst(self.base)
+ unconst_other_base = safe_realtype_unconst(other.base)
+
+ ok = ((unconst_self_base.cmp_fuzzy
+ if dml.globals.compat_dml12_int
+ else unconst_self_base.cmp)(unconst_other_base)
+ == 0)
elif isinstance(other, TFunction):
- ok = True
+ ok = safe_realtype_unconst(self.base).cmp(other) == 0
+ # TODO(RAII) gate this behind dml.globals.dml_version == (1, 2)?
if self.base.void and isinstance(other, TDevice):
ok = True
#dbg('TPtr.canstore %r %r => %r' % (self, other, ok))
@@ -813,7 +989,7 @@ def resolve(self):
self.base.resolve()
return self
-class TVector(DMLType):
+class TVectorLegacy(DMLType):
__slots__ = ('base',)
def __init__(self, base, const = False):
DMLType.__init__(self, const)
@@ -821,21 +997,28 @@ def __init__(self, base, const = False):
raise DMLTypeError("Null base")
self.base = base
def __repr__(self):
- return "TVector(%r,%r)" % (self.base, self.const)
+ return "TVectorLegacy(%r,%r)" % (self.base, self.const)
def key(self):
- return f'{self.const_str}vector({self.base.key()})'
+ return f'{self.const_str}vectorlegacy({self.base.key()})'
def describe(self):
- return 'vector of %s' % self.base.describe()
- def cmp(self, other):
- if isinstance(other, TVector):
+ return '1.2 vector of %s' % self.base.describe()
+ def cmp(self, other, consider_quals=True):
+ if not isinstance(other, TVectorLegacy):
+ return NotImplemented
+ cconst = conv_const if consider_quals else lambda c, t: t
+ return cconst(self.const, self.base).cmp(
+ cconst(other.const, other.base), consider_quals)
+ def cmp_fuzzy(self, other, consider_quals=False):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ if isinstance(other, TVectorLegacy):
# Can only compare for voidness or equality
if self.base.void or other.base.void:
return 0
- if self.base.cmp(other.base) == 0:
- return 0
+ return cconst(self.const, self.base).cmp(
+ cconst(other.const, other.base), consider_quals)
return NotImplemented
def clone(self):
- return TVector(self.base, self.const)
+ return TVectorLegacy(self.base, self.const)
def declaration(self, var):
s = self.base.declaration('')
return 'VECT(%s) %s%s' % (s, self.const_str, var)
@@ -855,14 +1038,18 @@ def __repr__(self):
def clone(self):
return TTrait(self.trait)
- def cmp(self, other):
- if isinstance(other, TTrait) and self.trait is other.trait:
- return 0
- else:
- return NotImplemented
+ def cmp(self, other, consider_quals=True):
+ return (0 if (DMLType.cmp(self, other, consider_quals) == 0
+ and self.trait is other.trait)
+ else NotImplemented)
def key(self):
return f'{self.const_str}trait({self.trait.name})'
+
+ def hashed(self, consider_quals=True):
+ return hash((TTrait, self.const if consider_quals else None,
+ self.trait))
+
def describe(self):
return 'trait ' + self.trait.name
@@ -882,11 +1069,14 @@ def __repr__(self):
def clone(self):
return TTraitList(self.traitname, self.const)
- def cmp(self, other):
- if isinstance(other, TTraitList) and self.traitname == other.traitname:
- return 0
- else:
- return NotImplemented
+ def hashed(self, consider_quals=True):
+ return hash((TTraitList, self.const if consider_quals else None,
+ self.traitname))
+
+ def cmp(self, other, consider_quals=True):
+ return (0 if (DMLType.cmp(self, other, consider_quals) == 0
+ and self.traitname == other.traitname)
+ else NotImplemented)
def key(self):
return f'{self.const_str}sequence({self.traitname})'
@@ -901,12 +1091,15 @@ def declaration(self, var):
# information is discarded.
return '_each_in_t %s' % (var,)
+is_raii_inprogress = object()
+
class StructType(DMLType):
'''common superclass for DML-defined structs and extern structs'''
- __slots__ = ('members',)
+ __slots__ = ('members', '_raii')
def __init__(self, members, const):
super(StructType, self).__init__(const)
self.members = members
+ self._raii = None
@property
def members_qualified(self):
@@ -917,6 +1110,24 @@ def get_member_qualified(self, member):
t = self.members.get(member)
return t if t is None else conv_const(self.const, t)
+ @property
+ def is_raii(self):
+ if self._raii is not None:
+ assert self._raii is not is_raii_inprogress
+ return self._raii
+
+ self._raii = is_raii_inprogress
+
+ self.resolve()
+
+ for typ in self.members.values():
+ if typ.is_raii:
+ self._raii = True
+ return True
+
+ self._raii = False
+ return False
+
class TExternStruct(StructType):
'''A struct-like type defined by code outside DMLC's control.
'members' is the potential right operands of binary '.',
@@ -955,10 +1166,15 @@ def declaration(self, var):
raise EANONEXT(self.declaration_site)
return "%s %s%s" % (self.typename, self.const_str, var)
- def cmp(self, other):
- if isinstance(other, TExternStruct) and self.id == other.id:
- return 0
- return NotImplemented
+ def hashed(self, consider_quals=True):
+ return hash((TExternStruct,
+ self.const if consider_quals else None,
+ self.id))
+
+ def cmp(self, other, consider_quals=True):
+ return (0 if (DMLType.cmp(self, other, consider_quals) == 0
+ and self.id == other.id)
+ else NotImplemented)
def clone(self):
return TExternStruct(self.members, self.id, self.typename, self.const)
@@ -1007,10 +1223,14 @@ def print_struct_definition(self):
t.print_declaration(n)
out("};\n", preindent = -1)
- def cmp(self, other):
- if isinstance(other, TStruct) and self.label == other.label:
- return 0
- return NotImplemented
+ def hashed(self, consider_quals=True):
+ return hash((TStruct, self.const if consider_quals else None,
+ self.label))
+
+ def cmp(self, other, consider_quals=True):
+ return (0 if (DMLType.cmp(self, other, consider_quals) == 0
+ and self.label == other.label)
+ else NotImplemented)
def clone(self):
return TStruct(self.members, self.label, self.const)
@@ -1137,13 +1357,34 @@ def describe(self):
return ('function(%s) returning %s'
% (inparams, self.output_type.describe()))
- def cmp(self, other):
+ def hashed(self, consider_quals=True):
+ return hash((TFunction,
+ tuple(typ.hashed(consider_quals)
+ for typ in self.input_types),
+ self.output_type.hashed(consider_quals),
+ self.varargs))
+
+ def cmp_fuzzy(self, other, consider_quals=False):
+ if (isinstance(other, TFunction)
+ and len(self.input_types) == len(other.input_types)
+ and all(arg1.cmp_fuzzy(arg2, consider_quals) == 0
+ for (arg1, arg2)
+ in zip(self.input_types, other.input_types))
+ and self.output_type.cmp_fuzzy(other.output_type,
+ consider_quals) == 0
+ and self.varargs == other.varargs):
+ return 0
+ return NotImplemented
+
+ def cmp(self, other, consider_quals=True):
if (isinstance(other, TFunction)
and len(self.input_types) == len(other.input_types)
- and all(arg1.cmp(arg2) == 0
- for (arg1, arg2) in zip(self.input_types,
- other.input_types))
- and self.output_type.cmp(other.output_type) == 0
+ and all(safe_realtype_unconst(arg1).cmp(
+ safe_realtype_unconst(arg2), consider_quals) == 0
+ for (arg1, arg2)
+ in zip(self.input_types, other.input_types))
+ and safe_realtype_unconst(self.output_type).cmp(
+ safe_realtype_unconst(other.output_type), consider_quals) == 0
and self.varargs == other.varargs):
return 0
return NotImplemented
@@ -1179,10 +1420,10 @@ def __repr__(self):
def clone(self):
return THook(self.msg_types, self.validated, self.const)
- def cmp(self, other):
+ def cmp(self, other, consider_quals=True):
if (isinstance(other, THook)
and len(self.msg_types) == len(other.msg_types)
- and all(own_comp.cmp(other_comp) == 0
+ and all(own_comp.cmp(other_comp, consider_quals) == 0
for (own_comp, other_comp) in zip(self.msg_types,
other.msg_types))):
return 0
@@ -1210,6 +1451,60 @@ def validate(self, fallback_site):
raise EHOOKTYPE(self.declaration_site or fallback_site,
typ, e.clarification) from e
+class TString(DMLTypeRAII):
+ __slots__ = ()
+ def __repr__(self):
+ return "TString(%r)" % (self.const,)
+ def describe(self):
+ return 'string'
+ def cmp(self, other, consider_quals=True):
+ return DMLType.cmp(self, other, consider_quals)
+ def clone(self):
+ return TString(self.const)
+ def declaration(self, var):
+ return f'_dml_string_t {self.const_str}{var}'
+
+class TVector(DMLTypeRAII):
+ __slots__ = ('base',)
+ def __init__(self, base, const = False):
+ DMLType.__init__(self, const)
+ if not base:
+ raise DMLTypeError("Null base")
+ self.base = base
+ def __repr__(self):
+ return "TVector(%r,%r)" % (self.base, self.const)
+ def key(self):
+ return f'vector({conv_const(self.const, self.base).key()})'
+ def describe(self):
+ return f'vector of {self.base.describe()}'
+ def __str__(self):
+ return f'vect({self.base})'
+ def cmp_fuzzy(self, other, consider_quals=False):
+ if isinstance(other, TVector):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ # Can only compare for voidness or equality
+ if self.base.void or other.base.void:
+ return 0
+ if cconst(self.const, self.base).cmp(
+ cconst(other.const, other.base), consider_quals) == 0:
+ return 0
+ return NotImplemented
+ def cmp(self, other, consider_quals=True):
+ if isinstance(other, TVector):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ # Can only compare for voidness or equality
+ if cconst(self.const, self.base).cmp(
+ cconst(other.const, other.base), consider_quals) == 0:
+ return 0
+ return NotImplemented
+ def hashed(self, consider_quals=True):
+ cconst = conv_const if consider_quals else lambda c, t: t
+ return hash((TVector, cconst(self.const,
+ self.base).hashed(consider_quals)))
+ def clone(self):
+ return TVector(self.base, self.const)
+ def declaration(self, var):
+ return f'_dml_vect_t {self.const_str}{var}'
intre = re.compile('(u?)int([1-5][0-9]?|6[0-4]?|[789])(_be_t|_le_t)?$')
def parse_type(typename):
@@ -1235,6 +1530,8 @@ def parse_type(typename):
return TInt(64, True)
elif typename == 'uinteger_t' and dml.globals.api_version < '7':
return TInt(64, False)
+ elif typename == 'string':
+ return TString()
else:
return TNamed(typename)
@@ -1249,11 +1546,21 @@ def type_union(type1, type2):
def compatible_types(type1, type2):
# This function intends to verify that two DML types are
# compatible in the sense defined by the C spec, possibly with
- # some DML-specific restrictions added. TODO: DMLType.cmp is only
- # a rough approximation of this; we should write tests and
- # either repair cmp or rewrite the logic from scratch.
+ # some DML-specific restrictions added.
return type1.cmp(type2) == 0
+# TODO(RAII) can we be rid of this and cmp_fuzzy?
+def compatible_types_fuzzy(type1, type2):
+ # This function intends to verify that two DML types are
+ # compatible in the sense defined by the C spec, possibly with
+ # some DML-specific restrictions added.
+ # DMLType.cmp_fuzzy is only a very rough approximation of this,
+    # meant to suit usages such as type-checking the ternary
+ # operator.
+    # Any use of .cmp_fuzzy or compatible_types_fuzzy should be considered
+ # a HACK.
+ return type1.cmp_fuzzy(type2) == 0
+
void = TVoid()
# These are the named types used. This includes both "imported"
# typedefs for types declared in C header files, and types defined in
diff --git a/test/1.4/errors/T_WEXPERIMENTAL.dml b/test/1.4/errors/T_DEPRECATED.dml
similarity index 83%
rename from test/1.4/errors/T_WEXPERIMENTAL.dml
rename to test/1.4/errors/T_DEPRECATED.dml
index 1ea72bc2a..463ff62d6 100644
--- a/test/1.4/errors/T_WEXPERIMENTAL.dml
+++ b/test/1.4/errors/T_DEPRECATED.dml
@@ -8,5 +8,5 @@ dml 1.4;
device test;
-/// WARNING WEXPERIMENTAL
+/// WARNING WDEPRECATED
session int vect x;
diff --git a/test/1.4/errors/T_ECAST.dml b/test/1.4/errors/T_ECAST.dml
index f07c6d4a9..dd4d3cd3e 100644
--- a/test/1.4/errors/T_ECAST.dml
+++ b/test/1.4/errors/T_ECAST.dml
@@ -8,7 +8,7 @@ device test;
typedef struct { uint32 x; } s_t;
typedef layout "little-endian" { uint32 x; } l_t;
-/// WARNING WEXPERIMENTAL
+/// WARNING WDEPRECATED
typedef int vect v_t;
typedef int a_t[1];
typedef void f_t(void);
@@ -44,8 +44,6 @@ method init() {
/// ERROR ECAST
cast(i, v_t);
/// ERROR ECAST
- cast(v, v_t);
- /// ERROR ECAST
cast(l, uint32);
// no error!
cast(a, uint32);
diff --git a/test/1.4/errors/T_ESERIALIZE.dml b/test/1.4/errors/T_ESERIALIZE.dml
index 0a63142ba..9aed60b61 100644
--- a/test/1.4/errors/T_ESERIALIZE.dml
+++ b/test/1.4/errors/T_ESERIALIZE.dml
@@ -33,8 +33,8 @@ typedef void (*f)();
/// ERROR ESERIALIZE
saved f f_ptr;
-// TODO: vectors should be serializable
-/// WARNING WEXPERIMENTAL
+// Legacy vectors should not be serializable
+/// WARNING WDEPRECATED
typedef int vect int_vect;
/// ERROR ESERIALIZE
saved int_vect x;
diff --git a/test/1.4/errors/T_ESWITCH.dml b/test/1.4/errors/T_ESWITCH.dml
index b87c186c7..ecca31404 100644
--- a/test/1.4/errors/T_ESWITCH.dml
+++ b/test/1.4/errors/T_ESWITCH.dml
@@ -11,12 +11,6 @@ method init() {
/// ERROR ESWITCH
{
}
- switch (1)
- // must start with a case
- /// ERROR ESWITCH
- {
- ;
- }
switch (1) {
default:
#if (true) {
diff --git a/test/1.4/errors/T_EVOID.dml b/test/1.4/errors/T_EVOID.dml
index a08076063..8e816581f 100644
--- a/test/1.4/errors/T_EVOID.dml
+++ b/test/1.4/errors/T_EVOID.dml
@@ -35,7 +35,7 @@ extern typedef const void ext_void_t;
/// ERROR EVOID
session void
-/// WARNING WEXPERIMENTAL
+/// WARNING WDEPRECATED
vect v;
// this is allowed in 1.2, and even used by dml-builtins, unclear why
diff --git a/test/1.4/syntax/T_assign.dml b/test/1.4/syntax/T_assign.dml
index d8588e216..84db555f6 100644
--- a/test/1.4/syntax/T_assign.dml
+++ b/test/1.4/syntax/T_assign.dml
@@ -6,6 +6,16 @@ dml 1.4;
device test;
+method m(int i) -> (int) throws {
+ if (i > 4) throw;
+ return i;
+}
+
+method ms(int i) -> (string) throws {
+ if (i > 4) throw;
+ return mk_string_f("%d", i);
+}
+
method init() {
local int i;
@@ -55,9 +65,28 @@ method init() {
c[++j] += ++i;
assert c[1] == 4 && j == 1 && i == 4;
+ // += with method calls
+ i = 0;
+ try {
+ i += m(i + 4);
+ assert i == 4;
+ } catch assert false;
+ try {
+ i += m(i + 4);
+ assert false;
+ } catch {
+ assert i == 4;
+ }
+
// multiple simultaneous assignment
i = 1;
j = 0;
(i, j) = (j, i);
assert i == 0 && j == 1;
+
+ // += with target, source of nonequal types but compatible with +
+ local int arr[4] = {0, 1, 2, 3};
+ local int *p = arr;
+ p += 2;
+ assert *p == 2;
}
diff --git a/test/1.4/types/raii/T_after.cont.py b/test/1.4/types/raii/T_after.cont.py
new file mode 100644
index 000000000..55b759f2f
--- /dev/null
+++ b/test/1.4/types/raii/T_after.cont.py
@@ -0,0 +1,22 @@
+# © 2023 Intel Corporation
+# SPDX-License-Identifier: MPL-2.0
+
+import stest
+
+obj = conf.obj
+SIM_continue(99999)
+stest.expect_equal(obj.trigger, [[0, 0], [0, 0]])
+stest.expect_equal(obj.trigger_hook, [0, 0])
+stest.expect_equal(obj.single_operator, 0)
+stest.expect_equal(obj.multi_operator, [[0, 0], [0, 0]])
+stest.expect_equal(obj.hook_operator, [[0, 0], [0, 0]])
+SIM_continue(2)
+stest.expect_equal(obj.trigger, [[0, 0], [0, 4]])
+stest.expect_equal(obj.trigger_hook, [0, 2])
+stest.expect_equal(obj.single_operator, 5)
+stest.expect_equal(obj.multi_operator, [[0, 5], [3, 0]])
+stest.expect_equal(obj.hook_operator, [[0, 5], [3, 0]])
+SIM_continue(99998)
+stest.expect_equal(obj.single_operator, 5)
+SIM_continue(2)
+stest.expect_equal(obj.single_operator, 3)
diff --git a/test/1.4/types/raii/T_after.dml b/test/1.4/types/raii/T_after.dml
new file mode 100644
index 000000000..1c111dfe6
--- /dev/null
+++ b/test/1.4/types/raii/T_after.dml
@@ -0,0 +1,106 @@
+/*
+ © 2021-2023 Intel Corporation
+ SPDX-License-Identifier: MPL-2.0
+*/
+dml 1.4;
+device test;
+
+import "simics/simulator/callbacks.dml";
+
+template trigger_attr is pseudo_attr {
+ param type = "i";
+ session int val;
+ method inc() default {
+ this.val++;
+ }
+}
+
+attribute trigger[i < 2][j < 2] is trigger_attr {
+ method set(attr_value_t val) throws {
+ after 0.1 s:
+ trigger[SIM_attr_integer(val)][SIM_attr_integer(val)].inc();
+ }
+ method get() -> (attr_value_t) {
+ return SIM_make_attr_uint64(val);
+ }
+}
+
+attribute trigger_hook[i < 2] is (trigger_attr, post_init) {
+ hook() h;
+
+ method post_init() {
+ if (!SIM_is_restoring_state(dev.obj)) {
+ after h: inc();
+ }
+ }
+
+ method inc() {
+ default();
+ after h: inc();
+ }
+
+ method set(attr_value_t val) throws {
+ after 0.1 s: trigger_hook[SIM_attr_integer(val)].h.send_now();
+ }
+ method get() -> (attr_value_t) {
+ return SIM_make_attr_uint64(val);
+ }
+}
+
+attribute operate is (write_only_attr) {
+ param type = "n";
+ method set(attr_value_t val) throws {
+ local vect(int) c = cast({3}, vect(int)) + cast({5}, vect(int));
+ after 0.1 s: single_operator.modify(c, 1);
+ after 0.1 s: multi_operator[0][1].modify(c, 1);
+ after 0.1 s: multi_operator[1][0].modify(c, 0);
+ after 200000 cycles: single_operator.modify(c, 0);
+
+ after 0.1 s: hook_operator[0][1].h.send_now(c, 1);
+ after 0.1 s: hook_operator[1][0].h.send_now(c, 0);
+ }
+}
+
+template operator is (int64_attr) {
+ method modify(vect(int) chunk, int ix) default {
+ this.val = chunk[ix];
+ }
+}
+
+attribute multi_operator[i < 2][j < 2] is (operator);
+attribute single_operator is (operator);
+
+attribute hook_operator[i < 2][j < 2] is (operator, post_init) {
+ hook(vect(int), int) h;
+
+ method post_init() {
+ if (!SIM_is_restoring_state(dev.obj)) {
+ after h -> (c, i): modify(c, i);
+ }
+ }
+
+ method modify(vect(int) chunk, int ix) {
+ default(chunk, ix);
+ after h -> (c, i): modify(c, i);
+ }
+}
+
+// Test methods with const-qualified parameters
+
+typedef layout "little-endian" {
+ const uint32 x[2];
+} l_t;
+
+attribute constig_res[i < 2] is uint64_attr;
+
+method constig(const uint64 i, const l_t l) {
+ constig_res[0].val = i;
+ constig_res[1].val = l.x[0] << 32 | l.x[1];
+}
+
+attribute trigger_constig is (write_only_attr) {
+ param type = "n";
+ method set(attr_value_t val) throws {
+ after 0.1 s: constig(4, {{7, 11}});
+ }
+}
diff --git a/test/1.4/types/raii/T_after.py b/test/1.4/types/raii/T_after.py
new file mode 100644
index 000000000..617d6983b
--- /dev/null
+++ b/test/1.4/types/raii/T_after.py
@@ -0,0 +1,33 @@
+# © 2023 Intel Corporation
+# SPDX-License-Identifier: MPL-2.0
+
+from os.path import join
+import subprocess
+from simicsutils.host import batch_suffix
+import stest
+
+cpu = SIM_create_object("clock", "clock", [["freq_mhz", 1]])
+obj.queue = cpu
+
+obj.trigger_constig = None
+SIM_continue(99999)
+stest.expect_equal(obj.constig_res, [0, 0])
+SIM_continue(2)
+stest.expect_equal(obj.constig_res, [4, 7 << 32 | 11])
+
+obj.single_operator = 0
+obj.multi_operator = [[0, 0], [0, 0]]
+
+obj.trigger = [[1, 1], [1, 1]]
+obj.trigger_hook = [1, 1]
+obj.operate = None
+
+SIM_write_configuration_to_file("checkpointing.chkp", Sim_Save_Nobundle)
+
+subprocess.check_call(
+ [f'{conf.sim.project}/bin/simics{batch_suffix()}'] +
+ ["--batch-mode", "--quiet", "--no-copyright", "--dump-core", "--werror",
+ '--project', conf.sim.project,
+ "-L", scratchdir,
+ "-c", "checkpointing.chkp",
+ "-p", join(basedir, "T_after.cont.py")])
diff --git a/test/1.4/types/raii/T_hooks_basic.dml b/test/1.4/types/raii/T_hooks_basic.dml
new file mode 100644
index 000000000..87403d3a7
--- /dev/null
+++ b/test/1.4/types/raii/T_hooks_basic.dml
@@ -0,0 +1,147 @@
+/*
+ © 2023 Intel Corporation
+ SPDX-License-Identifier: MPL-2.0
+*/
+dml 1.4;
+
+device test;
+
+/// DMLC-FLAG --enable-features-for-internal-testing-dont-use-this
+/// WARNING WEXPERIMENTAL hooks_common.dml
+
+import "hooks_common.dml";
+
+template hookset_test is hookset {
+ shared method testhooks();
+ method testhooks() {
+ local uint64 resumed;
+ count = storage = storage_indexed = 0;
+ last_i_indexed = last_j_indexed = -1;
+
+ assert h0.suspended == 0;
+ resumed = h0.send_now();
+ assert resumed == 0;
+
+ after h0: no_params();
+ assert count == 0 && h0.suspended == 1;
+ resumed = h0.send_now();
+ assert h0.suspended == 0 && resumed == 1 && count == 1;
+ resumed = h0.send_now();
+ assert resumed == 0;
+
+ local vect(int) v = {7};
+ after h0: no_params();
+ after h0: one_serializable_param(v);
+ assert h0.suspended == 2 && count == 1 && storage == 0;
+ resumed = h0.send_now();
+ assert h0.suspended == 0 && resumed == 2 && count == 2 && storage == 7;
+ count = storage = 0;
+
+ local int i = 5;
+ after h1 -> p: one_param(p);
+ assert h1.suspended == 1 && i == 5;
+ resumed = h1.send_now(&i);
+ assert h1.suspended == 0 && resumed == 1 && i == 6;
+ v = {-5};
+ after h1 -> p: one_serializable_param(v);
+ assert h1.suspended == 1 && storage == 0 && i == 6;
+ resumed = h1.send_now(&i);
+ assert h1.suspended == 0 && resumed == 1 && storage == -5 && i == 6;
+ storage = i = 0;
+
+ after h2 -> (p, i): two_params(p, i);
+ assert h2.suspended == 1 && i == 0;
+ resumed = h2.send_now(&i, {9});
+ assert h2.suspended == 0 && resumed == 1 && i == 9;
+ v = {2};
+ after h2 -> (p, i): two_params(p, v);
+ assert h2.suspended == 1;
+ resumed = h2.send_now(&i, {9});
+ assert h2.suspended == 0 && resumed == 1 && i == 2;
+ i = 0;
+
+ assert h3[3][4].suspended == 0;
+ after h3[3][4]: no_params();
+ for (local int idx_0; idx_0 < 6; ++idx_0) {
+ for (local int idx_1; idx_1 < 8; ++idx_1) {
+ if (idx_0 != 3 && idx_1 != 4) {
+ assert h3[idx_0][idx_1].suspended == 0;
+ }
+ }
+ }
+ assert h3[3][4].suspended == 1 && count == 0;
+ resumed = h3[3][4].send_now();
+ assert h3[3][4].suspended == 0 && resumed == 1 && count == 1;
+ count = 0;
+
+ after h0: indexed[3][5].no_params();
+ v = {10};
+ after h0: indexed[2][3].one_serializable_param(v);
+ assert h0.suspended == 2 && last_i_indexed == -1
+ && last_j_indexed == -1 && storage_indexed == 0;
+ resumed = h0.send_now();
+ assert h0.suspended == 0 && resumed == 2 && last_i_indexed == 3
+ && last_j_indexed == 5 && storage_indexed == 10 * (2*7 + 3);
+ (last_i_indexed, last_j_indexed, storage_indexed) = (-1, -1, 0);
+
+ after h1 -> p: indexed[3][5].one_param(p);
+ assert h1.suspended == 1 && i == 0;
+ resumed = h1.send_now(&i);
+ assert h1.suspended == 0 && resumed == 1 && i == 3 * 7 + 5;
+ i = 0;
+ v = {-5};
+ after h1 -> p: indexed[3][5].one_serializable_param(v);
+ assert h1.suspended == 1 && storage_indexed == 0 && i == 0;
+ resumed = h1.send_now(&i);
+ assert h1.suspended == 0 && resumed == 1
+ && storage_indexed == -5 * (3 * 7 + 5) && i == 0;
+ storage_indexed = 0;
+
+ after h2 -> (p, i): indexed[3][5].two_params(p, i);
+ assert h2.suspended == 1;
+ v = {9};
+ resumed = h2.send_now(&i, v);
+ assert h2.suspended == 0 && resumed == 1 && i == 9 * (3*7 + 5);
+ i = 0;
+ v = {2};
+ after h2 -> (p, i): indexed[3][5].two_params(p, v);
+ assert h2.suspended == 1;
+ resumed = h2.send_now(&i, {9});
+ assert h2.suspended == 0 && resumed == 1 && i == 2*(3*7 + 5);
+
+ foreach sub in (each hookset_test in (this)) {
+ sub.testhooks();
+ }
+ }
+}
+
+in each hookset {
+ is hookset_test;
+}
+
+hook(int **) order_test_hook;
+method order_test_callback(int id, int **queue) {
+ **queue = id;
+ ++*queue;
+}
+
+method order_test() {
+ for (local int i = 0; i < 4; ++i) {
+ after order_test_hook -> p: order_test_callback(i, p);
+ }
+ local int queue[4];
+ local int *queue_ptr = queue;
+ assert order_test_hook.suspended == 4;
+ local uint64 resumed = order_test_hook.send_now(&queue_ptr);
+ assert resumed == 4;
+ for (local int i = 0; i < 4; ++i) {
+ assert queue[i] == i;
+ }
+}
+
+method init() {
+ foreach sub in (each hookset_test in (this)) {
+ sub.testhooks();
+ }
+ order_test();
+}
diff --git a/test/1.4/types/raii/T_hooks_checkpointing.cont.py b/test/1.4/types/raii/T_hooks_checkpointing.cont.py
new file mode 100644
index 000000000..ba7b3eddd
--- /dev/null
+++ b/test/1.4/types/raii/T_hooks_checkpointing.cont.py
@@ -0,0 +1,4 @@
+# © 2023 Intel Corporation
+# SPDX-License-Identifier: MPL-2.0
+
+conf.obj.test_state = None
diff --git a/test/1.4/types/raii/T_hooks_checkpointing.dml b/test/1.4/types/raii/T_hooks_checkpointing.dml
new file mode 100644
index 000000000..327109243
--- /dev/null
+++ b/test/1.4/types/raii/T_hooks_checkpointing.dml
@@ -0,0 +1,155 @@
+/*
+ © 2023 Intel Corporation
+ SPDX-License-Identifier: MPL-2.0
+*/
+dml 1.4;
+
+device test;
+
+/// DMLC-FLAG --enable-features-for-internal-testing-dont-use-this
+/// WARNING WEXPERIMENTAL hooks_common.dml
+
+import "hooks_common.dml";
+
+attribute setup_state is write_only_attr {
+ param type = "n";
+ method set(attr_value_t val) throws {
+ #foreach obj in ([dev, g[2], b1, b1.g[3], b2[1], b2[2].g[3]]) {
+ local vect(int) v = {7};
+ after obj.h0: no_params();
+ after obj.h0: one_serializable_param(v);
+ after obj.h0: indexed[3][5].no_params();
+ v = {10};
+ after obj.h0: indexed[3][5].one_serializable_param(v);
+
+ after obj.h1 -> p: one_param(p);
+ after obj.h1 -> p: one_serializable_param({9});
+
+ after obj.h2 -> (p, i): two_params(p, i);
+
+ after obj.h3[3][4]: no_params();
+
+ after obj.h4[3][4] -> p: indexed[3][5].one_param(p);
+ v = {9};
+ after obj.h4[3][4] -> p: indexed[3][5].one_serializable_param(v);
+ v = {3};
+ after obj.h5[0][1] -> (p, i): two_params(p, v);
+ after obj.h5[2][3] -> (p, i): indexed[3][5].two_params(p, i);
+ after obj.h5[4][5] -> (p, i): indexed[3][5].two_params(p, v);
+
+ after obj.h3[0][0]: obj.h3[2][3].send_now();
+
+ after obj.h4[0][0] -> p: obj.h0.send_now();
+ after obj.h4[1][1] -> p: obj.h1.send_now(p);
+ v = {4};
+ after obj.h4[2][2] -> p: obj.h2.send_now(p, v);
+ v = {7};
+ after obj.h4[3][3] -> p: obj.h6.send_now(v);
+
+ after obj.h5[0][0] -> (p, i): obj.h2.send_now(p, i);
+ v = {4};
+ after obj.h5[1][1] -> (p, i): obj.h2.send_now(p, v);
+ after obj.h5[2][2] -> (p, i): obj.h6.send_now(i);
+ after obj.h5[3][3] -> (p, i): obj.h6.send_now({6});
+ }
+ }
+}
+
+attribute test_state is write_only_attr {
+ param type = "n";
+ method set(attr_value_t val) throws {
+ #foreach obj in ([dev, g[2], b1, b1.g[3], b2[1], b2[2].g[3]]) {
+ count = storage = storage_indexed = 0;
+ last_i_indexed = last_j_indexed = -1;
+
+ local uint64 resumed = obj.h0.send_now();
+ assert resumed == 4 && count == 1 && storage == 7
+ && last_i_indexed == 3 && last_j_indexed == 5
+ && storage_indexed == 10 * (7 * 3 + 5);
+ count = storage = storage_indexed = 0;
+ last_i_indexed = last_j_indexed = -1;
+
+ local int x = 4;
+ resumed = obj.h1.send_now(&x);
+ assert resumed == 2 && x == 5 && storage == 9;
+ storage = 0;
+
+ local vect(int) v = {7};
+ resumed = obj.h2.send_now(&x, v);
+ assert resumed == 1 && x == 7;
+
+ resumed = obj.h3[3][4].send_now();
+ assert resumed == 1 && count == 1;
+ count = 0;
+
+ x = 7;
+ assert obj.h4[3][4].suspended == 2;
+ resumed = obj.h4[3][4].send_now(&x);
+ assert resumed == 2 && x == 3 * 7 + 5
+ && storage_indexed == 9 * (3 * 7 + 5);
+ x = storage_indexed = 0;
+
+ x = 0;
+ assert obj.h5[0][1].suspended == 1;
+ v = {5};
+ resumed = obj.h5[0][1].send_now(&x, v);
+ assert resumed == 1 && x == 3;
+ x = 0;
+ assert obj.h5[2][3].suspended == 1;
+ resumed = obj.h5[2][3].send_now(&x, v);
+ assert resumed == 1 && x == 5 * (7*3 + 5);
+ x = 0;
+ assert obj.h5[4][5].suspended == 1;
+ resumed = obj.h5[4][5].send_now(&x, v);
+ assert resumed == 1 && x == 3 * (7*3 + 5);
+
+ after obj.h3[2][3]: no_params();
+ resumed = obj.h3[0][0].send_now();
+ assert resumed == 1 && obj.h3[2][3].suspended == 0 && count == 1;
+ count = 0;
+
+ x = 7;
+ after obj.h0: no_params();
+ resumed = obj.h4[0][0].send_now(&x);
+ assert resumed == 1 && obj.h0.suspended == 0 && count == 1
+ && x == 7;
+ count = 0;
+
+ after obj.h1 -> p: one_param(p);
+ resumed = obj.h4[1][1].send_now(&x);
+ assert resumed == 1 && obj.h1.suspended == 0 && x == 8;
+
+ after obj.h2 -> (p, i): two_params(p, i);
+ resumed = obj.h4[2][2].send_now(&x);
+ assert resumed == 1 && obj.h2.suspended == 0 && x == 4;
+
+ after obj.h6 -> i: one_serializable_param(i);
+ resumed = obj.h4[3][3].send_now(&x);
+ assert resumed == 1 && obj.h6.suspended == 0 && x == 4
+ && storage == 7;
+ storage = 0;
+
+ x = 7;
+ after obj.h2 -> (p, i): two_params(p, i);
+ resumed = obj.h5[0][0].send_now(&x, {9});
+ assert resumed == 1 && obj.h2.suspended == 0 && x == 9;
+
+ x = 7;
+ after obj.h2 -> (p, i): two_params(p, i);
+ resumed = obj.h5[1][1].send_now(&x, {9});
+ assert resumed == 1 && obj.h2.suspended == 0 && x == 4;
+
+ x = 7;
+ after obj.h6 -> i: one_serializable_param(i);
+ resumed = obj.h5[2][2].send_now(&x, {9});
+ assert resumed == 1 && obj.h6.suspended == 0 && x == 7
+ && storage == 9;
+
+ after obj.h6 -> i: one_serializable_param(i);
+ resumed = obj.h5[3][3].send_now(&x, {9});
+ assert resumed == 1 && obj.h6.suspended == 0 && x == 7
+ && storage == 6;
+ storage = 0;
+ }
+ }
+}
diff --git a/test/1.4/types/raii/T_hooks_checkpointing.py b/test/1.4/types/raii/T_hooks_checkpointing.py
new file mode 100644
index 000000000..02d1d4c79
--- /dev/null
+++ b/test/1.4/types/raii/T_hooks_checkpointing.py
@@ -0,0 +1,18 @@
+# © 2023 Intel Corporation
+# SPDX-License-Identifier: MPL-2.0
+
+from os.path import join
+import subprocess
+from simicsutils.host import batch_suffix
+
+obj.setup_state = None
+
+SIM_write_configuration_to_file("checkpointing.chkp", Sim_Save_Nobundle)
+
+subprocess.check_call(
+ [f'{conf.sim.project}/bin/simics{batch_suffix()}'] +
+ ["--batch-mode", "--quiet", "--no-copyright", "--dump-core", "--werror",
+ '--project', conf.sim.project,
+ "-L", scratchdir,
+ "-c", "checkpointing.chkp",
+ "-p", join(basedir, "T_hooks_checkpointing.cont.py")])
diff --git a/test/1.4/types/raii/T_various.dml b/test/1.4/types/raii/T_various.dml
new file mode 100644
index 000000000..e6474806e
--- /dev/null
+++ b/test/1.4/types/raii/T_various.dml
@@ -0,0 +1,224 @@
+/*
+ © 2023 Intel Corporation
+ SPDX-License-Identifier: MPL-2.0
+*/
+dml 1.4;
+
+device test;
+
+typedef struct {
+ int x;
+ string s;
+} s_t;
+typedef struct {
+ vect(int) ints;
+ vect(string) strings;
+ string otherstrings[3];
+} bs_t;
+
+extern typedef struct {
+ char *elements;
+ uint32 size;
+ uint32 start;
+ uint32 len;
+} _dml_vect_t;
+
+extern typedef struct {
+ char *s;
+ uint32 size;
+ uint32 len;
+} _dml_string_t;
+
+extern void _dml_string_addfmt(string *s, const char *format, ...);
+extern string _dml_string_new(const char *msg);
+extern void _dml_string_addstr(string *s, const char *str);
+
+saved s_t sav = {0, "is, frostigt mörker"};
+saved string savic[2] = {"betvingande", "kyla"};
+
+
+typedef struct {
+ string xarx[3];
+} calka_t;
+
+typedef struct {
+ string xarx;
+ const int y;
+} racalka_t;
+
+independent method add_strs(string a, string b) -> (string) {
+ _dml_string_addstr(&a, b.c_str());
+ return a;
+}
+
+independent method splitAt(string str, uint32 at) -> (string, string) {
+ local int idx = at > str.len ? str.len : at;
+ return (mk_string_f("%.*s", idx, str.c_str()),
+ mk_string(str.c_str() + idx));
+}
+
+template t {
+ param apocalyptical : string;
+ param apocalyptical = mk_string_f(
+ "Gnistor som släcks %s %s %s %s %s %s.",
+ "i", "en", "stjärnkall", "och", "bister", "natt");
+
+ param whatdoesthisevenmean : const char *;
+ param whatdoesthisevenmean = cast({{"En ondskans tid nu domnar min hand",
+ 1}}, racalka_t[1])[0].xarx.c_str();
+
+ param avect : vect(int);
+ param avect = cast({1, 2, 3}, vect(int));
+}
+
+is t;
+
+saved struct { string xarx[3]; } salt;
+
+session const char *witness = cast("i svårmod stillar sig", string).c_str();
+
+independent method insert_stringvect(vect(string) *v, uint32 i, string s) {
+ v->insert(i, s);
+}
+
+method init() {
+ local bs_t bs = { .otherstrings = {"bränningen", "vid", "strand"},
+ .ints = {5,3,3}, ...};
+ assert "vid" == bs.otherstrings[1];
+ assert bs.ints.len == 3;
+ local bs_t bs2 = { .strings = {"hård", "tid", "ensamma", "timmar"}, ...};
+ bs = bs2;
+ assert bs.strings.len == 4 && bs.strings[2] == "ensamma";
+ bs.strings = {"världen", "är", "frusen"};
+ assert bs.strings[2] == "frusen";
+
+ local vect(string) v = {"och","människan","vred"};
+ assert v[1] == "människan";
+ assert v.len == 3;
+ v.len = 5;
+ assert v.len == 5 && v[4] == "";
+ v += {"all", "visdom", "är"};
+
+ v.push_back("förlorad");
+ assert v.len == 9;
+ v.pop_front();
+ assert v.len == 8;
+ local bool polarity = true;
+ insert_stringvect(&v, 4, "och glömt");
+ foreach elem in (v) {
+ elem += polarity ? cast(",", string) : cast(";", string);
+ polarity = !polarity;
+ }
+ local string to_print;
+ foreach elem in (v) {
+ to_print += elem;
+ }
+ to_print = mk_string_f("%s;all sång har", to_print.c_str());
+ assert strcmp(to_print.c_str(),
+ "människan,vred;,;och glömt,all;visdom,är;förlorad,;"
+ + "all sång har") == 0;
+
+ local string s1 = "Tystnat.";
+ s1.c_str()[s1.len - 1] = ',';
+ local string s_ = s1 + " och " + "kärleken " + ("med; " + s1);
+ local uint32 prev_len = s_.len;
+ local _dml_string_t *p = cast(&s_, void *);
+ s_.len = 255;
+ assert p->size == 256;
+ s_.len = prev_len;
+ assert prev_len == 36 && p->size == 128; // Shrunk to _DML_BITCEIL(36 + 1)*2
+ s_ = "";
+ assert p->size == 32; // Shrunk to _DML_STRING_INITIAL_SIZE
+ local string *ps = new string;
+ local int *pater = cast({1,2,3}, int[3]);
+ assert pater[1] == 2;
+ try {
+ local const s_t st = {1, "som"};
+ *ps = st.s + " bristande";
+ {
+ local string s2 = " båge";
+ if (*ps == "brinnande")
+ throw;
+ *ps += s2;
+ }
+ _dml_string_addstr(&s1, s1.c_str());
+ *ps = add_strs(*ps, " låga.");
+ } catch;
+
+ do
+ local string s2 = "skriande";
+ while (false);
+ sav.s = "kråka";
+ assert s1 + "SKRIANDE" == "Tystnat,Tystnat,SKRIANDE";
+ assert *ps == "som bristande båge låga.";
+ assert strcmp(cast(this, t).whatdoesthisevenmean,
+ "En ondskans tid nu domnar min hand") == 0;
+ delete ps;
+
+ local (string sa, string sb) = splitAt("FLYGANDESPJUT", 8);
+ assert sa == "FLYGANDE" && sb == "SPJUT";
+
+ local (string sST, vect(int) vST) = memo();
+ assert sST == "SOM EN VÄXANDE VÅG";
+ assert vST.len == 20 && vST[10] == 'X';
+
+ local vect(string[3]) sarrv = {{"som","den","kraft"},
+ {"en","gång","fanns"},
+ {"i ett", "brustet", "svärd"}};
+ assert sarrv[1][1] == "gång";
+
+
+ try {
+ local string s = "hans blick är ";
+ s += ms("som ormens");
+ assert s == "hans blick är som ormens";
+ } catch assert false;
+ {
+ local (string s, vect(int) v) = ("I am", {1, 2});
+ s += " a string";
+ v += {3, 4};
+ assert s == "I am a string" && v.len == 4;
+ for (local uint32 i = 0; i < v.len; ++i) {
+ v[i] = i + 1;
+ }
+ v.len += 3;
+ assert v.len == 7;
+ }
+ local int *chunks_p = mk_chunks(1, 2).x;
+ assert chunks_p[0] == 1 && chunks_p[1] == 2;
+}
+
+typedef struct {
+ int x[2];
+} chunks_t;
+
+independent method mk_chunks(int x, int y) -> (chunks_t) {
+ return {{x, y}};
+}
+
+method ms(const char *s) -> (string) throws {
+ return mk_string(s);
+}
+
+method with_string(vect(string) s) {
+ assert s.len == 2 && s[0] == "som" && s[1] == "FALLANDE BÖLJA";
+}
+
+attribute trigger is write_only_attr {
+ param type = "n";
+ method set(attr_value_t val) throws {
+ local vect(string) s = {"som", "FALLANDE BÖLJA"};
+ after 0.1 s: with_string(s);
+ }
+}
+
+
+independent startup memoized method memo() -> (string, vect(int)) {
+ local vect(int) v;
+ local string s = "SOM EN VÄXANDE VÅG";
+ v.len = s.len;
+ for (local int i = 0; i < s.len; ++i) {
+ v[i] = s[i];
+ }
+ return (s, v);
+}
diff --git a/test/1.4/types/raii/T_various.py b/test/1.4/types/raii/T_various.py
new file mode 100644
index 000000000..f713eda7a
--- /dev/null
+++ b/test/1.4/types/raii/T_various.py
@@ -0,0 +1,11 @@
+# © 2023 Intel Corporation
+# SPDX-License-Identifier: MPL-2.0
+
+import stest
+
+cpu = SIM_create_object("clock", "clock", [["freq_mhz", 1]])
+obj.queue = cpu
+
+obj.trigger = None
+SIM_continue(99999)
+SIM_continue(2)
diff --git a/test/1.4/types/raii/hooks_common.dml b/test/1.4/types/raii/hooks_common.dml
new file mode 100644
index 000000000..30877c5cf
--- /dev/null
+++ b/test/1.4/types/raii/hooks_common.dml
@@ -0,0 +1,115 @@
+/*
+ © 2023 Intel Corporation
+ SPDX-License-Identifier: MPL-2.0
+*/
+dml 1.4;
+
+session int count;
+method no_params() {
+ ++count;
+}
+method one_param(int *x) {
+ ++*x;
+}
+method two_params(int *x, vect(int) i) {
+ *x = i[0];
+}
+session int storage;
+method one_serializable_param(vect(int) i) {
+ storage = i[0];
+}
+
+session int storage_indexed;
+session (int last_i_indexed, int last_j_indexed) = (-1, -1);
+group indexed[i < 5][j < 7] {
+ method no_params() {
+ (last_i_indexed, last_j_indexed) = (i, j);
+ }
+ method one_param(int *x) {
+ *x = i*7 + j;
+ }
+ method two_params(int *x, vect(int) coeff) {
+ *x = coeff[0] * (i*7 + j);
+ }
+ method one_serializable_param(vect(int) coeff) {
+ storage_indexed = coeff[0] * (i*7 + j);
+ }
+}
+
+template hookset {
+ hook() _h0;
+ hook(int *) _h1;
+ hook(int *, vect(int)) _h2;
+
+ hook() _h3[6][8];
+ hook(int *) _h4[6][8];
+ hook(int *, vect(int)) _h5[6][8];
+
+ hook(vect(int)) _h6;
+
+ param h0 default _h0;
+ param h1 default _h1;
+ param h2 default _h2;
+ param h3 default _h3;
+ param h4 default _h4;
+ param h5 default _h5;
+ param h6 default _h6;
+}
+
+template hookset_set {
+ is hookset;
+
+ group g[i < 4] is hookset;
+
+ bank b1 is hookset {
+ group g[i < 4] is hookset;
+ }
+ bank b2[i < 3] is hookset {
+ group g[i < 4] is hookset;
+ }
+}
+
+method enforce_h0_ref(hook() h) -> (hook()) {
+ return h;
+}
+
+method enforce_h1_ref(hook(int *) h) -> (hook(int *)) {
+ return h;
+}
+
+method enforce_h2_ref(hook(int *, vect(int)) h) -> (hook(int *, vect(int))) {
+ return h;
+}
+
+method enforce_h6_ref(hook(vect(int)) h) -> (hook(vect(int))) {
+ return h;
+}
+
+group via_hookref is hookset_set {
+ in each hookset {
+ is init;
+ session hook() h3_arr[6][8];
+ session hook(int *) h4_arr[6][8];
+ session hook(int *, vect(int)) h5_arr[6][8];
+ method init() {
+ for (local int idx_0 = 0; idx_0 < 6; ++idx_0) {
+ for (local int idx_1 = 0; idx_1 < 8; ++idx_1) {
+ h3_arr[idx_0][idx_1] = _h3[idx_0][idx_1];
+ h4_arr[idx_0][idx_1] = _h4[idx_0][idx_1];
+ h5_arr[idx_0][idx_1] = _h5[idx_0][idx_1];
+ }
+ }
+ }
+ param h0 = enforce_h0_ref(_h0);
+ param h1 = enforce_h1_ref(_h1);
+ param h2 = enforce_h2_ref(_h2);
+
+ param h3 = h3_arr;
+ param h4 = h4_arr;
+ param h5 = h5_arr;
+
+ param h6 = enforce_h6_ref(_h6);
+ }
+}
+
+is hookset_set;