Skip to content

Commit

Permalink
Limit number of flags to 32
Browse files Browse the repository at this point in the history
Resolves #370
  • Loading branch information
lukewagner committed Jul 19, 2024
1 parent affacdc commit 649f7fd
Show file tree
Hide file tree
Showing 4 changed files with 24 additions and 47 deletions.
2 changes: 1 addition & 1 deletion design/mvp/Binary.md
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ defvaltype ::= pvt:<primvaltype> => pvt
| 0x71 case*:vec(<case>) => (variant case+) (if |case*| > 0)
| 0x70 t:<valtype> => (list t)
| 0x6f t*:vec(<valtype>) => (tuple t+) (if |t*| > 0)
| 0x6e l*:vec(<label'>) => (flags l+) (if |l*| > 0)
| 0x6e l*:vec(<label'>) => (flags l+) (if 0 < |l*| <= 32)
| 0x6d l*:vec(<label'>) => (enum l+) (if |l*| > 0)
| 0x6b t:<valtype> => (option t)
| 0x6a t?:<valtype>? u?:<valtype>? => (result t? (error u)?)
Expand Down
33 changes: 11 additions & 22 deletions design/mvp/CanonicalABI.md
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,7 @@ back to sequences of `i32`s when there are more than 32 flags.
```python
def alignment_flags(labels):
n = len(labels)
assert(0 < n <= 32)
if n <= 8: return 1
if n <= 16: return 2
return 4
Expand Down Expand Up @@ -216,13 +217,10 @@ def elem_size_variant(cases):

def elem_size_flags(labels):
n = len(labels)
assert(n > 0)
assert(0 < n <= 32)
if n <= 8: return 1
if n <= 16: return 2
return 4 * num_i32_flags(labels)

def num_i32_flags(labels):
return math.ceil(len(labels) / 32)
return 4
```

### Call Context
Expand Down Expand Up @@ -1584,7 +1582,7 @@ def flatten_type(t):
case String() | List(_) : return ['i32', 'i32']
case Record(fields) : return flatten_record(fields)
case Variant(cases) : return flatten_variant(cases)
case Flags(labels) : return ['i32'] * num_i32_flags(labels)
case Flags(labels) : return ['i32']
case Own(_) | Borrow(_) : return ['i32']
```

Expand Down Expand Up @@ -1755,16 +1753,12 @@ def wrap_i64_to_i32(i):
return i % (1 << 32)
```

Finally, flags are lifted by OR-ing together all the flattened `i32` values
and then lifting to a record the same way as when loading flags from linear
memory.
Finally, flags are lifted to a record the same way as when loading flags from
linear memory.
```python
def lift_flat_flags(vi, labels):
i = 0
shift = 0
for _ in range(num_i32_flags(labels)):
i |= (vi.next('i32') << shift)
shift += 32
assert(0 < len(labels) <= 32)
i = vi.next('i32')
return unpack_flags_from_int(i, labels)
```

Expand Down Expand Up @@ -1857,16 +1851,11 @@ def lower_flat_variant(cx, v, cases):
return [case_index] + payload
```

Finally, flags are lowered by slicing the bit vector into `i32` chunks:
Finally, flags are lowered by packing the flags into one `i32` bitvector.
```python
def lower_flat_flags(v, labels):
i = pack_flags_into_int(v, labels)
flat = []
for _ in range(num_i32_flags(labels)):
flat.append(i & 0xffffffff)
i >>= 32
assert(i == 0)
return flat
assert(0 < len(labels) <= 32)
return [pack_flags_into_int(v, labels)]
```

### Lifting and Lowering Values
Expand Down
26 changes: 8 additions & 18 deletions design/mvp/canonical-abi/definitions.py
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,7 @@ def max_case_alignment(cases):

def alignment_flags(labels):
n = len(labels)
assert(0 < n <= 32)
if n <= 8: return 1
if n <= 16: return 2
return 4
Expand Down Expand Up @@ -266,13 +267,10 @@ def elem_size_variant(cases):

def elem_size_flags(labels):
n = len(labels)
assert(n > 0)
assert(0 < n <= 32)
if n <= 8: return 1
if n <= 16: return 2
return 4 * num_i32_flags(labels)

def num_i32_flags(labels):
return math.ceil(len(labels) / 32)
return 4

### Call Context

Expand Down Expand Up @@ -1100,7 +1098,7 @@ def flatten_type(t):
case String() | List(_) : return ['i32', 'i32']
case Record(fields) : return flatten_record(fields)
case Variant(cases) : return flatten_variant(cases)
case Flags(labels) : return ['i32'] * num_i32_flags(labels)
case Flags(labels) : return ['i32']
case Own(_) | Borrow(_) : return ['i32']

def flatten_record(fields):
Expand Down Expand Up @@ -1222,11 +1220,8 @@ def wrap_i64_to_i32(i):
return i % (1 << 32)

def lift_flat_flags(vi, labels):
i = 0
shift = 0
for _ in range(num_i32_flags(labels)):
i |= (vi.next('i32') << shift)
shift += 32
assert(0 < len(labels) <= 32)
i = vi.next('i32')
return unpack_flags_from_int(i, labels)

### Flat Lowering
Expand Down Expand Up @@ -1294,13 +1289,8 @@ def lower_flat_variant(cx, v, cases):
return [case_index] + payload

def lower_flat_flags(v, labels):
i = pack_flags_into_int(v, labels)
flat = []
for _ in range(num_i32_flags(labels)):
flat.append(i & 0xffffffff)
i >>= 32
assert(i == 0)
return flat
assert(0 < len(labels) <= 32)
return [pack_flags_into_int(v, labels)]

### Lifting and Lowering Values

Expand Down
10 changes: 4 additions & 6 deletions design/mvp/canonical-abi/run_tests.py
Original file line number Diff line number Diff line change
Expand Up @@ -114,7 +114,7 @@ def test_name():
test(t, [2], {'a':False,'b':True})
test(t, [3], {'a':True,'b':True})
test(t, [4], {'a':False,'b':False})
test(Flags([str(i) for i in range(33)]), [0xffffffff,0x1], { str(i):True for i in range(33) })
test(Flags([str(i) for i in range(32)]), [0xffffffff], { str(i):True for i in range(32) })
t = Variant([Case('x',U8()),Case('y',F32()),Case('z',None)])
test(t, [0,42], {'x': 42})
test(t, [0,256], {'x': 0})
Expand Down Expand Up @@ -304,12 +304,10 @@ def test_heap(t, expect, args, byte_array):
[0xff,0xff,0x1,0, 0,0,0,0])
test_heap(t, v, [0,2],
[0xff,0xff,0x3,0, 0,0,0,0])
t = List(Flags([str(i) for i in range(33)]))
v = [{ str(i):b for i in range(33) } for b in [True,False]]
t = List(Flags([str(i) for i in range(32)]))
v = [{ str(i):b for i in range(32) } for b in [True,False]]
test_heap(t, v, [0,2],
[0xff,0xff,0xff,0xff,0x1,0,0,0, 0,0,0,0,0,0,0,0])
test_heap(t, v, [0,2],
[0xff,0xff,0xff,0xff,0x3,0,0,0, 0,0,0,0,0,0,0,0])
[0xff,0xff,0xff,0xff, 0,0,0,0])

def test_flatten(t, params, results):
expect = CoreFuncType(params, results)
Expand Down

0 comments on commit 649f7fd

Please sign in to comment.