# SPDX-License-Identifier: LGPL-2.1-or-later
# See Notices.txt for copyright information
"""
Copyright (C) 2020 Luke Kenneth Casson Leighton <lkcl@lkcl.net>

dynamic-partitionable class similar to Signal, which, when the partition
is fully open will be identical to Signal. when partitions are closed,
the class turns into a SIMD variant of Signal. *this is dynamic*.

the basic fundamental idea is: write code once, and if you want a SIMD
version of it, use SimdSignal in place of Signal. job done.

this however requires the code to *not* be designed to use nmigen.If,
nmigen.Case, or other constructs: only Mux and other logic.

* http://bugs.libre-riscv.org/show_bug.cgi?id=132
"""
from ieee754.part_mul_add.adder import PartitionedAdder
from ieee754.part_cmp.eq_gt_ge import PartitionedEqGtGe
from ieee754.part_bits.xor import PartitionedXOR
from ieee754.part_bits.bool import PartitionedBool
from ieee754.part_bits.all import PartitionedAll
from ieee754.part_shift.part_shift_dynamic import PartitionedDynamicShift
from ieee754.part_shift.part_shift_scalar import PartitionedScalarShift
from ieee754.part_mul_add.partpoints import make_partition2, PartitionPoints
from ieee754.part_mux.part_mux import PMux
from ieee754.part_ass.passign import PAssign
from ieee754.part_cat.pcat import PCat
from ieee754.part_repl.prepl import PRepl
from ieee754.part.simd_scope import SimdScope
from ieee754.part.layout_experiment import layout
from operator import or_, xor, and_, not_

from nmigen import (Signal, Const, Cat)
from nmigen.hdl.ast import UserValue, Shape
def getsig(op1):
    """Return the underlying nmigen Signal of a SimdSignal, or the
    value unchanged when it is already a plain Value/Signal.

    NOTE(review): the ``def`` line and the body after the isinstance
    test were lost in the source mangling; reconstructed from the
    call sites ``getsig(op1)`` / ``getsig(op2)`` below — confirm.
    """
    if isinstance(op1, SimdSignal):
        op1 = op1.sig
    return op1


def applyop(op1, op2, op):
    """Apply a 2-operand bitwise operator *op* (e.g. operator.and_)
    to op1 and op2, returning a new SimdSignal driven combinatorially
    with the result.  At least one of op1/op2 must be a SimdSignal so
    the result shape (and the module ``m``) can be copied from it.
    """
    if isinstance(op1, SimdSignal):
        result = SimdSignal.like(op1)
    else:
        result = SimdSignal.like(op2)
    result.m.d.comb += result.sig.eq(op(getsig(op1), getsig(op2)))
    return result
# for sub-modules to be created on-demand. Mux is done slightly
# differently (has its own global).  each category keeps a counter so
# that every generated submodule gets a unique name (see get_modname).
modnames = {}
for name in ['add', 'eq', 'gt', 'ge', 'ls', 'xor', 'bool', 'all']:
    modnames[name] = 0
def get_runlengths(pbit, size):
    """Decode a partition bit-pattern into a list of partition lengths.

    pbit: an integer whose set bits mark "start of a new partition".
    size: the number of partition-point bits to examine.
    Returns a list of run lengths summing to size+1.
    """
    res = []
    count = 1
    # identify where the 1s are, which indicates "start of a new partition"
    # we want a list of the lengths of all partitions
    for i in range(size):
        if pbit & (1 << i):  # it's a 1: ends old partition, starts new
            res.append(count)  # add partition
            count = 1  # start again
        else:
            count += 1
    # end reached, add whatever is left. could have done this by creating
    # "fake" extra bit on the partitions, but hey
    res.append(count)
    return res
# Prototype https://bugs.libre-soc.org/show_bug.cgi?id=713#c53
# this provides a "compatibility" layer with existing SimdSignal
# behaviour. the idea is that this interface defines which "combinations"
# of partition selections are relevant, and as an added bonus it says
# which partition lanes are completely irrelevant (padding, blank).
class PartType:  # TODO decide name
    """Compatibility PartType: every combination of the partition-point
    mask bits is considered a relevant "case".

    NOTE(review): several method headers and small statements were lost
    in the source mangling and have been reconstructed — confirm against
    upstream.
    """

    def __init__(self, psig):
        self.psig = psig  # the SimdSignal this PartType describes

    def get_mask(self):
        # one signal per partition point
        return list(self.psig.partpoints.values())

    def get_switch(self):
        # switch on the full concatenated mask
        return Cat(self.get_mask())

    def get_cases(self):
        # every possible value of the mask
        return range(1 << len(self.get_mask()))

    def get_num_elements(self, pbit):
        keys = list(self.psig.partpoints.keys())
        return len(get_runlengths(pbit, len(keys)))

    def get_el_range(self, pbit, el_num):
        """based on the element number and the current elwid/pbit (case)
        return the range start/end of the element within its underlying
        signal.  this function is not at all designed to be efficient.
        """
        keys = list(self.psig.partpoints.keys())
        runs = get_runlengths(pbit, len(keys))
        # bracket the partition-point positions with the signal edges
        keys = [0] + keys + [len(self.psig.sig)]
        # skip forward over the elements before el_num
        y = 0
        for i in range(el_num):
            y += runs[i]
        numparts = runs[el_num]
        return range(keys[y], keys[y+numparts])

    def blanklanes(self):
        # compatibility mode has no padding/blank lanes
        # NOTE(review): body reconstructed (original line missing) — confirm
        return 0
# this one would be an elwidth version
# see https://bugs.libre-soc.org/show_bug.cgi?id=713#c34
# it requires an "adapter" which is the layout() function
# where the PartitionPoints was *created* by the layout()
# function and this class then "understands" the relationship
# between elwidth and the PartitionPoints that were created
class ElwidPartType:  # TODO decide name
    """elwidth-based PartType: cases are the possible values of the
    scope's elwid signal, as recorded in the SimdShape's bitp dict.

    NOTE(review): method headers reconstructed from the visible bodies
    (source was mangled) — confirm against upstream.
    """

    def __init__(self, psig):
        self.psig = psig

    def get_mask(self):
        return list(self.psig._shape.partpoints.values())  # i think

    def get_switch(self):
        return self.psig.scope.elwid  # switch on elwid: match get_cases()

    def get_cases(self):
        return self.psig._shape.bitp.keys()  # all possible values of elwid

    def blanklanes(self):
        # lanes that are *always* padding for this shape
        return self.psig.shape.blankmask
# declares priority of the SimdShape: which of the two width
# specifications (overall fixed width / per-elwidth lane widths)
# drives arithmetic-operator behaviour.  both bits set == "BOTH".
PRIORITY_FIXED = 0b01
PRIORITY_ELWID = 0b10
class SimdShape(Shape):
    """a SIMD variant of Shape. supports:
    * fixed overall width with variable (maxed-out) element lengths
    * fixed element widths with overall size auto-determined
    * both fixed overall width and fixed element widths

    Documentation / Analysis:
    https://libre-soc.org/3d_gpu/architecture/dynamic_simd/shape/

    naming is preserved to be compatible with Shape(): the (calculated *or*
    given) fixed_width is *explicitly* passed through as Shape.width
    in order to ensure downcasting works as expected.

    a mode flag records what behaviour is required for arithmetic operators.
    see wiki documentation: it's... complicated.

    NOTE(review): this class was reconstructed from a mangled source with
    several original lines missing (notably parts of __init__ and the
    layout() call) — confirm against upstream before relying on it.
    """

    def __init__(self, scope, width=None,  # width is actually widths_at_elwid
                 signed=False,
                 fixed_width=None):  # fixed overall width
        # record the mode and scope
        self.scope = scope
        self.mode_flag = 0
        self.fixed_width = fixed_width
        self.widths_at_elwid = width
        # when both of these are set it creates mode_flag=PRIORITY_BOTH
        # otherwise creates a priority of either FIXED width or ELWIDs
        if self.fixed_width is not None:
            self.mode_flag |= PRIORITY_FIXED
        if self.widths_at_elwid is not None:
            self.mode_flag |= PRIORITY_ELWID
        print("SimdShape width", width, "fixed_width", fixed_width)
        # this check is done inside layout but do it again here anyway
        assert self.fixed_width is not None or \
            self.widths_at_elwid is not None, \
            "both width (widths_at_elwid) and fixed_width cannot be None"
        # with a scope available, ask layout() to compute the actual
        # partition arrangement (original call partly missing — the
        # argument list here is an assumption, TODO confirm)
        if scope is not None:
            (pp, bitp, lpoints, bmask, width, lane_shapes, part_wid) = \
                layout(scope.elwid,
                       scope.vec_el_counts,
                       self.widths_at_elwid,
                       self.fixed_width)
            self.partpoints = pp
            self.bitp = bitp          # binary values for partpoints per elwidth
            self.lpoints = lpoints    # layout ranges
            self.blankmask = bmask    # blanking mask (partitions always padding)
            self.partwid = part_wid   # smallest alignment start for elements
            self.lane_shapes = lane_shapes

        # pass through the calculated width to Shape() so that when/if
        # objects using this Shape are downcast, they know exactly how to
        # get *all* bits and need know absolutely nothing about SIMD at all
        Shape.__init__(self, width, signed)

    @classmethod
    def like(cls, shape, *, scope=None):
        """Return a new SimdShape with the same properties as *shape*,
        optionally rebound to a different *scope*.
        """
        if scope is None:
            scope = shape.scope  # NOTE(review): assumed — line not visible
        return SimdShape(scope, shape.widths_at_elwid, shape.signed,
                         shape.fixed_width)

    def __mul__(self, other):
        """Scale the shape by an integer factor, preserving the
        FIXED/ELWID priority mode of the original.
        """
        if isinstance(other, int):
            # for integer multiply, by a nice coincidence it does not
            # matter if the LHS is PRIORITY_FIXED or PRIORITY_ELWID.
            # however the priority has to be preserved.
            fixed_width = None
            lane_shapes = None
            # first, check if fixed_width is needed (if originally,
            # self was constructed with a fixed_width=None we must
            # *return* another SimdShape with a fixed_width=None)
            if self.mode_flag & PRIORITY_FIXED:
                fixed_width = self.width * other
            # likewise for lane elwidths: if, originally, self was
            # constructed with [widths_at_elwidth==lane_shapes==]width
            # not None, the return result also sets explicit lane_shapes.
            # BUGFIX: iterate .items() — iterating the dict directly
            # yields keys only and cannot unpack into (k, v)
            if self.mode_flag & PRIORITY_ELWID:
                lane_shapes = {k: v * other
                               for k, v in self.lane_shapes.items()}
            # wheww, got everything.
            return SimdShape(self.scope,             # same scope
                             width=lane_shapes,      # widths_at_elwid
                             signed=self.signed,     # same sign? hmmm XXX
                             fixed_width=fixed_width)  # overall width
        raise NotImplementedError(
            f"Multiplying a SimdShape by {type(other)} isn't implemented")

        # TODO (and go over examples, sigh). this is deliberately *after*
        # the raise NotImplementedError because it needs review.
        # also probably TODO: potentially the other argument could be
        # a Shape() not a SimdShape(). sigh.
        # for SimdShape-to-SimdShape multiply, the rules are slightly
        # different: both sides have to be PRIORITY_FIXED for a
        # PRIORITY_FIXED result to be returned. if either (or both)
        # of the LHS and RHS were given elwidths (lane_shapes != None)
        # then tough luck: the return result is still PRIORITY_ELWID.
        # TODO: review that. it *might* be the case (again, due to
        # a coincidence of multiply, that when PRIORITY_BOTH is set
        # it is possible to return a PRIORITY_BOTH result. but..

        # first, check if this is fixed_width mode. this is *only*
        # possible if *both* LHS *and* RHS are PRIORITY_FIXED.
        if (self.mode_flag == PRIORITY_FIXED and
                other.mode_flag == PRIORITY_FIXED):
            fixed_width = self.width * other.width
        else:
            fixed_width = None
        # (XXX assume other is SimdShape) - when PRIORITY_ELWID
        # the result *has* to be a PRIORITY_ELWID (FIXED is *IGNORED*)
        # use *either* the computed *or* the given lane_shapes
        lane_shapes = {k: v * other.lane_shapes[k]
                       for k, v in self.lane_shapes.items()}
        # wheww, got everything.
        return SimdShape(self.scope,             # same scope
                         width=lane_shapes,      # widths_at_elwid
                         signed=self.signed,     # same sign? hmmm XXX
                         fixed_width=fixed_width)  # overall width

    def __rmul__(self, other):
        return self.__mul__(other)

    def __add__(self, other):
        """Widen the shape: by an integer amount per element, or by
        another SimdShape (element-wise sum of lane widths).
        """
        if isinstance(other, int):
            # BUGFIX: .items() needed to unpack (k, v) pairs
            lane_shapes = {k: v + other
                           for k, v in self.lane_shapes.items()}
            return SimdShape(self.scope, lane_shapes, signed=self.signed)
        elif isinstance(other, SimdShape):
            assert other.scope is self.scope, "scope mismatch"
            o = other.lane_shapes
            lane_shapes = {k: v + o[k]
                           for k, v in self.lane_shapes.items()}
            # XXX not correct, we need a width-hint, not an overwrite
            # lane_shapes argument...
            return SimdShape(self.scope, lane_shapes, signed=self.signed,
                             fixed_width=self.width + other.width)
        else:
            raise NotImplementedError(
                f"Adding a SimdShape to {type(other)} isn't implemented")

    def __radd__(self, other):
        return self.__add__(other)
class SimdSignal(UserValue):
    # XXX ################################################### XXX
    # XXX Keep these functions in the same order as ast.Value XXX
    # XXX ################################################### XXX

    def __init__(self, mask, shape=None, *args,
                 src_loc_at=0, fixed_width=None, **kwargs):
        """Create a SimdSignal.

        mask: either a SimdScope (elwidth mode), a PartitionPoints,
              or a mask suitable for make_partition2 (compat mode).
        shape: Shape / SimdShape / anything Signal() accepts.

        NOTE(review): several original lines were missing from the
        mangled source; the else-branches here are reconstructed.
        """
        super().__init__(src_loc_at=src_loc_at)
        print("SimdSignal shape", shape)
        # create partition points
        if isinstance(mask, SimdScope):  # mask parameter is a SimdScope
            self.scope = mask
            self.ptype = ElwidPartType(self)
            # SimdShapes can be created with an empty scope. check that now.
            # BUGFIX(review): mangled source read "SimdScope" here, which
            # contradicts the comment above — SimdShape assumed; confirm.
            if isinstance(shape, SimdShape):
                if shape.scope is None:
                    shape = SimdShape.like(shape, scope=self.scope)
            else:
                # adapt shape to a SimdShape
                shape = SimdShape(self.scope, shape, fixed_width=fixed_width)
            self.sig = Signal(shape, *args, **kwargs)
            # get partpoints from SimdShape
            self.partpoints = shape.partpoints
        else:
            self.sig = Signal(shape, *args, **kwargs)
            width = len(self.sig)  # get signal width
            if isinstance(mask, PartitionPoints):
                self.partpoints = mask
            else:
                self.partpoints = make_partition2(mask, width)
            self.ptype = PartType(self)
336 def set_module(self
, m
):
339 def get_modname(self
, category
):
340 modnames
[category
] += 1
341 return "%s_%d" % (category
, modnames
[category
])
344 def like(other
, *args
, **kwargs
):
345 """Builds a new SimdSignal with the same PartitionPoints and
346 Signal properties as the other"""
347 result
= SimdSignal(PartitionPoints(other
.partpoints
))
348 result
.sig
= Signal
.like(other
.sig
, *args
, **kwargs
)
355 # nmigen-redirected constructs (Mux, Cat, Switch, Assign)
357 # TODO, http://bugs.libre-riscv.org/show_bug.cgi?id=716
358 # def __Part__(self, offset, width, stride=1, *, src_loc_at=0):
359 raise NotImplementedError("TODO: implement as "
360 "(self>>(offset*stride)[:width]")
361 # TODO, http://bugs.libre-riscv.org/show_bug.cgi?id=716
363 def __Slice__(self
, start
, stop
, *, src_loc_at
=0):
364 # NO. Swizzled shall NOT be deployed, it violates
365 # Project Development Practices
366 raise NotImplementedError("TODO: need PartitionedSlice")
368 def __Repl__(self
, count
, *, src_loc_at
=0):
369 return PRepl(self
.m
, self
, count
, self
.ptype
)
371 def __Cat__(self
, *args
, src_loc_at
=0):
372 print("partsig cat", self
, args
)
373 # TODO: need SwizzledSimdValue-aware Cat
374 args
= [self
] + list(args
)
376 assert isinstance(sig
, SimdSignal
), \
377 "All SimdSignal.__Cat__ arguments must be " \
378 "a SimdSignal. %s is not." % repr(sig
)
379 return PCat(self
.m
, args
, self
.ptype
)
381 def __Mux__(self
, val1
, val2
):
382 # print ("partsig mux", self, val1, val2)
383 assert len(val1
) == len(val2
), \
384 "SimdSignal width sources must be the same " \
385 "val1 == %d, val2 == %d" % (len(val1
), len(val2
))
386 return PMux(self
.m
, self
.partpoints
, self
, val1
, val2
, self
.ptype
)
388 def __Assign__(self
, val
, *, src_loc_at
=0):
389 print("partsig assign", self
, val
)
390 # this is a truly awful hack, outlined here:
391 # https://bugs.libre-soc.org/show_bug.cgi?id=731#c13
392 # during the period between constructing Simd-aware sub-modules
393 # and the elaborate() being called on them there is a window of
394 # opportunity to indicate which of those submodules is LHS and
395 # which is RHS. manic laughter is permitted. *gibber*.
396 if hasattr(self
, "_hack_submodule"):
397 self
._hack
_submodule
.set_lhs_mode(True)
398 if hasattr(val
, "_hack_submodule"):
399 val
._hack
_submodule
.set_lhs_mode(False)
400 return PAssign(self
.m
, self
, val
, self
.ptype
)
402 # TODO, http://bugs.libre-riscv.org/show_bug.cgi?id=458
403 # def __Switch__(self, cases, *, src_loc=None, src_loc_at=0,
406 # no override needed, Value.__bool__ sufficient
407 # def __bool__(self):
409 # unary ops that do not require partitioning
411 def __invert__(self
):
412 result
= SimdSignal
.like(self
)
413 self
.m
.d
.comb
+= result
.sig
.eq(~self
.sig
)
416 # unary ops that require partitioning
419 z
= Const(0, len(self
.sig
))
420 result
, _
= self
.sub_op(z
, self
)
423 # binary ops that need partitioning
425 def add_op(self
, op1
, op2
, carry
):
428 pa
= PartitionedAdder(len(op1
), self
.partpoints
)
429 setattr(self
.m
.submodules
, self
.get_modname('add'), pa
)
433 comb
+= pa
.carry_in
.eq(carry
)
434 result
= SimdSignal
.like(self
)
435 comb
+= result
.sig
.eq(pa
.output
)
436 return result
, pa
.carry_out
438 def sub_op(self
, op1
, op2
, carry
=~
0):
441 pa
= PartitionedAdder(len(op1
), self
.partpoints
)
442 setattr(self
.m
.submodules
, self
.get_modname('add'), pa
)
445 comb
+= pa
.b
.eq(~op2
)
446 comb
+= pa
.carry_in
.eq(carry
)
447 result
= SimdSignal
.like(self
)
448 comb
+= result
.sig
.eq(pa
.output
)
449 return result
, pa
.carry_out
451 def __add__(self
, other
):
452 result
, _
= self
.add_op(self
, other
, carry
=0)
455 def __radd__(self
, other
):
456 # https://bugs.libre-soc.org/show_bug.cgi?id=718
457 result
, _
= self
.add_op(other
, self
)
460 def __sub__(self
, other
):
461 result
, _
= self
.sub_op(self
, other
)
464 def __rsub__(self
, other
):
465 # https://bugs.libre-soc.org/show_bug.cgi?id=718
466 result
, _
= self
.sub_op(other
, self
)
469 def __mul__(self
, other
):
470 raise NotImplementedError # too complicated at the moment
471 return Operator("*", [self
, other
])
473 def __rmul__(self
, other
):
474 raise NotImplementedError # too complicated at the moment
475 return Operator("*", [other
, self
])
477 # not needed: same as Value.__check_divisor
478 # def __check_divisor(self):
480 def __mod__(self
, other
):
481 raise NotImplementedError
482 other
= Value
.cast(other
)
483 other
.__check
_divisor
()
484 return Operator("%", [self
, other
])
486 def __rmod__(self
, other
):
487 raise NotImplementedError
488 self
.__check
_divisor
()
489 return Operator("%", [other
, self
])
491 def __floordiv__(self
, other
):
492 raise NotImplementedError
493 other
= Value
.cast(other
)
494 other
.__check
_divisor
()
495 return Operator("//", [self
, other
])
497 def __rfloordiv__(self
, other
):
498 raise NotImplementedError
499 self
.__check
_divisor
()
500 return Operator("//", [other
, self
])
502 # not needed: same as Value.__check_shamt
503 # def __check_shamt(self):
505 # TODO: detect if the 2nd operand is a Const, a Signal or a
506 # SimdSignal. if it's a Const or a Signal, a global shift
507 # can occur. if it's a SimdSignal, that's much more interesting.
508 def ls_op(self
, op1
, op2
, carry
, shr_flag
=0):
510 if isinstance(op2
, Const
) or isinstance(op2
, Signal
):
512 pa
= PartitionedScalarShift(len(op1
), self
.partpoints
)
516 pa
= PartitionedDynamicShift(len(op1
), self
.partpoints
)
518 # TODO: case where the *shifter* is a SimdSignal but
519 # the thing *being* Shifted is a scalar (Signal, expression)
520 # https://bugs.libre-soc.org/show_bug.cgi?id=718
521 setattr(self
.m
.submodules
, self
.get_modname('ls'), pa
)
524 comb
+= pa
.data
.eq(op1
)
525 comb
+= pa
.shifter
.eq(op2
)
526 comb
+= pa
.shift_right
.eq(shr_flag
)
530 comb
+= pa
.shift_right
.eq(shr_flag
)
531 # XXX TODO: carry-in, carry-out (for arithmetic shift)
532 #comb += pa.carry_in.eq(carry)
533 return (pa
.output
, 0)
535 def __lshift__(self
, other
):
536 z
= Const(0, len(self
.partpoints
)+1)
537 result
, _
= self
.ls_op(self
, other
, carry
=z
) # TODO, carry
540 def __rlshift__(self
, other
):
541 # https://bugs.libre-soc.org/show_bug.cgi?id=718
542 raise NotImplementedError
543 return Operator("<<", [other
, self
])
545 def __rshift__(self
, other
):
546 z
= Const(0, len(self
.partpoints
)+1)
547 result
, _
= self
.ls_op(self
, other
, carry
=z
, shr_flag
=1) # TODO, carry
550 def __rrshift__(self
, other
):
551 # https://bugs.libre-soc.org/show_bug.cgi?id=718
552 raise NotImplementedError
553 return Operator(">>", [other
, self
])
555 # binary ops that don't require partitioning
557 def __and__(self
, other
):
558 return applyop(self
, other
, and_
)
560 def __rand__(self
, other
):
561 return applyop(other
, self
, and_
)
563 def __or__(self
, other
):
564 return applyop(self
, other
, or_
)
566 def __ror__(self
, other
):
567 return applyop(other
, self
, or_
)
569 def __xor__(self
, other
):
570 return applyop(self
, other
, xor
)
572 def __rxor__(self
, other
):
573 return applyop(other
, self
, xor
)
575 # binary comparison ops that need partitioning
577 def _compare(self
, width
, op1
, op2
, opname
, optype
):
578 # print (opname, op1, op2)
579 pa
= PartitionedEqGtGe(width
, self
.partpoints
)
580 setattr(self
.m
.submodules
, self
.get_modname(opname
), pa
)
582 comb
+= pa
.opcode
.eq(optype
) # set opcode
583 if isinstance(op1
, SimdSignal
):
584 comb
+= pa
.a
.eq(op1
.sig
)
587 if isinstance(op2
, SimdSignal
):
588 comb
+= pa
.b
.eq(op2
.sig
)
593 def __eq__(self
, other
):
594 width
= len(self
.sig
)
595 return self
._compare
(width
, self
, other
, "eq", PartitionedEqGtGe
.EQ
)
597 def __ne__(self
, other
):
598 width
= len(self
.sig
)
599 eq
= self
._compare
(width
, self
, other
, "eq", PartitionedEqGtGe
.EQ
)
600 ne
= Signal(eq
.width
)
601 self
.m
.d
.comb
+= ne
.eq(~eq
)
604 def __lt__(self
, other
):
605 width
= len(self
.sig
)
606 # swap operands, use gt to do lt
607 return self
._compare
(width
, other
, self
, "gt", PartitionedEqGtGe
.GT
)
609 def __le__(self
, other
):
610 width
= len(self
.sig
)
611 # swap operands, use ge to do le
612 return self
._compare
(width
, other
, self
, "ge", PartitionedEqGtGe
.GE
)
614 def __gt__(self
, other
):
615 width
= len(self
.sig
)
616 return self
._compare
(width
, self
, other
, "gt", PartitionedEqGtGe
.GT
)
618 def __ge__(self
, other
):
619 width
= len(self
.sig
)
620 return self
._compare
(width
, self
, other
, "ge", PartitionedEqGtGe
.GE
)
622 # no override needed: Value.__abs__ is general enough it does the job
628 # TODO, http://bugs.libre-riscv.org/show_bug.cgi?id=716
629 # def __getitem__(self, key):
631 def __new_sign(self
, signed
):
632 # XXX NO - SimdShape not Shape
633 print("XXX requires SimdShape not Shape")
634 shape
= Shape(len(self
), signed
=signed
)
635 result
= SimdSignal
.like(self
, shape
=shape
)
636 self
.m
.d
.comb
+= result
.sig
.eq(self
.sig
)
639 # http://bugs.libre-riscv.org/show_bug.cgi?id=719
640 def as_unsigned(self
):
641 return self
.__new
_sign
(False)
644 return self
.__new
_sign
(True)
649 """Conversion to boolean.
654 ``1`` if any bits are set, ``0`` otherwise.
656 width
= len(self
.sig
)
657 pa
= PartitionedBool(width
, self
.partpoints
)
658 setattr(self
.m
.submodules
, self
.get_modname("bool"), pa
)
659 self
.m
.d
.comb
+= pa
.a
.eq(self
.sig
)
663 """Check if any bits are ``1``.
668 ``1`` if any bits are set, ``0`` otherwise.
670 return self
!= Const(0) # leverage the __ne__ operator here
671 return Operator("r|", [self
])
674 """Check if all bits are ``1``.
679 ``1`` if all bits are set, ``0`` otherwise.
681 # something wrong with PartitionedAll, but self == Const(-1)"
682 # XXX https://bugs.libre-soc.org/show_bug.cgi?id=176#c17
683 #width = len(self.sig)
684 #pa = PartitionedAll(width, self.partpoints)
685 #setattr(self.m.submodules, self.get_modname("all"), pa)
686 #self.m.d.comb += pa.a.eq(self.sig)
688 return self
== Const(-1) # leverage the __eq__ operator here
691 """Compute pairwise exclusive-or of every bit.
696 ``1`` if an odd number of bits are set, ``0`` if an
697 even number of bits are set.
699 width
= len(self
.sig
)
700 pa
= PartitionedXOR(width
, self
.partpoints
)
701 setattr(self
.m
.submodules
, self
.get_modname("xor"), pa
)
702 self
.m
.d
.comb
+= pa
.a
.eq(self
.sig
)
705 # not needed: Value.implies does the job
706 # def implies(premise, conclusion):
708 # TODO. contains a Value.cast which means an override is needed (on both)
709 # def bit_select(self, offset, width):
710 # def word_select(self, offset, width):
712 # not needed: Value.matches, amazingly, should do the job
713 # def matches(self, *patterns):
715 # TODO, http://bugs.libre-riscv.org/show_bug.cgi?id=713
717 return self
.sig
.shape()