import os
from myhdl import *
import hdlutils, SimulateAvalon
def regrw(OFFSET, LENGTH, START, WIDTH, Clk, Reset, A, WD, Wr, Q, Pulse = None):
    ''' Write/Read register: on a write strobe with the address equal to
        OFFSET, latch WD[WIDTH+START:START] into Q at the rising clock edge.
    '''
    @always_seq(Clk.posedge, reset=Reset)
    def write_logic():
        # react only when this register's address is selected
        if Wr and A == OFFSET:
            Q.next = WD[WIDTH + START:START]

    return write_logic
def ccassign(D, Q):
    ''' Continuously drive output port Q from internal signal D. '''
    @always_comb
    def drive():
        Q.next = D

    return drive
def sim_intbv_width_1(Clk, Reset, A, WD, Wr, Rd, RQ, TestBit, TestVector):
    """ DUT: a 1-bit and a 2-bit write/read register with address decode,
        intermediate read-back signals and a registered read multiplexer.
    """
    # intermediate registers so the written values can be read back
    bitreg = Signal(intbv(0)[1:])
    vecreg = Signal(intbv(0)[2:])

    reg1 = regrw(0, 1, 0, 1, Clk, Reset, A, WD, Wr, bitreg)
    reg2 = regrw(1, 1, 0, 2, Clk, Reset, A, WD, Wr, vecreg)

    # route the intermediate signals to the output ports
    ao1 = ccassign(bitreg, TestBit)
    ao2 = ccassign(vecreg, TestVector)

    # read-back bus: vecreg in the upper bits, bitreg in bit 0
    readback = ConcatSignal(vecreg, bitreg)

    @always_seq(Clk.posedge, reset=Reset)
    def mmrdr():
        if Rd:
            RQ.next = 0
            if A == 0:
                RQ.next = readback[1:]
            elif A == 1:
                RQ.next = readback[:1]

    return instances()
def test_sim_intbv_width_1():
    """ Testbench: instantiate the DUT plus clock/reset generators and an
        Avalon-MM stimulus that writes both registers and reads them back.
    """
    dut = sim_intbv_width_1(Clk, Reset, A, WD, Wr, Rd, RQ, TestBit, TestVector)
    cycle_count = Signal(intbv(0)[32:])
    PERIOD = 20  # clock period in simulation time steps

    @instance
    def clkgen():
        yield hdlutils.genClk(Clk, PERIOD, cycle_count)

    @instance
    def resetgen():
        yield hdlutils.genReset(Clk, PERIOD, Reset)

    @instance
    def stimulus():
        yield hdlutils.delayclks(Clk, PERIOD, 10)
        # read both addresses in their reset state
        for addr in range(2):
            yield SimulateAvalon.MMread(Clk, PERIOD, A, Rd, RQ, 1, addr, None, None)
        # write a few things
        yield SimulateAvalon.MMwrite(Clk, PERIOD, A, WD, Wr, 0, 1)
        yield hdlutils.delayclks(Clk, PERIOD, 2)
        yield SimulateAvalon.MMwrite(Clk, PERIOD, A, WD, Wr, 0, 0)
        yield SimulateAvalon.MMwrite(Clk, PERIOD, A, WD, Wr, 1, 0x1)
        yield SimulateAvalon.MMwrite(Clk, PERIOD, A, WD, Wr, 1, 0x2)
        # read it all back
        for addr in range(2):
            yield SimulateAvalon.MMread(Clk, PERIOD, A, Rd, RQ, 1, addr, None, None)
        raise StopSimulation

    return instances()
def convert():
    """ Convert the design to VHDL. """
    # force std_logic_vectors instead of unsigned in the interface ports
    toVHDL.numeric_ports = False
    toVHDL(sim_intbv_width_1,
           Clk, Reset, A, WD, Wr, Rd, RQ, TestBit, TestVector)
if __name__ == '__main__':
    # Testbench signals, shared as module-level globals by the testbench
    # and conversion functions above.
    Clk = Signal(bool(0))
    # 'async' became a reserved keyword in Python 3.7, making the original
    # ResetSignal(0, active=1, async=True) a SyntaxError; MyHDL >= 0.10
    # renamed the parameter to 'isasync' (confirm against the installed
    # MyHDL version).
    Reset = ResetSignal(0, active=1, isasync=True)
    A = Signal(intbv(0)[2:])
    WD, RQ = [Signal(intbv(0)[32:]) for _ in range(2)]
    Wr, Rd = [Signal(bool(0)) for _ in range(2)]
    TestBit = Signal(bool(0))
    TestVector = Signal(intbv(0)[2:])
    hdlutils.simulate(3000, test_sim_intbv_width_1)
    convert()
The output of the first process goes to an intermediate intbv signal so that the written value can be read back. The second process routes the intermediate signal to the output port.
This works correctly for bit widths > 1, so TestVector goes from 0 to 1 to 2.
However, TestBit should go from 0 to 1 and back to 0, but it stays at 1 forever, as can be seen in the VCD waveform.
The problem appears to lie in the simulation of the ccassign function in the case of being handed Signals containing an intbv of width 1.