Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in
Toggle navigation
W
White Rabbit Switch - Gateware
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
12
Issues
12
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
image/svg+xml
Discourse
Discourse
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
Projects
White Rabbit Switch - Gateware
Commits
fe1cc0c8
Commit
fe1cc0c8
authored
Feb 13, 2012
by
Tomasz Wlostowski
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
wrsw_swcore: testbench for async_mpm (requires cleanup)
parent
a882f884
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
6 changed files
with
904 additions
and
0 deletions
+904
-0
ILinkedList.svh
testbench/swc_async/mem/ILinkedList.svh
+165
-0
IShadowFBM.svh
testbench/swc_async/mem/IShadowFBM.svh
+52
-0
Manifest.py
testbench/swc_async/mem/Manifest.py
+13
-0
main.sv
testbench/swc_async/mem/main.sv
+566
-0
mpm_top_svwrap.svh
testbench/swc_async/mem/mpm_top_svwrap.svh
+100
-0
run.do
testbench/swc_async/mem/run.do
+8
-0
No files found.
testbench/swc_async/mem/ILinkedList.svh
0 → 100644
View file @
fe1cc0c8
`timescale 1ns/1ps

/* Fake Page Linked List: a behavioral stand-in for the MPM's page linked
 * list, plus task-level helpers the testbench uses to build/free/inspect
 * page chains. Clocked on clk_io_i; the MPM reads it through modport at_mpm. */
interface ILinkedList
  (
   clk_io_i,
   rst_n_i
   );

   parameter t_swcore_parameters P = `DEFAULT_SWC_PARAMS;

   input clk_io_i, rst_n_i;

   // Entry = page address + 2 flag bits (eof, valid).
   localparam int c_ll_entry_size   = P.g_page_address_width + 2;
   localparam int c_page_size_width = clogb2(P.g_page_size + 1);

   logic [P.g_page_address_width-1:0] ll_addr;
   logic [c_ll_entry_size-1:0]        ll_data;

   modport at_mpm
     (
      output ll_addr,
      input  ll_data
      );

   typedef struct {
      int next;       // pointer to the next page in chain
      bit valid;      // page contains valid data
      bit eof;        // page is the last in the current chain
      bit allocated;  // is the page allocated or free?
      int size;       // size of the data stored in the page
      int dsel;       // partial select
      int use_count;  // number of output blocks the page is allocated for
   } ll_entry_t;

   ll_entry_t list[P.g_num_pages];  // the list itself
   semaphore  af_mutex;             // allocation lock

   /* MPM LL output driver */
   reg [c_ll_entry_size-1:0] data_packed;

   always @(*)
     begin
        /* pack the ll_entry to a format accepted by the MPM:
         * flags above the address field; the address field carries either
         * the chain's last-page size/dsel (eof) or the next-page pointer. */
        data_packed[P.g_page_address_width]     <= list[ll_addr].eof;
        data_packed[P.g_page_address_width + 1] <= list[ll_addr].valid;
        if (list[ll_addr].eof)
          begin
             data_packed[c_page_size_width-1:0] <= list[ll_addr].size;
             data_packed[P.g_page_address_width-1 : P.g_page_address_width - P.g_partial_select_width] <= list[ll_addr].dsel;
          end
        else
          data_packed[P.g_page_address_width-1:0] <= list[ll_addr].next;
     end

   // One-cycle registered read, mimicking the real LL memory's latency.
   always @(posedge clk_io_i)
     ll_data <= data_packed;

   /* Initializes and clears the list */
   task automatic init();
      int i;
      af_mutex = new(1);
      for (i = 0; i < P.g_num_pages; i++)
        begin
           list[i].valid     = 0;
           list[i].allocated = 0;
        end
   endtask // init

   // Drops one reference to the chain starting at start_page; when the
   // use count hits zero (or force_free is set), walks the chain and
   // releases every page up to and including the eof page.
   task automatic free_chain(int start_page, int force_free = 0);
      int page;
      page = start_page;
      af_mutex.get(1);
      list[page].use_count--;
      if (!list[page].use_count || force_free)
        forever begin
           list[page].allocated = 0;
           list[page].valid     = 0;
           if (list[page].eof)
             break;
           page = list[page].next;
        end
      af_mutex.put(1);
   endtask // free_chain

   // Allocates the first free page. If page >= 0 on entry, links it as the
   // successor of the caller's current page; returns the new page via 'page'.
   // Stops the simulation when the pool is exhausted.
   task automatic alloc_page(ref int page);
      int i;
      af_mutex.get(1);
      foreach (list[i])
        if (!list[i].allocated)
          begin
             list[i].eof       = 0;
             list[i].allocated = 1;
             if (page >= 0)
               list[page].next = i;
             page = i;
             af_mutex.put(1);
             return;
          end // if (! list[i].allocated)
      af_mutex.put(1);
      $error("Fatal: alloc_page(): no pages left");
      $stop;
   endtask // alloc_page

   // Marks a page as containing valid data.
   task automatic set_valid(int page);
      list[page].valid = 1;
   endtask // set_valid

   // Marks 'page' as the chain's last page and records the residual size
   // (full page when the total is an exact multiple of the page size) and
   // the partial select.
   task automatic set_last(int page, int size, int dsel);
      list[page].eof  = 1;
      list[page].dsel = dsel;
      list[page].size = (size % P.g_page_size == 0 ? P.g_page_size : size % P.g_page_size);
   endtask // set_last

   // Sets the reference count on the chain's first page.
   task automatic set_use_count(int start_page, int use_count);
      list[start_page].use_count = use_count;
   endtask // set_use_count

   // Pretty-prints a page chain for debugging.
   // NOTE(review): loops forever on an unterminated chain (no eof) — assumed
   // to be called only on well-formed chains.
   function automatic string dump_chain(int start_page);
      int page = start_page;
      string str = "";
      while (!list[page].eof)
        begin
           $sformat(str, "%s %4x", str, page);
           page = list[page].next;
        end
      $sformat(str, "%s %4x [size %d, dsel %1x, usecount %d]",
               str, page, list[page].size, list[page].dsel, list[start_page].use_count);
      return str;
   endfunction // dump_chain

   initial init();

endinterface // ILinkedList
testbench/swc_async/mem/IShadowFBM.svh
0 → 100644
View file @
fe1cc0c8
`timescale 1ns/1ps

/* "shadow" copy of the real F.B. Memory, used for write path verification:
 * snoops every FBM write and lets the testbench read back a whole page
 * chain as an array of data words. */
interface IShadowFBM
  (
   clk_core_i,
   addr_i,
   data_i,
   we_i
   );

   parameter t_swcore_parameters P = `DEFAULT_SWC_PARAMS;

   // Width of the FBM word address bus (pages x words-per-page / ratio, -2).
   localparam int fbm_addr_width =
     clogb2(P.g_num_pages) + clogb2(P.g_page_size / P.g_ratio) - 2;

   input clk_core_i, we_i;
   input [fbm_addr_width-1:0] addr_i;
   input [P.g_data_width * P.g_ratio - 1:0] data_i;

   // Shadow storage: one entry per FBM memory word.
   logic [P.g_data_width * P.g_ratio - 1:0] mem[P.g_num_pages * P.g_page_size / P.g_ratio];

   // Mirror every FBM write into the shadow memory.
   always @(posedge clk_core_i)
     if (we_i)
       begin
          mem[addr_i] <= data_i;
       end

   // Reads back 'size' g_data_width-wide words spanning the page chain
   // given in 'pages' (one queue entry per page, in chain order), unpacking
   // the g_ratio words stored in each memory entry.
   function automatic u64_array_t read(int pages[$], input int size);
      int page_index = 0;
      u64_array_t rval;

      rval = new[size];

      while (size > 0)
        begin
           int i, remaining = (size > P.g_page_size ? P.g_page_size : size);

           for (i = 0; i < remaining; i++)
             begin
                // Fix: use a 64-bit 1 for the mask; a bare 32-bit '1' would
                // overflow the shift for g_data_width >= 32.
                rval[page_index * P.g_page_size + i] =
                  (mem[pages[page_index] * (P.g_page_size / P.g_ratio) + i / P.g_ratio]
                   >> (i % P.g_ratio * P.g_data_width))
                  & ((64'd1 << P.g_data_width) - 1);
             end
           page_index++;
           size -= remaining;
        end
      return rval;
   endfunction // read

endinterface // IShadowFBM
testbench/swc_async/mem/Manifest.py
0 → 100644
View file @
fe1cc0c8
# hdlmake manifest for the async MPM memory testbench.

# Build a simulation for a Xilinx target.
action = "simulation"
target = "xilinx"

# Fetched dependency cores land here.
fetchto = "../../../ip_cores"

# Verilog include search paths for the shared simulation packages.
vlog_opt = ("+incdir+../../../sim"
            " +incdir+../../../ip_cores/general-cores/sim"
            " +incdir+../../../ip_cores/wr-cores/sim")

# Top-level testbench source.
files = ["main.sv"]

# Module dependencies: the MPM RTL from this repo plus general-cores from git.
modules = {
    "local": ["../../../modules/wrsw_swcore/mpm"],
    "git": "git://ohwr.org/hdl-core-lib/general-cores.git::proposed_master",
}
testbench/swc_async/mem/main.sv
0 → 100644
View file @
fe1cc0c8
This diff is collapsed.
Click to expand it.
testbench/swc_async/mem/mpm_top_svwrap.svh
0 → 100644
View file @
fe1cc0c8
`timescale 1ns/1ps

// Picks the (index)-th (ent_size)-bit field out of a flat vector.
`define slice(array, index, ent_size) array[(ent_size) * ((index) + 1) - 1 : (ent_size) * (index)]

/* Interface'ized SystemVerilog wrapper for VHDL MPM module: packs the
 * per-port interface arrays into the flat vectors mpm_top expects and
 * unpacks its flat outputs back onto the interfaces. */
module mpm_top_swwrap
  (
   clk_core_i,
   clk_io_i,
   rst_n_i,
   wport,
   rport,
   ll
   );

   parameter t_swcore_parameters P = `DEFAULT_SWC_PARAMS;

   input clk_io_i, clk_core_i, rst_n_i;

   IMPMWritePort wport[P.g_num_ports];
   IMPMReadPort  rport[P.g_num_ports];
   ILinkedList   ll;

   // Flattened per-port buses wired to the wrapped core.
   wire [P.g_num_ports * P.g_data_width - 1:0] wp_data, rp_data;
   wire [P.g_num_ports-1:0] wp_dvalid, wp_dlast, wp_dreq, wp_pg_req;
   wire [P.g_num_ports-1:0] rp_dvalid, rp_dlast, rp_dreq, rp_pg_req, rp_pg_valid, rp_abort;
   wire [P.g_num_ports * P.g_page_address_width - 1:0] wp_pgaddr;
   wire [P.g_num_ports * P.g_page_address_width - 1:0] rp_pgaddr;
   wire [P.g_num_ports * P.g_partial_select_width - 1:0] rp_dsel;

   generate
      genvar i;
      for (i = 0; i < P.g_num_ports; i++)
        begin
           /* Write port packing */
           assign `slice(wp_data, i, P.g_data_width)            = wport[i].d;
           assign `slice(wp_dvalid, i, 1)                       = wport[i].d_valid;
           assign `slice(wp_dlast, i, 1)                        = wport[i].d_last;
           assign `slice(wp_pgaddr, i, P.g_page_address_width)  = wport[i].pg_addr;
           assign wport[i].dreq   = wp_dreq[i];
           assign wport[i].pg_req = wp_pg_req[i];

           /* Read port packing */
           assign `slice(rp_pgaddr, i, P.g_page_address_width)  = rport[i].pg_addr;
           assign rp_abort[i]    = rport[i].abort;
           assign rp_pg_valid[i] = rport[i].pg_valid;
           assign rp_dreq[i]     = rport[i].dreq;
           assign rport[i].pg_req  = rp_pg_req[i];
           assign rport[i].d_valid = rp_dvalid[i];
           assign rport[i].d_last  = rp_dlast[i];
           assign rport[i].d_sel   = `slice(rp_dsel, i, P.g_partial_select_width);
           assign rport[i].d       = `slice(rp_data, i, P.g_data_width);
        end
   endgenerate

   // The VHDL MPM core under test, parameterized from P.
   mpm_top
     #(
       .g_data_width           (P.g_data_width),
       .g_ratio                (P.g_ratio),
       .g_page_size            (P.g_page_size),
       .g_num_pages            (P.g_num_pages),
       .g_num_ports            (P.g_num_ports),
       .g_fifo_size            (P.g_fifo_size),
       .g_page_addr_width      (P.g_page_address_width),
       .g_partial_select_width (P.g_partial_select_width)
       )
   Wrapped_MPM
     (
      .clk_io_i         (clk_io_i),
      .clk_core_i       (clk_core_i),
      .rst_n_i          (rst_n_i),
      .wport_d_i        (wp_data),
      .wport_dvalid_i   (wp_dvalid),
      .wport_dlast_i    (wp_dlast),
      .wport_pg_addr_i  (wp_pgaddr),
      .wport_dreq_o     (wp_dreq),
      .wport_pg_req_o   (wp_pg_req),
      .rport_d_o        (rp_data),
      .rport_dvalid_o   (rp_dvalid),
      .rport_dlast_o    (rp_dlast),
      .rport_dreq_i     (rp_dreq),
      .rport_dsel_o     (rp_dsel),
      .rport_abort_i    (rp_abort),
      .rport_pg_addr_i  (rp_pgaddr),
      .rport_pg_valid_i (rp_pg_valid),
      .rport_pg_req_o   (rp_pg_req),
      .ll_addr_o        (ll.ll_addr),
      .ll_data_i        (ll.ll_data)
      );

endmodule // mpm_top_swwrap
\ No newline at end of file
testbench/swc_async/mem/run.do
0 → 100644
View file @
fe1cc0c8
# ModelSim/QuestaSim run script for the async MPM memory testbench.
#make -f Makefile
# Load the testbench top (work.main) with 10 fs resolution; +acc keeps
# design objects visible to the waveform viewer despite optimization.
vsim -L secureip -L unisim -t 10fs work.main -voptargs="+acc"
# Silence std_logic_arith / numeric_std metavalue warnings during reset.
set StdArithNoWarnings 1
set NumericStdNoWarnings 1
# Load the waveform setup, then run the simulation.
do wave.do
run 80us
radix -hexadecimal
wave zoomfull
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment