Commit 13ec6711 authored by Federico Vaga

kernel: add spec-raw module

Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent 57608eda
......@@ -27,6 +27,7 @@ CONFIG_WR_NIC ?= m
obj-m += spec.o
obj-$(CONFIG_WR_NIC) += wr-nic.o
obj-$(CONFIG_SPEC_RAW) += spec-raw/
spec-y = spec-pci.o
spec-y += spec-fmc.o
......@@ -59,3 +60,4 @@ clean:
rm -rf *.o *~ .*.cmd *.ko *.mod.c .tmp_versions Module.symvers \
Module.markers modules.order
rm -rf wr_nic/*.o wr_nic/*~
$(MAKE) -C $(LINUX) M=$(shell /bin/pwd) clean
LINUX ?= /lib/modules/$(shell uname -r)/build
ccflags-y += -I$(src)/../../fmc-bus/kernel/include
ccflags-$(CONFIG_SPEC_RAW_DEBUG) += -DDEBUG
obj-m = spec-raw.o
spec-raw-y := spec-raw-core.o
spec-raw-y += spec-raw-dma.o
spec-raw-y += spec-raw-chardev.o
spec-raw-y += spec-raw-irq.o
spec-raw-y += spec-raw-dbg.o
spec-raw-y += spec-raw-sysfs.o
GIT_VERSION = $(shell cd $(src); git describe --dirty --long --tags)
ccflags-y += -DGIT_VERSION=\"$(GIT_VERSION)\"
all modules:
$(MAKE) -C $(LINUX) M=$(shell /bin/pwd) modules
install modules_install:
$(MAKE) -C $(LINUX) M=$(shell /bin/pwd) modules_install
# be able to run the "clean" rule even if $(LINUX) is not valid
clean:
rm -rf *.o *~ .*.cmd *.ko *.mod.c .tmp_versions Module.symvers \
Module.markers modules.order
This diff is collapsed.
/*
* Copyright (C) 2012 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@cern.ch>
* Author: Alessandro Rubini <rubini@gnudd.com>
*
* Released according to the GNU GPL, version 2 or any later version.
*
* This work is part of the White Rabbit project, a research effort led
* by CERN, the European Institute for Nuclear Research.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/fmc.h>
#include <linux/fmc-sdb.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include "spec-raw.h"
struct dentry *root_dir;
struct list_head sr_devices;
static DEFINE_SPINLOCK(sr_lock);
static struct fmc_driver sr_drv;
FMC_PARAM_BUSID(sr_drv);
FMC_PARAM_GATEWARE(sr_drv);
static int sr_show_sdb;
module_param_named(show_sdb, sr_show_sdb, int, 0444);
MODULE_PARM_DESC(show_sdb, "Print a dump of the gateware's SDB tree.");
/* Device part .. */
static int sr_probe(struct fmc_device *fmc);
static int sr_remove(struct fmc_device *fmc);

/* FMC driver definition: no mezzanine ID table, so it matches any device */
static struct fmc_driver sr_drv = {
	.version = FMC_VERSION,
	.driver.name = KBUILD_MODNAME,
	.probe = sr_probe,
	.remove = sr_remove,
	/* no table: we want to match everything */
};
/*
 * sr_getcomponents
 * Scans the gateware's SDB tree and caches the base addresses of the
 * cores this driver uses (VIC, DMA engine, DMA IRQ controller).
 * Missing cores simply leave the corresponding address unset.
 */
static void sr_getcomponents(struct sr_instance *sr)
{
	int ret;

	/* Without an SDB tree we cannot locate any core */
	ret = fmc_scan_sdb_tree(sr->fmc, 0);
	if (ret < 0) {
		dev_warn(&sr->fmc->dev, "%s: no SDB in the bitstream.\n",
			 KBUILD_MODNAME);
		return;
	}

	/* VIC IRQ controller core */
	sr->vic_base_addr = fmc_find_sdb_device(sr->fmc->sdb,
						0xce42, 0x13, NULL);
	/* DMA engine core */
	sr->dma_base_addr = fmc_find_sdb_device(sr->fmc->sdb,
						0xce42, 0x601, NULL);
	/* DMA IRQ controller core */
	sr->irq_dma_base_addr = fmc_find_sdb_device(sr->fmc->sdb,
						    0xce42, 0xd5735ab4, NULL);

	if (sr_show_sdb)
		fmc_show_sdb_tree(sr->fmc);
}
static int sr_register_misc(struct miscdevice *misc, const struct file_operations *fops,
const char *devname, const char *type)
{
int ret;
misc->minor = MISC_DYNAMIC_MINOR;
misc->fops = fops;
misc->name = kasprintf(GFP_KERNEL, "%s-%s", devname, type);
if (!misc->name)
return -ENOMEM;
ret = misc_register(misc);
if (ret < 0) {
kfree(misc->name);
spin_unlock(&sr_lock);
return ret;
}
pr_info("Created misc device \"%s\"\n", misc->name);
return 0;
}
/*
 * sr_unregister_misc
 * Counterpart of sr_register_misc(): deregisters the misc device and
 * releases the kasprintf()'d name.
 */
static void sr_unregister_misc(struct miscdevice *misc)
{
	misc_deregister(misc);
	kfree(misc->name);
}
/*
 * sr_probe
 * FMC probe entry point: it reprograms the carrier with the requested
 * gateware, allocates the driver instance, registers the three misc
 * devices (reg/dma/irq), the sysfs IRQ subscription file, the interrupt
 * handlers and the debugfs test files.
 *
 * probe and remove must allocate and release a misc device.
 */
static int sr_probe(struct fmc_device *fmc)
{
	struct ual_irq_status *st;
	int ret, i;
	int index = 0;
	char *fwname;
	struct sr_instance *sr;

	if (fmc->op->validate)
		index = fmc->op->validate(fmc, &sr_drv);
	if (index < 0)
		return -EINVAL; /* not our device: invalid */

	/* Check if the carrier is a SPEC */
	if (strcmp(fmc->carrier_name, "SPEC")) {
		pr_err("spec-raw: works only with SPEC carrier\n");
		return -EINVAL;
	}

	if (sr_drv.gw_n)
		fwname = ""; /* use the gateware provided by the user */
	else
		fwname = NULL; /* use the default SPEC gateware */
	dev_info(fmc->hwdev, "Gateware (%s)\n", fwname);

	/* We first write a new binary (and lm32) within the carrier */
	ret = fmc->op->reprogram(fmc, &sr_drv, fwname);
	if (ret) {
		dev_err(fmc->hwdev, "write firmware \"%s\": error %i\n",
			fwname, ret);
		return ret;
	}
	dev_info(fmc->hwdev, "Gateware successfully loaded\n");

	/* Prefer 64-bit DMA addressing, fall back to 32-bit */
	if (dma_set_mask(fmc->hwdev, DMA_BIT_MASK(64))) {
		/* original message said "62-bit" */
		dev_warn(&fmc->dev, "64-bit DMA addressing not available\n");
		if (dma_set_mask(fmc->hwdev, DMA_BIT_MASK(32))) {
			dev_err(&fmc->dev,
				"32-bit DMA addressing not available\n");
			return -EINVAL;
		}
	}

	/* Create a char device: we want to create it anew */
	sr = kzalloc(sizeof(*sr), GFP_KERNEL);
	if (!sr)
		return -ENOMEM;
	sr->fmc = fmc;
	fmc_set_drvdata(sr->fmc, sr);

	sr_getcomponents(sr);

	spin_lock(&sr_lock);
	ret = sr_register_misc(&sr->ual.m_reg, &sr_reg_fops,
			       dev_name(&sr->fmc->dev), "reg");
	if (ret) {
		spin_unlock(&sr_lock);
		goto err_misc_reg;
	}
	dev_set_drvdata(sr->ual.m_reg.this_device, sr);

	ret = sr_register_misc(&sr->ual.m_dma, &sr_dma_fops,
			       dev_name(&sr->fmc->dev), "dma");
	if (ret) {
		spin_unlock(&sr_lock);
		goto err_misc_dma;
	}
	dev_set_drvdata(sr->ual.m_dma.this_device, sr);

	ret = sr_register_misc(&sr->ual.m_irq, &sr_irq_fops,
			       dev_name(&sr->fmc->dev), "irq");
	if (ret) {
		spin_unlock(&sr_lock);
		goto err_misc_irq;
	}
	dev_set_drvdata(sr->ual.m_irq.this_device, sr);

	list_add(&sr->list, &sr_devices);
	spin_unlock(&sr_lock);

	sr->ual.dma_buf = NULL;
	mutex_init(&sr->mtx);
	spin_lock_init(&sr->lock);
	spin_lock_init(&sr->ual.irq_lock);
	init_waitqueue_head(&sr->q_dma);
	init_waitqueue_head(&sr->ual.q_irq);
	sr->ual.r_idx_irq = 0;
	sr->ual.w_idx_irq = 0;
	/* Initialize subscription list to invalid source/status */
	for (i = 0; i < UAL_IRQ_MAX_SUBSCRIPTION; ++i) {
		st = &sr->ual.subscribed_irqs[i];
		st->source = 0xBADC0FFE;
		st->status = 0x0;
		st->offset = 0x0;
	}

	ret = sr_irq_create_sysfs(sr);
	if (ret < 0)
		goto err_sysfs;

	/* Request for the VIC if it is there */
	ret = sr_request_irqs(sr);
	if (ret < 0)
		goto err_req;

	/*
	 * Create a debug directory. The original code tested
	 * sr->dbg_dir here (still NULL from kzalloc), so the branch
	 * never succeeded and the debugfs files were never created:
	 * the result of debugfs_create_dir() must be checked instead.
	 *
	 * NOTE(review): root_dir is a module-wide global; with more
	 * than one probed device the later probe clobbers it — verify.
	 */
	root_dir = debugfs_create_dir(sr->ual.m_reg.name, NULL);
	if (!IS_ERR_OR_NULL(root_dir)) {
		sr->dbg_dir = debugfs_create_dir(sr->ual.m_reg.name, root_dir);
		if (!IS_ERR_OR_NULL(sr->dbg_dir)) {
			sr->dma_loopback = debugfs_create_file("test_dma_loopback",
						0444, sr->dbg_dir, sr,
						&sr_dbgfs_dma_loop_op);
			sr->dma_write_seq = debugfs_create_file("write_seq",
						0222, sr->dbg_dir, sr,
						&sr_dbgfs_dma_write_seq);
			sr->dma_write_zero = debugfs_create_file("write_zero",
						0222, sr->dbg_dir, sr,
						&sr_dbgfs_dma_write_zero);
		}
	} else {
		root_dir = NULL;
		dev_warn(&sr->fmc->dev, "Cannot create debugfs\n");
	}

	dev_info(&sr->fmc->dev, "%s successfully loaded\n", KBUILD_MODNAME);
	return 0;

err_req:
	sr_irq_remove_sysfs(sr);
err_sysfs:
	/*
	 * The instance was already published on sr_devices; take it off
	 * before freeing it (the original leaked a dangling list entry).
	 */
	spin_lock(&sr_lock);
	list_del(&sr->list);
	spin_unlock(&sr_lock);
	sr_unregister_misc(&sr->ual.m_irq);
err_misc_irq:
	sr_unregister_misc(&sr->ual.m_dma);
err_misc_dma:
	sr_unregister_misc(&sr->ual.m_reg);
err_misc_reg:
	kfree(sr);
	return ret;
}
static int sr_remove(struct fmc_device *fmc)
{
struct sr_instance *sr;
list_for_each_entry(sr, &sr_devices, list)
if (sr->fmc == fmc)
break;
if (sr->fmc != fmc) {
dev_err(&fmc->dev, "remove called but not found\n");
return -ENODEV;
}
sr_irq_remove_sysfs(sr);
sr_unregister_misc(&sr->ual.m_irq);
sr_unregister_misc(&sr->ual.m_reg);
sr_unregister_misc(&sr->ual.m_dma);
if (root_dir)
debugfs_remove_recursive(root_dir);
if (sr->ual.dma_buf)
vfree(sr->ual.dma_buf);
sr_free_irqs(sr);
spin_lock(&sr_lock);
list_del(&sr->list);
kfree(sr);
spin_unlock(&sr_lock);
return 0;
}
/* Module init: prepare the device list and register the FMC driver */
static int sr_init(void)
{
	INIT_LIST_HEAD(&sr_devices);
	return fmc_driver_register(&sr_drv);
}
/* Module exit: unregister the FMC driver (devices are torn down via remove) */
static void sr_exit(void)
{
	fmc_driver_unregister(&sr_drv);
}
module_init(sr_init);
module_exit(sr_exit);
MODULE_AUTHOR("Federico Vaga <federico.vaga@cern.ch>");
MODULE_DESCRIPTION("General Purpose Driver for SPEC");
MODULE_VERSION(GIT_VERSION);
MODULE_LICENSE("GPL");
CERN_SUPER_MODULE;
/*
* Copyright (C) 2014 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@cern.ch>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/fmc.h>
#include <linux/fs.h>
#include <linux/random.h>
#include "spec-raw.h"
#define SR_DMA_LOOP_SIZE 512
/*
 * sr_get_user_offset
 * It returns the integer value corresponding to the "0x<hex>" offset
 * written by the user, or a negative errno on bad input.
 *
 * The previous version copied 'count' bytes into a 16-byte stack buffer
 * without bounding it (stack overflow with a long write) and ran
 * sscanf() on a possibly unterminated string.
 */
static int sr_get_user_offset(const char __user *buf, size_t count)
{
	char tmp[16];
	int val, ret;

	if (count < 3) /* shortest valid input: "0x0" */
		return -EINVAL;
	/* Bound the copy and keep room for the NUL terminator */
	if (count >= sizeof(tmp))
		count = sizeof(tmp) - 1;
	if (copy_from_user(tmp, buf, count))
		return -EFAULT;
	tmp[count] = '\0';

	ret = sscanf(tmp, "0x%x", &val);
	if (!ret)
		return -EINVAL;
	if (val < 0 || val >= SR_DDR_SIZE)
		return -EINVAL;

	return val;
}
/*
 * sr_fix_transfer_length
 * Given the DDR offset 'len' of a transfer, it clamps the transfer
 * length so it does not run past the end of the DDR memory.
 */
static inline size_t sr_fix_transfer_length(size_t len)
{
	size_t room = SR_DDR_SIZE - len;

	return room > SR_DMA_LOOP_SIZE ? SR_DMA_LOOP_SIZE : room;
}
/*
 * sr_write_ddr
 * @sr: spec-raw instance
 * @off: DDR offset where write data (negative number means random)
 * @data: bytes to write
 * @len: data length
 *
 * It writes onto DDR memory using DMA. This function saves and then
 * restores the dma descriptor and data field in the sr instance. This is
 * not the best choice, but we are in debugging mode and no-one should use
 * these operations unless for debugging purposes.
 *
 * Returns 0 on success or a negative errno. Unlike the original, the
 * saved DMA state is restored on the error path too (the early return
 * used to leave the caller's stack buffer in sr->ual.dma_buf).
 */
static int sr_write_ddr(struct sr_instance *sr, int off,
			uint8_t *data, size_t len)
{
	struct sr_dma_request dma;
	void *old_buf;
	int err = 0;

	/* Save previous status so it can be restored afterwards */
	dma = sr->dma;
	old_buf = sr->ual.dma_buf;
	sr->ual.dma_buf = data;

	/* Setup DDR offset (negative offset: pick a random one) */
	if (off < 0)
		get_random_bytes(&sr->dma.dev_mem_off,
				 sizeof(sr->dma.dev_mem_off));
	else
		sr->dma.dev_mem_off = off;

	/* Configure DMA transfer; keep the offset inside the DDR window */
	sr->dma.dev_mem_off &= (SR_DDR_SIZE - 1);
	sr->dma.length = len;
	sr->dma.flags = SR_IOCTL_DMA_FLAG_WRITE;

	/* Start DMA [write] and wait until complete */
	err = sr_dma_start(sr);
	if (err) {
		dev_err(&sr->fmc->dev, "Cannot start DMA\n");
		goto out_restore;
	}
	wait_event_interruptible(sr->q_dma, sr_is_dma_over(sr));

out_restore:
	/* DMA is over (or failed to start): restore previous status */
	sr->dma = dma;
	sr->ual.dma_buf = old_buf;
	return err;
}
/*
 * sr_simple_open
 * It just configures the file private data from the inode private data.
 *
 * This function is copied from the kernel version 3.13 because it is not
 * available on 3.2 (where this module will run).
 */
static int sr_simple_open(struct inode *inode, struct file *file)
{
	void *priv = inode->i_private;

	if (priv)
		file->private_data = priv;
	return 0;
}
/*
 * sr_dbg_loop_read
 * It writes random bytes on the DDR memory at a random offset, then it
 * reads back those bytes to verify that read/write is working. The test
 * verdict is copied to the user as a short text message.
 *
 * Fixes vs. the original: on failure it now returns the error code
 * (the original returned 'count' even when err was set), and the saved
 * DMA state is restored when the read-back transfer fails to start.
 */
static ssize_t sr_dbg_loop_read(struct file *f, char __user *buf, size_t count,
				loff_t *offp)
{
	struct sr_instance *sr = f->private_data;
	struct sr_dma_request dma;
	void *old_buf;
	uint8_t datain[SR_DMA_LOOP_SIZE], dataout[SR_DMA_LOOP_SIZE];
	int err;
	char msg[128] = {"\0"};

	if (*offp)
		return 0;

	dev_info(&sr->fmc->dev, "Run DMA loop test\n");
	/* Random payload to write and read back */
	get_random_bytes(dataout, SR_DMA_LOOP_SIZE);

	/* Protect DMA transfers */
	mutex_lock(&sr->mtx);
	err = sr_write_ddr(sr, -1, dataout, SR_DMA_LOOP_SIZE);
	if (err)
		goto out;

	/* Read back from the same (random) offset left in sr->dma */
	dma = sr->dma;
	old_buf = sr->ual.dma_buf;
	sr->ual.dma_buf = datain;
	sr->dma.flags &= ~SR_IOCTL_DMA_FLAG_WRITE;
	err = sr_dma_start(sr);
	if (err) {
		dev_err(&sr->fmc->dev, "Cannot start DMA\n");
		sr->dma = dma;
		sr->ual.dma_buf = old_buf;
		goto out;
	}
	wait_event_interruptible(sr->q_dma, sr_is_dma_over(sr));

	if (memcmp(datain, dataout, SR_DMA_LOOP_SIZE))
		sprintf(msg, "Test Failed!\n");
	else
		sprintf(msg, "Test Successful!\n");

	/* Restore the DMA state saved before the read-back */
	sr->dma = dma;
	sr->ual.dma_buf = old_buf;

	count = strlen(msg);
	if (copy_to_user(buf, msg, count)) {
		err = -EFAULT;
		goto out;
	}
	*offp += count;
out:
	mutex_unlock(&sr->mtx);
	return err ? err : count;
}
/* debugfs "test_dma_loopback": reading it runs the DMA loop-back test */
const struct file_operations sr_dbgfs_dma_loop_op = {
	.owner = THIS_MODULE,
	.open = sr_simple_open,
	.read = sr_dbg_loop_read,
};
/*
 * sr_dbg_write_seq
 * At a given offset, it writes on the DDR memory a sequence of numbers
 * (0x00, 0x01, ... 0xFF, repeating).
 *
 * Fix vs. the original: 'val' was declared uint32_t, so the `val < 0`
 * check on sr_get_user_offset()'s negative errno could never fire and
 * the errno was used as a DDR offset.
 */
static ssize_t sr_dbg_write_seq(struct file *f, const char __user *buf,
				size_t count, loff_t *offp)
{
	struct sr_instance *sr = f->private_data;
	uint8_t dataout[SR_DMA_LOOP_SIZE];
	int val;	/* signed: must be able to hold a negative errno */
	int i, err;

	if (*offp)
		return 0;

	val = sr_get_user_offset(buf, count);
	if (val < 0)
		return val;
	dev_info(&sr->fmc->dev, "Write sequence in DDR at offset 0x%x\n", val);

	/* Fill the buffer to write */
	for (i = 0; i < SR_DMA_LOOP_SIZE; ++i)
		dataout[i] = i & 0xFF;

	mutex_lock(&sr->mtx);
	err = sr_write_ddr(sr, val, dataout, sr_fix_transfer_length(val));
	mutex_unlock(&sr->mtx);
	if (err)
		return err;

	return count;
}
/* debugfs "write_seq": write-only file that triggers the sequence test */
const struct file_operations sr_dbgfs_dma_write_seq = {
	.owner = THIS_MODULE,
	.open = sr_simple_open,
	.write = sr_dbg_write_seq,
};
/*
 * sr_dbg_write_zero
 * At a given offset, it writes on the DDR memory a sequence of zeros.
 *
 * Fix vs. the original: 'val' was declared uint32_t, so the `val < 0`
 * check on sr_get_user_offset()'s negative errno could never fire and
 * the errno was used as a DDR offset.
 */
static ssize_t sr_dbg_write_zero(struct file *f, const char __user *buf,
				 size_t count, loff_t *offp)
{
	struct sr_instance *sr = f->private_data;
	uint8_t dataout[SR_DMA_LOOP_SIZE];
	int val;	/* signed: must be able to hold a negative errno */
	int err;

	if (*offp)
		return 0;

	val = sr_get_user_offset(buf, count);
	if (val < 0)
		return val;
	dev_info(&sr->fmc->dev, "Write zero in DDR at offset 0x%x\n", val);

	/* Fill the buffer to write */
	memset(dataout, 0, SR_DMA_LOOP_SIZE);

	mutex_lock(&sr->mtx);
	err = sr_write_ddr(sr, val, dataout, sr_fix_transfer_length(val));
	mutex_unlock(&sr->mtx);
	if (err)
		return err;

	return count;
}
/* debugfs "write_zero": write-only file that triggers the zero-fill test */
const struct file_operations sr_dbgfs_dma_write_zero = {
	.owner = THIS_MODULE,
	.open = sr_simple_open,
	.write = sr_dbg_write_zero,
};
/*
* Copyright (C) 2014 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@cern.ch>
*
* Released according to the GNU GPL, version 2 or any later version.
*
* FIXME most of the DMA code is copied from the fmc-adc-100m14bcha driver
* (release 2014-05) for a quick and dirty solution while waiting for the
* better one
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/fmc.h>
#include "spec-raw.h"
#include "spec-raw-user.h"
/*
 * sr_calculate_nents
 * It calculates how many scatterlist entries are needed to map 'len'
 * bytes starting at 'buf', splitting chunks at page boundaries.
 */
static int sr_calculate_nents(void *buf, size_t len)
{
	int nents = 0;
	void *pos = buf;
	size_t left = len;

	while (left) {
		/* Bytes available until the end of the current page */
		size_t chunk = PAGE_SIZE - offset_in_page(pos);

		if (chunk > left)
			chunk = left;
		pos += chunk;
		left -= chunk;
		nents++;
	}
	return nents;
}
/*
 * sr_setup_dma_scatter
 * It initializes each element of the scatter list: the buffer is split
 * at page boundaries, and vmalloc'd memory gets a page lookup.
 *
 * Fixes vs. the original: the unused 'i_blk' variable is gone, and the
 * debug message now describes the chunk that was just mapped (it used
 * to print bufp after advancing it, i.e. the *next* chunk's address).
 */
static void sr_setup_dma_scatter(struct sg_table *sgt, void *buf, size_t len)
{
	struct scatterlist *sg;
	int bytesleft = len;
	void *bufp = buf;
	int mapbytes;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		/*
		 * If there are less bytes left than what fits
		 * in the current page (plus page alignment offset)
		 * we just feed in this, else we stuff in as much
		 * as we can.
		 */
		if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
			mapbytes = bytesleft;
		else
			mapbytes = PAGE_SIZE - offset_in_page(bufp);
		/* Map the page */
		if (is_vmalloc_addr(bufp))
			sg_set_page(sg, vmalloc_to_page(bufp), mapbytes,
				    offset_in_page(bufp));
		else
			sg_set_buf(sg, bufp, mapbytes);
		pr_debug("sg item (%p(+0x%lx), len:%d, left:%d)\n",
			 virt_to_page(bufp), offset_in_page(bufp),
			 mapbytes, bytesleft - mapbytes);
		/* Configure next values */
		bufp += mapbytes;
		bytesleft -= mapbytes;
	}
}
/*
 * sr_map_dma
 * It maps a given buffer for DMA transfer with a scatterlist. It also maps
 * a smaller DMA memory for the scatterlist: the chain of sr_dma_item
 * descriptors that the gateware's DMA engine walks. The first descriptor
 * is programmed directly into the DMA core registers.
 *
 * NOTE(review): every failure path returns -ENOMEM even when the real
 * cause was a mapping failure, and dma_map_single() errors should be
 * detected with dma_mapping_error() rather than comparing against 0.
 * Also, the out_free path leaves sr->items pointing at freed memory —
 * confirm callers never touch it after a failure.
 */
static int sr_map_dma(struct sr_instance *sr, void *buf, size_t len,
		      size_t mem_off)
{
	enum dma_data_direction direction;
	struct device *dev = sr->fmc->hwdev;
	struct scatterlist *sg;
	struct sr_dma_item *items;
	uint32_t dev_mem_off = 0;
	unsigned int i, pages, sglen, size;
	dma_addr_t tmp;
	int err;

	pages = sr_calculate_nents(buf, len);
	if (!pages) {
		dev_info(dev, "No pages to transfer %zu bytes\n",
			 len);
		return -EINVAL;
	}
	dev_dbg(dev, "using %d pages to transfer %zu bytes\n", pages, len);

	/* Create sglists for the transfers */
	err = sg_alloc_table(&sr->sgt, pages, GFP_ATOMIC);
	if (err) {
		dev_err(dev, "cannot allocate sg table (%i pages)\n", pages);
		return -ENOMEM;
	}

	/* Limited to 32-bit (kernel limit) */
	size = sizeof(struct sr_dma_item) * sr->sgt.nents;
	items = kzalloc(size, GFP_KERNEL);
	if (!items) {
		dev_err(dev, "cannot allocate coherent dma memory\n");
		goto out_mem;
	}
	sr->items = items;
	/* Map the descriptor chain itself so the device can fetch it */
	sr->dma_list_item = dma_map_single(dev, items, size, DMA_TO_DEVICE);
	if (!sr->dma_list_item)
		goto out_free;

	/* Setup the scatter list for the provided block */
	sr_setup_dma_scatter(&sr->sgt, buf, len);

	/* Map DMA buffers */
	direction = sr->dma.flags & SR_IOCTL_DMA_FLAG_WRITE ? DMA_TO_DEVICE :
		DMA_FROM_DEVICE;
	sglen = dma_map_sg(dev, sr->sgt.sgl, sr->sgt.nents, direction);
	if (!sglen) {
		dev_err(dev, "cannot map dma memory\n");
		goto out_map;
	}

	/* Configure DMA items */
	dev_mem_off = mem_off;
	for_each_sg(sr->sgt.sgl, sg, sr->sgt.nents, i) {
		/* Prepare DMA item */
		items[i].start_addr = dev_mem_off;
		items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
		items[i].dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
		items[i].dma_len = sg_dma_len(sg);
		dev_mem_off += items[i].dma_len;
		if (!sg_is_last(sg)) { /* more transfers */
			/* uint64_t so it works on 32 and 64 bit */
			tmp = sr->dma_list_item;
			tmp += (sizeof(struct sr_dma_item) * (i + 1));
			items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
			items[i].next_addr_h = ((uint64_t)tmp) >> 32;
			items[i].attribute = 0x1; /* more items */
		} else {
			items[i].attribute = 0x0; /* last item */
		}
		/* set the DMA direction 0x2 (write), 0x0 (read) */
		items[i].attribute |=
			(sr->dma.flags & SR_IOCTL_DMA_FLAG_WRITE ? (1 << 1) : 0);

		pr_debug("configure DMA item %d "
			 "(addr: 0x%llx len: %d)(dev off: 0x%x)"
			 "(next item: 0x%x)\n",
			 i, (long long)sg_dma_address(sg),
			 sg_dma_len(sg), dev_mem_off, items[i].next_addr_l);

		/* The first item is written on the device */
		if (i == 0) {
			fmc_writel(sr->fmc, items[i].start_addr,
				   sr->dma_base_addr + SR_DMA_ADDR);
			fmc_writel(sr->fmc, items[i].dma_addr_l,
				   sr->dma_base_addr + SR_DMA_ADDR_L);
			fmc_writel(sr->fmc, items[i].dma_addr_h,
				   sr->dma_base_addr + SR_DMA_ADDR_H);
			fmc_writel(sr->fmc, items[i].dma_len,
				   sr->dma_base_addr + SR_DMA_LEN);
			fmc_writel(sr->fmc, items[i].next_addr_l,
				   sr->dma_base_addr + SR_DMA_NEXT_L);
			fmc_writel(sr->fmc, items[i].next_addr_h,
				   sr->dma_base_addr + SR_DMA_NEXT_H);
			/* Set that there is a next item */
			fmc_writel(sr->fmc, items[i].attribute,
				   sr->dma_base_addr + SR_DMA_BR);
		}
	}

	return 0;

out_map:
	dma_unmap_single(dev, sr->dma_list_item, size, DMA_TO_DEVICE);
out_free:
	kfree(sr->items);
out_mem:
	sg_free_table(&sr->sgt);
	return -ENOMEM;
}
/*
 * sr_unmap_dma
 * It unmaps the DMA memory of the buffer and of the scatterlist.
 *
 * Fix vs. the original: the mappings were created in sr_map_dma()
 * against sr->fmc->hwdev, but this function unmapped them with
 * &sr->fmc->dev. The DMA API requires map and unmap to use the same
 * struct device.
 */
static void sr_unmap_dma(struct sr_instance *sr)
{
	struct device *dev = sr->fmc->hwdev;
	enum dma_data_direction direction;
	unsigned int size;

	dev_dbg(&sr->fmc->dev, "unmap DMA\n");
	size = sizeof(struct sr_dma_item) * sr->sgt.nents;
	dma_unmap_single(dev, sr->dma_list_item, size, DMA_TO_DEVICE);

	direction = sr->dma.flags & SR_IOCTL_DMA_FLAG_WRITE ? DMA_TO_DEVICE :
		DMA_FROM_DEVICE;
	dma_unmap_sg(dev, sr->sgt.sgl, sr->sgt.nents, direction);

	kfree(sr->items);
	sr->items = NULL;
	sr->dma_list_item = 0;
	sg_free_table(&sr->sgt);
}
/*
 * sr_dma_start
 * It maps the local buffer for DMA according to the last DMA
 * configuration of the current instance, then it starts the transfer
 * on the gateware DMA engine.
 */
int sr_dma_start(struct sr_instance *sr)
{
	int err;

	dev_dbg(&sr->fmc->dev, "Start DMA\n");
	err = sr_map_dma(sr, sr->ual.dma_buf, sr->dma.length,
			 sr->dma.dev_mem_off);
	if (err)
		return err;

	/* Mark the data as not ready while the transfer is in flight */
	spin_lock(&sr->lock);
	sr->flags &= ~SR_FLAG_DATA_RDY;
	spin_unlock(&sr->lock);

	/* Start DMA transfer */
	fmc_writel(sr->fmc, 0x1, sr->dma_base_addr + SR_DMA_CTL);
	return 0;
}
/*
 * sr_dma_done
 * Called from the DMA interrupt path: it unmaps all the DMA mappings
 * created by sr_dma_start() and flags the data as ready, which is the
 * condition sr_is_dma_over() reports to waiters.
 */
void sr_dma_done(struct sr_instance *sr)
{
	dev_dbg(&sr->fmc->dev, "DMA is over\n");
	sr_unmap_dma(sr);
	/* Now data is ready */
	spin_lock(&sr->lock);
	sr->flags |= SR_FLAG_DATA_RDY;
	spin_unlock(&sr->lock);
}
/*
 * sr_is_dma_over
 * It returns 1 if the DMA transfer is over and the user can safely read
 * the buffer, 0 otherwise.
 */
int sr_is_dma_over(struct sr_instance *sr)
{
	int done;

	spin_lock(&sr->lock);
	done = (sr->flags & SR_FLAG_DATA_RDY) ? 1 : 0;
	spin_unlock(&sr->lock);
	return done;
}
/*
* Copyright (C) 2014 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@cern.ch>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/fmc.h>
#include "spec-raw.h"
/* Unfortunately, on the spec this is GPIO9, i.e. IRQ(1) */
/* GPIO setup applied when IRQs are requested: input pin, rising-edge IRQ */
static struct fmc_gpio fa_gpio_on[] = {
	{
		.gpio = FMC_GPIO_IRQ(0),
		.mode = GPIOF_DIR_IN,
		.irqmode = IRQF_TRIGGER_RISING,
	}
};

/* GPIO setup applied on teardown: same pin, IRQ trigger disabled */
static struct fmc_gpio fa_gpio_off[] = {
	{
		.gpio = FMC_GPIO_IRQ(0),
		.mode = GPIOF_DIR_IN,
		.irqmode = 0,
	}
};
/*
 * sr_irq_history_add
 * Pushes an interrupt event into the circular IRQ history, but only if
 * the user subscribed to one of the bits in 'status'. When the buffer
 * is full, the oldest unread entry is overwritten (read index advances).
 *
 * Fixes vs. the original: the noisy pr_info() left over from debugging
 * is now pr_debug() (this runs in interrupt context), and the write
 * index wraps *before* the collision test — the old code wrapped after,
 * missing the w_idx==UAL_IRQ_HISTORY / r_idx==0 overwrite case.
 */
static inline void sr_irq_history_add(struct sr_instance *sr,
				      struct ual_irq_status *st,
				      uint32_t status)
{
	/* Check if the user request notification for a particular interrupt */
	if (!(st->status & status))
		return;

	sr->ual.last_irqs[sr->ual.w_idx_irq].source = st->source;
	sr->ual.last_irqs[sr->ual.w_idx_irq].status = status;
	sr->ual.last_irqs[sr->ual.w_idx_irq].offset = st->offset;

	sr->ual.w_idx_irq = (sr->ual.w_idx_irq + 1) % UAL_IRQ_HISTORY;
	pr_debug("%s %d %d\n", __func__, __LINE__, sr->ual.w_idx_irq);
	/* Buffer full: drop the oldest unread entry */
	if (sr->ual.w_idx_irq == sr->ual.r_idx_irq)
		sr->ual.r_idx_irq = (sr->ual.r_idx_irq + 1) % UAL_IRQ_HISTORY;
}
/*
 * sr_irq_generic_handler
 * Handler for interrupt sources the user subscribed to via sysfs: it
 * reads and clears the source's status register, records the event in
 * the IRQ history and wakes any process polling the irq char device.
 *
 * NOTE(review): it acks the FMC interrupt and returns IRQ_HANDLED even
 * when no subscription matches — confirm this is intentional.
 */
irqreturn_t sr_irq_generic_handler(int irq_core_base, void *arg)
{
	struct fmc_device *fmc = arg;
	struct sr_instance *sr = fmc_get_drvdata(fmc);
	struct ual_irq_status *st;
	uint32_t irq_status;
	int ret;

	ret = sr_irq_find_subscription(sr, irq_core_base);
	if (ret < 0)
		goto out;

	st = &sr->ual.subscribed_irqs[ret];
	irq_status = fmc_readl(fmc, st->source + st->offset);
	/* Clear current interrupts status */
	fmc_writel(fmc, irq_status, st->source + st->offset);
	dev_dbg(&sr->fmc->dev, "Handle interrupts 0x%x 0x%x\n",
		st->source + st->offset, irq_status);

	/* Only bits the user subscribed to generate an event */
	if (irq_status & st->status) {
		sr_irq_history_add(sr, st, irq_status);
		wake_up_interruptible(&sr->ual.q_irq);
	}
out:
	fmc->op->irq_ack(fmc);
	return IRQ_HANDLED;
}
/*
 * sr_irq_dma_handler
 * It handles the interrupt coming from the DMA engine:
 * - DMA done
 * - DMA error
 *
 * It clears the DMA IRQ controller status, records the event in the IRQ
 * history (when subscribed), releases the DMA mappings via sr_dma_done()
 * and wakes every process sleeping on the DMA wait queue.
 */
static irqreturn_t sr_irq_dma_handler(int irq_core_base, void *arg)
{
	struct fmc_device *fmc = arg;
	struct sr_instance *sr = fmc_get_drvdata(fmc);
	uint32_t irq_status;
	int ret, err;

	irq_status = fmc_readl(fmc, sr->irq_dma_base_addr + SR_IRQ_DMA_SRC);
	/* Clear current interrupts status */
	fmc_writel(fmc, irq_status, sr->irq_dma_base_addr + SR_IRQ_DMA_SRC);
	dev_dbg(&sr->fmc->dev, "Handle DMA interrupts 0x%x\n", irq_status);

	if (irq_status & SR_IRQ_DMA_MASK) {
		/* Notify subscribed listeners (history + poll wakeup) */
		ret = sr_irq_find_subscription(sr, irq_core_base);
		if (ret >= 0) {
			sr_irq_history_add(sr, &sr->ual.subscribed_irqs[ret], irq_status);
			wake_up_interruptible(&sr->ual.q_irq);
		}
		sr_dma_done(sr);
		/* Wake up all listener */
		wake_up_interruptible(&sr->q_dma);
		if (unlikely(irq_status & SR_IRQ_DMA_ERR)) {
			err = fmc_readl(sr->fmc, sr->dma_base_addr + SR_DMA_STA);
			if (err)
				dev_err(&sr->fmc->dev,
					"DMA error (status 0x%x)\n", err);
		}
	} else {
		dev_info(&sr->fmc->dev, "Unknown interrupt 0x%x\n", irq_status);
	}

	fmc->op->irq_ack(fmc);
	return IRQ_HANDLED;
}
/*
 * sr_request_irqs
 * If the VIC component is there, then it enables all the necessary interrupt
 * sources and it registers an interrupt handler for each of them:
 * - DMA interrupt controller
 */
int sr_request_irqs(struct sr_instance *sr)
{
	int err = 0;

	/*
	 * If the VIC is not there, we have no interrupt to handle
	 * [some old firmware will not work with this module]
	 */
	if (!sr->vic_base_addr)
		return 0;

	/* Configure the VIC control */
	/* FIXME check if we have to set also the edge length */
	fmc_writel(sr->fmc, 0x3, sr->vic_base_addr + SR_IRQ_VIC_CTRL);
	/* Enable all interrupts on VIC */
	fmc_writel(sr->fmc, SR_IRQ_VIC_MASK,
		   sr->vic_base_addr + SR_IRQ_VIC_ENABLE);
	/*
	 * NOTE(review): this writes (~SR_IRQ_VIC_MASK & SR_IRQ_VIC_DISABLE)
	 * — a bit mask ANDed with a register *offset* constant — and it
	 * targets the ENABLE register, not DISABLE. Verify against the
	 * VIC register map; it looks like a disable-write was intended.
	 */
	fmc_writel(sr->fmc, ~SR_IRQ_VIC_MASK & SR_IRQ_VIC_DISABLE,
		   sr->vic_base_addr + SR_IRQ_VIC_ENABLE);

	/* Request IRQ to VIC for the DMA interrupts */
	if (sr->irq_dma_base_addr) {
		dev_dbg(&sr->fmc->dev, "Request DMA interrupts\n");
		sr->fmc->irq = sr->irq_dma_base_addr;
		err = sr->fmc->op->irq_request(sr->fmc, sr_irq_dma_handler,
					       "spec-raw", 0);
		if (err) {
			dev_err(&sr->fmc->dev,
				"can't request irq %i (error %i)\n",
				sr->fmc->irq, err);
			return err;
		}
		/* Enable interrupts: DMA done and DMA error */
		fmc_writel(sr->fmc, SR_IRQ_DMA_MASK,
			   sr->irq_dma_base_addr + SR_IRQ_DMA_ENABLE_MASK);
		/*
		 * NOTE(review): (~SR_IRQ_DMA_MASK & SR_IRQ_DMA_MASK) is
		 * always 0 — presumably a different mask was intended for
		 * the DISABLE register; confirm against the gateware docs.
		 */
		fmc_writel(sr->fmc, ~SR_IRQ_DMA_MASK & SR_IRQ_DMA_MASK ,
			   sr->irq_dma_base_addr + SR_IRQ_DMA_DISABLE_MASK);
	}

	sr->fmc->op->gpio_config(sr->fmc, fa_gpio_on, ARRAY_SIZE(fa_gpio_on));
	return err;
}
/*
 * sr_free_irqs
 * If the VIC component is there, then it frees the IRQs and disables all
 * the interrupt sources.
 *
 * Fix vs. the original: the DMA IRQ is only freed when it was actually
 * requested. sr_request_irqs() skips the request when the DMA IRQ
 * controller is absent from the SDB tree, but the old code freed an
 * unrequested IRQ (with irq set to 0) anyway.
 */
void sr_free_irqs(struct sr_instance *sr)
{
	struct ual_irq_status *st;
	int i;

	if (!sr->vic_base_addr)
		return;

	dev_dbg(&sr->fmc->dev, "Free all interrupts\n");
	/* Release all subscribed interrupts */
	for (i = 0; i < UAL_IRQ_MAX_SUBSCRIPTION; ++i) {
		st = &sr->ual.subscribed_irqs[i];
		if (st->source == 0xBADC0FFE)	/* free slot */
			continue;
		/* the DMA IRQ is handled separately below */
		if (st->source != sr->irq_dma_base_addr) {
			sr->fmc->irq = st->source;
			sr->fmc->op->irq_free(sr->fmc);
		}
		sr_irq_status_clean(st);
	}

	/* Release the DMA interrupts, if they were requested at all */
	if (sr->irq_dma_base_addr) {
		sr->fmc->irq = sr->irq_dma_base_addr;
		sr->fmc->op->irq_free(sr->fmc);
	}

	/* Disable everything on the VIC */
	fmc_writel(sr->fmc, 0x0,
		   sr->vic_base_addr + SR_IRQ_VIC_ENABLE);
	fmc_writel(sr->fmc, 0x3,
		   sr->vic_base_addr + SR_IRQ_VIC_DISABLE);
	sr->fmc->op->gpio_config(sr->fmc, fa_gpio_off, ARRAY_SIZE(fa_gpio_off));
}
/*
* Copyright (C) 2014 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@cern.ch>
*/
#include <linux/module.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/fmc.h>
#include "spec-raw.h"
/*
 * Compatibility backport: DEVICE_ATTR_RW appeared in kernel 3.11, so it
 * is defined locally to keep this module building on older kernels.
 */
/* Copied from recent kernel 3.11 */
#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \
			 _name##_show, _name##_store)
#define DEVICE_ATTR_RW(_name) \
	struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
/*
 * sr_irq_subscription_show
 * It shows the interrupt subscription list, one "source status" pair
 * per line; free slots (source == 0xBADC0FFE) are skipped.
 */
static ssize_t sr_irq_subscription_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct sr_instance *sr = dev_get_drvdata(dev);
	int i, n = 0;

	for (i = 0; i < UAL_IRQ_MAX_SUBSCRIPTION; ++i) {
		struct ual_irq_status *st = &sr->ual.subscribed_irqs[i];

		if (st->source == 0xBADC0FFE)	/* free slot */
			continue;
		n += sprintf(buf + n, "0x%x 0x%x\n",
			     st->source, st->status);
	}
	return n;
}
/*
 * sr_irq_subscription_store
 * It adds ('+') or removes ('-') an interrupt subscription.
 * Expected input: "<+|-> 0x<source> 0x<offset> 0x<mask>"
 * (The original header comment was a copy-paste of the _show one.)
 *
 * Fixes vs. the original: the sscanf() result is now checked (malformed
 * input used to leave op/src/off/msk uninitialized), and an unknown
 * operation is rejected with -EINVAL instead of silently succeeding.
 */
static ssize_t sr_irq_subscription_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct sr_instance *sr = dev_get_drvdata(dev);
	struct ual_irq_status *st;
	uint32_t src, off, msk;
	int i, err, n;
	char op;

	/* '-' only needs op and source; '+' needs all four fields */
	n = sscanf(buf, "%c 0x%x 0x%x 0x%x", &op, &src, &off, &msk);
	if (n < 2)
		return -EINVAL;

	/* According to the operation add or remove the interrupt */
	switch (op) {
	case '+':
		if (n != 4)
			return -EINVAL;
		/* Find a free slot, or an existing entry for this source */
		for (i = 0; i < UAL_IRQ_MAX_SUBSCRIPTION; ++i) {
			st = &sr->ual.subscribed_irqs[i];
			if ((st->source == 0xBADC0FFE) || st->source == src)
				break;
		}
		if (i == UAL_IRQ_MAX_SUBSCRIPTION) {
			dev_err(dev, "subscription list is full\n");
			return -ENOMEM;
		}
		/* add or update a subscription */
		st->source = src;
		st->status = msk;
		st->offset = off;
		/* if it is not the DMA interrupt, then register a new handler */
		if (st->source != sr->irq_dma_base_addr) {
			sr->fmc->irq = st->source;
			err = sr->fmc->op->irq_request(sr->fmc,
						       sr_irq_generic_handler,
						       "spec-raw", 0);
			if (err) {
				sr_irq_status_clean(st);
				dev_err(&sr->fmc->dev,
					"can't request irq %i (error %i)\n",
					sr->fmc->irq, err);
				return err;
			}
		}
		break;
	case '-':
		for (i = 0; i < UAL_IRQ_MAX_SUBSCRIPTION; ++i) {
			st = &sr->ual.subscribed_irqs[i];
			if (st->source == src)
				break;
		}
		if (i == UAL_IRQ_MAX_SUBSCRIPTION) {
			dev_err(dev, "subscription not found for source 0x%x\n",
				src);
			return -ENODEV;
		}
		/* remove a subscription */
		if (st->source != sr->irq_dma_base_addr) {
			sr->fmc->irq = st->source;
			sr->fmc->op->irq_free(sr->fmc);
		}
		sr_irq_status_clean(st);
		break;
	default:
		dev_err(dev, "unknown operation '%c'\n", op);
		return -EINVAL;
	}

	return count;
}
static DEVICE_ATTR_RW(sr_irq_subscription);
/* Create the "sr_irq_subscription" sysfs file on the irq misc device */
int sr_irq_create_sysfs(struct sr_instance *sr)
{
	return device_create_file(sr->ual.m_irq.this_device,
				  &dev_attr_sr_irq_subscription);
}
/* Remove the "sr_irq_subscription" sysfs file from the irq misc device */
void sr_irq_remove_sysfs(struct sr_instance *sr)
{
	device_remove_file(sr->ual.m_irq.this_device,
			   &dev_attr_sr_irq_subscription);
}
/*
* Copyright (C) 2014 CERN (www.cern.ch)
* Author : Federico Vaga <federico.vaga@cern.ch>
* License : GPL version 2 or later
*/
#define UAL_IRQ_HISTORY 32
#define UAL_IRQ_MAX_SUBSCRIPTION 8
/*
* ual_irq_status
* @source : id of the source of interrupt
* @status : interrupt status register
* @offset : offset from the base address of the interrupt status register
*/
struct ual_irq_status {
	uint32_t source;	/* interrupt source id (0xBADC0FFE = free slot) */
	uint32_t status;	/* interrupt status register value/mask */
	uint32_t offset;	/* status register offset from the base address */
};
#ifdef __KERNEL__
/*
* ual_device
* @m_reg: char device to access device memory to R/W registers
* f_op->read : read a value in the memory
* f_op->write : write a value in the memory
* f_op->llseek: move memory pointer
* @m_dma: char device to access DMA memory
* f_op->read : read the DMA memory
* f_op->write : write the DMA memory
* f_op->ioctl : request DMA transfer
* f_op->mmap : read/write DMA buffer
* f_op->poll : check if new data is ready, or output buffer is empty
* sysfs dma_direction [r/w] : the direction of the transfer
* sysfs dma_length [r/w] : how many bytes transfer on DMA
* sysfs dma_offset [r/w] : device memory offset where start DMA
* sysfs dma_automove_offset [r/w] : if 1 it update the offset after
* each transfer
* @m_irq: char device that provide information about the interrupts
* f_op->poll : it returns when an interrupt occur
* f_op->read : it returns the last occurred interrupts
* sysfs irq_event_request [r/w]: on write it requests to be notified
* when an interrupt occurs. On read it returns the list of
* all requested interrupt events;
* @q_dma: it is the wait queue for DMA buffer. Used by poll_wait() waiting
* for DMA transfer done.
* @q_irq: it is the wait queue for interrupt. Used by poll_wait() waiting
* for interrupt, and waken up when any interrupt occur
* @last_irqs: array containing the history of the IRQs (circular buffer)
* @r_idx_irq: read index of the history array
* @w_idx_irq: write index of the history array
* @dma_buf: DMA buffer
*/
struct ual_device {
	struct miscdevice m_reg;	/* register-access char device */
	struct miscdevice m_dma;	/* DMA-buffer char device */
	struct miscdevice m_irq;	/* interrupt-info char device */
	struct attribute_group dma;
	struct attribute_group irq;
	wait_queue_head_t q_dma;	/* woken when a DMA transfer is done */
	wait_queue_head_t q_irq;	/* woken when a subscribed IRQ occurs */
	/* subscriptions: source == 0xBADC0FFE marks a free slot */
	struct ual_irq_status subscribed_irqs[UAL_IRQ_MAX_SUBSCRIPTION];
	struct ual_irq_status last_irqs[UAL_IRQ_HISTORY]; /* circular history */
	unsigned int r_idx_irq;		/* history read index */
	unsigned int w_idx_irq;		/* history write index */
	spinlock_t irq_lock;
	void *dma_buf;			/* DMA buffer */
	atomic_t map_count;
	void *priv;
};
/* Reset a subscription slot: 0xBADC0FFE marks it as free/invalid */
static inline void sr_irq_status_clean(struct ual_irq_status *st)
{
	st->source = 0xBADC0FFE;
	st->status = 0x0;
	st->offset = 0x0;
}
#endif
/*
* Copyright CERN 2014
* Author: Federico Vaga <federico.vaga@cern.ch>
*/
#ifndef SPEC_RAW_USER_H_
#define SPEC_RAW_USER_H_
#define SR_IOCTL_DMA_FLAG_WRITE (1 << 0)
/* DMA transfer request, shared with user space through SR_IOCTL_DMA */
struct sr_dma_request {
	unsigned long int dev_mem_off;	/* DDR offset where the transfer starts */
	unsigned long int length;	/* transfer length in bytes */
	unsigned long int flags;	/* SR_IOCTL_DMA_FLAG_* bits */
};
#define SR_IOCTL_MAGIC 's'
#define SR_IOCTL_DMA _IOWR(SR_IOCTL_MAGIC, 1, struct sr_dma_request)
#endif /* SPEC_RAW_USER_H_ */
/*
* Copyright (C) 2014 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@cern.ch>
*/
#ifndef SPEC_RAW_H_
#define SPEC_RAW_H_
#include <linux/miscdevice.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include "spec-raw-user.h"
#include "spec-raw-ual.h"
#define SR_DDR_SIZE 0x10000000 /* 256M */
#define SR_IRQ_DMA_DISABLE_MASK 0x00
#define SR_IRQ_DMA_ENABLE_MASK 0x04
#define SR_IRQ_DMA_MASK_STATUS 0x08
#define SR_IRQ_DMA_SRC 0x0C
#define SR_IRQ_DMA_MASK 0x3
#define SR_IRQ_VIC_CTRL 0x00
#define SR_IRQ_VIC_ENABLE 0x08
#define SR_IRQ_VIC_DISABLE 0x0C
#define SR_IRQ_VIC_STATUS 0x10
/* FIXME enable interrupt source 0 [DMA] (check with Tom Levens if it
 * does not work)
 */
#define SR_IRQ_VIC_MASK 0x1
#define SR_IRQ_DMA_DONE 0x1
#define SR_IRQ_DMA_ERR 0x2
#define SR_DMA_CTL 0x00
#define SR_DMA_CTL_SWP 0xC
#define SR_DMA_CTL_ABORT 0x2
#define SR_DMA_CTL_START 0x1
#define SR_DMA_STA 0x04
#define SR_DMA_ADDR 0x08
#define SR_DMA_ADDR_L 0x0C
#define SR_DMA_ADDR_H 0x10
#define SR_DMA_LEN 0x14
#define SR_DMA_NEXT_L 0x18
#define SR_DMA_NEXT_H 0x1C
#define SR_DMA_BR 0x20
#define SR_DMA_BR_DIR 0x2
#define SR_DMA_BR_LAST 0x1
/*
* fa_dma_item: The information about a DMA transfer
* @start_addr: pointer where start to retrieve data from device memory
* @dma_addr_l: low 32bit of the dma address on host memory
* @dma_addr_h: high 32bit of the dma address on host memory
* @dma_len: number of bytes to transfer from device to host
* @next_addr_l: low 32bit of the address of the next memory area to use
* @next_addr_h: high 32bit of the address of the next memory area to use
 * @attribute: dma information about the data transfer. At the moment it is
 *             used only to provide the "last item" bit; direction is fixed
 *             to device->host
*/
struct sr_dma_item {
uint32_t start_addr; /* 0x00 */
uint32_t dma_addr_l; /* 0x04 */
uint32_t dma_addr_h; /* 0x08 */
uint32_t dma_len; /* 0x0C */
uint32_t next_addr_l; /* 0x10 */
uint32_t next_addr_h; /* 0x14 */
uint32_t attribute; /* 0x18 */
uint32_t reserved; /* ouch */
};
#define SR_FLAG_DATA_RDY (1 << 0)
struct sr_instance {
struct ual_device ual;
struct list_head list;
struct fmc_device *fmc;
wait_queue_head_t q_dma;
struct spinlock lock;
unsigned int flags;
struct mutex mtx;
/* Debug FS */
struct dentry *dbg_dir;
struct dentry *dma_loopback;
struct dentry *dma_write_seq;
struct dentry *dma_write_zero;
/* DMA */
unsigned int dma_base_addr;
struct sr_dma_request dma;
struct sg_table sgt;
dma_addr_t dma_list_item;
struct sr_dma_item *items;
/* IRQ */
unsigned int vic_base_addr;
unsigned int irq_dma_base_addr;
unsigned int irq_base_addr;
};
extern const struct file_operations sr_reg_fops;
extern const struct file_operations sr_dma_fops;
extern const struct file_operations sr_irq_fops;
extern const struct file_operations sr_dbgfs_dma_loop_op;
extern const struct file_operations sr_dbgfs_dma_write_seq;
extern const struct file_operations sr_dbgfs_dma_write_zero;
extern struct list_head sr_devices;
extern int sr_dma_start(struct sr_instance *sr);
extern void sr_dma_done(struct sr_instance *sr);
extern int sr_is_dma_over(struct sr_instance *sr);
extern int sr_request_irqs(struct sr_instance *sr);
extern void sr_free_irqs(struct sr_instance *sr);
extern int sr_irq_create_sysfs(struct sr_instance *sr);
extern void sr_irq_remove_sysfs(struct sr_instance *sr);
extern irqreturn_t sr_irq_generic_handler(int irq_core_base, void *arg);
static inline int sr_irq_find_subscription(struct sr_instance *sr, uint32_t source)
{
int i;
/* Check if the user request notification for this source */
for (i = 0; i < UAL_IRQ_MAX_SUBSCRIPTION; ++i)
if (sr->ual.subscribed_irqs[i].source == source )
break;
if (i == UAL_IRQ_MAX_SUBSCRIPTION) /* no subscription found */
return -1;
return i;
}
#endif /* SPEC_RAW_H_ */
......@@ -9,4 +9,5 @@ wr-dio-pps
wr-dio-agent
wr-dio-ruler
stamp-frame
Makefile.specific
\ No newline at end of file
Makefile.specific
spec-dma-dump
......@@ -14,6 +14,7 @@ LIBSHARED = libspec.so
PROGS = spec-cl spec-fwloader spec-vuart specmem
PROGS += wr-dio-cmd wr-dio-pps wr-dio-agent wr-dio-ruler
PROGS += stamp-frame
PROGS += spec-dma-dump
all: $(LIB) $(PROGS) $(LIBSHARED)
......
/*
* Copyright CERN 2014
* Author: Federico Vaga <federico.vaga@cern.ch>
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <getopt.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <string.h>
#include <spec-raw/spec-raw-user.h>
void srdd_help()
{
fprintf(stderr, "spec-dma-dump [options] <file>\n");
fprintf(stderr, "-l <num>: buffer byte length\n");
fprintf(stderr, "-o 0x<hex>: offset in device memory in HEX format\n");
fprintf(stderr, "-f 0x<hex>: bit mask of flags in HEX format\n");
fprintf(stderr, "-h : print this help\n");
fprintf(stderr, "\n\n");
fprintf(stderr, "FLAGS\n");
fprintf(stderr, " bit 0 : 1 for write, 0 for read\n");
exit(1);
}
int main (int argc, char *argv[])
{
struct sr_dma_request dma = {0, 0, 0};
int i, fd, ret, err;
char c, *device;
void *map;
uint8_t *data;
while( (c = getopt(argc, argv, "hl:o:f:")) >=0 ){
switch (c) {
case '?':
case 'h':
srdd_help();
break;
case 'l':
ret = sscanf(optarg, "%li", &dma.length);
if (!ret)
srdd_help();
break;
case 'o':
ret = sscanf(optarg, "0x%lx", &dma.dev_mem_off);
if (!ret)
srdd_help();
break;
case 'f':
ret = sscanf(optarg, "0x%lx", &dma.flags);
if (!ret)
srdd_help();
break;
}
}
if (optind != argc - 1 )
srdd_help();
device = argv[optind];
fprintf(stdout, "[srdd] open device %s\n", device);
fd = open(device, O_RDONLY);
if (fd < 0) {
fprintf(stderr, "[srdd] cannot open device %s\n", device);
fprintf(stderr, " %s\n", strerror(errno));
exit(errno);
}
fprintf(stdout, "[srdd] configure DMA transfer\n");
fprintf(stdout, " off 0x%lx len %lu flags %lu\n",
dma.dev_mem_off, dma.length, dma.flags);
err = ioctl(fd, SR_IOCTL_DMA, &dma);
if (err < 0) {
fprintf(stderr, "[srdd] cannot configure DMA %s\n", device);
fprintf(stderr, " %s\n", strerror(errno));
goto out;
}
map = mmap(0, dma.length, PROT_READ, MAP_SHARED, fd, 0);
if (map == MAP_FAILED) {
fprintf(stderr, "[srdd] cannot mmap memory\n");
fprintf(stderr, " %s\n", strerror(errno));
goto out;
}
fprintf(stdout, "[srdd] DDR Memory content:\n");
data = map;
for (i = 0; i < dma.length; i += 4)
fprintf(stdout, "0x%08lx %02x %02x %02x %02x\n",
dma.dev_mem_off + i, data[i], data[i + 1],
data[i + 2], data[i + 3]);
munmap(map, dma.length);
out:
close(fd);
exit(0);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment