Commit 5a0505dd authored by Federico Vaga

submodule: use ZIO with DMA mapping

Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent 6e245784
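For carrier drivers, the practical effect of this commit (visible in the hunks below) is: the in-tree helpers zio-helpers.c/.h are deleted in favour of the ZIO submodule's zio-dma API, the transfer-wide structure formerly named struct zio_dma_sg becomes struct zio_dma_sgt, and the per-page fill callback shrinks from six arguments to a single per-page cursor. A minimal sketch of the two callback shapes (old_fill/new_fill are illustrative names; the field names are taken from the diff):

/* old local helper API: one argument per per-page value */
static int old_fill(struct zio_dma_sg *zdma, int page_idx, int block_idx,
		    void *page_desc, uint32_t dev_mem_off,
		    struct scatterlist *sg);

/* new ZIO API: a single per-page cursor; the parent transfer
 * descriptor (pool addresses, descriptor size, hwdev, chan) is
 * reached through zsg->zsgt */
static int new_fill(struct zio_dma_sg *zsg);
/* cursor fields used below: zsg->sg, zsg->page_desc, zsg->page_idx,
 * zsg->block_idx, zsg->dev_mem_off */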
@@ -49,7 +49,6 @@ fmc-adc-100m14b-y += fa-spec-irq.o
fmc-adc-100m14b-$(CONFIG_FMC_ADC_SVEC) += fa-svec-core.o
fmc-adc-100m14b-$(CONFIG_FMC_ADC_SVEC) += fa-svec-regtable.o
fmc-adc-100m14b-$(CONFIG_FMC_ADC_SVEC) += fa-svec-dma.o
-fmc-adc-100m14b-y += zio-helpers.o
all modules:
$(MAKE) -C $(LINUX) M=$(shell /bin/pwd) modules
......
@@ -17,35 +17,34 @@
#include <linux/fmc.h>
#include <linux/zio.h>
+#include <linux/zio-dma.h>
#include <linux/zio-buffer.h>
#include <linux/zio-trigger.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
-#include "zio-helpers.h"
-static int fa_spec_dma_fill(struct zio_dma_sg *zdma, int page_idx,
-			    int block_idx, void *page_desc,
-			    uint32_t dev_mem_off,
-			    struct scatterlist *sg)
+static int gncore_dma_fill(struct zio_dma_sg *zsg)
{
-	struct fa_dma_item *item = (struct fa_dma_item *)page_desc;
-	struct zio_channel *chan = zdma->chan;
+	struct gncore_dma_item *item = (struct gncore_dma_item *)zsg->page_desc;
+	struct scatterlist *sg = zsg->sg;
+	struct zio_channel *chan = zsg->zsgt->chan;
struct fa_dev *fa = chan->cset->zdev->priv_d;
struct fa_spec_data *spec_data = fa->carrier_data;
dma_addr_t tmp;
/* Prepare DMA item */
-	item->start_addr = dev_mem_off;
+	item->start_addr = zsg->dev_mem_off;
item->dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
item->dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
item->dma_len = sg_dma_len(sg);
	if (!sg_is_last(sg)) { /* more transfers */
/* uint64_t so it works on 32 and 64 bit */
-		tmp = zdma->dma_page_desc_pool;
-		tmp += (zdma->page_desc_size * (page_idx + 1));
+		tmp = zsg->zsgt->dma_page_desc_pool;
+		tmp += (zsg->zsgt->page_desc_size * (zsg->page_idx + 1));
item->next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
item->next_addr_h = ((uint64_t)tmp) >> 32;
item->attribute = 0x1; /* more items */
@@ -53,14 +52,8 @@ static int fa_spec_dma_fill(struct zio_dma_sg *zdma, int page_idx,
item->attribute = 0x0; /* last item */
}
-	dev_dbg(zdma->hwdev, "configure DMA item %d (block %d)"
-		"(addr: 0x%llx len: %d)(dev off: 0x%x)"
-		"(next item: 0x%x)\n",
-		page_idx, block_idx, (long long)sg_dma_address(sg),
-		sg_dma_len(sg), dev_mem_off, item->next_addr_l);
/* The first item is written on the device */
-	if (page_idx == 0) {
+	if (zsg->page_idx == 0) {
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_ADDR], item->start_addr);
fa_writel(fa, spec_data->fa_dma_base,
@@ -78,6 +71,11 @@ static int fa_spec_dma_fill(struct zio_dma_sg *zdma, int page_idx,
&fa_spec_regs[ZFA_DMA_BR_LAST], item->attribute);
}
+	dev_dbg(zsg->zsgt->hwdev, "configure DMA item %d (block %d)"
+		"(addr: 0x%llx len: %d)(dev off: 0x%x) (next item: 0x%x)\n",
+		zsg->page_idx, zsg->block_idx, (long long)sg_dma_address(sg),
+		sg_dma_len(sg), zsg->dev_mem_off, item->next_addr_l);
return 0;
}
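The next-item arithmetic above relies on a layout guarantee of the helper: zio_dma_map_sg() (the old in-tree version appears in zio-helpers.c below, and the arithmetic here shows the submodule version keeps the same layout) allocates every page descriptor in one contiguous pool and maps it with a single dma_map_single(), so descriptor i+1 always sits at dma_page_desc_pool + (i+1) * page_desc_size. A worked example with hypothetical numbers:

/*
 * Suppose dma_page_desc_pool = 0x10000000 and page_desc_size = D,
 * for a three-page transfer:
 *
 *   item 0 at 0x10000000:        next_addr = 0x10000000 + 1*D, attribute = 0x1
 *   item 1 at 0x10000000 + 1*D:  next_addr = 0x10000000 + 2*D, attribute = 0x1
 *   item 2 at 0x10000000 + 2*D:  attribute = 0x0 (sg_is_last(), next unused)
 *
 * Only item 0 is written to the device registers; the DMA engine then
 * follows the next_addr chain through host memory on its own.
 */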
@@ -109,8 +107,8 @@ int fa_spec_dma_start(struct zio_cset *cset)
for (i = 0; i < fa->zdma->n_blocks; ++i)
fa->zdma->sg_blocks[i].dev_mem_off = zfad_block->dev_mem_off;
-	err = zio_dma_map_sg(fa->zdma, sizeof(struct fa_dma_item),
-			     fa_spec_dma_fill);
+	err = zio_dma_map_sg(fa->zdma, sizeof(struct gncore_dma_item),
+			     gncore_dma_fill);
if (err)
goto out_map_sg;
......
@@ -32,7 +32,7 @@
* only to provide the "last item" bit, direction is fixed to
* device->host
*/
-struct fa_dma_item {
+struct gncore_dma_item {
uint32_t start_addr; /* 0x00 */
uint32_t dma_addr_l; /* 0x04 */
uint32_t dma_addr_h; /* 0x08 */
......
@@ -325,7 +325,7 @@ struct fa_dev {
unsigned int fa_utc_base;
/* DMA description */
-	struct zio_dma_sg *zdma;
+	struct zio_dma_sgt *zdma;
/* carrier specific functions (init/exit/reset/readout/irq handling) */
struct fa_carrier_op *carrier_op;
......
/*
* Copyright CERN 2014
* Author: Federico Vaga <federico.vaga@gmail.com>
*
* handle DMA mapping
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include "zio-helpers.h"
static int zio_calculate_nents(struct zio_blocks_sg *sg_blocks,
unsigned int n_blocks)
{
int i, bytesleft;
void *bufp;
int mapbytes;
int nents = 0;
for (i = 0; i < n_blocks; ++i) {
bytesleft = sg_blocks[i].block->datalen;
bufp = sg_blocks[i].block->data;
sg_blocks[i].first_nent = nents;
while (bytesleft) {
nents++;
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
bufp += mapbytes;
bytesleft -= mapbytes;
}
}
return nents;
}
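A worked example of the page accounting above, with hypothetical numbers:

/*
 * PAGE_SIZE = 4096; one block with datalen = 10240 whose data pointer
 * starts at in-page offset 512:
 *
 *   nent 0: 4096 - 512 = 3584 bytes  (rest of the first page)
 *   nent 1: 4096 bytes               (one full page)
 *   nent 2: 10240 - 3584 - 4096 = 2560 bytes
 *
 * So zio_calculate_nents() returns 3, and the block's first_nent is 0.
 */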
static void zio_dma_setup_scatter(struct zio_dma_sg *zdma)
{
struct scatterlist *sg;
int bytesleft = 0;
void *bufp = NULL;
int mapbytes;
int i, i_blk;
i_blk = 0;
for_each_sg(zdma->sgt.sgl, sg, zdma->sgt.nents, i) {
if (i_blk < zdma->n_blocks && i == zdma->sg_blocks[i_blk].first_nent) {
WARN(bytesleft, "unmapped byte in block %i\n",
i_blk - 1);
/*
* Configure the DMA for a new block, reset index and
* data pointer
*/
bytesleft = zdma->sg_blocks[i_blk].block->datalen;
bufp = zdma->sg_blocks[i_blk].block->data;
i_blk++; /* index the next block */
if (unlikely(i_blk > zdma->n_blocks))
BUG();
}
		/*
		 * If fewer bytes are left than what fits in the current
		 * page (PAGE_SIZE minus the in-page offset), map just
		 * those; otherwise map as much as fits in the page.
		 */
		if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
			mapbytes = bytesleft;
		else
			mapbytes = PAGE_SIZE - offset_in_page(bufp);
		/* Map the page */
		if (is_vmalloc_addr(bufp))
			sg_set_page(sg, vmalloc_to_page(bufp), mapbytes,
				    offset_in_page(bufp));
		else
			sg_set_buf(sg, bufp, mapbytes);
		pr_debug("sg item (%p(+0x%lx), len:%d, left:%d)\n",
			 virt_to_page(bufp), offset_in_page(bufp),
			 mapbytes, bytesleft - mapbytes);
		/* Advance to the next chunk */
		bufp += mapbytes;
		bytesleft -= mapbytes;
}
}
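The is_vmalloc_addr() branch above exists because the two allocator families differ: kmalloc() memory is physically contiguous, so sg_set_buf() (which resolves the page via virt_to_page() internally) is valid, while vmalloc() memory is only virtually contiguous and each page must be looked up explicitly. A minimal illustration (assuming both allocations succeed):

	struct scatterlist sgl[2];
	void *kbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); /* physically contiguous */
	void *vbuf = vmalloc(2 * PAGE_SIZE);         /* virtually contiguous only */

	sg_init_table(sgl, 2);
	sg_set_buf(&sgl[0], kbuf, PAGE_SIZE);        /* virt_to_page() is safe */
	sg_set_page(&sgl[1], vmalloc_to_page(vbuf),  /* must resolve the page */
		    PAGE_SIZE, offset_in_page(vbuf));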
/*
 * zio_dma_alloc_sg
* @chan: zio channel associated to this scatterlist
 * @hwdev: low-level device responsible for the DMA
* @blocks: array of zio_block to transfer
* @n_blocks: number of blocks to transfer
* @gfp: gfp flags for memory allocation
*
* The function allocates and initializes a scatterlist ready for DMA
* transfer
*/
struct zio_dma_sg *zio_dma_alloc_sg(struct zio_channel *chan,
struct device *hwdev,
struct zio_block **blocks, /* FIXME to array */
unsigned int n_blocks, gfp_t gfp)
{
struct zio_dma_sg *zdma;
unsigned int i, pages;
int err;
	if (unlikely(!chan || !hwdev || !blocks || !n_blocks))
		return ERR_PTR(-EINVAL);
	/*
	 * Allocate a new zio_dma_sg structure that will contain all the
	 * information needed for the DMA transfer
	 */
	zdma = kzalloc(sizeof(struct zio_dma_sg), gfp);
	if (!zdma)
		return ERR_PTR(-ENOMEM);
zdma->chan = chan;
/* Allocate a new list of blocks with sg information */
zdma->sg_blocks = kzalloc(sizeof(struct zio_blocks_sg) * n_blocks, gfp);
if (!zdma->sg_blocks) {
err = -ENOMEM;
goto out;
}
/* fill the zio_dma_sg structure */
zdma->hwdev = hwdev;
zdma->n_blocks = n_blocks;
for (i = 0; i < n_blocks; ++i)
zdma->sg_blocks[i].block = blocks[i];
/* calculate the number of necessary pages to transfer */
pages = zio_calculate_nents(zdma->sg_blocks, zdma->n_blocks);
if (!pages) {
err = -EINVAL;
goto out_calc_nents;
}
/* Create sglists for the transfers */
err = sg_alloc_table(&zdma->sgt, pages, gfp);
if (err)
goto out_alloc_sg;
	/* Set up the scatterlist for the provided blocks */
zio_dma_setup_scatter(zdma);
return zdma;
out_alloc_sg:
out_calc_nents:
kfree(zdma->sg_blocks);
out:
kfree(zdma);
return ERR_PTR(err);
}
EXPORT_SYMBOL(zio_dma_alloc_sg);
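Taken together, the helpers imply the following call sequence for a carrier driver. This is a sketch patterned on fa_spec_dma_start() above; struct my_hw_desc and my_fill_desc are hypothetical stand-ins for the hardware-specific descriptor and callback (a skeleton for them follows zio_dma_map_sg() below):

static int my_dma_start(struct zio_channel *chan, struct device *hwdev,
			struct zio_block **blocks, unsigned int n_blocks,
			unsigned long dev_mem_off)
{
	struct zio_dma_sg *zdma;
	unsigned int i;
	int err;

	zdma = zio_dma_alloc_sg(chan, hwdev, blocks, n_blocks, GFP_ATOMIC);
	if (IS_ERR(zdma))
		return PTR_ERR(zdma);
	/* tell fill_desc where each block lives in device memory */
	for (i = 0; i < zdma->n_blocks; ++i)
		zdma->sg_blocks[i].dev_mem_off = dev_mem_off;
	err = zio_dma_map_sg(zdma, sizeof(struct my_hw_desc), my_fill_desc);
	if (err) {
		zio_dma_free_sg(zdma);
		return err;
	}
	/* start the engine here; on completion, call
	 * zio_dma_unmap_sg(zdma) and then zio_dma_free_sg(zdma) */
	return 0;
}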
/*
 * zio_dma_free_sg
 * @zdma: zio DMA transfer descriptor
 *
 * It releases the resources allocated by zio_dma_alloc_sg()
*/
void zio_dma_free_sg(struct zio_dma_sg *zdma)
{
kfree(zdma->sg_blocks);
kfree(zdma);
}
EXPORT_SYMBOL(zio_dma_free_sg);
/*
* zio_dma_map_sg
* @zdma: zio DMA descriptor from zio_dma_alloc_sg()
 * @page_desc_size: size (in bytes) of the DMA transfer descriptor of the
 * specific hardware
 * @fill_desc: callback for the driver to fill each transfer descriptor
 *
 * It maps an sg table
*
* fill_desc
* @zdma: zio DMA descriptor from zio_dma_alloc_sg()
* @page_idx: index of the current page transfer
* @block_idx: index of the current zio_block
* @page_desc: current descriptor to fill
* @dev_mem_offset: offset within the device memory
* @sg: current sg descriptor
*/
int zio_dma_map_sg(struct zio_dma_sg *zdma, size_t page_desc_size,
int (*fill_desc)(struct zio_dma_sg *zdma, int page_idx,
int block_idx, void *page_desc,
uint32_t dev_mem_offset,
struct scatterlist *sg))
{
	unsigned int i, sglen, i_blk;
	int err = 0;
uint32_t dev_mem_off = 0;
struct scatterlist *sg;
void *item_ptr;
size_t size;
if (unlikely(!zdma || !fill_desc))
return -EINVAL;
/* Limited to 32-bit (kernel limit) */
zdma->page_desc_size = page_desc_size;
size = zdma->page_desc_size * zdma->sgt.nents;
zdma->page_desc_pool = kzalloc(size, GFP_ATOMIC);
if (!zdma->page_desc_pool) {
		dev_err(zdma->hwdev, "cannot allocate the page descriptor pool\n");
return -ENOMEM;
}
zdma->dma_page_desc_pool = dma_map_single(zdma->hwdev,
zdma->page_desc_pool, size,
DMA_TO_DEVICE);
	if (dma_mapping_error(zdma->hwdev, zdma->dma_page_desc_pool)) {
err = -ENOMEM;
goto out_map_single;
}
/* Map DMA buffers */
sglen = dma_map_sg(zdma->hwdev, zdma->sgt.sgl, zdma->sgt.nents,
DMA_FROM_DEVICE);
	if (!sglen) {
		dev_err(zdma->hwdev, "cannot map dma SG memory\n");
		err = -ENOMEM;
		goto out_map_sg;
	}
i_blk = 0;
for_each_sg(zdma->sgt.sgl, sg, zdma->sgt.nents, i) {
dev_dbg(zdma->hwdev, "%d 0x%x\n", i, dev_mem_off);
if (i_blk < zdma->n_blocks && i == zdma->sg_blocks[i_blk].first_nent) {
dev_dbg(zdma->hwdev, "%d is the first nent of block %d\n", i, i_blk);
dev_mem_off = zdma->sg_blocks[i_blk].dev_mem_off;
i_blk++; /* index the next block */
if (unlikely(i_blk > zdma->n_blocks)) {
dev_err(zdma->hwdev, "DMA map out of block\n");
BUG();
}
}
item_ptr = zdma->page_desc_pool + (zdma->page_desc_size * i);
err = fill_desc(zdma, i, i_blk, item_ptr, dev_mem_off, sg);
if (err) {
dev_err(zdma->hwdev, "Cannot fill descriptor %d\n", i);
goto out_fill_desc;
}
dev_mem_off += sg_dma_len(sg);
}
return 0;
out_fill_desc:
dma_unmap_sg(zdma->hwdev, zdma->sgt.sgl, zdma->sgt.nents,
DMA_FROM_DEVICE);
out_map_sg:
dma_unmap_single(zdma->hwdev, zdma->dma_page_desc_pool, size,
DMA_TO_DEVICE);
out_map_single:
kfree(zdma->page_desc_pool);
return err;
}
EXPORT_SYMBOL(zio_dma_map_sg);
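To make the fill_desc contract concrete, here is a skeletal implementation for this (local, pre-submodule) API. struct my_hw_desc and my_fill_desc are hypothetical, modeled on fa_spec_dma_fill() above; descriptor chaining (the gncore next_addr fields) is omitted:

struct my_hw_desc {		/* hypothetical hardware layout */
	uint32_t src_off;	/* offset in device memory */
	uint32_t dst_l;		/* host bus address, low 32 bits */
	uint32_t dst_h;		/* host bus address, high 32 bits */
	uint32_t len;		/* transfer length in bytes */
	uint32_t last;		/* non-zero on the final item */
};

static int my_fill_desc(struct zio_dma_sg *zdma, int page_idx, int block_idx,
			void *page_desc, uint32_t dev_mem_off,
			struct scatterlist *sg)
{
	struct my_hw_desc *desc = page_desc;

	desc->src_off = dev_mem_off;
	desc->dst_l = sg_dma_address(sg) & 0xFFFFFFFF;
	desc->dst_h = (uint64_t)sg_dma_address(sg) >> 32;
	desc->len = sg_dma_len(sg);
	desc->last = sg_is_last(sg);
	return 0;	/* a non-zero return aborts zio_dma_map_sg() */
}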
/*
* zio_dma_unmap_sg
* @zdma: zio DMA descriptor from zio_dma_alloc_sg()
*
 * It unmaps an sg table
*/
void zio_dma_unmap_sg(struct zio_dma_sg *zdma)
{
size_t size;
size = zdma->page_desc_size * zdma->sgt.nents;
dma_unmap_sg(zdma->hwdev, zdma->sgt.sgl, zdma->sgt.nents,
DMA_FROM_DEVICE);
dma_unmap_single(zdma->hwdev, zdma->dma_page_desc_pool, size,
DMA_TO_DEVICE);
kfree(zdma->page_desc_pool);
zdma->dma_page_desc_pool = 0;
zdma->page_desc_pool = NULL;
}
EXPORT_SYMBOL(zio_dma_unmap_sg);
/*
* Copyright CERN 2014
* Author: Federico Vaga <federico.vaga@gmail.com>
*
* handle DMA mapping
*/
#ifndef ZIO_HELPERS_H_
#define ZIO_HELPERS_H_
#include <linux/zio.h>
#include <linux/scatterlist.h>
/*
 * It describes a zio block to be mapped with an sg table
 * @block: the block to map
 * @first_nent: index of the first sg entry (nent) that corresponds to the
 * start of this block
 * @dev_mem_off: offset in device memory from which the data for this block
 * is retrieved
*/
struct zio_blocks_sg {
struct zio_block *block;
unsigned int first_nent;
unsigned long dev_mem_off;
};
/*
 * It describes the DMA sg mapping
 * @chan: zio channel associated with this transfer
 * @hwdev: the low-level device that performs the DMA
 * @sg_blocks: one or more blocks to map
 * @n_blocks: number of blocks to map
 * @sgt: scatter-gather table
 * @page_desc_size: size of a single transfer descriptor
 * @page_desc_pool: array of transfer descriptors
 * @dma_page_desc_pool: DMA address of the array of transfer descriptors
*/
struct zio_dma_sg {
struct zio_channel *chan;
struct device *hwdev;
struct zio_blocks_sg *sg_blocks;
unsigned int n_blocks;
struct sg_table sgt;
size_t page_desc_size;
void *page_desc_pool;
dma_addr_t dma_page_desc_pool;
};
extern struct zio_dma_sg *zio_dma_alloc_sg(struct zio_channel *chan,
struct device *hwdev,
struct zio_block **blocks,
unsigned int n_blocks,
gfp_t gfp);
extern void zio_dma_free_sg(struct zio_dma_sg *zdma);
extern int zio_dma_map_sg(struct zio_dma_sg *zdma, size_t page_desc_size,
int (*fill_desc)(struct zio_dma_sg *zdma,
int page_idx,
int block_idx, void *page_desc,
uint32_t dev_mem_offset,
struct scatterlist *sg));
extern void zio_dma_unmap_sg(struct zio_dma_sg *zdma);
#endif /* ZIO_HELPERS_H_ */
zio @ e926a4cb
-Subproject commit 39cdae7ab46a7538af59cc2c8806171cf0ad6dcf
+Subproject commit e926a4cb797dd6eb9a0afb9b34a989e522cdff75