Commit c22b1cb0 authored by Federico Vaga, committed by Alessandro Rubini

zio: map a pre-allocated sg table for DMA

Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent a25859fd
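For context, a minimal sketch of the call sequence a carrier driver is expected to follow with the helper API after this change. This is not part of the commit; struct my_desc and my_fill_desc are hypothetical placeholders and error handling is abbreviated:

    /* sketch: allocate, map with a driver-specific descriptor filler, run, undo */
    zdma = zio_dma_alloc_sg(chan, hwdev, blocks, n_blocks, GFP_ATOMIC);
    if (IS_ERR(zdma))
            return PTR_ERR(zdma);
    err = zio_dma_map_sg(zdma, sizeof(struct my_desc), my_fill_desc);
    if (err) {
            zio_dma_free_sg(zdma);
            return err;
    }
    /* ... start the transfer, wait for completion ... */
    zio_dma_unmap_sg(zdma); /* undoes zio_dma_map_sg() */
    zio_dma_free_sg(zdma);  /* undoes zio_dma_alloc_sg() */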
@@ -25,19 +25,70 @@
#include "zio-helpers.h"
static int fa_spec_dma_fill(struct zio_dma_sg *zdma, int page_idx,
int block_idx, void *page_desc,
uint32_t dev_mem_off,
struct scatterlist *sg)
{
struct fa_dma_item *item = (struct fa_dma_item *)page_desc;
struct zio_channel *chan = zdma->chan;
struct fa_dev *fa = chan->cset->zdev->priv_d;
struct fa_spec_data *spec_data = fa->carrier_data;
dma_addr_t tmp;
/* Prepare DMA item */
item->start_addr = dev_mem_off;
item->dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
item->dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
item->dma_len = sg_dma_len(sg);
if (!sg_is_last(sg)) {/* more transfers */
/* uint64_t so it works on 32 and 64 bit */
tmp = zdma->dma_page_desc_pool;
tmp += (zdma->page_desc_size * (page_idx + 1));
item->next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
item->next_addr_h = ((uint64_t)tmp) >> 32;
item->attribute = 0x1; /* more items */
} else {
item->attribute = 0x0; /* last item */
}
dev_dbg(zdma->hwdev, "configure DMA item %d (block %d)"
"(addr: 0x%llx len: %d)(dev off: 0x%x)"
"(next item: 0x%x)\n",
page_idx, block_idx, (long long)sg_dma_address(sg),
sg_dma_len(sg), dev_mem_off, item->next_addr_l);
/* The first item is written on the device */
if (page_idx == 0) {
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_ADDR], item->start_addr);
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_ADDR_L], item->dma_addr_l);
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_ADDR_H], item->dma_addr_h);
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_LEN], item->dma_len);
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_NEXT_L], item->next_addr_l);
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_NEXT_H], item->next_addr_h);
/* Set that there is a next item */
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_BR_LAST], item->attribute);
}
return 0;
}
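To make the chaining above concrete, a worked example with hypothetical numbers (not from this commit): if zdma->page_desc_size is 32 and zdma->dma_page_desc_pool is the bus address 0x100000000, the item at page_idx 2 points to the item at index 3, i.e. 0x100000000 + 32 * 3 = 0x100000060, which is split into next_addr_l = 0x00000060 and next_addr_h = 0x1. The hardware walks these bus-address links by itself; only the first descriptor has to be programmed into the device registers, which is why the fa_writel() sequence above runs for page_idx == 0 only.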
int fa_spec_dma_start(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct fa_spec_data *spec_data = fa->carrier_data;
-struct device *dev = &fa->fmc->dev;
struct zio_channel *interleave = cset->interleave;
struct zfad_block *zfad_block = interleave->priv_d;
struct zio_block *blocks[fa->n_shots];
-uint32_t dev_mem_off = 0;
-dma_addr_t tmp;
-struct scatterlist *sg;
-unsigned int i, sglen, size, i_blk;
-int err;
+int i, err;
/*
* FIXME very inefficient because arm trigger already prepare
@@ -47,108 +98,28 @@ int fa_spec_dma_start(struct zio_cset *cset)
for (i = 0; i < fa->n_shots; ++i)
blocks[i] = zfad_block[i].block;
-fa->zdma = zio_dma_alloc_sg(fa->fmc->hwdev, blocks, fa->n_shots,
-GFP_ATOMIC);
+fa->zdma = zio_dma_alloc_sg(interleave, fa->fmc->hwdev, blocks,
+fa->n_shots, GFP_ATOMIC);
if (IS_ERR(fa->zdma))
return PTR_ERR(fa->zdma);
-/* Limited to 32-bit (kernel limit) TODO the type should be generic */
-size = sizeof(struct fa_dma_item) * fa->zdma->sgt.nents;
-spec_data->items = kzalloc(size, GFP_ATOMIC);
-if (!spec_data->items) {
-dev_err(fa->fmc->hwdev, "cannot allocate coherent dma memory\n");
-err = -ENOMEM;
-goto out_alloc_item;
-}
-spec_data->dma_list_item = dma_map_single(fa->fmc->hwdev,
-spec_data->items, size,
-DMA_TO_DEVICE);
-if (!spec_data->dma_list_item) {
-err = -ENOMEM;
-goto out_map_single;
-}
/* Fix block memory offset
 * FIXME when official ZIO has multishot and DMA
 */
for (i = 0; i < fa->zdma->n_blocks; ++i)
fa->zdma->sg_blocks[i].dev_mem_off = zfad_block->dev_mem_off;
-/* Map DMA buffers */
-sglen = dma_map_sg(fa->fmc->hwdev, fa->zdma->sgt.sgl,
-fa->zdma->sgt.nents, DMA_FROM_DEVICE);
-if (!sglen) {
-dev_err(dev, "cannot map dma memory\n");
+err = zio_dma_map_sg(fa->zdma, sizeof(struct fa_dma_item),
+fa_spec_dma_fill);
+if (err)
goto out_map_sg;
-}
-/* Configure DMA items */
-i_blk = 0;
-for_each_sg(fa->zdma->sgt.sgl, sg, fa->zdma->sgt.nents, i) {
-if (i_blk < fa->n_shots && i == zfad_block[i_blk].first_nent) {
-/*
-* FIXME if we trust our configuration, dev_mem_off is
-* useless in multishot
-*/
-dev_mem_off = zfad_block[i_blk].dev_mem_off;
-i_blk++; /* index the next block */
-if (unlikely(i_blk > fa->n_shots)) {
-dev_err(dev, "DMA map out of block\n");
-BUG();
-}
-}
-/* Prepare DMA item */
-spec_data->items[i].start_addr = dev_mem_off;
-spec_data->items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
-spec_data->items[i].dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
-spec_data->items[i].dma_len = sg_dma_len(sg);
-dev_mem_off += spec_data->items[i].dma_len;
-if (!sg_is_last(sg)) {/* more transfers */
-/* uint64_t so it works on 32 and 64 bit */
-tmp = spec_data->dma_list_item;
-tmp += (sizeof(struct fa_dma_item) * (i + 1));
-spec_data->items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
-spec_data->items[i].next_addr_h = ((uint64_t)tmp) >> 32;
-spec_data->items[i].attribute = 0x1; /* more items */
-} else {
-spec_data->items[i].attribute = 0x0; /* last item */
-}
-pr_debug("configure DMA item %d "
-"(addr: 0x%llx len: %d)(dev off: 0x%x)"
-"(next item: 0x%x)\n",
-i, (long long)sg_dma_address(sg),
-sg_dma_len(sg), dev_mem_off, spec_data->items[i].next_addr_l);
-/* The first item is written on the device */
-if (i == 0) {
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_ADDR],
-spec_data->items[i].start_addr);
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_ADDR_L],
-spec_data->items[i].dma_addr_l);
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_ADDR_H],
-spec_data->items[i].dma_addr_h);
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_LEN],
-spec_data->items[i].dma_len);
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_NEXT_L],
-spec_data->items[i].next_addr_l);
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_NEXT_H],
-spec_data->items[i].next_addr_h);
-/* Set that there is a next item */
-fa_writel(fa, spec_data->fa_dma_base,
-&fa_spec_regs[ZFA_DMA_BR_LAST],
-spec_data->items[i].attribute);
-}
-}
/* Start DMA transfer */
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_CTL_START], 1);
return 0;
out_map_sg:
-dma_unmap_single(fa->fmc->hwdev, spec_data->dma_list_item, size,
-DMA_TO_DEVICE);
-out_map_single:
-kfree(spec_data->items);
-out_alloc_item:
zio_dma_free_sg(fa->zdma);
return err;
}
@@ -156,18 +127,9 @@ out_alloc_item:
void fa_spec_dma_done(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
-unsigned int size;
-struct fa_spec_data *spec_data = fa->carrier_data;
-size = sizeof(struct fa_dma_item) * fa->zdma->sgt.nents;
-dma_unmap_sg(fa->fmc->hwdev, fa->zdma->sgt.sgl, fa->zdma->sgt.nents,
-DMA_FROM_DEVICE);
-dma_unmap_single(fa->fmc->hwdev, spec_data->dma_list_item, size,
-DMA_TO_DEVICE);
-kfree(spec_data->items);
+zio_dma_unmap_sg(fa->zdma);
zio_dma_free_sg(fa->zdma);
-spec_data->items = NULL;
-spec_data->dma_list_item = 0;
}
void fa_spec_dma_error(struct zio_cset *cset)
@@ -102,7 +102,8 @@ static void zio_dma_setup_scatter(struct zio_dma_sg *zdma)
* The function allocates and initializes a scatterlist ready for DMA
* transfer
*/
-struct zio_dma_sg *zio_dma_alloc_sg(struct device *hwdev,
+struct zio_dma_sg *zio_dma_alloc_sg(struct zio_channel *chan,
+struct device *hwdev,
struct zio_block **blocks, /* FIXME to array */
unsigned int n_blocks, gfp_t gfp)
{
@@ -120,7 +121,7 @@ struct zio_dma_sg *zio_dma_alloc_sg(struct device *hwdev,
zdma = kzalloc(sizeof(struct zio_dma_sg), gfp);
if (!zdma)
return ERR_PTR(-ENOMEM);
+zdma->chan = chan;
/* Allocate a new list of blocks with sg information */
zdma->sg_blocks = kzalloc(sizeof(struct zio_blocks_sg) * n_blocks, gfp);
if (!zdma->sg_blocks) {
@@ -175,3 +176,122 @@ void zio_dma_free_sg(struct zio_dma_sg *zdma)
kfree(zdma);
}
EXPORT_SYMBOL(zio_dma_free_sg);
/*
* zio_dma_map_sg
* @zdma: zio DMA descriptor from zio_dma_alloc_sg()
* @page_desc_size: the size (in bytes) of the DMA transfer descriptor of the
* specific hw
* @fill_desc: callback for the driver to fill each transfer
* descriptor
*
* It maps an sg table
*
* fill_desc
* @zdma: zio DMA descriptor from zio_dma_alloc_sg()
* @page_idx: index of the current page transfer
* @block_idx: index of the current zio_block
* @page_desc: current descriptor to fill
* @dev_mem_offset: offset within the device memory
* @sg: current sg descriptor
*/
int zio_dma_map_sg(struct zio_dma_sg *zdma, size_t page_desc_size,
int (*fill_desc)(struct zio_dma_sg *zdma, int page_idx,
int block_idx, void *page_desc,
uint32_t dev_mem_offset,
struct scatterlist *sg))
{
unsigned int i, sglen, i_blk;
int err = 0; /* signed: holds negative errno values */
uint32_t dev_mem_off = 0;
struct scatterlist *sg;
void *item_ptr;
size_t size;
if (unlikely(!zdma || !fill_desc))
return -EINVAL;
/* Limited to 32-bit (kernel limit) */
zdma->page_desc_size = page_desc_size;
size = zdma->page_desc_size * zdma->sgt.nents;
zdma->page_desc_pool = kzalloc(size, GFP_ATOMIC);
if (!zdma->page_desc_pool) {
dev_err(zdma->hwdev, "cannot allocate coherent dma memory\n");
return -ENOMEM;
}
zdma->dma_page_desc_pool = dma_map_single(zdma->hwdev,
zdma->page_desc_pool, size,
DMA_TO_DEVICE);
if (dma_mapping_error(zdma->hwdev, zdma->dma_page_desc_pool)) {
err = -ENOMEM;
goto out_map_single;
}
/* Map DMA buffers */
sglen = dma_map_sg(zdma->hwdev, zdma->sgt.sgl, zdma->sgt.nents,
DMA_FROM_DEVICE);
if (!sglen) {
dev_err(zdma->hwdev, "cannot map dma SG memory\n");
err = -ENOMEM; /* do not return 0 (success) on mapping failure */
goto out_map_sg;
}
i_blk = 0;
for_each_sg(zdma->sgt.sgl, sg, zdma->sgt.nents, i) {
dev_info(zdma->hwdev, "%d 0x%x\n", i, dev_mem_off);
if (i_blk < zdma->n_blocks && i == zdma->sg_blocks[i_blk].first_nent) {
dev_info(zdma->hwdev, "%d is the first nent of block %d\n", i, i_blk);
dev_mem_off = zdma->sg_blocks[i_blk].dev_mem_off;
i_blk++; /* index the next block */
if (unlikely(i_blk > zdma->n_blocks)) {
dev_err(zdma->hwdev, "DMA map out of block\n");
BUG();
}
}
item_ptr = zdma->page_desc_pool + (zdma->page_desc_size * i);
err = fill_desc(zdma, i, i_blk, item_ptr, dev_mem_off, sg);
if (err) {
dev_err(zdma->hwdev, "Cannot fill descriptor %d\n", i);
goto out_fill_desc;
}
dev_mem_off += sg_dma_len(sg);
}
return 0;
out_fill_desc:
dma_unmap_sg(zdma->hwdev, zdma->sgt.sgl, zdma->sgt.nents,
DMA_FROM_DEVICE);
out_map_sg:
dma_unmap_single(zdma->hwdev, zdma->dma_page_desc_pool, size,
DMA_TO_DEVICE);
out_map_single:
kfree(zdma->page_desc_pool);
return err;
}
EXPORT_SYMBOL(zio_dma_map_sg);
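To illustrate the mapping loop above, a worked example with hypothetical numbers (not from this commit): two blocks, a 3-page block with first_nent == 0 at device offset off0 and a 2-page block with first_nent == 3 at device offset off1. Writing sz for page_desc_size and pool for page_desc_pool, fill_desc is invoked as:

    fill_desc(zdma, 0, 1, pool + 0 * sz, off0, sg0);
    fill_desc(zdma, 1, 1, pool + 1 * sz, off0 + len(sg0), sg1);
    fill_desc(zdma, 2, 1, pool + 2 * sz, off0 + len(sg0) + len(sg1), sg2);
    fill_desc(zdma, 3, 2, pool + 3 * sz, off1, sg3);
    fill_desc(zdma, 4, 2, pool + 4 * sz, off1 + len(sg3), sg4);

Note that block_idx is the post-increment value of i_blk, so it is effectively 1-based.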
/*
* zio_dma_unmap_sg
* @zdma: zio DMA descriptor from zio_dma_alloc_sg()
*
* It unmaps an sg table
*/
void zio_dma_unmap_sg(struct zio_dma_sg *zdma)
{
size_t size;
size = zdma->page_desc_size * zdma->sgt.nents;
dma_unmap_sg(zdma->hwdev, zdma->sgt.sgl, zdma->sgt.nents,
DMA_FROM_DEVICE);
dma_unmap_single(zdma->hwdev, zdma->dma_page_desc_pool, size,
DMA_TO_DEVICE);
kfree(zdma->page_desc_pool);
zdma->dma_page_desc_pool = 0;
zdma->page_desc_pool = NULL;
}
EXPORT_SYMBOL(zio_dma_unmap_sg);
@@ -30,32 +30,33 @@ struct zio_blocks_sg {
* @chan: zio channel to which the mapped blocks belong
* @sg_blocks: one or more blocks to map
* @n_blocks: number of blocks to map
* @sgt: scatter gather table
* @page_desc_size: size of the transfer descriptor
* @page_desc_pool: vector of transfer descriptors
* @dma_page_desc_pool: dma address of the vector of transfer descriptors
*/
struct zio_dma_sg {
struct zio_channel *chan;
struct device *hwdev;
struct zio_blocks_sg *sg_blocks;
unsigned int n_blocks;
struct sg_table sgt;
size_t page_desc_size;
void *page_desc_pool;
dma_addr_t dma_page_desc_pool;
};
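As an illustration of the pool layout (numbers are hypothetical, not from this commit): with sgt.nents == 4, page_desc_pool holds four contiguous descriptors of page_desc_size bytes each; descriptor i sits at page_desc_pool + i * page_desc_size on the CPU side and at dma_page_desc_pool + i * page_desc_size on the bus side, the latter being the address a hardware "next" pointer must carry.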
/*
* It describes the current sg item
* @sg: current scatterlist entry
* @blk_index: current block index
* @page_index: current page index
* @is_first_nent_block: it tells whether this descriptor points to the first
* page transfer of the current block
*/
struct zio_dma_sg_desc {
struct scatterlist *sg;
unsigned int blk_index;
unsigned int page_index;
int is_first_nent_block;
};
-extern struct zio_dma_sg *zio_dma_alloc_sg(struct device *hwdev,
+extern struct zio_dma_sg *zio_dma_alloc_sg(struct zio_channel *chan,
+struct device *hwdev,
struct zio_block **blocks,
unsigned int n_blocks,
gfp_t gfp);
extern void zio_dma_free_sg(struct zio_dma_sg *zdma);
extern int zio_dma_map_sg(struct zio_dma_sg *zdma, size_t page_desc_size,
int (*fill_desc)(struct zio_dma_sg *zdma,
int page_idx,
int block_idx, void *page_desc,
uint32_t dev_mem_offset,
struct scatterlist *sg));
extern void zio_dma_unmap_sg(struct zio_dma_sg *zdma);
#endif /* ZIO_HELPERS_H_ */