Commit a25859fd authored by Federico Vaga, committed by Alessandro Rubini

zio: create DMA-sg list from an array of zio_blocks

Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent 8b590933
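For reference, the call pattern that the new helpers support, reconstructed from the fa-spec changes below, looks roughly like the following sketch; the carrier structure and callback names (my_dev, my_dma_start, my_dma_done) are placeholders, not part of this commit, and error handling is trimmed.

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include "zio-helpers.h"

/* Hypothetical carrier state; fa_dev keeps an equivalent zdma pointer */
struct my_dev {
	struct device *hwdev;		/* device that performs the DMA */
	struct zio_dma_sg *zdma;	/* descriptor returned by the helper */
};

/* Program a transfer for an array of blocks (cf. fa_spec_dma_start) */
static int my_dma_start(struct my_dev *md, struct zio_block **blocks,
			unsigned int n_blocks)
{
	struct scatterlist *sg;
	unsigned int i;
	int sglen;

	/* Build one scatterlist covering every block of the acquisition */
	md->zdma = zio_dma_alloc_sg(md->hwdev, blocks, n_blocks, GFP_ATOMIC);
	if (IS_ERR(md->zdma))
		return PTR_ERR(md->zdma);

	sglen = dma_map_sg(md->hwdev, md->zdma->sgt.sgl,
			   md->zdma->sgt.nents, DMA_FROM_DEVICE);
	if (!sglen) {
		zio_dma_free_sg(md->zdma);
		return -ENOMEM;
	}

	/* Program one hardware DMA item per sg entry (carrier specific) */
	for_each_sg(md->zdma->sgt.sgl, sg, md->zdma->sgt.nents, i)
		pr_debug("item %u: addr 0x%llx len %u\n", i,
			 (unsigned long long)sg_dma_address(sg),
			 sg_dma_len(sg));

	return 0;
}

/* Release the mapping when the transfer is over (cf. fa_spec_dma_done) */
static void my_dma_done(struct my_dev *md)
{
	dma_unmap_sg(md->hwdev, md->zdma->sgt.sgl, md->zdma->sgt.nents,
		     DMA_FROM_DEVICE);
	zio_dma_free_sg(md->zdma);
	md->zdma = NULL;
}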
@@ -42,6 +42,7 @@ fmc-adc-100m14b-y += fa-spec-irq.o
 fmc-adc-100m14b-$(CONFIG_FMC_ADC_SVEC) += fa-svec-core.o
 fmc-adc-100m14b-$(CONFIG_FMC_ADC_SVEC) += fa-svec-regtable.o
 fmc-adc-100m14b-$(CONFIG_FMC_ADC_SVEC) += fa-svec-dma.o
+fmc-adc-100m14b-y += zio-helpers.o
 
 all modules:
 	$(MAKE) -C $(LINUX) M=$(shell /bin/pwd) modules
...
@@ -23,165 +23,60 @@
 #include "fmc-adc-100m14b4cha.h"
 #include "fa-spec.h"
+#include "zio-helpers.h"
 
-/*
- * zfat_calculate_nents
- *
- * It calculates the number of necessary nents
- */
-static int zfat_calculate_nents(struct zfad_block *zfad_block,
-				unsigned int n_blocks)
-{
-	int i, bytesleft;
-	void *bufp;
-	int mapbytes;
-	int nents = 0;
-
-	for (i = 0; i < n_blocks; ++i) {
-		bytesleft = zfad_block[i].block->datalen;
-		bufp = zfad_block[i].block->data;
-		zfad_block[i].first_nent = nents;
-		while (bytesleft) {
-			nents++;
-			if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
-				mapbytes = bytesleft;
-			else
-				mapbytes = PAGE_SIZE - offset_in_page(bufp);
-			bufp += mapbytes;
-			bytesleft -= mapbytes;
-		}
-	}
-
-	return nents;
-}
-
-/*
- * zfad_setup_dma_scatter
- *
- * Initialize each element of the scatter list
- */
-static void zfad_setup_dma_scatter(struct fa_dev *fa,
-				   struct zfad_block *zfad_block,
-				   unsigned int n_blocks)
-{
-	struct fa_spec_data *spec_data = fa->carrier_data;
-	struct scatterlist *sg;
-	int bytesleft = 0;
-	void *bufp = NULL;
-	int mapbytes;
-	int i, i_blk;
-
-	dev_dbg(&fa->zdev->head.dev, "Setup dma scatterlist for %zu bytes\n",
-		zfad_block->block->datalen);
-
-	i_blk = 0;
-	for_each_sg(spec_data->sgt.sgl, sg, spec_data->sgt.nents, i) {
-		if (i_blk < n_blocks && i == zfad_block[i_blk].first_nent) {
-			WARN(bytesleft, "unmapped byte in block %i\n",
-			     i_blk - 1);
-			/*
-			 * Configure the DMA for a new block, reset index and
-			 * data pointer
-			 */
-			bytesleft = zfad_block[i_blk].block->datalen;
-			bufp = zfad_block[i_blk].block->data;
-			i_blk++; /* index the next block */
-			if (unlikely(i_blk > n_blocks)) {
-				dev_err(&fa->zdev->head.dev,
-					"DMA map out of block\n");
-				BUG();
-			}
-		}
-		/*
-		 * If there are less bytes left than what fits
-		 * in the current page (plus page alignment offset)
-		 * we just feed in this, else we stuff in as much
-		 * as we can.
-		 */
-		if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
-			mapbytes = bytesleft;
-		else
-			mapbytes = PAGE_SIZE - offset_in_page(bufp);
-		/* Map the page */
-		if (is_vmalloc_addr(bufp))
-			sg_set_page(sg, vmalloc_to_page(bufp), mapbytes,
-				    offset_in_page(bufp));
-		else
-			sg_set_buf(sg, bufp, mapbytes);
-		/* Configure next values */
-		bufp += mapbytes;
-		bytesleft -= mapbytes;
-		pr_debug("sg item (%p(+0x%lx), len:%d, left:%d)\n",
-			 virt_to_page(bufp), offset_in_page(bufp),
-			 mapbytes, bytesleft);
-	}
-}
-
-/*
- * zfad_map_dma
- * @cset: channel set
- * @zfad_block: the block to map through DMA
- *
- * Map a scatter/gather table for the DMA transfer from the FMC-ADC.
- * The DMA controller can store a single item, but more then one transfer
- * could be necessary
- */
-static int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block,
-			unsigned int n_blocks)
+int fa_spec_dma_start(struct zio_cset *cset)
 {
 	struct fa_dev *fa = cset->zdev->priv_d;
 	struct fa_spec_data *spec_data = fa->carrier_data;
 	struct device *dev = &fa->fmc->dev;
-	struct scatterlist *sg;
-	struct fa_dma_item *items;
+	struct zio_channel *interleave = cset->interleave;
+	struct zfad_block *zfad_block = interleave->priv_d;
+	struct zio_block *blocks[fa->n_shots];
 	uint32_t dev_mem_off = 0;
-	unsigned int i, pages, sglen, size, i_blk;
 	dma_addr_t tmp;
+	struct scatterlist *sg;
+	unsigned int i, sglen, size, i_blk;
 	int err;
 
-	pages = zfat_calculate_nents(zfad_block, n_blocks);
-	if (!pages) {
-		dev_info(dev, "No pages to transfer %i\n",
-			 n_blocks);
-		return -EINVAL;
-	}
-	dev_dbg(dev, "using %d pages to transfer %i blocks\n",
-		pages, n_blocks);
-
-	/* Create sglists for the transfers */
-	err = sg_alloc_table(&spec_data->sgt, pages, GFP_ATOMIC);
-	if (err) {
-		dev_err(dev, "cannot allocate sg table (%i pages)\n", pages);
-		goto out;
-	}
+	/*
+	 * FIXME very inefficient because arm trigger already prepare
+	 * something like zio_block_sg. In the future ZIO can alloc more
	 * than 1 block at time
+	 */
+	for (i = 0; i < fa->n_shots; ++i)
+		blocks[i] = zfad_block[i].block;
+
+	fa->zdma = zio_dma_alloc_sg(fa->fmc->hwdev, blocks, fa->n_shots,
+				    GFP_ATOMIC);
 
-	/* Limited to 32-bit (kernel limit) */
-	size = sizeof(*items) * spec_data->sgt.nents;
-	items = kzalloc(size, GFP_ATOMIC);
-	if (!items) {
+	/* Limited to 32-bit (kernel limit) TODO the type should be generic */
+	size = sizeof(struct fa_dma_item) * fa->zdma->sgt.nents;
+	spec_data->items = kzalloc(size, GFP_ATOMIC);
+	if (!spec_data->items) {
 		dev_err(fa->fmc->hwdev, "cannot allocate coherent dma memory\n");
-		goto out_mem;
+		err = -ENOMEM;
+		goto out_alloc_item;
 	}
-	spec_data->items = items;
-	spec_data->dma_list_item = dma_map_single(fa->fmc->hwdev, items, size,
-						  DMA_TO_DEVICE);
-	if (!spec_data->dma_list_item)
-		goto out_free;
-
-	/* Setup the scatter list for the provided block */
-	zfad_setup_dma_scatter(fa, zfad_block, n_blocks);
+	spec_data->dma_list_item = dma_map_single(fa->fmc->hwdev,
						  spec_data->items, size,
+						  DMA_TO_DEVICE);
+	if (!spec_data->dma_list_item) {
+		err = -ENOMEM;
+		goto out_map_single;
+	}
 
 	/* Map DMA buffers */
-	sglen = dma_map_sg(fa->fmc->hwdev, spec_data->sgt.sgl,
-			   spec_data->sgt.nents, DMA_FROM_DEVICE);
+	sglen = dma_map_sg(fa->fmc->hwdev, fa->zdma->sgt.sgl,
+			   fa->zdma->sgt.nents, DMA_FROM_DEVICE);
 	if (!sglen) {
 		dev_err(dev, "cannot map dma memory\n");
-		goto out_map;
+		goto out_map_sg;
 	}
 
 	/* Configure DMA items */
 	i_blk = 0;
-	for_each_sg(spec_data->sgt.sgl, sg, spec_data->sgt.nents, i) {
-		if (i_blk < n_blocks && i == zfad_block[i_blk].first_nent) {
+	for_each_sg(fa->zdma->sgt.sgl, sg, fa->zdma->sgt.nents, i) {
+		if (i_blk < fa->n_shots && i == zfad_block[i_blk].first_nent) {
 			/*
 			 * FIXME if we trust our configuration, dev_mem_off is
 			 * useless in multishot
@@ -189,122 +84,90 @@ static int zfad_map_dma(struct zio_cset *cset, struct zfad_block *zfad_block,
 			dev_mem_off = zfad_block[i_blk].dev_mem_off;
 			i_blk++; /* index the next block */
-			if (unlikely(i_blk > n_blocks)) {
+			if (unlikely(i_blk > fa->n_shots)) {
 				dev_err(dev, "DMA map out of block\n");
 				BUG();
 			}
 		}
 
 		/* Prepare DMA item */
-		items[i].start_addr = dev_mem_off;
-		items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
-		items[i].dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
-		items[i].dma_len = sg_dma_len(sg);
-		dev_mem_off += items[i].dma_len;
+		spec_data->items[i].start_addr = dev_mem_off;
+		spec_data->items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
+		spec_data->items[i].dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
+		spec_data->items[i].dma_len = sg_dma_len(sg);
+		dev_mem_off += spec_data->items[i].dma_len;
 		if (!sg_is_last(sg)) {/* more transfers */
 			/* uint64_t so it works on 32 and 64 bit */
 			tmp = spec_data->dma_list_item;
 			tmp += (sizeof(struct fa_dma_item) * (i + 1));
-			items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
-			items[i].next_addr_h = ((uint64_t)tmp) >> 32;
-			items[i].attribute = 0x1; /* more items */
+			spec_data->items[i].next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
+			spec_data->items[i].next_addr_h = ((uint64_t)tmp) >> 32;
+			spec_data->items[i].attribute = 0x1; /* more items */
 		} else {
-			items[i].attribute = 0x0; /* last item */
+			spec_data->items[i].attribute = 0x0; /* last item */
 		}
 		pr_debug("configure DMA item %d "
 			 "(addr: 0x%llx len: %d)(dev off: 0x%x)"
 			 "(next item: 0x%x)\n",
 			 i, (long long)sg_dma_address(sg),
-			 sg_dma_len(sg), dev_mem_off, items[i].next_addr_l);
+			 sg_dma_len(sg), dev_mem_off, spec_data->items[i].next_addr_l);
 
 		/* The first item is written on the device */
 		if (i == 0) {
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_ADDR],
-				  items[i].start_addr);
+				  spec_data->items[i].start_addr);
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_ADDR_L],
-				  items[i].dma_addr_l);
+				  spec_data->items[i].dma_addr_l);
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_ADDR_H],
-				  items[i].dma_addr_h);
+				  spec_data->items[i].dma_addr_h);
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_LEN],
-				  items[i].dma_len);
+				  spec_data->items[i].dma_len);
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_NEXT_L],
-				  items[i].next_addr_l);
+				  spec_data->items[i].next_addr_l);
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_NEXT_H],
-				  items[i].next_addr_h);
+				  spec_data->items[i].next_addr_h);
 			/* Set that there is a next item */
 			fa_writel(fa, spec_data->fa_dma_base,
 				  &fa_spec_regs[ZFA_DMA_BR_LAST],
-				  items[i].attribute);
+				  spec_data->items[i].attribute);
 		}
 	}
 
+	/* Start DMA transfer */
+	fa_writel(fa, spec_data->fa_dma_base,
+		  &fa_spec_regs[ZFA_DMA_CTL_START], 1);
+
 	return 0;
 
-out_map:
+out_map_sg:
 	dma_unmap_single(fa->fmc->hwdev, spec_data->dma_list_item, size,
 			 DMA_TO_DEVICE);
-out_free:
+out_map_single:
 	kfree(spec_data->items);
-out_mem:
-	sg_free_table(&spec_data->sgt);
-out:
-	return -ENOMEM;
+out_alloc_item:
+	zio_dma_free_sg(fa->zdma);
+	return err;
 }
 
-/*
- * zfad_unmap_dma
- * @cset: channel set
- * @zfad_block: the block to map through DMA
- *
- * It unmaps a blocks
- */
-static void zfad_unmap_dma(struct zio_cset *cset)
+void fa_spec_dma_done(struct zio_cset *cset)
 {
 	struct fa_dev *fa = cset->zdev->priv_d;
 	unsigned int size;
 	struct fa_spec_data *spec_data = fa->carrier_data;
 
-	dev_dbg(fa->fmc->hwdev, "unmap DMA\n");
-	size = sizeof(struct fa_dma_item) * spec_data->sgt.nents;
+	size = sizeof(struct fa_dma_item) * fa->zdma->sgt.nents;
+	dma_unmap_sg(fa->fmc->hwdev, fa->zdma->sgt.sgl, fa->zdma->sgt.nents,
+		     DMA_FROM_DEVICE);
 	dma_unmap_single(fa->fmc->hwdev, spec_data->dma_list_item, size,
 			 DMA_TO_DEVICE);
-	dma_unmap_sg(fa->fmc->hwdev, spec_data->sgt.sgl, spec_data->sgt.nents,
-		     DMA_FROM_DEVICE);
 	kfree(spec_data->items);
+	zio_dma_free_sg(fa->zdma);
 
 	spec_data->items = NULL;
 	spec_data->dma_list_item = 0;
-	sg_free_table(&spec_data->sgt);
-}
-
-int fa_spec_dma_start(struct zio_cset *cset)
-{
-	struct fa_dev *fa = cset->zdev->priv_d;
-	struct fa_spec_data *spec_data = fa->carrier_data;
-	struct zio_channel *interleave = cset->interleave;
-	struct zfad_block *zfad_block = interleave->priv_d;
-	int res;
-
-	res = zfad_map_dma(cset, zfad_block, fa->n_shots);
-	if (res)
-		return res;
-
-	/* Start DMA transfer */
-	fa_writel(fa, spec_data->fa_dma_base,
-		  &fa_spec_regs[ZFA_DMA_CTL_START], 1);
-
-	return 0;
-}
-
-void fa_spec_dma_done(struct zio_cset *cset)
-{
-	zfad_unmap_dma(cset);
 }
 
 void fa_spec_dma_error(struct zio_cset *cset)
@@ -313,7 +176,7 @@ void fa_spec_dma_error(struct zio_cset *cset)
 	struct fa_spec_data *spec_data = fa->carrier_data;
 	uint32_t val;
 
-	zfad_unmap_dma(cset);
+	fa_spec_dma_done(cset);
 	val = fa_readl(fa, spec_data->fa_dma_base,
 		       &fa_spec_regs[ZFA_DMA_STA]);
 	if (val)
...
@@ -89,7 +89,6 @@ struct fa_spec_data {
 	/* DMA attributes */
 	unsigned int fa_dma_base;
 	unsigned int fa_irq_dma_base;
-	struct sg_table sgt;
 	struct fa_dma_item *items;
 	dma_addr_t dma_list_item;
 	unsigned int n_dma_err; /* statistics */
...
@@ -324,6 +324,9 @@ struct fa_dev {
 	unsigned int fa_irq_adc_base;
 	unsigned int fa_utc_base;
 
+	/* DMA description */
+	struct zio_dma_sg *zdma;
+
 	/* carrier specific functions (init/exit/reset/readout/irq handling) */
 	struct fa_carrier_op *carrier_op;
 	/* carrier private data */
...
/*
* Copyright CERN 2014
* Author: Federico Vaga <federico.vaga@gmail.com>
*
* handle DMA mapping
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include "zio-helpers.h"
static int zio_calculate_nents(struct zio_blocks_sg *sg_blocks,
unsigned int n_blocks)
{
int i, bytesleft;
void *bufp;
int mapbytes;
int nents = 0;
for (i = 0; i < n_blocks; ++i) {
bytesleft = sg_blocks[i].block->datalen;
bufp = sg_blocks[i].block->data;
sg_blocks[i].first_nent = nents;
while (bytesleft) {
nents++;
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
bufp += mapbytes;
bytesleft -= mapbytes;
}
}
return nents;
}
static void zio_dma_setup_scatter(struct zio_dma_sg *zdma)
{
struct scatterlist *sg;
int bytesleft = 0;
void *bufp = NULL;
int mapbytes;
int i, i_blk;
i_blk = 0;
for_each_sg(zdma->sgt.sgl, sg, zdma->sgt.nents, i) {
if (i_blk < zdma->n_blocks && i == zdma->sg_blocks[i_blk].first_nent) {
WARN(bytesleft, "unmapped byte in block %i\n",
i_blk - 1);
/*
* Configure the DMA for a new block, reset index and
* data pointer
*/
bytesleft = zdma->sg_blocks[i_blk].block->datalen;
bufp = zdma->sg_blocks[i_blk].block->data;
i_blk++; /* index the next block */
if (unlikely(i_blk > zdma->n_blocks))
BUG();
}
/*
* If there are less bytes left than what fits
* in the current page (plus page alignment offset)
* we just feed in this, else we stuff in as much
* as we can.
*/
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
/* Map the page */
if (is_vmalloc_addr(bufp))
sg_set_page(sg, vmalloc_to_page(bufp), mapbytes,
offset_in_page(bufp));
else
sg_set_buf(sg, bufp, mapbytes);
/* Configure next values */
bufp += mapbytes;
bytesleft -= mapbytes;
pr_debug("sg item (%p(+0x%lx), len:%d, left:%d)\n",
virt_to_page(bufp), offset_in_page(bufp),
mapbytes, bytesleft);
}
}
/*
 * zio_dma_alloc_sg
 * @hwdev: low level device responsible for the DMA
 * @blocks: array of zio_block to transfer
 * @n_blocks: number of blocks to transfer
 * @gfp: gfp flags for memory allocation
 *
 * The function allocates and initializes a scatterlist ready for DMA
 * transfer
 */
struct zio_dma_sg *zio_dma_alloc_sg(struct device *hwdev,
struct zio_block **blocks, /* FIXME to array */
unsigned int n_blocks, gfp_t gfp)
{
struct zio_dma_sg *zdma;
unsigned int i, pages;
int err;
	if (unlikely(!hwdev || !blocks || !n_blocks))
		return ERR_PTR(-EINVAL);

	/*
	 * Allocate a new zio_dma_sg structure that will contain all the
	 * necessary information for DMA
	 */
zdma = kzalloc(sizeof(struct zio_dma_sg), gfp);
if (!zdma)
return ERR_PTR(-ENOMEM);
/* Allocate a new list of blocks with sg information */
zdma->sg_blocks = kzalloc(sizeof(struct zio_blocks_sg) * n_blocks, gfp);
if (!zdma->sg_blocks) {
err = -ENOMEM;
goto out;
}
/* fill the zio_dma_sg structure */
zdma->hwdev = hwdev;
zdma->n_blocks = n_blocks;
for (i = 0; i < n_blocks; ++i)
zdma->sg_blocks[i].block = blocks[i];
/* calculate the number of necessary pages to transfer */
pages = zio_calculate_nents(zdma->sg_blocks, zdma->n_blocks);
if (!pages) {
err = -EINVAL;
goto out_calc_nents;
}
/* Create sglists for the transfers */
err = sg_alloc_table(&zdma->sgt, pages, gfp);
if (err)
goto out_alloc_sg;
/* Setup the scatter list for the provided block */
zio_dma_setup_scatter(zdma);
return zdma;
out_alloc_sg:
out_calc_nents:
kfree(zdma->sg_blocks);
out:
kfree(zdma);
return ERR_PTR(err);
}
EXPORT_SYMBOL(zio_dma_alloc_sg);
/*
 * zio_dma_free_sg
 * @zdma: zio DMA transfer descriptor
 *
 * It releases the resources allocated by zio_dma_alloc_sg
 */
void zio_dma_free_sg(struct zio_dma_sg *zdma)
{
kfree(zdma->sg_blocks);
kfree(zdma);
}
EXPORT_SYMBOL(zio_dma_free_sg);
/*
* Copyright CERN 2014
* Author: Federico Vaga <federico.vaga@gmail.com>
*
* handle DMA mapping
*/
#ifndef ZIO_HELPERS_H_
#define ZIO_HELPERS_H_
#include <linux/zio.h>
#include <linux/scatterlist.h>
/*
 * It describes a zio block to be mapped with sg
 * @block: the block to map
 * @first_nent: index of the first sg entry (DMA transfer) corresponding to
 * the start of this block
 * @dev_mem_off: device memory offset from which to retrieve the data for
 * this block
 */
struct zio_blocks_sg {
struct zio_block *block;
unsigned int first_nent;
unsigned long dev_mem_off;
};
/*
 * It describes the DMA sg mapping
 * @hwdev: the low level device which will do the DMA
 * @sg_blocks: one or more blocks to map
 * @n_blocks: number of blocks to map
 * @sgt: scatter gather table
 */
struct zio_dma_sg {
struct device *hwdev;
struct zio_blocks_sg *sg_blocks;
unsigned int n_blocks;
struct sg_table sgt;
};
/*
 * It describes the current sg item
 * @sg: current scatterlist entry
 * @blk_index: current block index
 * @page_index: current page index
 * @is_first_nent_block: it tells whether this descriptor points to the first
 * page transfer of the current block
 */
struct zio_dma_sg_desc {
struct scatterlist *sg;
unsigned int blk_index;
unsigned int page_index;
int is_first_nent_block;
};
extern struct zio_dma_sg *zio_dma_alloc_sg(struct device *hwdev,
struct zio_block **blocks,
unsigned int n_blocks,
gfp_t gfp);
extern void zio_dma_free_sg(struct zio_dma_sg *zdma);
#endif /* ZIO_HELPERS_H_ */
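As a usage note for the first_nent field, a consumer can recover the block boundaries while walking the mapped table. The following is a minimal sketch, assuming the table has already been through dma_map_sg; the function name zdma_walk_blocks is hypothetical, and the i_blk/first_nent bookkeeping mirrors the fa-spec loop above.

#include <linux/printk.h>
#include <linux/scatterlist.h>
#include "zio-helpers.h"

/* Report, for each mapped sg entry, which zio block it belongs to */
static void zdma_walk_blocks(struct zio_dma_sg *zdma)
{
	struct scatterlist *sg;
	unsigned int i, i_blk = 0;

	for_each_sg(zdma->sgt.sgl, sg, zdma->sgt.nents, i) {
		/* A new block starts exactly at its first_nent entry */
		if (i_blk < zdma->n_blocks &&
		    i == zdma->sg_blocks[i_blk].first_nent)
			i_blk++;
		pr_debug("entry %u (%u bytes) belongs to block %u at 0x%lx\n",
			 i, sg_dma_len(sg), i_blk - 1,
			 zdma->sg_blocks[i_blk - 1].dev_mem_off);
	}
}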