Commit d4e88870 authored by Federico Vaga

Change DMA management

Signed-off-by: Federico Vaga <federico.vaga@gmail.com>
parent e5669b8e
......@@ -18,6 +18,7 @@ spec-fmc-adc-objs = fa-zio-drv.o
spec-fmc-adc-objs += fa-core.o
spec-fmc-adc-objs += fa-spec.o
spec-fmc-adc-objs += fa-zio-trg.o
spec-fmc-adc-objs += fa-dma.o
all: modules
......
/*
* Copyright CERN 2012
* Author: Federico Vaga <federico.vaga@gmail.com>
*
 * DMA handling
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/zio.h>
#include <linux/zio-buffer.h>
#include <linux/zio-trigger.h>
#include "spec.h"
#include "fmc-adc.h"
/* Initialize each element of the scatter list */
static void zfad_setup_dma_scatter(struct spec_fa *fa, struct zio_block *block)
{
struct scatterlist *sg;
int bytesleft = block->datalen;
void *bufp = block->data;
int mapbytes;
int i;
pr_info("%s:%d\n", __func__, __LINE__);
for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
/*
* If there are less bytes left than what fits
* in the current page (plus page alignment offset)
* we just feed in this, else we stuff in as much
* as we can.
*/
if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
mapbytes = bytesleft;
else
mapbytes = PAGE_SIZE - offset_in_page(bufp);
sg_set_buf(sg, bufp, mapbytes);
bufp += mapbytes;
bytesleft -= mapbytes;
}
BUG_ON(bytesleft);
}
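For intuition, here is a minimal user-space sketch of the chunking rule above; the buffer address, length and PAGE_SIZE value are hypothetical, chosen only to make the arithmetic visible:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
	unsigned long bufp = 0x10000;	/* page-aligned start (hypothetical) */
	long bytesleft = 10000;		/* a 10000-byte block->datalen */

	while (bytesleft > 0) {
		long mapbytes;

		if (bytesleft < (long)(PAGE_SIZE - offset_in_page(bufp)))
			mapbytes = bytesleft;
		else
			mapbytes = PAGE_SIZE - offset_in_page(bufp);
		printf("sg entry: %ld bytes\n", mapbytes); /* 4096, 4096, 1808 */
		bufp += mapbytes;
		bytesleft -= mapbytes;
	}
	return 0;
}

A non-page-aligned start would simply shrink the first entry to the remainder of its page, which is why the loop consults offset_in_page().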
/*
 * Map a scatter/gather table for the DMA transfer from the FMC-ADC.
 * The DMA controller can store a single item, but more than one transfer
 * may be necessary
 */
int zfad_map_dma(struct zio_cset *cset)
{
struct spec_fa *fa = cset->zdev->priv_d;
struct scatterlist *sg;
struct zio_block *block = cset->interleave->active_block;
struct dma_item *items;
unsigned int i, pages, sglen, dev_data_mem = 0;
int err;
pr_info("%s:%d\n", __func__, __LINE__);
/* Create sglists for the transfers, PAGE_SIZE granularity */
pages = DIV_ROUND_UP(block->datalen, PAGE_SIZE);
dev_dbg(&cset->head.dev, "using %d pages for transfer\n", pages);
/* Create sglists for the transfers */
err = sg_alloc_table(&fa->sgt, pages, GFP_ATOMIC);
if (err)
goto out;
items = kzalloc(sizeof(struct dma_item) * fa->sgt.nents, GFP_ATOMIC);
if (!items)
goto out_mem;
	/* Setup the scatter list for the provided block */
	zfad_setup_dma_scatter(fa, block);

	/* Map DMA buffers first: sg_dma_address() is valid only after mapping */
	sglen = dma_map_sg(&fa->spec->pdev->dev, fa->sgt.sgl, fa->sgt.nents,
			   DMA_FROM_DEVICE);
	if (!sglen)
		goto out_free;

	/* Configure DMA items */
	for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
		/* Prepare DMA item */
		items[i].start_addr = dev_data_mem;
		items[i].dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
		items[i].dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
		items[i].dma_len = sg_dma_len(sg);
		dev_data_mem += items[i].dma_len;
		if (i < fa->sgt.nents - 1) { /* more transfers */
			/* uint64_t so it works on 32 and 64 bit */
			items[i].next_addr_l = ((uint64_t)&items[i+1]) & 0xFFFFFFFF;
			items[i].next_addr_h = ((uint64_t)&items[i+1]) >> 32;
			items[i].attribute = 0x1; /* more items */
		} else {
			items[i].attribute = 0x0; /* last item */
		}
		/* The first item is written on the device */
		if (i == 0) {
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_ADDR],
					    items[i].start_addr);
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_ADDR_L],
					    items[i].dma_addr_l);
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_ADDR_H],
					    items[i].dma_addr_h);
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_LEN],
					    items[i].dma_len);
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_NEXT_L],
					    items[i].next_addr_l);
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_NEXT_H],
					    items[i].next_addr_h);
			/* Tell the device whether there is a next item */
			zfa_common_conf_set(&cset->head.dev,
					    &zfad_regs[ZFA_DMA_BR_LAST],
					    items[i].attribute);
		}
	}

	cset->priv_d = items;
	return 0;
out_free:
kfree(items);
out_mem:
sg_free_table(&fa->sgt);
out:
return -ENOMEM;
}
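As a debugging aid, the finished chain can be printed the same way the engine would traverse it. This helper is a sketch, not part of the commit; it assumes the items[] array built by zfad_map_dma() above:

/* Hypothetical debug helper: dump the item chain built by zfad_map_dma() */
static void zfad_dump_dma_chain(struct dma_item *items, unsigned int nents)
{
	unsigned int i;

	for (i = 0; i < nents; i++)
		pr_info("item %u: dev 0x%x <- host 0x%x%08x, len %u (%s)\n",
			i, items[i].start_addr,
			items[i].dma_addr_h, items[i].dma_addr_l,
			items[i].dma_len,
			items[i].attribute ? "more items" : "last item");
}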
/*
 * Unmap the scatter/gather table and release the resources allocated by
 * zfad_map_dma()
 */
void zfad_unmap_dma(struct zio_cset *cset)
{
struct spec_fa *fa = cset->zdev->priv_d;
pr_info("%s:%d\n", __func__, __LINE__);
dma_unmap_sg(&fa->spec->pdev->dev, fa->sgt.sgl, fa->sgt.nents,
DMA_FROM_DEVICE);
kfree(cset->priv_d);
sg_free_table(&fa->sgt);
}
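To show how the two entry points are meant to pair up, here is a minimal usage sketch; zfat_dma_start(), zfat_dma_done() and the ZFA_DMA_CTL_START register index are hypothetical names for illustration, not symbols defined by this commit:

/* Map the active block and kick the engine (ZFA_DMA_CTL_START is assumed) */
static int zfat_dma_start(struct zio_cset *cset)
{
	int err = zfad_map_dma(cset);

	if (err)
		return err;
	zfa_common_conf_set(&cset->head.dev,
			    &zfad_regs[ZFA_DMA_CTL_START], 1);
	return 0;
}

/*
 * From the "DMA done" interrupt path: release the mapping, then hand the
 * block back to ZIO through the trigger's data-done path
 */
static void zfat_dma_done(struct zio_cset *cset)
{
	zfad_unmap_dma(cset);
}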
/*
 * Copyright CERN 2012
* Author: Federico Vaga <federico.vaga@gmail.com>
*
* Driver for the mezzanine ADC for the SPEC
......@@ -11,8 +11,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/zio.h>
#include <linux/zio-buffer.h>
......@@ -226,91 +224,6 @@ static const struct zio_sysfs_operations zfad_s_op = {
};
/*
* Map a scatter/gather table for the DMA transfer from the FMC-ADC.
*/
static int zfad_map_dma(struct zio_cset *cset)
{
struct spec_fa *fa = cset->zdev->priv_d;
struct scatterlist *sg, *nsg;
struct zio_block *block = cset->interleave->active_block;
unsigned int i, pages, size, sglen;
int err;
/* Create sglists for the transfers, PAGE_SIZE granularity */
size = cset->interleave->current_ctrl->nsamples *
cset->interleave->current_ctrl->ssize;
pages = DIV_ROUND_UP(size, PAGE_SIZE);
dev_dbg(&cset->head.dev, "using %d pages for transfer\n", pages);
/* Create sglists for the transfers */
err = sg_alloc_table(&fa->sgt, pages, GFP_ATOMIC);
if (err)
goto out;
/* Configure DMA list's items */
for_each_sg(fa->sgt.sgl, sg, fa->sgt.nents, i) {
if (i == 0) { /* first item on the device */
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_ADDR], 0);
/* Set the low 32bit address */
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_ADDR_L],
sg_dma_address_l(sg));
/* Set the high 32bit address */
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_ADDR_H],
sg_dma_address_h(sg));
/* Set the first item len */
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_LEN],
sg_dma_len(sg));
if (fa->sgt.nents == 1) /* single transfer */
continue;
			/*
			 * The data requires more than one transfer, so
			 * prepare the next item. We need the next SG entry
			 */
nsg = sg_next(sg);
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_NEXT_L],
sg_dma_address_l(nsg));
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_NEXT_H],
sg_dma_address_l(nsg));
/* Set that there is a next item */
zfa_common_conf_set(&cset->head.dev,
&zfad_regs[ZFA_DMA_BR_LAST], 1);
} else { /* other items in the memory */
/*
* Each item in the list is made of the following
* registers: DMACSTARTR, DMAHSTARTLR, DMAH-STARTHR,
* DMALENR, DMANEXTLR, DMANEXTHR and DMAATTRIBR
*/
if (i == fa->sgt.nents - 1) /* last item*/
continue;
nsg = sg_next(sg);
/* FIXME set NEXT_L NEXT_H and BR_LAST*/
}
}
/* Map DMA buffers */
sglen = dma_map_sg(&fa->spec->pdev->dev, fa->sgt.sgl, fa->sgt.nents,
DMA_FROM_DEVICE);
if (!sglen)
goto out_free;
return 0;
out_free:
sg_free_table(&fa->sgt);
out:
return -ENOMEM;
}
/*
 * Prepare the FMC-ADC for the DMA transfer. The FMC-ADC fires the hardware
 * trigger, acquires all samples in its DDR memory and then allows the driver to
......
......@@ -157,17 +157,6 @@ static const struct zio_sysfs_operations zfat_s_op = {
.info_get = zfat_info_get,
};
/*
*
*/
static void zfad_unmap_dma(struct zio_cset *cset)
{
struct spec_fa *fa = cset->zdev->priv_d;
dma_unmap_sg(&fa->spec->pdev->dev);
sg_free_table(&fa->sgt);
}
irqreturn_t zfadc_irq(int irq, void *ptr)
{
struct zfat_instance *zfat = ptr;
......
......@@ -7,6 +7,7 @@
#ifndef _FMC_ADC_H_
#define _FMC_ADC_H_
#include <linux/zio.h>
#include "spec.h"
/* ADC register offset */
......@@ -15,6 +16,9 @@
#define FA_IRQ_MEM_OFF 0x50000
#define FA_ADC_MEM_OFF 0x90000
/* ADC DDR memory */
#define FA_MAX_ACQ_BYTE 0x10000000 /* 256MB */
struct spec_fa {
struct spec_dev *spec;
struct zio_device *hwzdev;
......@@ -23,6 +27,24 @@ struct spec_fa {
unsigned char __iomem *base; /* regs files are byte-oriented */
};
/* The information about a DMA transfer */
struct dma_item {
uint32_t start_addr; /* 0x00 */
uint32_t dma_addr_l; /* 0x04 */
uint32_t dma_addr_h; /* 0x08 */
uint32_t dma_len; /* 0x0C */
uint32_t next_addr_l; /* 0x10 */
uint32_t next_addr_h; /* 0x14 */
uint32_t attribute; /* 0x18 */
	/*
	 * attribute is used only to provide the "last item" bit; the
	 * direction is fixed to device->host
	 */
};
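For reference, a sketch of how a 64-bit bus address maps onto the _l/_h register pairs; the helper name is hypothetical, and the widening cast matters on 32-bit builds, where dma_addr_t may be only 32 bits wide:

static inline void zfad_dma_split_addr(struct dma_item *item, dma_addr_t addr)
{
	/* widen first: shifting a 32-bit dma_addr_t by 32 is undefined */
	item->dma_addr_l = (uint32_t)((uint64_t)addr & 0xFFFFFFFF);
	item->dma_addr_h = (uint32_t)((uint64_t)addr >> 32);
}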
extern int zfad_map_dma(struct zio_cset *cset);
extern void zfad_unmap_dma(struct zio_cset *cset);
/*
* ZFA_CHx_MULT
 * address offset between two registers of the same type on consecutive channels
......@@ -160,17 +182,6 @@ enum zfat_irq {
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
static inline uint32_t sg_dma_address_l(struct scatterlist *sg)
{
return ((uint32_t) (sg_dma_address(sg) & 0xFFFFFFFF));
}
static inline uint32_t sg_dma_address_h(struct scatterlist *sg)
{
return ((uint32_t) (32 >> (sg_dma_address(sg) & (~0xFFFFFFFF))));
}
static inline struct spec_fa *get_zfadc(struct device *dev)
{
switch (to_zio_head(dev)->zobj_type) {
......