Commit 33a56062 authored by Federico Vaga's avatar Federico Vaga

kernel: use DMA engine instead of custom implementation

This patch ports the FMC ADC driver to use DMA engines to handle
DMA data transfers, in order to remove any custom dependency on
the carrier.

The dedicated code for `svec` and `spec` is no longer useful, so
I removed those files. However, it is not completely true that we can
get rid of the knowledge about the carrier; for instance, the `svec`
needs a special configuration because of the VME bus.

The Linux kernel API does not offer a way to set the DMA context
(useful for VME transfers) on a transfer, so I have to call the
operation directly.

In order to find the correct dmaengine channel suitable for the
FMC-ADC instance, the dmaengine filter function compares the device
instances.
If we are on a SPEC, then the dmaengine is on the SPEC itself (in the
gateware we have the Gennum DMA controller), so the dmaengine and the
FMC-ADC must share the same FMC carrier.
If we are on an SVEC, then the dmaengine is on the VME bridge, so
the dmaengine and the FMC carrier share the same VME bridge.
Signed-off-by: Federico Vaga <federico.vaga@cern.ch>
parent 95fb2544
......@@ -43,8 +43,6 @@ fmc-adc-100m14b4ch-y += onewire.o
fmc-adc-100m14b4ch-y += spi.o
fmc-adc-100m14b4ch-y += fa-spec-core.o
fmc-adc-100m14b4ch-y += fa-spec-regtable.o
fmc-adc-100m14b4ch-y += fa-spec-dma.o
fmc-adc-100m14b4ch-y += fa-spec-irq.o
fmc-adc-100m14b4ch-$(CONFIG_FMC_ADC_SVEC) += fa-svec-core.o
fmc-adc-100m14b4ch-$(CONFIG_FMC_ADC_SVEC) += fa-svec-regtable.o
fmc-adc-100m14b4ch-$(CONFIG_FMC_ADC_SVEC) += fa-svec-dma.o
......@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/dmaengine.h>
#include "fmc-adc-100m14b4cha.h"
......@@ -45,7 +46,7 @@ struct workqueue_struct *fa_workqueue;
static int sg_alloc_table_from_pages_no_squash(struct sg_table *sgt,
struct page **pages,
unsigned int n_pages,
unsigned long offset,
unsigned int offset,
unsigned long size,
gfp_t gfp_mask)
{
......@@ -377,16 +378,6 @@ static int __fa_init(struct fa_dev *fa)
struct zio_device *zdev = fa->zdev;
int i, addr;
/* Check if hardware supports 64-bit DMA */
if (dma_set_mask(fa->pdev->dev.parent, DMA_BIT_MASK(64))) {
/* Check if hardware supports 32-bit DMA */
if (dma_set_mask(fa->pdev->dev.parent, DMA_BIT_MASK(32))) {
dev_err(fa->msgdev,
"32-bit DMA addressing not available\n");
return -EINVAL;
}
}
/* Use identity calibration */
fa_identity_calib_set(fa);
fa->mshot_max_samples = fa_readl(fa, fa->fa_adc_csr_base,
......@@ -481,19 +472,6 @@ static int fa_resource_validation(struct platform_device *pdev)
/* Special Configurations */
switch (pdev->id_entry->driver_data) {
case ADC_VER_SPEC:
r = platform_get_resource(pdev, IORESOURCE_IRQ, ADC_IRQ_DMA);
if (!r) {
dev_err(&pdev->dev,
"The ADC needs an interrupt number for the DMA\n");
return -ENXIO;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, ADC_CARR_DMA);
if (!r) {
dev_err(&pdev->dev,
"The ADC needs address to SPEC DMA engine\n");
return -ENXIO;
}
break;
#ifdef CONFIG_FMC_ADC_SVEC
case ADC_VER_SVEC:
......@@ -542,14 +520,12 @@ int fa_probe(struct platform_device *pdev)
fa->carrier_op = &fa_spec_op;
fa->sg_alloc_table_from_pages = sg_alloc_table_from_pages;
break;
#ifdef CONFIG_FMC_ADC_SVEC
case ADC_VER_SVEC:
memops.read = ioread32be;
memops.write = iowrite32be;
fa->carrier_op = &fa_svec_op;
fa->sg_alloc_table_from_pages = sg_alloc_table_from_pages_no_squash;
break;
#endif
default:
dev_err(fa->msgdev, "Unknow version %lu\n",
pdev->id_entry->driver_data);
......@@ -623,12 +599,10 @@ static const struct platform_device_id fa_id[] = {
.name = "adc-100m-spec",
.driver_data = ADC_VER_SPEC,
},
#ifdef CONFIG_FMC_ADC_SVEC
{
.name = "adc-100m-svec",
.driver_data = ADC_VER_SVEC,
},
#endif
/* TODO we should support different version */
};
......
This diff is collapsed.
......@@ -14,7 +14,6 @@
#include <linux/interrupt.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
/*
* fat_get_irq_status
......
......@@ -141,7 +141,4 @@ struct fa_carrier_op fa_spec_op = {
.enable_irqs = fa_spec_enable_irqs,
.disable_irqs = fa_spec_disable_irqs,
.ack_irq = fa_spec_ack_irq,
.dma_start = fa_spec_dma_start,
.dma_done = fa_spec_dma_done,
.dma_error = fa_spec_dma_error,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright CERN 2012-2019
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
/*
 * gncore_dma_fill
 * Fill one Gennum DMA descriptor (@zsg->page_desc) from a scatterlist
 * entry: device memory offset, 64-bit host DMA address split into
 * low/high words, transfer length, and a link to the next descriptor
 * in the pool when more entries follow.  The very first descriptor
 * (page_idx == 0) is additionally written into the device's DMA
 * registers so the engine can start from it.
 * Return: always 0.
 */
static int gncore_dma_fill(struct zio_dma_sg *zsg)
{
	struct gncore_dma_item *item = (struct gncore_dma_item *)zsg->page_desc;
	struct scatterlist *sg = zsg->sg;
	struct zio_channel *chan = zsg->zsgt->chan;
	struct fa_dev *fa = chan->cset->zdev->priv_d;
	struct fa_spec_data *spec_data = fa->carrier_data;
	dma_addr_t tmp;

	/* Prepare DMA item: split the 64-bit bus address in two 32-bit words */
	item->start_addr = zsg->dev_mem_off;
	item->dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
	item->dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
	item->dma_len = sg_dma_len(sg);

	if (!sg_is_last(sg)) {/* more transfers */
		/* uint64_t so it works on 32 and 64 bit */
		tmp = zsg->zsgt->dma_page_desc_pool;
		tmp += (zsg->zsgt->page_desc_size * (zsg->page_idx + 1));
		item->next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
		item->next_addr_h = ((uint64_t)tmp) >> 32;
		item->attribute = 0x1; /* more items */
	} else {
		item->attribute = 0x0; /* last item */
	}

	/*
	 * The first item is written on the device.
	 * NOTE(review): when the first item is also the last one,
	 * next_addr_l/next_addr_h are written to the device without having
	 * been set above — verify the page_desc pool is zero-initialized.
	 */
	if (zsg->page_idx == 0) {
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_ADDR], item->start_addr);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_ADDR_L], item->dma_addr_l);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_ADDR_H], item->dma_addr_h);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_LEN], item->dma_len);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_NEXT_L], item->next_addr_l);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_NEXT_H], item->next_addr_h);
		/* Set that there is a next item */
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_BR_LAST], item->attribute);
	}

	dev_dbg(fa->msgdev, "DMA item %d (block %d)\n"
		" addr 0x%x\n"
		" addr_l 0x%x\n"
		" addr_h 0x%x\n"
		" length %d\n"
		" next_l 0x%x\n"
		" next_h 0x%x\n"
		" last 0x%x\n",
		zsg->page_idx, zsg->block_idx,
		item->start_addr, item->dma_addr_l, item->dma_addr_h,
		item->dma_len, item->next_addr_l, item->next_addr_h,
		item->attribute);

	return 0;
}
int fa_spec_dma_start(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct fa_spec_data *spec_data = fa->carrier_data;
struct zio_channel *interleave = cset->interleave;
struct zfad_block *zfad_block = interleave->priv_d;
struct zio_block *blocks[fa->n_shots];
int i, err;
/*
* FIXME very inefficient because arm trigger already prepare
* something like zio_block_sg. In the future ZIO can alloc more
* than 1 block at time
*/
for (i = 0; i < fa->n_shots; ++i)
blocks[i] = zfad_block[i].block;
fa->zdma = zio_dma_alloc_sg(interleave, fa->pdev->dev.parent, blocks,
fa->n_shots, GFP_ATOMIC);
if (IS_ERR(fa->zdma))
return PTR_ERR(fa->zdma);
/* Fix block memory offset
* FIXME when official ZIO has multishot and DMA
*/
for (i = 0; i < fa->zdma->n_blocks; ++i)
fa->zdma->sg_blocks[i].dev_mem_off = zfad_block[i].dev_mem_off;
err = zio_dma_map_sg(fa->zdma, sizeof(struct gncore_dma_item),
gncore_dma_fill);
if (err)
goto out_map_sg;
/* Start DMA transfer */
fa_writel(fa, spec_data->fa_dma_base,
&fa_spec_regs[ZFA_DMA_CTL_START], 1);
return 0;
out_map_sg:
zio_dma_free_sg(fa->zdma);
return err;
}
/*
 * fa_spec_dma_done
 * Completion hook for a SPEC DMA transfer: unmap the scatter-gather
 * list and free the descriptors allocated by fa_spec_dma_start().
 */
void fa_spec_dma_done(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;

	zio_dma_unmap_sg(fa->zdma);
	zio_dma_free_sg(fa->zdma);
}
/*
 * fa_spec_dma_error
 * Error hook for a failed SPEC DMA transfer: release the DMA
 * resources, then read the DMA status register and log it when
 * non-zero.  The acquisition data is lost.
 */
void fa_spec_dma_error(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;
	struct fa_spec_data *spec_data = fa->carrier_data;
	uint32_t val;

	fa_spec_dma_done(cset);

	/* NOTE(review): status is read after the unmap/free above —
	 * confirm the register is still meaningful at this point */
	val = fa_readl(fa, spec_data->fa_dma_base,
		       &fa_spec_regs[ZFA_DMA_STA]);
	if (val)
		dev_err(fa->msgdev,
			"DMA error (status 0x%x). All acquisition lost\n", val);
}
......@@ -39,7 +39,4 @@ static void fa_svec_exit(struct fa_dev *fa)
struct fa_carrier_op fa_svec_op = {
.init = fa_svec_init,
.exit = fa_svec_exit,
.dma_start = fa_svec_dma_start,
.dma_done = fa_svec_dma_done,
.dma_error = fa_svec_dma_error,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2019 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-svec.h"
#include "vmebus.h"
#define VME_NO_ADDR_INCREMENT 1
/* FIXME: move to include again */
#ifndef lower_32_bits
#define lower_32_bits(n) ((u32)(n))
#endif /* lower_32_bits */
/*
 * Build a VME DMA descriptor for a device-to-host transfer.
 * @desc:      descriptor to initialize (fully overwritten)
 * @vme_addr:  source address on the VME bus
 * @addr_dest: destination buffer address on the host side
 * @len:       transfer length in bytes
 *
 * The source side uses D32 data width with the A24 user-data AM code;
 * the VME address is not incremented during the transfer (the device
 * exposes its data through a single window).
 */
static void build_dma_desc(struct vme_dma *desc, unsigned long vme_addr,
			   void *addr_dest, ssize_t len)
{
	struct vme_dma_attr *src_attr;
	struct vme_dma_attr *dst_attr;

	memset(desc, 0, sizeof(*desc));
	src_attr = &desc->src;
	dst_attr = &desc->dst;

	/* Overall transfer parameters */
	desc->dir = VME_DMA_FROM_DEVICE;
	desc->length = len;
	desc->novmeinc = VME_NO_ADDR_INCREMENT;
	desc->ctrl.pci_block_size = VME_DMA_BSIZE_4096;
	desc->ctrl.pci_backoff_time = VME_DMA_BACKOFF_0;
	desc->ctrl.vme_block_size = VME_DMA_BSIZE_4096;
	desc->ctrl.vme_backoff_time = VME_DMA_BACKOFF_0;

	/* Source: the device, seen through the VME bus */
	src_attr->data_width = VME_D32;
	src_attr->am = VME_A24_USER_DATA_SCT;
	/* alternative AM: VME_A24_USER_MBLT */
	src_attr->addru = upper_32_bits(vme_addr);
	src_attr->addrl = lower_32_bits(vme_addr);

	/* Destination: the host buffer */
	dst_attr->addru = upper_32_bits((unsigned long)addr_dest);
	dst_attr->addrl = lower_32_bits((unsigned long)addr_dest);
}
/* Endianness */
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 0
#endif
#ifndef BIG_ENDIAN
#define BIG_ENDIAN 1
#endif

/*
 * Detect the CPU byte order at run time.
 * Return: LITTLE_ENDIAN when the least significant byte of an int is
 * stored first in memory, BIG_ENDIAN otherwise.
 */
static int __get_endian(void)
{
	union {
		int value;
		char bytes[sizeof(int)];
	} probe = { .value = 1 };

	return probe.bytes[0] == 1 ? LITTLE_ENDIAN : BIG_ENDIAN;
}
/*
 * Convert a buffer of big-endian 32-bit words to CPU byte order,
 * in place.  On a big-endian CPU the buffer is left untouched; any
 * trailing bytes beyond the last full 32-bit word are ignored.
 * @byte_length: buffer length in bytes
 * @buffer:      data to convert (samples and trigger timetag,
 *               all treated as 32-bit words)
 */
static void __endianness(unsigned int byte_length, void *buffer)
{
	uint32_t *word = buffer;
	int idx, nwords;

	/* VME is big endian; swapping is needed only on LE hosts */
	if (__get_endian() != LITTLE_ENDIAN)
		return;

	nwords = byte_length / 4;
	for (idx = 0; idx < nwords; ++idx, ++word)
		*word = __be32_to_cpu(*word);
}
int fa_svec_dma_start(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct fa_svec_data *svec_data = fa->carrier_data;
struct zio_channel *interleave = cset->interleave;
struct zfad_block *fa_dma_block = interleave->priv_d;
int i;
struct vme_dma desc; /* Vme driver DMA structure */
/*
* write the data address in the ddr_addr register: this
* address has been computed after ACQ_END by looking to the
* trigger position see fa-irq.c::irq_acq_end.
* Be careful: the SVEC HW version expects an address of 32bits word
* therefore mem-offset in byte is translated into 32bit word
**/
fa_writel(fa, svec_data->fa_dma_ddr_addr,
&fa_svec_regfield[FA_DMA_DDR_ADDR],
fa_dma_block[0].dev_mem_off/4);
pr_info("%s:%d 0x%x\n", __func__, __LINE__,
fa_dma_block[0].dev_mem_off/4);
/* Execute DMA shot by shot */
for (i = 0; i < fa->n_shots; ++i) {
dev_info(fa->msgdev,
"configure DMA descriptor shot %d "
"vme addr: 0x%llx destination address: 0x%p len: %d\n",
i, (long long)svec_data->vme_ddr_data,
fa_dma_block[i].block->data,
(int)fa_dma_block[i].block->datalen);
memset(fa_dma_block[i].block->data, 5, fa_dma_block[i].block->datalen);
build_dma_desc(&desc, svec_data->vme_ddr_data,
fa_dma_block[i].block->data,
fa_dma_block[i].block->datalen);
if (vme_do_dma_kernel(&desc))
return -1;
__endianness(fa_dma_block[i].block->datalen,
fa_dma_block[i].block->data);
}
return 0;
}
/*
 * fa_svec_dma_done
 * Completion hook for the SVEC carrier.  The VME transfer in
 * fa_svec_dma_start() appears to complete synchronously (the buffer
 * is byte-swapped right after vme_do_dma_kernel() returns), so no
 * resources remain to release here.
 */
void fa_svec_dma_done(struct zio_cset *cset)
{
	/* nothing special to do */
}
void fa_svec_dma_error(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
dev_err(fa->msgdev,
"DMA error. All acquisition lost\n");
}
......@@ -382,6 +382,8 @@ static int zfat_arm_trigger(struct zio_ti *ti)
dev_mem_off += size;
dev_dbg(fa->msgdev, "next dev_mem_off 0x%x (+%d)\n",
dev_mem_off, size);
zfad_block[i].cset = ti->cset;
}
err = ti->cset->raw_io(ti->cset);
......
......@@ -150,6 +150,7 @@ struct fa_calib {
};
#ifdef __KERNEL__ /* All the rest is only of kernel users */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
......@@ -168,12 +169,10 @@ extern int fa_enable_test_data_adc;
enum fa_irq_resource {
ADC_IRQ_TRG = 0,
ADC_IRQ_DMA,
};
enum fa_mem_resource {
ADC_MEM_BASE = 0,
ADC_CARR_DMA, /* SPEC only, remove it when we support DMA engine */
};
enum fa_bus_resource {
......@@ -386,9 +385,6 @@ struct fa_carrier_op {
int (*enable_irqs) (struct fa_dev *);
int (*disable_irqs) (struct fa_dev *);
int (*ack_irq) (struct fa_dev *, int irq_id);
int (*dma_start)(struct zio_cset *cset);
void (*dma_done)(struct zio_cset *cset);
void (*dma_error)(struct zio_cset *cset);
};
/*
......@@ -426,6 +422,7 @@ struct fa_dev {
/* DMA description */
struct zio_dma_sgt *zdma;
struct sg_table sgt;
/* carrier specific functions (init/exit/reset/readout/irq handling) */
struct fa_carrier_op *carrier_op;
......@@ -442,6 +439,7 @@ struct fa_dev {
/* Acquisition */
unsigned int n_shots;
unsigned int n_fires;
unsigned int transfers_left;
unsigned int mshot_max_samples;
/* Statistic informations */
......@@ -465,8 +463,10 @@ struct fa_dev {
/* Operations */
int (*sg_alloc_table_from_pages)(struct sg_table *sgt,
struct page **pages, unsigned int n_pages,
unsigned long offset, unsigned long size,
struct page **pages,
unsigned int n_pages,
unsigned int offset,
unsigned long size,
gfp_t gfp_mask);
};
......@@ -476,11 +476,19 @@ struct fa_dev {
* @dev_mem_off is the offset in ADC internal memory. It points to the first
* sample of the stored shot
* @first_nent is the index of the first nent used for this block
* @cset: channel set source for the block
* @tx: DMA transfer descriptor
* @cookie: transfer token
*/
struct zfad_block {
struct zio_block *block;
uint32_t dev_mem_off;
unsigned int first_nent;
struct zio_cset *cset;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
struct sg_table sgt;
void *dma_ctx;
};
/*
......@@ -602,9 +610,6 @@ extern int zfad_pattern_data_enable(struct fa_dev *fa, uint16_t pattern,
unsigned int enable);
/* Function exported by fa-dma.c */
extern int zfad_dma_start(struct zio_cset *cset);
extern void zfad_dma_done(struct zio_cset *cset);
extern void zfad_dma_error(struct zio_cset *cset);
extern void fa_irq_work(struct work_struct *work);
/* Functions exported by fa-zio-drv.c */
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment