...
 
Commits (12)
......@@ -25,8 +25,8 @@ ccflags-$(CONFIG_FMC_ADC_SVEC) += -I$(VMEBUS_ABS)/include
ccflags-$(CONFIG_FMC_ADC_DEBUG) += -DDEBUG
# Extract ZIO minimum compatible version
ccflags-y += -D__ZIO_MIN_MAJOR_VERSION=$(shell echo $(ZIO_VERSION) | cut -d '-' -f 2 | cut -d '.' -f 1; )
ccflags-y += -D__ZIO_MIN_MINOR_VERSION=$(shell echo $(ZIO_VERSION) | cut -d '-' -f 2 | cut -d '.' -f 2; )
ccflags-y += -D__ZIO_MIN_MAJOR_VERSION=$(shell echo $(ZIO_VERSION) | cut -d '-' -f 1 | cut -d '.' -f 1 | tr -d 'v'; )
ccflags-y += -D__ZIO_MIN_MINOR_VERSION=$(shell echo $(ZIO_VERSION) | cut -d '-' -f 1 | cut -d '.' -f 2; )
subdirs-ccflags-y = $(ccflags-y)
......@@ -38,12 +38,6 @@ fmc-adc-100m14b4ch-y += fa-regtable.o
fmc-adc-100m14b4ch-y += fa-zio-trg.o
fmc-adc-100m14b4ch-y += fa-irq.o
fmc-adc-100m14b4ch-y += fa-debug.o
fmc-adc-100m14b4ch-y += fa-dma.o
fmc-adc-100m14b4ch-y += onewire.o
fmc-adc-100m14b4ch-y += spi.o
fmc-adc-100m14b4ch-y += fa-spec-core.o
fmc-adc-100m14b4ch-y += fa-spec-regtable.o
fmc-adc-100m14b4ch-y += fa-spec-dma.o
fmc-adc-100m14b4ch-y += fa-spec-irq.o
fmc-adc-100m14b4ch-$(CONFIG_FMC_ADC_SVEC) += fa-svec-core.o
fmc-adc-100m14b4ch-$(CONFIG_FMC_ADC_SVEC) += fa-svec-regtable.o
fmc-adc-100m14b4ch-$(CONFIG_FMC_ADC_SVEC) += fa-svec-dma.o
......@@ -26,13 +26,12 @@ endif
VMEBUS_ABS ?= $(abspath $(VMEBUS) )
GIT_VERSION = $(shell git describe --always --dirty --long --tags)
export GIT_VERSION
export ZIO_VERSION
all modules:
$(MAKE) -C $(LINUX) M=$(CURDIR) ZIO_ABS=$(ZIO_ABS) \
ZIO_EXTRA_SYMBOLS-y=$(ZIO_EXTRA_SYMBOLS-y) \
ZIO_VERSION=$(ZIO_VERSION) \
GIT_VERSION=$(GIT_VERSION) \
VMEBUS_ABS=$(VMEBUS_ABS) modules
install modules_install: modules
......
......@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/version.h>
#include <linux/dmaengine.h>
#include "fmc-adc-100m14b4cha.h"
......@@ -36,6 +37,39 @@ static const int zfad_hw_range[] = {
/* fmc-adc specific workqueue */
struct workqueue_struct *fa_workqueue;
/**
 * sg_alloc_table_from_pages_no_squash - build an sg table, one entry per page
 * @sgt: scatter-gather table to initialize
 * @pages: array of pages backing the buffer
 * @n_pages: number of entries in @pages
 * @offset: byte offset into the first page where the data starts
 * @size: total number of bytes described by the table
 * @gfp_mask: allocation flags for the table itself
 *
 * Description:
 * The version from the Linux kernel automatically squash contiguous pages.
 * Sometimes we do not want to squash (e.g. SVEC)
 *
 * Return: 0 on success, or the error returned by sg_alloc_table()
 */
static int sg_alloc_table_from_pages_no_squash(struct sg_table *sgt,
					       struct page **pages,
					       unsigned int n_pages,
					       unsigned int offset,
					       unsigned long size,
					       gfp_t gfp_mask)
{
	struct scatterlist *sg;
	int err, i;

	/* FIX: honour the caller's @gfp_mask (it was hard-coded GFP_KERNEL) */
	err = sg_alloc_table(sgt, n_pages, gfp_mask);
	if (unlikely(err))
		return err;

	for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
		unsigned long chunk_size;

		/* Only the first chunk carries an offset; min() clips the tail */
		chunk_size = PAGE_SIZE - offset;
		sg_set_page(sg, pages[i], min(size, chunk_size), offset);
		offset = 0;
		size -= chunk_size;
	}

	return 0;
}
/*
* zfad_convert_hw_range
* @usr_val: range value
......@@ -344,16 +378,6 @@ static int __fa_init(struct fa_dev *fa)
struct zio_device *zdev = fa->zdev;
int i, addr;
/* Check if hardware supports 64-bit DMA */
if (dma_set_mask(fa->pdev->dev.parent, DMA_BIT_MASK(64))) {
/* Check if hardware supports 32-bit DMA */
if (dma_set_mask(fa->pdev->dev.parent, DMA_BIT_MASK(32))) {
dev_err(fa->msgdev,
"32-bit DMA addressing not available\n");
return -EINVAL;
}
}
/* Use identity calibration */
fa_identity_calib_set(fa);
fa->mshot_max_samples = fa_readl(fa, fa->fa_adc_csr_base,
......@@ -438,36 +462,9 @@ static int fa_resource_validation(struct platform_device *pdev)
return -ENXIO;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, ADC_CARR_MEM_BASE);
if (!r) {
dev_err(&pdev->dev,
"The ADC needs the carrier base address\n");
return -ENXIO;
}
r = platform_get_resource(pdev, IORESOURCE_BUS, ADC_BUS_FMC_SLOT);
if (!r) {
dev_err(&pdev->dev,
"The ADC needs to be assigned to an FMC slot\n");
return -ENXIO;
}
/* Special Configurations */
switch (pdev->id_entry->driver_data) {
case ADC_VER_SPEC:
r = platform_get_resource(pdev, IORESOURCE_IRQ, ADC_IRQ_DMA);
if (!r) {
dev_err(&pdev->dev,
"The ADC needs an interrupt number for the DMA\n");
return -ENXIO;
}
r = platform_get_resource(pdev, IORESOURCE_MEM, ADC_CARR_DMA);
if (!r) {
dev_err(&pdev->dev,
"The ADC needs address to SPEC DMA engine\n");
return -ENXIO;
}
break;
#ifdef CONFIG_FMC_ADC_SVEC
case ADC_VER_SVEC:
......@@ -513,15 +510,13 @@ int fa_probe(struct platform_device *pdev)
case ADC_VER_SPEC:
memops.read = ioread32;
memops.write = iowrite32;
fa->carrier_op = &fa_spec_op;
fa->sg_alloc_table_from_pages = sg_alloc_table_from_pages;
break;
#ifdef CONFIG_FMC_ADC_SVEC
case ADC_VER_SVEC:
memops.read = ioread32be;
memops.write = iowrite32be;
fa->carrier_op = &fa_svec_op;
fa->sg_alloc_table_from_pages = sg_alloc_table_from_pages_no_squash;
break;
#endif
default:
dev_err(fa->msgdev, "Unknow version %lu\n",
pdev->id_entry->driver_data);
......@@ -536,13 +531,6 @@ int fa_probe(struct platform_device *pdev)
fa->fa_spi_base = fa->fa_top_level + 0x1800;
fa->fa_utc_base = fa->fa_top_level + 0x1900;
r = platform_get_resource(fa->pdev, IORESOURCE_MEM, ADC_CARR_MEM_BASE);
fa->fa_carrier_csr_base = ioremap(r->start, resource_size(r));
err = fa->carrier_op->init(fa);
if (err < 0)
goto out;
/* init all subsystems */
for (i = 0, m = mods; i < ARRAY_SIZE(mods); i++, m++) {
dev_dbg(fa->msgdev, "Calling init for \"%s\"\n", m->name);
......@@ -587,8 +575,6 @@ int fa_remove(struct platform_device *pdev)
m->exit(fa);
}
fa->carrier_op->exit(fa);
return 0;
}
......@@ -598,12 +584,10 @@ static const struct platform_device_id fa_id[] = {
.name = "adc-100m-spec",
.driver_data = ADC_VER_SPEC,
},
#ifdef CONFIG_FMC_ADC_SVEC
{
.name = "adc-100m-svec",
.driver_data = ADC_VER_SVEC,
},
#endif
/* TODO we should support different version */
};
......
This diff is collapsed.
......@@ -16,264 +16,6 @@
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
/**
 * It maps the ZIO blocks with an sg table, then it starts the DMA transfer
 * from the ADC to the host memory.
 *
 * @param cset ZIO channel set whose acquisition just completed
 *
 * Return: 0 on success, -EBUSY when the ADC state machine never reaches
 * IDLE, or the error code returned by the carrier dma_start operation.
 */
int zfad_dma_start(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;
	struct zfad_block *zfad_block = cset->interleave->priv_d;
	uint32_t dev_mem_off, trg_pos, pre_samp;
	uint32_t val = 0;
	int try = 5, err;

	/*
	 * All programmed triggers fire, so the acquisition is ended.
	 * If the state machine is _idle_ we can start the DMA transfer.
	 * If the state machine it is not idle, try again 5 times
	 */
	while (try-- && val != FA100M14B4C_STATE_IDLE) {
		/* udelay(2); */
		val = fa_readl(fa, fa->fa_adc_csr_base,
			       &zfad_regs[ZFA_STA_FSM]);
	}
	if (val != FA100M14B4C_STATE_IDLE) {
		/* we can't DMA if the state machine is not idle */
		dev_warn(fa->msgdev,
			 "Can't start DMA on the last acquisition, "
			 "State Machine is not IDLE (status:%d)\n", val);
		return -EBUSY;
	}

	/*
	 * Disable all triggers to prevent fires between
	 * different DMA transfers required for multi-shots
	 */
	fa_writel(fa, fa->fa_adc_csr_base, &zfad_regs[ZFAT_CFG_SRC], 0);

	/* Fix dev_mem_addr in single-shot mode */
	if (fa->n_shots == 1) {
		int nchan = FA100M14B4C_NCHAN;
		struct zio_control *ctrl = cset->chan[nchan].current_ctrl;

		/* get pre-samples from the current control (interleave chan) */
		pre_samp = ctrl->attr_trigger.std_val[ZIO_ATTR_TRIG_PRE_SAMP];
		/* Get trigger position in DDR */
		trg_pos = fa_readl(fa, fa->fa_adc_csr_base,
				   &zfad_regs[ZFAT_POS]);
		/*
		 * compute mem offset (in bytes): pre-samp is converted to
		 * bytes; the block starts pre_samp samples before the trigger
		 */
		dev_mem_off = trg_pos - (pre_samp * cset->ssize * nchan);
		dev_dbg(fa->msgdev,
			"Trigger @ 0x%08x, pre_samp %i, offset 0x%08x\n",
			trg_pos, pre_samp, dev_mem_off);
		zfad_block[0].dev_mem_off = dev_mem_off;
	}

	dev_dbg(fa->msgdev, "Start DMA transfer\n");
	/* Delegate the actual transfer to the carrier (SPEC or SVEC) */
	err = fa->carrier_op->dma_start(cset);
	if (err)
		return err;

	return 0;
}
/**
 * It completes a DMA transfer.
 * It tells to the ZIO framework that all blocks are done. Then, it re-enable
 * the trigger for the next acquisition. If the device is configured for
 * continuous acquisition, the function automatically start the next
 * acquisition
 *
 * @param cset ZIO channel set whose DMA transfer just completed
 */
void zfad_dma_done(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;
	struct zio_channel *interleave = cset->interleave;
	struct zfad_block *zfad_block = interleave->priv_d;
	struct zio_control *ctrl = NULL;
	struct zio_ti *ti = cset->ti;
	struct zio_block *block;
	struct zio_timestamp ztstamp;
	int i;
	uint32_t *trig_timetag;

	/* Let the carrier unmap/release its DMA resources first */
	fa->carrier_op->dma_done(cset);

	/* for each shot, set the timetag of each ctrl block by reading the
	 * trig-timetag appended after the samples. Set also the acquisition
	 * start timetag on every blocks
	 */
	ztstamp.secs = fa_readl(fa, fa->fa_utc_base,
				&zfad_regs[ZFA_UTC_ACQ_START_SECONDS]);
	ztstamp.ticks = fa_readl(fa, fa->fa_utc_base,
				 &zfad_regs[ZFA_UTC_ACQ_START_COARSE]);
	ztstamp.bins = fa_readl(fa, fa->fa_utc_base,
				&zfad_regs[ZFA_UTC_ACQ_START_FINE]);
	for (i = 0; i < fa->n_shots; ++i) {
		block = zfad_block[i].block;
		ctrl = zio_get_ctrl(block);
		/*
		 * The trailing FA_TRIG_TIMETAG_BYTES of each block hold four
		 * 32-bit words: [0] = seconds (low 32 bits), [1] = tag
		 * 0xACCE55 in bits 31..8 plus seconds bits 39..32 in bits
		 * 7..0, [2] = coarse ticks, [3] = trigger status.
		 */
		trig_timetag = (uint32_t *)(block->data + block->datalen
					    - FA_TRIG_TIMETAG_BYTES);
		/* Validate the 0xACCE55 marker before trusting the stamp */
		if (unlikely((*(trig_timetag + 1) >> 8) != 0xACCE55))
			dev_err(fa->msgdev,
				"Wrong acquisition TAG, expected 0xACCE55 but got 0x%X (0x%X)\n",
				(*(trig_timetag + 1) >> 8), *trig_timetag);
		ctrl->tstamp.secs = ((uint64_t)*(trig_timetag + 1) & 0xFF) << 32;
		ctrl->tstamp.secs |= *(trig_timetag);
		ctrl->tstamp.ticks = *(trig_timetag + 2);
		ctrl->tstamp.bins = 0;
		ctrl->attr_trigger.ext_val[FA100M14B4C_TATTR_STA]= *(trig_timetag + 3);

		/* Acquisition start Timetag */
		ctrl->attr_channel.ext_val[FA100M14B4C_DATTR_ACQ_START_S] =
			ztstamp.secs;
		ctrl->attr_channel.ext_val[FA100M14B4C_DATTR_ACQ_START_C] =
			ztstamp.ticks;
		ctrl->attr_channel.ext_val[FA100M14B4C_DATTR_ACQ_START_F] =
			ztstamp.bins;

		/* resize the datalen, by removing the trigger tstamp */
		block->datalen = block->datalen - FA_TRIG_TIMETAG_BYTES;

		/* update seq num */
		ctrl->seq_num = i;
	}

	/* Sync the channel current control with the last ctrl block*/
	memcpy(&interleave->current_ctrl->tstamp,
	       &ctrl->tstamp, sizeof(struct zio_timestamp));
	/* Update sequence number */
	interleave->current_ctrl->seq_num = ctrl->seq_num;

	/*
	 * All DMA transfers done! Inform the trigger about this, so
	 * it can store blocks into the buffer
	 */
	dev_dbg(fa->msgdev, "%i blocks transfered\n", fa->n_shots);
	zio_trigger_data_done(cset);

	/* Re-enable the trigger sources disabled by zfad_dma_start() */
	fa_writel(fa, fa->fa_adc_csr_base, &zfad_regs[ZFAT_CFG_SRC],
		  ti->zattr_set.ext_zattr[FA100M14B4C_TATTR_SRC].value);
}
/**
 * Handle a failed DMA transfer: let the carrier clean up its resources,
 * force the acquisition state machine to STOP and account the error.
 *
 * @param cset ZIO channel set whose DMA transfer failed
 */
void zfad_dma_error(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;

	/* Carrier-specific cleanup first (unmap, free descriptors, ...) */
	fa->carrier_op->dma_error(cset);

	/* Then halt the ADC state machine and keep the statistics */
	zfad_fsm_command(fa, FA100M14B4C_CMD_STOP);
	fa->n_dma_err++;

	/* Failing without a single completed shot deserves a log entry */
	if (fa->n_fires == 0)
		dev_err(fa->msgdev,
			"DMA error occurs but no block was acquired\n");
}
/*
* zfat_irq_acq_end
* @fa: fmc-adc descriptor
*
* The ADC end the acquisition, so, if the state machine is idle, we can
* retrieve data from the ADC DDR memory.
*/
void zfat_irq_acq_end(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
dev_dbg(fa->msgdev, "Acquisition done\n");
/*
* because the driver doesn't listen anymore trig-event
* we agreed that the HW will provide a dedicated register
* to check the real number of shots in order to compare it
* with the requested one.
* This ultimate check is not crucial because the HW implements
* a solid state machine and acq-end can happens only after
* the execution of the n requested shots.
*/
fa->n_fires = fa->n_shots - fa_readl(fa, fa->fa_adc_csr_base,
&zfad_regs[ZFAT_SHOTS_REM]);
if (fa->n_fires != fa->n_shots) {
dev_err(fa->msgdev,
"Expected %i trigger fires, but %i occurs\n",
fa->n_shots, fa->n_fires);
}
}
/*
 * job executed within a work thread
 * Depending of the carrier the job slightly differs:
 * SVEC: dma_start() blocks till the the DMA ends
 *       (fully managed by the vmebus driver)
 *       Therefore the DMA outcome can be processed immediately
 * SPEC: dma_start() launch the job an returns immediately.
 *       An interrupt DMA_DONE or ERROR is expecting to signal the end
 *       of the DMA transaction
 *       (See fa-spec-irq.c::fa-spec_irq_handler)
 */
static void fa_irq_work(struct work_struct *work)
{
	struct fa_dev *fa = container_of(work, struct fa_dev, irq_work);
	struct zio_cset *cset = fa->zdev->cset;
	int res;

	/* Account the fired shots, then move the data to host memory */
	zfat_irq_acq_end(cset);
	res = zfad_dma_start(cset);
	if (!res) {
		/*
		 * No error.
		 * If there is an IRQ DMA src to notify the ends of the DMA,
		 * leave the workqueue.
		 * dma_done will be proceed on DMA_END reception.
		 * Otherwhise call dma_done in sequence
		 */
		if (fa->irq_src & FA_IRQ_SRC_DMA)
			/*
			 * waiting for END_OF_DMA IRQ
			 * with the CSET_BUSY flag Raised
			 * The flag will be lowered by the irq_handler
			 * handling END_DMA
			 */
			goto end;

		zfad_dma_done(cset);
	}
	/*
	 * Lower CSET_HW_BUSY
	 * (reached on synchronous completion or on dma_start failure)
	 */
	spin_lock(&cset->lock);
	cset->flags &= ~ZIO_CSET_HW_BUSY;
	spin_unlock(&cset->lock);

end:
	if (res) {
		/* Stop acquisition on error */
		zfad_dma_error(cset);
	} else if (fa->enable_auto_start) {
		/* Automatic start next acquisition */
		dev_dbg(fa->msgdev, "Automatic start\n");
		zfad_fsm_command(fa, FA100M14B4C_CMD_START);
	}
}
/*
* fat_get_irq_status
* @fa: adc descriptor
......@@ -380,9 +122,6 @@ int fa_setup_irqs(struct fa_dev *fa)
/* set IRQ sources to listen */
fa->irq_src = FA_IRQ_SRC_ACQ;
if (fa->carrier_op->setup_irqs)
err = fa->carrier_op->setup_irqs(fa);
return err;
}
......@@ -395,10 +134,6 @@ int fa_free_irqs(struct fa_dev *fa)
*/
fa_disable_irqs(fa);
/* Release carrier IRQs (if any) */
if (fa->carrier_op->free_irqs)
fa->carrier_op->free_irqs(fa);
/* Release ADC IRQs */
free_irq(platform_get_irq(fa->pdev, ADC_IRQ_TRG), fa);
......@@ -412,9 +147,6 @@ int fa_enable_irqs(struct fa_dev *fa)
fa_writel(fa, fa->fa_irq_adc_base,
&zfad_regs[ZFA_IRQ_ADC_ENABLE_MASK],
FA_IRQ_ADC_ACQ_END);
if (fa->carrier_op->enable_irqs)
fa->carrier_op->enable_irqs(fa);
return 0;
}
......@@ -426,8 +158,6 @@ int fa_disable_irqs(struct fa_dev *fa)
&zfad_regs[ZFA_IRQ_ADC_DISABLE_MASK],
FA_IRQ_ADC_ACQ_END);
if (fa->carrier_op->disable_irqs)
fa->carrier_op->disable_irqs(fa);
return 0;
}
......
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2019 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
static int fa_spec_init(struct fa_dev *fa)
{
struct resource *r;
struct fa_spec_data *cdata;
uint32_t val;
cdata = kzalloc(sizeof(struct fa_spec_data), GFP_KERNEL);
if (!cdata)
return -ENOMEM;
r = platform_get_resource(fa->pdev, IORESOURCE_MEM, ADC_CARR_DMA);
cdata->fa_dma_base = ioremap(r->start, resource_size(r));
cdata->fa_irq_dma_base = cdata->fa_dma_base + 0x0200;
dev_info(fa->msgdev,
"Spec Base addrs: irq_dmma: %p, dma_ctrl: %p, csr: %p\n",
cdata->fa_irq_dma_base, cdata->fa_dma_base,
fa->fa_carrier_csr_base);
/* Reset the FMC slot */
fa_writel(fa, fa->fa_carrier_csr_base,
&fa_spec_regs[ZFA_CAR_FMC_RES], 1);
mdelay(50);
fa_writel(fa, fa->fa_carrier_csr_base,
&fa_spec_regs[ZFA_CAR_FMC_RES], 0);
mdelay(50);
/* Verify that the FMC is plugged (0 is plugged) */
val = fa_readl(fa, fa->fa_carrier_csr_base,
&fa_spec_regs[ZFA_CAR_FMC_PRES]);
if (val) {
dev_err(fa->msgdev, "No FCM ADC plugged\n");
return -ENODEV;
}
/* Verify that system PLL is locked (1 is calibrated) */
val = fa_readl(fa, fa->fa_carrier_csr_base,
&fa_spec_regs[ZFA_CAR_SYS_PLL]);
if (!val) {
dev_err(fa->msgdev, "System PLL not locked\n");
return -ENODEV;
}
/* Verify that DDR3 calibration is done (1 is calibrated) */
val = fa_readl(fa, fa->fa_carrier_csr_base,
&fa_spec_regs[ZFA_CAR_DDR_CAL]);
if (!val) {
dev_err(fa->msgdev, "DDR3 Calibration not done\n");
return -ENODEV;
}
/* Set DMA to transfer data from device to host */
fa_writel(fa, cdata->fa_dma_base,
&fa_spec_regs[ZFA_DMA_BR_DIR], 0);
/* register carrier data */
fa->carrier_data = cdata;
dev_info(fa->msgdev, "spec::%s successfully executed\n", __func__);
return 0;
}
/* Placeholder: the ADC core reset through the Gennum is not implemented */
static int fa_spec_reset(struct fa_dev *fa)
{
	/*struct spec_dev *spec = fa->fmc->carrier_data;*/
	dev_info(fa->msgdev, "%s: resetting ADC core through Gennum.\n",
		 __func__);
	return 0;
}
static void fa_spec_exit(struct fa_dev *fa)
{
kfree(fa->carrier_data);
}
/* Unfortunately, on the spec this is GPIO9, i.e. IRQ(1) */
/* FIXME find a way to get rid of fmc here
* This is used only by the SPEC design, is it not possible to avoid it
* and let the VHDL configure the GPIO?
*/
/* static struct fmc_gpio fa_gpio_on[] = { */
/* { */
/* .gpio = FMC_GPIO_IRQ(0), */
/* .mode = GPIOF_DIR_IN, */
/* .irqmode = IRQF_TRIGGER_RISING, */
/* } */
/* }; */
/* static struct fmc_gpio fa_gpio_off[] = { */
/* { */
/* .gpio = FMC_GPIO_IRQ(0), */
/* .mode = GPIOF_DIR_IN, */
/* .irqmode = 0, */
/* } */
/* }; */
static int fa_spec_setup_irqs(struct fa_dev *fa)
{
struct resource *r;
int err;
r = platform_get_resource(fa->pdev, IORESOURCE_IRQ, ADC_IRQ_DMA);
err = request_any_context_irq(r->start, fa_spec_irq_handler, 0,
r->name, fa);
if (err < 0) {
dev_err(fa->msgdev, "can't request irq 0x%llx (error %i)\n",
r->start, err);
return err;
}
//fmc_gpio_config(fmc, fa_gpio_on, ARRAY_SIZE(fa_gpio_on));
dev_info(fa->msgdev, "spec::%s successfully executed\n", __func__);
/* Add SPEC specific IRQ sources to listen */
fa->irq_src |= FA_IRQ_SRC_DMA;
return 0;
}
/* Give back the DMA interrupt line acquired in fa_spec_setup_irqs() */
static int fa_spec_free_irqs(struct fa_dev *fa)
{
	int irq = platform_get_irq(fa->pdev, ADC_IRQ_DMA);

	/* Release DMA IRQs */
	free_irq(irq, fa);

	/* fmc_gpio_config(fmc, fa_gpio_off, ARRAY_SIZE(fa_gpio_off)); */

	return 0;
}
/* Unmask all DMA interrupt sources (DONE and ERROR) on the carrier */
static int fa_spec_enable_irqs(struct fa_dev *fa)
{
	struct fa_spec_data *cdata = fa->carrier_data;

	fa_writel(fa, cdata->fa_irq_dma_base,
		  &fa_spec_regs[ZFA_IRQ_DMA_ENABLE_MASK],
		  FA_SPEC_IRQ_DMA_ALL);
	return 0;
}
/*
 * Mask the DMA interrupt sources on the carrier IRQ controller.
 *
 * NOTE(review): writing FA_SPEC_IRQ_DMA_NONE (0x0) to the DISABLE mask
 * looks inconsistent with enable's FA_SPEC_IRQ_DMA_ALL -- confirm the
 * gateware semantics of this register; behaviour kept unchanged here.
 */
static int fa_spec_disable_irqs(struct fa_dev *fa)
{
	struct fa_spec_data *cdata = fa->carrier_data;

	fa_writel(fa, cdata->fa_irq_dma_base,
		  &fa_spec_regs[ZFA_IRQ_DMA_DISABLE_MASK],
		  FA_SPEC_IRQ_DMA_NONE);
	return 0;
}
/*
 * Nothing to acknowledge at carrier level: the pending sources are
 * cleared during status read-out (see fa_get_irq_status in fa-spec-irq.c)
 */
static int fa_spec_ack_irq(struct fa_dev *fa, int irq_id)
{
	return 0;
}
/*
 * Carrier operations for the SPEC. The GN4124 DMA engine is asynchronous,
 * hence the dedicated DMA IRQ hooks; completion/error is signalled to
 * fa_spec_irq_handler() (fa-spec-irq.c) which then calls the dma_done or
 * dma_error paths.
 */
struct fa_carrier_op fa_spec_op = {
	.init = fa_spec_init,
	.reset_core = fa_spec_reset,
	.exit = fa_spec_exit,
	.setup_irqs = fa_spec_setup_irqs,
	.free_irqs = fa_spec_free_irqs,
	.enable_irqs = fa_spec_enable_irqs,
	.disable_irqs = fa_spec_disable_irqs,
	.ack_irq = fa_spec_ack_irq,
	.dma_start = fa_spec_dma_start,
	.dma_done = fa_spec_dma_done,
	.dma_error = fa_spec_dma_error,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright CERN 2012-2019
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mm.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
/**
 * gncore_dma_fill - serialize one scatterlist entry into a DMA descriptor
 * @zsg: ZIO DMA scatter-gather descriptor to fill
 *
 * Converts a scatterlist entry into a gncore_dma_item. Items are chained
 * through their bus addresses (next_addr_l/h); the very first item is also
 * written into the DMA engine registers so the hardware knows where to
 * start, the following ones are fetched by the engine itself.
 *
 * Return: always 0.
 */
static int gncore_dma_fill(struct zio_dma_sg *zsg)
{
	struct gncore_dma_item *item = (struct gncore_dma_item *)zsg->page_desc;
	struct scatterlist *sg = zsg->sg;
	struct zio_channel *chan = zsg->zsgt->chan;
	struct fa_dev *fa = chan->cset->zdev->priv_d;
	struct fa_spec_data *spec_data = fa->carrier_data;
	dma_addr_t tmp;

	/* Prepare DMA item */
	item->start_addr = zsg->dev_mem_off;
	item->dma_addr_l = sg_dma_address(sg) & 0xFFFFFFFF;
	item->dma_addr_h = (uint64_t)sg_dma_address(sg) >> 32;
	item->dma_len = sg_dma_len(sg);

	if (!sg_is_last(sg)) {/* more transfers */
		/* uint64_t so it works on 32 and 64 bit */
		tmp = zsg->zsgt->dma_page_desc_pool;
		tmp += (zsg->zsgt->page_desc_size * (zsg->page_idx + 1));
		item->next_addr_l = ((uint64_t)tmp) & 0xFFFFFFFF;
		item->next_addr_h = ((uint64_t)tmp) >> 32;
		item->attribute = 0x1; /* more items */
	} else {
		item->attribute = 0x0; /* last item */
	}

	/* The first item is written on the device */
	if (zsg->page_idx == 0) {
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_ADDR], item->start_addr);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_ADDR_L], item->dma_addr_l);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_ADDR_H], item->dma_addr_h);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_LEN], item->dma_len);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_NEXT_L], item->next_addr_l);
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_NEXT_H], item->next_addr_h);
		/* Set that there is a next item */
		fa_writel(fa, spec_data->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_BR_LAST], item->attribute);
	}

	dev_dbg(fa->msgdev, "DMA item %d (block %d)\n"
		"    addr   0x%x\n"
		"    addr_l 0x%x\n"
		"    addr_h 0x%x\n"
		"    length %d\n"
		"    next_l 0x%x\n"
		"    next_h 0x%x\n"
		"    last   0x%x\n",
		zsg->page_idx, zsg->block_idx,
		item->start_addr, item->dma_addr_l, item->dma_addr_h,
		item->dma_len, item->next_addr_l, item->next_addr_h,
		item->attribute);

	return 0;
}
/*
 * Build the scatter-gather mapping for all acquired shots and kick the
 * GN4124 DMA engine. Completion is asynchronous: fa_spec_irq_handler()
 * receives DMA_DONE/DMA_ERR and drives the dma_done/dma_error paths.
 *
 * Return: 0 on success, PTR_ERR from zio_dma_alloc_sg() or the error from
 * zio_dma_map_sg().
 *
 * NOTE(review): `blocks[fa->n_shots]` is a variable-length array whose
 * bound is configuration driven; consider a kcalloc()'d array instead --
 * needs confirmation that zio_dma_alloc_sg() does not keep the pointer.
 */
int fa_spec_dma_start(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;
	struct fa_spec_data *spec_data = fa->carrier_data;
	struct zio_channel *interleave = cset->interleave;
	struct zfad_block *zfad_block = interleave->priv_d;
	struct zio_block *blocks[fa->n_shots];
	int i, err;

	/*
	 * FIXME very inefficient because arm trigger already prepare
	 * something like zio_block_sg. In the future ZIO can alloc more
	 * than 1 block at time
	 */
	for (i = 0; i < fa->n_shots; ++i)
		blocks[i] = zfad_block[i].block;

	fa->zdma = zio_dma_alloc_sg(interleave, fa->pdev->dev.parent, blocks,
				    fa->n_shots, GFP_ATOMIC);
	if (IS_ERR(fa->zdma))
		return PTR_ERR(fa->zdma);

	/* Fix block memory offset
	 * FIXME when official ZIO has multishot and DMA
	 */
	for (i = 0; i < fa->zdma->n_blocks; ++i)
		fa->zdma->sg_blocks[i].dev_mem_off = zfad_block[i].dev_mem_off;

	err = zio_dma_map_sg(fa->zdma, sizeof(struct gncore_dma_item),
			     gncore_dma_fill);
	if (err)
		goto out_map_sg;

	/* Start DMA transfer */
	fa_writel(fa, spec_data->fa_dma_base,
		  &fa_spec_regs[ZFA_DMA_CTL_START], 1);

	return 0;

out_map_sg:
	zio_dma_free_sg(fa->zdma);
	return err;
}
/* Tear down the scatter-gather mapping built by fa_spec_dma_start() */
void fa_spec_dma_done(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;

	zio_dma_unmap_sg(fa->zdma);
	zio_dma_free_sg(fa->zdma);
}
/* Release the DMA resources, then report the engine status register */
void fa_spec_dma_error(struct zio_cset *cset)
{
	struct fa_dev *fa = cset->zdev->priv_d;
	struct fa_spec_data *cdata = fa->carrier_data;
	uint32_t status;

	fa_spec_dma_done(cset);

	status = fa_readl(fa, cdata->fa_dma_base,
			  &fa_spec_regs[ZFA_DMA_STA]);
	if (status)
		dev_err(fa->msgdev,
			"DMA error (status 0x%x). All acquisition lost\n",
			status);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright CERN 2012-2019
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-spec.h"
/*
 * fa_get_irq_status
 * @fa: adc descriptor
 * @irq_status: destination of irq status
 *
 * Get irq and clear the register. To clear an interrupt we have to write 1
 * on the handled interrupt. We handle all interrupt so we clear all interrupts
 */
static void fa_get_irq_status(struct fa_dev *fa, uint32_t *irq_status)
{
	struct fa_spec_data *cdata = fa->carrier_data;
	uint32_t pending;

	/* Read the pending interrupt sources from the DMA IRQ controller */
	pending = fa_readl(fa, cdata->fa_irq_dma_base,
			   &fa_spec_regs[ZFA_IRQ_DMA_SRC]);
	*irq_status = pending;

	dev_dbg(fa->msgdev,
		"core DMA: %p fired an interrupt. IRQ status register: 0x%x\n",
		cdata->fa_irq_dma_base, pending);

	/* Writing the pending bits back acknowledges (clears) them all */
	if (pending)
		fa_writel(fa, cdata->fa_irq_dma_base,
			  &fa_spec_regs[ZFA_IRQ_DMA_SRC], pending);
}
/*
 * fa_spec_irq_handler
 * @irq: interrupt number
 * @arg: pointer to the fa_dev descriptor registered with the IRQ
 *
 * NOTE(review): this comment mentions the "svec" firmware but lives in the
 * SPEC DMA IRQ path -- presumably a copy-paste; confirm and reword.
 *
 * The ADC svec firmware fires interrupt from a single wishbone core
 * and throught the VIC ACQ_END and TRIG events. Note about "TRIG"
 * event: the main reason to listen this interrupt was to read the
 * intermediate time stamps in case of multishots.
 * With the new firmware (>=3.0) the stamps come with the data,
 * therefore the driver doesn't have to listen "TRIG" event. This
 * enhancement remove completely the risk of loosing interrupt in case
 * of small number of samples and makes the retry loop in the hanlder
 * obsolete.
 */
irqreturn_t fa_spec_irq_handler(int irq, void *arg)
{
	struct fa_dev *fa = arg;
	struct zio_cset *cset = fa->zdev->cset;
	uint32_t status;

	/* irq to handle (reading also acknowledges the pending sources) */
	fa_get_irq_status(fa, &status);
	if (!status)
		return IRQ_NONE;	/* not our interrupt */

	if (unlikely(!fa->n_shots || !cset->interleave->priv_d)) {
		/*
		 * Mainly this may happen when you are playing with DMA with
		 * an user-space program or another driver. 99% of the time
		 * is for debugging purpose. So, if you are seriusly working
		 * with DMA with two different programs/drivers ... well *you*
		 * have a problem and this driver may crash badly.
		 */
		dev_err(fa->msgdev,
			"No programmed shot, implies no DMA to perform\n");
		goto out;
	}

	/* FIXME handle it better */
	/* if (unlikely(fa->last_irq_core_src == irq_core_base)) { */
	/*	WARN(1, "Cannot handle two consecutives %s interrupt." */
	/*	     "The ADC doesn't behave properly\n", */
	/*	     (irq_core_base == fa->fa_irq_adc_base) ? "ACQ" : "DMA"); */
	/*	/\* Stop Acquisition, ADC it is not working properly *\/ */
	/*	zfad_fsm_command(fa, FA100M14B4C_CMD_STOP); */
	/*	fa->last_irq_core_src = FA_SPEC_IRQ_SRC_NONE; */
	/*	goto out; */
	/* } */

	dev_dbg(fa->msgdev, "Handle ADC interrupts\n");

	/* Dispatch on the acknowledged source: completion or error */
	if (status & FA_SPEC_IRQ_DMA_DONE)
		zfad_dma_done(cset);
	else if (unlikely(status & FA_SPEC_IRQ_DMA_ERR))
		zfad_dma_error(cset);

	/* register the core which just fired the IRQ */
	/* check proper sequence of IRQ in case of multi IRQ (ACQ + DMA)*/
	/* FIXME */
	/* fa->last_irq_core_src = irq_core_base; */

out:
	/*
	 * DMA transaction is finished
	 * we can safely lower CSET_BUSY
	 */
	spin_lock(&cset->lock);
	cset->flags &= ~ZIO_CSET_HW_BUSY;
	spin_unlock(&cset->lock);

	return IRQ_HANDLED;
}
/*
* Copyright CERN 2012
* Author: Federico Vaga <federico.vaga@gmail.com>
*
* Table of register masks, used by driver functions
*/
#include "fa-spec.h"
/* Definition of the fa spec registers field: offset - mask - isbitfield */
const struct zfa_field_desc fa_spec_regs[] = {
	/* Carrier CSR -- offsets relative to fa->fa_carrier_csr_base */
	[ZFA_CAR_FMC_PRES] =		{0x04, 0x1, 1},
	[ZFA_CAR_P2L_PLL] =		{0x04, 0x2, 1},
	[ZFA_CAR_SYS_PLL] =		{0x04, 0x4, 1},
	[ZFA_CAR_DDR_CAL] =		{0x04, 0x8, 1},
	[ZFA_CAR_FMC_RES] =		{0x0c, 0x1, 1},
	/* IRQ -- offsets relative to fa_spec_data->fa_irq_dma_base */
	[ZFA_IRQ_DMA_DISABLE_MASK] =	{0x00, 0x00000003, 0},
	[ZFA_IRQ_DMA_ENABLE_MASK] =	{0x04, 0x00000003, 0},
	[ZFA_IRQ_DMA_MASK_STATUS] =	{0x08, 0x00000003, 0},
	[ZFA_IRQ_DMA_SRC] =		{0x0C, 0x00000003, 0},
	/* DMA -- offsets relative to fa_spec_data->fa_dma_base */
	[ZFA_DMA_CTL_SWP] =		{0x00, 0x0000000C, 1},
	[ZFA_DMA_CTL_ABORT] =		{0x00, 0x00000002, 1},
	[ZFA_DMA_CTL_START] =		{0x00, 0x00000001, 1},
	[ZFA_DMA_STA] =			{0x04, 0x00000007, 0},
	[ZFA_DMA_ADDR] =		{0x08, 0xFFFFFFFF, 0},
	[ZFA_DMA_ADDR_L] =		{0x0C, 0xFFFFFFFF, 0},
	[ZFA_DMA_ADDR_H] =		{0x10, 0xFFFFFFFF, 0},
	[ZFA_DMA_LEN] =			{0x14, 0xFFFFFFFF, 0},
	[ZFA_DMA_NEXT_L] =		{0x18, 0xFFFFFFFF, 0},
	[ZFA_DMA_NEXT_H] =		{0x1C, 0xFFFFFFFF, 0},
	[ZFA_DMA_BR_DIR] =		{0x20, 0x00000002, 1},
	[ZFA_DMA_BR_LAST] =		{0x20, 0x00000001, 1},
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright CERN 2012-2019
*/
#ifndef __FA_SPEC_CORE_H__
#define __FA_SPEC_CORE_H__
#include <linux/scatterlist.h>
#include <linux/irqreturn.h>
#include "fmc-adc-100m14b4cha.h"
#include "field-desc.h"
/* default spec gateware */
#define FA_GATEWARE_SPEC "fmc/spec-fmc-adc-100m14b.bin"
/* Should be replaced by an sdb query */
#define SPEC_FA_DMA_MEM_OFF 0x01000
/*
* fa_dma_item: The information about a DMA transfer
* @start_addr: pointer where start to retrieve data from device memory
* @dma_addr_l: low 32bit of the dma address on host memory
* @dma_addr_h: high 32bit of the dma address on host memory
* @dma_len: number of bytes to transfer from device to host
* @next_addr_l: low 32bit of the address of the next memory area to use
* @next_addr_h: high 32bit of the address of the next memory area to use
* @attribute: dma information about data transferm. At the moment it is used
* only to provide the "last item" bit, direction is fixed to
* device->host
*/
/* In-memory DMA descriptor consumed by the GN4124 engine; the field
 * offsets below are part of the hardware contract */
struct gncore_dma_item {
	uint32_t start_addr;	/* 0x00 */
	uint32_t dma_addr_l;	/* 0x04 */
	uint32_t dma_addr_h;	/* 0x08 */
	uint32_t dma_len;	/* 0x0C */
	uint32_t next_addr_l;	/* 0x10 */
	uint32_t next_addr_h;	/* 0x14 */
	uint32_t attribute;	/* 0x18 */
	uint32_t reserved;	/* ouch -- presumably pads the item to 32
				 * bytes; confirm against the gateware */
};
/* SPEC CSR */
enum fa_spec_regs_id {
/* CSR */
ZFA_CAR_FMC_PRES,
ZFA_CAR_P2L_PLL,
ZFA_CAR_SYS_PLL,
ZFA_CAR_DDR_CAL,
ZFA_CAR_FMC_RES,
/* IRQ DMA: DMA spec specific irq controller */
ZFA_IRQ_DMA_DISABLE_MASK,
ZFA_IRQ_DMA_ENABLE_MASK,
ZFA_IRQ_DMA_MASK_STATUS,
ZFA_IRQ_DMA_SRC,
/* DMA */
ZFA_DMA_CTL_SWP,
ZFA_DMA_CTL_ABORT,
ZFA_DMA_CTL_START,
ZFA_DMA_STA,
ZFA_DMA_ADDR,
ZFA_DMA_ADDR_L,
ZFA_DMA_ADDR_H,
ZFA_DMA_LEN,
ZFA_DMA_NEXT_L,
ZFA_DMA_NEXT_H,
ZFA_DMA_BR_DIR,
ZFA_DMA_BR_LAST,
};
/* SPEC ADC have to listen two IRQ sources managed by two different cores */
#define FA_SPEC_IRQ_SRC_NONE 0
#define FA_SPEC_IRQ_SRC_ACQ 1
#define FA_SPEC_IRQ_SRC_DMA 2
/* DMA spec specific IRQ values */
enum fa_spec_irq {
FA_SPEC_IRQ_DMA_NONE = 0x0,
FA_SPEC_IRQ_DMA_DONE = 0x1,
FA_SPEC_IRQ_DMA_ERR = 0x2,
FA_SPEC_IRQ_DMA_ALL = 0x3,
};
/* specific carrier data */
struct fa_spec_data {
	/* DMA attributes */
	void *fa_dma_base;	/* ioremap()ed GN4124 DMA core
				 * (see fa_spec_init) */
	void *fa_irq_dma_base;	/* DMA IRQ controller: fa_dma_base + 0x200 */
	struct fa_dma_item *items;	/* NOTE(review): `struct fa_dma_item`
					 * is never defined; the descriptor
					 * type actually used is
					 * `struct gncore_dma_item` --
					 * confirm and align */
	dma_addr_t dma_list_item;
	unsigned int n_dma_err; /* statistics */
};
/* spec specific hardware registers */
extern const struct zfa_field_desc fa_spec_regs[];
/* spec irq handler */
extern irqreturn_t fa_spec_irq_handler(int irq, void *dev_id);
/* functions exported by fa-dma.c */
extern int fa_spec_dma_start(struct zio_cset *cset);
extern void fa_spec_dma_done(struct zio_cset *cset);
extern void fa_spec_dma_error(struct zio_cset *cset);
#endif /* __FA_SPEC_CORE_H__*/
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2019 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-svec.h"
static int fa_svec_init(struct fa_dev *fa)
{
struct fa_svec_data *cdata;
struct resource *r;
unsigned int res_i;
cdata = kzalloc(sizeof(struct fa_svec_data), GFP_KERNEL);
if (!cdata)
return -ENOMEM;
r = platform_get_resource(fa->pdev, IORESOURCE_BUS, ADC_CARR_VME_ADDR);
cdata->vme_ddr_data = r->start;
r = platform_get_resource(fa->pdev, IORESOURCE_BUS, ADC_BUS_FMC_SLOT);
switch(r->start) {
case 1:
res_i = FA_CAR_FMC0_RES;
break;
case 2:
res_i = FA_CAR_FMC1_RES;
break;
default:
return -EINVAL;
}
cdata->fa_dma_ddr_addr = fa->fa_top_level + 0x2000;
fa_writel(fa, fa->fa_carrier_csr_base, &fa_svec_regfield[res_i], 1);
mdelay(50);
fa_writel(fa, fa->fa_carrier_csr_base, &fa_svec_regfield[res_i], 0);
mdelay(50);
/* register carrier data */
fa->carrier_data = cdata;
return 0;
}
/* Release the carrier private data allocated by fa_svec_init() */
static void fa_svec_exit(struct fa_dev *fa)
{
	kfree(fa->carrier_data);
}
/*
 * Carrier operations for the SVEC. No carrier-specific IRQ hooks are
 * needed: the VME DMA transfer performed in fa_svec_dma_start() is
 * synchronous, so dma_done has nothing left to do.
 */
struct fa_carrier_op fa_svec_op = {
	.init = fa_svec_init,
	.exit = fa_svec_exit,
	.dma_start = fa_svec_dma_start,
	.dma_done = fa_svec_dma_done,
	.dma_error = fa_svec_dma_error,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2012-2019 CERN (www.cern.ch)
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/byteorder.h>
#include "fmc-adc-100m14b4cha.h"
#include "fa-svec.h"
#include "vmebus.h"
#define VME_NO_ADDR_INCREMENT 1
/* FIXME: move to include again */
#ifndef lower_32_bits
#define lower_32_bits(n) ((u32)(n))
#endif /* lower_32_bits */
/*
 * Fill a vmebus DMA descriptor for a device-to-host transfer.
 *
 * @desc:     descriptor to initialize (fully overwritten)
 * @vme_addr: source address inside the VME window
 * @addr_dest: destination buffer in host memory
 * @len:      number of bytes to move
 */
static void build_dma_desc(struct vme_dma *desc, unsigned long vme_addr,
			   void *addr_dest, ssize_t len)
{
	struct vme_dma_attr *src_attr = &desc->src;
	struct vme_dma_attr *dst_attr = &desc->dst;

	memset(desc, 0, sizeof(*desc));

	desc->dir = VME_DMA_FROM_DEVICE;
	desc->length = len;
	desc->novmeinc = VME_NO_ADDR_INCREMENT;

	desc->ctrl.pci_block_size = VME_DMA_BSIZE_4096;
	desc->ctrl.pci_backoff_time = VME_DMA_BACKOFF_0;
	desc->ctrl.vme_block_size = VME_DMA_BSIZE_4096;
	desc->ctrl.vme_backoff_time = VME_DMA_BACKOFF_0;

	/* Source: the VME window, D32 accesses at a fixed address */
	src_attr->data_width = VME_D32;
	src_attr->am = VME_A24_USER_DATA_SCT;
	/*src_attr->am = VME_A24_USER_MBLT;*/
	src_attr->addru = upper_32_bits(vme_addr);
	src_attr->addrl = lower_32_bits(vme_addr);

	/* Destination: the host buffer */
	dst_attr->addru = upper_32_bits((unsigned long)addr_dest);
	dst_attr->addrl = lower_32_bits((unsigned long)addr_dest);
}
/* Endianess */
#ifndef LITTLE_ENDIAN
#define LITTLE_ENDIAN 0
#endif
#ifndef BIG_ENDIAN
#define BIG_ENDIAN 1
#endif

/* Probe the host byte order at run time via the first byte of an int 1 */
static int __get_endian(void)
{
	union {
		int value;
		char bytes[sizeof(int)];
	} probe = { .value = 1 };

	return probe.bytes[0] == 1 ? LITTLE_ENDIAN : BIG_ENDIAN;
}
/*
 * __endianness - convert a big-endian buffer to CPU byte order in place
 * @byte_length: buffer length in bytes (any trailing bytes beyond a
 *               multiple of 4 are left untouched)
 * @buffer: data to convert, treated as an array of 32-bit words
 *
 * VME data arrives big-endian; on a big-endian CPU nothing needs to be
 * done, on a little-endian CPU every 32-bit word (samples and trigger
 * timetag alike) is byte-swapped.
 */
static void __endianness(unsigned int byte_length, void *buffer)
{
	uint32_t *word = buffer;
	unsigned int n_words = byte_length / 4;
	unsigned int w;

	if (__get_endian() != LITTLE_ENDIAN)
		return;

	for (w = 0; w < n_words; w++)
		word[w] = __be32_to_cpu(word[w]);
}
int fa_svec_dma_start(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
struct fa_svec_data *svec_data = fa->carrier_data;
struct zio_channel *interleave = cset->interleave;
struct zfad_block *fa_dma_block = interleave->priv_d;
int i;
struct vme_dma desc; /* Vme driver DMA structure */
/*
* write the data address in the ddr_addr register: this
* address has been computed after ACQ_END by looking to the
* trigger position see fa-irq.c::irq_acq_end.
* Be careful: the SVEC HW version expects an address of 32bits word
* therefore mem-offset in byte is translated into 32bit word
**/
fa_writel(fa, svec_data->fa_dma_ddr_addr,
&fa_svec_regfield[FA_DMA_DDR_ADDR],
fa_dma_block[0].dev_mem_off/4);
pr_info("%s:%d 0x%x\n", __func__, __LINE__,
fa_dma_block[0].dev_mem_off/4);
/* Execute DMA shot by shot */
for (i = 0; i < fa->n_shots; ++i) {
dev_info(fa->msgdev,
"configure DMA descriptor shot %d "
"vme addr: 0x%llx destination address: 0x%p len: %d\n",
i, (long long)svec_data->vme_ddr_data,
fa_dma_block[i].block->data,
(int)fa_dma_block[i].block->datalen);
memset(fa_dma_block[i].block->data, 5, fa_dma_block[i].block->datalen);
build_dma_desc(&desc, svec_data->vme_ddr_data,
fa_dma_block[i].block->data,
fa_dma_block[i].block->datalen);
if (vme_do_dma_kernel(&desc))
return -1;
__endianness(fa_dma_block[i].block->datalen,
fa_dma_block[i].block->data);
}
return 0;
}
/*
 * fa_svec_dma_done - DMA completion hook for the SVEC carrier
 * @cset: ZIO channel set whose transfer completed
 *
 * The SVEC readout in fa_svec_dma_start() is synchronous, so by the
 * time this hook runs there is no pending work left.
 */
void fa_svec_dma_done(struct zio_cset *cset)
{
	/* nothing special to do */
}
void fa_svec_dma_error(struct zio_cset *cset)
{
struct fa_dev *fa = cset->zdev->priv_d;
dev_err(fa->msgdev,
"DMA error. All acquisition lost\n");
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright CERN 2012-2019
* Author: Federico Vaga <federico.vaga@gmail.com>
*/
#include "fa-svec.h"
/*
 * fa svec specific registers field: offset - mask - isbitfield
 * Indexed by enum fa_spec_regs_id (see fa-svec.h). Offsets are
 * relative to the register-specific base address passed to
 * fa_writel()/fa_readl(), not to a single common block.
 */
const struct zfa_field_desc fa_svec_regfield[] = {
	/* DMA: 24-bit address and data fields */
	[FA_DMA_DDR_ADDR] = {0x0000, 0x00FFFFFF, 0},
	[FA_DMA_DDR_DATA] = {0x0000, 0x00FFFFFF, 0},
	/* CSR: single-bit fields at offsets 0x0004 (status) and 0x000C (reset) */
	[FA_CAR_FMC0_PRES] = {0x0004, 0x00000001, 1},
	[FA_CAR_FMC1_PRES] = {0x0004, 0x00000002, 1},
	[FA_CAR_SYS_PLL] = {0x0004, 0x00000004, 1},
	[FA_CAR_DDR0_CAL] = {0x0004, 0x00000008, 1},
	[FA_CAR_DDR1_CAL] = {0x0004, 0x00000010, 1},
	[FA_CAR_FMC0_RES] = {0x000C, 0x00000001, 1},
	[FA_CAR_FMC1_RES] = {0x000C, 0x00000002, 1},
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright CERN 2012-2019
*/
#ifndef __FA_SVEC_CORE_H__
#define __FA_SVEC_CORE_H__
#include <linux/irqreturn.h>
#include "fmc-adc-100m14b4cha.h"
#include "field-desc.h"
/* default SVEC gateware */
#define FA_GATEWARE_SVEC "fmc/svec-fmc-adc-100m14b.bin"
/*
 * SVEC CSR register/field identifiers; these index fa_svec_regfield[].
 * NOTE(review): the enum tag still reads "spec" — looks like a
 * copy/paste from the SPEC carrier header; renaming would ripple into
 * any file using the tag, so it is left as-is here.
 */
enum fa_spec_regs_id {
	/* DMA */
	FA_DMA_DDR_ADDR = 0,
	FA_DMA_DDR_DATA,
	/* CSR */
	FA_CAR_FMC0_PRES,
	FA_CAR_FMC1_PRES,
	FA_CAR_SYS_PLL,
	FA_CAR_DDR0_CAL,
	FA_CAR_DDR1_CAL,
	/* FMC reset bits: pulsed 1 then 0 at init time (fa_svec_init) */
	FA_CAR_FMC0_RES,
	FA_CAR_FMC1_RES,
};
/* specific carrier data, stored in fa_dev->carrier_data */
struct fa_svec_data {
	/* DMA attributes */
	unsigned long vme_ddr_data; /* VME source address for the DMA readout */
	void *fa_dma_ddr_addr; /* base used by fa_writel() for FA_DMA_DDR_ADDR */
	unsigned int n_dma_err; /* statistics: count of failed DMA transfers */
};
/* svec specific hardware registers */
extern const struct zfa_field_desc fa_svec_regfield[];
/* svec irq handler */
extern irqreturn_t fa_svec_irq_handler(int irq, void *dev_id);
/* functions exported by fa-svec-dma.c */
extern int fa_svec_dma_start(struct zio_cset *cset);
extern void fa_svec_dma_done(struct zio_cset *cset);
extern void fa_svec_dma_error(struct zio_cset *cset);
#endif /* __FA_SVEC_CORE_H__*/
......@@ -382,6 +382,8 @@ static int zfat_arm_trigger(struct zio_ti *ti)
dev_mem_off += size;
dev_dbg(fa->msgdev, "next dev_mem_off 0x%x (+%d)\n",
dev_mem_off, size);
zfad_block[i].cset = ti->cset;
}
err = ti->cset->raw_io(ti->cset);
......
......@@ -150,6 +150,7 @@ struct fa_calib {
};
#ifdef __KERNEL__ /* All the rest is only of kernel users */
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
......@@ -168,17 +169,13 @@ extern int fa_enable_test_data_adc;
enum fa_irq_resource {
ADC_IRQ_TRG = 0,
ADC_IRQ_DMA,
};
enum fa_mem_resource {
ADC_MEM_BASE = 0,
ADC_CARR_MEM_BASE,
ADC_CARR_DMA, /* SPEC only, remove it when we support DMA engine */
};
enum fa_bus_resource {
ADC_BUS_FMC_SLOT = 0,
ADC_CARR_VME_ADDR,
};
......@@ -377,20 +374,6 @@ enum fa_irq_adc {
carrier specific stuff, such as DMA or resets, from
mezzanine-specific operations). */
struct fa_dev; /* forward declaration */
struct fa_carrier_op {
char* (*get_gwname)(void);
int (*init) (struct fa_dev *);
int (*reset_core) (struct fa_dev *);
void (*exit) (struct fa_dev *);
int (*setup_irqs) (struct fa_dev *);
int (*free_irqs) (struct fa_dev *);
int (*enable_irqs) (struct fa_dev *);
int (*disable_irqs) (struct fa_dev *);
int (*ack_irq) (struct fa_dev *, int irq_id);
int (*dma_start)(struct zio_cset *cset);
void (*dma_done)(struct zio_cset *cset);
void (*dma_error)(struct zio_cset *cset);
};
/*
* fa_dev: is the descriptor of the FMC ADC mezzanine
......@@ -421,18 +404,14 @@ struct fa_dev {
void *fa_spi_base;
void *fa_ow_base;
void *fa_top_level;
void *fa_carrier_csr_base;
void *fa_irq_vic_base;
void *fa_irq_adc_base;
void *fa_utc_base;
/* DMA description */
struct zio_dma_sgt *zdma;
struct sg_table sgt;
/* carrier specific functions (init/exit/reset/readout/irq handling) */
struct fa_carrier_op *carrier_op;
/* carrier private data */
void *carrier_data;
int irq_src; /* list of irq sources to listen */
struct work_struct irq_work;
/*
......@@ -444,6 +423,7 @@ struct fa_dev {
/* Acquisition */
unsigned int n_shots;
unsigned int n_fires;
unsigned int transfers_left;
unsigned int mshot_max_samples;
/* Statistic informations */
......@@ -464,6 +444,14 @@ struct fa_dev {
int enable_auto_start;
struct dentry *reg_dump;
/* Operations */
int (*sg_alloc_table_from_pages)(struct sg_table *sgt,
struct page **pages,
unsigned int n_pages,
unsigned int offset,
unsigned long size,
gfp_t gfp_mask);
};
/*
......@@ -472,11 +460,19 @@ struct fa_dev {
* @dev_mem_off is the offset in ADC internal memory. It points to the first
* sample of the stored shot
* @first_nent is the index of the first nent used for this block
* @cset: channel set source for the block
* @tx: DMA transfer descriptor
* @cookie: transfer token
*/
struct zfad_block {
struct zio_block *block;
uint32_t dev_mem_off;
unsigned int first_nent;
struct zio_cset *cset;
struct dma_async_tx_descriptor *tx;
dma_cookie_t cookie;
struct sg_table sgt;
void *dma_ctx;
};
/*
......@@ -577,12 +573,6 @@ extern struct bin_attribute dev_attr_calibration;
/* Global variable exported by fa-core.c */
extern struct workqueue_struct *fa_workqueue;
/* Global variable exported by fa-spec.c */
extern struct fa_carrier_op fa_spec_op;
/* Global variable exported by fa-svec.c */
extern struct fa_carrier_op fa_svec_op;
/* Global variable exported by fa-regfield.c */
extern const struct zfa_field_desc zfad_regs[];
......@@ -597,6 +587,9 @@ extern int zfad_get_chx_index(unsigned long addr, struct zio_channel *chan);
extern int zfad_pattern_data_enable(struct fa_dev *fa, uint16_t pattern,
unsigned int enable);
/* Function exported by fa-dma.c */
extern void fa_irq_work(struct work_struct *work);
/* Functions exported by fa-zio-drv.c */
extern int fa_zio_register(void);
extern void fa_zio_unregister(void);
......@@ -608,11 +601,7 @@ extern int fa_trig_init(void);
extern void fa_trig_exit(void);
/* Functions exported by fa-irq.c */
extern int zfad_dma_start(struct zio_cset *cset);
extern void zfad_dma_done(struct zio_cset *cset);
extern void zfad_dma_error(struct zio_cset *cset);
extern void zfat_irq_trg_fire(struct zio_cset *cset);
extern void zfat_irq_acq_end(struct zio_cset *cset);
extern int fa_setup_irqs(struct fa_dev *fa);
extern int fa_free_irqs(struct fa_dev *fa);
extern int fa_enable_irqs(struct fa_dev *fa);
......