author     Ruslan Bukin <br@FreeBSD.org>  2016-12-20 18:02:07 +0000
committer  Ruslan Bukin <br@FreeBSD.org>  2016-12-20 18:02:07 +0000
commit     85debf7f6e3ef3bfc55df76c8525d7d4b08c5d77
tree       34b7244139a824d4f20d3b9c95698558f2049367  /sys/dev/xdma
parent     d21656dcabd2cd5740927495479e0819bc3e9ca0
Add xDMA -- the DMA abstraction layer, initial version.
xDMA is a DMA framework designed to abstract the interaction between
device drivers and DMA engines.

Project wiki: https://wiki.freebsd.org/xdma

Sponsored by:           DARPA, AFRL
Differential Revision:  https://reviews.freebsd.org/D8807
Notes: svn path=/head/; revision=310330
Diffstat (limited to 'sys/dev/xdma')
-rw-r--r--  sys/dev/xdma/xdma.c           673
-rw-r--r--  sys/dev/xdma/xdma.h           148
-rw-r--r--  sys/dev/xdma/xdma_fdt_test.c  417
-rw-r--r--  sys/dev/xdma/xdma_if.m         94
4 files changed, 1332 insertions, 0 deletions
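
As an orientation aid (not part of the commit), here is a minimal consumer-side sketch of the API this change introduces, modeled on the xdma_fdt_test.c driver in the diff below. The driver name, the "test" dma-names entry, the completion handler and the physical addresses are illustrative assumptions:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus.h>

#include <dev/xdma/xdma.h>

/* Hypothetical completion handler; xdma_callback() runs it once the
 * DMA controller driver reports that the transfer is done. */
static int
mydrv_dma_done(void *arg)
{

    wakeup(arg);

    return (0);
}

/* Hypothetical helper: one memcpy transfer over the channel bound to
 * the "test" entry of the consumer's dma-names property. */
static int
mydrv_dma_copy(device_t dev, uintptr_t src_phys, uintptr_t dst_phys,
    size_t len)
{
    xdma_controller_t *xdma;
    xdma_channel_t *xchan;
    void *ih;

    /* Look up the DMA engine referenced by the FDT "dmas" property. */
    xdma = xdma_ofw_get(dev, "test");
    if (xdma == NULL)
        return (-1);

    /* Allocate a virtual channel backed by a hardware channel. */
    xchan = xdma_channel_alloc(xdma);
    if (xchan == NULL)
        return (-1);

    /* Register the completion callback. */
    if (xdma_setup_intr(xchan, mydrv_dma_done, dev, &ih) != 0)
        return (-1);

    /* Configure a single-block memcpy and start it. */
    if (xdma_prep_memcpy(xchan, src_phys, dst_phys, len) != 0)
        return (-1);
    xdma_begin(xchan);

    /* ... sleep on 'dev' until mydrv_dma_done() fires, then verify ... */

    /* Tear down: the channel first, then the controller reference. */
    xdma_channel_free(xchan);
    xdma_put(xdma);

    return (0);
}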
diff --git a/sys/dev/xdma/xdma.c b/sys/dev/xdma/xdma.c
new file mode 100644
index 000000000000..7de64ae981b9
--- /dev/null
+++ b/sys/dev/xdma/xdma.c
@@ -0,0 +1,673 @@
+/*-
+ * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_platform.h"
+#include <sys/param.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/queue.h>
+#include <sys/kobj.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/limits.h>
+#include <sys/lock.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/sx.h>
+#include <sys/bus_dma.h>
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+#include <xdma_if.h>
+
+MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
+
+/*
+ * Multiple xDMA controllers may work with a single DMA device,
+ * so we have a global lock for physical channel management.
+ */
+static struct mtx xdma_mtx;
+#define XDMA_LOCK() mtx_lock(&xdma_mtx)
+#define XDMA_UNLOCK() mtx_unlock(&xdma_mtx)
+#define XDMA_ASSERT_LOCKED() mtx_assert(&xdma_mtx, MA_OWNED)
+
+/*
+ * Per channel locks.
+ */
+#define XCHAN_LOCK(xchan) mtx_lock(&xchan->mtx_lock)
+#define XCHAN_UNLOCK(xchan) mtx_unlock(&xchan->mtx_lock)
+#define XCHAN_ASSERT_LOCKED(xchan) mtx_assert(&xchan->mtx_lock, MA_OWNED)
+
+/*
+ * Allocate virtual xDMA channel.
+ */
+xdma_channel_t *
+xdma_channel_alloc(xdma_controller_t *xdma)
+{
+ xdma_channel_t *xchan;
+ int ret;
+
+ xchan = malloc(sizeof(xdma_channel_t), M_XDMA, M_WAITOK | M_ZERO);
+ if (xchan == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory for channel.\n", __func__);
+ return (NULL);
+ }
+ xchan->xdma = xdma;
+
+ XDMA_LOCK();
+
+ /* Request a real channel from hardware driver. */
+ ret = XDMA_CHANNEL_ALLOC(xdma->dma_dev, xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't request hardware channel.\n", __func__);
+ XDMA_UNLOCK();
+ free(xchan, M_XDMA);
+
+ return (NULL);
+ }
+
+ TAILQ_INIT(&xchan->ie_handlers);
+ mtx_init(&xchan->mtx_lock, "xDMA", NULL, MTX_DEF);
+
+ TAILQ_INSERT_TAIL(&xdma->channels, xchan, xchan_next);
+
+ XDMA_UNLOCK();
+
+ return (xchan);
+}
+
+int
+xdma_channel_free(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int err;
+
+ xdma = xchan->xdma;
+
+ XDMA_LOCK();
+
+ /* Free the real DMA channel. */
+ err = XDMA_CHANNEL_FREE(xdma->dma_dev, xchan);
+ if (err != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't free real hw channel.\n", __func__);
+ XDMA_UNLOCK();
+ return (-1);
+ }
+
+ xdma_teardown_all_intr(xchan);
+
+ /* Deallocate descriptors, if any. */
+ xdma_desc_free(xchan);
+
+ mtx_destroy(&xchan->mtx_lock);
+
+ TAILQ_REMOVE(&xdma->channels, xchan, xchan_next);
+
+ free(xchan, M_XDMA);
+
+ XDMA_UNLOCK();
+
+ return (0);
+}
+
+int
+xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg,
+ void **ihandler)
+{
+ struct xdma_intr_handler *ih;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ /* Sanity check. */
+ if (cb == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't setup interrupt handler.\n",
+ __func__);
+
+ return (-1);
+ }
+
+ ih = malloc(sizeof(struct xdma_intr_handler),
+ M_XDMA, M_WAITOK | M_ZERO);
+ if (ih == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory for interrupt handler.\n",
+ __func__);
+
+ return (-1);
+ }
+
+ ih->cb = cb;
+ ih->cb_user = arg;
+
+ TAILQ_INSERT_TAIL(&xchan->ie_handlers, ih, ih_next);
+
+ if (ihandler != NULL) {
+ *ihandler = ih;
+ }
+
+ return (0);
+}
+
+int
+xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih)
+{
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ /* Sanity check. */
+ if (ih == NULL) {
+ device_printf(xdma->dev,
+ "%s: Can't teardown interrupt.\n", __func__);
+ return (-1);
+ }
+
+ TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
+ free(ih, M_XDMA);
+
+ return (0);
+}
+
+int
+xdma_teardown_all_intr(xdma_channel_t *xchan)
+{
+ struct xdma_intr_handler *ih_tmp;
+ struct xdma_intr_handler *ih;
+ xdma_controller_t *xdma;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
+ TAILQ_REMOVE(&xchan->ie_handlers, ih, ih_next);
+ free(ih, M_XDMA);
+ }
+
+ return (0);
+}
+
+static void
+xdma_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
+{
+ xdma_channel_t *xchan;
+ int i;
+
+ xchan = (xdma_channel_t *)arg;
+ KASSERT(xchan != NULL, ("xchan is NULL"));
+
+ if (err) {
+ xchan->map_err = 1;
+ return;
+ }
+
+ for (i = 0; i < nseg; i++) {
+ xchan->descs_phys[i].ds_addr = segs[i].ds_addr;
+ xchan->descs_phys[i].ds_len = segs[i].ds_len;
+ }
+}
+
+static int
+xdma_desc_alloc_bus_dma(xdma_channel_t *xchan, uint32_t desc_size,
+ uint32_t align)
+{
+ xdma_controller_t *xdma;
+ bus_size_t all_desc_sz;
+ xdma_config_t *conf;
+ int nsegments;
+ int err;
+
+ xdma = xchan->xdma;
+ conf = &xchan->conf;
+
+ nsegments = conf->block_num;
+ all_desc_sz = (nsegments * desc_size);
+
+ err = bus_dma_tag_create(
+ bus_get_dma_tag(xdma->dev),
+ align, desc_size, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ all_desc_sz, nsegments, /* maxsize, nsegments*/
+ desc_size, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &xchan->dma_tag);
+ if (err) {
+ device_printf(xdma->dev,
+ "%s: Can't create bus_dma tag.\n", __func__);
+ return (-1);
+ }
+
+ err = bus_dmamem_alloc(xchan->dma_tag, (void **)&xchan->descs,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &xchan->dma_map);
+ if (err) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory for descriptors.\n", __func__);
+ return (-1);
+ }
+
+ xchan->descs_phys = malloc(nsegments * sizeof(xdma_descriptor_t), M_XDMA,
+ (M_WAITOK | M_ZERO));
+
+ xchan->map_err = 0;
+ err = bus_dmamap_load(xchan->dma_tag, xchan->dma_map, xchan->descs,
+ all_desc_sz, xdma_dmamap_cb, xchan, BUS_DMA_WAITOK);
+ if (err) {
+ device_printf(xdma->dev,
+ "%s: Can't load DMA map.\n", __func__);
+ return (-1);
+ }
+
+ if (xchan->map_err != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't load DMA map.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+/*
+ * This function is called by the DMA controller driver.
+ */
+int
+xdma_desc_alloc(xdma_channel_t *xchan, uint32_t desc_size, uint32_t align)
+{
+ xdma_controller_t *xdma;
+ xdma_config_t *conf;
+ int ret;
+
+ XCHAN_ASSERT_LOCKED(xchan);
+
+ xdma = xchan->xdma;
+ if (xdma == NULL) {
+ printf("%s: Channel was not allocated properly.\n", __func__);
+ return (-1);
+ }
+
+ if (xchan->flags & XCHAN_DESC_ALLOCATED) {
+ device_printf(xdma->dev,
+ "%s: Descriptors already allocated.\n", __func__);
+ return (-1);
+ }
+
+ if ((xchan->flags & XCHAN_CONFIGURED) == 0) {
+ device_printf(xdma->dev,
+ "%s: Channel has no configuration.\n", __func__);
+ return (-1);
+ }
+
+ conf = &xchan->conf;
+
+ XCHAN_UNLOCK(xchan);
+ ret = xdma_desc_alloc_bus_dma(xchan, desc_size, align);
+ XCHAN_LOCK(xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't allocate memory for descriptors.\n",
+ __func__);
+ return (-1);
+ }
+
+ xchan->flags |= XCHAN_DESC_ALLOCATED;
+
+ /* We are going to write to descriptors. */
+ bus_dmamap_sync(xchan->dma_tag, xchan->dma_map, BUS_DMASYNC_PREWRITE);
+
+ return (0);
+}
+
+int
+xdma_desc_free(xdma_channel_t *xchan)
+{
+
+ if ((xchan->flags & XCHAN_DESC_ALLOCATED) == 0) {
+ /* No descriptors allocated. */
+ return (-1);
+ }
+
+ bus_dmamap_unload(xchan->dma_tag, xchan->dma_map);
+ bus_dmamem_free(xchan->dma_tag, xchan->descs, xchan->dma_map);
+ bus_dma_tag_destroy(xchan->dma_tag);
+ free(xchan->descs_phys, M_XDMA);
+
+ xchan->flags &= ~(XCHAN_DESC_ALLOCATED);
+
+ return (0);
+}
+
+int
+xdma_prep_memcpy(xdma_channel_t *xchan, uintptr_t src_addr,
+ uintptr_t dst_addr, size_t len)
+{
+ xdma_controller_t *xdma;
+ xdma_config_t *conf;
+ int ret;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ conf = &xchan->conf;
+ conf->direction = XDMA_MEM_TO_MEM;
+ conf->src_addr = src_addr;
+ conf->dst_addr = dst_addr;
+ conf->block_len = len;
+ conf->block_num = 1;
+
+ xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_MEMCPY);
+
+ XCHAN_LOCK(xchan);
+
+ /* Deallocate old descriptors, if any. */
+ xdma_desc_free(xchan);
+
+ ret = XDMA_CHANNEL_PREP_MEMCPY(xdma->dma_dev, xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't prepare memcpy transfer.\n", __func__);
+ XCHAN_UNLOCK(xchan);
+
+ return (-1);
+ }
+
+ if (xchan->flags & XCHAN_DESC_ALLOCATED) {
+ /* Driver created xDMA descriptors. */
+ bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
+ BUS_DMASYNC_POSTWRITE);
+ }
+
+ XCHAN_UNLOCK(xchan);
+
+ return (0);
+}
+
+int
+xdma_prep_cyclic(xdma_channel_t *xchan, enum xdma_direction dir,
+ uintptr_t src_addr, uintptr_t dst_addr, int block_len,
+ int block_num, int src_width, int dst_width)
+{
+ xdma_controller_t *xdma;
+ xdma_config_t *conf;
+ int ret;
+
+ xdma = xchan->xdma;
+ KASSERT(xdma != NULL, ("xdma is NULL"));
+
+ conf = &xchan->conf;
+ conf->direction = dir;
+ conf->src_addr = src_addr;
+ conf->dst_addr = dst_addr;
+ conf->block_len = block_len;
+ conf->block_num = block_num;
+ conf->src_width = src_width;
+ conf->dst_width = dst_width;
+
+ xchan->flags |= (XCHAN_CONFIGURED | XCHAN_TYPE_CYCLIC);
+
+ XCHAN_LOCK(xchan);
+
+ /* Deallocate old descriptors, if any. */
+ xdma_desc_free(xchan);
+
+ ret = XDMA_CHANNEL_PREP_CYCLIC(xdma->dma_dev, xchan);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't prepare cyclic transfer.\n", __func__);
+ XCHAN_UNLOCK(xchan);
+ return (-1);
+ }
+
+ if (xchan->flags & XCHAN_DESC_ALLOCATED) {
+ /* Driver has created xDMA descriptors. */
+ bus_dmamap_sync(xchan->dma_tag, xchan->dma_map,
+ BUS_DMASYNC_POSTWRITE);
+ }
+
+ XCHAN_UNLOCK(xchan);
+
+ return (0);
+}
+
+int
+xdma_begin(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_BEGIN);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't begin the channel operation.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+int
+xdma_terminate(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_TERMINATE);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't terminate the channel operation.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+int
+xdma_pause(xdma_channel_t *xchan)
+{
+ xdma_controller_t *xdma;
+ int ret;
+
+ xdma = xchan->xdma;
+
+ ret = XDMA_CHANNEL_CONTROL(xdma->dma_dev, xchan, XDMA_CMD_PAUSE);
+ if (ret != 0) {
+ device_printf(xdma->dev,
+ "%s: Can't pause the channel operation.\n", __func__);
+ return (-1);
+ }
+
+ return (ret);
+}
+
+int
+xdma_callback(xdma_channel_t *xchan)
+{
+ struct xdma_intr_handler *ih_tmp;
+ struct xdma_intr_handler *ih;
+
+ TAILQ_FOREACH_SAFE(ih, &xchan->ie_handlers, ih_next, ih_tmp) {
+ if (ih->cb != NULL) {
+ ih->cb(ih->cb_user);
+ }
+ }
+
+ return (0);
+}
+
+void
+xdma_assert_locked(void)
+{
+
+ XDMA_ASSERT_LOCKED();
+}
+
+#ifdef FDT
+/*
+ * Notify the DMA driver we have machine-dependent data in FDT.
+ */
+static int
+xdma_ofw_md_data(xdma_controller_t *xdma, pcell_t *cells, int ncells)
+{
+ uint32_t ret;
+
+ ret = XDMA_OFW_MD_DATA(xdma->dma_dev, cells, ncells, (void **)&xdma->data);
+
+ return (ret);
+}
+
+/*
+ * Allocate xdma controller.
+ */
+xdma_controller_t *
+xdma_ofw_get(device_t dev, const char *prop)
+{
+ phandle_t node, parent;
+ xdma_controller_t *xdma;
+ device_t dma_dev;
+ pcell_t *cells;
+ int ncells;
+ int error;
+ int ndmas;
+ int idx;
+
+ node = ofw_bus_get_node(dev);
+ if (node <= 0) {
+ device_printf(dev,
+ "%s called on not ofw based device.\n", __func__);
+ }
+
+ error = ofw_bus_parse_xref_list_get_length(node,
+ "dmas", "#dma-cells", &ndmas);
+ if (error) {
+ device_printf(dev,
+ "%s can't get dmas list.\n", __func__);
+ return (NULL);
+ }
+
+ if (ndmas == 0) {
+ device_printf(dev,
+ "%s dmas list is empty.\n", __func__);
+ return (NULL);
+ }
+
+ error = ofw_bus_find_string_index(node, "dma-names", prop, &idx);
+ if (error != 0) {
+ device_printf(dev,
+ "%s can't find string index.\n", __func__);
+ return (NULL);
+ }
+
+ error = ofw_bus_parse_xref_list_alloc(node, "dmas", "#dma-cells",
+ idx, &parent, &ncells, &cells);
+ if (error != 0) {
+ device_printf(dev,
+ "%s can't get dma device xref.\n", __func__);
+ return (NULL);
+ }
+
+ dma_dev = OF_device_from_xref(parent);
+ if (dma_dev == NULL) {
+ device_printf(dev,
+ "%s can't get dma device.\n", __func__);
+ return (NULL);
+ }
+
+ xdma = malloc(sizeof(struct xdma_controller), M_XDMA, M_WAITOK | M_ZERO);
+ if (xdma == NULL) {
+ device_printf(dev,
+ "%s can't allocate memory for xdma.\n", __func__);
+ return (NULL);
+ }
+ xdma->dev = dev;
+ xdma->dma_dev = dma_dev;
+
+ TAILQ_INIT(&xdma->channels);
+
+ xdma_ofw_md_data(xdma, cells, ncells);
+ free(cells, M_OFWPROP);
+
+ return (xdma);
+}
+#endif
+
+/*
+ * Free xDMA controller object.
+ */
+int
+xdma_put(xdma_controller_t *xdma)
+{
+
+ XDMA_LOCK();
+
+ /* Ensure that no channels are allocated. */
+ if (!TAILQ_EMPTY(&xdma->channels)) {
+ device_printf(xdma->dev, "%s: Can't free xDMA\n", __func__);
+ XDMA_UNLOCK();
+ return (-1);
+ }
+
+ free(xdma->data, M_DEVBUF);
+ free(xdma, M_XDMA);
+
+ XDMA_UNLOCK();
+
+ return (0);
+}
+
+static void
+xdma_init(void)
+{
+
+ mtx_init(&xdma_mtx, "xDMA", NULL, MTX_DEF);
+}
+
+SYSINIT(xdma, SI_SUB_DRIVERS, SI_ORDER_FIRST, xdma_init, NULL);
diff --git a/sys/dev/xdma/xdma.h b/sys/dev/xdma/xdma.h
new file mode 100644
index 000000000000..d4638e75c3fa
--- /dev/null
+++ b/sys/dev/xdma/xdma.h
@@ -0,0 +1,148 @@
+/*-
+ * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _DEV_EXTRES_XDMA_H_
+#define _DEV_EXTRES_XDMA_H_
+
+enum xdma_direction {
+ XDMA_MEM_TO_MEM,
+ XDMA_MEM_TO_DEV,
+ XDMA_DEV_TO_MEM,
+ XDMA_DEV_TO_DEV,
+};
+
+enum xdma_operation_type {
+ XDMA_MEMCPY,
+ XDMA_SG,
+ XDMA_CYCLIC,
+};
+
+enum xdma_command {
+ XDMA_CMD_BEGIN,
+ XDMA_CMD_PAUSE,
+ XDMA_CMD_TERMINATE,
+ XDMA_CMD_TERMINATE_ALL,
+};
+
+struct xdma_controller {
+ device_t dev; /* DMA consumer device_t. */
+ device_t dma_dev; /* A real DMA device_t. */
+ void *data; /* OFW MD part. */
+
+ /* List of virtual channels allocated. */
+ TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
+};
+
+typedef struct xdma_controller xdma_controller_t;
+
+struct xdma_channel_config {
+ enum xdma_direction direction;
+ uintptr_t src_addr; /* Physical address. */
+ uintptr_t dst_addr; /* Physical address. */
+ int block_len; /* In bytes. */
+ int block_num; /* Count of blocks. */
+ int src_width; /* In bytes. */
+ int dst_width; /* In bytes. */
+};
+
+typedef struct xdma_channel_config xdma_config_t;
+
+struct xdma_descriptor {
+ uintptr_t ds_addr;
+ uint32_t ds_len;
+};
+
+typedef struct xdma_descriptor xdma_descriptor_t;
+
+struct xdma_channel {
+ xdma_controller_t *xdma;
+ xdma_config_t conf;
+
+ uint8_t flags;
+#define XCHAN_DESC_ALLOCATED (1 << 0)
+#define XCHAN_CONFIGURED (1 << 1)
+#define XCHAN_TYPE_CYCLIC (1 << 2)
+#define XCHAN_TYPE_MEMCPY (1 << 3)
+
+ /* A real hardware driver channel. */
+ void *chan;
+
+ /* Interrupt handlers. */
+ TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
+
+ /* Descriptors. */
+ bus_dma_tag_t dma_tag;
+ bus_dmamap_t dma_map;
+ void *descs;
+ xdma_descriptor_t *descs_phys;
+ uint8_t map_err;
+
+ struct mtx mtx_lock;
+
+ TAILQ_ENTRY(xdma_channel) xchan_next;
+};
+
+typedef struct xdma_channel xdma_channel_t;
+
+/* xDMA controller alloc/free */
+xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
+int xdma_put(xdma_controller_t *xdma);
+
+xdma_channel_t * xdma_channel_alloc(xdma_controller_t *);
+int xdma_channel_free(xdma_channel_t *);
+
+int xdma_prep_cyclic(xdma_channel_t *, enum xdma_direction,
+ uintptr_t, uintptr_t, int, int, int, int);
+int xdma_prep_memcpy(xdma_channel_t *, uintptr_t, uintptr_t, size_t len);
+int xdma_desc_alloc(xdma_channel_t *, uint32_t, uint32_t);
+int xdma_desc_free(xdma_channel_t *xchan);
+
+/* Channel Control */
+int xdma_begin(xdma_channel_t *xchan);
+int xdma_pause(xdma_channel_t *xchan);
+int xdma_terminate(xdma_channel_t *xchan);
+
+/* Interrupt callback */
+int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *), void *arg, void **);
+int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
+int xdma_teardown_all_intr(xdma_channel_t *xchan);
+int xdma_callback(struct xdma_channel *xchan);
+void xdma_assert_locked(void);
+
+struct xdma_intr_handler {
+ int (*cb)(void *);
+ void *cb_user;
+ struct mtx ih_lock;
+ TAILQ_ENTRY(xdma_intr_handler) ih_next;
+};
+
+#endif /* !_DEV_EXTRES_XDMA_H_ */
diff --git a/sys/dev/xdma/xdma_fdt_test.c b/sys/dev/xdma/xdma_fdt_test.c
new file mode 100644
index 000000000000..7cdaf2d24280
--- /dev/null
+++ b/sys/dev/xdma/xdma_fdt_test.c
@@ -0,0 +1,417 @@
+/*-
+ * Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
+ * All rights reserved.
+ *
+ * This software was developed by SRI International and the University of
+ * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+ * ("CTSRD"), as part of the DARPA CRASH research programme.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* xDMA memcpy test driver. */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/kthread.h>
+#include <sys/module.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/resource.h>
+#include <sys/rman.h>
+
+#include <machine/bus.h>
+
+#include <dev/xdma/xdma.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+/*
+ * To use this test add a compatible node to your dts, e.g.
+ *
+ * xdma_test {
+ * compatible = "freebsd,xdma-test";
+ *
+ * dmas = <&dma 0 0 0xffffffff>;
+ * dma-names = "test";
+ * };
+ */
+
+struct xdmatest_softc {
+ device_t dev;
+ xdma_controller_t *xdma;
+ xdma_channel_t *xchan;
+ void *ih;
+ struct intr_config_hook config_intrhook;
+ char *src;
+ char *dst;
+ uint32_t len;
+ uintptr_t src_phys;
+ uintptr_t dst_phys;
+ bus_dma_tag_t src_dma_tag;
+ bus_dmamap_t src_dma_map;
+ bus_dma_tag_t dst_dma_tag;
+ bus_dmamap_t dst_dma_map;
+ struct mtx mtx;
+ int done;
+ struct proc *newp;
+};
+
+static int xdmatest_probe(device_t dev);
+static int xdmatest_attach(device_t dev);
+static int xdmatest_detach(device_t dev);
+
+static int
+xdmatest_intr(void *arg)
+{
+ struct xdmatest_softc *sc;
+
+ sc = arg;
+
+ sc->done = 1;
+
+ mtx_lock(&sc->mtx);
+ wakeup(sc);
+ mtx_unlock(&sc->mtx);
+
+ return (0);
+}
+
+static void
+xdmatest_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
+{
+ bus_addr_t *addr;
+
+ if (err)
+ return;
+
+ addr = (bus_addr_t*)arg;
+ *addr = segs[0].ds_addr;
+}
+
+static int
+xdmatest_alloc_test_memory(struct xdmatest_softc *sc)
+{
+ int err;
+
+ sc->len = (0x1000000 - 8); /* 16mb */
+ sc->len = 8;
+
+ /* Source memory. */
+
+ err = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev),
+ 1024, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ sc->len, 1, /* maxsize, nsegments*/
+ sc->len, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->src_dma_tag);
+ if (err) {
+ device_printf(sc->dev,
+ "%s: Can't create bus_dma tag.\n", __func__);
+ return (-1);
+ }
+
+ err = bus_dmamem_alloc(sc->src_dma_tag, (void **)&sc->src,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &sc->src_dma_map);
+ if (err) {
+ device_printf(sc->dev,
+ "%s: Can't allocate memory.\n", __func__);
+ return (-1);
+ }
+
+ err = bus_dmamap_load(sc->src_dma_tag, sc->src_dma_map, sc->src,
+ sc->len, xdmatest_dmamap_cb, &sc->src_phys, BUS_DMA_WAITOK);
+ if (err) {
+ device_printf(sc->dev,
+ "%s: Can't load DMA map.\n", __func__);
+ return (-1);
+ }
+
+ /* Destination memory. */
+
+ err = bus_dma_tag_create(
+ bus_get_dma_tag(sc->dev),
+ 1024, 0, /* alignment, boundary */
+ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ sc->len, 1, /* maxsize, nsegments*/
+ sc->len, 0, /* maxsegsize, flags */
+ NULL, NULL, /* lockfunc, lockarg */
+ &sc->dst_dma_tag);
+ if (err) {
+ device_printf(sc->dev,
+ "%s: Can't create bus_dma tag.\n", __func__);
+ return (-1);
+ }
+
+ err = bus_dmamem_alloc(sc->dst_dma_tag, (void **)&sc->dst,
+ BUS_DMA_WAITOK | BUS_DMA_COHERENT, &sc->dst_dma_map);
+ if (err) {
+ device_printf(sc->dev,
+ "%s: Can't allocate memory.\n", __func__);
+ return (-1);
+ }
+
+ err = bus_dmamap_load(sc->dst_dma_tag, sc->dst_dma_map, sc->dst,
+ sc->len, xdmatest_dmamap_cb, &sc->dst_phys, BUS_DMA_WAITOK);
+ if (err) {
+ device_printf(sc->dev,
+ "%s: Can't load DMA map.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+static int
+xdmatest_test(struct xdmatest_softc *sc)
+{
+ int err;
+ int i;
+
+ /* Get xDMA controller. */
+ sc->xdma = xdma_ofw_get(sc->dev, "test");
+ if (sc->xdma == NULL) {
+ device_printf(sc->dev, "Can't find xDMA controller.\n");
+ return (-1);
+ }
+
+ /* Alloc xDMA virtual channel. */
+ sc->xchan = xdma_channel_alloc(sc->xdma);
+ if (sc->xchan == NULL) {
+ device_printf(sc->dev, "Can't alloc virtual DMA channel.\n");
+ return (-1);
+ }
+
+ /* Setup callback. */
+ err = xdma_setup_intr(sc->xchan, xdmatest_intr, sc, &sc->ih);
+ if (err) {
+ device_printf(sc->dev, "Can't setup xDMA interrupt handler.\n");
+ return (-1);
+ }
+
+ /* We are going to fill memory. */
+ bus_dmamap_sync(sc->src_dma_tag, sc->src_dma_map, BUS_DMASYNC_PREWRITE);
+ bus_dmamap_sync(sc->dst_dma_tag, sc->dst_dma_map, BUS_DMASYNC_PREWRITE);
+
+ /* Fill memory. */
+ for (i = 0; i < sc->len; i++) {
+ sc->src[i] = (i & 0xff);
+ sc->dst[i] = 0;
+ }
+
+ /* Configure channel for memcpy transfer. */
+ err = xdma_prep_memcpy(sc->xchan, sc->src_phys, sc->dst_phys, sc->len);
+ if (err != 0) {
+ device_printf(sc->dev, "Can't configure virtual channel.\n");
+ return (-1);
+ }
+
+ /* Start operation. */
+ xdma_begin(sc->xchan);
+
+ return (0);
+}
+
+static int
+xdmatest_verify(struct xdmatest_softc *sc)
+{
+ int err;
+ int i;
+
+ /* The memory has been updated by the DMA controller. */
+ bus_dmamap_sync(sc->src_dma_tag, sc->src_dma_map, BUS_DMASYNC_POSTREAD);
+ bus_dmamap_sync(sc->dst_dma_tag, sc->dst_dma_map, BUS_DMASYNC_POSTWRITE);
+
+ for (i = 0; i < sc->len; i++) {
+ if (sc->dst[i] != sc->src[i]) {
+ device_printf(sc->dev,
+ "%s: Test failed: iter %d\n", __func__, i);
+ return (-1);
+ }
+ }
+
+ err = xdma_channel_free(sc->xchan);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "%s: Test failed: can't deallocate channel.\n", __func__);
+ return (-1);
+ }
+
+ err = xdma_put(sc->xdma);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "%s: Test failed: can't deallocate xDMA.\n", __func__);
+ return (-1);
+ }
+
+ return (0);
+}
+
+static void
+xdmatest_worker(void *arg)
+{
+ struct xdmatest_softc *sc;
+ int timeout;
+ int err;
+
+ sc = arg;
+
+ device_printf(sc->dev, "Worker %d started.\n",
+ device_get_unit(sc->dev));
+
+ while (1) {
+ sc->done = 0;
+
+ mtx_lock(&sc->mtx);
+
+ xdmatest_test(sc);
+
+ timeout = 100;
+
+ do {
+ mtx_sleep(sc, &sc->mtx, 0, "xdmatest_wait", hz);
+ } while (timeout-- && sc->done == 0);
+
+ if (timeout != 0) {
+ err = xdmatest_verify(sc);
+ if (err == 0) {
+ /* Test succeeded. */
+ mtx_unlock(&sc->mtx);
+ continue;
+ }
+ }
+
+ mtx_unlock(&sc->mtx);
+ device_printf(sc->dev,
+ "%s: Test failed.\n", __func__);
+ break;
+ }
+}
+
+static void
+xdmatest_delayed_attach(void *arg)
+{
+ struct xdmatest_softc *sc;
+
+ sc = arg;
+
+ if (kproc_create(xdmatest_worker, (void *)sc, &sc->newp, 0, 0,
+ "xdmatest_worker") != 0) {
+ device_printf(sc->dev,
+ "%s: Failed to create worker thread.\n", __func__);
+ }
+
+ config_intrhook_disestablish(&sc->config_intrhook);
+}
+
+static int
+xdmatest_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_is_compatible(dev, "freebsd,xdma-test"))
+ return (ENXIO);
+
+ device_set_desc(dev, "xDMA test driver");
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+xdmatest_attach(device_t dev)
+{
+ struct xdmatest_softc *sc;
+ int err;
+
+ sc = device_get_softc(dev);
+ sc->dev = dev;
+
+ mtx_init(&sc->mtx, device_get_nameunit(dev), "xdmatest", MTX_DEF);
+
+ /* Allocate test memory */
+ err = xdmatest_alloc_test_memory(sc);
+ if (err != 0) {
+ device_printf(sc->dev, "Can't allocate test memory.\n");
+ return (-1);
+ }
+
+ /* We'll run the test later, before the root filesystem is mounted. */
+ sc->config_intrhook.ich_func = xdmatest_delayed_attach;
+ sc->config_intrhook.ich_arg = sc;
+ if (config_intrhook_establish(&sc->config_intrhook) != 0)
+ device_printf(dev, "config_intrhook_establish failed\n");
+
+ return (0);
+}
+
+static int
+xdmatest_detach(device_t dev)
+{
+ struct xdmatest_softc *sc;
+
+ sc = device_get_softc(dev);
+
+ bus_dmamap_unload(sc->src_dma_tag, sc->src_dma_map);
+ bus_dmamem_free(sc->src_dma_tag, sc->src, sc->src_dma_map);
+ bus_dma_tag_destroy(sc->src_dma_tag);
+
+ bus_dmamap_unload(sc->dst_dma_tag, sc->dst_dma_map);
+ bus_dmamem_free(sc->dst_dma_tag, sc->dst, sc->dst_dma_map);
+ bus_dma_tag_destroy(sc->dst_dma_tag);
+
+ return (0);
+}
+
+static device_method_t xdmatest_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, xdmatest_probe),
+ DEVMETHOD(device_attach, xdmatest_attach),
+ DEVMETHOD(device_detach, xdmatest_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t xdmatest_driver = {
+ "xdmatest",
+ xdmatest_methods,
+ sizeof(struct xdmatest_softc),
+};
+
+static devclass_t xdmatest_devclass;
+
+DRIVER_MODULE(xdmatest, simplebus, xdmatest_driver, xdmatest_devclass, 0, 0);
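
For the provider side, a hedged sketch of how a hardware DMA engine driver would plug into the xdma_if.m interface added below. The "mydma" names and the empty method bodies are illustrative assumptions; a real driver programs its hardware in these methods and calls xdma_callback() from its interrupt handler when a transfer completes:

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <machine/bus.h>

#include <dev/xdma/xdma.h>

#include <xdma_if.h>

static int
mydma_channel_alloc(device_t dev, struct xdma_channel *xchan)
{

    /* Reserve a hardware channel; driver state goes in xchan->chan. */
    return (0);
}

static int
mydma_channel_free(device_t dev, struct xdma_channel *xchan)
{

    /* Release the hardware channel reserved above. */
    return (0);
}

static int
mydma_channel_prep_memcpy(device_t dev, struct xdma_channel *xchan)
{

    /* Build hardware descriptors from xchan->conf
     * (src_addr, dst_addr, block_len, block_num). */
    return (0);
}

static int
mydma_channel_control(device_t dev, struct xdma_channel *xchan, int cmd)
{

    /* Handle XDMA_CMD_BEGIN / XDMA_CMD_PAUSE / XDMA_CMD_TERMINATE. */
    return (0);
}

static device_method_t mydma_methods[] = {
    /* xDMA interface; device_probe/attach methods omitted here. */
    DEVMETHOD(xdma_channel_alloc,       mydma_channel_alloc),
    DEVMETHOD(xdma_channel_free,        mydma_channel_free),
    DEVMETHOD(xdma_channel_prep_memcpy, mydma_channel_prep_memcpy),
    DEVMETHOD(xdma_channel_control,     mydma_channel_control),

    DEVMETHOD_END
};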
diff --git a/sys/dev/xdma/xdma_if.m b/sys/dev/xdma/xdma_if.m
new file mode 100644
index 000000000000..3062cf55df49
--- /dev/null
+++ b/sys/dev/xdma/xdma_if.m
@@ -0,0 +1,94 @@
+#-
+# Copyright (c) 2016 Ruslan Bukin <br@bsdpad.com>
+# All rights reserved.
+#
+# This software was developed by SRI International and the University of
+# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
+# ("CTSRD"), as part of the DARPA CRASH research programme.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# $FreeBSD$
+#
+
+#include <machine/bus.h>
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#endif
+
+#include <dev/xdma/xdma.h>
+
+INTERFACE xdma;
+
+#
+# Prepare a channel for cyclic transfer.
+#
+METHOD int channel_prep_cyclic {
+ device_t dev;
+ struct xdma_channel *xchan;
+};
+
+#
+# Prepare a channel for memcpy transfer.
+#
+METHOD int channel_prep_memcpy {
+ device_t dev;
+ struct xdma_channel *xchan;
+};
+
+#
+# Notify the driver that we have machine-dependent data.
+#
+METHOD int ofw_md_data {
+ device_t dev;
+ pcell_t *cells;
+ int ncells;
+ void **data;
+};
+
+#
+# Allocate both virtual and hardware channels.
+#
+METHOD int channel_alloc {
+ device_t dev;
+ struct xdma_channel *xchan;
+};
+
+#
+# Free the channel, including descriptors.
+#
+METHOD int channel_free {
+ device_t dev;
+ struct xdma_channel *xchan;
+};
+
+#
+# Begin, pause or terminate the channel operation.
+#
+METHOD int channel_control {
+ device_t dev;
+ struct xdma_channel *xchan;
+ int cmd;
+};