author     Michal Meloun <mmel@FreeBSD.org>   2020-12-02 16:54:24 +0000
committer  Michal Meloun <mmel@FreeBSD.org>   2020-12-02 16:54:24 +0000
commit     8f9d5a8dbf4ea69c5f9a1e3a36e23732ffaa5c75 (patch)
tree       a21fcec70993c4b393570e669c63cbf99c85c771 /sys/dev/nvme/nvme_qpair.c
parent     ecce515d54bcea54fea03f731aad646c87761d22 (diff)
NVME: Multiple busdma-related fixes.
- In nvme_qpair_process_completions(), do the DMA sync before the completion
  buffer is used.
- In nvme_qpair_submit_tracker(), don't do an explicit wmb() on arm and arm64
  either. On these architectures, bus_dmamap_sync() is sufficient to ensure
  that all CPU stores are visible to external (including DMA) observers.
- Allocate the completion buffer as BUS_DMA_COHERENT. On non-DMA-coherent
  systems, buffers continuously owned (and accessed) by DMA must be allocated
  with this flag. Note that BUS_DMA_COHERENT is a no-op on DMA-coherent
  systems (or on coherent buses in mixed systems). A sketch of this allocation
  and sync pattern follows the review trailers below.
MFC after: 4 weeks
Reviewed by: mav, imp
Differential Revision: https://reviews.freebsd.org/D27446
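
As an illustration of the pattern described above, here is a minimal sketch in
the style of the FreeBSD busdma KPI. The struct and function names
(example_qpair, example_alloc_queue, example_drain_completions) are
hypothetical stand-ins, not part of the driver; only the bus_dma* calls and
flags are real API.

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>

/* Hypothetical stand-in for a completion queue entry. */
struct example_cpl {
	uint16_t	status;
};

/* Hypothetical stand-in for the driver's queue-pair state. */
struct example_qpair {
	bus_dma_tag_t		dma_tag;
	bus_dmamap_t		queuemem_map;
	struct example_cpl	*cpl;
	uint32_t		cq_head;
};

/*
 * Allocate the queue memory with BUS_DMA_COHERENT: the device owns and
 * writes this buffer for its entire lifetime, so on non-DMA-coherent
 * systems it must come from coherent memory.  On coherent systems the
 * flag is a no-op, so passing it is always safe.
 */
static int
example_alloc_queue(struct example_qpair *qpair, void **queuemem)
{
	return (bus_dmamem_alloc(qpair->dma_tag, queuemem,
	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map));
}

/*
 * Sync with POSTREAD before looking at any completion entry, so the CPU
 * observes the device's most recent DMA writes.
 */
static void
example_drain_completions(struct example_qpair *qpair)
{
	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* It is now safe to read qpair->cpl[qpair->cq_head]. */
}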
Notes:
svn path=/head/; revision=368279
Diffstat (limited to 'sys/dev/nvme/nvme_qpair.c')
-rw-r--r--   sys/dev/nvme/nvme_qpair.c   8
1 file changed, 4 insertions, 4 deletions
diff --git a/sys/dev/nvme/nvme_qpair.c b/sys/dev/nvme/nvme_qpair.c
index 350a53b1b29f..09b9336e5ac0 100644
--- a/sys/dev/nvme/nvme_qpair.c
+++ b/sys/dev/nvme/nvme_qpair.c
@@ -547,6 +547,8 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
 	if (!qpair->is_enabled)
 		return (false);
 
+	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
+	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 	/*
 	 * A panic can stop the CPU this routine is running on at any point.  If
 	 * we're called during a panic, complete the sq_head wrap protocol for
@@ -580,8 +582,6 @@ nvme_qpair_process_completions(struct nvme_qpair *qpair)
 		}
 	}
 
-	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
-	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
 	while (1) {
 		cpl = qpair->cpl[qpair->cq_head];
 
@@ -722,7 +722,7 @@ nvme_qpair_construct(struct nvme_qpair *qpair,
 		bus_dma_tag_set_domain(qpair->dma_tag, qpair->domain);
 
 	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
-	    BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
+	    BUS_DMA_COHERENT | BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
 		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
 		goto out;
 	}
@@ -982,7 +982,7 @@ nvme_qpair_submit_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr)
 	bus_dmamap_sync(qpair->dma_tag, qpair->queuemem_map,
 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
 
-#ifndef __powerpc__
+#if !defined(__powerpc__) && !defined(__aarch64__) && !defined(__arm__)
 	/*
 	 * powerpc's bus_dmamap_sync() already includes a heavyweight sync, but
 	 * no other archs do.
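
The submit-side ordering that makes the explicit wmb() redundant can be
sketched the same way. The names here (example_submit, the doorbell offset
parameter) are again hypothetical; the point is that BUS_DMASYNC_PREWRITE
already publishes the CPU's queue stores to the device on powerpc, arm, and
arm64, so only the remaining architectures fall through to the explicit
barrier.

#include <sys/param.h>
#include <sys/bus.h>
#include <machine/atomic.h>
#include <machine/bus.h>

/*
 * Hedged sketch of the submit path: the command has already been copied
 * into the submission queue; publish it with a PREWRITE sync, then ring
 * the doorbell.  The #if mirrors the hunk above: architectures whose
 * bus_dmamap_sync() already orders CPU stores against device accesses
 * need no extra write barrier before the doorbell write.
 */
static void
example_submit(bus_dma_tag_t tag, bus_dmamap_t map, bus_space_tag_t bst,
    bus_space_handle_t bsh, bus_size_t sq_tdbl_off, uint32_t sq_tail)
{
	bus_dmamap_sync(tag, map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if !defined(__powerpc__) && !defined(__aarch64__) && !defined(__arm__)
	wmb();	/* order the SQ stores before the doorbell write */
#endif
	bus_space_write_4(bst, bsh, sq_tdbl_off, sq_tail);
}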