On Mon, 30 May 2022 22:45:34 -0700 Christoph Hellwig wrote:
>
> This is completely broken. If the underlying device is not DMA capable
> we can't just set a random mask and still allow DMA mappings.
You are right. Thanks for taking a look.
And IMO we can do less on the exporter side, as the udmabuf misc device
is different from devices that are, for instance, DMA capable on the PCI bus.
Regular DMA mapping games belong only on the importer side.
--- y/drivers/dma-buf/udmabuf.c
+++ u/drivers/dma-buf/udmabuf.c
@@ -55,7 +55,7 @@ static int mmap_udmabuf(struct dma_buf *
}
static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
- enum dma_data_direction direction)
+ enum dma_data_direction direction, int mapping)
{
struct udmabuf *ubuf = buf->priv;
struct sg_table *sg;
@@ -69,6 +69,8 @@ static struct sg_table *get_sg_table(str
GFP_KERNEL);
if (ret < 0)
goto err;
+ if (!mapping)
+ return sg;
ret = dma_map_sgtable(dev, sg, direction, 0);
if (ret < 0)
goto err;
@@ -91,7 +93,7 @@ static void put_sg_table(struct device *
static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
enum dma_data_direction direction)
{
- return get_sg_table(at->dev, at->dmabuf, direction);
+ return get_sg_table(at->dev, at->dmabuf, direction, 1);
}
static void unmap_udmabuf(struct dma_buf_attachment *at,
@@ -123,14 +125,10 @@ static int begin_cpu_udmabuf(struct dma_
struct device *dev = ubuf->device->this_device;
if (!ubuf->sg) {
- ubuf->sg = get_sg_table(dev, buf, direction);
+ ubuf->sg = get_sg_table(dev, buf, direction, 0);
if (IS_ERR(ubuf->sg))
return PTR_ERR(ubuf->sg);
- } else {
- dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
- direction);
}
-
return 0;
}
@@ -138,12 +136,14 @@ static int end_cpu_udmabuf(struct dma_bu
enum dma_data_direction direction)
{
struct udmabuf *ubuf = buf->priv;
+ /*
+ * keep dev intact, thanks.
+ *
struct device *dev = ubuf->device->this_device;
+ */
if (!ubuf->sg)
return -EINVAL;
-
- dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
return 0;
}