path: root/drivers/misc/habanalabs/common/command_buffer.c
author	Ofir Bitton <obitton@habana.ai>	2020-10-22 15:13:10 +0300
committer	Oded Gabbay <ogabbay@kernel.org>	2020-11-30 11:47:36 +0300
commit	5c05487f15509320572c13fce8f490fb914cf7d4 (patch)
tree	522f42f1ed69d2ccdc348c336defeca9208beb22	/drivers/misc/habanalabs/common/command_buffer.c
parent	5e5867e51d6eb0a086ead83796efbae4234eab1a (diff)
download	linux-5c05487f15509320572c13fce8f490fb914cf7d4.tar.xz
habanalabs: mmu map wrapper for sizes larger than a page
We introduce a new wrapper which allows us to mmu map any size to any available host va_range. In addition, we remove duplicated code from various places in the driver and use this new wrapper instead. This wrapper supports mapping only contiguous physical memory blocks and will be used for mappings that are done to the driver ASID.

Signed-off-by: Ofir Bitton <obitton@habana.ai>
Reviewed-by: Oded Gabbay <ogabbay@kernel.org>
Signed-off-by: Oded Gabbay <ogabbay@kernel.org>
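For context, below is a minimal sketch of what such a "map any size" wrapper could look like, built on top of the per-page helpers hl_mmu_map_page()/hl_mmu_unmap_page() that this patch renames in the diff further down. The wrapper name, the page-size choice and the unwind logic are illustrative assumptions; only the renamed per-page calls appear in this file's diff.

#include "habanalabs.h"

/*
 * Sketch only: maps a contiguous physical block of arbitrary size by
 * walking it page by page. Name and details are assumptions, not the
 * patch's actual implementation.
 */
static int hl_mmu_map_contiguous_sketch(struct hl_ctx *ctx, u64 virt_addr,
					u64 phys_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	u64 curr_va = virt_addr, curr_pa = phys_addr;
	u32 page_size = PAGE_SIZE; /* assumed host page size */
	u32 mapped;
	int rc;

	for (mapped = 0 ; mapped < size ; mapped += page_size) {
		/* flush the MMU cache only when mapping the last page */
		rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
					(mapped + page_size) >= size);
		if (rc) {
			dev_err(hdev->dev, "failed to map VA %#llx\n",
				curr_va);
			goto unmap;
		}

		curr_va += page_size;
		curr_pa += page_size;
	}

	return 0;

unmap:
	/* unwind only the pages that were successfully mapped */
	while (mapped) {
		mapped -= page_size;
		curr_va -= page_size;
		hl_mmu_unmap_page(ctx, curr_va, page_size, mapped == 0);
	}

	return rc;
}

Note that cb_map_mem() in the diff below still walks the CB's va_block list explicitly, since those VA blocks need not form one contiguous range; per the commit message, the new wrapper targets contiguous physical blocks mapped into the driver ASID.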
Diffstat (limited to 'drivers/misc/habanalabs/common/command_buffer.c')
-rw-r--r--	drivers/misc/habanalabs/common/command_buffer.c | 10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index 0c482358f350..2856bb3423ee 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -67,9 +67,9 @@ static int cb_map_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	bus_addr = cb->bus_address;
 	offset = 0;
 	list_for_each_entry(va_block, &cb->va_block_list, node) {
-		rc = hl_mmu_map(ctx, va_block->start, bus_addr, va_block->size,
-				list_is_last(&va_block->node,
-						&cb->va_block_list));
+		rc = hl_mmu_map_page(ctx, va_block->start, bus_addr,
+				va_block->size, list_is_last(&va_block->node,
+						&cb->va_block_list));
 		if (rc) {
 			dev_err(hdev->dev, "Failed to map VA %#llx to CB\n",
 				va_block->start);
@@ -92,7 +92,7 @@ err_va_umap:
 	list_for_each_entry(va_block, &cb->va_block_list, node) {
 		if (offset <= 0)
 			break;
-		hl_mmu_unmap(ctx, va_block->start, va_block->size,
+		hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
 				offset <= va_block->size);
 		offset -= va_block->size;
 	}
@@ -119,7 +119,7 @@ static void cb_unmap_mem(struct hl_ctx *ctx, struct hl_cb *cb)
 	mutex_lock(&ctx->mmu_lock);
 
 	list_for_each_entry(va_block, &cb->va_block_list, node)
-		if (hl_mmu_unmap(ctx, va_block->start, va_block->size,
+		if (hl_mmu_unmap_page(ctx, va_block->start, va_block->size,
 				list_is_last(&va_block->node,
 					&cb->va_block_list)))
 			dev_warn_ratelimited(hdev->dev,