Diffstat (limited to 'drivers/gpu/drm/xe/xe_gt_pagefault.c')
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 79 +++++++++++++++++++++++++-----
1 file changed, 65 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 9292d5468868..00af059a8971 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -212,6 +212,12 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf)
* TODO: Change to read lock? Using write lock for simplicity.
*/
down_write(&vm->lock);
+
+ if (xe_vm_is_closed(vm)) {
+ err = -ENOENT;
+ goto unlock_vm;
+ }
+
vma = lookup_vma(vm, pf->page_addr);
if (!vma) {
err = -EINVAL;
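
Note on the hunk above: the xe_vm_is_closed() check closes a race between the
page fault worker and VM teardown. A fault can still be sitting in the queue
when userspace closes the VM, and lookup_vma() on a dying VM would walk state
that is being torn down; since the VM's closed state changes under the same
vm->lock, re-checking it after down_write() is enough to make the worker back
off with -ENOENT. A minimal sketch of the check-under-lock pattern, with
illustrative names only (struct foo, foo_handle_event, etc. are not xe driver
API):

	#include <linux/errno.h>
	#include <linux/rwsem.h>
	#include <linux/types.h>

	struct foo {
		struct rw_semaphore lock;
		bool closed;	/* only written with lock held for write */
	};

	static int foo_handle_event(struct foo *f)
	{
		int err = 0;

		down_write(&f->lock);
		if (f->closed) {	/* raced with teardown; bail out */
			err = -ENOENT;
			goto unlock;
		}
		/* ... safe to use f's state here ... */
	unlock:
		up_write(&f->lock);
		return err;
	}
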
@@ -287,7 +293,7 @@ static bool get_pagefault(struct pf_queue *pf_queue, struct pagefault *pf)
PFD_VIRTUAL_ADDR_LO_SHIFT;
pf_queue->tail = (pf_queue->tail + PF_MSG_LEN_DW) %
- PF_QUEUE_NUM_DW;
+ pf_queue->num_dw;
ret = true;
}
spin_unlock_irq(&pf_queue->lock);
@@ -299,7 +305,8 @@ static bool pf_queue_full(struct pf_queue *pf_queue)
{
lockdep_assert_held(&pf_queue->lock);
- return CIRC_SPACE(pf_queue->head, pf_queue->tail, PF_QUEUE_NUM_DW) <=
+ return CIRC_SPACE(pf_queue->head, pf_queue->tail,
+ pf_queue->num_dw) <=
PF_MSG_LEN_DW;
}
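
The two hunks above switch the ring bookkeeping from the compile-time
PF_QUEUE_NUM_DW to the per-queue pf_queue->num_dw: head and tail are dword
indices into pf_queue->data, advanced modulo the queue size, and
pf_queue_full() reports full once no more than one message's worth of space
(PF_MSG_LEN_DW) remains. One caveat: CIRC_SPACE() from <linux/circ_buf.h>
expands to ((tail) - ((head) + 1)) & ((size) - 1), so it is only exact when
size is a power of two, which the dynamically computed num_dw introduced
later in this patch does not guarantee (rounding num_dw up with
roundup_pow_of_two() would preserve that assumption). A size-agnostic
alternative, sketch only:

	#include <linux/types.h>

	/* free dwords in a ring of arbitrary size; one slot is kept open
	 * so that a full ring is distinguishable from an empty one */
	static u32 ring_space(u32 head, u32 tail, u32 size)
	{
		return (tail + size - head - 1) % size;
	}
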
@@ -312,22 +319,23 @@ int xe_guc_pagefault_handler(struct xe_guc *guc, u32 *msg, u32 len)
u32 asid;
bool full;
- /*
- * The below logic doesn't work unless PF_QUEUE_NUM_DW % PF_MSG_LEN_DW == 0
- */
- BUILD_BUG_ON(PF_QUEUE_NUM_DW % PF_MSG_LEN_DW);
-
if (unlikely(len != PF_MSG_LEN_DW))
return -EPROTO;
asid = FIELD_GET(PFD_ASID, msg[1]);
pf_queue = gt->usm.pf_queue + (asid % NUM_PF_QUEUE);
+ /*
+ * The below logic doesn't work unless pf_queue->num_dw % PF_MSG_LEN_DW == 0
+ */
+ xe_gt_assert(gt, !(pf_queue->num_dw % PF_MSG_LEN_DW));
+
spin_lock_irqsave(&pf_queue->lock, flags);
full = pf_queue_full(pf_queue);
if (!full) {
memcpy(pf_queue->data + pf_queue->head, msg, len * sizeof(u32));
- pf_queue->head = (pf_queue->head + len) % PF_QUEUE_NUM_DW;
+ pf_queue->head = (pf_queue->head + len) %
+ pf_queue->num_dw;
queue_work(gt->usm.pf_wq, &pf_queue->worker);
} else {
drm_warn(&xe->drm, "PF Queue full, shouldn't be possible");
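
The BUILD_BUG_ON() had to go because the queue size is no longer a
compile-time constant; the invariant moves to a runtime xe_gt_assert() taken
once the target queue is known. That invariant is what lets the producer
above write a whole message with a single memcpy() and no wrap handling:
head only advances in PF_MSG_LEN_DW strides, so if num_dw is a multiple of
PF_MSG_LEN_DW, a message can never straddle the end of the buffer. Sketch of
that reasoning as a predicate (illustrative only, not driver code):

	#include <linux/types.h>

	/* every head reachable by whole msg_len strides keeps a
	 * msg_len-sized copy inside the buffer when num_dw % msg_len == 0 */
	static bool copy_stays_in_bounds(u32 head, u32 msg_len, u32 num_dw)
	{
		return num_dw % msg_len == 0 &&
		       head % msg_len == 0 &&
		       head + msg_len <= num_dw;
	}
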
@@ -382,18 +390,59 @@ static void pf_queue_work_func(struct work_struct *w)
static void acc_queue_work_func(struct work_struct *w);
+static void pagefault_fini(void *arg)
+{
+ struct xe_gt *gt = arg;
+ struct xe_device *xe = gt_to_xe(gt);
+
+ if (!xe->info.has_usm)
+ return;
+
+ destroy_workqueue(gt->usm.acc_wq);
+ destroy_workqueue(gt->usm.pf_wq);
+}
+
+static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ xe_dss_mask_t all_dss;
+ int num_dss, num_eus;
+
+ bitmap_or(all_dss, gt->fuse_topo.g_dss_mask, gt->fuse_topo.c_dss_mask,
+ XE_MAX_DSS_FUSE_BITS);
+
+ num_dss = bitmap_weight(all_dss, XE_MAX_DSS_FUSE_BITS);
+ num_eus = bitmap_weight(gt->fuse_topo.eu_mask_per_dss,
+ XE_MAX_EU_FUSE_BITS) * num_dss;
+
+ /* user can issue separate page faults per EU and per CS */
+ pf_queue->num_dw =
+ (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
+
+ pf_queue->gt = gt;
+ pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
+ sizeof(u32), GFP_KERNEL);
+ if (!pf_queue->data)
+ return -ENOMEM;
+
+ spin_lock_init(&pf_queue->lock);
+ INIT_WORK(&pf_queue->worker, pf_queue_work_func);
+
+ return 0;
+}
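
xe_alloc_pf_queue() sizes each queue from the fused topology instead of a
fixed constant: the union of the geometry and compute DSS masks gives the
number of enabled DSS, multiplied by EUs per DSS, on the theory that every EU
and every hardware engine can have one fault outstanding at once. The buffer
comes from devm_kcalloc(), so its lifetime is tied to the device and nothing
has to free it in pagefault_fini(). Illustrative arithmetic, all figures
assumed rather than taken from any real SKU:

	/*
	 * 32 enabled DSS * 16 EUs per DSS  ->  num_eus = 512
	 * with PF_MSG_LEN_DW == 4 and, say, XE_NUM_HW_ENGINES == 64:
	 *   num_dw = (512 + 64) * 4 = 2304 dwords = 9216 bytes per queue
	 */
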
+
int xe_gt_pagefault_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
- int i;
+ int i, ret = 0;
if (!xe->info.has_usm)
return 0;
for (i = 0; i < NUM_PF_QUEUE; ++i) {
- gt->usm.pf_queue[i].gt = gt;
- spin_lock_init(&gt->usm.pf_queue[i].lock);
- INIT_WORK(&gt->usm.pf_queue[i].worker, pf_queue_work_func);
+ ret = xe_alloc_pf_queue(gt, &gt->usm.pf_queue[i]);
+ if (ret)
+ return ret;
}
for (i = 0; i < NUM_ACC_QUEUE; ++i) {
gt->usm.acc_queue[i].gt = gt;
@@ -409,10 +458,12 @@ int xe_gt_pagefault_init(struct xe_gt *gt)
gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
WQ_UNBOUND | WQ_HIGHPRI,
NUM_ACC_QUEUE);
- if (!gt->usm.acc_wq)
+ if (!gt->usm.acc_wq) {
+ destroy_workqueue(gt->usm.pf_wq);
return -ENOMEM;
+ }
- return 0;
+ return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
}
void xe_gt_pagefault_reset(struct xe_gt *gt)
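
Closing note: registering pagefault_fini() via devm_add_action_or_reset()
ties teardown to driver unbind, so the workqueues cannot leak on any init
path; the _or_reset variant runs the action immediately and returns the error
if the registration itself fails. A minimal sketch of that contract
(my_state, my_init and my_fini are placeholder names):

	#include <linux/device.h>

	static void my_fini(void *data)
	{
		/* ... release whatever my_init() acquired ... */
	}

	static int my_init(struct device *dev, void *my_state)
	{
		/* ... acquire resources tracked by my_state ... */

		/* on success my_fini(my_state) runs at driver detach;
		 * on failure it runs right here and the error is returned */
		return devm_add_action_or_reset(dev, my_fini, my_state);
	}
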