path: root/drivers/misc/habanalabs/common
author    Linus Torvalds <torvalds@linux-foundation.org>  2022-08-04 21:05:48 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-08-04 21:05:48 +0300
commit    228dfe98a313f6b6bff5da8b2c5e650e297ebf1a (patch)
tree      501dc44c2453743eb2298fe23d6cdd0b5e91b2c2 /drivers/misc/habanalabs/common
parent    798cd57cd5f871452461746032cf6ee50b0fd69a (diff)
parent    b5276c924497705ca927ad85a763c37f2de98349 (diff)
download  linux-228dfe98a313f6b6bff5da8b2c5e650e297ebf1a.tar.xz
Merge tag 'char-misc-6.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc driver updates from Greg KH:
 "Here is the large set of char and misc and other driver subsystem
  changes for 6.0-rc1.

  Highlights include:

   - large set of IIO driver updates, additions, and cleanups
   - new habanalabs device support added (loads of register maps much
     like GPUs have)
   - soundwire driver updates
   - phy driver updates
   - slimbus driver updates
   - tiny virt driver fixes and updates
   - misc driver fixes and updates
   - interconnect driver updates
   - hwtracing driver updates
   - fpga driver updates
   - extcon driver updates
   - firmware driver updates
   - counter driver update
   - mhi driver fixes and updates
   - binder driver fixes and updates
   - speakup driver fixes

  All of these have been in linux-next for a while without any reported
  problems"

* tag 'char-misc-6.0-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (634 commits)
  drivers: lkdtm: fix clang -Wformat warning
  char: remove VR41XX related char driver
  misc: Mark MICROCODE_MINOR unused
  spmi: trace: fix stack-out-of-bound access in SPMI tracing functions
  dt-bindings: iio: adc: Add compatible for MT8188
  iio: light: isl29028: Fix the warning in isl29028_remove()
  iio: accel: sca3300: Extend the trigger buffer from 16 to 32 bytes
  iio: fix iio_format_avail_range() printing for none IIO_VAL_INT
  iio: adc: max1027: unlock on error path in max1027_read_single_value()
  iio: proximity: sx9324: add empty line in front of bullet list
  iio: magnetometer: hmc5843: Remove duplicate 'the'
  iio: magn: yas530: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr() macros
  iio: magnetometer: ak8974: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr() macros
  iio: light: veml6030: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr() macros
  iio: light: vcnl4035: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr() macros
  iio: light: vcnl4000: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr() macros
  iio: light: tsl2591: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr()
  iio: light: tsl2583: Use DEFINE_RUNTIME_DEV_PM_OPS and pm_ptr()
  iio: light: isl29028: Use DEFINE_RUNTIME_DEV_PM_OPS() and pm_ptr()
  iio: light: gp2ap002: Switch to DEFINE_RUNTIME_DEV_PM_OPS and pm_ptr()
  ...
Diffstat (limited to 'drivers/misc/habanalabs/common')
-rw-r--r--  drivers/misc/habanalabs/common/Makefile              |   3
-rw-r--r--  drivers/misc/habanalabs/common/asid.c                |   5
-rw-r--r--  drivers/misc/habanalabs/common/command_buffer.c      |  12
-rw-r--r--  drivers/misc/habanalabs/common/command_submission.c  | 296
-rw-r--r--  drivers/misc/habanalabs/common/context.c             |  73
-rw-r--r--  drivers/misc/habanalabs/common/debugfs.c             | 221
-rw-r--r--  drivers/misc/habanalabs/common/decoder.c             | 133
-rw-r--r--  drivers/misc/habanalabs/common/device.c              | 242
-rw-r--r--  drivers/misc/habanalabs/common/firmware_if.c         | 211
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs.h          | 756
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_drv.c      |  82
-rw-r--r--  drivers/misc/habanalabs/common/habanalabs_ioctl.c    |  54
-rw-r--r--  drivers/misc/habanalabs/common/hw_queue.c            |  45
-rw-r--r--  drivers/misc/habanalabs/common/irq.c                 | 160
-rw-r--r--  drivers/misc/habanalabs/common/memory.c              | 115
-rw-r--r--  drivers/misc/habanalabs/common/memory_mgr.c          |   2
-rw-r--r--  drivers/misc/habanalabs/common/mmu/Makefile          |   3
-rw-r--r--  drivers/misc/habanalabs/common/mmu/mmu.c             | 496
-rw-r--r--  drivers/misc/habanalabs/common/mmu/mmu_v1.c          |   9
-rw-r--r--  drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c       | 399
-rw-r--r--  drivers/misc/habanalabs/common/pci/pci.c             |  40
-rw-r--r--  drivers/misc/habanalabs/common/security.c            | 600
-rw-r--r--  drivers/misc/habanalabs/common/sysfs.c               |  10
23 files changed, 3313 insertions(+), 654 deletions(-)
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 934a3a4aedc9..e6abffea9f87 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -11,4 +11,5 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/firmware_if.o \
- common/state_dump.o common/memory_mgr.o
+ common/security.o common/state_dump.o \
+ common/memory_mgr.o common/decoder.o
diff --git a/drivers/misc/habanalabs/common/asid.c b/drivers/misc/habanalabs/common/asid.c
index ede04c032b6e..c9c2619cc43d 100644
--- a/drivers/misc/habanalabs/common/asid.c
+++ b/drivers/misc/habanalabs/common/asid.c
@@ -11,8 +11,7 @@
int hl_asid_init(struct hl_device *hdev)
{
- hdev->asid_bitmap = kcalloc(BITS_TO_LONGS(hdev->asic_prop.max_asid),
- sizeof(*hdev->asid_bitmap), GFP_KERNEL);
+ hdev->asid_bitmap = bitmap_zalloc(hdev->asic_prop.max_asid, GFP_KERNEL);
if (!hdev->asid_bitmap)
return -ENOMEM;
@@ -27,7 +26,7 @@ int hl_asid_init(struct hl_device *hdev)
void hl_asid_fini(struct hl_device *hdev)
{
mutex_destroy(&hdev->asid_mutex);
- kfree(hdev->asid_bitmap);
+ bitmap_free(hdev->asid_bitmap);
}
unsigned long hl_asid_alloc(struct hl_device *hdev)
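For context on the conversion above: bitmap_zalloc()/bitmap_free() wrap the open-coded BITS_TO_LONGS() arithmetic the driver used before. A minimal sketch of the equivalence (illustrative only, not driver code):

	/* Both forms allocate a zeroed bitmap large enough for 'nbits'
	 * bits; bitmap_zalloc() simply hides the words-per-bitmap math
	 * and pairs naturally with bitmap_free().
	 */
	unsigned long *map;

	map = kcalloc(BITS_TO_LONGS(nbits), sizeof(*map), GFP_KERNEL); /* old */
	kfree(map);

	map = bitmap_zalloc(nbits, GFP_KERNEL);                        /* new */
	bitmap_free(map);
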
diff --git a/drivers/misc/habanalabs/common/command_buffer.c b/drivers/misc/habanalabs/common/command_buffer.c
index e13b2b39c058..b027f66f8bd4 100644
--- a/drivers/misc/habanalabs/common/command_buffer.c
+++ b/drivers/misc/habanalabs/common/command_buffer.c
@@ -143,8 +143,7 @@ static void cb_fini(struct hl_device *hdev, struct hl_cb *cb)
gen_pool_free(hdev->internal_cb_pool,
(uintptr_t)cb->kernel_address, cb->size);
else
- hdev->asic_funcs->asic_dma_free_coherent(hdev, cb->size,
- cb->kernel_address, cb->bus_address);
+ hl_asic_dma_free_coherent(hdev, cb->size, cb->kernel_address, cb->bus_address);
kfree(cb);
}
@@ -195,14 +194,11 @@ static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size,
cb->is_internal = true;
cb->bus_address = hdev->internal_cb_va_base + cb_offset;
} else if (ctx_id == HL_KERNEL_ASID_ID) {
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
- &cb->bus_address, GFP_ATOMIC);
+ p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
if (!p)
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- cb_size, &cb->bus_address, GFP_KERNEL);
+ p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
} else {
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, cb_size,
- &cb->bus_address,
+ p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address,
GFP_USER | __GFP_ZERO);
}
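The hunk above keeps the driver's two-stage allocation for kernel-context CBs while switching to the new wrapper; a hedged sketch of the pattern, with names as in the hunk:

	/* Try a non-sleeping allocation first (safe from atomic-ish
	 * paths); fall back to a potentially-sleeping GFP_KERNEL
	 * allocation only if the atomic attempt fails.
	 */
	p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_ATOMIC);
	if (!p)
		p = hl_asic_dma_alloc_coherent(hdev, cb_size, &cb->bus_address, GFP_KERNEL);
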
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index fb30b7de4aab..90a4574cbe2d 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -12,7 +12,7 @@
#include <linux/slab.h>
#define HL_CS_FLAGS_TYPE_MASK (HL_CS_FLAGS_SIGNAL | HL_CS_FLAGS_WAIT | \
- HL_CS_FLAGS_COLLECTIVE_WAIT)
+ HL_CS_FLAGS_COLLECTIVE_WAIT)
#define MAX_TS_ITER_NUM 10
@@ -29,11 +29,88 @@ enum hl_cs_wait_status {
};
static void job_wq_completion(struct work_struct *work);
-static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- u64 timeout_us, u64 seq,
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp);
static void cs_do_release(struct kref *ref);
+static void hl_push_cs_outcome(struct hl_device *hdev,
+ struct hl_cs_outcome_store *outcome_store,
+ u64 seq, ktime_t ts, int error)
+{
+ struct hl_cs_outcome *node;
+ unsigned long flags;
+
+ /*
+ * CS outcome store supports the following operations:
+ * push outcome - store a recent CS outcome in the store
+ * pop outcome - retrieve a SPECIFIC (by seq) CS outcome from the store
+ * It uses 2 lists: used list and free list.
+ * It has a pre-allocated amount of nodes, each node stores
+ * a single CS outcome.
+ * Initially, all the nodes are in the free list.
+ * On push outcome, a node (any) is taken from the free list, its
+ * information is filled in, and the node is moved to the used list.
+ * It is possible, that there are no nodes left in the free list.
+ * In this case, we will lose some information about old outcomes. We
+ * will pop the OLDEST node from the used list, and make it free.
+ * On pop, the node is searched for in the used list (using a search
+ * index).
+ * If found, the node is then removed from the used list, and moved
+ * back to the free list. The outcome data that the node contained is
+ * returned back to the user.
+ */
+
+ spin_lock_irqsave(&outcome_store->db_lock, flags);
+
+ if (list_empty(&outcome_store->free_list)) {
+ node = list_last_entry(&outcome_store->used_list,
+ struct hl_cs_outcome, list_link);
+ hash_del(&node->map_link);
+ dev_dbg(hdev->dev, "CS %llu outcome was lost\n", node->seq);
+ } else {
+ node = list_last_entry(&outcome_store->free_list,
+ struct hl_cs_outcome, list_link);
+ }
+
+ list_del_init(&node->list_link);
+
+ node->seq = seq;
+ node->ts = ts;
+ node->error = error;
+
+ list_add(&node->list_link, &outcome_store->used_list);
+ hash_add(outcome_store->outcome_map, &node->map_link, node->seq);
+
+ spin_unlock_irqrestore(&outcome_store->db_lock, flags);
+}
+
+static bool hl_pop_cs_outcome(struct hl_cs_outcome_store *outcome_store,
+ u64 seq, ktime_t *ts, int *error)
+{
+ struct hl_cs_outcome *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&outcome_store->db_lock, flags);
+
+ hash_for_each_possible(outcome_store->outcome_map, node, map_link, seq)
+ if (node->seq == seq) {
+ *ts = node->ts;
+ *error = node->error;
+
+ hash_del(&node->map_link);
+ list_del_init(&node->list_link);
+ list_add(&node->list_link, &outcome_store->free_list);
+
+ spin_unlock_irqrestore(&outcome_store->db_lock, flags);
+
+ return true;
+ }
+
+ spin_unlock_irqrestore(&outcome_store->db_lock, flags);
+
+ return false;
+}
+
static void hl_sob_reset(struct kref *ref)
{
struct hl_hw_sob *hw_sob = container_of(ref, struct hl_hw_sob,
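A hedged usage sketch of the new outcome store (variable names illustrative): the completion path pushes a result keyed by sequence number, and a later waiter can still recover it after the fence itself is gone:

	/* producer side: record the outcome of a completed CS */
	hl_push_cs_outcome(hdev, &ctx->outcome_store, cs->sequence,
			   cs->fence->timestamp, cs->fence->error);

	/* consumer side: the fence may already be freed, but the outcome
	 * (timestamp + error code) can still be popped by sequence number
	 */
	if (hl_pop_cs_outcome(&ctx->outcome_store, seq, &ts, &error))
		report_completed(ts, error); /* hypothetical helper */
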
@@ -171,7 +248,7 @@ static void cs_job_do_release(struct kref *ref)
kfree(job);
}
-static void cs_job_put(struct hl_cs_job *job)
+static void hl_cs_job_put(struct hl_cs_job *job)
{
kref_put(&job->refcount, cs_job_do_release);
}
@@ -266,7 +343,7 @@ static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
return rc;
}
-static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
+static void hl_complete_job(struct hl_device *hdev, struct hl_cs_job *job)
{
struct hl_cs *cs = job->cs;
@@ -285,12 +362,12 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
/* For H/W queue jobs, if a user CB was allocated by driver and MMU is
* enabled, the user CB isn't released in cs_parser() and thus should be
- * released here.
- * This is also true for INT queues jobs which were allocated by driver
+ * released here. This is also true for INT queues jobs which were
+ * allocated by driver.
*/
- if (job->is_kernel_allocated_cb &&
+ if ((job->is_kernel_allocated_cb &&
((job->queue_type == QUEUE_TYPE_HW && hdev->mmu_enable) ||
- job->queue_type == QUEUE_TYPE_INT)) {
+ job->queue_type == QUEUE_TYPE_INT))) {
atomic_dec(&job->user_cb->cs_cnt);
hl_cb_put(job->user_cb);
}
@@ -318,11 +395,10 @@ static void complete_job(struct hl_device *hdev, struct hl_cs_job *job)
* flow by calling 'hl_hw_queue_update_ci'.
*/
if (cs_needs_completion(cs) &&
- (job->queue_type == QUEUE_TYPE_EXT ||
- job->queue_type == QUEUE_TYPE_HW))
+ (job->queue_type == QUEUE_TYPE_EXT || job->queue_type == QUEUE_TYPE_HW))
cs_put(cs);
- cs_job_put(job);
+ hl_cs_job_put(job);
}
/*
@@ -612,7 +688,7 @@ static void cs_do_release(struct kref *ref)
* still holds a pointer to them (but no reference).
*/
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- complete_job(hdev, job);
+ hl_complete_job(hdev, job);
if (!cs->submitted) {
/*
@@ -642,9 +718,9 @@ static void cs_do_release(struct kref *ref)
* staged submission
*/
if (cs->staged_last) {
- struct hl_cs *staged_cs, *tmp;
+ struct hl_cs *staged_cs, *tmp_cs;
- list_for_each_entry_safe(staged_cs, tmp,
+ list_for_each_entry_safe(staged_cs, tmp_cs,
&cs->staged_cs_node, staged_cs_node)
staged_cs_put(hdev, staged_cs);
}
@@ -678,7 +754,7 @@ out:
*/
hl_debugfs_remove_cs(cs);
- hl_ctx_put(cs->ctx);
+ hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL;
/* We need to mark an error for not submitted because in that case
* the hl fence release flow is different. Mainly, we don't need
@@ -698,8 +774,14 @@ out:
div_u64(jiffies - cs->submission_time_jiffies, HZ));
}
- if (cs->timestamp)
+ if (cs->timestamp) {
cs->fence->timestamp = ktime_get();
+ hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
+ cs->fence->timestamp, cs->fence->error);
+ }
+
+ hl_ctx_put(cs->ctx);
+
complete_all(&cs->fence->completion);
complete_multi_cs(hdev, cs);
@@ -714,10 +796,11 @@ out:
static void cs_timedout(struct work_struct *work)
{
struct hl_device *hdev;
+ u64 event_mask;
int rc;
struct hl_cs *cs = container_of(work, struct hl_cs,
work_tdr.work);
- bool skip_reset_on_timeout = cs->skip_reset_on_timeout;
+ bool skip_reset_on_timeout = cs->skip_reset_on_timeout, device_reset = false;
rc = cs_get_unless_zero(cs);
if (!rc)
@@ -728,17 +811,28 @@ static void cs_timedout(struct work_struct *work)
return;
}
- /* Mark the CS is timed out so we won't try to cancel its TDR */
- if (likely(!skip_reset_on_timeout))
- cs->timedout = true;
-
hdev = cs->ctx->hdev;
+ if (likely(!skip_reset_on_timeout)) {
+ if (hdev->reset_on_lockup)
+ device_reset = true;
+ else
+ hdev->reset_info.needs_reset = true;
+
+ /* Mark the CS is timed out so we won't try to cancel its TDR */
+ cs->timedout = true;
+ }
+
/* Save only the first CS timeout parameters */
- rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_disable, 0, 1);
- if (!rc) {
+ rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_enable, 1, 0);
+ if (rc) {
hdev->last_error.cs_timeout.timestamp = ktime_get();
hdev->last_error.cs_timeout.seq = cs->sequence;
+
+ event_mask = device_reset ? (HL_NOTIFIER_EVENT_CS_TIMEOUT |
+ HL_NOTIFIER_EVENT_DEVICE_RESET) : HL_NOTIFIER_EVENT_CS_TIMEOUT;
+
+ hl_notifier_event_send_all(hdev, event_mask);
}
switch (cs->type) {
@@ -773,12 +867,8 @@ static void cs_timedout(struct work_struct *work)
cs_put(cs);
- if (likely(!skip_reset_on_timeout)) {
- if (hdev->reset_on_lockup)
- hl_device_reset(hdev, HL_DRV_RESET_TDR);
- else
- hdev->reset_info.needs_reset = true;
- }
+ if (device_reset)
+ hl_device_reset(hdev, HL_DRV_RESET_TDR);
}
static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
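The rework above also inverts the guard from a write_disable flag to a write_enable flag; a hedged sketch of the record-only-the-first-error idiom it implements:

	/* write_enable starts at 1; the first timeout atomically flips it
	 * to 0 and wins the right to record its details. atomic_cmpxchg()
	 * returns the old value, so a non-zero 'rc' means "we were first".
	 */
	rc = atomic_cmpxchg(&hdev->last_error.cs_timeout.write_enable, 1, 0);
	if (rc) {
		hdev->last_error.cs_timeout.timestamp = ktime_get();
		hdev->last_error.cs_timeout.seq = cs->sequence;
	}
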
@@ -916,7 +1006,7 @@ static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
staged_cs_put(hdev, cs);
list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
- complete_job(hdev, job);
+ hl_complete_job(hdev, job);
}
void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
@@ -933,6 +1023,7 @@ void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
flush_workqueue(hdev->cq_wq[i]);
+ flush_workqueue(hdev->cs_cmplt_wq);
}
/* Make sure we don't have leftovers in the CS mirror list */
@@ -940,7 +1031,7 @@ void hl_cs_rollback_all(struct hl_device *hdev, bool skip_wq_flush)
cs_get(cs);
cs->aborted = true;
dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
- cs->ctx->asid, cs->sequence);
+ cs->ctx->asid, cs->sequence);
cs_rollback(hdev, cs);
cs_put(cs);
}
@@ -989,7 +1080,10 @@ void hl_release_pending_user_interrupts(struct hl_device *hdev)
wake_pending_user_interrupt_threads(interrupt);
}
- interrupt = &hdev->common_user_interrupt;
+ interrupt = &hdev->common_user_cq_interrupt;
+ wake_pending_user_interrupt_threads(interrupt);
+
+ interrupt = &hdev->common_decoder_interrupt;
wake_pending_user_interrupt_threads(interrupt);
}
@@ -1001,7 +1095,17 @@ static void job_wq_completion(struct work_struct *work)
struct hl_device *hdev = cs->ctx->hdev;
/* job is no longer needed */
- complete_job(hdev, job);
+ hl_complete_job(hdev, job);
+}
+
+static void cs_completion(struct work_struct *work)
+{
+ struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
+ struct hl_device *hdev = cs->ctx->hdev;
+ struct hl_cs_job *job, *tmp;
+
+ list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+ hl_complete_job(hdev, job);
}
static int validate_queue_index(struct hl_device *hdev,
@@ -1024,7 +1128,13 @@ static int validate_queue_index(struct hl_device *hdev,
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
- dev_err(hdev->dev, "Queue index %d is invalid\n",
+ dev_err(hdev->dev, "Queue index %d is not applicable\n",
+ chunk->queue_index);
+ return -EINVAL;
+ }
+
+ if (hw_queue_prop->binned) {
+ dev_err(hdev->dev, "Queue index %d is binned out\n",
chunk->queue_index);
return -EINVAL;
}
@@ -1166,17 +1276,16 @@ static int hl_cs_sanity_checks(struct hl_fpriv *hpriv, union hl_cs_args *args)
cs_type = hl_cs_get_cs_type(cs_type_flags);
num_chunks = args->in.num_chunks_execute;
- if (unlikely((cs_type != CS_TYPE_DEFAULT) &&
- !hdev->supports_sync_stream)) {
+ if (unlikely((cs_type == CS_TYPE_SIGNAL || cs_type == CS_TYPE_WAIT ||
+ cs_type == CS_TYPE_COLLECTIVE_WAIT) &&
+ !hdev->supports_sync_stream)) {
dev_err(hdev->dev, "Sync stream CS is not supported\n");
return -EINVAL;
}
if (cs_type == CS_TYPE_DEFAULT) {
if (!num_chunks) {
- dev_err(hdev->dev,
- "Got execute CS with 0 chunks, context %d\n",
- ctx->asid);
+ dev_err(hdev->dev, "Got execute CS with 0 chunks, context %d\n", ctx->asid);
return -EINVAL;
}
} else if (num_chunks != 1) {
@@ -1276,7 +1385,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
u32 encaps_signals_handle, u32 timeout,
u16 *signal_initial_sob_count)
{
- bool staged_mid, int_queues_only = true;
+ bool staged_mid, int_queues_only = true, using_hw_queues = false;
struct hl_device *hdev = hpriv->hdev;
struct hl_cs_chunk *cs_chunk_array;
struct hl_cs_counters_atomic *cntr;
@@ -1365,6 +1474,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
chunk->queue_index);
}
+ if (queue_type == QUEUE_TYPE_HW)
+ using_hw_queues = true;
+
job = hl_cs_allocate_job(hdev, queue_type,
is_kernel_allocated_cb);
if (!job) {
@@ -1385,6 +1497,7 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
job->hw_queue_id = chunk->queue_index;
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+ cs->jobs_cnt++;
list_add_tail(&job->cs_node, &cs->job_list);
@@ -1425,6 +1538,9 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
goto free_cs_object;
}
+ if (using_hw_queues)
+ INIT_WORK(&cs->finish_work, cs_completion);
+
/*
* store the (external/HW queues) streams used by the CS in the
* fence object for multi-CS completion
@@ -1773,6 +1889,7 @@ static int cs_ioctl_signal_wait_create_jobs(struct hl_device *hdev,
cs_get(cs);
cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+ cs->jobs_cnt++;
list_add_tail(&job->cs_node, &cs->job_list);
@@ -2191,6 +2308,9 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
if (rc)
goto free_cs_object;
+ if (q_type == QUEUE_TYPE_HW)
+ INIT_WORK(&cs->finish_work, cs_completion);
+
rc = hl_hw_queue_schedule_cs(cs);
if (rc) {
/* In case wait cs failed here, it means the signal cs
@@ -2321,12 +2441,12 @@ out:
}
static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence,
- enum hl_cs_wait_status *status, u64 timeout_us,
- s64 *timestamp)
+ enum hl_cs_wait_status *status, u64 timeout_us, s64 *timestamp)
{
struct hl_device *hdev = ctx->hdev;
+ ktime_t timestamp_kt;
long completion_rc;
- int rc = 0;
+ int rc = 0, error;
if (IS_ERR(fence)) {
rc = PTR_ERR(fence);
@@ -2338,12 +2458,16 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
}
if (!fence) {
- dev_dbg(hdev->dev,
- "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
+ if (!hl_pop_cs_outcome(&ctx->outcome_store, seq, &timestamp_kt, &error)) {
+ dev_dbg(hdev->dev,
+ "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n",
seq, ctx->cs_sequence);
+ *status = CS_WAIT_STATUS_GONE;
+ return 0;
+ }
- *status = CS_WAIT_STATUS_GONE;
- return 0;
+ completion_rc = 1;
+ goto report_results;
}
if (!timeout_us) {
@@ -2358,18 +2482,20 @@ static int hl_wait_for_fence(struct hl_ctx *ctx, u64 seq, struct hl_fence *fence
&fence->completion, timeout);
}
+ error = fence->error;
+ timestamp_kt = fence->timestamp;
+
+report_results:
if (completion_rc > 0) {
*status = CS_WAIT_STATUS_COMPLETED;
if (timestamp)
- *timestamp = ktime_to_ns(fence->timestamp);
+ *timestamp = ktime_to_ns(timestamp_kt);
} else {
*status = CS_WAIT_STATUS_BUSY;
}
- if (fence->error == -ETIMEDOUT)
- rc = -ETIMEDOUT;
- else if (fence->error == -EIO)
- rc = -EIO;
+ if (error == -ETIMEDOUT || error == -EIO)
+ rc = error;
return rc;
}
@@ -2443,8 +2569,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
* function won't sleep as it is called with timeout 0 (i.e.
* poll the fence)
*/
- rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence,
- &status, 0, NULL);
+ rc = hl_wait_for_fence(mcs_data->ctx, seq_arr[i], fence, &status, 0, NULL);
if (rc) {
dev_err(hdev->dev,
"wait_for_fence error :%d for CS seq %llu\n",
@@ -2482,7 +2607,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
* For this we have to validate that the timestamp is
* earliest of all timestamps so far.
*/
- if (mcs_data->update_ts &&
+ if (fence && mcs_data->update_ts &&
(ktime_compare(fence->timestamp, first_cs_time) < 0))
first_cs_time = fence->timestamp;
break;
@@ -2513,8 +2638,7 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data, struct multi_cs_com
return rc;
}
-static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
- u64 timeout_us, u64 seq,
+static int _hl_cs_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx, u64 timeout_us, u64 seq,
enum hl_cs_wait_status *status, s64 *timestamp)
{
struct hl_fence *fence;
@@ -2815,8 +2939,7 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
s64 timestamp;
int rc;
- rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
- &status, &timestamp);
+ rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq, &status, &timestamp);
if (rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
@@ -2880,7 +3003,7 @@ static int ts_buff_get_kernel_ts_record(struct hl_mmap_mem_buf *buf,
u64 current_cq_counter;
/* Validate ts_offset not exceeding last max */
- if (requested_offset_record > cb_last) {
+ if (requested_offset_record >= cb_last) {
dev_err(buf->mmg->dev, "Ts offset exceeds max CB offset(0x%llx)\n",
(u64)(uintptr_t)cb_last);
return -EINVAL;
@@ -2936,8 +3059,8 @@ start_over:
*pend = requested_offset_record;
- dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB(0x%llx)\n",
- (u64)(uintptr_t)requested_offset_record);
+ dev_dbg(buf->mmg->dev, "Found available node in TS kernel CB %p\n",
+ requested_offset_record);
return 0;
}
@@ -2965,6 +3088,13 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
goto put_ctx;
}
+ /* Validate the cq offset */
+ if (((u64 *) cq_cb->kernel_address + cq_counters_offset) >=
+ ((u64 *) cq_cb->kernel_address + (cq_cb->size / sizeof(u64)))) {
+ rc = -EINVAL;
+ goto put_cq_cb;
+ }
+
if (register_ts_record) {
dev_dbg(hdev->dev, "Timestamp registration: interrupt id: %u, ts offset: %llu, cq_offset: %llu\n",
interrupt->interrupt_id, ts_offset, cq_counters_offset);
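The pointer comparison in the new validation reduces to a plain bounds check on the offset; an equivalent sketch (assuming cq_cb->size is a multiple of sizeof(u64)):

	/* reject any counter offset that points at or past the end of the
	 * CQ counters buffer
	 */
	if (cq_counters_offset >= cq_cb->size / sizeof(u64)) {
		rc = -EINVAL;
		goto put_cq_cb;
	}
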
@@ -3094,7 +3224,6 @@ put_ctx:
static int _hl_interrupt_wait_ioctl_user_addr(struct hl_device *hdev, struct hl_ctx *ctx,
u64 timeout_us, u64 user_address,
u64 target_value, struct hl_user_interrupt *interrupt,
-
u32 *status,
u64 *timestamp)
{
@@ -3216,33 +3345,46 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
struct hl_user_interrupt *interrupt;
union hl_wait_cs_args *args = data;
u32 status = HL_WAIT_CS_STATUS_BUSY;
- u64 timestamp;
- int rc;
+ u64 timestamp = 0;
+ int rc, int_idx;
prop = &hdev->asic_prop;
- if (!prop->user_interrupt_count) {
+ if (!(prop->user_interrupt_count + prop->user_dec_intr_count)) {
dev_err(hdev->dev, "no user interrupts allowed");
return -EPERM;
}
interrupt_id = FIELD_GET(HL_WAIT_CS_FLAGS_INTERRUPT_MASK, args->in.flags);
- first_interrupt = prop->first_available_user_msix_interrupt;
- last_interrupt = prop->first_available_user_msix_interrupt +
- prop->user_interrupt_count - 1;
+ first_interrupt = prop->first_available_user_interrupt;
+ last_interrupt = prop->first_available_user_interrupt + prop->user_interrupt_count - 1;
+
+ if (interrupt_id < prop->user_dec_intr_count) {
+
+ /* Check if the requested core is enabled */
+ if (!(prop->decoder_enabled_mask & BIT(interrupt_id))) {
+ dev_err(hdev->dev, "interrupt on a disabled core(%u) not allowed",
+ interrupt_id);
+ return -EINVAL;
+ }
+
+ interrupt = &hdev->user_interrupt[interrupt_id];
- if ((interrupt_id < first_interrupt || interrupt_id > last_interrupt) &&
- interrupt_id != HL_COMMON_USER_INTERRUPT_ID) {
+ } else if (interrupt_id >= first_interrupt && interrupt_id <= last_interrupt) {
+
+ int_idx = interrupt_id - first_interrupt + prop->user_dec_intr_count;
+ interrupt = &hdev->user_interrupt[int_idx];
+
+ } else if (interrupt_id == HL_COMMON_USER_CQ_INTERRUPT_ID) {
+ interrupt = &hdev->common_user_cq_interrupt;
+ } else if (interrupt_id == HL_COMMON_DEC_INTERRUPT_ID) {
+ interrupt = &hdev->common_decoder_interrupt;
+ } else {
dev_err(hdev->dev, "invalid user interrupt %u", interrupt_id);
return -EINVAL;
}
- if (interrupt_id == HL_COMMON_USER_INTERRUPT_ID)
- interrupt = &hdev->common_user_interrupt;
- else
- interrupt = &hdev->user_interrupt[interrupt_id - first_interrupt];
-
if (args->in.flags & HL_WAIT_CS_FLAGS_INTERRUPT_KERNEL_CQ)
rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx, &hpriv->mem_mgr, &hpriv->mem_mgr,
args->in.interrupt_timeout_us, args->in.cq_counters_handle,
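In summary, the rewritten dispatch routes a user-supplied interrupt ID as follows (a hedged paraphrase of the branches above):

	/* id <  user_dec_intr_count                -> per-decoder interrupt
	 *       (rejected if that core is disabled in decoder_enabled_mask)
	 * first_interrupt <= id <= last_interrupt  -> per-user interrupt,
	 *       indexed after the decoder entries in hdev->user_interrupt[]
	 * id == HL_COMMON_USER_CQ_INTERRUPT_ID     -> common user CQ interrupt
	 * id == HL_COMMON_DEC_INTERRUPT_ID         -> common decoder interrupt
	 * anything else                            -> -EINVAL
	 */
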
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index ed2cfd0c6e99..2f4620b7990c 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -102,13 +102,13 @@ static void hl_ctx_fini(struct hl_ctx *ctx)
hl_device_set_debug_mode(hdev, ctx, false);
hdev->asic_funcs->ctx_fini(ctx);
+
+ hl_dec_ctx_fini(ctx);
+
hl_cb_va_pool_fini(ctx);
hl_vm_ctx_fini(ctx);
hl_asid_free(hdev, ctx->asid);
hl_encaps_sig_mgr_fini(hdev, &ctx->sig_mgr);
-
- /* Scrub both SRAM and DRAM */
- hdev->asic_funcs->scrub_device_mem(hdev, 0, 0);
} else {
dev_dbg(hdev->dev, "closing kernel context\n");
hdev->asic_funcs->ctx_fini(ctx);
@@ -125,15 +125,22 @@ void hl_ctx_do_release(struct kref *ref)
hl_ctx_fini(ctx);
- if (ctx->hpriv)
- hl_hpriv_put(ctx->hpriv);
+ if (ctx->hpriv) {
+ struct hl_fpriv *hpriv = ctx->hpriv;
+
+ mutex_lock(&hpriv->ctx_lock);
+ hpriv->ctx = NULL;
+ mutex_unlock(&hpriv->ctx_lock);
+
+ hl_hpriv_put(hpriv);
+ }
kfree(ctx);
}
int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
{
- struct hl_ctx_mgr *mgr = &hpriv->ctx_mgr;
+ struct hl_ctx_mgr *ctx_mgr = &hpriv->ctx_mgr;
struct hl_ctx *ctx;
int rc;
@@ -143,9 +150,9 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
goto out_err;
}
- mutex_lock(&mgr->ctx_lock);
- rc = idr_alloc(&mgr->ctx_handles, ctx, 1, 0, GFP_KERNEL);
- mutex_unlock(&mgr->ctx_lock);
+ mutex_lock(&ctx_mgr->lock);
+ rc = idr_alloc(&ctx_mgr->handles, ctx, 1, 0, GFP_KERNEL);
+ mutex_unlock(&ctx_mgr->lock);
if (rc < 0) {
dev_err(hdev->dev, "Failed to allocate IDR for a new CTX\n");
@@ -170,9 +177,9 @@ int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv)
return 0;
remove_from_idr:
- mutex_lock(&mgr->ctx_lock);
- idr_remove(&mgr->ctx_handles, ctx->handle);
- mutex_unlock(&mgr->ctx_lock);
+ mutex_lock(&ctx_mgr->lock);
+ idr_remove(&ctx_mgr->handles, ctx->handle);
+ mutex_unlock(&ctx_mgr->lock);
free_ctx:
kfree(ctx);
out_err:
@@ -181,7 +188,7 @@ out_err:
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
- int rc = 0;
+ int rc = 0, i;
ctx->hdev = hdev;
@@ -197,6 +204,13 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
if (!ctx->cs_pending)
return -ENOMEM;
+ INIT_LIST_HEAD(&ctx->outcome_store.used_list);
+ INIT_LIST_HEAD(&ctx->outcome_store.free_list);
+ hash_init(ctx->outcome_store.outcome_map);
+ for (i = 0; i < ARRAY_SIZE(ctx->outcome_store.nodes_pool); ++i)
+ list_add(&ctx->outcome_store.nodes_pool[i].list_link,
+ &ctx->outcome_store.free_list);
+
hl_hw_block_mem_init(ctx);
if (is_kernel_ctx) {
@@ -262,6 +276,11 @@ err_hw_block_mem_fini:
return rc;
}
+static int hl_ctx_get_unless_zero(struct hl_ctx *ctx)
+{
+ return kref_get_unless_zero(&ctx->refcount);
+}
+
void hl_ctx_get(struct hl_ctx *ctx)
{
kref_get(&ctx->refcount);
@@ -280,11 +299,15 @@ struct hl_ctx *hl_get_compute_ctx(struct hl_device *hdev)
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node) {
+ mutex_lock(&hpriv->ctx_lock);
+ ctx = hpriv->ctx;
+ if (ctx && !hl_ctx_get_unless_zero(ctx))
+ ctx = NULL;
+ mutex_unlock(&hpriv->ctx_lock);
+
/* There can only be a single user which has opened the compute device, so exit
- * immediately once we find him
+ * immediately once we find its context or if we see that it has been released
*/
- ctx = hpriv->ctx;
- hl_ctx_get(ctx);
break;
}
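The lookup above relies on kref_get_unless_zero() to close a use-after-free race: the context may be mid-teardown while another thread walks the fpriv list. A minimal sketch of the idiom:

	/* Under the lock that protects the pointer, take a reference only
	 * if the refcount has not already dropped to zero; otherwise the
	 * object is being released and must be treated as gone.
	 */
	mutex_lock(&hpriv->ctx_lock);
	ctx = hpriv->ctx;
	if (ctx && !kref_get_unless_zero(&ctx->refcount))
		ctx = NULL;
	mutex_unlock(&hpriv->ctx_lock);
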
@@ -376,37 +399,37 @@ int hl_ctx_get_fences(struct hl_ctx *ctx, u64 *seq_arr,
/*
* hl_ctx_mgr_init - initialize the context manager
*
- * @mgr: pointer to context manager structure
+ * @ctx_mgr: pointer to context manager structure
*
* This manager is an object inside the hpriv object of the user process.
* The function is called when a user process opens the FD.
*/
-void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr)
+void hl_ctx_mgr_init(struct hl_ctx_mgr *ctx_mgr)
{
- mutex_init(&mgr->ctx_lock);
- idr_init(&mgr->ctx_handles);
+ mutex_init(&ctx_mgr->lock);
+ idr_init(&ctx_mgr->handles);
}
/*
* hl_ctx_mgr_fini - finalize the context manager
*
* @hdev: pointer to device structure
- * @mgr: pointer to context manager structure
+ * @ctx_mgr: pointer to context manager structure
*
* This function goes over all the contexts in the manager and frees them.
* It is called when a process closes the FD.
*/
-void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
+void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *ctx_mgr)
{
struct hl_ctx *ctx;
struct idr *idp;
u32 id;
- idp = &mgr->ctx_handles;
+ idp = &ctx_mgr->handles;
idr_for_each_entry(idp, ctx, id)
kref_put(&ctx->refcount, hl_ctx_do_release);
- idr_destroy(&mgr->ctx_handles);
- mutex_destroy(&mgr->ctx_lock);
+ idr_destroy(&ctx_mgr->handles);
+ mutex_destroy(&ctx_mgr->lock);
}
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index c6744bfc6da4..64439f33a19b 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -152,12 +152,12 @@ static int command_submission_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " CS ID CTX ASID CS RefCnt Submitted Completed\n");
- seq_puts(s, "------------------------------------------------------\n");
+ seq_puts(s, " CS ID CS TYPE CTX ASID CS RefCnt Submitted Completed\n");
+ seq_puts(s, "----------------------------------------------------------------\n");
}
seq_printf(s,
- " %llu %d %d %d %d\n",
- cs->sequence, cs->ctx->asid,
+ " %llu %d %d %d %d %d\n",
+ cs->sequence, cs->type, cs->ctx->asid,
kref_read(&cs->refcount),
cs->submitted, cs->completed);
}
@@ -183,17 +183,18 @@ static int command_submission_jobs_show(struct seq_file *s, void *data)
if (first) {
first = false;
seq_puts(s, "\n");
- seq_puts(s, " JOB ID CS ID CTX ASID JOB RefCnt H/W Queue\n");
- seq_puts(s, "----------------------------------------------------\n");
+ seq_puts(s, " JOB ID CS ID CS TYPE CTX ASID JOB RefCnt H/W Queue\n");
+ seq_puts(s, "---------------------------------------------------------------\n");
}
if (job->cs)
seq_printf(s,
- " %02d %llu %d %d %d\n",
- job->id, job->cs->sequence, job->cs->ctx->asid,
- kref_read(&job->refcount), job->hw_queue_id);
+ " %02d %llu %d %d %d %d\n",
+ job->id, job->cs->sequence, job->cs->type,
+ job->cs->ctx->asid, kref_read(&job->refcount),
+ job->hw_queue_id);
else
seq_printf(s,
- " %02d 0 %d %d %d\n",
+ " %02d 0 0 %d %d %d\n",
job->id, HL_KERNEL_ASID_ID,
kref_read(&job->refcount), job->hw_queue_id);
}
@@ -449,7 +450,7 @@ static int mmu_show(struct seq_file *s, void *data)
if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
virt_addr);
- return 0;
+ goto put_ctx;
}
hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);
@@ -475,6 +476,10 @@ static int mmu_show(struct seq_file *s, void *data)
i, hops_info.hop_info[i].hop_pte_val);
}
+put_ctx:
+ if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
+ hl_ctx_put(ctx);
+
return 0;
}
@@ -521,6 +526,66 @@ err:
return -EINVAL;
}
+static int mmu_ack_error(struct seq_file *s, void *data)
+{
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ int rc;
+
+ if (!hdev->mmu_enable)
+ return 0;
+
+ if (!dev_entry->mmu_cap_mask) {
+ dev_err(hdev->dev, "mmu_cap_mask is not set\n");
+ goto err;
+ }
+
+ rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
+ if (rc)
+ goto err;
+
+ return 0;
+err:
+ return -EINVAL;
+}
+
+static ssize_t mmu_ack_error_value_write(struct file *file,
+ const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct seq_file *s = file->private_data;
+ struct hl_debugfs_entry *entry = s->private;
+ struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
+ struct hl_device *hdev = dev_entry->hdev;
+ char kbuf[MMU_KBUF_SIZE];
+ ssize_t rc;
+
+ if (!hdev->mmu_enable)
+ return count;
+
+ if (count > sizeof(kbuf) - 1)
+ goto err;
+
+ if (copy_from_user(kbuf, buf, count))
+ goto err;
+
+ kbuf[count] = 0;
+
+ if (strncmp(kbuf, "0x", 2))
+ goto err;
+
+ rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
+ if (rc)
+ goto err;
+
+ return count;
+err:
+ dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask > > mmu_error\n");
+
+ return -EINVAL;
+}
+
static int engines_show(struct seq_file *s, void *data)
{
struct hl_debugfs_entry *entry = s->private;
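Usage sketch for the new node (the debugfs path and mask value here are hypothetical; the mask bits are ASIC-specific): writing echo 0x1 > /sys/kernel/debug/habanalabs/hl0/mmu_error stores the capability mask via kstrtoull(), and a subsequent read of the same node invokes ack_mmu_errors() with that mask.
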
@@ -543,7 +608,7 @@ static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
- u64 val = entry->memory_scrub_val;
+ u64 val = hdev->memory_scrub_val;
int rc;
if (!hl_device_operational(hdev, NULL)) {
@@ -666,7 +731,8 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
dev_err(hdev->dev,
"virt addr 0x%llx is not mapped\n",
virt_addr);
- return -EINVAL;
+ rc = -EINVAL;
+ goto put_ctx;
}
rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
@@ -677,6 +743,9 @@ static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
rc = -EINVAL;
}
+put_ctx:
+ hl_ctx_put(ctx);
+
return rc;
}
@@ -695,8 +764,7 @@ static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
if (addr >= mem_reg->region_base &&
addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
*found = true;
- return hdev->asic_funcs->access_dev_mem(hdev, mem_reg, i,
- addr, val, acc_type);
+ return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
}
}
return 0;
@@ -728,7 +796,7 @@ static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
}
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
- enum debugfs_access_type acc_type)
+ enum debugfs_access_type acc_type)
{
size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
sizeof(u64) : sizeof(u32);
@@ -1349,6 +1417,17 @@ static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+
+ hdev->asic_funcs->check_if_razwi_happened(hdev);
+
+ return 0;
+}
+
static const struct file_operations hl_mem_scrub_fops = {
.owner = THIS_MODULE,
.write = hl_memory_scrub,
@@ -1438,6 +1517,11 @@ static const struct file_operations hl_timeout_locked_fops = {
.write = hl_timeout_locked_write
};
+static const struct file_operations hl_razwi_check_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_check_razwi_happened
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1446,7 +1530,8 @@ static const struct hl_info_list hl_debugfs_list[] = {
{"vm", vm_show, NULL},
{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
{"mmu", mmu_show, mmu_asid_va_write},
- {"engines", engines_show, NULL}
+ {"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
+ {"engines", engines_show, NULL},
};
static int hl_debugfs_open(struct inode *inode, struct file *file)
@@ -1477,6 +1562,53 @@ static const struct file_operations hl_debugfs_fops = {
.release = single_release,
};
+static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry)
+{
+ debugfs_create_u8("i2c_bus",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_bus);
+
+ debugfs_create_u8("i2c_addr",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_addr);
+
+ debugfs_create_u8("i2c_reg",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_reg);
+
+ debugfs_create_u8("i2c_len",
+ 0644,
+ dev_entry->root,
+ &dev_entry->i2c_len);
+
+ debugfs_create_file("i2c_data",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_i2c_data_fops);
+
+ debugfs_create_file("led0",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led0_fops);
+
+ debugfs_create_file("led1",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led1_fops);
+
+ debugfs_create_file("led2",
+ 0200,
+ dev_entry->root,
+ dev_entry,
+ &hl_led2_fops);
+}
+
void hl_debugfs_add_device(struct hl_device *hdev)
{
struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
@@ -1516,7 +1648,7 @@ void hl_debugfs_add_device(struct hl_device *hdev)
debugfs_create_x64("memory_scrub_val",
0644,
dev_entry->root,
- &dev_entry->memory_scrub_val);
+ &hdev->memory_scrub_val);
debugfs_create_file("memory_scrub",
0200,
@@ -1547,50 +1679,6 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_power_fops);
- debugfs_create_u8("i2c_bus",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_bus);
-
- debugfs_create_u8("i2c_addr",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_addr);
-
- debugfs_create_u8("i2c_reg",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_reg);
-
- debugfs_create_u8("i2c_len",
- 0644,
- dev_entry->root,
- &dev_entry->i2c_len);
-
- debugfs_create_file("i2c_data",
- 0644,
- dev_entry->root,
- dev_entry,
- &hl_i2c_data_fops);
-
- debugfs_create_file("led0",
- 0200,
- dev_entry->root,
- dev_entry,
- &hl_led0_fops);
-
- debugfs_create_file("led1",
- 0200,
- dev_entry->root,
- dev_entry,
- &hl_led1_fops);
-
- debugfs_create_file("led2",
- 0200,
- dev_entry->root,
- dev_entry,
- &hl_led2_fops);
-
debugfs_create_file("device",
0200,
dev_entry->root,
@@ -1615,6 +1703,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_security_violations_fops);
+ debugfs_create_file("dump_razwi_events",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_razwi_check_fops);
+
debugfs_create_file("dma_size",
0200,
dev_entry->root,
@@ -1663,6 +1757,9 @@ void hl_debugfs_add_device(struct hl_device *hdev)
entry->info_ent = &hl_debugfs_list[i];
entry->dev_entry = dev_entry;
}
+
+ if (!hdev->asic_prop.fw_security_enabled)
+ add_secured_nodes(dev_entry);
}
void hl_debugfs_remove_device(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/common/decoder.c b/drivers/misc/habanalabs/common/decoder.c
new file mode 100644
index 000000000000..2aab14d74b53
--- /dev/null
+++ b/drivers/misc/habanalabs/common/decoder.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+#define VCMD_CONTROL_OFFSET 0x40 /* SWREG16 */
+#define VCMD_IRQ_STATUS_OFFSET 0x44 /* SWREG17 */
+
+#define VCMD_IRQ_STATUS_ENDCMD_MASK 0x1
+#define VCMD_IRQ_STATUS_BUSERR_MASK 0x2
+#define VCMD_IRQ_STATUS_TIMEOUT_MASK 0x4
+#define VCMD_IRQ_STATUS_CMDERR_MASK 0x8
+#define VCMD_IRQ_STATUS_ABORT_MASK 0x10
+#define VCMD_IRQ_STATUS_RESET_MASK 0x20
+
+static void dec_print_abnrm_intr_source(struct hl_device *hdev, u32 irq_status)
+{
+ const char *format = "abnormal interrupt source:%s%s%s%s%s%s\n";
+ char *intr_source[6] = {"Unknown", "", "", "", "", ""};
+ int i = 0;
+
+ if (!irq_status)
+ return;
+
+ if (irq_status & VCMD_IRQ_STATUS_ENDCMD_MASK)
+ intr_source[i++] = " ENDCMD";
+ if (irq_status & VCMD_IRQ_STATUS_BUSERR_MASK)
+ intr_source[i++] = " BUSERR";
+ if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
+ intr_source[i++] = " TIMEOUT";
+ if (irq_status & VCMD_IRQ_STATUS_CMDERR_MASK)
+ intr_source[i++] = " CMDERR";
+ if (irq_status & VCMD_IRQ_STATUS_ABORT_MASK)
+ intr_source[i++] = " ABORT";
+ if (irq_status & VCMD_IRQ_STATUS_RESET_MASK)
+ intr_source[i++] = " RESET";
+
+ dev_err(hdev->dev, format, intr_source[0], intr_source[1],
+ intr_source[2], intr_source[3], intr_source[4], intr_source[5]);
+}
+
+static void dec_error_intr_work(struct hl_device *hdev, u32 base_addr, u32 core_id)
+{
+ bool reset_required = false;
+ u32 irq_status;
+
+ irq_status = RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
+
+ dev_err(hdev->dev, "Decoder abnormal interrupt %#x, core %d\n", irq_status, core_id);
+
+ dec_print_abnrm_intr_source(hdev, irq_status);
+
+ if (irq_status & VCMD_IRQ_STATUS_TIMEOUT_MASK)
+ reset_required = true;
+
+ /* Clear the interrupt */
+ WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
+
+ /* Flush the interrupt clear */
+ RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
+
+ if (reset_required)
+ hl_device_reset(hdev, HL_DRV_RESET_HARD);
+}
+
+static void dec_completion_abnrm(struct work_struct *work)
+{
+ struct hl_dec *dec = container_of(work, struct hl_dec, completion_abnrm_work);
+ struct hl_device *hdev = dec->hdev;
+
+ dec_error_intr_work(hdev, dec->base_addr, dec->core_id);
+}
+
+void hl_dec_fini(struct hl_device *hdev)
+{
+ kfree(hdev->dec);
+}
+
+int hl_dec_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_dec *dec;
+ int rc, j;
+
+ /* if max core is 0, nothing to do */
+ if (!prop->max_dec)
+ return 0;
+
+ hdev->dec = kcalloc(prop->max_dec, sizeof(struct hl_dec), GFP_KERNEL);
+ if (!hdev->dec)
+ return -ENOMEM;
+
+ for (j = 0 ; j < prop->max_dec ; j++) {
+ dec = hdev->dec + j;
+
+ dec->hdev = hdev;
+ INIT_WORK(&dec->completion_abnrm_work, dec_completion_abnrm);
+ dec->core_id = j;
+ dec->base_addr = hdev->asic_funcs->get_dec_base_addr(hdev, j);
+ if (!dec->base_addr) {
+ dev_err(hdev->dev, "Invalid base address of decoder %d\n", j);
+ rc = -EINVAL;
+ goto err_dec_fini;
+ }
+ }
+
+ return 0;
+
+err_dec_fini:
+ hl_dec_fini(hdev);
+
+ return rc;
+}
+
+void hl_dec_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_dec *dec;
+ int j;
+
+ for (j = 0 ; j < prop->max_dec ; j++) {
+ if (!!(prop->decoder_enabled_mask & BIT(j))) {
+ dec = hdev->dec + j;
+ /* Stop the decoder */
+ WREG32(dec->base_addr + VCMD_CONTROL_OFFSET, 0);
+ }
+ }
+}
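One detail worth calling out in dec_error_intr_work() above is the write-then-read-back sequence on the IRQ status register; a hedged sketch of the idiom:

	/* MMIO writes over PCIe are posted: write the status bits back to
	 * clear the interrupt (the clear above suggests write-1-to-clear
	 * semantics), then read the same register to force the posted
	 * write to reach the device before acting on any reset decision.
	 */
	WREG32(base_addr + VCMD_IRQ_STATUS_OFFSET, irq_status);
	(void)RREG32(base_addr + VCMD_IRQ_STATUS_OFFSET);
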
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index b4f14c6d3970..b30aeb1c657f 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -15,6 +15,14 @@
#define HL_RESET_DELAY_USEC 10000 /* 10ms */
+enum dma_alloc_type {
+ DMA_ALLOC_COHERENT,
+ DMA_ALLOC_CPU_ACCESSIBLE,
+ DMA_ALLOC_POOL,
+};
+
+#define MEM_SCRUB_DEFAULT_VAL 0x1122334455667788
+
/*
* hl_set_dram_bar- sets the bar to allow later access to address
*
@@ -44,7 +52,7 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
enum debugfs_access_type acc_type, enum pci_region region_type)
{
struct pci_mem_region *region = &hdev->pci_mem_region[region_type];
- u64 old_base, rc;
+ u64 old_base = 0, rc;
if (region_type == PCI_REGION_DRAM) {
old_base = hl_set_dram_bar(hdev, addr);
@@ -88,6 +96,75 @@ static int hl_access_sram_dram_region(struct hl_device *hdev, u64 addr, u64 *val
return 0;
}
+static void *hl_dma_alloc_common(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag, enum dma_alloc_type alloc_type)
+{
+ void *ptr;
+
+ switch (alloc_type) {
+ case DMA_ALLOC_COHERENT:
+ ptr = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, size, dma_handle, flag);
+ break;
+ case DMA_ALLOC_CPU_ACCESSIBLE:
+ ptr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, size, dma_handle);
+ break;
+ case DMA_ALLOC_POOL:
+ ptr = hdev->asic_funcs->asic_dma_pool_zalloc(hdev, size, flag, dma_handle);
+ break;
+ }
+
+ return ptr;
+}
+
+static void hl_asic_dma_free_common(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle, enum dma_alloc_type alloc_type)
+{
+ switch (alloc_type) {
+ case DMA_ALLOC_COHERENT:
+ hdev->asic_funcs->asic_dma_free_coherent(hdev, size, cpu_addr, dma_handle);
+ break;
+ case DMA_ALLOC_CPU_ACCESSIBLE:
+ hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, size, cpu_addr);
+ break;
+ case DMA_ALLOC_POOL:
+ hdev->asic_funcs->asic_dma_pool_free(hdev, cpu_addr, dma_handle);
+ break;
+ }
+}
+
+void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ return hl_dma_alloc_common(hdev, size, dma_handle, flag, DMA_ALLOC_COHERENT);
+}
+
+void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ hl_asic_dma_free_common(hdev, size, cpu_addr, dma_handle, DMA_ALLOC_COHERENT);
+}
+
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle)
+{
+ return hl_dma_alloc_common(hdev, size, dma_handle, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+}
+
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr)
+{
+ hl_asic_dma_free_common(hdev, size, vaddr, 0, DMA_ALLOC_CPU_ACCESSIBLE);
+}
+
+void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle)
+{
+ return hl_dma_alloc_common(hdev, size, dma_handle, mem_flags, DMA_ALLOC_POOL);
+}
+
+void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr)
+{
+ hl_asic_dma_free_common(hdev, 0, vaddr, dma_addr, DMA_ALLOC_POOL);
+}
+
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
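Callers now go through the new wrappers instead of dereferencing hdev->asic_funcs directly; a hedged usage sketch ('size' and the error handling are illustrative):

	/* allocate and later free a coherent DMA buffer through the
	 * common wrappers introduced above
	 */
	void *p = hl_asic_dma_alloc_coherent(hdev, size, &dma_handle, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/* ... use the buffer ... */

	hl_asic_dma_free_coherent(hdev, size, p, dma_handle);
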
@@ -168,14 +245,13 @@ int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
* hl_access_dev_mem - access device memory
*
* @hdev: pointer to habanalabs device structure
- * @region: the memory region the address belongs to
* @region_type: the type of the region the address belongs to
* @addr: the address to access
* @val: the value to write from or read to
* @acc_type: the type of access (r/w, 32/64)
*/
-int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
- enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type)
+int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
+ u64 addr, u64 *val, enum debugfs_access_type acc_type)
{
switch (region_type) {
case PCI_REGION_CFG:
@@ -195,16 +271,20 @@ enum hl_device_status hl_device_status(struct hl_device *hdev)
{
enum hl_device_status status;
- if (hdev->reset_info.in_reset)
- status = HL_DEVICE_STATUS_IN_RESET;
- else if (hdev->reset_info.needs_reset)
+ if (hdev->reset_info.in_reset) {
+ if (hdev->reset_info.in_compute_reset)
+ status = HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE;
+ else
+ status = HL_DEVICE_STATUS_IN_RESET;
+ } else if (hdev->reset_info.needs_reset) {
status = HL_DEVICE_STATUS_NEEDS_RESET;
- else if (hdev->disabled)
+ } else if (hdev->disabled) {
status = HL_DEVICE_STATUS_MALFUNCTION;
- else if (!hdev->init_done)
+ } else if (!hdev->init_done) {
status = HL_DEVICE_STATUS_IN_DEVICE_CREATION;
- else
+ } else {
status = HL_DEVICE_STATUS_OPERATIONAL;
+ }
return status;
}
@@ -220,6 +300,7 @@ bool hl_device_operational(struct hl_device *hdev,
switch (current_status) {
case HL_DEVICE_STATUS_IN_RESET:
+ case HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE:
case HL_DEVICE_STATUS_MALFUNCTION:
case HL_DEVICE_STATUS_NEEDS_RESET:
return false;
@@ -245,6 +326,7 @@ static void hpriv_release(struct kref *ref)
hl_debugfs_remove_file(hpriv);
+ mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
if ((!hdev->pldm) && (hdev->pdev) &&
@@ -271,9 +353,14 @@ static void hpriv_release(struct kref *ref)
list_del(&hpriv->dev_node);
mutex_unlock(&hdev->fpriv_list_lock);
- if ((hdev->reset_if_device_not_idle && !device_is_idle)
- || hdev->reset_upon_device_release)
+ if (!device_is_idle || hdev->reset_upon_device_release) {
hl_device_reset(hdev, HL_DRV_RESET_DEV_RELEASE);
+ } else {
+ int rc = hdev->asic_funcs->scrub_device_mem(hdev);
+
+ if (rc)
+ dev_err(hdev->dev, "failed to scrub memory from hpriv release (%d)\n", rc);
+ }
/* Now we can mark the compute_ctx as not active. Even if a reset is running in a different
* thread, we don't care because the in_reset is marked so if a user will try to open
@@ -330,8 +417,8 @@ static int hl_device_release(struct inode *inode, struct file *filp)
*/
hl_release_pending_user_interrupts(hpriv->hdev);
- hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hdev, &hpriv->ctx_mgr);
+ hl_mem_mgr_fini(&hpriv->mem_mgr);
hdev->compute_ctx_in_release = 1;
@@ -379,7 +466,7 @@ out:
* @*filp: pointer to file structure
* @*vma: pointer to vm_area_struct of the process
*
- * Called when process does an mmap on habanalabs device. Call the device's mmap
+ * Called when process does an mmap on habanalabs device. Call the relevant mmap
* function at the end of the common code.
*/
static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
@@ -404,7 +491,6 @@ static int hl_mmap(struct file *filp, struct vm_area_struct *vma)
case HL_MMAP_TYPE_TS_BUFF:
return hl_mem_mgr_mmap(&hpriv->mem_mgr, vma, NULL);
}
-
return -EINVAL;
}
@@ -563,6 +649,14 @@ static int device_early_init(struct hl_device *hdev)
gaudi_set_asic_funcs(hdev);
strscpy(hdev->asic_name, "GAUDI SEC", sizeof(hdev->asic_name));
break;
+ case ASIC_GAUDI2:
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2", sizeof(hdev->asic_name));
+ break;
+ case ASIC_GAUDI2_SEC:
+ gaudi2_set_asic_funcs(hdev);
+ strscpy(hdev->asic_name, "GAUDI2 SEC", sizeof(hdev->asic_name));
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -604,12 +698,20 @@ static int device_early_init(struct hl_device *hdev)
goto free_cq_wq;
}
+ hdev->cs_cmplt_wq = alloc_workqueue("hl-cs-completions", WQ_UNBOUND, 0);
+ if (!hdev->cs_cmplt_wq) {
+ dev_err(hdev->dev,
+ "Failed to allocate CS completions workqueue\n");
+ rc = -ENOMEM;
+ goto free_eq_wq;
+ }
+
hdev->ts_free_obj_wq = alloc_workqueue("hl-ts-free-obj", WQ_UNBOUND, 0);
if (!hdev->ts_free_obj_wq) {
dev_err(hdev->dev,
"Failed to allocate Timestamp registration free workqueue\n");
rc = -ENOMEM;
- goto free_eq_wq;
+ goto free_cs_cmplt_wq;
}
hdev->pf_wq = alloc_workqueue("hl-prefetch", WQ_UNBOUND, 0);
@@ -666,6 +768,8 @@ free_pf_wq:
destroy_workqueue(hdev->pf_wq);
free_ts_free_wq:
destroy_workqueue(hdev->ts_free_obj_wq);
+free_cs_cmplt_wq:
+ destroy_workqueue(hdev->cs_cmplt_wq);
free_eq_wq:
destroy_workqueue(hdev->eq_wq);
free_cq_wq:
@@ -706,6 +810,7 @@ static void device_early_fini(struct hl_device *hdev)
destroy_workqueue(hdev->pf_wq);
destroy_workqueue(hdev->ts_free_obj_wq);
+ destroy_workqueue(hdev->cs_cmplt_wq);
destroy_workqueue(hdev->eq_wq);
destroy_workqueue(hdev->device_reset_work.wq);
@@ -1159,8 +1264,7 @@ static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
* of heartbeat, the device CPU is marked as disable
* so this message won't be sent
*/
- if (hl_fw_send_pci_access_msg(hdev,
- CPUCP_PACKET_DISABLE_PCI_ACCESS))
+ if (hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0))
dev_warn(hdev->dev,
"Failed to disable PCI access by F/W\n");
}
@@ -1202,7 +1306,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
skip_wq_flush = !!(flags & HL_DRV_RESET_DEV_RELEASE);
delay_reset = !!(flags & HL_DRV_RESET_DELAY);
- if (!hard_reset && !hdev->asic_prop.supports_soft_reset) {
+ if (!hard_reset && !hdev->asic_prop.supports_compute_reset) {
hard_instead_soft = true;
hard_reset = true;
}
@@ -1225,7 +1329,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
}
if (hard_instead_soft)
- dev_dbg(hdev->dev, "Doing hard-reset instead of soft-reset\n");
+ dev_dbg(hdev->dev, "Doing hard-reset instead of compute reset\n");
do_reset:
/* Re-entry of reset thread */
@@ -1241,13 +1345,20 @@ do_reset:
/* Block future CS/VM/JOB completion operations */
spin_lock(&hdev->reset_info.lock);
if (hdev->reset_info.in_reset) {
- /* We only allow scheduling of a hard reset during soft reset */
- if (hard_reset && hdev->reset_info.is_in_soft_reset)
+ /* We only allow scheduling of a hard reset during compute reset */
+ if (hard_reset && hdev->reset_info.in_compute_reset)
hdev->reset_info.hard_reset_schedule_flags = flags;
spin_unlock(&hdev->reset_info.lock);
return 0;
}
+
+ /* This still allows the completion of some KDMA ops
+ * Update this before in_reset because in_compute_reset implies we are in reset
+ */
+ hdev->reset_info.in_compute_reset = !hard_reset;
+
hdev->reset_info.in_reset = 1;
+
spin_unlock(&hdev->reset_info.lock);
if (delay_reset)
@@ -1255,9 +1366,6 @@ do_reset:
handle_reset_trigger(hdev, flags);
- /* This still allows the completion of some KDMA ops */
- hdev->reset_info.is_in_soft_reset = !hard_reset;
-
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
@@ -1445,7 +1553,8 @@ kill_processes:
goto out_err;
}
- hl_fw_set_max_power(hdev);
+ if (!hdev->asic_prop.fw_security_enabled)
+ hl_fw_set_max_power(hdev);
} else {
rc = hdev->asic_funcs->non_hard_reset_late_init(hdev);
if (rc) {
@@ -1453,13 +1562,19 @@ kill_processes:
dev_err(hdev->dev,
"Failed late init in reset after device release\n");
else
- dev_err(hdev->dev, "Failed late init after soft reset\n");
+ dev_err(hdev->dev, "Failed late init after compute reset\n");
goto out_err;
}
}
+ rc = hdev->asic_funcs->scrub_device_mem(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "scrub mem failed from device reset (%d)\n", rc);
+ return rc;
+ }
+
spin_lock(&hdev->reset_info.lock);
- hdev->reset_info.is_in_soft_reset = false;
+ hdev->reset_info.in_compute_reset = 0;
/* Schedule hard reset only if requested and if not already in hard reset.
* We keep 'in_reset' enabled, so no other reset can go in during the hard
@@ -1489,11 +1604,11 @@ kill_processes:
*/
hdev->asic_funcs->enable_events_from_fw(hdev);
} else if (!reset_upon_device_release) {
- hdev->reset_info.soft_reset_cnt++;
+ hdev->reset_info.compute_reset_cnt++;
}
if (schedule_hard_reset) {
- dev_info(hdev->dev, "Performing hard reset scheduled during soft reset\n");
+ dev_info(hdev->dev, "Performing hard reset scheduled during compute reset\n");
flags = hdev->reset_info.hard_reset_schedule_flags;
hdev->reset_info.hard_reset_schedule_flags = 0;
hdev->disabled = true;
@@ -1506,20 +1621,24 @@ kill_processes:
out_err:
hdev->disabled = true;
- hdev->reset_info.is_in_soft_reset = false;
+
+ spin_lock(&hdev->reset_info.lock);
+ hdev->reset_info.in_compute_reset = 0;
if (hard_reset) {
dev_err(hdev->dev, "Failed to reset! Device is NOT usable\n");
hdev->reset_info.hard_reset_cnt++;
} else if (reset_upon_device_release) {
+ spin_unlock(&hdev->reset_info.lock);
dev_err(hdev->dev, "Failed to reset device after user release\n");
flags |= HL_DRV_RESET_HARD;
flags &= ~HL_DRV_RESET_DEV_RELEASE;
hard_reset = true;
goto again;
} else {
- dev_err(hdev->dev, "Failed to do soft-reset\n");
- hdev->reset_info.soft_reset_cnt++;
+ spin_unlock(&hdev->reset_info.lock);
+ dev_err(hdev->dev, "Failed to do compute reset\n");
+ hdev->reset_info.compute_reset_cnt++;
flags |= HL_DRV_RESET_HARD;
hard_reset = true;
goto again;
@@ -1527,13 +1646,16 @@ out_err:
hdev->reset_info.in_reset = 0;
+ spin_unlock(&hdev->reset_info.lock);
+
return rc;
}
-static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event)
+static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64 event_mask)
{
mutex_lock(&notifier_event->lock);
- notifier_event->events_mask |= event;
+ notifier_event->events_mask |= event_mask;
+
if (notifier_event->eventfd)
eventfd_signal(notifier_event->eventfd, 1);
@@ -1544,17 +1666,17 @@ static void hl_notifier_event_send(struct hl_notifier_event *notifier_event, u64
* hl_notifier_event_send_all - notify all user processes via eventfd
*
* @hdev: pointer to habanalabs device structure
- * @event: the occurred event
+ * @event_mask: the occurred event(s)
* Returns 0 for success or an error on failure.
*/
-void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask)
{
struct hl_fpriv *hpriv;
mutex_lock(&hdev->fpriv_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_list, dev_node)
- hl_notifier_event_send(&hpriv->notifier_event, event);
+ hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_list_lock);
@@ -1562,7 +1684,7 @@ void hl_notifier_event_send_all(struct hl_device *hdev, u64 event)
mutex_lock(&hdev->fpriv_ctrl_list_lock);
list_for_each_entry(hpriv, &hdev->fpriv_ctrl_list, dev_node)
- hl_notifier_event_send(&hpriv->notifier_event, event);
+ hl_notifier_event_send(&hpriv->notifier_event, event_mask);
mutex_unlock(&hdev->fpriv_ctrl_list_lock);
}
@@ -1617,13 +1739,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (rc)
goto free_dev_ctrl;
- user_interrupt_cnt = hdev->asic_prop.user_interrupt_count;
+ user_interrupt_cnt = hdev->asic_prop.user_dec_intr_count +
+ hdev->asic_prop.user_interrupt_count;
if (user_interrupt_cnt) {
- hdev->user_interrupt = kcalloc(user_interrupt_cnt,
- sizeof(*hdev->user_interrupt),
- GFP_KERNEL);
-
+ hdev->user_interrupt = kcalloc(user_interrupt_cnt, sizeof(*hdev->user_interrupt),
+ GFP_KERNEL);
if (!hdev->user_interrupt) {
rc = -ENOMEM;
goto early_fini;
@@ -1636,7 +1757,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
*/
rc = hdev->asic_funcs->sw_init(hdev);
if (rc)
- goto user_interrupts_fini;
+ goto free_usr_intr_mem;
/* initialize completion structure for multi CS wait */
@@ -1684,6 +1805,13 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->completion_queue[i].cq_idx = i;
}
+ hdev->shadow_cs_queue = kcalloc(hdev->asic_prop.max_pending_cs,
+ sizeof(*hdev->shadow_cs_queue), GFP_KERNEL);
+ if (!hdev->shadow_cs_queue) {
+ rc = -ENOMEM;
+ goto cq_fini;
+ }
+
/*
* Initialize the event queue. Must be done before hw_init,
* because there the address of the event queue is being
@@ -1692,7 +1820,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
rc = hl_eq_init(hdev, &hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "failed to initialize event queue\n");
- goto cq_fini;
+ goto free_shadow_cs_queue;
}
/* MMU S/W must be initialized before kernel context is created */
@@ -1713,6 +1841,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
hdev->asic_funcs->state_dump_init(hdev);
+ hdev->memory_scrub_val = MEM_SCRUB_DEFAULT_VAL;
hl_debugfs_add_device(hdev);
/* debugfs nodes are created in hl_ctx_init so it must be called after
@@ -1731,6 +1860,12 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
goto release_ctx;
}
+ rc = hl_dec_init(hdev);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to initialize the decoder module\n");
+ goto cb_pool_fini;
+ }
+
/*
* From this point, override rc (=0) in case of an error to allow
* debugging (by adding char devices and create sysfs nodes as part of
@@ -1794,7 +1929,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
/* Need to call this again because the max power might change,
* depending on card type for certain ASICs
*/
- if (hdev->asic_prop.set_max_power_on_device_init)
+ if (hdev->asic_prop.set_max_power_on_device_init &&
+ !hdev->asic_prop.fw_security_enabled)
hl_fw_set_max_power(hdev);
/*
@@ -1824,6 +1960,8 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
return 0;
+cb_pool_fini:
+ hl_cb_pool_fini(hdev);
release_ctx:
if (hl_ctx_put(hdev->kernel_ctx) != 1)
dev_err(hdev->dev,
@@ -1834,6 +1972,8 @@ mmu_fini:
hl_mmu_fini(hdev);
eq_fini:
hl_eq_fini(hdev, &hdev->event_queue);
+free_shadow_cs_queue:
+ kfree(hdev->shadow_cs_queue);
cq_fini:
for (i = 0 ; i < cq_ready_cnt ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
@@ -1842,7 +1982,7 @@ hw_queues_destroy:
hl_hw_queues_destroy(hdev);
sw_fini:
hdev->asic_funcs->sw_fini(hdev);
-user_interrupts_fini:
+free_usr_intr_mem:
kfree(hdev->user_interrupt);
early_fini:
device_early_fini(hdev);
@@ -1928,7 +2068,7 @@ void hl_device_fini(struct hl_device *hdev)
* message won't be sent. Also, in case of heartbeat, the device CPU is
* marked as disabled so this message won't be sent
*/
- hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS);
+ hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
/* Mark device as disabled */
hdev->disabled = true;
@@ -1974,12 +2114,16 @@ void hl_device_fini(struct hl_device *hdev)
hl_debugfs_remove_device(hdev);
+ hl_dec_fini(hdev);
+
hl_vm_fini(hdev);
hl_mmu_fini(hdev);
hl_eq_fini(hdev, &hdev->event_queue);
+ kfree(hdev->shadow_cs_queue);
+
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
hl_cq_fini(hdev, &hdev->completion_queue[i]);
kfree(hdev->completion_queue);
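Taken together, the device.c hunks above reduce the reset entry path to a small state machine guarded by reset_info.lock: the compute-reset indication is now raised before in_reset, and the error path clears it under the same lock. A condensed sketch of the entry ordering, with error handling elided (illustrative only, not the full hl_device_reset()):

	spin_lock(&hdev->reset_info.lock);
	if (hdev->reset_info.in_reset) {
		/* a hard reset may be scheduled while a compute reset is running */
		if (hard_reset && hdev->reset_info.in_compute_reset)
			hdev->reset_info.hard_reset_schedule_flags = flags;
		spin_unlock(&hdev->reset_info.lock);
		return 0;
	}

	/* set before in_reset: in_compute_reset implies the device is in reset */
	hdev->reset_info.in_compute_reset = !hard_reset;
	hdev->reset_info.in_reset = 1;
	spin_unlock(&hdev->reset_info.lock);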
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 828a36af5b14..608ca67527a5 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -15,6 +15,14 @@
#define FW_FILE_MAX_SIZE 0x1400000 /* maximum size of 20MB */
+struct fw_binning_conf {
+ u64 tpc_binning;
+ u32 dec_binning;
+ u32 hbm_binning;
+ u32 edma_binning;
+ u32 mme_redundancy;
+};
+
static char *extract_fw_ver_from_str(const char *fw_str)
{
char *str, *fw_ver, *whitespace;
@@ -33,7 +41,7 @@ static char *extract_fw_ver_from_str(const char *fw_str)
ver_offset = str - fw_str;
/* Copy until the next whitespace */
- whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
+ whitespace = strnstr(str, " ", VERSION_MAX_LEN - ver_offset);
if (!whitespace)
goto free_fw_ver;
@@ -46,6 +54,43 @@ free_fw_ver:
return NULL;
}
+static int extract_fw_sub_versions(struct hl_device *hdev, char *preboot_ver)
+{
+ char major[8], minor[8], *first_dot, *second_dot;
+ int rc;
+
+ first_dot = strnstr(preboot_ver, ".", 10);
+ if (first_dot) {
+ strscpy(major, preboot_ver, first_dot - preboot_ver + 1);
+ rc = kstrtou32(major, 10, &hdev->fw_major_version);
+ } else {
+ rc = -EINVAL;
+ }
+
+ if (rc) {
+ dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+ goto out;
+ }
+
+ /* skip the first dot */
+ first_dot++;
+
+ second_dot = strnstr(first_dot, ".", 10);
+ if (second_dot) {
+ strscpy(minor, first_dot, second_dot - first_dot + 1);
+ rc = kstrtou32(minor, 10, &hdev->fw_minor_version);
+ } else {
+ rc = -EINVAL;
+ }
+
+ if (rc)
+ dev_err(hdev->dev, "Error %d parsing preboot minor version\n", rc);
+
+out:
+ kfree(preboot_ver);
+ return rc;
+}
+
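For a preboot version string such as "38.12.0" (the value here is only an example), the helper above splits on the first two dots and stores the numeric components; note that it consumes and frees preboot_ver on every path. A hypothetical caller:

	/* hypothetical caller; the version string is an assumption */
	char *preboot_ver = kstrdup("38.12.0", GFP_KERNEL);
	int rc = -ENOMEM;

	if (preboot_ver)
		rc = extract_fw_sub_versions(hdev, preboot_ver); /* frees preboot_ver */
	/* on success: hdev->fw_major_version == 38, hdev->fw_minor_version == 12 */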
static int hl_request_fw(struct hl_device *hdev,
const struct firmware **firmware_p,
const char *fw_name)
@@ -197,14 +242,14 @@ int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
return rc;
}
-int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value)
{
struct cpucp_packet pkt = {};
pkt.ctl = cpu_to_le32(opcode << CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.value = cpu_to_le64(value);
- return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
- sizeof(pkt), 0, NULL);
+ return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@@ -218,8 +263,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u32 tmp, expected_ack_val, pi;
int rc;
- pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len,
- &pkt_dma_addr);
+ pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
if (!pkt) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for packet to CPU\n");
@@ -231,7 +275,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
mutex_lock(&hdev->send_cpu_message_lock);
/* CPU-CP messages can be sent during soft-reset */
- if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
+ if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
rc = 0;
goto out;
}
@@ -267,7 +311,14 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) {
- dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
+ /* If the FW performed a reset just before sending it a packet, we will get a timeout.
+ * This is expected behavior, hence no need for error message.
+ */
+ if (!hl_device_operational(hdev, NULL) && !hdev->reset_info.in_compute_reset)
+ dev_dbg(hdev->dev, "Device CPU packet timeout (0x%x) due to FW reset\n",
+ tmp);
+ else
+ dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
hdev->device_cpu_disabled = true;
goto out;
}
@@ -276,11 +327,15 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
if (rc) {
- dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
- rc,
- (tmp & CPUCP_PKT_CTL_OPCODE_MASK)
- >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+ dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
+ rc, (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+
+ /* propagate the return code from the f/w to the callers who want to check it */
+ if (result)
+ *result = rc;
+
rc = -EIO;
+
} else if (result) {
*result = le64_to_cpu(pkt->result);
}
@@ -296,7 +351,7 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
out:
mutex_unlock(&hdev->send_cpu_message_lock);
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt);
+ hl_cpu_accessible_dma_pool_free(hdev, len, pkt);
return rc;
}
@@ -517,6 +572,11 @@ static bool fw_report_boot_dev0(struct hl_device *hdev, u32 err_val,
err_val &= ~CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL;
}
+ if (err_val & CPU_BOOT_ERR0_BINNING_FAIL) {
+ dev_err(hdev->dev, "Device boot error - binning failure\n");
+ err_exists = true;
+ }
+
if (sts_val & CPU_BOOT_DEV_STS0_ENABLED)
dev_dbg(hdev->dev, "Device status0 %#x\n", sts_val);
@@ -637,10 +697,8 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
u64 result;
int rc;
- cpucp_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- sizeof(struct cpucp_info),
- &cpucp_info_dma_addr);
+ cpucp_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, sizeof(struct cpucp_info),
+ &cpucp_info_dma_addr);
if (!cpucp_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP info packet\n");
@@ -701,8 +759,7 @@ int hl_fw_cpucp_info_get(struct hl_device *hdev,
prop->fw_app_cpu_boot_dev_sts1 = RREG32(sts_boot_dev_sts1_reg);
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- sizeof(struct cpucp_info), cpucp_info_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_info), cpucp_info_cpu_addr);
return rc;
}
@@ -785,9 +842,8 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
u64 result;
int rc;
- eeprom_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- max_size, &eeprom_info_dma_addr);
+ eeprom_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, max_size,
+ &eeprom_info_dma_addr);
if (!eeprom_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP EEPROM packet\n");
@@ -815,8 +871,7 @@ int hl_fw_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size)
memcpy(data, eeprom_info_cpu_addr, min((size_t)result, max_size));
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, max_size,
- eeprom_info_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, max_size, eeprom_info_cpu_addr);
return rc;
}
@@ -833,8 +888,7 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
int i, rc;
data_size = sizeof(struct cpucp_monitor_dump);
- mon_dump_cpu_addr = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, data_size,
- &mon_dump_dma_addr);
+ mon_dump_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, data_size, &mon_dump_dma_addr);
if (!mon_dump_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP monitor-dump packet\n");
@@ -864,7 +918,7 @@ int hl_fw_get_monitor_dump(struct hl_device *hdev, void *data)
}
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, data_size, mon_dump_cpu_addr);
return rc;
}
@@ -1057,10 +1111,9 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
u64 result;
int rc;
- cpucp_repl_rows_info_cpu_addr =
- hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- sizeof(struct cpucp_hbm_row_info),
- &cpucp_repl_rows_info_dma_addr);
+ cpucp_repl_rows_info_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev,
+ sizeof(struct cpucp_hbm_row_info),
+ &cpucp_repl_rows_info_dma_addr);
if (!cpucp_repl_rows_info_cpu_addr) {
dev_err(hdev->dev,
"Failed to allocate DMA memory for CPU-CP replaced rows info packet\n");
@@ -1085,9 +1138,8 @@ int hl_fw_dram_replaced_row_get(struct hl_device *hdev,
memcpy(info, cpucp_repl_rows_info_cpu_addr, sizeof(*info));
out:
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- sizeof(struct cpucp_hbm_row_info),
- cpucp_repl_rows_info_cpu_addr);
+ hl_cpu_accessible_dma_pool_free(hdev, sizeof(struct cpucp_hbm_row_info),
+ cpucp_repl_rows_info_cpu_addr);
return rc;
}
@@ -1234,15 +1286,10 @@ static void detect_cpu_boot_status(struct hl_device *hdev, u32 status)
}
}
-static int hl_fw_read_preboot_caps(struct hl_device *hdev,
- u32 cpu_boot_status_reg,
- u32 sts_boot_dev_sts0_reg,
- u32 sts_boot_dev_sts1_reg,
- u32 boot_err0_reg, u32 boot_err1_reg,
- u32 timeout)
+static int hl_fw_wait_preboot_ready(struct hl_device *hdev)
{
- struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 status, reg_val;
+ struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load;
+ u32 status;
int rc;
/* Need to check two possible scenarios:
@@ -1255,13 +1302,13 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
*/
rc = hl_poll_timeout(
hdev,
- cpu_boot_status_reg,
+ pre_fw_load->cpu_boot_status_reg,
status,
(status == CPU_BOOT_STATUS_NIC_FW_RDY) ||
(status == CPU_BOOT_STATUS_READY_TO_BOOT) ||
(status == CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT),
hdev->fw_poll_interval_usec,
- timeout);
+ pre_fw_load->wait_for_preboot_timeout);
if (rc) {
dev_err(hdev->dev, "CPU boot ready status timeout\n");
@@ -1271,12 +1318,32 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
* of reading specific errors
*/
if (status != -1)
- fw_read_errors(hdev, boot_err0_reg, boot_err1_reg,
- sts_boot_dev_sts0_reg,
- sts_boot_dev_sts1_reg);
+ fw_read_errors(hdev, pre_fw_load->boot_err0_reg,
+ pre_fw_load->boot_err1_reg,
+ pre_fw_load->sts_boot_dev_sts0_reg,
+ pre_fw_load->sts_boot_dev_sts1_reg);
return -EIO;
}
+ hdev->fw_loader.fw_comp_loaded |= FW_TYPE_PREBOOT_CPU;
+
+ return 0;
+}
+
+static int hl_fw_read_preboot_caps(struct hl_device *hdev)
+{
+ struct pre_fw_load_props *pre_fw_load;
+ struct asic_fixed_properties *prop;
+ u32 reg_val;
+ int rc;
+
+ prop = &hdev->asic_prop;
+ pre_fw_load = &hdev->fw_loader.pre_fw_load;
+
+ rc = hl_fw_wait_preboot_ready(hdev);
+ if (rc)
+ return rc;
+
/*
* the registers DEV_STS* contain FW capabilities/features.
* We can rely on these registers only if bit CPU_BOOT_DEV_STS*_ENABLED
@@ -1287,13 +1354,13 @@ static int hl_fw_read_preboot_caps(struct hl_device *hdev,
* In case it is not enabled, the stored value will be left 0 - all
* caps/features are off
*/
- reg_val = RREG32(sts_boot_dev_sts0_reg);
+ reg_val = RREG32(pre_fw_load->sts_boot_dev_sts0_reg);
if (reg_val & CPU_BOOT_DEV_STS0_ENABLED) {
prop->fw_cpu_boot_dev_sts0_valid = true;
prop->fw_preboot_cpu_boot_dev_sts0 = reg_val;
}
- reg_val = RREG32(sts_boot_dev_sts1_reg);
+ reg_val = RREG32(pre_fw_load->sts_boot_dev_sts1_reg);
if (reg_val & CPU_BOOT_DEV_STS1_ENABLED) {
prop->fw_cpu_boot_dev_sts1_valid = true;
prop->fw_preboot_cpu_boot_dev_sts1 = reg_val;
@@ -1436,24 +1503,21 @@ static int hl_fw_static_read_preboot_status(struct hl_device *hdev)
return 0;
}
-int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 sts_boot_dev_sts0_reg,
- u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
- u32 boot_err1_reg, u32 timeout)
+int hl_fw_read_preboot_status(struct hl_device *hdev)
{
int rc;
if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU))
return 0;
+ /* get FW pre-load parameters */
+ hdev->asic_funcs->init_firmware_preload_params(hdev);
+
/*
* In order to determine boot method (static vs. dynamic) we need to
* read the boot caps register
*/
- rc = hl_fw_read_preboot_caps(hdev, cpu_boot_status_reg,
- sts_boot_dev_sts0_reg,
- sts_boot_dev_sts1_reg, boot_err0_reg,
- boot_err1_reg, timeout);
+ rc = hl_fw_read_preboot_caps(hdev);
if (rc)
return rc;
@@ -1989,18 +2053,14 @@ static int hl_fw_dynamic_read_device_fw_version(struct hl_device *hdev,
preboot_ver = extract_fw_ver_from_str(prop->preboot_ver);
if (preboot_ver) {
- char major[8];
int rc;
dev_info(hdev->dev, "preboot version %s\n", preboot_ver);
- sprintf(major, "%.2s", preboot_ver);
- kfree(preboot_ver);
- rc = kstrtou32(major, 10, &hdev->fw_major_version);
- if (rc) {
- dev_err(hdev->dev, "Error %d parsing preboot major version\n", rc);
+ /* This function takes care of freeing preboot_ver */
+ rc = extract_fw_sub_versions(hdev, preboot_ver);
+ if (rc)
return rc;
- }
}
break;
@@ -2361,6 +2421,19 @@ static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
case HL_COMMS_RESET_CAUSE_TYPE:
msg.reset_cause = *(__u8 *) data;
break;
+
+ case HL_COMMS_BINNING_CONF_TYPE:
+ {
+ struct fw_binning_conf *binning_conf = (struct fw_binning_conf *) data;
+
+ msg.tpc_binning_conf = cpu_to_le64(binning_conf->tpc_binning);
+ msg.dec_binning_conf = cpu_to_le32(binning_conf->dec_binning);
+ msg.hbm_binning_conf = cpu_to_le32(binning_conf->hbm_binning);
+ msg.edma_binning_conf = cpu_to_le32(binning_conf->edma_binning);
+ msg.mme_redundancy_conf = cpu_to_le32(binning_conf->mme_redundancy);
+ break;
+ }
+
default:
dev_err(hdev->dev,
"Send COMMS message - invalid message type %u\n",
@@ -2418,7 +2491,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
int rc;
dev_info(hdev->dev,
- "Loading firmware to device, may take some time...\n");
+ "Loading %sfirmware to device, may take some time...\n",
+ hdev->asic_prop.fw_security_enabled ? "secured " : "");
/* initialize FW descriptor as invalid */
fw_loader->dynamic_loader.fw_desc_valid = false;
@@ -2429,6 +2503,13 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
*/
dyn_regs = &fw_loader->dynamic_loader.comm_desc.cpu_dyn_regs;
+ /* if there is no preboot-loaded indication, wait for preboot */
+ if (!(hdev->fw_loader.fw_comp_loaded & FW_TYPE_PREBOOT_CPU)) {
+ rc = hl_fw_wait_preboot_ready(hdev);
+ if (rc)
+ return -EIO;
+ }
+
rc = hl_fw_dynamic_send_protocol_cmd(hdev, fw_loader, COMMS_RST_STATE,
0, true,
fw_loader->cpu_timeout);
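Since hl_fw_dynamic_send_msg() is static, an ASIC file would reach the new HL_COMMS_BINNING_CONF_TYPE case through a wrapper; the packaging below is a sketch under that assumption, reusing the binning masks this series adds to asic_fixed_properties (the wrapper name is hypothetical):

	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct fw_binning_conf conf = {
		.tpc_binning = prop->tpc_binning_mask,
		.dec_binning = prop->decoder_binning_mask,
		.hbm_binning = lower_32_bits(prop->dram_binning_mask),
		.edma_binning = prop->edma_binning_mask,
		.mme_redundancy = 0,	/* assumed: no redundant MME */
	};
	int rc;

	/* hypothetical wrapper around the static hl_fw_dynamic_send_msg() */
	rc = hl_fw_send_binning_conf(hdev, &conf);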
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index b0b0f3f89865..d59bba9e55c9 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -31,6 +31,9 @@
#define HL_NAME "habanalabs"
+struct hl_device;
+struct hl_fpriv;
+
/* Use upper bits of mmap offset to store habana driver specific information.
* bits[63:59] - Encode mmap type
* bits[45:0] - mmap offset value
@@ -69,9 +72,12 @@
#define HL_PCI_ELBI_TIMEOUT_MSEC 10 /* 10ms */
-#define HL_SIM_MAX_TIMEOUT_US 10000000 /* 10s */
+#define HL_SIM_MAX_TIMEOUT_US 100000000 /* 100s */
-#define HL_COMMON_USER_INTERRUPT_ID 0xFFF
+#define HL_INVALID_QUEUE UINT_MAX
+
+#define HL_COMMON_USER_CQ_INTERRUPT_ID 0xFFF
+#define HL_COMMON_DEC_INTERRUPT_ID 0xFFE
#define HL_STATE_DUMP_HIST_LEN 5
@@ -99,6 +105,18 @@ enum hl_mmu_page_table_location {
MMU_NUM_PGT_LOCATIONS /* num of PGT locations */
};
+/**
+ * enum hl_mmu_enablement - what mmu modules to enable
+ * @MMU_EN_NONE: mmu disabled.
+ * @MMU_EN_ALL: enable all.
+ * @MMU_EN_PMMU_ONLY: Enable only the PMMU leaving the DMMU disabled.
+ */
+enum hl_mmu_enablement {
+ MMU_EN_NONE = 0,
+ MMU_EN_ALL = 1,
+ MMU_EN_PMMU_ONLY = 3, /* N/A for Goya/Gaudi */
+};
+
/*
* HL_RSVD_SOBS 'sync stream' reserved sync objects per QMAN stream
* HL_RSVD_MONS 'sync stream' reserved monitors per QMAN stream
@@ -118,7 +136,12 @@ enum hl_mmu_page_table_location {
#define HL_PCI_NUM_BARS 6
-#define HL_MAX_DCORES 4
+/* Completion queue entry relates to completed job */
+#define HL_COMPLETION_MODE_JOB 0
+/* Completion queue entry relates to completed command submission */
+#define HL_COMPLETION_MODE_CS 1
+
+#define HL_MAX_DCORES 8
/*
* Reset Flags
@@ -159,6 +182,51 @@ enum hl_mmu_page_table_location {
#define HL_DRV_RESET_FW_FATAL_ERR (1 << 6)
#define HL_DRV_RESET_DELAY (1 << 7)
+/*
+ * Security
+ */
+
+#define HL_PB_SHARED 1
+#define HL_PB_NA 0
+#define HL_PB_SINGLE_INSTANCE 1
+#define HL_BLOCK_SIZE 0x1000
+#define HL_BLOCK_GLBL_ERR_MASK 0xF40
+#define HL_BLOCK_GLBL_ERR_ADDR 0xF44
+#define HL_BLOCK_GLBL_ERR_CAUSE 0xF48
+#define HL_BLOCK_GLBL_SEC_OFFS 0xF80
+#define HL_BLOCK_GLBL_SEC_SIZE (HL_BLOCK_SIZE - HL_BLOCK_GLBL_SEC_OFFS)
+#define HL_BLOCK_GLBL_SEC_LEN (HL_BLOCK_GLBL_SEC_SIZE / sizeof(u32))
+#define UNSET_GLBL_SEC_BIT(array, b) ((array)[((b) / 32)] |= (1 << ((b) % 32)))
+
+enum hl_protection_levels {
+ SECURED_LVL,
+ PRIVILEGED_LVL,
+ NON_SECURED_LVL
+};
+
+/**
+ * struct iterate_module_ctx - HW module iterator
+ * @fn: function to apply to each HW module instance
+ * @data: optional internal data to the function iterator
+ */
+struct iterate_module_ctx {
+ /*
+ * callback for the HW module iterator
+ * @hdev: pointer to the habanalabs device structure
+ * @block: block (ASIC specific definition can be dcore/hdcore)
+ * @inst: HW module instance within the block
+ * @offset: current HW module instance offset from the first HW module instance
+ * in the first block
+ * @data: function specific data
+ */
+ void (*fn)(struct hl_device *hdev, int block, int inst, u32 offset, void *data);
+ void *data;
+};
+
+struct hl_block_glbl_sec {
+ u32 sec_array[HL_BLOCK_GLBL_SEC_LEN];
+};
+
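The GLBL_SEC shadow array and the iterator type combine naturally: a callback can open (unsecure) selected registers in every instance of a block before the shadow array is written back to the block's 0xF80 range. A sketch, where the register offset, per-instance indexing and instance count are purely illustrative assumptions:

	/* hypothetical callback: open the register at byte offset 0x64 */
	static void unsecure_reg_cb(struct hl_device *hdev, int block, int inst,
					u32 offset, void *data)
	{
		struct hl_block_glbl_sec *sgs = data;

		/* bit index = register byte offset / register size */
		UNSET_GLBL_SEC_BIT(sgs[inst].sec_array, 0x64 / sizeof(u32));
	}

	struct hl_block_glbl_sec sgs_array[NUM_INSTANCES];	/* assumed count */
	struct iterate_module_ctx ctx = {
		.fn = unsecure_reg_cb,
		.data = sgs_array,
	};

The walker that applies ctx.fn to each block instance is supplied by the ASIC-specific code.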
#define HL_MAX_SOBS_PER_MONITOR 8
/**
@@ -183,28 +251,32 @@ struct hl_gen_wait_properties {
/**
* struct pgt_info - MMU hop page info.
- * @node: hash linked-list node for the pgts shadow hash of pgts.
+ * @node: hash linked-list node for the pgts on host (shadow pgts for device resident MMU and
+ * actual pgts for host resident MMU).
* @phys_addr: physical address of the pgt.
- * @shadow_addr: shadow hop in the host.
+ * @virt_addr: host virtual address of the pgt (see above device/host resident).
+ * @shadow_addr: shadow hop in the host for device resident MMU.
* @ctx: pointer to the owner ctx.
- * @num_of_ptes: indicates how many ptes are used in the pgt.
+ * @num_of_ptes: indicates how many ptes are used in the pgt. used only for dynamically
+ * allocated HOPs (all HOPs but HOP0)
*
- * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop)
- * is needed during mapping, a new page is allocated and this structure holds
- * its essential information. During unmapping, if no valid PTEs remained in the
- * page, it is freed with its pgt_info structure.
+ * The MMU page tables hierarchy can be placed either on the device's DRAM (in which case shadow
+ * pgts will be stored on host memory) or on host memory (in which case no shadow is required).
+ *
+ * When a new level (hop) is needed during mapping this structure will be used to describe
+ * the newly allocated hop as well as to track number of PTEs in it.
+ * During unmapping, if no valid PTEs remained in the page of a newly allocated hop, it is
+ * freed with its pgt_info structure.
*/
struct pgt_info {
struct hlist_node node;
u64 phys_addr;
+ u64 virt_addr;
u64 shadow_addr;
struct hl_ctx *ctx;
int num_of_ptes;
};
-struct hl_device;
-struct hl_fpriv;
-
/**
* enum hl_pci_match_mode - pci match mode per region
* @PCI_ADDRESS_MATCH_MODE: address match mode
@@ -337,21 +409,23 @@ enum hl_collective_mode {
/**
* struct hw_queue_properties - queue information.
* @type: queue type.
- * @queue_cb_alloc_flags: bitmap which indicates if the hw queue supports CB
- * that allocated by the Kernel driver and therefore,
- * a CB handle can be provided for jobs on this queue.
- * Otherwise, a CB address must be provided.
+ * @cb_alloc_flags: bitmap which indicates if the hw queue supports CBs
+ * allocated by the kernel driver, and therefore
+ * a CB handle can be provided for jobs on this queue.
+ * Otherwise, a CB address must be provided.
* @collective_mode: collective mode of current queue
* @driver_only: true if only the driver is allowed to send a job to this queue,
* false otherwise.
+ * @binned: True if the queue is binned out and should not be used
* @supports_sync_stream: True if queue supports sync stream
*/
struct hw_queue_properties {
- enum hl_queue_type type;
- enum queue_cb_alloc_flags cb_alloc_flags;
- enum hl_collective_mode collective_mode;
- u8 driver_only;
- u8 supports_sync_stream;
+ enum hl_queue_type type;
+ enum queue_cb_alloc_flags cb_alloc_flags;
+ enum hl_collective_mode collective_mode;
+ u8 driver_only;
+ u8 binned;
+ u8 supports_sync_stream;
};
/**
@@ -401,6 +475,8 @@ enum hl_device_hw_state {
* @hop_masks: array holds HOPs masks.
* @last_mask: mask to get the bit indicating this is the last hop.
* @pgt_size: size for page tables.
+ * @supported_pages_mask: bitmask of supported page sizes (relevant only for MMUs
+ * supporting multiple page sizes).
* @page_size: default page size used to allocate memory.
* @num_hops: The amount of hops supported by the translation table.
* @hop_table_size: HOP table size.
@@ -415,6 +491,7 @@ struct hl_mmu_properties {
u64 hop_masks[MMU_HOP_MAX];
u64 last_mask;
u64 pgt_size;
+ u64 supported_pages_mask;
u32 page_size;
u32 num_hops;
u32 hop_table_size;
@@ -455,7 +532,7 @@ struct hl_hints_range {
* @dram_user_base_address: DRAM physical start address for user access.
* @dram_size: DRAM total size.
* @dram_pci_bar_size: size of PCI bar towards DRAM.
- * @max_power_default: max power of the device after reset
+ * @max_power_default: max power of the device after reset.
* @dc_power_default: power consumed by the device in mode idle.
* @dram_size_for_default_page_mapping: DRAM size needed to map to avoid page
* fault.
@@ -463,12 +540,19 @@ struct hl_hints_range {
* @pcie_aux_dbi_reg_addr: Address of the PCIE_AUX DBI register.
* @mmu_pgt_addr: base physical address in DRAM of MMU page tables.
* @mmu_dram_default_page_addr: DRAM default page physical address.
+ * @tpc_enabled_mask: which TPCs are enabled.
+ * @tpc_binning_mask: which TPCs are binned. 0 means usable and 1 means binned.
+ * @dram_enabled_mask: which DRAMs are enabled.
+ * @dram_binning_mask: which DRAMs are binned. 0 means usable, 1 means binned.
* @cb_va_start_addr: virtual start address of command buffers which are mapped
* to the device's MMU.
* @cb_va_end_addr: virtual end address of command buffers which are mapped to
* the device's MMU.
* @dram_hints_align_mask: dram va hint addresses alignment mask which is used
* for hints validity check.
+ * @cfg_base_address: config space base address.
+ * @mmu_cache_mng_addr: address of the MMU cache.
+ * @mmu_cache_mng_size: size of the MMU cache.
* @device_dma_offset_for_host_access: the offset to add to host DMA addresses
* to enable the device to access them.
* @host_base_address: host physical start address for host DMA from device
@@ -493,6 +577,12 @@ struct hl_hints_range {
* @high_pll: high PLL frequency used by the device.
* @cb_pool_cb_cnt: number of CBs in the CB pool.
* @cb_pool_cb_size: size of each CB in the CB pool.
+ * @decoder_enabled_mask: which decoders are enabled.
+ * @decoder_binning_mask: which decoders are binned, 0 means usable and 1
+ * means binned (at most one binned decoder per dcore).
+ * @edma_enabled_mask: which EDMAs are enabled.
+ * @edma_binning_mask: which EDMAs are binned, 0 means usable and 1 means
+ * binned (at most one binned DMA).
* @max_pending_cs: maximum of concurrent pending command submissions
* @max_queues: maximum amount of queues in the system
* @fw_preboot_cpu_boot_dev_sts0: bitmap representation of preboot cpu
@@ -513,6 +603,13 @@ struct hl_hints_range {
* @fw_app_cpu_boot_dev_sts1: bitmap representation of application security
* status reported by FW, bit description can be
* found in CPU_BOOT_DEV_STS1
+ * @max_dec: maximum number of decoders
+ * @hmmu_hif_enabled_mask: mask of HMMUs/HIFs that are not isolated (enabled)
+ * 1- enabled, 0- isolated.
+ * @faulty_dram_cluster_map: mask of faulty DRAM cluster.
+ * 1- faulty cluster, 0- good cluster.
+ * @xbar_edge_enabled_mask: mask of XBAR_EDGEs that are not isolated (enabled)
+ * 1- enabled, 0- isolated.
* @device_mem_alloc_default_page_size: may be different than dram_page_size only for ASICs for
* which the property supports_user_set_page_size is true
* (i.e. the DRAM supports multiple page sizes), otherwise
@@ -523,14 +620,17 @@ struct hl_hints_range {
* @sync_stream_first_mon: first monitor available for sync stream use
* @first_available_user_sob: first sob available for the user
* @first_available_user_mon: first monitor available for the user
- * @first_available_user_msix_interrupt: first available msix interrupt
- * reserved for the user
+ * @first_available_user_interrupt: first available interrupt reserved for the user
* @first_available_cq: first available CQ for the user.
* @user_interrupt_count: number of user interrupts.
+ * @user_dec_intr_count: number of decoder interrupts exposed to user.
+ * @cache_line_size: device cache line size.
* @server_type: Server type that the ASIC is currently installed in.
* The value is according to enum hl_server_type in uapi file.
- * @tpc_enabled_mask: which TPCs are enabled.
* @completion_queues_count: number of completion queues.
+ * @completion_mode: 0 - job based completion, 1 - cs based completion
+ * @mme_master_slave_mode: 0 - Each MME works independently, 1 - MME works
+ * in Master/Slave mode
* @fw_security_enabled: true if security measures are enabled in firmware,
* false otherwise
* @fw_cpu_boot_dev_sts0_valid: status bits are valid and can be fetched from
@@ -547,7 +647,7 @@ struct hl_hints_range {
* false otherwise.
* @use_get_power_for_reset_history: To support backward compatibility for Goya
* and Gaudi
- * @supports_soft_reset: is soft reset supported.
+ * @supports_compute_reset: true if this ASIC supports a reset which is not a hard-reset.
* @allow_inference_soft_reset: true if the ASIC supports soft reset that is
* initiated by user or TDR. This is only true
* in inference ASICs, as there is no real-world
@@ -585,9 +685,16 @@ struct asic_fixed_properties {
u64 pcie_aux_dbi_reg_addr;
u64 mmu_pgt_addr;
u64 mmu_dram_default_page_addr;
+ u64 tpc_enabled_mask;
+ u64 tpc_binning_mask;
+ u64 dram_enabled_mask;
+ u64 dram_binning_mask;
u64 cb_va_start_addr;
u64 cb_va_end_addr;
u64 dram_hints_align_mask;
+ u64 cfg_base_address;
+ u64 mmu_cache_mng_addr;
+ u64 mmu_cache_mng_size;
u64 device_dma_offset_for_host_access;
u64 host_base_address;
u64 host_end_address;
@@ -610,6 +717,10 @@ struct asic_fixed_properties {
u32 high_pll;
u32 cb_pool_cb_cnt;
u32 cb_pool_cb_size;
+ u32 decoder_enabled_mask;
+ u32 decoder_binning_mask;
+ u32 edma_enabled_mask;
+ u32 edma_binning_mask;
u32 max_pending_cs;
u32 max_queues;
u32 fw_preboot_cpu_boot_dev_sts0;
@@ -618,6 +729,10 @@ struct asic_fixed_properties {
u32 fw_bootfit_cpu_boot_dev_sts1;
u32 fw_app_cpu_boot_dev_sts0;
u32 fw_app_cpu_boot_dev_sts1;
+ u32 max_dec;
+ u32 hmmu_hif_enabled_mask;
+ u32 faulty_dram_cluster_map;
+ u32 xbar_edge_enabled_mask;
u32 device_mem_alloc_default_page_size;
u16 collective_first_sob;
u16 collective_first_mon;
@@ -625,12 +740,15 @@ struct asic_fixed_properties {
u16 sync_stream_first_mon;
u16 first_available_user_sob[HL_MAX_DCORES];
u16 first_available_user_mon[HL_MAX_DCORES];
- u16 first_available_user_msix_interrupt;
+ u16 first_available_user_interrupt;
u16 first_available_cq[HL_MAX_DCORES];
u16 user_interrupt_count;
+ u16 user_dec_intr_count;
+ u16 cache_line_size;
u16 server_type;
- u8 tpc_enabled_mask;
u8 completion_queues_count;
+ u8 completion_mode;
+ u8 mme_master_slave_mode;
u8 fw_security_enabled;
u8 fw_cpu_boot_dev_sts0_valid;
u8 fw_cpu_boot_dev_sts1_valid;
@@ -642,7 +760,7 @@ struct asic_fixed_properties {
u8 dynamic_fw_load;
u8 gic_interrupts_enable;
u8 use_get_power_for_reset_history;
- u8 supports_soft_reset;
+ u8 supports_compute_reset;
u8 allow_inference_soft_reset;
u8 configurable_stop_on_err;
u8 set_max_power_on_device_init;
@@ -811,7 +929,6 @@ struct hl_cb {
* QUEUES
*/
-struct hl_cs;
struct hl_cs_job;
/* Queue length of external and HW queues */
@@ -934,12 +1051,14 @@ struct hl_cq {
* @wait_list_head: head to the list of user threads pending on this interrupt
* @wait_list_lock: protects wait_list_head
* @interrupt_id: msix interrupt id
+ * @is_decoder: whether this entry represents a decoder interrupt
*/
struct hl_user_interrupt {
struct hl_device *hdev;
struct list_head wait_list_head;
spinlock_t wait_list_lock;
u32 interrupt_id;
+ bool is_decoder;
};
/**
@@ -1025,23 +1144,36 @@ struct hl_eq {
bool check_eqe_index;
};
-
-/*
- * ASICs
+/**
+ * struct hl_dec - describes a decoder sw instance.
+ * @hdev: pointer to the device structure.
+ * @completion_abnrm_work: workqueue object to run when decoder generates an error interrupt
+ * @core_id: ID of the decoder.
+ * @base_addr: base address of the decoder.
*/
+struct hl_dec {
+ struct hl_device *hdev;
+ struct work_struct completion_abnrm_work;
+ u32 core_id;
+ u32 base_addr;
+};
/**
* enum hl_asic_type - supported ASIC types.
* @ASIC_INVALID: Invalid ASIC type.
- * @ASIC_GOYA: Goya device.
- * @ASIC_GAUDI: Gaudi device.
+ * @ASIC_GOYA: Goya device (HL-1000).
+ * @ASIC_GAUDI: Gaudi device (HL-2000).
* @ASIC_GAUDI_SEC: Gaudi secured device (HL-2000).
+ * @ASIC_GAUDI2: Gaudi2 device.
+ * @ASIC_GAUDI2_SEC: Gaudi2 secured device.
*/
enum hl_asic_type {
ASIC_INVALID,
ASIC_GOYA,
ASIC_GAUDI,
- ASIC_GAUDI_SEC
+ ASIC_GAUDI_SEC,
+ ASIC_GAUDI2,
+ ASIC_GAUDI2_SEC,
};
struct hl_cs_parser;
@@ -1177,6 +1309,24 @@ struct dynamic_fw_load_mgr {
};
/**
+ * struct pre_fw_load_props - needed properties for pre-FW load
+ * @cpu_boot_status_reg: cpu_boot_status register address
+ * @sts_boot_dev_sts0_reg: sts_boot_dev_sts0 register address
+ * @sts_boot_dev_sts1_reg: sts_boot_dev_sts1 register address
+ * @boot_err0_reg: boot_err0 register address
+ * @boot_err1_reg: boot_err1 register address
+ * @wait_for_preboot_timeout: timeout to poll for preboot ready
+ */
+struct pre_fw_load_props {
+ u32 cpu_boot_status_reg;
+ u32 sts_boot_dev_sts0_reg;
+ u32 sts_boot_dev_sts1_reg;
+ u32 boot_err0_reg;
+ u32 boot_err1_reg;
+ u32 wait_for_preboot_timeout;
+};
+
+/**
* struct fw_image_props - properties of FW image
* @image_name: name of the image
* @src_off: offset in src FW to copy from
@@ -1192,6 +1342,7 @@ struct fw_image_props {
* struct fw_load_mgr - manager FW loading process
* @dynamic_loader: specific structure for dynamic load
* @static_loader: specific structure for static load
+ * @pre_fw_load: parameters for pre-FW load
* @boot_fit_img: boot fit image properties
* @linux_img: linux image properties
* @cpu_timeout: CPU response timeout in usec
@@ -1207,6 +1358,7 @@ struct fw_load_mgr {
struct dynamic_fw_load_mgr dynamic_loader;
struct static_fw_load_mgr static_loader;
};
+ struct pre_fw_load_props pre_fw_load;
struct fw_image_props boot_fit_img;
struct fw_image_props linux_img;
u32 cpu_timeout;
@@ -1217,6 +1369,8 @@ struct fw_load_mgr {
u8 fw_comp_loaded;
};
+struct hl_cs;
+
/**
* struct hl_asic_funcs - ASIC specific functions that can be called from
* common code.
@@ -1248,7 +1402,7 @@ struct fw_load_mgr {
* dma_free_coherent(). This is ASIC function because
* its implementation is not trivial when the driver
* is loaded in simulation mode (not upstreamed).
- * @scrub_device_mem: Scrub device memory given an address and size
+ * @scrub_device_mem: Scrub the entire SRAM and DRAM.
* @scrub_device_dram: Scrub the dram memory of the device.
* @get_int_queue_base: get the internal queue base address.
* @test_queues: run simple test on all queues for sanity check.
@@ -1257,10 +1411,11 @@ struct fw_load_mgr {
* @asic_dma_pool_free: free small DMA allocation from pool.
* @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
* @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
+ * @asic_dma_unmap_single: unmap a single DMA buffer
+ * @asic_dma_map_single: map a single buffer for DMA
* @hl_dma_unmap_sgtable: DMA unmap scatter-gather table.
* @cs_parser: parse Command Submission.
* @asic_dma_map_sgtable: DMA map scatter-gather table.
- * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
* @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
* @update_eq_ci: update event queue CI.
* @context_switch: called upon ASID context switch.
@@ -1282,6 +1437,8 @@ struct fw_load_mgr {
* @non_hard_reset_late_init: perform certain actions needed after a reset which is not hard-reset
* @hw_queues_lock: acquire H/W queues lock.
* @hw_queues_unlock: release H/W queues lock.
+ * @kdma_lock: acquire the KDMA lock. Relevant from GRECO ASIC
+ * @kdma_unlock: release the KDMA lock. Relevant from GRECO ASIC
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @get_monitor_dump: retrieve monitor registers dump from F/W.
@@ -1298,6 +1455,7 @@ struct fw_load_mgr {
* @halt_coresight: stop the ETF and ETR traces.
* @ctx_init: context dependent initialization.
* @ctx_fini: context dependent cleanup.
+ * @pre_schedule_cs: Perform pre-CS-scheduling operations.
* @get_queue_id_for_cq: Get the H/W queue id related to the given CQ index.
* @load_firmware_to_device: load the firmware to the device's memory
* @load_boot_fit_to_device: load boot fit to device's memory
@@ -1308,9 +1466,11 @@ struct fw_load_mgr {
* @reset_sob: Reset a SOB.
* @reset_sob_group: Reset SOB group
* @get_device_time: Get the device time.
+ * @pb_print_security_errors: print security errors according to block and cause
* @collective_wait_init_cs: Generate collective master/slave packets
* and place them in the relevant cs jobs
* @collective_wait_create_jobs: allocate collective wait cs jobs
+ * @get_dec_base_addr: get the base address of a given decoder.
* @scramble_addr: Routine to scramble the address prior of mapping it
* in the MMU.
* @descramble_addr: Routine to de-scramble the address prior of
@@ -1324,18 +1484,18 @@ struct fw_load_mgr {
* driver is ready to receive asynchronous events. This
* function should be called during the first init and
* after every hard-reset of the device
+ * @ack_mmu_errors: check and ack mmu errors, page fault, access violation.
* @get_msi_info: Retrieve asic-specific MSI ID of the f/w async event
* @map_pll_idx_to_fw_idx: convert driver specific per asic PLL index to
* generic f/w compatible PLL Indexes
+ * @init_firmware_preload_params: initialize pre FW-load parameters.
* @init_firmware_loader: initialize data for FW loader.
* @init_cpu_scrambler_dram: Enable CPU specific DRAM scrambling
* @state_dump_init: initialize constants required for state dump
* @get_sob_addr: get SOB base address offset.
* @set_pci_memory_regions: setting properties of PCI memory regions
* @get_stream_master_qid_arr: get pointer to stream masters QID array
- * @is_valid_dram_page_size: return true if page size is supported in device
- * memory allocation, otherwise false.
- * @get_valid_dram_page_orders: get valid device memory allocation page orders
+ * @check_if_razwi_happened: check if there was a razwi due to RR violation.
* @access_dev_mem: access device memory
* @set_dram_bar_base: set the base of the DRAM BAR
*/
@@ -1360,7 +1520,7 @@ struct hl_asic_funcs {
dma_addr_t *dma_handle, gfp_t flag);
void (*asic_dma_free_coherent)(struct hl_device *hdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle);
- int (*scrub_device_mem)(struct hl_device *hdev, u64 addr, u64 size);
+ int (*scrub_device_mem)(struct hl_device *hdev);
int (*scrub_device_dram)(struct hl_device *hdev, u64 val);
void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id,
dma_addr_t *dma_handle, u16 *queue_len);
@@ -1373,16 +1533,21 @@ struct hl_asic_funcs {
size_t size, dma_addr_t *dma_handle);
void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev,
size_t size, void *vaddr);
+ void (*asic_dma_unmap_single)(struct hl_device *hdev,
+ dma_addr_t dma_addr, int len,
+ enum dma_data_direction dir);
+ dma_addr_t (*asic_dma_map_single)(struct hl_device *hdev,
+ void *addr, int len,
+ enum dma_data_direction dir);
void (*hl_dma_unmap_sgtable)(struct hl_device *hdev,
struct sg_table *sgt,
enum dma_data_direction dir);
int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser);
int (*asic_dma_map_sgtable)(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
- u32 (*get_dma_desc_list_size)(struct hl_device *hdev,
- struct sg_table *sgt);
void (*add_end_of_cb_packets)(struct hl_device *hdev,
void *kernel_address, u32 len,
+ u32 original_len,
u64 cq_addr, u32 cq_val, u32 msix_num,
bool eb);
void (*update_eq_ci)(struct hl_device *hdev, u32 val);
@@ -1410,6 +1575,8 @@ struct hl_asic_funcs {
int (*non_hard_reset_late_init)(struct hl_device *hdev);
void (*hw_queues_lock)(struct hl_device *hdev);
void (*hw_queues_unlock)(struct hl_device *hdev);
+ void (*kdma_lock)(struct hl_device *hdev, int dcore_id);
+ void (*kdma_unlock)(struct hl_device *hdev, int dcore_id);
u32 (*get_pci_id)(struct hl_device *hdev);
int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size);
int (*get_monitor_dump)(struct hl_device *hdev, void *data);
@@ -1422,6 +1589,7 @@ struct hl_asic_funcs {
void (*halt_coresight)(struct hl_device *hdev, struct hl_ctx *ctx);
int (*ctx_init)(struct hl_ctx *ctx);
void (*ctx_fini)(struct hl_ctx *ctx);
+ int (*pre_schedule_cs)(struct hl_cs *cs);
u32 (*get_queue_id_for_cq)(struct hl_device *hdev, u32 cq_idx);
int (*load_firmware_to_device)(struct hl_device *hdev);
int (*load_boot_fit_to_device)(struct hl_device *hdev);
@@ -1434,11 +1602,14 @@ struct hl_asic_funcs {
void (*reset_sob)(struct hl_device *hdev, void *data);
void (*reset_sob_group)(struct hl_device *hdev, u16 sob_group);
u64 (*get_device_time)(struct hl_device *hdev);
+ void (*pb_print_security_errors)(struct hl_device *hdev,
+ u32 block_addr, u32 cause, u32 offended_addr);
int (*collective_wait_init_cs)(struct hl_cs *cs);
int (*collective_wait_create_jobs)(struct hl_device *hdev,
struct hl_ctx *ctx, struct hl_cs *cs,
u32 wait_queue_id, u32 collective_engine_id,
u32 encaps_signal_offset);
+ u32 (*get_dec_base_addr)(struct hl_device *hdev, u32 core_id);
u64 (*scramble_addr)(struct hl_device *hdev, u64 addr);
u64 (*descramble_addr)(struct hl_device *hdev, u64 addr);
void (*ack_protection_bits_errors)(struct hl_device *hdev);
@@ -1447,20 +1618,21 @@ struct hl_asic_funcs {
int (*hw_block_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
u32 block_id, u32 block_size);
void (*enable_events_from_fw)(struct hl_device *hdev);
+ int (*ack_mmu_errors)(struct hl_device *hdev, u64 mmu_cap_mask);
void (*get_msi_info)(__le32 *table);
int (*map_pll_idx_to_fw_idx)(u32 pll_idx);
+ void (*init_firmware_preload_params)(struct hl_device *hdev);
void (*init_firmware_loader)(struct hl_device *hdev);
void (*init_cpu_scrambler_dram)(struct hl_device *hdev);
void (*state_dump_init)(struct hl_device *hdev);
u32 (*get_sob_addr)(struct hl_device *hdev, u32 sob_id);
void (*set_pci_memory_regions)(struct hl_device *hdev);
u32* (*get_stream_master_qid_arr)(void);
- bool (*is_valid_dram_page_size)(u32 page_size);
+ void (*check_if_razwi_happened)(struct hl_device *hdev);
int (*mmu_get_real_page_size)(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
u32 page_size, u32 *real_page_size, bool is_dram_addr);
- void (*get_valid_dram_page_orders)(struct hl_info_dev_memalloc_page_sizes *info);
- int (*access_dev_mem)(struct hl_device *hdev, struct pci_mem_region *region,
- enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+ int (*access_dev_mem)(struct hl_device *hdev, enum pci_region region_type,
+ u64 addr, u64 *val, enum debugfs_access_type acc_type);
u64 (*set_dram_bar_base)(struct hl_device *hdev, u64 addr);
};
@@ -1535,16 +1707,55 @@ struct hl_dmabuf_priv {
uint64_t device_address;
};
+#define HL_CS_OUTCOME_HISTORY_LEN 256
+
+/**
+ * struct hl_cs_outcome - represents a single completed CS outcome
+ * @list_link: link to either container's used list or free list
+ * @map_link: list to the container hash map
+ * @ts: completion ts
+ * @seq: the original cs sequence
+ * @error: error code cs completed with, if any
+ */
+struct hl_cs_outcome {
+ struct list_head list_link;
+ struct hlist_node map_link;
+ ktime_t ts;
+ u64 seq;
+ int error;
+};
+
+/**
+ * struct hl_cs_outcome_store - represents a limited store of completed CS outcomes
+ * @outcome_map: index of completed CS, searchable by sequence number
+ * @used_list: list of outcome objects currently in use
+ * @free_list: list of outcome objects currently not in use
+ * @nodes_pool: a static pool of preallocated outcome objects
+ * @db_lock: any operation on the store must take this lock
+ */
+struct hl_cs_outcome_store {
+ DECLARE_HASHTABLE(outcome_map, 8);
+ struct list_head used_list;
+ struct list_head free_list;
+ struct hl_cs_outcome nodes_pool[HL_CS_OUTCOME_HISTORY_LEN];
+ spinlock_t db_lock;
+};
+
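One plausible insertion path for this bounded store, assuming the oldest used entry is recycled once free_list is exhausted (the hash is keyed by CS sequence number; a sketch, not the driver's actual helper):

	static void outcome_store_push(struct hl_cs_outcome_store *store,
					u64 seq, int error, ktime_t ts)
	{
		struct hl_cs_outcome *node;

		spin_lock(&store->db_lock);

		if (list_empty(&store->free_list)) {
			/* recycle the oldest used entry */
			node = list_last_entry(&store->used_list,
						struct hl_cs_outcome, list_link);
			hash_del(&node->map_link);
		} else {
			node = list_first_entry(&store->free_list,
						struct hl_cs_outcome, list_link);
		}

		list_del(&node->list_link);
		node->seq = seq;
		node->error = error;
		node->ts = ts;
		list_add(&node->list_link, &store->used_list);
		hash_add(store->outcome_map, &node->map_link, node->seq);

		spin_unlock(&store->db_lock);
	}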
/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
* @mmu_shadow_hash: holds a mapping from shadow address to pgt_info structure.
+ * @hr_mmu_phys_hash: if host-resident MMU is used, holds a mapping from
+ * MMU-hop-page physical address to its host-resident
+ * pgt_info structure.
* @hpriv: pointer to the private (Kernel Driver) data of the process (fd).
* @hdev: pointer to the device structure.
* @refcount: reference counter for the context. Context is released only when
* this hits 0. It is incremented on CS and CS_WAIT.
* @cs_pending: array of hl fence objects representing pending CS.
+ * @outcome_store: storage data structure used to remember outcomes of completed
+ * command submissions for a long time after CS id wraparound.
* @va_range: holds available virtual addresses for host and dram mappings.
* @mem_hash_lock: protects the mem_hash.
* @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the
@@ -1576,10 +1787,12 @@ struct hl_dmabuf_priv {
struct hl_ctx {
DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS);
DECLARE_HASHTABLE(mmu_shadow_hash, MMU_HASH_TABLE_BITS);
+ DECLARE_HASHTABLE(hr_mmu_phys_hash, MMU_HASH_TABLE_BITS);
struct hl_fpriv *hpriv;
struct hl_device *hdev;
struct kref refcount;
struct hl_fence **cs_pending;
+ struct hl_cs_outcome_store outcome_store;
struct hl_va_range *va_range[HL_VA_RANGE_TYPE_MAX];
struct mutex mem_hash_lock;
struct mutex mmu_lock;
@@ -1601,12 +1814,12 @@ struct hl_ctx {
/**
* struct hl_ctx_mgr - for handling multiple contexts.
- * @ctx_lock: protects ctx_handles.
- * @ctx_handles: idr to hold all ctx handles.
+ * @lock: protects ctx_handles.
+ * @handles: idr to hold all ctx handles.
*/
struct hl_ctx_mgr {
- struct mutex ctx_lock;
- struct idr ctx_handles;
+ struct mutex lock;
+ struct idr handles;
};
@@ -1665,6 +1878,7 @@ struct hl_userptr {
* @timeout_jiffies: cs timeout in jiffies.
* @submission_time_jiffies: submission time of the cs
* @type: CS_TYPE_*.
+ * @jobs_cnt: counter of submitted jobs on all queues.
* @encaps_sig_hdl_id: encaps signals handle id, set for the first staged cs.
* @sob_addr_offset: sob offset from the configuration base address.
* @initial_sob_count: count of completed signals in SOB before current submission of signal or
@@ -1703,6 +1917,7 @@ struct hl_cs {
u64 timeout_jiffies;
u64 submission_time_jiffies;
enum hl_cs_type type;
+ u32 jobs_cnt;
u32 encaps_sig_hdl_id;
u32 sob_addr_offset;
u16 initial_sob_count;
@@ -1961,6 +2176,8 @@ struct hl_notifier_event {
* @dev_node: node in the device list of file private data
* @refcount: number of related contexts.
* @restore_phase_mutex: lock for context switch and restore phase.
+ * @ctx_lock: protects the pointer to the currently executing context. TODO: remove for
+ * multiple ctx per process.
*/
struct hl_fpriv {
struct hl_device *hdev;
@@ -1974,6 +2191,7 @@ struct hl_fpriv {
struct list_head dev_node;
struct kref refcount;
struct mutex restore_phase_mutex;
+ struct mutex ctx_lock;
};
@@ -2027,8 +2245,8 @@ struct hl_debugfs_entry {
* @state_dump_sem: protects state_dump.
* @addr: next address to read/write from/to in read/write32.
* @mmu_addr: next virtual address to translate to physical address in mmu_show.
+ * @mmu_cap_mask: mmu hw capability mask, to be used in mmu_ack_error.
* @userptr_lookup: the target user ptr to look up on demand.
- * @memory_scrub_val: the value to which the dram will be scrubbed to using cb scrub_device_dram
* @mmu_asid: ASID to use while translating in mmu_show.
* @state_dump_head: index of the latest state dump
* @i2c_bus: generic u8 debugfs file for bus value to use in i2c_data_read.
@@ -2058,8 +2276,8 @@ struct hl_dbg_device_entry {
struct rw_semaphore state_dump_sem;
u64 addr;
u64 mmu_addr;
+ u64 mmu_cap_mask;
u64 userptr_lookup;
- u64 memory_scrub_val;
u32 mmu_asid;
u32 state_dump_head;
u8 i2c_bus;
@@ -2255,9 +2473,11 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
/* Timeout should be longer when working with simulator but cap the
* increased timeout to some maximum
*/
-#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+#define hl_poll_timeout_common(hdev, addr, val, cond, sleep_us, timeout_us, elbi) \
({ \
ktime_t __timeout; \
+ u32 __elbi_read; \
+ int __rc = 0; \
if (hdev->pdev) \
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
@@ -2266,19 +2486,103 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
- (val) = RREG32(addr); \
+ if (elbi) { \
+ __rc = hl_pci_elbi_read(hdev, addr, &__elbi_read); \
+ if (__rc) \
+ break; \
+ (val) = __elbi_read; \
+ } else {\
+ (val) = RREG32((u32)addr); \
+ } \
if (cond) \
break; \
if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = RREG32(addr); \
+ if (elbi) { \
+ __rc = hl_pci_elbi_read(hdev, addr, &__elbi_read); \
+ if (__rc) \
+ break; \
+ (val) = __elbi_read; \
+ } else {\
+ (val) = RREG32((u32)addr); \
+ } \
break; \
} \
if (sleep_us) \
usleep_range((sleep_us >> 2) + 1, sleep_us); \
} \
- (cond) ? 0 : -ETIMEDOUT; \
+ __rc ? __rc : ((cond) ? 0 : -ETIMEDOUT); \
})
+#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \
+ hl_poll_timeout_common(hdev, addr, val, cond, sleep_us, timeout_us, false)
+
+#define hl_poll_timeout_elbi(hdev, addr, val, cond, sleep_us, timeout_us) \
+ hl_poll_timeout_common(hdev, addr, val, cond, sleep_us, timeout_us, true)
+
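A typical call site for the non-ELBI flavor mirrors hl_fw_wait_preboot_ready() earlier in this series; the register variable and timeout name below are placeholders:

	u32 status;
	int rc;

	rc = hl_poll_timeout(hdev, cpu_boot_status_reg /* placeholder */, status,
				(status == CPU_BOOT_STATUS_READY_TO_BOOT),
				hdev->fw_poll_interval_usec, timeout_usec);
	if (rc == -ETIMEDOUT)
		dev_err(hdev->dev, "boot status poll timed out (0x%x)\n", status);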
+/*
+ * poll array of register addresses.
+ * condition is satisfied if all register values match the expected value.
+ * once some register in the array satisfies the condition it will not be polled again,
+ * this is done both for efficiency and because some registers are "clear on read".
+ * TODO: use read from PCI bar in other places in the code (SW-91406)
+ */
+#define hl_poll_reg_array_timeout_common(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us, elbi) \
+({ \
+ ktime_t __timeout; \
+ u64 __elem_bitmask; \
+ u32 __read_val; \
+ u8 __arr_idx; \
+ int __rc = 0; \
+ \
+ if (hdev->pdev) \
+ __timeout = ktime_add_us(ktime_get(), timeout_us); \
+ else \
+ __timeout = ktime_add_us(ktime_get(),\
+ min(((u64)timeout_us * 10), \
+ (u64) HL_SIM_MAX_TIMEOUT_US)); \
+ \
+ might_sleep_if(sleep_us); \
+ if (arr_size >= 64) \
+ __rc = -EINVAL; \
+ else \
+ __elem_bitmask = BIT_ULL(arr_size) - 1; \
+ for (;;) { \
+ if (__rc) \
+ break; \
+ for (__arr_idx = 0; __arr_idx < (arr_size); __arr_idx++) { \
+ if (!(__elem_bitmask & BIT_ULL(__arr_idx))) \
+ continue; \
+ if (elbi) { \
+ __rc = hl_pci_elbi_read(hdev, (addr_arr)[__arr_idx], &__read_val); \
+ if (__rc) \
+ break; \
+ } else { \
+ __read_val = RREG32((u32)(addr_arr)[__arr_idx]); \
+ } \
+ if (__read_val == (expected_val)) \
+ __elem_bitmask &= ~BIT_ULL(__arr_idx); \
+ } \
+ if (__rc || (__elem_bitmask == 0)) \
+ break; \
+ if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) \
+ break; \
+ if (sleep_us) \
+ usleep_range((sleep_us >> 2) + 1, sleep_us); \
+ } \
+ __rc ? __rc : ((__elem_bitmask == 0) ? 0 : -ETIMEDOUT); \
+})
+
+#define hl_poll_reg_array_timeout(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us) \
+ hl_poll_reg_array_timeout_common(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us, false)
+
+#define hl_poll_reg_array_timeout_elbi(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us) \
+ hl_poll_reg_array_timeout_common(hdev, addr_arr, arr_size, expected_val, sleep_us, \
+ timeout_us, true)
+
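Because a register is dropped from __elem_bitmask as soon as it matches, clear-on-read status registers are read at most once more after they report the expected value. A hypothetical call site (the addresses and expected value are assumptions):

	static const u32 qman_sts[] = { 0x100c, 0x200c, 0x300c, 0x400c };	/* assumed */
	int rc;

	rc = hl_poll_reg_array_timeout(hdev, qman_sts, ARRAY_SIZE(qman_sts),
					0x1 /* assumed idle value */, 1000, 1000000);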
/*
* address in this macro points always to a memory location in the
* host's (server's) memory. That location is updated asynchronously
@@ -2299,7 +2603,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
__timeout = ktime_add_us(ktime_get(), timeout_us); \
else \
__timeout = ktime_add_us(ktime_get(),\
- min((u64)(timeout_us * 10), \
+ min((u64)(timeout_us * 100), \
(u64) HL_SIM_MAX_TIMEOUT_US)); \
might_sleep_if(sleep_us); \
for (;;) { \
@@ -2322,29 +2626,21 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(cond) ? 0 : -ETIMEDOUT; \
})
-#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
- timeout_us) \
+#define HL_USR_MAPPED_BLK_INIT(blk, base, sz) \
({ \
- ktime_t __timeout; \
- if (hdev->pdev) \
- __timeout = ktime_add_us(ktime_get(), timeout_us); \
- else \
- __timeout = ktime_add_us(ktime_get(),\
- min((u64)(timeout_us * 10), \
- (u64) HL_SIM_MAX_TIMEOUT_US)); \
- might_sleep_if(sleep_us); \
- for (;;) { \
- (val) = readl(addr); \
- if (cond) \
- break; \
- if (timeout_us && ktime_compare(ktime_get(), __timeout) > 0) { \
- (val) = readl(addr); \
- break; \
- } \
- if (sleep_us) \
- usleep_range((sleep_us >> 2) + 1, sleep_us); \
- } \
- (cond) ? 0 : -ETIMEDOUT; \
+ struct user_mapped_block *p = blk; \
+\
+ p->address = base; \
+ p->size = sz; \
+})
+
+#define HL_USR_INTR_STRUCT_INIT(usr_intr, hdev, intr_id, decoder) \
+({ \
+ usr_intr.hdev = hdev; \
+ usr_intr.interrupt_id = intr_id; \
+ usr_intr.is_decoder = decoder; \
+ INIT_LIST_HEAD(&usr_intr.wait_list_head); \
+ spin_lock_init(&usr_intr.wait_list_lock); \
})
struct hwmon_chip_info;
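Note the asymmetry between the two initializers: HL_USR_MAPPED_BLK_INIT() takes a pointer to the block, while HL_USR_INTR_STRUCT_INIT() expands field accesses on the struct name itself. A hypothetical ASIC-init fragment (the variable names and block base are assumptions):

	struct user_mapped_block dec_blk;
	struct hl_user_interrupt dec_intr;

	HL_USR_MAPPED_BLK_INIT(&dec_blk, 0xfc000000 /* assumed base */, HL_BLOCK_SIZE);
	HL_USR_INTR_STRUCT_INIT(dec_intr, hdev, HL_COMMON_DEC_INTERRUPT_ID, true);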
@@ -2364,27 +2660,15 @@ struct hl_device_reset_work {
};
/**
- * struct hr_mmu_hop_addrs - used for holding per-device host-resident mmu hop
- * information.
- * @virt_addr: the virtual address of the hop.
- * @phys-addr: the physical address of the hop (used by the device-mmu).
- * @shadow_addr: The shadow of the hop used by the driver for walking the hops.
- */
-struct hr_mmu_hop_addrs {
- u64 virt_addr;
- u64 phys_addr;
- u64 shadow_addr;
-};
-
-/**
* struct hl_mmu_hr_pgt_priv - used for holding per-device mmu host-resident
* page-table internal information.
- * @mmu_pgt_pool: pool of page tables used by MMU for allocating hops.
- * @mmu_shadow_hop0: shadow array of hop0 tables.
+ * @mmu_pgt_pool: pool of page tables used by a host-resident MMU for
+ * allocating hops.
+ * @mmu_asid_hop0: per-ASID array of host-resident hop0 tables.
*/
struct hl_mmu_hr_priv {
- struct gen_pool *mmu_pgt_pool;
- struct hr_mmu_hop_addrs *mmu_shadow_hop0;
+ struct gen_pool *mmu_pgt_pool;
+ struct pgt_info *mmu_asid_hop0;
};
/**
@@ -2437,12 +2721,28 @@ struct hl_mmu_per_hop_info {
struct hl_mmu_hop_info {
u64 scrambled_vaddr;
u64 unscrambled_paddr;
- struct hl_mmu_per_hop_info hop_info[MMU_ARCH_5_HOPS];
+ struct hl_mmu_per_hop_info hop_info[MMU_ARCH_6_HOPS];
u32 used_hops;
enum hl_va_range_type range_type;
};
/**
+ * struct hl_hr_mmu_funcs - Device related host resident MMU functions.
+ * @get_hop0_pgt_info: get page table info structure for HOP0.
+ * @get_pgt_info: get page table info structure for HOP other than HOP0.
+ * @add_pgt_info: add page table info structure to hash.
+ * @get_tlb_mapping_params: get mapping parameters needed for getting TLB info for specific mapping.
+ */
+struct hl_hr_mmu_funcs {
+ struct pgt_info *(*get_hop0_pgt_info)(struct hl_ctx *ctx);
+ struct pgt_info *(*get_pgt_info)(struct hl_ctx *ctx, u64 phys_hop_addr);
+ void (*add_pgt_info)(struct hl_ctx *ctx, struct pgt_info *pgt_info, dma_addr_t phys_addr);
+ int (*get_tlb_mapping_params)(struct hl_device *hdev, struct hl_mmu_properties **mmu_prop,
+ struct hl_mmu_hop_info *hops,
+ u64 virt_addr, bool *is_huge);
+};
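As a hedged sketch, an ASIC with a host-resident MMU would populate these callbacks from its set_funcs entry point (the asic_* names are hypothetical placeholders for the real per-device helpers):

	static void asic_mmu_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
	{
		mmu->hr_funcs.get_hop0_pgt_info = asic_get_hop0_pgt_info;
		mmu->hr_funcs.get_pgt_info = asic_get_pgt_info;
		mmu->hr_funcs.add_pgt_info = asic_add_pgt_info;
		mmu->hr_funcs.get_tlb_mapping_params = asic_get_tlb_mapping_params;
	}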
+
+/**
* struct hl_mmu_funcs - Device related MMU functions.
* @init: initialize the MMU module.
* @fini: release the MMU module.
@@ -2456,22 +2756,21 @@ struct hl_mmu_hop_info {
* @get_tlb_info: returns the list of hops and hop-entries used that were
 * created in order to translate the given virtual address to a
* physical one.
+ * @hr_funcs: functions specific to host resident MMU.
*/
struct hl_mmu_funcs {
int (*init)(struct hl_device *hdev);
void (*fini)(struct hl_device *hdev);
int (*ctx_init)(struct hl_ctx *ctx);
void (*ctx_fini)(struct hl_ctx *ctx);
- int (*map)(struct hl_ctx *ctx,
- u64 virt_addr, u64 phys_addr, u32 page_size,
- bool is_dram_addr);
- int (*unmap)(struct hl_ctx *ctx,
- u64 virt_addr, bool is_dram_addr);
+ int (*map)(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
+ bool is_dram_addr);
+ int (*unmap)(struct hl_ctx *ctx, u64 virt_addr, bool is_dram_addr);
void (*flush)(struct hl_ctx *ctx);
void (*swap_out)(struct hl_ctx *ctx);
void (*swap_in)(struct hl_ctx *ctx);
- int (*get_tlb_info)(struct hl_ctx *ctx,
- u64 virt_addr, struct hl_mmu_hop_info *hops);
+ int (*get_tlb_info)(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops);
+ struct hl_hr_mmu_funcs hr_funcs;
};
/**
@@ -2568,23 +2867,33 @@ struct hl_clk_throttle {
};
/**
+ * struct user_mapped_block - describes a HW block allowed to be mmapped by the user
+ * @address: physical HW block address
+ * @size: allowed size for mmap
+ */
+struct user_mapped_block {
+ u32 address;
+ u32 size;
+};
+
+/**
* struct cs_timeout_info - info of last CS timeout occurred.
* @timestamp: CS timeout timestamp.
- * @write_disable: if set writing to CS parameters in the structure is disabled so,
- * the first (root cause) CS timeout will not be overwritten.
+ * @write_enable: if set, writing to CS parameters in the structure is enabled;
+ *                otherwise it is disabled, so the first (root cause) CS timeout
+ *                will not be overwritten.
* @seq: CS timeout sequence number.
*/
struct cs_timeout_info {
ktime_t timestamp;
- atomic_t write_disable;
+ atomic_t write_enable;
u64 seq;
};
/**
* struct razwi_info - info about last razwi error occurred.
* @timestamp: razwi timestamp.
- * @write_disable: if set writing to razwi parameters in the structure is disabled so the
- * first (root cause) razwi will not be overwritten.
+ * @write_enable: if set, writing to razwi parameters in the structure is enabled;
+ *                otherwise it is disabled, so the first (root cause) razwi will not
+ *                be overwritten.
* @addr: address that caused razwi.
* @engine_id_1: engine id of the razwi initiator, if it was initiated by engine that does
* not have engine id it will be set to U16_MAX.
@@ -2596,7 +2905,7 @@ struct cs_timeout_info {
*/
struct razwi_info {
ktime_t timestamp;
- atomic_t write_disable;
+ atomic_t write_enable;
u64 addr;
u16 engine_id_1;
u16 engine_id_2;
@@ -2604,25 +2913,59 @@ struct razwi_info {
u8 type;
};
+#define MAX_QMAN_STREAMS_INFO 4
+#define OPCODE_INFO_MAX_ADDR_SIZE 8
+/**
+ * struct undefined_opcode_info - info about last undefined opcode error
+ * @timestamp: timestamp of the undefined opcode error
+ * @cb_addr_streams: CB addresses (per stream) that currently exist in the PQ
+ *                   entries. If all stream entries in the array are filled
+ *                   with values, the execution was in the Lower-CP.
+ * @cq_addr: the address of the currently handled command buffer
+ * @cq_size: the size of the currently handled command buffer
+ * @cb_addr_streams_len: number of streams - the actual length of the
+ *                       cb_addr_streams array. Should be 1 in case of an
+ *                       undefined opcode in the Upper-CP (specific stream)
+ *                       and 4 in case of an undefined opcode in the Lower-CP.
+ * @engine_id: engine-id that the error occurred on
+ * @stream_id: the stream id the error occurred on. If the stream equals
+ *             MAX_QMAN_STREAMS_INFO, the error occurred on a Lower-CP.
+ * @write_enable: if set, writing to undefined opcode parameters in the structure
+ *                is enabled, so the first (root cause) undefined opcode will not
+ *                be overwritten.
+ */
+struct undefined_opcode_info {
+ ktime_t timestamp;
+ u64 cb_addr_streams[MAX_QMAN_STREAMS_INFO][OPCODE_INFO_MAX_ADDR_SIZE];
+ u64 cq_addr;
+ u32 cq_size;
+ u32 cb_addr_streams_len;
+ u32 engine_id;
+ u32 stream_id;
+ bool write_enable;
+};
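A hedged sketch of an event handler recording only the first (root cause) occurrence via write_enable (where the values come from is illustrative):

	if (hdev->last_error.undef_opcode.write_enable) {
		struct undefined_opcode_info *info = &hdev->last_error.undef_opcode;

		info->write_enable = false;	/* preserve the root cause */
		info->timestamp = ktime_get();
		info->engine_id = engine_id;	/* e.g. taken from the event payload */
		info->stream_id = stream_id;
	}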
+
/**
* struct last_error_session_info - info about last session errors occurred.
* @cs_timeout: CS timeout error last information.
* @razwi: razwi last information.
+ * @undef_opcode: undefined opcode information
*/
struct last_error_session_info {
- struct cs_timeout_info cs_timeout;
- struct razwi_info razwi;
+ struct cs_timeout_info cs_timeout;
+ struct razwi_info razwi;
+ struct undefined_opcode_info undef_opcode;
};
/**
* struct hl_reset_info - holds current device reset information.
* @lock: lock to protect critical reset flows.
- * @soft_reset_cnt: number of soft reset since the driver was loaded.
- * @hard_reset_cnt: number of hard reset since the driver was loaded.
- * @hard_reset_schedule_flags: hard reset is scheduled to after current soft reset,
+ * @compute_reset_cnt: number of compute resets since the driver was loaded.
+ * @hard_reset_cnt: number of hard resets since the driver was loaded.
+ * @hard_reset_schedule_flags: hard reset is scheduled after the current compute reset;
* here we hold the hard reset flags.
* @in_reset: is device in reset flow.
- * @is_in_soft_reset: Device is currently in soft reset process.
+ * @in_compute_reset: Device is currently in reset but not in hard-reset.
* @needs_reset: true if reset_on_lockup is false and device should be reset
* due to lockup.
* @hard_reset_pending: is there a hard reset work pending.
@@ -2637,11 +2980,11 @@ struct last_error_session_info {
*/
struct hl_reset_info {
spinlock_t lock;
- u32 soft_reset_cnt;
+ u32 compute_reset_cnt;
u32 hard_reset_cnt;
u32 hard_reset_schedule_flags;
u8 in_reset;
- u8 is_in_soft_reset;
+ u8 in_compute_reset;
u8 needs_reset;
u8 hard_reset_pending;
@@ -2671,12 +3014,17 @@ struct hl_reset_info {
* @user_interrupt: array of hl_user_interrupt. upon the corresponding user
* interrupt, driver will monitor the list of fences
* registered to this interrupt.
- * @common_user_interrupt: common user interrupt for all user interrupts.
- * upon any user interrupt, driver will monitor the
+ * @common_user_cq_interrupt: common user CQ interrupt for all user CQ interrupts.
+ * upon any user CQ interrupt, the driver will monitor the
* list of fences registered to this common structure.
+ * @common_decoder_interrupt: common decoder interrupt for all user decoder interrupts.
+ * @shadow_cs_queue: pointer to a shadow queue that holds pointers to
+ * outstanding command submissions.
* @cq_wq: work queues of completion queues for executing work in process
* context.
* @eq_wq: work queue of event queue for executing work in process context.
+ * @cs_cmplt_wq: work queue of CS completions for executing work in process
+ * context.
* @ts_free_obj_wq: work queue for timestamp registration objects release.
* @pf_wq: work queue for MMU pre-fetch operations.
* @kernel_ctx: Kernel driver context structure.
@@ -2716,6 +3064,7 @@ struct hl_reset_info {
* @aggregated_cs_counters: aggregated cs counters among all contexts
* @mmu_priv: device-specific MMU data.
* @mmu_func: device-related MMU functions.
+ * @dec: array of decoder SW instances.
* @fw_loader: FW loader manager.
* @pci_mem_region: array of memory regions in the PCI
* @state_dump_specs: constants and dictionaries needed to dump system state.
@@ -2724,8 +3073,10 @@ struct hl_reset_info {
* @last_error: holds information about last session in which CS timeout or razwi error occurred.
* @reset_info: holds current device reset information.
* @stream_master_qid_arr: pointer to array with QIDs of master streams.
- * @fw_major_version: major version of current loaded preboot
+ * @fw_major_version: major version of current loaded preboot.
+ * @fw_minor_version: minor version of current loaded preboot.
* @dram_used_mem: current DRAM memory consumption.
+ * @memory_scrub_val: the value the DRAM will be scrubbed to, using the
+ *                    scrub_device_dram callback.
* @timeout_jiffies: device CS timeout value.
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
@@ -2747,10 +3098,18 @@ struct hl_reset_info {
* used for CPU boot status
* @fw_comms_poll_interval_usec: FW comms/protocol poll interval in usec.
* used for COMMs protocols cmds(COMMS_STS_*)
+ * @dram_binning: mask of DRAMs, received from the f/w, which indicates which
+ *                DRAMs are binned-out.
+ * @tpc_binning: mask of TPC engines, received from the f/w, which indicates
+ *               which TPC engines are binned-out.
* @card_type: Various ASICs have several card types. This indicates the card
* type of the current device.
* @major: habanalabs kernel driver major.
* @high_pll: high PLL profile frequency.
+ * @decoder_binning: mask of decoder engines, received from the f/w, which
+ *                   indicates which decoder engines are binned-out.
+ * @edma_binning: mask of EDMA engines, received from the f/w, which indicates
+ *                which EDMA engines are binned-out.
* @id: device minor.
* @id_control: minor of the control device
* @cpu_pci_msb_addr: 50-bit extension bits for the device CPU's 40-bit
@@ -2759,7 +3118,6 @@ struct hl_reset_info {
* @disabled: is device disabled.
* @late_init_done: is late init stage was done during initialization.
* @hwmon_initialized: is H/W monitor sensors was initialized.
- * @heartbeat: is heartbeat sanity check towards CPU-CP enabled.
* @reset_on_lockup: true if a reset should be done in case of stuck CS, false
* otherwise.
* @dram_default_page_mapping: is DRAM default page mapping enabled.
@@ -2792,6 +3150,21 @@ struct hl_reset_info {
* @is_compute_ctx_active: Whether there is an active compute context executing.
* @compute_ctx_in_release: true if the current compute context is being released.
* @supports_mmu_prefetch: true if prefetch is supported, otherwise false.
+ * @reset_upon_device_release: reset the device when the user closes the file descriptor of the
+ * device.
+ * @nic_ports_mask: Controls which NIC ports are enabled. Used only for testing.
+ * @fw_components: Controls which f/w components to load to the device. There are multiple f/w
+ * stages and sometimes we want to stop at a certain stage. Used only for testing.
+ * @mmu_enable: Whether to enable or disable the device MMU(s). Used only for testing.
+ * @cpu_queues_enable: Whether to enable queues communication vs. the f/w. Used only for testing.
+ * @pldm: Whether we are running in Palladium environment. Used only for testing.
+ * @hard_reset_on_fw_events: Whether to do device hard-reset when a fatal event is received from
+ * the f/w. Used only for testing.
+ * @bmc_enable: Whether we are running in a box with BMC. Used only for testing.
+ * @reset_on_preboot_fail: Whether to reset the device if preboot f/w fails to load.
+ * Used only for testing.
+ * @heartbeat: Controls if we want to enable the heartbeat mechanism vs. the f/w, which verifies
+ * that the f/w is always alive. Used only for testing.
*/
struct hl_device {
struct pci_dev *pdev;
@@ -2809,9 +3182,12 @@ struct hl_device {
enum hl_asic_type asic_type;
struct hl_cq *completion_queue;
struct hl_user_interrupt *user_interrupt;
- struct hl_user_interrupt common_user_interrupt;
+ struct hl_user_interrupt common_user_cq_interrupt;
+ struct hl_user_interrupt common_decoder_interrupt;
+ struct hl_cs **shadow_cs_queue;
struct workqueue_struct **cq_wq;
struct workqueue_struct *eq_wq;
+ struct workqueue_struct *cs_cmplt_wq;
struct workqueue_struct *ts_free_obj_wq;
struct workqueue_struct *pf_wq;
struct hl_ctx *kernel_ctx;
@@ -2855,6 +3231,8 @@ struct hl_device {
struct hl_mmu_priv mmu_priv;
struct hl_mmu_funcs mmu_func[MMU_NUM_PGT_LOCATIONS];
+ struct hl_dec *dec;
+
struct fw_load_mgr fw_loader;
struct pci_mem_region pci_mem_region[PCI_REGION_NUMBER];
@@ -2870,7 +3248,9 @@ struct hl_device {
u32 *stream_master_qid_arr;
u32 fw_major_version;
+ u32 fw_minor_version;
atomic64_t dram_used_mem;
+ u64 memory_scrub_val;
u64 timeout_jiffies;
u64 max_power;
u64 boot_error_status_mask;
@@ -2881,10 +3261,14 @@ struct hl_device {
u64 fw_poll_interval_usec;
ktime_t last_successful_open_ktime;
u64 fw_comms_poll_interval_usec;
+ u64 dram_binning;
+ u64 tpc_binning;
enum cpucp_card_types card_type;
u32 major;
u32 high_pll;
+ u32 decoder_binning;
+ u32 edma_binning;
u16 id;
u16 id_control;
u16 cpu_pci_msb_addr;
@@ -2892,7 +3276,6 @@ struct hl_device {
u8 disabled;
u8 late_init_done;
u8 hwmon_initialized;
- u8 heartbeat;
u8 reset_on_lockup;
u8 dram_default_page_mapping;
u8 memory_scrub;
@@ -2916,24 +3299,18 @@ struct hl_device {
u8 is_compute_ctx_active;
u8 compute_ctx_in_release;
u8 supports_mmu_prefetch;
+ u8 reset_upon_device_release;
/* Parameters for bring-up */
u64 nic_ports_mask;
u64 fw_components;
u8 mmu_enable;
- u8 mmu_huge_page_opt;
- u8 reset_pcilink;
u8 cpu_queues_enable;
u8 pldm;
- u8 axi_drain;
- u8 sram_scrambler_enable;
- u8 dram_scrambler_enable;
u8 hard_reset_on_fw_events;
u8 bmc_enable;
- u8 rl_enable;
u8 reset_on_preboot_fail;
- u8 reset_upon_device_release;
- u8 reset_if_device_not_idle;
+ u8 heartbeat;
};
@@ -3049,13 +3426,22 @@ static inline bool hl_mem_area_crosses_range(u64 address, u32 size,
}
uint64_t hl_set_dram_bar_default(struct hl_device *hdev, u64 addr);
+void *hl_asic_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag);
+void hl_asic_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle);
+void *hl_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle);
+void hl_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr);
+void *hl_asic_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags,
+ dma_addr_t *dma_handle);
+void hl_asic_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr);
int hl_dma_map_sgtable(struct hl_device *hdev, struct sg_table *sgt, enum dma_data_direction dir);
void hl_dma_unmap_sgtable(struct hl_device *hdev, struct sg_table *sgt,
enum dma_data_direction dir);
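A hedged usage sketch of the new coherent-DMA wrappers, which replace the per-ASIC asic_funcs calls elsewhere in this series (size and error handling are illustrative):

	dma_addr_t bus_addr;
	void *p;

	p = hl_asic_dma_alloc_coherent(hdev, SZ_4K, &bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	/* ... program bus_addr into the device, access p from the CPU ... */

	hl_asic_dma_free_coherent(hdev, SZ_4K, p, bus_addr);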
int hl_access_cfg_region(struct hl_device *hdev, u64 addr, u64 *val,
enum debugfs_access_type acc_type);
-int hl_access_dev_mem(struct hl_device *hdev, struct pci_mem_region *region,
- enum pci_region region_type, u64 addr, u64 *val, enum debugfs_access_type acc_type);
+int hl_access_dev_mem(struct hl_device *hdev, enum pci_region region_type,
+ u64 addr, u64 *val, enum debugfs_access_type acc_type);
int hl_device_open(struct inode *inode, struct file *filp);
int hl_device_open_ctrl(struct inode *inode, struct file *filp);
bool hl_device_operational(struct hl_device *hdev,
@@ -3085,7 +3471,8 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q);
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q);
irqreturn_t hl_irq_handler_cq(int irq, void *arg);
irqreturn_t hl_irq_handler_eq(int irq, void *arg);
-irqreturn_t hl_irq_handler_user_cq(int irq, void *arg);
+irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg);
+irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg);
irqreturn_t hl_irq_handler_default(int irq, void *arg);
u32 hl_cq_inc_ptr(u32 ptr);
@@ -3119,7 +3506,7 @@ int hl_device_utilization(struct hl_device *hdev, u32 *utilization);
int hl_build_hwmon_channel_info(struct hl_device *hdev,
struct cpucp_sensor *sensors_arr);
-void hl_notifier_event_send_all(struct hl_device *hdev, u64 event);
+void hl_notifier_event_send_all(struct hl_device *hdev, u64 event_mask);
int hl_sysfs_init(struct hl_device *hdev);
void hl_sysfs_fini(struct hl_device *hdev);
@@ -3158,6 +3545,7 @@ void hl_multi_cs_completion_init(struct hl_device *hdev);
void goya_set_asic_funcs(struct hl_device *hdev);
void gaudi_set_asic_funcs(struct hl_device *hdev);
+void gaudi2_set_asic_funcs(struct hl_device *hdev);
int hl_vm_ctx_init(struct hl_ctx *ctx);
void hl_vm_ctx_fini(struct hl_ctx *ctx);
@@ -3201,10 +3589,39 @@ int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va,
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte);
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
u8 hop_idx, u64 hop_addr, u64 virt_addr);
+void hl_mmu_hr_flush(struct hl_ctx *ctx);
+int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
+ u64 pgt_size);
+void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size);
+void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size);
+u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt, u64 phys_pte_addr,
+ u32 hop_table_size);
+void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u64 val, u32 hop_table_size);
+void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u32 hop_table_size);
+int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size);
+void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr);
+struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
+ struct hl_hr_mmu_funcs *hr_func,
+ u64 curr_pte);
+struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop);
+struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
+ struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop,
+ u64 curr_pte, bool *is_new_hop);
+int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
+ struct hl_hr_mmu_funcs *hr_func);
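A hedged sketch of how an ASIC's get_tlb_info callback might delegate to the common host-resident walker above (the wrapper name is hypothetical):

	static int asic_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
					struct hl_mmu_hop_info *hops)
	{
		return hl_mmu_hr_get_tlb_info(ctx, virt_addr, hops,
					&ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs);
	}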
void hl_mmu_swap_out(struct hl_ctx *ctx);
void hl_mmu_swap_in(struct hl_ctx *ctx);
int hl_mmu_if_set_funcs(struct hl_device *hdev);
void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
+void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu);
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr);
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
struct hl_mmu_hop_info *hops);
@@ -3214,7 +3631,7 @@ bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr);
int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name,
void __iomem *dst, u32 src_offset, u32 size);
-int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode);
+int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode, u64 value);
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
u16 len, u32 timeout, u64 *result);
int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type);
@@ -3248,10 +3665,7 @@ int hl_fw_cpucp_power_get(struct hl_device *hdev, u64 *power);
void hl_fw_ask_hard_reset_without_linux(struct hl_device *hdev);
void hl_fw_ask_halt_machine_without_linux(struct hl_device *hdev);
int hl_fw_init_cpu(struct hl_device *hdev);
-int hl_fw_read_preboot_status(struct hl_device *hdev, u32 cpu_boot_status_reg,
- u32 sts_boot_dev_sts0_reg,
- u32 sts_boot_dev_sts1_reg, u32 boot_err0_reg,
- u32 boot_err1_reg, u32 timeout);
+int hl_fw_read_preboot_status(struct hl_device *hdev);
int hl_fw_dynamic_send_protocol_cmd(struct hl_device *hdev,
struct fw_load_mgr *fw_loader,
enum comms_cmd cmd, unsigned int size,
@@ -3298,6 +3712,11 @@ void hl_encaps_handle_do_release(struct kref *ref);
void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs *cs, struct hl_cs_job *job,
struct hl_cs_compl *cs_cmpl);
+
+int hl_dec_init(struct hl_device *hdev);
+void hl_dec_fini(struct hl_device *hdev);
+void hl_dec_ctx_fini(struct hl_ctx *ctx);
+
void hl_release_pending_user_interrupts(struct hl_device *hdev);
int hl_cs_signal_sob_wraparound_handler(struct hl_device *hdev, u32 q_idx,
struct hl_hw_sob **hw_sob, u32 count, bool encaps_sig);
@@ -3426,6 +3845,55 @@ static inline void hl_debugfs_set_state_dump(struct hl_device *hdev,
#endif
+/* Security */
+int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
+ const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
+ int array_size);
+int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
+ int mm_array_size, int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], int blocks_array_size);
+void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], u32 block_offset,
+ int array_size);
+void hl_secure_block(struct hl_device *hdev,
+ struct hl_block_glbl_sec sgs_array[], int array_size);
+int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size, u64 mask);
+int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size);
+int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size,
+ u64 mask);
+int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array,
+ u32 regs_range_array_size);
+int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size);
+int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array,
+ u32 regs_range_array_size);
+void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size);
+void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size, u64 mask);
+void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size);
+
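A hedged sketch of securing one engine type during HW init with the helpers above (the asic_pb_* tables and the dcore/instance geometry are hypothetical):

	rc = hl_init_pb(hdev, NUM_OF_DCORES, DCORE_OFFSET,
			TPC_PER_DCORE, TPC_OFFSET,
			asic_pb_tpc_blocks, ARRAY_SIZE(asic_pb_tpc_blocks),
			asic_tpc_unsecured_regs, ARRAY_SIZE(asic_tpc_unsecured_regs));
	if (rc)
		return rc;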
/* IOCTLs */
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index 37edb69a7255..f733ead605e7 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -54,10 +54,15 @@ MODULE_PARM_DESC(boot_error_status_mask,
#define PCI_IDS_GAUDI 0x1000
#define PCI_IDS_GAUDI_SEC 0x1010
+#define PCI_IDS_GAUDI2 0x1020
+#define PCI_IDS_GAUDI2_SEC 0x1030
+
static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GOYA), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI), },
{ PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI_SEC), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2), },
+ { PCI_DEVICE(PCI_VENDOR_ID_HABANALABS, PCI_IDS_GAUDI2_SEC), },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, ids);
@@ -84,6 +89,12 @@ static enum hl_asic_type get_asic_type(u16 device)
case PCI_IDS_GAUDI_SEC:
asic_type = ASIC_GAUDI_SEC;
break;
+ case PCI_IDS_GAUDI2:
+ asic_type = ASIC_GAUDI2;
+ break;
+ case PCI_IDS_GAUDI2_SEC:
+ asic_type = ASIC_GAUDI2_SEC;
+ break;
default:
asic_type = ASIC_INVALID;
break;
@@ -96,6 +107,7 @@ static bool is_asic_secured(enum hl_asic_type asic_type)
{
switch (asic_type) {
case ASIC_GAUDI_SEC:
+ case ASIC_GAUDI2_SEC:
return true;
default:
return false;
@@ -137,6 +149,7 @@ int hl_device_open(struct inode *inode, struct file *filp)
mutex_init(&hpriv->notifier_event.lock);
mutex_init(&hpriv->restore_phase_mutex);
+ mutex_init(&hpriv->ctx_lock);
kref_init(&hpriv->refcount);
nonseekable_open(inode, filp);
@@ -152,7 +165,8 @@ int hl_device_open(struct inode *inode, struct file *filp)
"Can't open %s because it is %s\n",
dev_name(hdev->dev), hdev->status[status]);
- if (status == HL_DEVICE_STATUS_IN_RESET)
+ if (status == HL_DEVICE_STATUS_IN_RESET ||
+ status == HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE)
rc = -EAGAIN;
else
rc = -EPERM;
@@ -195,8 +209,9 @@ int hl_device_open(struct inode *inode, struct file *filp)
hl_debugfs_add_file(hpriv);
- atomic_set(&hdev->last_error.cs_timeout.write_disable, 0);
- atomic_set(&hdev->last_error.razwi.write_disable, 0);
+ atomic_set(&hdev->last_error.cs_timeout.write_enable, 1);
+ atomic_set(&hdev->last_error.razwi.write_enable, 1);
+ hdev->last_error.undef_opcode.write_enable = true;
hdev->open_counter++;
hdev->last_successful_open_jif = jiffies;
@@ -209,6 +224,7 @@ out_err:
hl_mem_mgr_fini(&hpriv->mem_mgr);
hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr);
filp->private_data = NULL;
+ mutex_destroy(&hpriv->ctx_lock);
mutex_destroy(&hpriv->restore_phase_mutex);
mutex_destroy(&hpriv->notifier_event.lock);
put_pid(hpriv->taskpid);
@@ -277,43 +293,56 @@ out_err:
static void set_driver_behavior_per_device(struct hl_device *hdev)
{
- hdev->pldm = 0;
+ hdev->nic_ports_mask = 0;
hdev->fw_components = FW_TYPE_ALL_TYPES;
+ hdev->mmu_enable = MMU_EN_ALL;
hdev->cpu_queues_enable = 1;
- hdev->heartbeat = 1;
- hdev->mmu_enable = 1;
- hdev->sram_scrambler_enable = 1;
- hdev->dram_scrambler_enable = 1;
- hdev->bmc_enable = 1;
+ hdev->pldm = 0;
hdev->hard_reset_on_fw_events = 1;
+ hdev->bmc_enable = 1;
hdev->reset_on_preboot_fail = 1;
- hdev->reset_if_device_not_idle = 1;
-
- hdev->reset_pcilink = 0;
- hdev->axi_drain = 0;
+ hdev->heartbeat = 1;
}
static void copy_kernel_module_params_to_device(struct hl_device *hdev)
{
+ hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
+
hdev->major = hl_major;
hdev->memory_scrub = memory_scrub;
hdev->reset_on_lockup = reset_on_lockup;
hdev->boot_error_status_mask = boot_error_status_mask;
+}
- if (timeout_locked)
- hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
- else
- hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+static void fixup_device_params_per_asic(struct hl_device *hdev)
+{
+ switch (hdev->asic_type) {
+ case ASIC_GOYA:
+ case ASIC_GAUDI:
+ case ASIC_GAUDI_SEC:
+ hdev->reset_upon_device_release = 0;
+ break;
+ default:
+ hdev->reset_upon_device_release = 1;
+ break;
+ }
}
static int fixup_device_params(struct hl_device *hdev)
{
- hdev->asic_prop.fw_security_enabled = is_asic_secured(hdev->asic_type);
+ int tmp_timeout;
+
+ tmp_timeout = timeout_locked;
hdev->fw_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
hdev->fw_comms_poll_interval_usec = HL_FW_STATUS_POLL_INTERVAL_USEC;
+ if (tmp_timeout)
+ hdev->timeout_jiffies = msecs_to_jiffies(tmp_timeout * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
hdev->stop_on_err = true;
hdev->reset_info.curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
hdev->reset_info.prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
@@ -321,6 +350,18 @@ static int fixup_device_params(struct hl_device *hdev)
/* Enable only after the initialization of the device */
hdev->disabled = true;
+ if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU) &&
+ (hdev->fw_components & ~FW_TYPE_PREBOOT_CPU)) {
+ pr_err("Preboot must be set along with other components\n");
+ return -EINVAL;
+ }
+
+ /* If CPU queues are not enabled, there is no way to do a heartbeat */
+ if (!hdev->cpu_queues_enable)
+ hdev->heartbeat = 0;
+
+ fixup_device_params_per_asic(hdev);
+
return 0;
}
@@ -345,7 +386,7 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
if (!hdev)
return -ENOMEM;
- /* can be NULL in case of simulator device */
+ /* Will be NULL in case of simulator device */
hdev->pdev = pdev;
/* Assign status description string */
@@ -355,6 +396,9 @@ static int create_hdev(struct hl_device **dev, struct pci_dev *pdev)
strncpy(hdev->status[HL_DEVICE_STATUS_NEEDS_RESET], "needs reset", HL_STR_MAX);
strncpy(hdev->status[HL_DEVICE_STATUS_IN_DEVICE_CREATION],
"in device creation", HL_STR_MAX);
+ strncpy(hdev->status[HL_DEVICE_STATUS_IN_RESET_AFTER_DEVICE_RELEASE],
+ "in reset after device release", HL_STR_MAX);
+
	/* First, we must find out which ASIC we are handling. This is needed
* to configure the behavior of the driver (kernel parameters)
diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
index c7864d6bb0a1..6a30bd98ab5e 100644
--- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c
+++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c
@@ -47,7 +47,7 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
u32 size = args->return_size;
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u64 sram_kmd_size, dram_kmd_size;
+ u64 sram_kmd_size, dram_kmd_size, dram_available_size;
if ((!size) || (!out))
return -EINVAL;
@@ -62,19 +62,22 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.dram_base_address =
hdev->mmu_enable && prop->dram_supports_virtual_memory ?
prop->dmmu.start_addr : prop->dram_user_base_address;
- hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
+ hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
+ hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;
+
hw_ip.sram_size = prop->sram_size - sram_kmd_size;
- if (hdev->mmu_enable)
- hw_ip.dram_size =
- DIV_ROUND_DOWN_ULL(prop->dram_size - dram_kmd_size,
- prop->dram_page_size) *
- prop->dram_page_size;
+ dram_available_size = prop->dram_size - dram_kmd_size;
+
+ if (hdev->mmu_enable == MMU_EN_ALL)
+ hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
+ prop->dram_page_size) * prop->dram_page_size;
else
- hw_ip.dram_size = prop->dram_size - dram_kmd_size;
+ hw_ip.dram_size = dram_available_size;
if (hw_ip.dram_size > PAGE_SIZE)
hw_ip.dram_enabled = 1;
+
hw_ip.dram_page_size = prop->dram_page_size;
hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
hw_ip.num_of_events = prop->num_of_events;
@@ -93,8 +96,12 @@ static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;
- hw_ip.first_available_interrupt_id = prop->first_available_user_msix_interrupt;
+ hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
+ hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
+ hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
+
+ hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
hw_ip.server_type = prop->server_type;
return copy_to_user(out, &hw_ip,
@@ -287,7 +294,7 @@ static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
return -EINVAL;
reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
- reset_count.soft_reset_cnt = hdev->reset_info.soft_reset_cnt;
+ reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;
return copy_to_user(out, &reset_count,
min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
@@ -610,6 +617,28 @@ static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
+static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
+{
+ struct hl_device *hdev = hpriv->hdev;
+ u32 max_size = args->return_size;
+ struct hl_info_undefined_opcode_event info = {0};
+ void __user *out = (void __user *) (uintptr_t) args->return_pointer;
+
+ if ((!max_size) || (!out))
+ return -EINVAL;
+
+ info.timestamp = ktime_to_ns(hdev->last_error.undef_opcode.timestamp);
+ info.engine_id = hdev->last_error.undef_opcode.engine_id;
+ info.cq_addr = hdev->last_error.undef_opcode.cq_addr;
+ info.cq_size = hdev->last_error.undef_opcode.cq_size;
+ info.stream_id = hdev->last_error.undef_opcode.stream_id;
+ info.cb_addr_streams_len = hdev->last_error.undef_opcode.cb_addr_streams_len;
+ memcpy(info.cb_addr_streams, hdev->last_error.undef_opcode.cb_addr_streams,
+ sizeof(info.cb_addr_streams));
+
+ return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
+}
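From userspace the new op is reachable through the INFO ioctl; a hedged sketch (error handling omitted; field names follow struct hl_info_args in the uapi header):

	struct hl_info_undefined_opcode_event ev = {0};
	struct hl_info_args args = {
		.return_pointer = (__u64) (uintptr_t) &ev,
		.return_size = sizeof(ev),
		.op = HL_INFO_UNDEFINED_OPCODE_EVENT,
	};

	if (!ioctl(fd, HL_IOCTL_INFO, &args))
		printf("undef opcode: engine %u stream %u\n", ev.engine_id, ev.stream_id);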
+
static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
void __user *out = (void __user *) (uintptr_t) args->return_pointer;
@@ -626,7 +655,7 @@ static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_
 * For this reason, for all ASICs that do not support multiple page sizes, the
 * function will return an empty bitmask, indicating that multiple page sizes
 * are not supported.
*/
- hdev->asic_funcs->get_valid_dram_page_orders(&info);
+ info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;
return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
@@ -718,6 +747,9 @@ static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
case HL_INFO_RAZWI_EVENT:
return razwi_info(hpriv, args);
+ case HL_INFO_UNDEFINED_OPCODE_EVENT:
+ return undefined_opcode_info(hpriv, args);
+
case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
return dev_mem_alloc_page_sizes_info(hpriv, args);
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 6103e479e855..3f15ab9d827f 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -308,6 +308,7 @@ static void ext_queue_schedule_job(struct hl_cs_job *job)
cq_addr = cq->bus_address + cq->pi * sizeof(struct hl_cq_entry);
hdev->asic_funcs->add_end_of_cb_packets(hdev, cb->kernel_address, len,
+ job->user_cb_size,
cq_addr,
le32_to_cpu(cq_pkt.data),
q->msi_vec,
@@ -695,6 +696,16 @@ int hl_hw_queue_schedule_cs(struct hl_cs *cs)
goto unroll_cq_resv;
}
+ rc = hdev->asic_funcs->pre_schedule_cs(cs);
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed in pre-submission operations of CS %d.%llu\n",
+ ctx->asid, cs->sequence);
+ goto unroll_cq_resv;
+ }
+
+ hdev->shadow_cs_queue[cs->sequence &
+ (hdev->asic_prop.max_pending_cs - 1)] = cs;
if (cs->encaps_signals && cs->staged_first) {
rc = encaps_sig_first_staged_cs_handler(hdev, cs);
@@ -806,13 +817,9 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
int rc;
if (is_cpu_queue)
- p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address);
+ p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address);
else
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address,
+ p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -838,14 +845,10 @@ static int ext_and_cpu_queue_init(struct hl_device *hdev, struct hl_hw_queue *q,
free_queue:
if (is_cpu_queue)
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
else
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address,
- q->bus_address);
+ hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
+ q->bus_address);
return rc;
}
@@ -884,10 +887,8 @@ static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q)
{
void *p;
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- &q->bus_address,
- GFP_KERNEL | __GFP_ZERO);
+ p = hl_asic_dma_alloc_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -1060,14 +1061,10 @@ static void queue_fini(struct hl_device *hdev, struct hl_hw_queue *q)
kfree(q->shadow_queue);
if (q->queue_type == QUEUE_TYPE_CPU)
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address);
else
- hdev->asic_funcs->asic_dma_free_coherent(hdev,
- HL_QUEUE_SIZE_IN_BYTES,
- q->kernel_address,
- q->bus_address);
+ hl_asic_dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, q->kernel_address,
+ q->bus_address);
}
int hl_hw_queues_create(struct hl_device *hdev)
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 8500e15ef743..94d537fd4fde 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -67,6 +67,56 @@ static void irq_handle_eqe(struct work_struct *work)
}
/**
+ * job_finish - queue job finish work
+ *
+ * @hdev: pointer to device structure
+ * @cs_seq: command submission sequence
+ * @cq: completion queue
+ *
+ */
+static void job_finish(struct hl_device *hdev, u32 cs_seq, struct hl_cq *cq)
+{
+ struct hl_hw_queue *queue;
+ struct hl_cs_job *job;
+
+ queue = &hdev->kernel_queues[cq->hw_queue_id];
+ job = queue->shadow_queue[hl_pi_2_offset(cs_seq)];
+ queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
+
+ atomic_inc(&queue->ci);
+}
+
+/**
+ * cs_finish - queue all cs jobs finish work
+ *
+ * @hdev: pointer to device structure
+ * @cs_seq: command submission sequence
+ *
+ */
+static void cs_finish(struct hl_device *hdev, u16 cs_seq)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_hw_queue *queue;
+ struct hl_cs *cs;
+ struct hl_cs_job *job;
+
+ cs = hdev->shadow_cs_queue[cs_seq & (prop->max_pending_cs - 1)];
+ if (!cs) {
+ dev_warn(hdev->dev,
+ "No pointer to CS in shadow array at index %d\n",
+ cs_seq);
+ return;
+ }
+
+ list_for_each_entry(job, &cs->job_list, cs_node) {
+ queue = &hdev->kernel_queues[job->hw_queue_id];
+ atomic_inc(&queue->ci);
+ }
+
+ queue_work(hdev->cs_cmplt_wq, &cs->finish_work);
+}
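Note that the shadow_cs_queue lookup relies on max_pending_cs being a power of two, so the AND acts as a cheap modulo:

	/* power-of-two ring: e.g. max_pending_cs == 64 -> mask 0x3f,
	 * so cs_seq 67 maps to shadow slot 67 & 0x3f == 3
	 */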
+
+/**
* hl_irq_handler_cq - irq handler for completion queue
*
* @irq: irq number
@@ -77,9 +127,7 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
struct hl_cq *cq = arg;
struct hl_device *hdev = cq->hdev;
- struct hl_hw_queue *queue;
- struct hl_cs_job *job;
- bool shadow_index_valid;
+ bool shadow_index_valid, entry_ready;
u16 shadow_index;
struct hl_cq_entry *cq_entry, *cq_base;
@@ -93,37 +141,41 @@ irqreturn_t hl_irq_handler_cq(int irq, void *arg)
cq_base = cq->kernel_address;
while (1) {
- bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
- CQ_ENTRY_READY_MASK)
- >> CQ_ENTRY_READY_SHIFT);
+ cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
+ entry_ready = !!FIELD_GET(CQ_ENTRY_READY_MASK,
+ le32_to_cpu(cq_entry->data));
if (!entry_ready)
break;
- cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
-
/* Make sure we read CQ entry contents after we've
* checked the ownership bit.
*/
dma_rmb();
- shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
- CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
- >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);
-
- shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
- CQ_ENTRY_SHADOW_INDEX_MASK)
- >> CQ_ENTRY_SHADOW_INDEX_SHIFT);
+ shadow_index_valid =
+ !!FIELD_GET(CQ_ENTRY_SHADOW_INDEX_VALID_MASK,
+ le32_to_cpu(cq_entry->data));
- queue = &hdev->kernel_queues[cq->hw_queue_id];
+ shadow_index = FIELD_GET(CQ_ENTRY_SHADOW_INDEX_MASK,
+ le32_to_cpu(cq_entry->data));
- if ((shadow_index_valid) && (!hdev->disabled)) {
- job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
- queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
+ /*
+ * CQ interrupt handler has 2 modes of operation:
+ * 1. Interrupt per CS completion: (Single CQ for all queues)
+ * CQ entry represents a completed CS
+ *
+ * 2. Interrupt per CS job completion in queue: (CQ per queue)
+ * CQ entry represents a completed job in a certain queue
+ */
+ if (shadow_index_valid && !hdev->disabled) {
+ if (hdev->asic_prop.completion_mode ==
+ HL_COMPLETION_MODE_CS)
+ cs_finish(hdev, shadow_index);
+ else
+ job_finish(hdev, shadow_index, cq);
}
- atomic_inc(&queue->ci);
-
/* Clear CQ entry ready bit */
cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
~CQ_ENTRY_READY_MASK);
@@ -217,8 +269,7 @@ static int handle_registration_node(struct hl_device *hdev, struct hl_user_pendi
return 0;
}
-static void handle_user_cq(struct hl_device *hdev,
- struct hl_user_interrupt *user_cq)
+static void handle_user_interrupt(struct hl_device *hdev, struct hl_user_interrupt *intr)
{
struct hl_user_pending_interrupt *pend, *temp_pend;
struct list_head *ts_reg_free_list_head = NULL;
@@ -240,8 +291,8 @@ static void handle_user_cq(struct hl_device *hdev,
if (!job)
return;
- spin_lock(&user_cq->wait_list_lock);
- list_for_each_entry_safe(pend, temp_pend, &user_cq->wait_list_head, wait_list_node) {
+ spin_lock(&intr->wait_list_lock);
+ list_for_each_entry_safe(pend, temp_pend, &intr->wait_list_head, wait_list_node) {
if ((pend->cq_kernel_addr && *(pend->cq_kernel_addr) >= pend->cq_target_value) ||
!pend->cq_kernel_addr) {
if (pend->ts_reg_info.buf) {
@@ -258,7 +309,7 @@ static void handle_user_cq(struct hl_device *hdev,
}
}
}
- spin_unlock(&user_cq->wait_list_lock);
+ spin_unlock(&intr->wait_list_lock);
if (ts_reg_free_list_head) {
INIT_WORK(&job->free_obj, hl_ts_free_objects);
@@ -271,22 +322,24 @@ static void handle_user_cq(struct hl_device *hdev,
}
/**
- * hl_irq_handler_user_cq - irq handler for user completion queues
+ * hl_irq_handler_user_interrupt - irq handler for user interrupts
*
* @irq: irq number
* @arg: pointer to user interrupt structure
*
*/
-irqreturn_t hl_irq_handler_user_cq(int irq, void *arg)
+irqreturn_t hl_irq_handler_user_interrupt(int irq, void *arg)
{
- struct hl_user_interrupt *user_cq = arg;
- struct hl_device *hdev = user_cq->hdev;
+ struct hl_user_interrupt *user_int = arg;
+ struct hl_device *hdev = user_int->hdev;
- /* Handle user cq interrupts registered on all interrupts */
- handle_user_cq(hdev, &hdev->common_user_interrupt);
+ if (user_int->is_decoder)
+ handle_user_interrupt(hdev, &hdev->common_decoder_interrupt);
+ else
+ handle_user_interrupt(hdev, &hdev->common_user_cq_interrupt);
- /* Handle user cq interrupts registered on this specific interrupt */
- handle_user_cq(hdev, user_cq);
+ /* Handle user cq or decoder interrupts registered on this specific irq */
+ handle_user_interrupt(hdev, user_int);
return IRQ_HANDLED;
}
@@ -304,9 +357,7 @@ irqreturn_t hl_irq_handler_default(int irq, void *arg)
struct hl_device *hdev = user_interrupt->hdev;
u32 interrupt_id = user_interrupt->interrupt_id;
- dev_err(hdev->dev,
- "got invalid user interrupt %u",
- interrupt_id);
+ dev_err(hdev->dev, "got invalid user interrupt %u", interrupt_id);
return IRQ_HANDLED;
}
@@ -360,7 +411,7 @@ irqreturn_t hl_irq_handler_eq(int irq, void *arg)
*/
dma_rmb();
- if (hdev->disabled && !hdev->reset_info.is_in_soft_reset) {
+ if (hdev->disabled && !hdev->reset_info.in_compute_reset) {
dev_warn(hdev->dev, "Device disabled but received an EQ event\n");
goto skip_irq;
}
@@ -390,11 +441,26 @@ skip_irq:
}
/**
+ * hl_irq_handler_dec_abnrm - Decoder error interrupt handler
+ * @irq: IRQ number
+ * @arg: pointer to decoder structure.
+ */
+irqreturn_t hl_irq_handler_dec_abnrm(int irq, void *arg)
+{
+ struct hl_dec *dec = arg;
+
+ schedule_work(&dec->completion_abnrm_work);
+
+ return IRQ_HANDLED;
+}
+
+/**
 * hl_cq_init - main initialization function for a cq object
*
* @hdev: pointer to device structure
* @q: pointer to cq structure
* @hw_queue_id: The H/W queue ID this completion queue belongs to
+ * HL_INVALID_QUEUE if cq is not attached to any specific queue
*
* Allocate dma-able memory for the completion queue and initialize fields
* Returns 0 on success
@@ -403,8 +469,8 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
void *p;
- p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
- &q->bus_address, GFP_KERNEL | __GFP_ZERO);
+ p = hl_asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, &q->bus_address,
+ GFP_KERNEL | __GFP_ZERO);
if (!p)
return -ENOMEM;
@@ -429,9 +495,7 @@ int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
*/
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
- hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
- q->kernel_address,
- q->bus_address);
+ hl_asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, q->kernel_address, q->bus_address);
}
void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
@@ -464,9 +528,7 @@ int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
void *p;
- p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
- HL_EQ_SIZE_IN_BYTES,
- &q->bus_address);
+ p = hl_cpu_accessible_dma_pool_alloc(hdev, HL_EQ_SIZE_IN_BYTES, &q->bus_address);
if (!p)
return -ENOMEM;
@@ -490,9 +552,7 @@ void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
flush_workqueue(hdev->eq_wq);
- hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
- HL_EQ_SIZE_IN_BYTES,
- q->kernel_address);
+ hl_cpu_accessible_dma_pool_free(hdev, HL_EQ_SIZE_IN_BYTES, q->kernel_address);
}
void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 663dd7e589d4..61bc1bfe984a 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2021 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -27,7 +27,7 @@ static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
{
struct asic_fixed_properties *prop = &hdev->asic_prop;
- u32 psize;
+ u64 psize;
/*
* for ASIC that supports setting the allocation page size by user we will address
@@ -36,8 +36,8 @@ static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u
if (prop->supports_user_set_page_size && args->alloc.page_size) {
psize = args->alloc.page_size;
- if (!hdev->asic_funcs->is_valid_dram_page_size(psize)) {
- dev_err(hdev->dev, "user page size (%#x) is not valid\n", psize);
+ if (!is_power_of_2(psize)) {
+ dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
return -EINVAL;
}
} else {
@@ -305,33 +305,20 @@ static void dram_pg_pool_do_release(struct kref *ref)
*
* This function does the following:
* - For DRAM memory only
- * - iterate over the pack, scrub and free each physical block structure by
+ * - iterate over the pack, free each physical block structure by
* returning it to the general pool.
- * In case of error during scrubbing, initiate hard reset.
- * Once hard reset is triggered, scrubbing is bypassed while freeing the
- * memory continues.
* - Free the hl_vm_phys_pg_pack structure.
*/
-static int free_phys_pg_pack(struct hl_device *hdev,
+static void free_phys_pg_pack(struct hl_device *hdev,
struct hl_vm_phys_pg_pack *phys_pg_pack)
{
struct hl_vm *vm = &hdev->vm;
u64 i;
- int rc = 0;
if (phys_pg_pack->created_from_userptr)
goto end;
if (phys_pg_pack->contiguous) {
- if (hdev->memory_scrub && !hdev->disabled) {
- rc = hdev->asic_funcs->scrub_device_mem(hdev,
- phys_pg_pack->pages[0],
- phys_pg_pack->total_size);
- if (rc)
- dev_err(hdev->dev,
- "Failed to scrub contiguous device memory\n");
- }
-
gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
phys_pg_pack->total_size);
@@ -340,15 +327,6 @@ static int free_phys_pg_pack(struct hl_device *hdev,
dram_pg_pool_do_release);
} else {
for (i = 0 ; i < phys_pg_pack->npages ; i++) {
- if (hdev->memory_scrub && !hdev->disabled && rc == 0) {
- rc = hdev->asic_funcs->scrub_device_mem(
- hdev,
- phys_pg_pack->pages[i],
- phys_pg_pack->page_size);
- if (rc)
- dev_err(hdev->dev,
- "Failed to scrub device memory\n");
- }
gen_pool_free(vm->dram_pg_pool,
phys_pg_pack->pages[i],
phys_pg_pack->page_size);
@@ -357,14 +335,11 @@ static int free_phys_pg_pack(struct hl_device *hdev,
}
}
- if (rc && !hdev->disabled)
- hl_device_reset(hdev, HL_DRV_RESET_HARD);
-
end:
kvfree(phys_pg_pack->pages);
kfree(phys_pg_pack);
- return rc;
+ return;
}
/**
@@ -384,40 +359,35 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
spin_lock(&vm->idr_lock);
phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
- if (phys_pg_pack) {
- if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
- dev_err(hdev->dev, "handle %u is mapped, cannot free\n",
- handle);
- spin_unlock(&vm->idr_lock);
- return -EINVAL;
- }
-
- if (phys_pg_pack->exporting_cnt) {
- dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
- spin_unlock(&vm->idr_lock);
- return -EINVAL;
- }
-
- /*
- * must remove from idr before the freeing of the physical
- * pages as the refcount of the pool is also the trigger of the
- * idr destroy
- */
- idr_remove(&vm->phys_pg_pack_handles, handle);
+ if (!phys_pg_pack) {
spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
+ return -EINVAL;
+ }
- atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
- atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
+ if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
+ spin_unlock(&vm->idr_lock);
+ dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
+ return -EINVAL;
+ }
- return free_phys_pg_pack(hdev, phys_pg_pack);
- } else {
+ if (phys_pg_pack->exporting_cnt) {
spin_unlock(&vm->idr_lock);
- dev_err(hdev->dev,
- "free device memory failed, no match for handle %u\n",
- handle);
+ dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
return -EINVAL;
}
+	/* must remove from the idr before freeing the physical pages, as the refcount
+	 * of the pool is also the trigger of the idr destroy
+ */
+ idr_remove(&vm->phys_pg_pack_handles, handle);
+ spin_unlock(&vm->idr_lock);
+
+ atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
+ atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
+
+ free_phys_pg_pack(hdev, phys_pg_pack);
+
return 0;
}
@@ -657,7 +627,7 @@ static u64 get_va_block(struct hl_device *hdev,
/* Check if we need to ignore hint address */
if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
- (!is_align_pow_2 && is_hint_dram_addr &&
+ (!is_align_pow_2 && is_hint_dram_addr &&
do_div(tmp_hint_addr, va_range->page_size))) {
if (force_hint) {
@@ -1245,16 +1215,16 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
if (rc) {
dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
+ mutex_unlock(&ctx->mmu_lock);
goto map_err;
}
rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, ret_vaddr, phys_pg_pack->total_size);
+ mutex_unlock(&ctx->mmu_lock);
if (rc)
goto map_err;
- mutex_unlock(&ctx->mmu_lock);
-
/*
 * prefetch is done upon the user's request. It is performed in a WQ and so can
 * be outside the MMU lock. The operation itself is already protected by the MMU lock.
@@ -1278,13 +1248,11 @@ static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device
*device_addr = ret_vaddr;
if (is_userptr)
- rc = free_phys_pg_pack(hdev, phys_pg_pack);
+ free_phys_pg_pack(hdev, phys_pg_pack);
return rc;
map_err:
- mutex_unlock(&ctx->mmu_lock);
-
if (add_va_block(hdev, va_range, ret_vaddr,
ret_vaddr + phys_pg_pack->total_size - 1))
dev_warn(hdev->dev,
@@ -2509,17 +2477,20 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
* va_range_init() - initialize virtual addresses range.
* @hdev: pointer to the habanalabs device structure.
* @va_ranges: pointer to va_ranges array.
- * @start: range start address.
- * @end: range end address.
+ * @range_type: virtual address range type.
+ * @start: range start address, inclusive.
+ * @end: range end address, inclusive.
* @page_size: page size for this va_range.
*
* This function does the following:
* - Initializes the virtual addresses list of the given range with the given
* addresses.
*/
-static int va_range_init(struct hl_device *hdev, struct hl_va_range *va_range,
- u64 start, u64 end, u32 page_size)
+static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
+ enum hl_va_range_type range_type, u64 start,
+ u64 end, u32 page_size)
{
+ struct hl_va_range *va_range = va_ranges[range_type];
int rc;
INIT_LIST_HEAD(&va_range->list);
@@ -2637,7 +2608,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
- rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST],
+ rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
host_range_start, host_range_end, host_page_size);
if (rc) {
dev_err(hdev->dev, "failed to init host vm range\n");
@@ -2648,7 +2619,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
rc = va_range_init(hdev,
- ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE],
+ ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
host_huge_range_start, host_huge_range_end,
host_huge_page_size);
if (rc) {
@@ -2664,7 +2635,7 @@ static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
- rc = va_range_init(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM],
+ rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
dram_range_start, dram_range_end, dram_page_size);
if (rc) {
dev_err(hdev->dev, "failed to init dram vm range\n");
diff --git a/drivers/misc/habanalabs/common/memory_mgr.c b/drivers/misc/habanalabs/common/memory_mgr.c
index ea5f2bd31b0a..56df962d2f3c 100644
--- a/drivers/misc/habanalabs/common/memory_mgr.c
+++ b/drivers/misc/habanalabs/common/memory_mgr.c
@@ -135,7 +135,7 @@ int hl_mmap_mem_buf_put_handle(struct hl_mem_mgr *mmg, u64 handle)
}
/**
- * @hl_mmap_mem_buf_alloc - allocate a new mappable buffer
+ * hl_mmap_mem_buf_alloc - allocate a new mappable buffer
*
 * @mmg: parent unified memory manager
 * @behavior: behavior object describing this buffer's polymorphic behavior
diff --git a/drivers/misc/habanalabs/common/mmu/Makefile b/drivers/misc/habanalabs/common/mmu/Makefile
index d852c3874658..1806c524e04a 100644
--- a/drivers/misc/habanalabs/common/mmu/Makefile
+++ b/drivers/misc/habanalabs/common/mmu/Makefile
@@ -1,2 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o
+HL_COMMON_MMU_FILES := common/mmu/mmu.o common/mmu/mmu_v1.o \
+ common/mmu/mmu_v2_hr.o
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index f3734718d94f..60740de47b34 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2020 HabanaLabs, Ltd.
+ * Copyright 2016-2022 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -51,8 +51,17 @@ int hl_mmu_init(struct hl_device *hdev)
return rc;
}
- if (hdev->mmu_func[MMU_HR_PGT].init != NULL)
+ if (hdev->mmu_func[MMU_HR_PGT].init != NULL) {
rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
+ if (rc)
+ goto fini_dr_mmu;
+ }
+
+ return 0;
+
+fini_dr_mmu:
+ if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
+ hdev->mmu_func[MMU_DR_PGT].fini(hdev);
return rc;
}
@@ -103,8 +112,17 @@ int hl_mmu_ctx_init(struct hl_ctx *ctx)
return rc;
}
- if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL)
+ if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) {
rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
+ if (rc)
+ goto fini_dr_ctx;
+ }
+
+ return 0;
+
+fini_dr_ctx:
+ if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
+ hdev->mmu_func[MMU_DR_PGT].fini(hdev);
return rc;
}
@@ -607,6 +625,11 @@ int hl_mmu_if_set_funcs(struct hl_device *hdev)
case ASIC_GAUDI_SEC:
hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
break;
+ case ASIC_GAUDI2:
+ case ASIC_GAUDI2_SEC:
+ /* MMUs in Gaudi2 are always host resident */
+ hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -745,3 +768,470 @@ u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *m
return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}
+static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk,
+ void *data)
+{
+ struct hl_device *hdev = (struct hl_device *)data;
+
+ hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
+ (void *)chunk->start_addr, chunk->phys_addr);
+}
+
+void hl_mmu_hr_flush(struct hl_ctx *ctx)
+{
+	/* a flush operation requires a memory barrier */
+ mb();
+}
+
+/**
+ * hl_mmu_hr_pool_destroy() - destroy genpool
+ * @hdev: habanalabs device structure.
+ * @hr_priv: MMU HR private data.
+ * @hop_table_size: HOP table size.
+ *
+ * This function does the following:
+ * - free entries allocated for shadow HOP0
+ * - free pool chunks
+ * - free pool
+ */
+static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct gen_pool **pool = &hr_priv->mmu_pgt_pool;
+ struct pgt_info *hop0_pgt;
+ int asid;
+
+ if (ZERO_OR_NULL_PTR(*pool))
+ return;
+
+	/* Free the fixed allocation of HOP0s */
+ if (hr_priv->mmu_asid_hop0) {
+ for (asid = 0 ; asid < prop->max_asid ; asid++) {
+ hop0_pgt = &hr_priv->mmu_asid_hop0[asid];
+ if (ZERO_OR_NULL_PTR(hop0_pgt->virt_addr))
+ continue;
+
+ gen_pool_free(*pool, (uintptr_t) hop0_pgt->virt_addr, hop_table_size);
+ }
+ }
+
+ gen_pool_for_each_chunk(*pool, mmu_dma_mem_free_from_chunk, hdev);
+ gen_pool_destroy(*pool);
+
+	/* Make sure that if we arrive here again without init having been
+	 * called, we won't cause a kernel panic. This can happen, for example,
+	 * if we fail during the hard reset flow at certain points.
+	 */
+ *pool = NULL;
+}
+
+/**
+ * hl_mmu_hr_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ * @hr_priv: MMU HR private data.
+ * @hop_table_size: HOP table size.
+ * @pgt_size: memory size allocated for the page table
+ *
+ * @return 0 on success, otherwise a non-zero error code
+ *
+ * This function does the following:
+ * - Create a pool of pages for pgt_infos.
+ * - Allocate a fixed HOP0 table per ASID from that pool
+ */
+int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
+ u64 pgt_size)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ size_t pool_chunk_size = SZ_4M;
+ struct pgt_info *hop0_pgt;
+ dma_addr_t dma_addr;
+ u64 virt_addr;
+ int i, rc;
+
+	/*
+	 * We set the alloc size to PAGE_SIZE (since the dma_alloc_coherent
+	 * allocation order/size is PAGE_SHIFT/PAGE_SIZE) in order to be able to
+	 * control the allocation's alignment. This way we can call "DMA alloc align"
+	 * according to the dma_alloc granularity and supply allocations with
+	 * higher-order alignment restrictions.
+	 */
+ hr_priv->mmu_pgt_pool = gen_pool_create(PAGE_SHIFT, -1);
+ if (ZERO_OR_NULL_PTR(hr_priv->mmu_pgt_pool)) {
+ dev_err(hdev->dev, "Failed to create hr page pool\n");
+ return -ENOMEM;
+ }
+
+ hr_priv->mmu_asid_hop0 = kvcalloc(prop->max_asid, sizeof(struct pgt_info), GFP_KERNEL);
+ if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
+ dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
+ rc = -ENOMEM;
+ goto destroy_mmu_pgt_pool;
+ }
+
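+	/* Pre-fill the pool with DMA-coherent chunks covering the whole page-table size */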
+ for (i = 0 ; i < pgt_size ; i += pool_chunk_size) {
+ virt_addr = (uintptr_t) hl_asic_dma_alloc_coherent(hdev, pool_chunk_size,
+ &dma_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (ZERO_OR_NULL_PTR(virt_addr)) {
+ dev_err(hdev->dev,
+ "Failed to allocate memory for host-resident page pool\n");
+ rc = -ENOMEM;
+ goto destroy_mmu_pgt_pool;
+ }
+
+ rc = gen_pool_add_virt(hr_priv->mmu_pgt_pool, virt_addr, (phys_addr_t) dma_addr,
+ pool_chunk_size, -1);
+ if (rc) {
+ dev_err(hdev->dev, "Failed to fill host-resident page pool\n");
+ goto destroy_mmu_pgt_pool;
+ }
+ }
+
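+	/* Allocate a fixed, aligned HOP0 table per ASID from the pool */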
+ for (i = 0 ; i < prop->max_asid ; i++) {
+ hop0_pgt = &hr_priv->mmu_asid_hop0[i];
+ hop0_pgt->virt_addr = (uintptr_t)
+ gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
+ hop_table_size,
+ (dma_addr_t *) &hop0_pgt->phys_addr,
+ hop_table_size);
+ if (!hop0_pgt->virt_addr) {
+ dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
+ rc = -ENOMEM;
+ goto destroy_mmu_pgt_pool;
+ }
+ }
+
+ /* MMU H/W init will be done in device hw_init() */
+
+ return 0;
+
+destroy_mmu_pgt_pool:
+ hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
+ if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0))
+ kvfree(hr_priv->mmu_asid_hop0);
+
+ return rc;
+}
+
+/**
+ * hl_mmu_hr_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ * @hr_priv: MMU host resident private info.
+ * @hop_table_size: HOP table size
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
+{
+ /* MMU H/W fini was already done in device hw_fini() */
+
+ hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
+
+ if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
+ kvfree(hr_priv->mmu_asid_hop0);
+
+		/* Make sure that if we arrive here again without init having
+		 * been called, we won't cause a kernel panic. This can happen,
+		 * for example, if we fail during the hard reset flow at certain
+		 * points.
+		 */
+ hr_priv->mmu_asid_hop0 = NULL;
+ }
+}
+
+/**
+ * hl_mmu_hr_free_hop_remove_pgt() - free HOP and remove PGT from hash
+ * @pgt_info: page table info structure.
+ * @hr_priv: MMU HR private data.
+ * @hop_table_size: HOP table size.
+ */
+void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size)
+{
+ gen_pool_free(hr_priv->mmu_pgt_pool, pgt_info->virt_addr, hop_table_size);
+ hash_del(&pgt_info->node);
+ kfree(pgt_info);
+}
+
+/**
+ * hl_mmu_hr_pte_phys_to_virt() - translate PTE phys addr to virt addr
+ * @ctx: pointer to the context structure
+ * @pgt: pgt_info for the HOP hosting the PTE
+ * @phys_pte_addr: phys address of the PTE
+ * @hop_table_size: HOP table size
+ *
+ * @return PTE virtual address
+ *
+ * The function uses the pgt_info to get the HOP base virt addr and obtains the PTE's virt addr
+ * by adding the PTE offset.
+ */
+u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,
+ u64 phys_pte_addr, u32 hop_table_size)
+{
+ u64 page_mask = (hop_table_size - 1);
+ u64 pte_offset = phys_pte_addr & page_mask;
+
+ return pgt->virt_addr + pte_offset;
+}
+
+/**
+ * hl_mmu_hr_write_pte() - write HR PTE
+ * @ctx: pointer to the context structure
+ * @pgt_info: HOP's page table info structure
+ * @phys_pte_addr: phys PTE address
+ * @val: raw PTE data
+ * @hop_table_size: HOP table size
+ */
+void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u64 val, u32 hop_table_size)
+{
+ /*
+ * The value to write is the phys address of the next hop +
+	 * flags in the 12 LSBs.
+ */
+ u64 virt_addr = hl_mmu_hr_pte_phys_to_virt(ctx, pgt_info, phys_pte_addr, hop_table_size);
+
+ *((u64 *) (uintptr_t) virt_addr) = val;
+}
+
+/**
+ * hl_mmu_hr_clear_pte() - clear HR PTE
+ * @ctx: pointer to the context structure
+ * @pgt_info: HOP's page table info structure
+ * @phys_pte_addr: phys PTE address
+ * @hop_table_size: HOP table size
+ */
+void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
+ u32 hop_table_size)
+{
+ /* no need to transform the value to physical address */
+ hl_mmu_hr_write_pte(ctx, pgt_info, phys_pte_addr, 0, hop_table_size);
+}
+
+/**
+ * hl_mmu_hr_put_pte() - put HR PTE and remove it if necessary (no more PTEs)
+ * @ctx: pointer to the context structure
+ * @pgt_info: HOP's page table info structure
+ * @hr_priv: HR MMU private info
+ * @hop_table_size: HOP table size
+ *
+ * @return number of PTEs still in the HOP
+ */
+int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info,
+ struct hl_mmu_hr_priv *hr_priv,
+ u32 hop_table_size)
+{
+ int num_of_ptes_left;
+
+ pgt_info->num_of_ptes--;
+
+ /*
+ * Need to save the number of ptes left because free_hop might free
+ * the pgt_info
+ */
+ num_of_ptes_left = pgt_info->num_of_ptes;
+ if (!num_of_ptes_left)
+ hl_mmu_hr_free_hop_remove_pgt(pgt_info, hr_priv, hop_table_size);
+
+ return num_of_ptes_left;
+}
+
+/**
+ * hl_mmu_hr_get_pte() - increase PGT PTE count
+ * @ctx: pointer to the context structure
+ * @hr_func: host resident functions
+ * @phys_hop_addr: HOP phys address
+ */
+void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr)
+{
+ hr_func->get_pgt_info(ctx, phys_hop_addr)->num_of_ptes++;
+}
+
+/**
+ * hl_mmu_hr_get_next_hop_pgt_info() - get pgt_info structure for the next HOP
+ * @ctx: pointer to the context structure.
+ * @hr_func: host resident functions.
+ * @curr_pte: current PTE value.
+ *
+ * @return pgt_info structure on success, otherwise NULL.
+ */
+struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
+ struct hl_hr_mmu_funcs *hr_func,
+ u64 curr_pte)
+{
+ u64 next_hop_phys_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+ if (next_hop_phys_addr == ULLONG_MAX)
+ return NULL;
+
+ return hr_func->get_pgt_info(ctx, next_hop_phys_addr);
+}
+
+/**
+ * hl_mmu_hr_alloc_hop() - allocate HOP
+ * @ctx: pointer to the context structure.
+ * @hr_priv: host resident private info structure.
+ * @hr_func: host resident functions.
+ * @mmu_prop: MMU properties.
+ *
+ * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
+ */
+struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ dma_addr_t phys_addr;
+ void *virt_addr;
+ int i, retry = 1;
+
+ pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
+ if (!pgt_info)
+ return NULL;
+
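+	/* Try the pool first; if it is exhausted, grow it by a 2MB DMA chunk and retry once */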
+ for (i = 0; i <= retry; i++) {
+ virt_addr = gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
+ mmu_prop->hop_table_size,
+ &phys_addr,
+ mmu_prop->hop_table_size);
+ if (virt_addr)
+ break;
+
+ /* No memory in pool - get some and try again */
+ virt_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &phys_addr,
+ GFP_KERNEL | __GFP_ZERO);
+ if (ZERO_OR_NULL_PTR(virt_addr))
+ break;
+
+ if (gen_pool_add_virt(hr_priv->mmu_pgt_pool, (unsigned long)virt_addr,
+ phys_addr, SZ_2M, -1)) {
+ hl_asic_dma_free_coherent(hdev, SZ_2M, virt_addr, phys_addr);
+ virt_addr = NULL;
+ break;
+ }
+ }
+
+ if (ZERO_OR_NULL_PTR(virt_addr)) {
+ dev_err(hdev->dev, "failed to allocate page\n");
+ goto pool_alloc_err;
+ }
+
+ pgt_info->phys_addr = phys_addr;
+ pgt_info->shadow_addr = (unsigned long) NULL;
+ pgt_info->virt_addr = (unsigned long)virt_addr;
+ pgt_info->ctx = ctx;
+ pgt_info->num_of_ptes = 0;
+ hr_func->add_pgt_info(ctx, pgt_info, phys_addr);
+
+ return pgt_info;
+
+pool_alloc_err:
+ kfree(pgt_info);
+
+ return NULL;
+}
+
+/**
+ * hl_mmu_hr_get_alloc_next_hop() - get the next HOP, allocate it if it does not exist
+ * @ctx: pointer to the context structure.
+ * @hr_priv: host resident private info structure.
+ * @hr_func: host resident functions.
+ * @mmu_prop: MMU properties.
+ * @curr_pte: current PTE value.
+ * @is_new_hop: set to true if the HOP is new (it is the caller's responsibility to initialize it to false).
+ *
+ * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
+ */
+struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
+ struct hl_mmu_hr_priv *hr_priv,
+ struct hl_hr_mmu_funcs *hr_func,
+ struct hl_mmu_properties *mmu_prop,
+ u64 curr_pte, bool *is_new_hop)
+{
+ u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);
+
+ if (hop_addr != ULLONG_MAX)
+ return hr_func->get_pgt_info(ctx, hop_addr);
+
+ *is_new_hop = true;
+ return hl_mmu_hr_alloc_hop(ctx, hr_priv, hr_func, mmu_prop);
+}
+
+/**
+ * hl_mmu_hr_get_tlb_info() - get the TLB info (info for a specific mapping)
+ * @ctx: pointer to the context structure.
+ * @virt_addr: the virt address for which to get info.
+ * @hops: HOPs info structure.
+ * @hr_func: host resident functions.
+ *
+ * @return 0 on success, otherwise a non-zero error code.
+ */
+int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
+ struct hl_hr_mmu_funcs *hr_func)
+{
+ /* using 6 HOPs as this is the maximum number of HOPs */
+ struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_mmu_properties *mmu_prop;
+ int rc, i, used_hops;
+ bool is_huge;
+
+ rc = hr_func->get_tlb_mapping_params(hdev, &mmu_prop, hops, virt_addr, &is_huge);
+ if (rc)
+ return rc;
+
+ used_hops = mmu_prop->num_hops;
+
+ /* huge pages use one less hop */
+ if (is_huge)
+ used_hops--;
+
+ hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+
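+	/* Walk the HOPs: HOP0 comes from the per-ASID table, each next HOP from the previous PTE */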
+ for (i = 0 ; i < used_hops ; i++) {
+ if (i == 0)
+ hops_pgt_info[i] = hr_func->get_hop0_pgt_info(ctx);
+ else
+ hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx, hr_func,
+ hops->hop_info[i - 1].hop_pte_val);
+
+ if (!hops_pgt_info[i])
+ return -EFAULT;
+
+ hops->hop_info[i].hop_addr = hops_pgt_info[i]->phys_addr;
+ hops->hop_info[i].hop_pte_addr =
+ hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops->hop_info[i].hop_addr,
+ hops->scrambled_vaddr);
+ hops->hop_info[i].hop_pte_val = *(u64 *) (uintptr_t)
+ hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hops->hop_info[i].hop_pte_addr,
+ mmu_prop->hop_table_size);
+
+ if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
+ return -EFAULT;
+
+ if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
+ break;
+ }
+
+	/* if we passed over all the hops then no last hop was found */
+ if (i == mmu_prop->num_hops)
+ return -EFAULT;
+
+ if (hops->scrambled_vaddr != virt_addr)
+ hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
+ (hdev, hops->hop_info[i].hop_pte_val);
+ else
+ hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;
+
+ hops->used_hops = i + 1;
+
+ return 0;
+}
+
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v1.c b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
index e2d91a69acc2..8a40de4a4761 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu_v1.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v1.c
@@ -393,9 +393,8 @@ static int hl_mmu_v1_init(struct hl_device *hdev)
goto err_pool_add;
}
- hdev->mmu_priv.dr.mmu_shadow_hop0 = kvmalloc_array(prop->max_asid,
- prop->mmu_hop_table_size,
- GFP_KERNEL | __GFP_ZERO);
+ hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid, prop->mmu_hop_table_size,
+ GFP_KERNEL);
if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
rc = -ENOMEM;
goto err_pool_add;
@@ -412,7 +411,7 @@ err_pool_add:
}
/**
- * hl_mmu_fini() - release the MMU module.
+ * hl_mmu_v1_fini() - release the MMU module.
* @hdev: habanalabs device structure.
*
* This function does the following:
@@ -438,7 +437,7 @@ static void hl_mmu_v1_fini(struct hl_device *hdev)
}
/**
- * hl_mmu_ctx_init() - initialize a context for using the MMU module.
+ * hl_mmu_v1_ctx_init() - initialize a context for using the MMU module.
* @ctx: pointer to the context structure to initialize.
*
* Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
diff --git a/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c b/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c
new file mode 100644
index 000000000000..afe7ef964f82
--- /dev/null
+++ b/drivers/misc/habanalabs/common/mmu/mmu_v2_hr.c
@@ -0,0 +1,399 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020-2022 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "../habanalabs.h"
+#include "../../include/hw_ip/mmu/mmu_general.h"
+
+#include <linux/slab.h>
+
+static struct pgt_info *hl_mmu_v2_hr_get_pgt_info(struct hl_ctx *ctx, u64 phys_hop_addr)
+{
+ struct pgt_info *pgt_info = NULL;
+
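+	/* PGTs are hashed by their physical HOP address; scan the bucket for an exact match */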
+ hash_for_each_possible(ctx->hr_mmu_phys_hash, pgt_info, node,
+ (unsigned long) phys_hop_addr)
+ if (phys_hop_addr == pgt_info->phys_addr)
+ break;
+
+ return pgt_info;
+}
+
+static void hl_mmu_v2_hr_add_pgt_info(struct hl_ctx *ctx, struct pgt_info *pgt_info,
+ dma_addr_t phys_addr)
+{
+ hash_add(ctx->hr_mmu_phys_hash, &pgt_info->node, phys_addr);
+}
+
+static struct pgt_info *hl_mmu_v2_hr_get_hop0_pgt_info(struct hl_ctx *ctx)
+{
+ return &ctx->hdev->mmu_priv.hr.mmu_asid_hop0[ctx->asid];
+}
+
+/**
+ * hl_mmu_v2_hr_init() - initialize the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Create a pool of pages for pgt_infos.
+ * - Allocate a fixed HOP0 table per ASID from that pool
+ *
+ * Return: 0 for success, non-zero for failure.
+ */
+static inline int hl_mmu_v2_hr_init(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ return hl_mmu_hr_init(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size,
+ prop->mmu_pgt_size);
+}
+
+/**
+ * hl_mmu_v2_hr_fini() - release the MMU module.
+ * @hdev: habanalabs device structure.
+ *
+ * This function does the following:
+ * - Disable MMU in H/W.
+ * - Free the pgt_infos pool.
+ *
+ * All contexts should be freed before calling this function.
+ */
+static inline void hl_mmu_v2_hr_fini(struct hl_device *hdev)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+
+ hl_mmu_hr_fini(hdev, &hdev->mmu_priv.hr, prop->mmu_hop_table_size);
+}
+
+/**
+ * hl_mmu_v2_hr_ctx_init() - initialize a context for using the MMU module.
+ * @ctx: pointer to the context structure to initialize.
+ *
+ * Initialize a mutex to protect the concurrent mapping flow, a hash to hold all
+ * page tables hops related to this context.
+ * Return: 0 on success, non-zero otherwise.
+ */
+static int hl_mmu_v2_hr_ctx_init(struct hl_ctx *ctx)
+{
+ hash_init(ctx->hr_mmu_phys_hash);
+ return 0;
+}
+
+/*
+ * hl_mmu_v2_hr_ctx_fini - disable a ctx from using the mmu module
+ *
+ * @ctx: pointer to the context structure
+ *
+ * This function does the following:
+ * - Free any pgts which were not freed yet
+ */
+static void hl_mmu_v2_hr_ctx_fini(struct hl_ctx *ctx)
+{
+ struct hl_device *hdev = ctx->hdev;
+ struct pgt_info *pgt_info;
+ struct hlist_node *tmp;
+ int i;
+
+ if (!hash_empty(ctx->hr_mmu_phys_hash))
+ dev_err(hdev->dev, "ctx %d is freed while it has pgts in use\n",
+ ctx->asid);
+
+ hash_for_each_safe(ctx->hr_mmu_phys_hash, i, tmp, pgt_info, node) {
+ dev_err_ratelimited(hdev->dev,
+ "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n",
+ pgt_info->phys_addr, ctx->asid, pgt_info->num_of_ptes);
+ hl_mmu_hr_free_hop_remove_pgt(pgt_info, &ctx->hdev->mmu_priv.hr,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+ }
+}
+
+static int _hl_mmu_v2_hr_unmap(struct hl_ctx *ctx,
+ u64 virt_addr, bool is_dram_addr)
+{
+ u64 curr_pte, scrambled_virt_addr, hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 };
+ struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop;
+ struct hl_mmu_properties *mmu_prop;
+ bool is_huge = false;
+ int i, hop_last;
+
+ prop = &hdev->asic_prop;
+
+ /* shifts and masks are the same in PMMU and HMMU, use one of them */
+ mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
+ hop_last = mmu_prop->num_hops - 1;
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+ curr_pte = 0;
+
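+	/* First pass: walk down the HOPs and record each HOP's PTE physical address */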
+ for (i = 0 ; i < mmu_prop->num_hops ; i++) {
+ /* we get HOP0 differently, it doesn't need curr_pte */
+ if (i == 0)
+ hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
+ else
+ hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx,
+ &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs, curr_pte);
+ if (!hops_pgt_info[i])
+ goto not_mapped;
+
+ hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops_pgt_info[i]->phys_addr,
+ scrambled_virt_addr);
+ if (hop_pte_phys_addr[i] == U64_MAX)
+ return -EFAULT;
+
+ curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ if ((i < hop_last) && (curr_pte & mmu_prop->last_mask)) {
+ hop_last = i;
+ is_huge = true;
+ break;
+ }
+ }
+
+ if (is_dram_addr && !is_huge) {
+ dev_err(hdev->dev, "DRAM unmapping should use huge pages only\n");
+ return -EFAULT;
+ }
+
+ if (!(curr_pte & PAGE_PRESENT_MASK))
+ goto not_mapped;
+
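+	/* Second pass: clear the PTEs bottom-up, freeing each emptied HOP; stop at the first HOP that still has live PTEs */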
+ for (i = hop_last ; i > 0 ; i--) {
+ hl_mmu_hr_clear_pte(ctx, hops_pgt_info[i], hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ if (hl_mmu_hr_put_pte(ctx, hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
+ ctx->hdev->asic_prop.mmu_hop_table_size))
+ goto mapped;
+ }
+ hl_mmu_hr_clear_pte(ctx, hops_pgt_info[0], hop_pte_phys_addr[0],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+mapped:
+ return 0;
+
+not_mapped:
+ dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", virt_addr);
+
+ return -EINVAL;
+}
+
+static int hl_mmu_v2_get_last_hop(struct hl_mmu_properties *mmu_prop, u32 page_size)
+{
+ int hop;
+
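+	/* Pick the deepest HOP whose region size covers the requested page size */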
+ for (hop = (mmu_prop->num_hops - 1); hop; hop--) {
+ if (mmu_prop->hop_shifts[hop] == 0)
+ continue;
+
+ if (page_size <= (1 << mmu_prop->hop_shifts[hop]))
+ break;
+ }
+
+ return hop;
+}
+
+static int _hl_mmu_v2_hr_map(struct hl_ctx *ctx,
+ u64 virt_addr, u64 phys_addr,
+ u32 page_size, bool is_dram_addr)
+{
+ u64 hop_pte_phys_addr[MMU_ARCH_6_HOPS] = { 0 },
+ curr_pte = 0, scrambled_virt_addr, scrambled_phys_addr;
+ struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
+ bool hop_new[MMU_ARCH_6_HOPS] = { false };
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ struct hl_mmu_properties *mmu_prop;
+ int i, hop_last, rc = -ENOMEM;
+
+	/*
+	 * This mapping function can map a page or a huge page. For a huge page
+	 * there are only 4 hops rather than 5. Currently the DRAM allocation
+	 * uses huge pages only, but user memory could have been allocated with
+	 * either of the two page sizes. Since this is common code for all
+	 * three cases, we need this huge-page check.
+	 */
+ if (is_dram_addr)
+ mmu_prop = &prop->dmmu;
+ else if (page_size == prop->pmmu_huge.page_size)
+ mmu_prop = &prop->pmmu_huge;
+ else
+ mmu_prop = &prop->pmmu;
+
+ hop_last = hl_mmu_v2_get_last_hop(mmu_prop, page_size);
+ if (hop_last <= 0) {
+ dev_err(ctx->hdev->dev, "Invalid last HOP %d\n", hop_last);
+ return -EFAULT;
+ }
+
+ scrambled_virt_addr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);
+ scrambled_phys_addr = hdev->asic_funcs->scramble_addr(hdev, phys_addr);
+
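+	/* Walk down the HOPs, allocating any missing intermediate HOP along the way */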
+ for (i = 0 ; i <= hop_last ; i++) {
+
+ if (i == 0)
+ hops_pgt_info[i] = hl_mmu_v2_hr_get_hop0_pgt_info(ctx);
+ else
+ hops_pgt_info[i] = hl_mmu_hr_get_alloc_next_hop(ctx,
+ &ctx->hdev->mmu_priv.hr,
+ &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
+ mmu_prop, curr_pte, &hop_new[i]);
+ if (!hops_pgt_info[i])
+ goto err;
+
+ hop_pte_phys_addr[i] = hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
+ hops_pgt_info[i]->phys_addr,
+ scrambled_virt_addr);
+ curr_pte = *(u64 *) (uintptr_t) hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+ }
+
+ if (curr_pte & PAGE_PRESENT_MASK) {
+ dev_err(hdev->dev, "mapping already exists for virt_addr 0x%llx\n",
+ scrambled_virt_addr);
+
+ for (i = 0 ; i <= hop_last ; i++)
+ dev_dbg(hdev->dev, "hop%d pte: 0x%llx (0x%llx)\n",
+ i,
+ *(u64 *) (uintptr_t)
+ hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
+ hop_pte_phys_addr[i],
+ ctx->hdev->asic_prop.mmu_hop_table_size),
+ hop_pte_phys_addr[i]);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ curr_pte = (scrambled_phys_addr & HOP_PHYS_ADDR_MASK) | mmu_prop->last_mask
+ | PAGE_PRESENT_MASK;
+
+ /* Write the PTEs */
+ hl_mmu_hr_write_pte(ctx, hops_pgt_info[hop_last], hop_pte_phys_addr[hop_last], curr_pte,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ /* for each new hop, add its address to the table of previous-hop */
+ for (i = 1 ; i <= hop_last ; i++) {
+ if (hop_new[i]) {
+ curr_pte = (hops_pgt_info[i]->phys_addr & HOP_PHYS_ADDR_MASK) |
+ PAGE_PRESENT_MASK;
+ hl_mmu_hr_write_pte(ctx, hops_pgt_info[i - 1], hop_pte_phys_addr[i - 1],
+ curr_pte, ctx->hdev->asic_prop.mmu_hop_table_size);
+ if (i - 1)
+ hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
+ hops_pgt_info[i - 1]->phys_addr);
+ }
+ }
+
+ hl_mmu_hr_get_pte(ctx, &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs,
+ hops_pgt_info[hop_last]->phys_addr);
+
+ return 0;
+
+err:
+ for (i = 1 ; i <= hop_last ; i++)
+ if (hop_new[i] && hops_pgt_info[i])
+ hl_mmu_hr_free_hop_remove_pgt(hops_pgt_info[i], &ctx->hdev->mmu_priv.hr,
+ ctx->hdev->asic_prop.mmu_hop_table_size);
+
+ return rc;
+}
+
+/*
+ * hl_mmu_v2_hr_swap_out - marks all mappings of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_hr_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_v2_hr_swap_in - marks all mappings of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+static void hl_mmu_v2_hr_swap_in(struct hl_ctx *ctx)
+{
+
+}
+
+static int hl_mmu_v2_hr_get_tlb_mapping_params(struct hl_device *hdev,
+ struct hl_mmu_properties **mmu_prop,
+ struct hl_mmu_hop_info *hops,
+ u64 virt_addr, bool *is_huge)
+{
+ struct asic_fixed_properties *prop = &hdev->asic_prop;
+ bool is_dram_addr, is_pmmu_addr, is_pmmu_h_addr;
+
+ is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
+ prop->dmmu.start_addr,
+ prop->dmmu.end_addr);
+ is_pmmu_addr = hl_mem_area_inside_range(virt_addr, prop->pmmu.page_size,
+ prop->pmmu.start_addr,
+ prop->pmmu.end_addr);
+ is_pmmu_h_addr = hl_mem_area_inside_range(virt_addr,
+ prop->pmmu_huge.page_size,
+ prop->pmmu_huge.start_addr,
+ prop->pmmu_huge.end_addr);
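+	/* Classify the address into one of the three VA ranges to pick the matching MMU properties */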
+ if (is_dram_addr) {
+ *mmu_prop = &prop->dmmu;
+ *is_huge = true;
+ hops->range_type = HL_VA_RANGE_TYPE_DRAM;
+ } else if (is_pmmu_addr) {
+ *mmu_prop = &prop->pmmu;
+ *is_huge = false;
+ hops->range_type = HL_VA_RANGE_TYPE_HOST;
+ } else if (is_pmmu_h_addr) {
+ *mmu_prop = &prop->pmmu_huge;
+ *is_huge = true;
+ hops->range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hl_mmu_v2_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
+ struct hl_mmu_hop_info *hops)
+{
+ return hl_mmu_hr_get_tlb_info(ctx, virt_addr, hops,
+ &ctx->hdev->mmu_func[MMU_HR_PGT].hr_funcs);
+}
+
+/*
+ * hl_mmu_v2_hr_set_funcs - set the host-resident MMU v2 functions
+ *
+ * @hdev: pointer to the device structure
+ * @mmu: pointer to the MMU functions structure
+ */
+void hl_mmu_v2_hr_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu)
+{
+ mmu->init = hl_mmu_v2_hr_init;
+ mmu->fini = hl_mmu_v2_hr_fini;
+ mmu->ctx_init = hl_mmu_v2_hr_ctx_init;
+ mmu->ctx_fini = hl_mmu_v2_hr_ctx_fini;
+ mmu->map = _hl_mmu_v2_hr_map;
+ mmu->unmap = _hl_mmu_v2_hr_unmap;
+ mmu->flush = hl_mmu_hr_flush;
+ mmu->swap_out = hl_mmu_v2_hr_swap_out;
+ mmu->swap_in = hl_mmu_v2_hr_swap_in;
+ mmu->get_tlb_info = hl_mmu_v2_hr_get_tlb_info;
+ mmu->hr_funcs.get_hop0_pgt_info = hl_mmu_v2_hr_get_hop0_pgt_info;
+ mmu->hr_funcs.get_pgt_info = hl_mmu_v2_hr_get_pgt_info;
+ mmu->hr_funcs.add_pgt_info = hl_mmu_v2_hr_add_pgt_info;
+ mmu->hr_funcs.get_tlb_mapping_params = hl_mmu_v2_hr_get_tlb_mapping_params;
+}
diff --git a/drivers/misc/habanalabs/common/pci/pci.c b/drivers/misc/habanalabs/common/pci/pci.c
index 610acd4a8057..5fe3da5fba30 100644
--- a/drivers/misc/habanalabs/common/pci/pci.c
+++ b/drivers/misc/habanalabs/common/pci/pci.c
@@ -225,27 +225,6 @@ int hl_pci_iatu_write(struct hl_device *hdev, u32 addr, u32 data)
}
/**
- * hl_pci_reset_link_through_bridge() - Reset PCI link.
- * @hdev: Pointer to hl_device structure.
- */
-static void hl_pci_reset_link_through_bridge(struct hl_device *hdev)
-{
- struct pci_dev *pdev = hdev->pdev;
- struct pci_dev *parent_port;
- u16 val;
-
- parent_port = pdev->bus->self;
- pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val);
- val |= PCI_BRIDGE_CTL_BUS_RESET;
- pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
- ssleep(1);
-
- val &= ~(PCI_BRIDGE_CTL_BUS_RESET);
- pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val);
- ssleep(3);
-}
-
-/**
* hl_pci_set_inbound_region() - Configure inbound region
* @hdev: Pointer to hl_device structure.
* @region: Inbound region number.
@@ -280,21 +259,19 @@ int hl_pci_set_inbound_region(struct hl_device *hdev, u8 region,
}
/* Point to the specified address */
- rc |= hl_pci_iatu_write(hdev, offset + 0x14,
- lower_32_bits(pci_region->addr));
- rc |= hl_pci_iatu_write(hdev, offset + 0x18,
- upper_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x14, lower_32_bits(pci_region->addr));
+ rc |= hl_pci_iatu_write(hdev, offset + 0x18, upper_32_bits(pci_region->addr));
+
+ /* Set bar type as memory */
rc |= hl_pci_iatu_write(hdev, offset + 0x0, 0);
/* Enable + bar/address match + match enable + bar number */
ctrl_reg_val = FIELD_PREP(IATU_REGION_CTRL_REGION_EN_MASK, 1);
- ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK,
- pci_region->mode);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_MATCH_MODE_MASK, pci_region->mode);
ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_NUM_MATCH_EN_MASK, 1);
if (pci_region->mode == PCI_BAR_MATCH_MODE)
- ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK,
- pci_region->bar);
+ ctrl_reg_val |= FIELD_PREP(IATU_REGION_CTRL_BAR_NUM_MASK, pci_region->bar);
rc |= hl_pci_iatu_write(hdev, offset + 0x4, ctrl_reg_val);
@@ -396,9 +373,6 @@ int hl_pci_init(struct hl_device *hdev)
struct pci_dev *pdev = hdev->pdev;
int rc;
- if (hdev->reset_pcilink)
- hl_pci_reset_link_through_bridge(hdev);
-
rc = pci_enable_device_mem(pdev);
if (rc) {
dev_err(hdev->dev, "can't enable PCI device\n");
@@ -445,7 +419,7 @@ disable_device:
}
/**
- * hl_fw_fini() - PCI finalization code.
+ * hl_pci_fini() - PCI finalization code.
* @hdev: Pointer to hl_device structure
*
* Unmap PCI bars and disable PCI device.
diff --git a/drivers/misc/habanalabs/common/security.c b/drivers/misc/habanalabs/common/security.c
new file mode 100644
index 000000000000..6196c0487c8b
--- /dev/null
+++ b/drivers/misc/habanalabs/common/security.c
@@ -0,0 +1,600 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright 2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ */
+
+#include "habanalabs.h"
+
+/**
+ * hl_get_pb_block - return the index of the relevant block within the block array
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_addr: register address in the desired block
+ * @pb_blocks: blocks array
+ * @array_size: blocks array size
+ *
+ */
+static int hl_get_pb_block(struct hl_device *hdev, u32 mm_reg_addr,
+ const u32 pb_blocks[], int array_size)
+{
+ int i;
+ u32 start_addr, end_addr;
+
+ for (i = 0 ; i < array_size ; i++) {
+ start_addr = pb_blocks[i];
+ end_addr = start_addr + HL_BLOCK_SIZE;
+
+ if ((mm_reg_addr >= start_addr) && (mm_reg_addr < end_addr))
+ return i;
+ }
+
+ dev_err(hdev->dev, "No protection domain was found for 0x%x\n",
+ mm_reg_addr);
+ return -EDOM;
+}
+
+/**
+ * hl_unset_pb_in_block - clear a specific protection bit in a block
+ *
+ * @hdev: pointer to hl_device structure
+ * @reg_offset: register offset, which will be converted to a bit offset in the pb block
+ * @sgs_entry: pb array
+ *
+ */
+static int hl_unset_pb_in_block(struct hl_device *hdev, u32 reg_offset,
+ struct hl_block_glbl_sec *sgs_entry)
+{
+ if ((reg_offset >= HL_BLOCK_SIZE) || (reg_offset & 0x3)) {
+ dev_err(hdev->dev,
+ "Register offset(%d) is out of range(%d) or invalid\n",
+ reg_offset, HL_BLOCK_SIZE);
+ return -EINVAL;
+ }
+
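+	/* Registers are 4 bytes apart, so the dword offset within the block selects the bit */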
+ UNSET_GLBL_SEC_BIT(sgs_entry->sec_array,
+ (reg_offset & (HL_BLOCK_SIZE - 1)) >> 2);
+
+ return 0;
+}
+
+/**
+ * hl_unsecure_register - locate the relevant block for this register and
+ * remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_addr: register address to unsecure
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @array_size: blocks array size
+ *
+ */
+int hl_unsecure_register(struct hl_device *hdev, u32 mm_reg_addr, int offset,
+ const u32 pb_blocks[], struct hl_block_glbl_sec sgs_array[],
+ int array_size)
+{
+ u32 reg_offset;
+ int block_num;
+
+ block_num = hl_get_pb_block(hdev, mm_reg_addr + offset, pb_blocks,
+ array_size);
+ if (block_num < 0)
+ return block_num;
+
+ reg_offset = (mm_reg_addr + offset) - pb_blocks[block_num];
+
+ return hl_unset_pb_in_block(hdev, reg_offset, &sgs_array[block_num]);
+}
+
+/**
+ * hl_unsecure_register_range - locate the relevant block for this register
+ * range and remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_range: register address range to unsecure
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @array_size: blocks array size
+ *
+ */
+static int hl_unsecure_register_range(struct hl_device *hdev,
+ struct range mm_reg_range, int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[],
+ int array_size)
+{
+ u32 reg_offset;
+ int i, block_num, rc = 0;
+
+ block_num = hl_get_pb_block(hdev,
+ mm_reg_range.start + offset, pb_blocks,
+ array_size);
+ if (block_num < 0)
+ return block_num;
+
+ for (i = mm_reg_range.start ; i <= mm_reg_range.end ; i += 4) {
+ reg_offset = (i + offset) - pb_blocks[block_num];
+ rc |= hl_unset_pb_in_block(hdev, reg_offset,
+ &sgs_array[block_num]);
+ }
+
+ return rc;
+}
+
+/**
+ * hl_unsecure_registers - locate the relevant block for all registers and
+ * remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_array: register address array to unsecure
+ * @mm_array_size: register array size
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @blocks_array_size: blocks array size
+ *
+ */
+int hl_unsecure_registers(struct hl_device *hdev, const u32 mm_reg_array[],
+ int mm_array_size, int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
+{
+ int i, rc = 0;
+
+ for (i = 0 ; i < mm_array_size ; i++) {
+ rc = hl_unsecure_register(hdev, mm_reg_array[i], offset,
+ pb_blocks, sgs_array, blocks_array_size);
+
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * hl_unsecure_registers_range - locate the relevant block for all register
+ * ranges and remove corresponding protection bit
+ *
+ * @hdev: pointer to hl_device structure
+ * @mm_reg_range_array: register address range array to unsecure
+ * @mm_array_size: register array size
+ * @offset: additional offset to the register address
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @blocks_array_size: blocks array size
+ *
+ */
+static int hl_unsecure_registers_range(struct hl_device *hdev,
+ const struct range mm_reg_range_array[], int mm_array_size,
+ int offset, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], int blocks_array_size)
+{
+ int i, rc = 0;
+
+ for (i = 0 ; i < mm_array_size ; i++) {
+ rc = hl_unsecure_register_range(hdev, mm_reg_range_array[i],
+ offset, pb_blocks, sgs_array, blocks_array_size);
+
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+/**
+ * hl_ack_pb_security_violations - Ack security violation
+ *
+ * @hdev: pointer to hl_device structure
+ * @pb_blocks: blocks array
+ * @block_offset: additional offset to the block
+ * @array_size: blocks array size
+ *
+ */
+static void hl_ack_pb_security_violations(struct hl_device *hdev,
+ const u32 pb_blocks[], u32 block_offset, int array_size)
+{
+ int i;
+ u32 cause, addr, block_base;
+
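+	/* Read each block's error cause; if set, report it and write it back to acknowledge */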
+ for (i = 0 ; i < array_size ; i++) {
+ block_base = pb_blocks[i] + block_offset;
+ cause = RREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE);
+ if (cause) {
+ addr = RREG32(block_base + HL_BLOCK_GLBL_ERR_ADDR);
+ hdev->asic_funcs->pb_print_security_errors(hdev,
+ block_base, cause, addr);
+ WREG32(block_base + HL_BLOCK_GLBL_ERR_CAUSE, cause);
+ }
+ }
+}
+
+/**
+ * hl_config_glbl_sec - set pb in HW according to given pb array
+ *
+ * @hdev: pointer to hl_device structure
+ * @pb_blocks: blocks array
+ * @sgs_array: pb array
+ * @block_offset: additional offset to the block
+ * @array_size: blocks array size
+ *
+ */
+void hl_config_glbl_sec(struct hl_device *hdev, const u32 pb_blocks[],
+ struct hl_block_glbl_sec sgs_array[], u32 block_offset,
+ int array_size)
+{
+ int i, j;
+ u32 sgs_base;
+
+ if (hdev->pldm)
+ usleep_range(100, 1000);
+
+ for (i = 0 ; i < array_size ; i++) {
+ sgs_base = block_offset + pb_blocks[i] +
+ HL_BLOCK_GLBL_SEC_OFFS;
+
+ for (j = 0 ; j < HL_BLOCK_GLBL_SEC_LEN ; j++)
+ WREG32(sgs_base + j * sizeof(u32),
+ sgs_array[i].sec_array[j]);
+ }
+}
+
+/**
+ * hl_secure_block - locally memsets a block to 0
+ *
+ * @hdev: pointer to hl_device structure
+ * @sgs_array: pb array to clear
+ * @array_size: blocks array size
+ *
+ */
+void hl_secure_block(struct hl_device *hdev,
+ struct hl_block_glbl_sec sgs_array[], int array_size)
+{
+ int i;
+
+ for (i = 0 ; i < array_size ; i++)
+ memset((char *)(sgs_array[i].sec_array), 0,
+ HL_BLOCK_GLBL_SEC_SIZE);
+}
+
+/**
+ * hl_init_pb_with_mask - set selected pb instances with mask in HW according
+ * to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply the configuration to;
+ *              set to HL_PB_SHARED if it needs to be applied only once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_array: register array
+ * @regs_array_size: register array size
+ * @mask: enabled instances mask: 1- enabled, 0- disabled
+ */
+int hl_init_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size, u64 mask)
+{
+ int i, j;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ hl_unsecure_registers(hdev, regs_array, regs_array_size, 0, pb_blocks,
+ glbl_sec, blocks_array_size);
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_dcores ; i++) {
+ for (j = 0 ; j < num_instances ; j++) {
+ int seq = i * num_instances + j;
+
+ if (!(mask & BIT_ULL(seq)))
+ continue;
+
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ i * dcore_offset + j * instance_offset,
+ blocks_array_size);
+ }
+ }
+
+ kfree(glbl_sec);
+
+ return 0;
+}
+
+/**
+ * hl_init_pb - set pb in HW according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply the configuration to;
+ *              set to HL_PB_SHARED if it needs to be applied only once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_array: register array
+ * @regs_array_size: register array size
+ *
+ */
+int hl_init_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size)
+{
+ return hl_init_pb_with_mask(hdev, num_dcores, dcore_offset,
+ num_instances, instance_offset, pb_blocks,
+ blocks_array_size, regs_array, regs_array_size,
+ ULLONG_MAX);
+}
+
+/**
+ * hl_init_pb_ranges_with_mask - set pb instances using mask in HW according to
+ *                               given configuration, unsecuring register
+ *                               ranges instead of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply the configuration to;
+ *              set to HL_PB_SHARED if it needs to be applied only once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_range_array: register range array
+ * @regs_range_array_size: register range array size
+ * @mask: enabled instances mask: 1- enabled, 0- disabled
+ */
+int hl_init_pb_ranges_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size,
+ u64 mask)
+{
+ int i, j, rc = 0;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ rc = hl_unsecure_registers_range(hdev, regs_range_array,
+ regs_range_array_size, 0, pb_blocks, glbl_sec,
+ blocks_array_size);
+ if (rc)
+ goto free_glbl_sec;
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_dcores ; i++) {
+ for (j = 0 ; j < num_instances ; j++) {
+ int seq = i * num_instances + j;
+
+ if (!(mask & BIT_ULL(seq)))
+ continue;
+
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ i * dcore_offset + j * instance_offset,
+ blocks_array_size);
+ }
+ }
+
+free_glbl_sec:
+ kfree(glbl_sec);
+
+ return rc;
+}
+
+/**
+ * hl_init_pb_ranges - set pb in HW according to given configuration, unsecuring
+ *                     register ranges instead of specific registers
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply the configuration to;
+ *              set to HL_PB_SHARED if it needs to be applied only once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_range_array: register range array
+ * @regs_range_array_size: register range array size
+ *
+ */
+int hl_init_pb_ranges(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size)
+{
+ return hl_init_pb_ranges_with_mask(hdev, num_dcores, dcore_offset,
+ num_instances, instance_offset, pb_blocks,
+ blocks_array_size, regs_range_array,
+ regs_range_array_size, ULLONG_MAX);
+}
+
+/**
+ * hl_init_pb_single_dcore - set pb for a single dcore in HW
+ * according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @dcore_offset: offset from the dcore0
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_array: register array
+ * @regs_array_size: register array size
+ *
+ */
+int hl_init_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const u32 *regs_array, u32 regs_array_size)
+{
+ int i, rc = 0;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ rc = hl_unsecure_registers(hdev, regs_array, regs_array_size, 0,
+ pb_blocks, glbl_sec, blocks_array_size);
+ if (rc)
+ goto free_glbl_sec;
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_instances ; i++)
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ dcore_offset + i * instance_offset,
+ blocks_array_size);
+
+free_glbl_sec:
+ kfree(glbl_sec);
+
+ return rc;
+}
+
+/**
+ * hl_init_pb_ranges_single_dcore - set pb for a single dcore in HW according
+ *                                  to given configuration, unsecuring
+ *                                  register ranges instead of specific
+ *                                  registers
+ *
+ * @hdev: pointer to hl_device structure
+ * @dcore_offset: offset from the dcore0
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @regs_range_array: register range array
+ * @regs_range_array_size: register range array size
+ *
+ */
+int hl_init_pb_ranges_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size,
+ const struct range *regs_range_array, u32 regs_range_array_size)
+{
+ int i;
+ struct hl_block_glbl_sec *glbl_sec;
+
+ glbl_sec = kcalloc(blocks_array_size,
+ sizeof(struct hl_block_glbl_sec),
+ GFP_KERNEL);
+ if (!glbl_sec)
+ return -ENOMEM;
+
+ hl_secure_block(hdev, glbl_sec, blocks_array_size);
+ hl_unsecure_registers_range(hdev, regs_range_array,
+ regs_range_array_size, 0, pb_blocks, glbl_sec,
+ blocks_array_size);
+
+ /* Fill all blocks with the same configuration */
+ for (i = 0 ; i < num_instances ; i++)
+ hl_config_glbl_sec(hdev, pb_blocks, glbl_sec,
+ dcore_offset + i * instance_offset,
+ blocks_array_size);
+
+ kfree(glbl_sec);
+
+ return 0;
+}
+
+/**
+ * hl_ack_pb_with_mask - ack pb with mask in HW according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply the configuration to;
+ *              set to HL_PB_SHARED if it needs to be applied only once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ * @mask: enabled instances mask: 1- enabled, 0- disabled
+ *
+ */
+void hl_ack_pb_with_mask(struct hl_device *hdev, u32 num_dcores,
+ u32 dcore_offset, u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size, u64 mask)
+{
+ int i, j;
+
+ /* ack all blocks */
+ for (i = 0 ; i < num_dcores ; i++) {
+ for (j = 0 ; j < num_instances ; j++) {
+ int seq = i * num_instances + j;
+
+ if (!(mask & BIT_ULL(seq)))
+ continue;
+
+ hl_ack_pb_security_violations(hdev, pb_blocks,
+ i * dcore_offset + j * instance_offset,
+ blocks_array_size);
+ }
+ }
+}
+
+/**
+ * hl_ack_pb - ack pb in HW according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @num_dcores: number of dcores to apply the configuration to;
+ *              set to HL_PB_SHARED if it needs to be applied only once
+ * @dcore_offset: offset between dcores
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ *
+ */
+void hl_ack_pb(struct hl_device *hdev, u32 num_dcores, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size)
+{
+ hl_ack_pb_with_mask(hdev, num_dcores, dcore_offset, num_instances,
+ instance_offset, pb_blocks, blocks_array_size,
+ ULLONG_MAX);
+}
+
+/**
+ * hl_ack_pb_single_dcore - ack pb for a single dcore in HW
+ * according to given configuration
+ *
+ * @hdev: pointer to hl_device structure
+ * @dcore_offset: offset from dcore0
+ * @num_instances: number of instances to apply configuration to
+ * @instance_offset: offset between instances
+ * @pb_blocks: blocks array
+ * @blocks_array_size: blocks array size
+ *
+ */
+void hl_ack_pb_single_dcore(struct hl_device *hdev, u32 dcore_offset,
+ u32 num_instances, u32 instance_offset,
+ const u32 pb_blocks[], u32 blocks_array_size)
+{
+ int i;
+
+ /* ack all blocks */
+ for (i = 0 ; i < num_instances ; i++)
+ hl_ack_pb_security_violations(hdev, pb_blocks,
+ dcore_offset + i * instance_offset,
+ blocks_array_size);
+}
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 9ebeb18ab85e..6c5271f01160 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -73,6 +73,7 @@ static DEVICE_ATTR_RO(clk_cur_freq_mhz);
static struct attribute *hl_dev_clk_attrs[] = {
&dev_attr_clk_max_freq_mhz.attr,
&dev_attr_clk_cur_freq_mhz.attr,
+ NULL,
};
static ssize_t vrm_ver_show(struct device *dev, struct device_attribute *attr, char *buf)
@@ -93,6 +94,7 @@ static DEVICE_ATTR_RO(vrm_ver);
static struct attribute *hl_dev_vrm_attrs[] = {
&dev_attr_vrm_ver.attr,
+ NULL,
};
static ssize_t uboot_ver_show(struct device *dev, struct device_attribute *attr,
@@ -243,6 +245,12 @@ static ssize_t device_type_show(struct device *dev,
case ASIC_GAUDI_SEC:
str = "GAUDI SEC";
break;
+ case ASIC_GAUDI2:
+ str = "GAUDI2";
+ break;
+ case ASIC_GAUDI2_SEC:
+ str = "GAUDI2 SEC";
+ break;
default:
dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
hdev->asic_type);
@@ -283,7 +291,7 @@ static ssize_t soft_reset_cnt_show(struct device *dev,
{
struct hl_device *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hdev->reset_info.soft_reset_cnt);
+ return sprintf(buf, "%d\n", hdev->reset_info.compute_reset_cnt);
}
static ssize_t hard_reset_cnt_show(struct device *dev,