Diffstat (limited to 'drivers/misc')
-rw-r--r-- drivers/misc/Kconfig | 1
-rw-r--r-- drivers/misc/ad525x_dpot-i2c.c | 3
-rw-r--r-- drivers/misc/ad525x_dpot-spi.c | 3
-rw-r--r-- drivers/misc/ad525x_dpot.c | 4
-rw-r--r-- drivers/misc/ad525x_dpot.h | 2
-rw-r--r-- drivers/misc/bcm-vk/bcm_vk_tty.c | 6
-rw-r--r-- drivers/misc/cardreader/rtsx_pcr.c | 2
-rw-r--r-- drivers/misc/cb710/sgbuf2.c | 2
-rw-r--r-- drivers/misc/cxl/guest.c | 30
-rw-r--r-- drivers/misc/cxl/pci.c | 35
-rw-r--r-- drivers/misc/eeprom/at25.c | 8
-rw-r--r-- drivers/misc/eeprom/eeprom_93xx46.c | 18
-rw-r--r-- drivers/misc/enclosure.c | 16
-rw-r--r-- drivers/misc/fastrpc.c | 23
-rw-r--r-- drivers/misc/gehc-achc.c | 1
-rw-r--r-- drivers/misc/genwqe/card_base.c | 2
-rw-r--r-- drivers/misc/genwqe/card_utils.c | 10
-rw-r--r-- drivers/misc/habanalabs/Kconfig | 2
-rw-r--r-- drivers/misc/habanalabs/common/Makefile | 2
-rw-r--r-- drivers/misc/habanalabs/common/command_submission.c | 209
-rw-r--r-- drivers/misc/habanalabs/common/context.c | 8
-rw-r--r-- drivers/misc/habanalabs/common/debugfs.c | 51
-rw-r--r-- drivers/misc/habanalabs/common/device.c | 159
-rw-r--r-- drivers/misc/habanalabs/common/firmware_if.c | 28
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs.h | 64
-rw-r--r-- drivers/misc/habanalabs/common/habanalabs_drv.c | 24
-rw-r--r-- drivers/misc/habanalabs/common/hw_queue.c | 9
-rw-r--r-- drivers/misc/habanalabs/common/hwmgr.c (renamed from drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c) | 38
-rw-r--r-- drivers/misc/habanalabs/common/hwmon.c | 194
-rw-r--r-- drivers/misc/habanalabs/common/irq.c | 5
-rw-r--r-- drivers/misc/habanalabs/common/memory.c | 515
-rw-r--r-- drivers/misc/habanalabs/common/mmu/mmu.c | 30
-rw-r--r-- drivers/misc/habanalabs/common/sysfs.c | 6
-rw-r--r-- drivers/misc/habanalabs/gaudi/Makefile | 2
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi.c | 33
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudiP.h | 4
-rw-r--r-- drivers/misc/habanalabs/gaudi/gaudi_security.c | 115
-rw-r--r-- drivers/misc/habanalabs/goya/goya.c | 13
-rw-r--r-- drivers/misc/habanalabs/goya/goyaP.h | 1
-rw-r--r-- drivers/misc/habanalabs/goya/goya_hwmgr.c | 31
-rw-r--r-- drivers/misc/habanalabs/include/common/cpucp_if.h | 22
-rw-r--r-- drivers/misc/habanalabs/include/common/hl_boot_if.h | 189
-rw-r--r-- drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h | 2
-rw-r--r-- drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h | 10
-rw-r--r-- drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h | 1
-rw-r--r-- drivers/misc/hisi_hikey_usb.c | 119
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d.c | 3
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d.h | 2
-rw-r--r-- drivers/misc/lis3lv02d/lis3lv02d_spi.c | 4
-rw-r--r-- drivers/misc/lkdtm/bugs.c | 77
-rw-r--r-- drivers/misc/lkdtm/core.c | 1
-rw-r--r-- drivers/misc/lkdtm/lkdtm.h | 1
-rw-r--r-- drivers/misc/mei/Kconfig | 2
-rw-r--r-- drivers/misc/mei/Makefile | 1
-rw-r--r-- drivers/misc/mei/hbm.c | 12
-rw-r--r-- drivers/misc/mei/hw-me-regs.h | 1
-rw-r--r-- drivers/misc/mei/pci-me.c | 1
-rw-r--r-- drivers/misc/mei/pci-txe.c | 4
-rw-r--r-- drivers/misc/mei/pxp/Kconfig | 13
-rw-r--r-- drivers/misc/mei/pxp/Makefile | 7
-rw-r--r-- drivers/misc/mei/pxp/mei_pxp.c | 229
-rw-r--r-- drivers/misc/mei/pxp/mei_pxp.h | 18
-rw-r--r-- drivers/misc/ocxl/config.c | 13
-rw-r--r-- drivers/misc/pvpanic/pvpanic-mmio.c | 9
-rw-r--r-- drivers/misc/pvpanic/pvpanic-pci.c | 26
-rw-r--r-- drivers/misc/pvpanic/pvpanic.c | 16
-rw-r--r-- drivers/misc/sgi-xp/xpnet.c | 9
-rw-r--r-- drivers/misc/tifm_7xx1.c | 2
-rw-r--r-- drivers/misc/tifm_core.c | 8
69 files changed, 1969 insertions(+), 542 deletions(-)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 85ba901bc11b..0f5a49fc7c9e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -224,6 +224,7 @@ config HI6421V600_IRQ
tristate "HiSilicon Hi6421v600 IRQ and powerkey"
depends on OF
depends on SPMI
+ depends on HAS_IOMEM
select MFD_CORE
select REGMAP_SPMI
help
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index bd869ec5edba..0ee0c6d808c3 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -69,7 +69,8 @@ static int ad_dpot_i2c_probe(struct i2c_client *client,
static int ad_dpot_i2c_remove(struct i2c_client *client)
{
- return ad_dpot_remove(&client->dev);
+ ad_dpot_remove(&client->dev);
+ return 0;
}
static const struct i2c_device_id ad_dpot_id[] = {
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index aea931dd272e..a9e75d80ad36 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -92,7 +92,8 @@ static int ad_dpot_spi_probe(struct spi_device *spi)
static int ad_dpot_spi_remove(struct spi_device *spi)
{
- return ad_dpot_remove(&spi->dev);
+ ad_dpot_remove(&spi->dev);
+ return 0;
}
static const struct spi_device_id ad_dpot_spi_id[] = {
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 5d8f3f6a95f2..756ef6912b5a 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -743,7 +743,7 @@ exit:
}
EXPORT_SYMBOL(ad_dpot_probe);
-int ad_dpot_remove(struct device *dev)
+void ad_dpot_remove(struct device *dev)
{
struct dpot_data *data = dev_get_drvdata(dev);
int i;
@@ -753,8 +753,6 @@ int ad_dpot_remove(struct device *dev)
ad_dpot_remove_files(dev, data->feat, i);
kfree(data);
-
- return 0;
}
EXPORT_SYMBOL(ad_dpot_remove);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index ee8dc9f5a45a..72a9d6801937 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -209,6 +209,6 @@ struct ad_dpot_bus_data {
int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
unsigned long devid, const char *name);
-int ad_dpot_remove(struct device *dev);
+void ad_dpot_remove(struct device *dev);
#endif
diff --git a/drivers/misc/bcm-vk/bcm_vk_tty.c b/drivers/misc/bcm-vk/bcm_vk_tty.c
index 1b6076a89ca6..6669625ba4c8 100644
--- a/drivers/misc/bcm-vk/bcm_vk_tty.c
+++ b/drivers/misc/bcm-vk/bcm_vk_tty.c
@@ -267,13 +267,13 @@ int bcm_vk_tty_init(struct bcm_vk *vk, char *name)
struct device *tty_dev;
tty_port_init(&vk->tty[i].port);
- tty_dev = tty_port_register_device(&vk->tty[i].port, tty_drv,
- i, dev);
+ tty_dev = tty_port_register_device_attr(&vk->tty[i].port,
+ tty_drv, i, dev, vk,
+ NULL);
if (IS_ERR(tty_dev)) {
err = PTR_ERR(tty_dev);
goto unwind;
}
- dev_set_drvdata(tty_dev, vk);
vk->tty[i].is_opened = false;
}
diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c
index baf83594a01d..8c72eb590f79 100644
--- a/drivers/misc/cardreader/rtsx_pcr.c
+++ b/drivers/misc/cardreader/rtsx_pcr.c
@@ -1536,7 +1536,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device,
(int)pcidev->revision);
- ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));
if (ret < 0)
return ret;
diff --git a/drivers/misc/cb710/sgbuf2.c b/drivers/misc/cb710/sgbuf2.c
index e5a4ed3701eb..a798fad5f03c 100644
--- a/drivers/misc/cb710/sgbuf2.c
+++ b/drivers/misc/cb710/sgbuf2.c
@@ -47,7 +47,7 @@ static inline bool needs_unaligned_copy(const void *ptr)
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
return false;
#else
- return ((ptr - NULL) & 3) != 0;
+ return ((uintptr_t)ptr & 3) != 0;
#endif
}
diff --git a/drivers/misc/cxl/guest.c b/drivers/misc/cxl/guest.c
index 186308f1f8eb..9d485c9e3fff 100644
--- a/drivers/misc/cxl/guest.c
+++ b/drivers/misc/cxl/guest.c
@@ -20,34 +20,38 @@ static void pci_error_handlers(struct cxl_afu *afu,
pci_channel_state_t state)
{
struct pci_dev *afu_dev;
+ struct pci_driver *afu_drv;
+ const struct pci_error_handlers *err_handler;
if (afu->phb == NULL)
return;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- if (!afu_dev->driver)
+ afu_drv = to_pci_driver(afu_dev->dev.driver);
+ if (!afu_drv)
continue;
+ err_handler = afu_drv->err_handler;
switch (bus_error_event) {
case CXL_ERROR_DETECTED_EVENT:
afu_dev->error_state = state;
- if (afu_dev->driver->err_handler &&
- afu_dev->driver->err_handler->error_detected)
- afu_dev->driver->err_handler->error_detected(afu_dev, state);
- break;
+ if (err_handler &&
+ err_handler->error_detected)
+ err_handler->error_detected(afu_dev, state);
+ break;
case CXL_SLOT_RESET_EVENT:
afu_dev->error_state = state;
- if (afu_dev->driver->err_handler &&
- afu_dev->driver->err_handler->slot_reset)
- afu_dev->driver->err_handler->slot_reset(afu_dev);
- break;
+ if (err_handler &&
+ err_handler->slot_reset)
+ err_handler->slot_reset(afu_dev);
+ break;
case CXL_RESUME_EVENT:
- if (afu_dev->driver->err_handler &&
- afu_dev->driver->err_handler->resume)
- afu_dev->driver->err_handler->resume(afu_dev);
- break;
+ if (err_handler &&
+ err_handler->resume)
+ err_handler->resume(afu_dev);
+ break;
}
}
}
diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c
index 2ba899f5659f..3de0aea62ade 100644
--- a/drivers/misc/cxl/pci.c
+++ b/drivers/misc/cxl/pci.c
@@ -1795,6 +1795,8 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
pci_channel_state_t state)
{
struct pci_dev *afu_dev;
+ struct pci_driver *afu_drv;
+ const struct pci_error_handlers *err_handler;
pci_ers_result_t result = PCI_ERS_RESULT_NEED_RESET;
pci_ers_result_t afu_result = PCI_ERS_RESULT_NEED_RESET;
@@ -1805,14 +1807,16 @@ static pci_ers_result_t cxl_vphb_error_detected(struct cxl_afu *afu,
return result;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- if (!afu_dev->driver)
+ afu_drv = to_pci_driver(afu_dev->dev.driver);
+ if (!afu_drv)
continue;
afu_dev->error_state = state;
- if (afu_dev->driver->err_handler)
- afu_result = afu_dev->driver->err_handler->error_detected(afu_dev,
- state);
+ err_handler = afu_drv->err_handler;
+ if (err_handler)
+ afu_result = err_handler->error_detected(afu_dev,
+ state);
/* Disconnect trumps all, NONE trumps NEED_RESET */
if (afu_result == PCI_ERS_RESULT_DISCONNECT)
result = PCI_ERS_RESULT_DISCONNECT;
@@ -1972,6 +1976,8 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
struct cxl_afu *afu;
struct cxl_context *ctx;
struct pci_dev *afu_dev;
+ struct pci_driver *afu_drv;
+ const struct pci_error_handlers *err_handler;
pci_ers_result_t afu_result = PCI_ERS_RESULT_RECOVERED;
pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED;
int i;
@@ -2028,12 +2034,13 @@ static pci_ers_result_t cxl_pci_slot_reset(struct pci_dev *pdev)
* shouldn't start new work until we call
* their resume function.
*/
- if (!afu_dev->driver)
+ afu_drv = to_pci_driver(afu_dev->dev.driver);
+ if (!afu_drv)
continue;
- if (afu_dev->driver->err_handler &&
- afu_dev->driver->err_handler->slot_reset)
- afu_result = afu_dev->driver->err_handler->slot_reset(afu_dev);
+ err_handler = afu_drv->err_handler;
+ if (err_handler && err_handler->slot_reset)
+ afu_result = err_handler->slot_reset(afu_dev);
if (afu_result == PCI_ERS_RESULT_DISCONNECT)
result = PCI_ERS_RESULT_DISCONNECT;
@@ -2060,6 +2067,8 @@ static void cxl_pci_resume(struct pci_dev *pdev)
struct cxl *adapter = pci_get_drvdata(pdev);
struct cxl_afu *afu;
struct pci_dev *afu_dev;
+ struct pci_driver *afu_drv;
+ const struct pci_error_handlers *err_handler;
int i;
/* Everything is back now. Drivers should restart work now.
@@ -2074,9 +2083,13 @@ static void cxl_pci_resume(struct pci_dev *pdev)
continue;
list_for_each_entry(afu_dev, &afu->phb->bus->devices, bus_list) {
- if (afu_dev->driver && afu_dev->driver->err_handler &&
- afu_dev->driver->err_handler->resume)
- afu_dev->driver->err_handler->resume(afu_dev);
+ afu_drv = to_pci_driver(afu_dev->dev.driver);
+ if (!afu_drv)
+ continue;
+
+ err_handler = afu_drv->err_handler;
+ if (err_handler && err_handler->resume)
+ err_handler->resume(afu_dev);
}
}
spin_unlock(&adapter->afu_list_lock);
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 4d09b672ac3c..632325474233 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -366,6 +366,13 @@ static const struct of_device_id at25_of_match[] = {
};
MODULE_DEVICE_TABLE(of, at25_of_match);
+static const struct spi_device_id at25_spi_ids[] = {
+ { .name = "at25",},
+ { .name = "fm25",},
+ { }
+};
+MODULE_DEVICE_TABLE(spi, at25_spi_ids);
+
static int at25_probe(struct spi_device *spi)
{
struct at25_data *at25 = NULL;
@@ -491,6 +498,7 @@ static struct spi_driver at25_driver = {
.dev_groups = sernum_groups,
},
.probe = at25_probe,
+ .id_table = at25_spi_ids,
};
module_spi_driver(at25_driver);
diff --git a/drivers/misc/eeprom/eeprom_93xx46.c b/drivers/misc/eeprom/eeprom_93xx46.c
index 29d8971ec558..1f15399e5cb4 100644
--- a/drivers/misc/eeprom/eeprom_93xx46.c
+++ b/drivers/misc/eeprom/eeprom_93xx46.c
@@ -406,6 +406,23 @@ static const struct of_device_id eeprom_93xx46_of_table[] = {
};
MODULE_DEVICE_TABLE(of, eeprom_93xx46_of_table);
+static const struct spi_device_id eeprom_93xx46_spi_ids[] = {
+ { .name = "eeprom-93xx46",
+ .driver_data = (kernel_ulong_t)&at93c46_data, },
+ { .name = "at93c46",
+ .driver_data = (kernel_ulong_t)&at93c46_data, },
+ { .name = "at93c46d",
+ .driver_data = (kernel_ulong_t)&atmel_at93c46d_data, },
+ { .name = "at93c56",
+ .driver_data = (kernel_ulong_t)&at93c56_data, },
+ { .name = "at93c66",
+ .driver_data = (kernel_ulong_t)&at93c66_data, },
+ { .name = "93lc46b",
+ .driver_data = (kernel_ulong_t)&microchip_93lc46b_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, eeprom_93xx46_spi_ids);
+
static int eeprom_93xx46_probe_dt(struct spi_device *spi)
{
const struct of_device_id *of_id =
@@ -555,6 +572,7 @@ static struct spi_driver eeprom_93xx46_driver = {
},
.probe = eeprom_93xx46_probe,
.remove = eeprom_93xx46_remove,
+ .id_table = eeprom_93xx46_spi_ids,
};
module_spi_driver(eeprom_93xx46_driver);
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index f950d0155876..1b010d9267c9 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -426,7 +426,7 @@ static ssize_t components_show(struct device *cdev,
{
struct enclosure_device *edev = to_enclosure_device(cdev);
- return snprintf(buf, 40, "%d\n", edev->components);
+ return sysfs_emit(buf, "%d\n", edev->components);
}
static DEVICE_ATTR_RO(components);
@@ -481,7 +481,7 @@ static ssize_t get_component_fault(struct device *cdev,
if (edev->cb->get_fault)
edev->cb->get_fault(edev, ecomp);
- return snprintf(buf, 40, "%d\n", ecomp->fault);
+ return sysfs_emit(buf, "%d\n", ecomp->fault);
}
static ssize_t set_component_fault(struct device *cdev,
@@ -505,7 +505,7 @@ static ssize_t get_component_status(struct device *cdev,
if (edev->cb->get_status)
edev->cb->get_status(edev, ecomp);
- return snprintf(buf, 40, "%s\n", enclosure_status[ecomp->status]);
+ return sysfs_emit(buf, "%s\n", enclosure_status[ecomp->status]);
}
static ssize_t set_component_status(struct device *cdev,
@@ -539,7 +539,7 @@ static ssize_t get_component_active(struct device *cdev,
if (edev->cb->get_active)
edev->cb->get_active(edev, ecomp);
- return snprintf(buf, 40, "%d\n", ecomp->active);
+ return sysfs_emit(buf, "%d\n", ecomp->active);
}
static ssize_t set_component_active(struct device *cdev,
@@ -563,7 +563,7 @@ static ssize_t get_component_locate(struct device *cdev,
if (edev->cb->get_locate)
edev->cb->get_locate(edev, ecomp);
- return snprintf(buf, 40, "%d\n", ecomp->locate);
+ return sysfs_emit(buf, "%d\n", ecomp->locate);
}
static ssize_t set_component_locate(struct device *cdev,
@@ -593,7 +593,7 @@ static ssize_t get_component_power_status(struct device *cdev,
if (ecomp->power_status == -1)
return (edev->cb->get_power_status) ? -EIO : -ENOTTY;
- return snprintf(buf, 40, "%s\n", ecomp->power_status ? "on" : "off");
+ return sysfs_emit(buf, "%s\n", ecomp->power_status ? "on" : "off");
}
static ssize_t set_component_power_status(struct device *cdev,
@@ -623,7 +623,7 @@ static ssize_t get_component_type(struct device *cdev,
{
struct enclosure_component *ecomp = to_enclosure_component(cdev);
- return snprintf(buf, 40, "%s\n", enclosure_type[ecomp->type]);
+ return sysfs_emit(buf, "%s\n", enclosure_type[ecomp->type]);
}
static ssize_t get_component_slot(struct device *cdev,
@@ -638,7 +638,7 @@ static ssize_t get_component_slot(struct device *cdev,
else
slot = ecomp->number;
- return snprintf(buf, 40, "%d\n", slot);
+ return sysfs_emit(buf, "%d\n", slot);
}
static DEVICE_ATTR(fault, S_IRUGO | S_IWUSR, get_component_fault,
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index beda610e6b30..39aca7753719 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -24,7 +24,7 @@
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
#define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/
-#define FASTRPC_MAX_SESSIONS 9 /*8 compute, 1 cpz*/
+#define FASTRPC_MAX_SESSIONS 13 /*12 compute, 1 cpz*/
#define FASTRPC_ALIGN 128
#define FASTRPC_MAX_FDLIST 16
#define FASTRPC_MAX_CRCLIST 64
@@ -814,10 +814,12 @@ static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx)
rpra[i].pv = (u64) ctx->args[i].ptr;
pages[i].addr = ctx->maps[i]->phys;
+ mmap_read_lock(current->mm);
vma = find_vma(current->mm, ctx->args[i].ptr);
if (vma)
pages[i].addr += ctx->args[i].ptr -
vma->vm_start;
+ mmap_read_unlock(current->mm);
pg_start = (ctx->args[i].ptr & PAGE_MASK) >> PAGE_SHIFT;
pg_end = ((ctx->args[i].ptr + len - 1) & PAGE_MASK) >>
@@ -890,15 +892,17 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
inbufs = REMOTE_SCALARS_INBUFS(ctx->sc);
for (i = inbufs; i < ctx->nbufs; ++i) {
- void *src = (void *)(uintptr_t)rpra[i].pv;
- void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
- u64 len = rpra[i].len;
+ if (!ctx->maps[i]) {
+ void *src = (void *)(uintptr_t)rpra[i].pv;
+ void *dst = (void *)(uintptr_t)ctx->args[i].ptr;
+ u64 len = rpra[i].len;
- if (!kernel) {
- if (copy_to_user((void __user *)dst, src, len))
- return -EFAULT;
- } else {
- memcpy(dst, src, len);
+ if (!kernel) {
+ if (copy_to_user((void __user *)dst, src, len))
+ return -EFAULT;
+ } else {
+ memcpy(dst, src, len);
+ }
}
}
@@ -1763,3 +1767,4 @@ static void fastrpc_exit(void)
module_exit(fastrpc_exit);
MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/misc/gehc-achc.c b/drivers/misc/gehc-achc.c
index 02f33bc60c56..4c9c5394da6f 100644
--- a/drivers/misc/gehc-achc.c
+++ b/drivers/misc/gehc-achc.c
@@ -539,6 +539,7 @@ static int gehc_achc_probe(struct spi_device *spi)
static const struct spi_device_id gehc_achc_id[] = {
{ "ge,achc", 0 },
+ { "achc", 0 },
{ }
};
MODULE_DEVICE_TABLE(spi, gehc_achc_id);
diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c
index 2e1befbd1ad9..693981891870 100644
--- a/drivers/misc/genwqe/card_base.c
+++ b/drivers/misc/genwqe/card_base.c
@@ -1090,7 +1090,7 @@ static int genwqe_pci_setup(struct genwqe_dev *cd)
/* check for 64-bit DMA address supported (DAC) */
/* check for 32-bit DMA address supported (SAC) */
- if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) ||
+ if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64)) &&
dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32))) {
dev_err(&pci_dev->dev,
"err: neither DMA32 nor DMA64 supported\n");
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 039b923d1d60..1167463f26fb 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -233,8 +233,8 @@ static void genwqe_unmap_pages(struct genwqe_dev *cd, dma_addr_t *dma_list,
struct pci_dev *pci_dev = cd->pci_dev;
for (i = 0; (i < num_pages) && (dma_list[i] != 0x0); i++) {
- pci_unmap_page(pci_dev, dma_list[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pci_dev->dev, dma_list[i], PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
dma_list[i] = 0x0;
}
}
@@ -251,12 +251,12 @@ static int genwqe_map_pages(struct genwqe_dev *cd,
dma_addr_t daddr;
dma_list[i] = 0x0;
- daddr = pci_map_page(pci_dev, page_list[i],
+ daddr = dma_map_page(&pci_dev->dev, page_list[i],
0, /* map_offs */
PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL); /* FIXME rd/rw */
+ DMA_BIDIRECTIONAL); /* FIXME rd/rw */
- if (pci_dma_mapping_error(pci_dev, daddr)) {
+ if (dma_mapping_error(&pci_dev->dev, daddr)) {
dev_err(&pci_dev->dev,
"[%s] err: no dma addr daddr=%016llx!\n",
__func__, (long long)daddr);
diff --git a/drivers/misc/habanalabs/Kconfig b/drivers/misc/habanalabs/Kconfig
index 293d79811372..861c81006c6d 100644
--- a/drivers/misc/habanalabs/Kconfig
+++ b/drivers/misc/habanalabs/Kconfig
@@ -8,6 +8,8 @@ config HABANA_AI
depends on PCI && HAS_IOMEM
select GENERIC_ALLOCATOR
select HWMON
+ select DMA_SHARED_BUFFER
+ select CRC32
help
Enables PCIe card driver for Habana's AI Processors (AIP) that are
designed to accelerate Deep Learning inference and training workloads.
diff --git a/drivers/misc/habanalabs/common/Makefile b/drivers/misc/habanalabs/common/Makefile
index 6ebe3c7001ff..82c3824cad00 100644
--- a/drivers/misc/habanalabs/common/Makefile
+++ b/drivers/misc/habanalabs/common/Makefile
@@ -11,4 +11,4 @@ HL_COMMON_FILES := common/habanalabs_drv.o common/device.o common/context.o \
common/command_buffer.o common/hw_queue.o common/irq.o \
common/sysfs.o common/hwmon.o common/memory.o \
common/command_submission.o common/firmware_if.o \
- common/state_dump.o
+ common/state_dump.o common/hwmgr.o
diff --git a/drivers/misc/habanalabs/common/command_submission.c b/drivers/misc/habanalabs/common/command_submission.c
index 7b0516cf808b..4c8000fd246c 100644
--- a/drivers/misc/habanalabs/common/command_submission.c
+++ b/drivers/misc/habanalabs/common/command_submission.c
@@ -143,6 +143,7 @@ static void hl_fence_init(struct hl_fence *fence, u64 sequence)
fence->cs_sequence = sequence;
fence->error = 0;
fence->timestamp = ktime_set(0, 0);
+ fence->mcs_handling_done = false;
init_completion(&fence->completion);
}
@@ -405,7 +406,7 @@ static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
bool next_entry_found = false;
- struct hl_cs *next;
+ struct hl_cs *next, *first_cs;
if (!cs_needs_timeout(cs))
return;
@@ -415,20 +416,26 @@ static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
/* We need to handle tdr only once for the complete staged submission.
* Hence, we choose the CS that reaches this function first which is
* the CS marked as 'staged_last'.
+ * In case single staged cs was submitted which has both first and last
+ * indications, then "cs_find_first" below will return NULL, since we
+ * removed the cs node from the list before getting here,
+ * in such cases just continue with the cs to cancel it's TDR work.
*/
- if (cs->staged_cs && cs->staged_last)
- cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (cs->staged_cs && cs->staged_last) {
+ first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
+ if (first_cs)
+ cs = first_cs;
+ }
spin_unlock(&hdev->cs_mirror_lock);
/* Don't cancel TDR in case this CS was timedout because we might be
* running from the TDR context
*/
- if (cs && (cs->timedout ||
- hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT))
+ if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
return;
- if (cs && cs->tdr_active)
+ if (cs->tdr_active)
cancel_delayed_work_sync(&cs->work_tdr);
spin_lock(&hdev->cs_mirror_lock);
@@ -529,10 +536,21 @@ static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs)
mcs_compl->timestamp =
ktime_to_ns(fence->timestamp);
complete_all(&mcs_compl->completion);
+
+ /*
+ * Setting mcs_handling_done inside the lock ensures
+ * at least one fence have mcs_handling_done set to
+ * true before wait for mcs finish. This ensures at
+ * least one CS will be set as completed when polling
+ * mcs fences.
+ */
+ fence->mcs_handling_done = true;
}
spin_unlock(&mcs_compl->lock);
}
+ /* In case CS completed without mcs completion initialized */
+ fence->mcs_handling_done = true;
}
static inline void cs_release_sob_reset_handler(struct hl_device *hdev,
@@ -1288,6 +1306,12 @@ static int cs_ioctl_default(struct hl_fpriv *hpriv, void __user *chunks,
if (rc)
goto free_cs_object;
+ /* If this is a staged submission we must return the staged sequence
+ * rather than the internal CS sequence
+ */
+ if (cs->staged_cs)
+ *cs_seq = cs->staged_sequence;
+
/* Validate ALL the CS chunks before submitting the CS */
for (i = 0 ; i < num_chunks ; i++) {
struct hl_cs_chunk *chunk = &cs_chunk_array[i];
@@ -1988,6 +2012,15 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
goto free_cs_chunk_array;
}
+ if (!hdev->nic_ports_mask) {
+ atomic64_inc(&ctx->cs_counters.validation_drop_cnt);
+ atomic64_inc(&cntr->validation_drop_cnt);
+ dev_err(hdev->dev,
+ "Collective operations not supported when NIC ports are disabled");
+ rc = -EINVAL;
+ goto free_cs_chunk_array;
+ }
+
collective_engine_id = chunk->collective_engine_id;
}
@@ -2026,9 +2059,10 @@ static int cs_ioctl_signal_wait(struct hl_fpriv *hpriv, enum hl_cs_type cs_type,
spin_unlock(&ctx->sig_mgr.lock);
if (!handle_found) {
- dev_err(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
+ /* treat as signal CS already finished */
+ dev_dbg(hdev->dev, "Cannot find encapsulated signals handle for seq 0x%llx\n",
signal_seq);
- rc = -EINVAL;
+ rc = 0;
goto free_cs_chunk_array;
}
@@ -2348,32 +2382,48 @@ static int hl_cs_poll_fences(struct multi_cs_data *mcs_data)
break;
}
- mcs_data->stream_master_qid_map |= fence->stream_master_qid_map;
-
- if (status == CS_WAIT_STATUS_BUSY)
- continue;
-
- mcs_data->completion_bitmap |= BIT(i);
-
- /*
- * best effort to extract timestamp. few notes:
- * - if even single fence is gone we cannot extract timestamp
- * (as fence not exist anymore)
- * - for all completed CSs we take the earliest timestamp.
- * for this we have to validate that:
- * 1. given timestamp was indeed set
- * 2. the timestamp is earliest of all timestamps so far
- */
+ switch (status) {
+ case CS_WAIT_STATUS_BUSY:
+ /* CS did not finished, keep waiting on its QID*/
+ mcs_data->stream_master_qid_map |=
+ fence->stream_master_qid_map;
+ break;
+ case CS_WAIT_STATUS_COMPLETED:
+ /*
+ * Using mcs_handling_done to avoid possibility of mcs_data
+ * returns to user indicating CS completed before it finished
+ * all of its mcs handling, to avoid race the next time the
+ * user waits for mcs.
+ */
+ if (!fence->mcs_handling_done)
+ break;
- if (status == CS_WAIT_STATUS_GONE) {
+ mcs_data->completion_bitmap |= BIT(i);
+ /*
+ * For all completed CSs we take the earliest timestamp.
+ * For this we have to validate that the timestamp is
+ * earliest of all timestamps so far.
+ */
+ if (mcs_data->update_ts &&
+ (ktime_compare(fence->timestamp, first_cs_time) < 0))
+ first_cs_time = fence->timestamp;
+ break;
+ case CS_WAIT_STATUS_GONE:
mcs_data->update_ts = false;
mcs_data->gone_cs = true;
- } else if (mcs_data->update_ts &&
- (ktime_compare(fence->timestamp,
- ktime_set(0, 0)) > 0) &&
- (ktime_compare(fence->timestamp, first_cs_time) < 0)) {
- first_cs_time = fence->timestamp;
+ /*
+ * It is possible to get an old sequence numbers from user
+ * which related to already completed CSs and their fences
+ * already gone. In this case, CS set as completed but
+ * no need to consider its QID for mcs completion.
+ */
+ mcs_data->completion_bitmap |= BIT(i);
+ break;
+ default:
+ dev_err(hdev->dev, "Invalid fence status\n");
+ return -EINVAL;
}
+
}
hl_fences_put(mcs_data->fence_arr, arr_len);
@@ -2613,7 +2663,8 @@ static int hl_multi_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
* completed after the poll function.
*/
if (!mcs_data.completion_bitmap) {
- dev_err(hdev->dev, "Multi-CS got completion on wait but no CS completed\n");
+ dev_warn_ratelimited(hdev->dev,
+ "Multi-CS got completion on wait but no CS completed\n");
rc = -EFAULT;
}
}
@@ -2625,11 +2676,18 @@ put_ctx:
free_seq_arr:
kfree(cs_seq_arr);
- /* update output args */
- memset(args, 0, sizeof(*args));
if (rc)
return rc;
+ if (mcs_data.wait_status == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for Multi-CS\n");
+ return -EINTR;
+ }
+
+ /* update output args */
+ memset(args, 0, sizeof(*args));
+
if (mcs_data.completion_bitmap) {
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
args->out.cs_completion_map = mcs_data.completion_bitmap;
@@ -2643,8 +2701,6 @@ free_seq_arr:
/* update if some CS was gone */
if (mcs_data.timestamp)
args->out.flags |= HL_WAIT_CS_STATUS_FLAG_GONE;
- } else if (mcs_data.wait_status == -ERESTARTSYS) {
- args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
} else {
args->out.status = HL_WAIT_CS_STATUS_BUSY;
}
@@ -2664,16 +2720,17 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq,
&status, &timestamp);
+ if (rc == -ERESTARTSYS) {
+ dev_err_ratelimited(hdev->dev,
+ "user process got signal while waiting for CS handle %llu\n",
+ seq);
+ return -EINTR;
+ }
+
memset(args, 0, sizeof(*args));
if (rc) {
- if (rc == -ERESTARTSYS) {
- dev_err_ratelimited(hdev->dev,
- "user process got signal while waiting for CS handle %llu\n",
- seq);
- args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED;
- rc = -EINTR;
- } else if (rc == -ETIMEDOUT) {
+ if (rc == -ETIMEDOUT) {
dev_err_ratelimited(hdev->dev,
"CS %llu has timed-out while user process is waiting for it\n",
seq);
@@ -2710,13 +2767,14 @@ static int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data)
static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
u32 timeout_us, u64 user_address,
- u32 target_value, u16 interrupt_offset,
- enum hl_cs_wait_status *status)
+ u64 target_value, u16 interrupt_offset,
+ enum hl_cs_wait_status *status,
+ u64 *timestamp)
{
struct hl_user_pending_interrupt *pend;
struct hl_user_interrupt *interrupt;
unsigned long timeout, flags;
- u32 completion_value;
+ u64 completion_value;
long completion_rc;
int rc = 0;
@@ -2740,26 +2798,31 @@ static int _hl_interrupt_wait_ioctl(struct hl_device *hdev, struct hl_ctx *ctx,
else
interrupt = &hdev->user_interrupt[interrupt_offset];
- if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ /* Add pending user interrupt to relevant list for the interrupt
+ * handler to monitor
+ */
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+ /* We check for completion value as interrupt could have been received
+ * before we added the node to the wait list
+ */
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
- goto free_fence;
+ goto remove_pending_user_interrupt;
}
- if (completion_value >= target_value)
+ if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
- else
+ /* There was no interrupt, we assume the completion is now. */
+ pend->fence.timestamp = ktime_get();
+ } else
*status = CS_WAIT_STATUS_BUSY;
if (!timeout_us || (*status == CS_WAIT_STATUS_COMPLETED))
- goto free_fence;
-
- /* Add pending user interrupt to relevant list for the interrupt
- * handler to monitor
- */
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
- list_add_tail(&pend->wait_list_node, &interrupt->wait_list_head);
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+ goto remove_pending_user_interrupt;
wait_again:
/* Wait for interrupt handler to signal completion */
@@ -2770,7 +2833,16 @@ wait_again:
* If comparison fails, keep waiting until timeout expires
*/
if (completion_rc > 0) {
- if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 4)) {
+ spin_lock_irqsave(&interrupt->wait_list_lock, flags);
+ /* reinit_completion must be called before we check for user
+ * completion value, otherwise, if interrupt is received after
+ * the comparison and before the next wait_for_completion,
+ * we will reach timeout and fail
+ */
+ reinit_completion(&pend->fence.completion);
+ spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
+
+ if (copy_from_user(&completion_value, u64_to_user_ptr(user_address), 8)) {
dev_err(hdev->dev, "Failed to copy completion value from user\n");
rc = -EFAULT;
@@ -2780,18 +2852,13 @@ wait_again:
if (completion_value >= target_value) {
*status = CS_WAIT_STATUS_COMPLETED;
} else {
- spin_lock_irqsave(&interrupt->wait_list_lock, flags);
- reinit_completion(&pend->fence.completion);
timeout = completion_rc;
-
- spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
goto wait_again;
}
} else if (completion_rc == -ERESTARTSYS) {
dev_err_ratelimited(hdev->dev,
"user process got signal while waiting for interrupt ID %d\n",
interrupt->interrupt_id);
- *status = HL_WAIT_CS_STATUS_INTERRUPTED;
rc = -EINTR;
} else {
*status = CS_WAIT_STATUS_BUSY;
@@ -2802,7 +2869,8 @@ remove_pending_user_interrupt:
list_del(&pend->wait_list_node);
spin_unlock_irqrestore(&interrupt->wait_list_lock, flags);
-free_fence:
+ *timestamp = ktime_to_ns(pend->fence.timestamp);
+
kfree(pend);
hl_ctx_put(ctx);
@@ -2816,6 +2884,7 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
struct asic_fixed_properties *prop;
union hl_wait_cs_args *args = data;
enum hl_cs_wait_status status;
+ u64 timestamp;
int rc;
prop = &hdev->asic_prop;
@@ -2845,9 +2914,8 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
rc = _hl_interrupt_wait_ioctl(hdev, hpriv->ctx,
args->in.interrupt_timeout_us, args->in.addr,
- args->in.target, interrupt_offset, &status);
-
- memset(args, 0, sizeof(*args));
+ args->in.target, interrupt_offset, &status,
+ &timestamp);
if (rc) {
if (rc != -EINTR)
@@ -2857,6 +2925,13 @@ static int hl_interrupt_wait_ioctl(struct hl_fpriv *hpriv, void *data)
return rc;
}
+ memset(args, 0, sizeof(*args));
+
+ if (timestamp) {
+ args->out.timestamp_nsec = timestamp;
+ args->out.flags |= HL_WAIT_CS_STATUS_FLAG_TIMESTAMP_VLD;
+ }
+
switch (status) {
case CS_WAIT_STATUS_COMPLETED:
args->out.status = HL_WAIT_CS_STATUS_COMPLETED;
diff --git a/drivers/misc/habanalabs/common/context.c b/drivers/misc/habanalabs/common/context.c
index 22978303ad63..d0aaccd4df2c 100644
--- a/drivers/misc/habanalabs/common/context.c
+++ b/drivers/misc/habanalabs/common/context.c
@@ -181,12 +181,6 @@ out_err:
return rc;
}
-void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx)
-{
- if (kref_put(&ctx->refcount, hl_ctx_do_release) == 1)
- return;
-}
-
int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
{
int rc = 0;
@@ -392,7 +386,7 @@ void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr)
idp = &mgr->ctx_handles;
idr_for_each_entry(idp, ctx, id)
- hl_ctx_free(hdev, ctx);
+ kref_put(&ctx->refcount, hl_ctx_do_release);
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->ctx_lock);
diff --git a/drivers/misc/habanalabs/common/debugfs.c b/drivers/misc/habanalabs/common/debugfs.c
index 985f1f3dbd20..1f2a3dc6c4e2 100644
--- a/drivers/misc/habanalabs/common/debugfs.c
+++ b/drivers/misc/habanalabs/common/debugfs.c
@@ -1167,6 +1167,45 @@ static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
return count;
}
+static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ char tmp_buf[200];
+ ssize_t rc;
+
+ if (*ppos)
+ return 0;
+
+ sprintf(tmp_buf, "%d\n",
+ jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
+ rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
+ strlen(tmp_buf) + 1);
+
+ return rc;
+}
+
+static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
+ struct hl_device *hdev = entry->hdev;
+ u32 value;
+ ssize_t rc;
+
+ rc = kstrtouint_from_user(buf, count, 10, &value);
+ if (rc)
+ return rc;
+
+ if (value)
+ hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
+ else
+ hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+ return count;
+}
+
static const struct file_operations hl_data32b_fops = {
.owner = THIS_MODULE,
.read = hl_data_read32,
@@ -1240,6 +1279,12 @@ static const struct file_operations hl_state_dump_fops = {
.write = hl_state_dump_write
};
+static const struct file_operations hl_timeout_locked_fops = {
+ .owner = THIS_MODULE,
+ .read = hl_timeout_locked_read,
+ .write = hl_timeout_locked_write
+};
+
static const struct hl_info_list hl_debugfs_list[] = {
{"command_buffers", command_buffers_show, NULL},
{"command_submission", command_submission_show, NULL},
@@ -1421,6 +1466,12 @@ void hl_debugfs_add_device(struct hl_device *hdev)
dev_entry,
&hl_state_dump_fops);
+ debugfs_create_file("timeout_locked",
+ 0644,
+ dev_entry->root,
+ dev_entry,
+ &hl_timeout_locked_fops);
+
for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
debugfs_create_file(hl_debugfs_list[i].name,
0444,
diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c
index 97c7c86580e6..2022e5d7b3ad 100644
--- a/drivers/misc/habanalabs/common/device.c
+++ b/drivers/misc/habanalabs/common/device.c
@@ -69,13 +69,6 @@ static void hpriv_release(struct kref *ref)
mutex_destroy(&hpriv->restore_phase_mutex);
- mutex_lock(&hdev->fpriv_list_lock);
- list_del(&hpriv->dev_node);
- hdev->compute_ctx = NULL;
- mutex_unlock(&hdev->fpriv_list_lock);
-
- kfree(hpriv);
-
if ((!hdev->pldm) && (hdev->pdev) &&
(!hdev->asic_funcs->is_device_idle(hdev,
idle_mask,
@@ -87,9 +80,32 @@ static void hpriv_release(struct kref *ref)
device_is_idle = false;
}
+ /* We need to remove the user from the list to make sure the reset process won't
+ * try to kill the user process. Because, if we got here, it means there are no
+ * more driver/device resources that the user process is occupying so there is
+ * no need to kill it
+ *
+ * However, we can't set the compute_ctx to NULL at this stage. This is to prevent
+ * a race between the release and opening the device again. We don't want to let
+ * a user open the device while there a reset is about to happen.
+ */
+ mutex_lock(&hdev->fpriv_list_lock);
+ list_del(&hpriv->dev_node);
+ mutex_unlock(&hdev->fpriv_list_lock);
+
if ((hdev->reset_if_device_not_idle && !device_is_idle)
|| hdev->reset_upon_device_release)
hl_device_reset(hdev, HL_RESET_DEVICE_RELEASE);
+
+ /* Now we can mark the compute_ctx as empty. Even if a reset is running in a different
+ * thread, we don't care because the in_reset is marked so if a user will try to open
+ * the device it will fail on that, even if compute_ctx is NULL.
+ */
+ mutex_lock(&hdev->fpriv_list_lock);
+ hdev->compute_ctx = NULL;
+ mutex_unlock(&hdev->fpriv_list_lock);
+
+ kfree(hpriv);
}
void hl_hpriv_get(struct hl_fpriv *hpriv)
@@ -530,6 +546,19 @@ static void hl_device_heartbeat(struct work_struct *work)
return;
reschedule:
+ /*
+ * prev_reset_trigger tracks consecutive fatal h/w errors until first
+ * heartbeat immediately post reset.
+ * If control reached here, then at least one heartbeat work has been
+ * scheduled since last reset/init cycle.
+ * So if the device is not already in reset cycle, reset the flag
+ * prev_reset_trigger as no reset occurred with HL_RESET_FW_FATAL_ERR
+ * status for at least one heartbeat. From this point driver restarts
+ * tracking future consecutive fatal errors.
+ */
+ if (!(atomic_read(&hdev->in_reset)))
+ hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+
schedule_delayed_work(&hdev->work_heartbeat,
usecs_to_jiffies(HL_HEARTBEAT_PER_USEC));
}
@@ -909,6 +938,65 @@ static void device_disable_open_processes(struct hl_device *hdev)
mutex_unlock(&hdev->fpriv_list_lock);
}
+static void handle_reset_trigger(struct hl_device *hdev, u32 flags)
+{
+ u32 cur_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
+
+ /*
+ * 'reset cause' is being updated here, because getting here
+ * means that it's the 1st time and the last time we're here
+ * ('in_reset' makes sure of it). This makes sure that
+ * 'reset_cause' will continue holding its 1st recorded reason!
+ */
+ if (flags & HL_RESET_HEARTBEAT) {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
+ cur_reset_trigger = HL_RESET_HEARTBEAT;
+ } else if (flags & HL_RESET_TDR) {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
+ cur_reset_trigger = HL_RESET_TDR;
+ } else if (flags & HL_RESET_FW_FATAL_ERR) {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ cur_reset_trigger = HL_RESET_FW_FATAL_ERR;
+ } else {
+ hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ }
+
+ /*
+ * If reset cause is same twice, then reset_trigger_repeated
+ * is set and if this reset is due to a fatal FW error
+ * device is set to an unstable state.
+ */
+ if (hdev->prev_reset_trigger != cur_reset_trigger) {
+ hdev->prev_reset_trigger = cur_reset_trigger;
+ hdev->reset_trigger_repeated = 0;
+ } else {
+ hdev->reset_trigger_repeated = 1;
+ }
+
+ /* If reset is due to heartbeat, device CPU is no responsive in
+ * which case no point sending PCI disable message to it.
+ *
+ * If F/W is performing the reset, no need to send it a message to disable
+ * PCI access
+ */
+ if ((flags & HL_RESET_HARD) &&
+ !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
+ /* Disable PCI access from device F/W so he won't send
+ * us additional interrupts. We disable MSI/MSI-X at
+ * the halt_engines function and we can't have the F/W
+ * sending us interrupts after that. We need to disable
+ * the access here because if the device is marked
+ * disable, the message won't be send. Also, in case
+ * of heartbeat, the device CPU is marked as disable
+ * so this message won't be sent
+ */
+ if (hl_fw_send_pci_access_msg(hdev,
+ CPUCP_PACKET_DISABLE_PCI_ACCESS))
+ dev_warn(hdev->dev,
+ "Failed to disable PCI access by F/W\n");
+ }
+}
+
/*
* hl_device_reset - reset the device
*
@@ -954,7 +1042,7 @@ int hl_device_reset(struct hl_device *hdev, u32 flags)
goto do_reset;
}
- if (!hard_reset && !hdev->allow_external_soft_reset) {
+ if (!hard_reset && !hdev->allow_inference_soft_reset) {
hard_instead_soft = true;
hard_reset = true;
}
@@ -978,47 +1066,21 @@ do_reset:
if (rc)
return 0;
- /*
- * 'reset cause' is being updated here, because getting here
- * means that it's the 1st time and the last time we're here
- * ('in_reset' makes sure of it). This makes sure that
- * 'reset_cause' will continue holding its 1st recorded reason!
- */
- if (flags & HL_RESET_HEARTBEAT)
- hdev->curr_reset_cause = HL_RESET_CAUSE_HEARTBEAT;
- else if (flags & HL_RESET_TDR)
- hdev->curr_reset_cause = HL_RESET_CAUSE_TDR;
- else
- hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
-
- /* If reset is due to heartbeat, device CPU is no responsive in
- * which case no point sending PCI disable message to it.
- *
- * If F/W is performing the reset, no need to send it a message to disable
- * PCI access
- */
- if (hard_reset && !(flags & (HL_RESET_HEARTBEAT | HL_RESET_FW))) {
- /* Disable PCI access from device F/W so he won't send
- * us additional interrupts. We disable MSI/MSI-X at
- * the halt_engines function and we can't have the F/W
- * sending us interrupts after that. We need to disable
- * the access here because if the device is marked
- * disable, the message won't be send. Also, in case
- * of heartbeat, the device CPU is marked as disable
- * so this message won't be sent
- */
- if (hl_fw_send_pci_access_msg(hdev,
- CPUCP_PACKET_DISABLE_PCI_ACCESS))
- dev_warn(hdev->dev,
- "Failed to disable PCI access by F/W\n");
- }
+ handle_reset_trigger(hdev, flags);
/* This also blocks future CS/VM/JOB completion operations */
hdev->disabled = true;
take_release_locks(hdev);
- dev_err(hdev->dev, "Going to RESET device!\n");
+ if (hard_reset)
+ dev_info(hdev->dev, "Going to reset device\n");
+ else if (flags & HL_RESET_DEVICE_RELEASE)
+ dev_info(hdev->dev,
+ "Going to reset device after it was released by user\n");
+ else
+ dev_info(hdev->dev,
+ "Going to reset compute engines of inference device\n");
}
again:
@@ -1108,6 +1170,17 @@ kill_processes:
hdev->device_cpu_disabled = false;
hdev->hard_reset_pending = false;
+ if (hdev->reset_trigger_repeated &&
+ (hdev->prev_reset_trigger == HL_RESET_FW_FATAL_ERR)) {
+ /* if there 2 back to back resets from FW,
+ * ensure driver puts the driver in a unusable state
+ */
+ dev_crit(hdev->dev,
+ "Consecutive FW fatal errors received, stopping hard reset\n");
+ rc = -EIO;
+ goto out_err;
+ }
+
if (hdev->kernel_ctx) {
dev_crit(hdev->dev,
"kernel ctx was alive during hard reset, something is terribly wrong\n");
diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c
index 8d2568c63f19..4e68fb9d2a6b 100644
--- a/drivers/misc/habanalabs/common/firmware_if.c
+++ b/drivers/misc/habanalabs/common/firmware_if.c
@@ -2162,18 +2162,17 @@ static void hl_fw_linux_update_state(struct hl_device *hdev,
}
/**
- * hl_fw_dynamic_report_reset_cause - send a COMMS message with the cause
- * of the newly triggered hard reset
+ * hl_fw_dynamic_send_msg - send a COMMS message with attached data
*
* @hdev: pointer to the habanalabs device structure
* @fw_loader: managing structure for loading device's FW
- * @reset_cause: enumerated cause for the recent hard reset
+ * @msg_type: message type
+ * @data: data to be sent
*
* @return 0 on success, otherwise non-zero error code
*/
-static int hl_fw_dynamic_report_reset_cause(struct hl_device *hdev,
- struct fw_load_mgr *fw_loader,
- enum comms_reset_cause reset_cause)
+static int hl_fw_dynamic_send_msg(struct hl_device *hdev,
+ struct fw_load_mgr *fw_loader, u8 msg_type, void *data)
{
struct lkd_msg_comms msg;
int rc;
@@ -2181,11 +2180,20 @@ static int hl_fw_dynamic_report_reset_cause(struct hl_device *hdev,
memset(&msg, 0, sizeof(msg));
/* create message to be sent */
- msg.header.type = HL_COMMS_RESET_CAUSE_TYPE;
+ msg.header.type = msg_type;
msg.header.size = cpu_to_le16(sizeof(struct comms_msg_header));
msg.header.magic = cpu_to_le32(HL_COMMS_MSG_MAGIC);
- msg.reset_cause = reset_cause;
+ switch (msg_type) {
+ case HL_COMMS_RESET_CAUSE_TYPE:
+ msg.reset_cause = *(__u8 *) data;
+ break;
+ default:
+ dev_err(hdev->dev,
+ "Send COMMS message - invalid message type %u\n",
+ msg_type);
+ return -EINVAL;
+ }
rc = hl_fw_dynamic_request_descriptor(hdev, fw_loader,
sizeof(struct lkd_msg_comms));
@@ -2252,8 +2260,8 @@ static int hl_fw_dynamic_init_cpu(struct hl_device *hdev,
goto protocol_err;
if (hdev->curr_reset_cause) {
- rc = hl_fw_dynamic_report_reset_cause(hdev, fw_loader,
- hdev->curr_reset_cause);
+ rc = hl_fw_dynamic_send_msg(hdev, fw_loader,
+ HL_COMMS_RESET_CAUSE_TYPE, &hdev->curr_reset_cause);
if (rc)
goto protocol_err;
diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h
index bebebcb163ee..a2002cbf794b 100644
--- a/drivers/misc/habanalabs/common/habanalabs.h
+++ b/drivers/misc/habanalabs/common/habanalabs.h
@@ -26,6 +26,7 @@
#include <linux/sched/signal.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/coresight.h>
+#include <linux/dma-buf.h>
#define HL_NAME "habanalabs"
@@ -68,6 +69,9 @@
#define HL_STATE_DUMP_HIST_LEN 5
+/* Default value for device reset trigger , an invalid value */
+#define HL_RESET_TRIGGER_DEFAULT 0xFF
+
#define OBJ_NAMES_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
#define SYNC_TO_ENGINE_HASH_TABLE_BITS 7 /* 1 << 7 buckets */
@@ -132,13 +136,18 @@ enum hl_mmu_page_table_location {
* - HL_RESET_FW
* F/W will perform the reset. No need to ask it to reset the device. This is relevant
* only when running with secured f/w
+ *
+ * - HL_RESET_FW_FATAL_ERR
+ * Set if reset is due to a fatal error from FW
*/
+
#define HL_RESET_HARD (1 << 0)
#define HL_RESET_FROM_RESET_THREAD (1 << 1)
#define HL_RESET_HEARTBEAT (1 << 2)
#define HL_RESET_TDR (1 << 3)
#define HL_RESET_DEVICE_RELEASE (1 << 4)
#define HL_RESET_FW (1 << 5)
+#define HL_RESET_FW_FATAL_ERR (1 << 6)
#define HL_MAX_SOBS_PER_MONITOR 8
@@ -447,6 +456,9 @@ struct hl_hints_range {
* for hints validity check.
* device_dma_offset_for_host_access: the offset to add to host DMA addresses
* to enable the device to access them.
+ * @max_freq_value: current max clk frequency.
+ * @clk_pll_index: clock PLL index that specify which PLL determines the clock
+ * we display to the user
* @mmu_pgt_size: MMU page tables total size.
* @mmu_pte_size: PTE size in MMU page tables.
* @mmu_hop_table_size: MMU hop table size.
@@ -543,6 +555,8 @@ struct asic_fixed_properties {
u64 cb_va_end_addr;
u64 dram_hints_align_mask;
u64 device_dma_offset_for_host_access;
+ u64 max_freq_value;
+ u32 clk_pll_index;
u32 mmu_pgt_size;
u32 mmu_pte_size;
u32 mmu_hop_table_size;
@@ -601,6 +615,9 @@ struct asic_fixed_properties {
* masters QIDs that multi cs is waiting on
* @error: mark this fence with error
* @timestamp: timestamp upon completion
+ * @mcs_handling_done: indicates that corresponding command submission has
+ * finished msc handling, this does not mean it was part
+ * of the mcs
*/
struct hl_fence {
struct completion completion;
@@ -609,6 +626,7 @@ struct hl_fence {
u32 stream_master_qid_map;
int error;
ktime_t timestamp;
+ u8 mcs_handling_done;
};
/**
@@ -1353,6 +1371,23 @@ struct hl_cs_counters_atomic {
};
/**
+ * struct hl_dmabuf_priv - a dma-buf private object.
+ * @dmabuf: pointer to dma-buf object.
+ * @ctx: pointer to the dma-buf owner's context.
+ * @phys_pg_pack: pointer to physical page pack if the dma-buf was exported for
+ * memory allocation handle.
+ * @device_address: physical address of the device's memory. Relevant only
+ * if phys_pg_pack is NULL (dma-buf was exported from address).
+ * The total size can be taken from the dmabuf object.
+ */
+struct hl_dmabuf_priv {
+ struct dma_buf *dmabuf;
+ struct hl_ctx *ctx;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ uint64_t device_address;
+};
+
+/**
* struct hl_ctx - user/kernel context.
* @mem_hash: holds mapping from virtual address to virtual memory area
* descriptor (hl_vm_phys_pg_list or hl_userptr).
@@ -1662,6 +1697,7 @@ struct hl_vm_hw_block_list_node {
* @npages: num physical pages in the pack.
* @total_size: total size of all the pages in this list.
* @mapping_cnt: number of shared mappings.
+ * @exporting_cnt: number of dma-buf exporting.
* @asid: the context related to this list.
* @page_size: size of each page in the pack.
* @flags: HL_MEM_* flags related to this list.
@@ -1676,6 +1712,7 @@ struct hl_vm_phys_pg_pack {
u64 npages;
u64 total_size;
atomic_t mapping_cnt;
+ u32 exporting_cnt;
u32 asid;
u32 page_size;
u32 flags;
@@ -2396,6 +2433,7 @@ struct multi_cs_data {
* the error will be ignored by the driver during
* device initialization. Mainly used to debug and
* workaround firmware bugs
+ * @dram_pci_bar_start: start bus address of PCIe bar towards DRAM.
* @last_successful_open_jif: timestamp (jiffies) of the last successful
* device open.
* @last_open_session_duration_jif: duration (jiffies) of the last device open
@@ -2440,8 +2478,12 @@ struct multi_cs_data {
* @collective_mon_idx: helper index for collective initialization
* @supports_coresight: is CoreSight supported.
* @supports_soft_reset: is soft reset supported.
- * @allow_external_soft_reset: true if soft reset initiated by user or TDR is
- * allowed.
+ * @allow_inference_soft_reset: true if the ASIC supports soft reset that is
+ * initiated by user or TDR. This is only true
+ * in inference ASICs, as there is no real-world
+ * use-case of doing soft-reset in training (due
+ * to the fact that training runs on multiple
+ * devices)
* @supports_cb_mapping: is mapping a CB to the device's MMU supported.
* @needs_reset: true if reset_on_lockup is false and device should be reset
* due to lockup.
@@ -2452,6 +2494,10 @@ struct multi_cs_data {
* @supports_staged_submission: true if staged submissions are supported
* @curr_reset_cause: saves an enumerated reset cause when a hard reset is
* triggered, and cleared after it is shared with preboot.
+ * @prev_reset_trigger: saves the previous trigger which caused a reset, overidden
+ * with a new value on next reset
+ * @reset_trigger_repeated: set if device reset is triggered more than once with
+ * same cause.
* @skip_reset_on_timeout: Skip device reset if CS has timed out, wait for it to
* complete instead.
* @device_cpu_is_halted: Flag to indicate whether the device CPU was already
@@ -2537,6 +2583,7 @@ struct hl_device {
u64 max_power;
u64 clock_gating_mask;
u64 boot_error_status_mask;
+ u64 dram_pci_bar_start;
u64 last_successful_open_jif;
u64 last_open_session_duration_jif;
u64 open_counter;
@@ -2572,13 +2619,15 @@ struct hl_device {
u8 collective_mon_idx;
u8 supports_coresight;
u8 supports_soft_reset;
- u8 allow_external_soft_reset;
+ u8 allow_inference_soft_reset;
u8 supports_cb_mapping;
u8 needs_reset;
u8 process_kill_trial_cnt;
u8 device_fini_pending;
u8 supports_staged_submission;
u8 curr_reset_cause;
+ u8 prev_reset_trigger;
+ u8 reset_trigger_repeated;
u8 skip_reset_on_timeout;
u8 device_cpu_is_halted;
u8 supports_wait_for_multi_cs;
@@ -2956,6 +3005,15 @@ int hl_set_voltage(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
int hl_set_current(struct hl_device *hdev,
int sensor_index, u32 attr, long value);
+int hl_set_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value);
+int hl_get_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value);
+int hl_get_clk_rate(struct hl_device *hdev,
+ u32 *cur_clk, u32 *max_clk);
+void hl_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
+void hl_add_device_attr(struct hl_device *hdev,
+ struct attribute_group *dev_attr_grp);
void hw_sob_get(struct hl_hw_sob *hw_sob);
void hw_sob_put(struct hl_hw_sob *hw_sob);
void hl_encaps_handle_do_release(struct kref *ref);
diff --git a/drivers/misc/habanalabs/common/habanalabs_drv.c b/drivers/misc/habanalabs/common/habanalabs_drv.c
index a75e4fceb9d8..949d1b5c5c41 100644
--- a/drivers/misc/habanalabs/common/habanalabs_drv.c
+++ b/drivers/misc/habanalabs/common/habanalabs_drv.c
@@ -225,6 +225,17 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
if (!hpriv)
return -ENOMEM;
+ /* Prevent other routines from reading partial hpriv data by
+ * initializing hpriv fields before inserting it to the list
+ */
+ hpriv->hdev = hdev;
+ filp->private_data = hpriv;
+ hpriv->filp = filp;
+ hpriv->is_control = true;
+ nonseekable_open(inode, filp);
+
+ hpriv->taskpid = find_get_pid(current->pid);
+
mutex_lock(&hdev->fpriv_list_lock);
if (!hl_device_operational(hdev, NULL)) {
@@ -238,19 +249,15 @@ int hl_device_open_ctrl(struct inode *inode, struct file *filp)
list_add(&hpriv->dev_node, &hdev->fpriv_list);
mutex_unlock(&hdev->fpriv_list_lock);
- hpriv->hdev = hdev;
- filp->private_data = hpriv;
- hpriv->filp = filp;
- hpriv->is_control = true;
- nonseekable_open(inode, filp);
-
- hpriv->taskpid = find_get_pid(current->pid);
-
return 0;
out_err:
mutex_unlock(&hdev->fpriv_list_lock);
+ filp->private_data = NULL;
+ put_pid(hpriv->taskpid);
+
kfree(hpriv);
+
return rc;
}
@@ -339,6 +346,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
set_driver_behavior_per_device(hdev);
hdev->curr_reset_cause = HL_RESET_CAUSE_UNKNOWN;
+ hdev->prev_reset_trigger = HL_RESET_TRIGGER_DEFAULT;
if (timeout_locked)
hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000);
diff --git a/drivers/misc/habanalabs/common/hw_queue.c b/drivers/misc/habanalabs/common/hw_queue.c
index 76b7de8f1406..0743319b10c7 100644
--- a/drivers/misc/habanalabs/common/hw_queue.c
+++ b/drivers/misc/habanalabs/common/hw_queue.c
@@ -437,6 +437,7 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
struct hl_cs_compl *cs_cmpl)
{
struct hl_cs_encaps_sig_handle *handle = cs->encaps_sig_hdl;
+ u32 offset = 0;
cs_cmpl->hw_sob = handle->hw_sob;
@@ -446,9 +447,13 @@ void hl_hw_queue_encaps_sig_set_sob_info(struct hl_device *hdev,
* set offset 1 for example he mean to wait only for the first
* signal only, which will be pre_sob_val, and if he set offset 2
* then the value required is (pre_sob_val + 1) and so on...
+ * if user set wait offset to 0, then treat it as legacy wait cs,
+ * wait for the next signal.
*/
- cs_cmpl->sob_val = handle->pre_sob_val +
- (job->encaps_sig_wait_offset - 1);
+ if (job->encaps_sig_wait_offset)
+ offset = job->encaps_sig_wait_offset - 1;
+
+ cs_cmpl->sob_val = handle->pre_sob_val + offset;
}
static int init_wait_cs(struct hl_device *hdev, struct hl_cs *cs,
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c b/drivers/misc/habanalabs/common/hwmgr.c
index 9b60eadd4c35..5451019f143f 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_hwmgr.c
+++ b/drivers/misc/habanalabs/common/hwmgr.c
@@ -1,29 +1,26 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2018 HabanaLabs, Ltd.
+ * Copyright 2019-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
-#include "gaudiP.h"
-#include "../include/gaudi/gaudi_fw_if.h"
+#include "habanalabs.h"
-void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
+void hl_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
{
- struct gaudi_device *gaudi = hdev->asic_specific;
-
- if (freq == PLL_LAST)
- hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
+ hdev->asic_prop.max_freq_value);
}
-int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
+int hl_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
{
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
if (value < 0) {
dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
@@ -33,7 +30,7 @@ int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
*max_clk = (value / 1000 / 1000);
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
if (value < 0) {
dev_err(hdev->dev,
@@ -51,15 +48,14 @@ static ssize_t clk_max_freq_mhz_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- struct gaudi_device *gaudi = hdev->asic_specific;
long value;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, false);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, false);
- gaudi->max_freq_value = value;
+ hdev->asic_prop.max_freq_value = value;
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
@@ -68,7 +64,6 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct hl_device *hdev = dev_get_drvdata(dev);
- struct gaudi_device *gaudi = hdev->asic_specific;
int rc;
u64 value;
@@ -83,9 +78,10 @@ static ssize_t clk_max_freq_mhz_store(struct device *dev,
goto fail;
}
- gaudi->max_freq_value = value * 1000 * 1000;
+ hdev->asic_prop.max_freq_value = value * 1000 * 1000;
- hl_set_frequency(hdev, HL_GAUDI_MME_PLL, gaudi->max_freq_value);
+ hl_set_frequency(hdev, hdev->asic_prop.clk_pll_index,
+ hdev->asic_prop.max_freq_value);
fail:
return count;
@@ -100,7 +96,7 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
- value = hl_get_frequency(hdev, HL_GAUDI_MME_PLL, true);
+ value = hl_get_frequency(hdev, hdev->asic_prop.clk_pll_index, true);
return sprintf(buf, "%lu\n", (value / 1000 / 1000));
}
@@ -108,14 +104,14 @@ static ssize_t clk_cur_freq_mhz_show(struct device *dev,
static DEVICE_ATTR_RW(clk_max_freq_mhz);
static DEVICE_ATTR_RO(clk_cur_freq_mhz);
-static struct attribute *gaudi_dev_attrs[] = {
+static struct attribute *hl_dev_attrs[] = {
&dev_attr_clk_max_freq_mhz.attr,
&dev_attr_clk_cur_freq_mhz.attr,
NULL,
};
-void gaudi_add_device_attr(struct hl_device *hdev,
+void hl_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp)
{
- dev_attr_grp->attrs = gaudi_dev_attrs;
+ dev_attr_grp->attrs = hl_dev_attrs;
}
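
With the clock handling generalized, an ASIC opts in by publishing its PLL index and maximum frequency in asic_fixed_properties and pointing its function table at the common helpers. A condensed, kernel-style sketch of that wiring (it mirrors the gaudi.c hunks further down in this diff and is not a standalone compilable unit):

/*
 * Kernel-style sketch only: the two pieces an ASIC provides so the common
 * hwmgr code knows which PLL to query and how to expose the clk_* attrs.
 */
static int my_asic_set_fixed_properties(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	prop->clk_pll_index = HL_GAUDI_MME_PLL;		/* PLL used by hl_get/set_frequency() */
	prop->max_freq_value = GAUDI_MAX_CLK_FREQ;	/* seed for clk_max_freq_mhz */

	return 0;
}

static const struct hl_asic_funcs my_asic_funcs = {
	.add_device_attr = hl_add_device_attr,	/* exposes clk_max_freq_mhz / clk_cur_freq_mhz */
	.set_pll_profile = hl_set_pll_profile,
	.get_clk_rate	 = hl_get_clk_rate,
	/* ... remaining ASIC-specific callbacks ... */
};
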
diff --git a/drivers/misc/habanalabs/common/hwmon.c b/drivers/misc/habanalabs/common/hwmon.c
index 6b421d76b311..e33f65be8a00 100644
--- a/drivers/misc/habanalabs/common/hwmon.c
+++ b/drivers/misc/habanalabs/common/hwmon.c
@@ -113,6 +113,9 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
{
struct hl_device *hdev = dev_get_drvdata(dev);
int rc;
+ u32 cpucp_attr;
+ bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
@@ -121,65 +124,134 @@ static int hl_read(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_input:
+ cpucp_attr = cpucp_temp_input;
+ break;
case hwmon_temp_max:
+ cpucp_attr = cpucp_temp_max;
+ break;
case hwmon_temp_crit:
+ cpucp_attr = cpucp_temp_crit;
+ break;
case hwmon_temp_max_hyst:
+ cpucp_attr = cpucp_temp_max_hyst;
+ break;
case hwmon_temp_crit_hyst:
+ cpucp_attr = cpucp_temp_crit_hyst;
+ break;
case hwmon_temp_offset:
+ cpucp_attr = cpucp_temp_offset;
+ break;
case hwmon_temp_highest:
+ cpucp_attr = cpucp_temp_highest;
break;
default:
return -EINVAL;
}
- rc = hl_get_temperature(hdev, channel, attr, val);
+ if (use_cpucp_enum)
+ rc = hl_get_temperature(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_temperature(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_input:
+ cpucp_attr = cpucp_in_input;
+ break;
case hwmon_in_min:
+ cpucp_attr = cpucp_in_min;
+ break;
case hwmon_in_max:
+ cpucp_attr = cpucp_in_max;
+ break;
case hwmon_in_highest:
+ cpucp_attr = cpucp_in_highest;
break;
default:
return -EINVAL;
}
- rc = hl_get_voltage(hdev, channel, attr, val);
+ if (use_cpucp_enum)
+ rc = hl_get_voltage(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_voltage(hdev, channel, attr, val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_input:
+ cpucp_attr = cpucp_curr_input;
+ break;
case hwmon_curr_min:
+ cpucp_attr = cpucp_curr_min;
+ break;
case hwmon_curr_max:
+ cpucp_attr = cpucp_curr_max;
+ break;
case hwmon_curr_highest:
+ cpucp_attr = cpucp_curr_highest;
break;
default:
return -EINVAL;
}
- rc = hl_get_current(hdev, channel, attr, val);
+ if (use_cpucp_enum)
+ rc = hl_get_current(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_current(hdev, channel, attr, val);
break;
case hwmon_fan:
switch (attr) {
case hwmon_fan_input:
+ cpucp_attr = cpucp_fan_input;
+ break;
case hwmon_fan_min:
+ cpucp_attr = cpucp_fan_min;
+ break;
case hwmon_fan_max:
+ cpucp_attr = cpucp_fan_max;
break;
default:
return -EINVAL;
}
- rc = hl_get_fan_speed(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ rc = hl_get_fan_speed(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_fan_speed(hdev, channel, attr, val);
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
+ cpucp_attr = cpucp_pwm_input;
+ break;
case hwmon_pwm_enable:
+ cpucp_attr = cpucp_pwm_enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_cpucp_enum)
+ rc = hl_get_pwm_info(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_pwm_info(hdev, channel, attr, val);
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ cpucp_attr = CPUCP_POWER_INPUT;
+ break;
+ case hwmon_power_input_highest:
+ cpucp_attr = CPUCP_POWER_INPUT_HIGHEST;
break;
default:
return -EINVAL;
}
- rc = hl_get_pwm_info(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ rc = hl_get_power(hdev, channel, cpucp_attr, val);
+ else
+ rc = hl_get_power(hdev, channel, attr, val);
break;
default:
return -EINVAL;
@@ -191,6 +263,9 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val)
{
struct hl_device *hdev = dev_get_drvdata(dev);
+ u32 cpucp_attr;
+ bool use_cpucp_enum = (hdev->asic_prop.fw_app_cpu_boot_dev_sts0 &
+ CPU_BOOT_DEV_STS0_MAP_HWMON_EN) ? true : false;
if (!hl_device_operational(hdev, NULL))
return -ENODEV;
@@ -199,40 +274,78 @@ static int hl_write(struct device *dev, enum hwmon_sensor_types type,
case hwmon_temp:
switch (attr) {
case hwmon_temp_offset:
+ cpucp_attr = cpucp_temp_offset;
+ break;
case hwmon_temp_reset_history:
+ cpucp_attr = cpucp_temp_reset_history;
break;
default:
return -EINVAL;
}
- hl_set_temperature(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_temperature(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_temperature(hdev, channel, attr, val);
break;
case hwmon_pwm:
switch (attr) {
case hwmon_pwm_input:
+ cpucp_attr = cpucp_pwm_input;
+ break;
case hwmon_pwm_enable:
+ cpucp_attr = cpucp_pwm_enable;
break;
default:
return -EINVAL;
}
- hl_set_pwm_info(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_pwm_info(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_pwm_info(hdev, channel, attr, val);
break;
case hwmon_in:
switch (attr) {
case hwmon_in_reset_history:
+ cpucp_attr = cpucp_in_reset_history;
break;
default:
return -EINVAL;
}
- hl_set_voltage(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_voltage(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_voltage(hdev, channel, attr, val);
break;
case hwmon_curr:
switch (attr) {
case hwmon_curr_reset_history:
+ cpucp_attr = cpucp_curr_reset_history;
break;
default:
return -EINVAL;
}
- hl_set_current(hdev, channel, attr, val);
+
+ if (use_cpucp_enum)
+ hl_set_current(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_current(hdev, channel, attr, val);
+ break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_reset_history:
+ cpucp_attr = CPUCP_POWER_RESET_INPUT_HISTORY;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (use_cpucp_enum)
+ hl_set_power(hdev, channel, cpucp_attr, val);
+ else
+ hl_set_power(hdev, channel, attr, val);
break;
default:
return -EINVAL;
@@ -296,6 +409,15 @@ static umode_t hl_is_visible(const void *data, enum hwmon_sensor_types type,
return 0644;
}
break;
+ case hwmon_power:
+ switch (attr) {
+ case hwmon_power_input:
+ case hwmon_power_input_highest:
+ return 0444;
+ case hwmon_power_reset_history:
+ return 0200;
+ }
+ break;
default:
break;
}
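
When CPU_BOOT_DEV_STS0_MAP_HWMON_EN is set, hl_read()/hl_write() translate the kernel's hwmon attribute enums into the firmware's cpucp enums before building the request, as the switch statements above show. A stand-alone sketch of that translation for a few temperature attributes; the local hwmon enum is a simplified stand-in for linux/hwmon.h, and the cpucp values mirror the cpucp_if.h hunk later in this diff:

/* Stand-alone sketch of the hwmon -> cpucp attribute translation. */
#include <stdio.h>

enum hwmon_temp_attr { hwmon_temp_input, hwmon_temp_max, hwmon_temp_crit };	/* stand-in */

enum cpucp_temp_type {
	cpucp_temp_input = 0,
	cpucp_temp_max = 6,
	cpucp_temp_crit = 8,
};

static int hwmon_to_cpucp_temp(enum hwmon_temp_attr attr)
{
	switch (attr) {
	case hwmon_temp_input:
		return cpucp_temp_input;
	case hwmon_temp_max:
		return cpucp_temp_max;
	case hwmon_temp_crit:
		return cpucp_temp_crit;
	default:
		return -1;	/* the driver returns -EINVAL here */
	}
}

int main(void)
{
	printf("hwmon_temp_max maps to cpucp attribute %d\n",
	       hwmon_to_cpucp_temp(hwmon_temp_max));
	return 0;
}
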
@@ -551,6 +673,60 @@ int hl_set_current(struct hl_device *hdev,
return rc;
}
+int hl_set_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long value)
+{
+ struct cpucp_packet pkt;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+ pkt.value = __cpu_to_le64(value);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, NULL);
+
+ if (rc)
+ dev_err(hdev->dev,
+ "Failed to set power of sensor %d, error %d\n",
+ sensor_index, rc);
+
+ return rc;
+}
+
+int hl_get_power(struct hl_device *hdev,
+ int sensor_index, u32 attr, long *value)
+{
+ struct cpucp_packet pkt;
+ u64 result;
+ int rc;
+
+ memset(&pkt, 0, sizeof(pkt));
+
+ pkt.ctl = cpu_to_le32(CPUCP_PACKET_POWER_GET <<
+ CPUCP_PKT_CTL_OPCODE_SHIFT);
+ pkt.sensor_index = __cpu_to_le16(sensor_index);
+ pkt.type = __cpu_to_le16(attr);
+
+ rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
+ 0, &result);
+
+ *value = (long) result;
+
+ if (rc) {
+ dev_err(hdev->dev,
+ "Failed to get power of sensor %d, error %d\n",
+ sensor_index, rc);
+ *value = 0;
+ }
+
+ return rc;
+}
+
int hl_hwmon_init(struct hl_device *hdev)
{
struct device *dev = hdev->pdev ? &hdev->pdev->dev : hdev->dev;
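
The new power channel backed by hl_get_power()/hl_set_power() is exposed through the normal hwmon sysfs ABI, where power readings are reported in microwatts. A small user-space sketch that reads such a sensor; the hwmon index and sensor number are illustrative and depend on the system:

/* User-space sketch: read the power sensor backed by hl_get_power(). */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/hwmon/hwmon0/power1_input";	/* illustrative */
	long long uw;
	FILE *f;

	f = fopen(path, "r");
	if (!f) {
		perror("fopen");
		return 1;
	}

	if (fscanf(f, "%lld", &uw) == 1)
		printf("power1_input = %lld uW\n", uw);

	fclose(f);
	return 0;
}
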
diff --git a/drivers/misc/habanalabs/common/irq.c b/drivers/misc/habanalabs/common/irq.c
index 39b14a933393..96d82b682674 100644
--- a/drivers/misc/habanalabs/common/irq.c
+++ b/drivers/misc/habanalabs/common/irq.c
@@ -141,10 +141,13 @@ static void handle_user_cq(struct hl_device *hdev,
struct hl_user_interrupt *user_cq)
{
struct hl_user_pending_interrupt *pend;
+ ktime_t now = ktime_get();
spin_lock(&user_cq->wait_list_lock);
- list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node)
+ list_for_each_entry(pend, &user_cq->wait_list_head, wait_list_node) {
+ pend->fence.timestamp = now;
complete_all(&pend->fence.completion);
+ }
spin_unlock(&user_cq->wait_list_lock);
}
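
The irq.c change samples the clock once per interrupt and stamps every pending fence with that value before completing it, so all waiters report the same completion time. A stand-alone sketch of the idea, with ktime_t modeled as nanoseconds in an int64_t:

/* Stand-alone sketch: sample the clock once and hand the same timestamp to
 * every pending waiter before marking it complete. Names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct pending_interrupt {
	int64_t timestamp_ns;
	int completed;
};

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

int main(void)
{
	struct pending_interrupt waiters[3] = {{ 0 }};
	int64_t now = now_ns();		/* sampled once, like ktime_get() above */
	int i;

	for (i = 0; i < 3; i++) {
		waiters[i].timestamp_ns = now;	/* stamp first ... */
		waiters[i].completed = 1;	/* ... then complete */
	}

	printf("all waiters share timestamp %lld ns\n",
	       (long long)waiters[0].timestamp_ns);
	return 0;
}
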
diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c
index 33986933aa9e..9bd626a00de3 100644
--- a/drivers/misc/habanalabs/common/memory.c
+++ b/drivers/misc/habanalabs/common/memory.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright 2016-2019 HabanaLabs, Ltd.
+ * Copyright 2016-2021 HabanaLabs, Ltd.
* All Rights Reserved.
*/
@@ -11,6 +11,9 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
+#include <linux/pci-p2pdma.h>
+
+MODULE_IMPORT_NS(DMA_BUF);
#define HL_MMU_DEBUG 0
@@ -347,6 +350,12 @@ static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
return -EINVAL;
}
+ if (phys_pg_pack->exporting_cnt) {
+ dev_dbg(hdev->dev, "handle %u is exported, cannot free\n", handle);
+ spin_unlock(&vm->idr_lock);
+ return -EINVAL;
+ }
+
/*
* must remove from idr before the freeing of the physical
* pages as the refcount of the pool is also the trigger of the
@@ -1487,13 +1496,487 @@ int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
return 0;
}
+static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
+ struct device *dev, enum dma_data_direction dir)
+{
+ dma_addr_t addr;
+ int rc;
+
+ addr = dma_map_resource(dev, bar_address, chunk_size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ rc = dma_mapping_error(dev, addr);
+ if (rc)
+ return rc;
+
+ sg_set_page(sg, NULL, chunk_size, 0);
+ sg_dma_address(sg) = addr;
+ sg_dma_len(sg) = chunk_size;
+
+ return 0;
+}
+
+static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
+ u64 page_size, struct device *dev,
+ enum dma_data_direction dir)
+{
+ u64 chunk_size, bar_address, dma_max_seg_size;
+ struct asic_fixed_properties *prop;
+ int rc, i, j, nents, cur_page;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+
+ prop = &hdev->asic_prop;
+
+ dma_max_seg_size = dma_get_max_seg_size(dev);
+
+ /* We would like to align the max segment size to PAGE_SIZE, so the
+ * SGL will contain aligned addresses that can be easily mapped to
+ * an MMU
+ */
+ dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
+ if (dma_max_seg_size < PAGE_SIZE) {
+ dev_err_ratelimited(hdev->dev,
+ "dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
+ dma_max_seg_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ /* If the size of each page is larger than the dma max segment size,
+ * then we can't combine pages and the number of entries in the SGL
+ * will just be the
+ * <number of pages> * <chunks of max segment size in each page>
+ */
+ if (page_size > dma_max_seg_size)
+ nents = npages * DIV_ROUND_UP_ULL(page_size, dma_max_seg_size);
+ else
+ /* Get number of non-contiguous chunks */
+ for (i = 1, nents = 1, chunk_size = page_size ; i < npages ; i++) {
+ if (pages[i - 1] + page_size != pages[i] ||
+ chunk_size + page_size > dma_max_seg_size) {
+ nents++;
+ chunk_size = page_size;
+ continue;
+ }
+
+ chunk_size += page_size;
+ }
+
+ rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
+ if (rc)
+ goto error_free;
+
+ cur_page = 0;
+
+ if (page_size > dma_max_seg_size) {
+ u64 size_left, cur_device_address = 0;
+
+ size_left = page_size;
+
+ /* Need to split each page into the number of chunks of
+ * dma_max_seg_size
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (size_left == page_size)
+ cur_device_address =
+ pages[cur_page] - prop->dram_base_address;
+ else
+ cur_device_address += dma_max_seg_size;
+
+ chunk_size = min(size_left, dma_max_seg_size);
+
+ bar_address = hdev->dram_pci_bar_start + cur_device_address;
+
+ rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
+ if (rc)
+ goto error_unmap;
+
+ if (size_left > dma_max_seg_size) {
+ size_left -= dma_max_seg_size;
+ } else {
+ cur_page++;
+ size_left = page_size;
+ }
+ }
+ } else {
+ /* Merge pages and put them into the scatterlist */
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ chunk_size = page_size;
+ for (j = cur_page + 1 ; j < npages ; j++) {
+ if (pages[j - 1] + page_size != pages[j] ||
+ chunk_size + page_size > dma_max_seg_size)
+ break;
+
+ chunk_size += page_size;
+ }
+
+ bar_address = hdev->dram_pci_bar_start +
+ (pages[cur_page] - prop->dram_base_address);
+
+ rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
+ if (rc)
+ goto error_unmap;
+
+ cur_page = j;
+ }
+ }
+
+	/* Because we are not going to include a CPU list, we want to give
+	 * other users a chance to detect this by setting orig_nents to 0,
+	 * so they use only nents (the length of the DMA list) when going
+	 * over the SGL
+	 */
+ sgt->orig_nents = 0;
+
+ return sgt;
+
+error_unmap:
+ for_each_sgtable_dma_sg(sgt, sg, i) {
+ if (!sg_dma_len(sg))
+ continue;
+
+ dma_unmap_resource(dev, sg_dma_address(sg),
+ sg_dma_len(sg), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+
+ sg_free_table(sgt);
+
+error_free:
+ kfree(sgt);
+ return ERR_PTR(rc);
+}
+
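
alloc_sgt_from_device_pages() above first counts the scatterlist entries it will need: if the device page size exceeds the (PAGE_SIZE-aligned) DMA max segment size, every page is split into segment-sized chunks; otherwise physically contiguous pages are merged up to the segment limit. A stand-alone sketch of that counting pass with made-up sizes:

/* Stand-alone sketch of the nents counting pass. Sizes and page addresses
 * are made up; the max segment size is assumed to be PAGE_SIZE-aligned.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t page_size = 0x200000;		/* 2 MiB device pages */
	uint64_t dma_max_seg_size = 0x100000;	/* 1 MiB max DMA segment */
	uint64_t pages[] = { 0x0, 0x200000, 0x400000, 0x800000 };
	uint64_t npages = 4, nents, chunk_size, i;

	if (page_size > dma_max_seg_size) {
		/* each page is split into segment-sized chunks */
		nents = npages * ((page_size + dma_max_seg_size - 1) / dma_max_seg_size);
	} else {
		/* count non-contiguous (or over-sized) chunks */
		for (i = 1, nents = 1, chunk_size = page_size; i < npages; i++) {
			if (pages[i - 1] + page_size != pages[i] ||
			    chunk_size + page_size > dma_max_seg_size) {
				nents++;
				chunk_size = page_size;
				continue;
			}
			chunk_size += page_size;
		}
	}

	printf("nents = %llu\n", (unsigned long long)nents);	/* 8 with these sizes */
	return 0;
}
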
+static int hl_dmabuf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev;
+ int rc;
+
+ hl_dmabuf = dmabuf->priv;
+ hdev = hl_dmabuf->ctx->hdev;
+
+ rc = pci_p2pdma_distance_many(hdev->pdev, &attachment->dev, 1, true);
+
+ if (rc < 0)
+ attachment->peer2peer = false;
+ return 0;
+}
+
+static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction dir)
+{
+ struct dma_buf *dma_buf = attachment->dmabuf;
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev;
+ struct sg_table *sgt;
+
+ hl_dmabuf = dma_buf->priv;
+ hdev = hl_dmabuf->ctx->hdev;
+ phys_pg_pack = hl_dmabuf->phys_pg_pack;
+
+ if (!attachment->peer2peer) {
+ dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
+ return ERR_PTR(-EPERM);
+ }
+
+ if (phys_pg_pack)
+ sgt = alloc_sgt_from_device_pages(hdev,
+ phys_pg_pack->pages,
+ phys_pg_pack->npages,
+ phys_pg_pack->page_size,
+ attachment->dev,
+ dir);
+ else
+ sgt = alloc_sgt_from_device_pages(hdev,
+ &hl_dmabuf->device_address,
+ 1,
+ hl_dmabuf->dmabuf->size,
+ attachment->dev,
+ dir);
+
+ if (IS_ERR(sgt))
+ dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
+
+ return sgt;
+}
+
+static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ /* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
+ * only in the 'device' domain (after all, it maps a PCI bar address which points to the
+ * device memory).
+ *
+ * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
+ * a sync of the memory to the CPU's cache, as it never resided inside that cache.
+ */
+ for_each_sgtable_dma_sg(sgt, sg, i)
+ dma_unmap_resource(attachment->dev, sg_dma_address(sg),
+ sg_dma_len(sg), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+
+	/* Need to restore orig_nents because sg_free_table uses that field */
+ sgt->orig_nents = sgt->nents;
+ sg_free_table(sgt);
+ kfree(sgt);
+}
+
+static void hl_release_dmabuf(struct dma_buf *dmabuf)
+{
+ struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
+ struct hl_ctx *ctx = hl_dmabuf->ctx;
+ struct hl_device *hdev = ctx->hdev;
+ struct hl_vm *vm = &hdev->vm;
+
+ if (hl_dmabuf->phys_pg_pack) {
+ spin_lock(&vm->idr_lock);
+ hl_dmabuf->phys_pg_pack->exporting_cnt--;
+ spin_unlock(&vm->idr_lock);
+ }
+
+ hl_ctx_put(hl_dmabuf->ctx);
+
+ kfree(hl_dmabuf);
+}
+
+static const struct dma_buf_ops habanalabs_dmabuf_ops = {
+ .attach = hl_dmabuf_attach,
+ .map_dma_buf = hl_map_dmabuf,
+ .unmap_dma_buf = hl_unmap_dmabuf,
+ .release = hl_release_dmabuf,
+};
+
+static int export_dmabuf_common(struct hl_ctx *ctx,
+ struct hl_dmabuf_priv *hl_dmabuf,
+ u64 total_size, int flags, int *dmabuf_fd)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct hl_device *hdev = ctx->hdev;
+ int rc, fd;
+
+ exp_info.ops = &habanalabs_dmabuf_ops;
+ exp_info.size = total_size;
+ exp_info.flags = flags;
+ exp_info.priv = hl_dmabuf;
+
+ hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(hl_dmabuf->dmabuf)) {
+ dev_err(hdev->dev, "failed to export dma-buf\n");
+ return PTR_ERR(hl_dmabuf->dmabuf);
+ }
+
+ fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
+ if (fd < 0) {
+ dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf\n");
+ rc = fd;
+ goto err_dma_buf_put;
+ }
+
+ hl_dmabuf->ctx = ctx;
+ hl_ctx_get(hdev, hl_dmabuf->ctx);
+
+ *dmabuf_fd = fd;
+
+ return 0;
+
+err_dma_buf_put:
+ dma_buf_put(hl_dmabuf->dmabuf);
+ return rc;
+}
+
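
The FD produced here is an ordinary dma-buf, so an importing kernel driver consumes it through the standard dma-buf API, which in turn lands in the ops installed above. A hedged kernel-style sketch of the import flow; importer_dev is a hypothetical struct device of the importing hardware and error handling is kept minimal:

/*
 * Kernel-style sketch of the importer side (not a standalone program).
 */
static int import_example(struct device *importer_dev, int fd)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dmabuf;
	struct sg_table *sgt;
	int rc = 0;

	dmabuf = dma_buf_get(fd);			/* resolve the FD, take a reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, importer_dev);	/* ends up in hl_dmabuf_attach() */
	if (IS_ERR(attach)) {
		rc = PTR_ERR(attach);
		goto put_buf;
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);	/* hl_map_dmabuf() */
	if (IS_ERR(sgt)) {
		rc = PTR_ERR(sgt);
		goto detach;
	}

	/* ... program the importer's DMA engine from the returned sgt ... */

	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);	/* hl_unmap_dmabuf() */
detach:
	dma_buf_detach(dmabuf, attach);
put_buf:
	dma_buf_put(dmabuf);
	return rc;
}
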
+/**
+ * export_dmabuf_from_addr() - export a dma-buf object for the given memory
+ * address and size.
+ * @ctx: pointer to the context structure.
+ * @device_addr: device memory physical address.
+ * @size: size of device memory.
+ * @flags: DMA-BUF file/FD flags.
+ * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
+ *
+ * Create and export a dma-buf object for an existing memory allocation inside
+ * the device memory, and return a FD which is associated with the dma-buf
+ * object.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 device_addr,
+ u64 size, int flags, int *dmabuf_fd)
+{
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop;
+ u64 bar_address;
+ int rc;
+
+ prop = &hdev->asic_prop;
+
+ if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
+ dev_dbg(hdev->dev,
+ "exported device memory address 0x%llx should be aligned to 0x%lx\n",
+ device_addr, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (size < PAGE_SIZE) {
+ dev_dbg(hdev->dev,
+ "exported device memory size %llu should be equal to or greater than %lu\n",
+ size, PAGE_SIZE);
+ return -EINVAL;
+ }
+
+ if (device_addr < prop->dram_user_base_address ||
+ device_addr + size > prop->dram_end_address ||
+ device_addr + size < device_addr) {
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
+ device_addr, size);
+ return -EINVAL;
+ }
+
+ bar_address = hdev->dram_pci_bar_start +
+ (device_addr - prop->dram_base_address);
+
+ if (bar_address + size >
+ hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
+ bar_address + size < bar_address) {
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
+ device_addr, size);
+ return -EINVAL;
+ }
+
+ hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
+ if (!hl_dmabuf)
+ return -ENOMEM;
+
+ hl_dmabuf->device_address = device_addr;
+
+ rc = export_dmabuf_common(ctx, hl_dmabuf, size, flags, dmabuf_fd);
+ if (rc)
+ goto err_free_dmabuf_wrapper;
+
+ return 0;
+
+err_free_dmabuf_wrapper:
+ kfree(hl_dmabuf);
+ return rc;
+}
+
+/**
+ * export_dmabuf_from_handle() - export a dma-buf object for the given memory
+ * handle.
+ * @ctx: pointer to the context structure.
+ * @handle: device memory allocation handle.
+ * @flags: DMA-BUF file/FD flags.
+ * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
+ *
+ * Create and export a dma-buf object for an existing memory allocation inside
+ * the device memory, and return a FD which is associated with the dma-buf
+ * object.
+ *
+ * Return: 0 on success, non-zero for failure.
+ */
+static int export_dmabuf_from_handle(struct hl_ctx *ctx, u64 handle, int flags,
+ int *dmabuf_fd)
+{
+ struct hl_vm_phys_pg_pack *phys_pg_pack;
+ struct hl_dmabuf_priv *hl_dmabuf;
+ struct hl_device *hdev = ctx->hdev;
+ struct asic_fixed_properties *prop;
+ struct hl_vm *vm = &hdev->vm;
+ u64 bar_address;
+ int rc, i;
+
+ prop = &hdev->asic_prop;
+
+ if (upper_32_bits(handle)) {
+ dev_dbg(hdev->dev, "no match for handle 0x%llx\n", handle);
+ return -EINVAL;
+ }
+
+ spin_lock(&vm->idr_lock);
+
+ phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) handle);
+ if (!phys_pg_pack) {
+ spin_unlock(&vm->idr_lock);
+ dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) handle);
+ return -EINVAL;
+ }
+
+ /* increment now to avoid freeing device memory while exporting */
+ phys_pg_pack->exporting_cnt++;
+
+ spin_unlock(&vm->idr_lock);
+
+ if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
+ dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", handle);
+ rc = -EINVAL;
+ goto err_dec_exporting_cnt;
+ }
+
+ for (i = 0 ; i < phys_pg_pack->npages ; i++) {
+
+ bar_address = hdev->dram_pci_bar_start +
+ (phys_pg_pack->pages[i] -
+ prop->dram_base_address);
+
+ if (bar_address + phys_pg_pack->page_size >
+ hdev->dram_pci_bar_start + prop->dram_pci_bar_size ||
+ bar_address + phys_pg_pack->page_size < bar_address) {
+
+ dev_dbg(hdev->dev,
+ "DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
+ phys_pg_pack->pages[i],
+ phys_pg_pack->page_size);
+
+ rc = -EINVAL;
+ goto err_dec_exporting_cnt;
+ }
+ }
+
+ hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
+ if (!hl_dmabuf) {
+ rc = -ENOMEM;
+ goto err_dec_exporting_cnt;
+ }
+
+ hl_dmabuf->phys_pg_pack = phys_pg_pack;
+
+ rc = export_dmabuf_common(ctx, hl_dmabuf, phys_pg_pack->total_size,
+ flags, dmabuf_fd);
+ if (rc)
+ goto err_free_dmabuf_wrapper;
+
+ return 0;
+
+err_free_dmabuf_wrapper:
+ kfree(hl_dmabuf);
+
+err_dec_exporting_cnt:
+ spin_lock(&vm->idr_lock);
+ phys_pg_pack->exporting_cnt--;
+ spin_unlock(&vm->idr_lock);
+
+ return rc;
+}
+
static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
{
struct hl_device *hdev = hpriv->hdev;
struct hl_ctx *ctx = hpriv->ctx;
u64 block_handle, device_addr = 0;
u32 handle = 0, block_size;
- int rc;
+ int rc, dmabuf_fd = -EBADF;
switch (args->in.op) {
case HL_MEM_OP_ALLOC:
@@ -1542,6 +2025,16 @@ static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
args->out.block_size = block_size;
break;
+ case HL_MEM_OP_EXPORT_DMABUF_FD:
+ rc = export_dmabuf_from_addr(ctx,
+ args->in.export_dmabuf_fd.handle,
+ args->in.export_dmabuf_fd.mem_size,
+ args->in.flags,
+ &dmabuf_fd);
+ memset(args, 0, sizeof(*args));
+ args->out.fd = dmabuf_fd;
+ break;
+
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -ENOTTY;
@@ -1560,7 +2053,7 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
struct hl_ctx *ctx = hpriv->ctx;
u64 block_handle, device_addr = 0;
u32 handle = 0, block_size;
- int rc;
+ int rc, dmabuf_fd = -EBADF;
if (!hl_device_operational(hdev, &status)) {
dev_warn_ratelimited(hdev->dev,
@@ -1651,6 +2144,22 @@ int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
args->out.block_size = block_size;
break;
+ case HL_MEM_OP_EXPORT_DMABUF_FD:
+ if (hdev->asic_prop.dram_supports_virtual_memory)
+ rc = export_dmabuf_from_handle(ctx,
+ args->in.export_dmabuf_fd.handle,
+ args->in.flags,
+ &dmabuf_fd);
+ else
+ rc = export_dmabuf_from_addr(ctx,
+ args->in.export_dmabuf_fd.handle,
+ args->in.export_dmabuf_fd.mem_size,
+ args->in.flags,
+ &dmabuf_fd);
+ memset(args, 0, sizeof(*args));
+ args->out.fd = dmabuf_fd;
+ break;
+
default:
dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
rc = -ENOTTY;
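
From user space the new opcode is reached through the memory IOCTL. A hedged fragment, assuming the HL_IOCTL_MEMORY number and union hl_mem_args layout from the habanalabs uapi header (neither is shown in this diff); the handle would come from an earlier HL_MEM_OP_ALLOC and dev_fd is an open /dev/hl<n> descriptor:

/* User-space fragment (hedged): export a DRAM allocation as a dma-buf FD. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>	/* uapi header, assumed installed */

static int export_handle(int dev_fd, unsigned long long handle, unsigned long long size)
{
	union hl_mem_args args = {0};

	args.in.op = HL_MEM_OP_EXPORT_DMABUF_FD;
	args.in.export_dmabuf_fd.handle = handle;
	args.in.export_dmabuf_fd.mem_size = size;	/* only used on the no-MMU path */
	args.in.flags = O_RDWR | O_CLOEXEC;		/* DMA-BUF file/FD flags */

	if (ioctl(dev_fd, HL_IOCTL_MEMORY, &args))
		return -1;

	return args.out.fd;	/* the dma-buf file descriptor */
}
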
diff --git a/drivers/misc/habanalabs/common/mmu/mmu.c b/drivers/misc/habanalabs/common/mmu/mmu.c
index 792d25b79ea6..aa96917f62e5 100644
--- a/drivers/misc/habanalabs/common/mmu/mmu.c
+++ b/drivers/misc/habanalabs/common/mmu/mmu.c
@@ -501,23 +501,25 @@ static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
!is_power_of_2(prop->dram_page_size)) {
- unsigned long dram_page_size = prop->dram_page_size;
- u64 page_offset_mask;
- u64 phys_addr_mask;
- u32 bit;
+ u64 dram_page_size, dram_base, abs_phys_addr, abs_virt_addr,
+ page_id, page_start;
+ u32 page_off;
/*
- * find last set bit in page_size to cover all bits of page
- * offset. note that 1 has to be added to bit index.
- * note that the internal ulong variable is used to avoid
- * alignment issue.
+		 * Bit arithmetic cannot be used for non-power-of-2 page
+		 * sizes. In addition, since bit arithmetic is not used,
+		 * we cannot ignore the DRAM base. All of that must be considered.
*/
- bit = find_last_bit(&dram_page_size,
- sizeof(dram_page_size) * BITS_PER_BYTE) + 1;
- page_offset_mask = (BIT_ULL(bit) - 1);
- phys_addr_mask = ~page_offset_mask;
- *phys_addr = (tmp_phys_addr & phys_addr_mask) |
- (virt_addr & page_offset_mask);
+
+ dram_page_size = prop->dram_page_size;
+ dram_base = prop->dram_base_address;
+ abs_phys_addr = tmp_phys_addr - dram_base;
+ abs_virt_addr = virt_addr - dram_base;
+ page_id = DIV_ROUND_DOWN_ULL(abs_phys_addr, dram_page_size);
+ page_start = page_id * dram_page_size;
+ div_u64_rem(abs_virt_addr, dram_page_size, &page_off);
+
+ *phys_addr = page_start + page_off + dram_base;
} else {
/*
* find the correct hop shift field in hl_mmu_properties
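
The rewritten translation relies on divide/modulo rather than bit masks, so it stays correct when dram_page_size is not a power of two. A stand-alone sketch of the arithmetic with made-up numbers (DIV_ROUND_DOWN_ULL and div_u64_rem in the driver become plain / and % here):

/* Stand-alone sketch of the non-power-of-2 translation; numbers are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dram_page_size = 48ULL << 20;				/* not a power of 2 */
	uint64_t dram_base = 0x20000000ULL;
	uint64_t tmp_phys_addr = dram_base + 5 * dram_page_size;	/* page start from the hop walk */
	uint64_t virt_addr = dram_base + 5 * dram_page_size + 0x1234;	/* offset 0x1234 into page 5 */

	uint64_t abs_phys_addr = tmp_phys_addr - dram_base;
	uint64_t abs_virt_addr = virt_addr - dram_base;
	uint64_t page_id = abs_phys_addr / dram_page_size;
	uint64_t page_start = page_id * dram_page_size;
	uint64_t page_off = abs_virt_addr % dram_page_size;
	uint64_t phys_addr = page_start + page_off + dram_base;

	printf("page_id=%llu page_off=0x%llx phys_addr=0x%llx\n",
	       (unsigned long long)page_id,
	       (unsigned long long)page_off,
	       (unsigned long long)phys_addr);
	return 0;
}
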
diff --git a/drivers/misc/habanalabs/common/sysfs.c b/drivers/misc/habanalabs/common/sysfs.c
index 34f9f2779962..42c1769ad25d 100644
--- a/drivers/misc/habanalabs/common/sysfs.c
+++ b/drivers/misc/habanalabs/common/sysfs.c
@@ -206,12 +206,12 @@ static ssize_t soft_reset_store(struct device *dev,
goto out;
}
- if (!hdev->allow_external_soft_reset) {
- dev_err(hdev->dev, "Device does not support soft-reset\n");
+ if (!hdev->allow_inference_soft_reset) {
+ dev_err(hdev->dev, "Device does not support inference soft-reset\n");
goto out;
}
- dev_warn(hdev->dev, "Soft-Reset requested through sysfs\n");
+ dev_warn(hdev->dev, "Inference Soft-Reset requested through sysfs\n");
hl_device_reset(hdev, 0);
diff --git a/drivers/misc/habanalabs/gaudi/Makefile b/drivers/misc/habanalabs/gaudi/Makefile
index c9f4703cff24..10577c33a816 100644
--- a/drivers/misc/habanalabs/gaudi/Makefile
+++ b/drivers/misc/habanalabs/gaudi/Makefile
@@ -1,3 +1,3 @@
# SPDX-License-Identifier: GPL-2.0-only
-HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_hwmgr.o gaudi/gaudi_security.o \
+HL_GAUDI_FILES := gaudi/gaudi.o gaudi/gaudi_security.o \
gaudi/gaudi_coresight.o
diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c
index 383865be3c2c..825737dfe381 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi.c
@@ -395,7 +395,7 @@ static struct hl_hw_obj_name_entry gaudi_so_id_to_str[] = {
static struct hl_hw_obj_name_entry gaudi_monitor_id_to_str[] = {
{ .id = 200, .name = "MON_OBJ_DMA_DOWN_FEEDBACK_RESET" },
- { .id = 201, .name = "MON_OBJ_DMA_UP_FEADBACK_RESET" },
+ { .id = 201, .name = "MON_OBJ_DMA_UP_FEEDBACK_RESET" },
{ .id = 203, .name = "MON_OBJ_DRAM_TO_SRAM_QUEUE_FENCE" },
{ .id = 204, .name = "MON_OBJ_TPC_0_CLK_GATE" },
{ .id = 205, .name = "MON_OBJ_TPC_1_CLK_GATE" },
@@ -661,6 +661,9 @@ static int gaudi_set_fixed_properties(struct hl_device *hdev)
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+ prop->clk_pll_index = HL_GAUDI_MME_PLL;
+ prop->max_freq_value = GAUDI_MAX_CLK_FREQ;
+
return 0;
}
@@ -795,6 +798,7 @@ static int gaudi_early_init(struct hl_device *hdev)
}
prop->dram_pci_bar_size = pci_resource_len(pdev, HBM_BAR_ID);
+ hdev->dram_pci_bar_start = pci_resource_start(pdev, HBM_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) {
@@ -1837,8 +1841,6 @@ static int gaudi_sw_init(struct hl_device *hdev)
gaudi->cpucp_info_get = gaudi_cpucp_info_get;
- gaudi->max_freq_value = GAUDI_MAX_CLK_FREQ;
-
hdev->asic_specific = gaudi;
/* Create DMA pool for small allocations */
@@ -2616,7 +2618,7 @@ static void gaudi_init_e2e(struct hl_device *hdev)
static void gaudi_init_hbm_cred(struct hl_device *hdev)
{
- uint32_t hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
+ u32 hbm0_wr, hbm1_wr, hbm0_rd, hbm1_rd;
if (hdev->asic_prop.fw_security_enabled)
return;
@@ -5802,6 +5804,7 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
{
struct gaudi_device *gaudi = hdev->asic_specific;
struct packet_msg_prot *cq_pkt;
+ u64 msi_addr;
u32 tmp;
cq_pkt = kernel_address + len - (sizeof(struct packet_msg_prot) * 2);
@@ -5823,10 +5826,12 @@ static void gaudi_add_end_of_cb_packets(struct hl_device *hdev,
cq_pkt->ctl = cpu_to_le32(tmp);
cq_pkt->value = cpu_to_le32(1);
- if (!gaudi->multi_msi_mode)
- msi_vec = 0;
+ if (gaudi->multi_msi_mode)
+ msi_addr = mmPCIE_MSI_INTR_0 + msi_vec * 4;
+ else
+ msi_addr = mmPCIE_CORE_MSI_REQ;
- cq_pkt->addr = cpu_to_le64(CFG_BASE + mmPCIE_MSI_INTR_0 + msi_vec * 4);
+ cq_pkt->addr = cpu_to_le64(CFG_BASE + msi_addr);
}
static void gaudi_update_eq_ci(struct hl_device *hdev, u32 val)
@@ -7929,6 +7934,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 ctl = le32_to_cpu(eq_entry->hdr.ctl);
+ u32 fw_fatal_err_flag = 0;
u16 event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK)
>> EQ_CTL_EVENT_TYPE_SHIFT);
bool reset_required;
@@ -7969,6 +7975,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_NIC0_CS_DBG_DERR ... GAUDI_EVENT_NIC4_CS_DBG_DERR:
gaudi_print_irq_info(hdev, event_type, true);
gaudi_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
+ fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_GIC500:
@@ -7976,6 +7983,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
case GAUDI_EVENT_L2_RAM_ECC:
case GAUDI_EVENT_PLL0 ... GAUDI_EVENT_PLL17:
gaudi_print_irq_info(hdev, event_type, false);
+ fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_0:
@@ -7986,6 +7994,7 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
gaudi_hbm_read_interrupts(hdev,
gaudi_hbm_event_to_dev(event_type),
&eq_entry->hbm_ecc_data);
+ fw_fatal_err_flag = HL_RESET_FW_FATAL_ERR;
goto reset_device;
case GAUDI_EVENT_HBM0_SPI_1:
@@ -8168,9 +8177,9 @@ static void gaudi_handle_eqe(struct hl_device *hdev,
reset_device:
if (hdev->asic_prop.fw_security_enabled)
- hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW);
+ hl_device_reset(hdev, HL_RESET_HARD | HL_RESET_FW | fw_fatal_err_flag);
else if (hdev->hard_reset_on_fw_events)
- hl_device_reset(hdev, HL_RESET_HARD);
+ hl_device_reset(hdev, HL_RESET_HARD | fw_fatal_err_flag);
else
hl_fw_unmask_irq(hdev, event_type);
}
@@ -9436,9 +9445,9 @@ static const struct hl_asic_funcs gaudi_funcs = {
.debugfs_read64 = gaudi_debugfs_read64,
.debugfs_write64 = gaudi_debugfs_write64,
.debugfs_read_dma = gaudi_debugfs_read_dma,
- .add_device_attr = gaudi_add_device_attr,
+ .add_device_attr = hl_add_device_attr,
.handle_eqe = gaudi_handle_eqe,
- .set_pll_profile = gaudi_set_pll_profile,
+ .set_pll_profile = hl_set_pll_profile,
.get_events_stat = gaudi_get_events_stat,
.read_pte = gaudi_read_pte,
.write_pte = gaudi_write_pte,
@@ -9462,7 +9471,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.halt_coresight = gaudi_halt_coresight,
.ctx_init = gaudi_ctx_init,
.ctx_fini = gaudi_ctx_fini,
- .get_clk_rate = gaudi_get_clk_rate,
+ .get_clk_rate = hl_get_clk_rate,
.get_queue_id_for_cq = gaudi_get_queue_id_for_cq,
.load_firmware_to_device = gaudi_load_firmware_to_device,
.load_boot_fit_to_device = gaudi_load_boot_fit_to_device,
diff --git a/drivers/misc/habanalabs/gaudi/gaudiP.h b/drivers/misc/habanalabs/gaudi/gaudiP.h
index bbbf1c343e75..f325e36a71e6 100644
--- a/drivers/misc/habanalabs/gaudi/gaudiP.h
+++ b/drivers/misc/habanalabs/gaudi/gaudiP.h
@@ -319,7 +319,6 @@ struct gaudi_internal_qman_info {
* the actual number of internal queues because they are not in
* consecutive order.
* @hbm_bar_cur_addr: current address of HBM PCI bar.
- * @max_freq_value: current max clk frequency.
* @events: array that holds all event id's
* @events_stat: array that holds histogram of all received events.
* @events_stat_aggregate: same as events_stat but doesn't get cleared on reset
@@ -345,7 +344,6 @@ struct gaudi_device {
struct gaudi_collective_properties collective_props;
u64 hbm_bar_cur_addr;
- u64 max_freq_value;
u32 events[GAUDI_EVENT_SIZE];
u32 events_stat[GAUDI_EVENT_SIZE];
@@ -359,10 +357,8 @@ void gaudi_init_security(struct hl_device *hdev);
void gaudi_ack_protection_bits_errors(struct hl_device *hdev);
void gaudi_add_device_attr(struct hl_device *hdev,
struct attribute_group *dev_attr_grp);
-void gaudi_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq);
int gaudi_debug_coresight(struct hl_device *hdev, void *data);
void gaudi_halt_coresight(struct hl_device *hdev);
-int gaudi_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
void gaudi_mmu_prepare_reg(struct hl_device *hdev, u64 reg, u32 asid);
#endif /* GAUDIP_H_ */
diff --git a/drivers/misc/habanalabs/gaudi/gaudi_security.c b/drivers/misc/habanalabs/gaudi/gaudi_security.c
index cb265c00cf73..25ac87cebd45 100644
--- a/drivers/misc/habanalabs/gaudi/gaudi_security.c
+++ b/drivers/misc/habanalabs/gaudi/gaudi_security.c
@@ -8,16 +8,21 @@
#include "gaudiP.h"
#include "../include/gaudi/asic_reg/gaudi_regs.h"
-#define GAUDI_NUMBER_OF_RR_REGS 24
-#define GAUDI_NUMBER_OF_LBW_RANGES 12
+#define GAUDI_NUMBER_OF_LBW_RR_REGS 28
+#define GAUDI_NUMBER_OF_HBW_RR_REGS 24
+#define GAUDI_NUMBER_OF_LBW_RANGES 10
-static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_HIT_WPROT,
mmDMA_IF_W_S_DMA0_HIT_WPROT,
mmDMA_IF_W_S_DMA1_HIT_WPROT,
+ mmDMA_IF_E_S_SOB_HIT_WPROT,
mmDMA_IF_E_S_DMA0_HIT_WPROT,
mmDMA_IF_E_S_DMA1_HIT_WPROT,
+ mmDMA_IF_W_N_SOB_HIT_WPROT,
mmDMA_IF_W_N_DMA0_HIT_WPROT,
mmDMA_IF_W_N_DMA1_HIT_WPROT,
+ mmDMA_IF_E_N_SOB_HIT_WPROT,
mmDMA_IF_E_N_DMA0_HIT_WPROT,
mmDMA_IF_E_N_DMA1_HIT_WPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AW,
@@ -38,13 +43,17 @@ static u64 gaudi_rr_lbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AW,
};
-static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_HIT_RPROT,
mmDMA_IF_W_S_DMA0_HIT_RPROT,
mmDMA_IF_W_S_DMA1_HIT_RPROT,
+ mmDMA_IF_E_S_SOB_HIT_RPROT,
mmDMA_IF_E_S_DMA0_HIT_RPROT,
mmDMA_IF_E_S_DMA1_HIT_RPROT,
+ mmDMA_IF_W_N_SOB_HIT_RPROT,
mmDMA_IF_W_N_DMA0_HIT_RPROT,
mmDMA_IF_W_N_DMA1_HIT_RPROT,
+ mmDMA_IF_E_N_SOB_HIT_RPROT,
mmDMA_IF_E_N_DMA0_HIT_RPROT,
mmDMA_IF_E_N_DMA1_HIT_RPROT,
mmSIF_RTR_0_LBW_RANGE_PROT_HIT_AR,
@@ -65,13 +74,17 @@ static u64 gaudi_rr_lbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_HIT_AR,
};
-static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MIN_WPROT_0,
mmDMA_IF_W_S_DMA0_MIN_WPROT_0,
mmDMA_IF_W_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_S_SOB_MIN_WPROT_0,
mmDMA_IF_E_S_DMA0_MIN_WPROT_0,
mmDMA_IF_E_S_DMA1_MIN_WPROT_0,
+ mmDMA_IF_W_N_SOB_MIN_WPROT_0,
mmDMA_IF_W_N_DMA0_MIN_WPROT_0,
mmDMA_IF_W_N_DMA1_MIN_WPROT_0,
+ mmDMA_IF_E_N_SOB_MIN_WPROT_0,
mmDMA_IF_E_N_DMA0_MIN_WPROT_0,
mmDMA_IF_E_N_DMA1_MIN_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AW_0,
@@ -92,13 +105,17 @@ static u64 gaudi_rr_lbw_min_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AW_0,
};
-static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MAX_WPROT_0,
mmDMA_IF_W_S_DMA0_MAX_WPROT_0,
mmDMA_IF_W_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_S_SOB_MAX_WPROT_0,
mmDMA_IF_E_S_DMA0_MAX_WPROT_0,
mmDMA_IF_E_S_DMA1_MAX_WPROT_0,
+ mmDMA_IF_W_N_SOB_MAX_WPROT_0,
mmDMA_IF_W_N_DMA0_MAX_WPROT_0,
mmDMA_IF_W_N_DMA1_MAX_WPROT_0,
+ mmDMA_IF_E_N_SOB_MAX_WPROT_0,
mmDMA_IF_E_N_DMA0_MAX_WPROT_0,
mmDMA_IF_E_N_DMA1_MAX_WPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AW_0,
@@ -119,13 +136,17 @@ static u64 gaudi_rr_lbw_max_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AW_0,
};
-static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MIN_RPROT_0,
mmDMA_IF_W_S_DMA0_MIN_RPROT_0,
mmDMA_IF_W_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_S_SOB_MIN_RPROT_0,
mmDMA_IF_E_S_DMA0_MIN_RPROT_0,
mmDMA_IF_E_S_DMA1_MIN_RPROT_0,
+ mmDMA_IF_W_N_SOB_MIN_RPROT_0,
mmDMA_IF_W_N_DMA0_MIN_RPROT_0,
mmDMA_IF_W_N_DMA1_MIN_RPROT_0,
+ mmDMA_IF_E_N_SOB_MIN_RPROT_0,
mmDMA_IF_E_N_DMA0_MIN_RPROT_0,
mmDMA_IF_E_N_DMA1_MIN_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MIN_AR_0,
@@ -146,13 +167,17 @@ static u64 gaudi_rr_lbw_min_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MIN_AR_0,
};
-static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_LBW_RR_REGS] = {
+ mmDMA_IF_W_S_SOB_MAX_RPROT_0,
mmDMA_IF_W_S_DMA0_MAX_RPROT_0,
mmDMA_IF_W_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_S_SOB_MAX_RPROT_0,
mmDMA_IF_E_S_DMA0_MAX_RPROT_0,
mmDMA_IF_E_S_DMA1_MAX_RPROT_0,
+ mmDMA_IF_W_N_SOB_MAX_RPROT_0,
mmDMA_IF_W_N_DMA0_MAX_RPROT_0,
mmDMA_IF_W_N_DMA1_MAX_RPROT_0,
+ mmDMA_IF_E_N_SOB_MAX_RPROT_0,
mmDMA_IF_E_N_DMA0_MAX_RPROT_0,
mmDMA_IF_E_N_DMA1_MAX_RPROT_0,
mmSIF_RTR_0_LBW_RANGE_PROT_MAX_AR_0,
@@ -173,7 +198,7 @@ static u64 gaudi_rr_lbw_max_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_7_LBW_RANGE_PROT_MAX_AR_0,
};
-static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AW,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AW,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AW,
@@ -200,7 +225,7 @@ static u64 gaudi_rr_hbw_hit_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AW
};
-static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_HIT_AR,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_HIT_AR,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_HIT_AR,
@@ -227,7 +252,7 @@ static u64 gaudi_rr_hbw_hit_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_HIT_AR
};
-static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AW_0,
@@ -254,7 +279,7 @@ static u64 gaudi_rr_hbw_base_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AW_0
};
-static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AW_0,
@@ -281,7 +306,7 @@ static u64 gaudi_rr_hbw_base_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AW_0
};
-static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AW_0,
@@ -308,7 +333,7 @@ static u64 gaudi_rr_hbw_mask_low_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AW_0
};
-static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AW_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AW_0,
@@ -335,7 +360,7 @@ static u64 gaudi_rr_hbw_mask_high_aw_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_HIGH_AW_0
};
-static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_LOW_AR_0,
@@ -362,7 +387,7 @@ static u64 gaudi_rr_hbw_base_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_LOW_AR_0
};
-static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_BASE_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_BASE_HIGH_AR_0,
@@ -389,7 +414,7 @@ static u64 gaudi_rr_hbw_base_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_BASE_HIGH_AR_0
};
-static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_LOW_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_LOW_AR_0,
@@ -416,7 +441,7 @@ static u64 gaudi_rr_hbw_mask_low_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
mmNIF_RTR_CTRL_7_RANGE_SEC_MASK_LOW_AR_0
};
-static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_RR_REGS] = {
+static u64 gaudi_rr_hbw_mask_high_ar_regs[GAUDI_NUMBER_OF_HBW_RR_REGS] = {
mmDMA_IF_W_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_W_S_DOWN_CH1_RANGE_SEC_MASK_HIGH_AR_0,
mmDMA_IF_E_S_DOWN_CH0_RANGE_SEC_MASK_HIGH_AR_0,
@@ -12849,50 +12874,44 @@ static void gaudi_init_range_registers_lbw(struct hl_device *hdev)
u32 lbw_rng_end[GAUDI_NUMBER_OF_LBW_RANGES];
int i, j;
- lbw_rng_start[0] = (0xFBFE0000 & 0x3FFFFFF) - 1;
- lbw_rng_end[0] = (0xFBFFF000 & 0x3FFFFFF) + 1;
+ lbw_rng_start[0] = (0xFC0E8000 & 0x3FFFFFF) - 1; /* 0x000E7FFF */
+ lbw_rng_end[0] = (0xFC11FFFF & 0x3FFFFFF) + 1; /* 0x00120000 */
- lbw_rng_start[1] = (0xFC0E8000 & 0x3FFFFFF) - 1;
- lbw_rng_end[1] = (0xFC120000 & 0x3FFFFFF) + 1;
+ lbw_rng_start[1] = (0xFC1E8000 & 0x3FFFFFF) - 1; /* 0x001E7FFF */
+ lbw_rng_end[1] = (0xFC48FFFF & 0x3FFFFFF) + 1; /* 0x00490000 */
- lbw_rng_start[2] = (0xFC1E8000 & 0x3FFFFFF) - 1;
- lbw_rng_end[2] = (0xFC48FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[2] = (0xFC600000 & 0x3FFFFFF) - 1; /* 0x005FFFFF */
+ lbw_rng_end[2] = (0xFCC48FFF & 0x3FFFFFF) + 1; /* 0x00C49000 */
- lbw_rng_start[3] = (0xFC600000 & 0x3FFFFFF) - 1;
- lbw_rng_end[3] = (0xFCC48FFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[3] = (0xFCC4A000 & 0x3FFFFFF) - 1; /* 0x00C49FFF */
+ lbw_rng_end[3] = (0xFCCDFFFF & 0x3FFFFFF) + 1; /* 0x00CE0000 */
- lbw_rng_start[4] = (0xFCC4A000 & 0x3FFFFFF) - 1;
- lbw_rng_end[4] = (0xFCCDFFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[4] = (0xFCCE4000 & 0x3FFFFFF) - 1; /* 0x00CE3FFF */
+ lbw_rng_end[4] = (0xFCD1FFFF & 0x3FFFFFF) + 1; /* 0x00D20000 */
- lbw_rng_start[5] = (0xFCCE4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[5] = (0xFCD1FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[5] = (0xFCD24000 & 0x3FFFFFF) - 1; /* 0x00D23FFF */
+ lbw_rng_end[5] = (0xFCD5FFFF & 0x3FFFFFF) + 1; /* 0x00D60000 */
- lbw_rng_start[6] = (0xFCD24000 & 0x3FFFFFF) - 1;
- lbw_rng_end[6] = (0xFCD5FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[6] = (0xFCD64000 & 0x3FFFFFF) - 1; /* 0x00D63FFF */
+ lbw_rng_end[6] = (0xFCD9FFFF & 0x3FFFFFF) + 1; /* 0x00DA0000 */
- lbw_rng_start[7] = (0xFCD64000 & 0x3FFFFFF) - 1;
- lbw_rng_end[7] = (0xFCD9FFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[7] = (0xFCDA4000 & 0x3FFFFFF) - 1; /* 0x00DA3FFF */
+ lbw_rng_end[7] = (0xFCDDFFFF & 0x3FFFFFF) + 1; /* 0x00DE0000 */
- lbw_rng_start[8] = (0xFCDA4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[8] = (0xFCDDFFFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[8] = (0xFCDE4000 & 0x3FFFFFF) - 1; /* 0x00DE3FFF */
+ lbw_rng_end[8] = (0xFCE05FFF & 0x3FFFFFF) + 1; /* 0x00E06000 */
- lbw_rng_start[9] = (0xFCDE4000 & 0x3FFFFFF) - 1;
- lbw_rng_end[9] = (0xFCE05FFF & 0x3FFFFFF) + 1;
+ lbw_rng_start[9] = (0xFCFC9000 & 0x3FFFFFF) - 1; /* 0x00FC8FFF */
+ lbw_rng_end[9] = (0xFFFFFFFE & 0x3FFFFFF) + 1; /* 0x03FFFFFF */
- lbw_rng_start[10] = (0xFEC43000 & 0x3FFFFFF) - 1;
- lbw_rng_end[10] = (0xFEC43FFF & 0x3FFFFFF) + 1;
-
- lbw_rng_start[11] = (0xFE484000 & 0x3FFFFFF) - 1;
- lbw_rng_end[11] = (0xFE484FFF & 0x3FFFFFF) + 1;
-
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++) {
WREG32(gaudi_rr_lbw_hit_aw_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
WREG32(gaudi_rr_lbw_hit_ar_regs[i],
(1 << GAUDI_NUMBER_OF_LBW_RANGES) - 1);
}
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++)
+ for (i = 0 ; i < GAUDI_NUMBER_OF_LBW_RR_REGS ; i++)
for (j = 0 ; j < GAUDI_NUMBER_OF_LBW_RANGES ; j++) {
WREG32(gaudi_rr_lbw_min_aw_regs[i] + (j << 2),
lbw_rng_start[j]);
@@ -12939,12 +12958,12 @@ static void gaudi_init_range_registers_hbw(struct hl_device *hdev)
* 6th range is the host
*/
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_hit_aw_regs[i], 0x1F);
WREG32(gaudi_rr_hbw_hit_ar_regs[i], 0x1D);
}
- for (i = 0 ; i < GAUDI_NUMBER_OF_RR_REGS ; i++) {
+ for (i = 0 ; i < GAUDI_NUMBER_OF_HBW_RR_REGS ; i++) {
WREG32(gaudi_rr_hbw_base_low_aw_regs[i], dram_addr_lo);
WREG32(gaudi_rr_hbw_base_low_ar_regs[i], dram_addr_lo);
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 031c1849da14..5536e8c27bd5 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -471,6 +471,8 @@ int goya_set_fixed_properties(struct hl_device *hdev)
prop->server_type = HL_SERVER_TYPE_UNKNOWN;
+ prop->clk_pll_index = HL_GOYA_MME_PLL;
+
return 0;
}
@@ -622,6 +624,7 @@ static int goya_early_init(struct hl_device *hdev)
}
prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID);
+ hdev->dram_pci_bar_start = pci_resource_start(pdev, DDR_BAR_ID);
/* If FW security is enabled at this point it means no access to ELBI */
if (hdev->asic_prop.fw_security_enabled) {
@@ -959,7 +962,7 @@ static int goya_sw_init(struct hl_device *hdev)
spin_lock_init(&goya->hw_queues_lock);
hdev->supports_coresight = true;
hdev->supports_soft_reset = true;
- hdev->allow_external_soft_reset = true;
+ hdev->allow_inference_soft_reset = true;
hdev->supports_wait_for_multi_cs = false;
hdev->asic_funcs->set_pci_memory_regions(hdev);
@@ -4829,6 +4832,12 @@ void goya_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry)
case GOYA_ASYNC_EVENT_ID_PLL0 ... GOYA_ASYNC_EVENT_ID_PLL6:
case GOYA_ASYNC_EVENT_ID_AXI_ECC:
case GOYA_ASYNC_EVENT_ID_L2_RAM_ECC:
+ goya_print_irq_info(hdev, event_type, false);
+ if (hdev->hard_reset_on_fw_events)
+ hl_device_reset(hdev, (HL_RESET_HARD |
+ HL_RESET_FW_FATAL_ERR));
+ break;
+
case GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET:
goya_print_irq_info(hdev, event_type, false);
if (hdev->hard_reset_on_fw_events)
@@ -5649,7 +5658,7 @@ static const struct hl_asic_funcs goya_funcs = {
.halt_coresight = goya_halt_coresight,
.ctx_init = goya_ctx_init,
.ctx_fini = goya_ctx_fini,
- .get_clk_rate = goya_get_clk_rate,
+ .get_clk_rate = hl_get_clk_rate,
.get_queue_id_for_cq = goya_get_queue_id_for_cq,
.load_firmware_to_device = goya_load_firmware_to_device,
.load_boot_fit_to_device = goya_load_boot_fit_to_device,
diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h
index 0b05da614729..97add7b04f82 100644
--- a/drivers/misc/habanalabs/goya/goyaP.h
+++ b/drivers/misc/habanalabs/goya/goyaP.h
@@ -235,7 +235,6 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size,
void *vaddr);
void goya_mmu_remove_device_cpu_mappings(struct hl_device *hdev);
-int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk);
u32 goya_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx);
u64 goya_get_device_time(struct hl_device *hdev);
diff --git a/drivers/misc/habanalabs/goya/goya_hwmgr.c b/drivers/misc/habanalabs/goya/goya_hwmgr.c
index 7d007125727f..59b2624ff81a 100644
--- a/drivers/misc/habanalabs/goya/goya_hwmgr.c
+++ b/drivers/misc/habanalabs/goya/goya_hwmgr.c
@@ -32,37 +32,6 @@ void goya_set_pll_profile(struct hl_device *hdev, enum hl_pll_frequency freq)
}
}
-int goya_get_clk_rate(struct hl_device *hdev, u32 *cur_clk, u32 *max_clk)
-{
- long value;
-
- if (!hl_device_operational(hdev, NULL))
- return -ENODEV;
-
- value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, false);
-
- if (value < 0) {
- dev_err(hdev->dev, "Failed to retrieve device max clock %ld\n",
- value);
- return value;
- }
-
- *max_clk = (value / 1000 / 1000);
-
- value = hl_get_frequency(hdev, HL_GOYA_MME_PLL, true);
-
- if (value < 0) {
- dev_err(hdev->dev,
- "Failed to retrieve device current clock %ld\n",
- value);
- return value;
- }
-
- *cur_clk = (value / 1000 / 1000);
-
- return 0;
-}
-
static ssize_t mme_clk_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
diff --git a/drivers/misc/habanalabs/include/common/cpucp_if.h b/drivers/misc/habanalabs/include/common/cpucp_if.h
index 9ff6a448f0d4..ae13231fda94 100644
--- a/drivers/misc/habanalabs/include/common/cpucp_if.h
+++ b/drivers/misc/habanalabs/include/common/cpucp_if.h
@@ -542,11 +542,14 @@ enum cpucp_packet_rc {
*/
enum cpucp_temp_type {
cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
cpucp_temp_max = 6,
cpucp_temp_max_hyst,
cpucp_temp_crit,
cpucp_temp_crit_hyst,
cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
cpucp_temp_highest = 22,
cpucp_temp_reset_history = 23
};
@@ -555,6 +558,7 @@ enum cpucp_in_attributes {
cpucp_in_input,
cpucp_in_min,
cpucp_in_max,
+ cpucp_in_lowest = 6,
cpucp_in_highest = 7,
cpucp_in_reset_history
};
@@ -563,6 +567,7 @@ enum cpucp_curr_attributes {
cpucp_curr_input,
cpucp_curr_min,
cpucp_curr_max,
+ cpucp_curr_lowest = 6,
cpucp_curr_highest = 7,
cpucp_curr_reset_history
};
@@ -599,6 +604,16 @@ enum cpucp_pll_type_attributes {
};
/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
* MSI type enumeration table for all ASICs and future SW versions.
* For future ASIC-LKD compatibility, we can only add new enumerations.
* at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
@@ -731,6 +746,9 @@ struct cpucp_security_info {
* @pll_map: Bit map of supported PLLs for current ASIC version.
* @mme_binning_mask: MME binning mask,
* (0 = functional, 1 = binned)
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ *			(0 = functional, 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
*/
struct cpucp_info {
struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
@@ -749,7 +767,9 @@ struct cpucp_info {
__le64 reserved3;
__le64 reserved4;
__u8 reserved5;
- __u8 pad[7];
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 pad[5];
struct cpucp_security_info sec_info;
__le32 reserved6;
__u8 pll_map[PLL_MAP_LEN];
diff --git a/drivers/misc/habanalabs/include/common/hl_boot_if.h b/drivers/misc/habanalabs/include/common/hl_boot_if.h
index 3099653234e4..2626df6ef3ef 100644
--- a/drivers/misc/habanalabs/include/common/hl_boot_if.h
+++ b/drivers/misc/habanalabs/include/common/hl_boot_if.h
@@ -15,6 +15,28 @@
#define VERSION_MAX_LEN 128
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
/*
* CPU error bits in BOOT_ERROR registers
*
@@ -78,25 +100,13 @@
* CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
* should be contacted.
*
- * CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD HALT ACK from ARC0 is not received
- * within specified retries after issuing
- * HALT request. ARC0 appears to be in bad
- * reset.
- *
- * CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD HALT ACK from ARC1 is not received
- * within specified retries after issuing
- * HALT request. ARC1 appears to be in bad
- * reset.
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ *					the execution of ppboot or preboot,
+ *					for example: a stack overflow.
*
- * CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD RUN ACK from ARC0 is not received
- * within specified timeout after issuing
- * RUN request. ARC0 appears to be in bad
- * reset.
- *
- * CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD RUN ACK from ARC1 is not received
- * within specified timeout after issuing
- * RUN request. ARC1 appears to be in bad
- * reset.
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
*
* CPU_BOOT_ERR0_ENABLED Error registers enabled.
* This is a main indication that the
@@ -104,26 +114,57 @@
* registers. Meaning the error bits are
* not garbage, but actual error statuses.
*/
-#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << 0)
-#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << 1)
-#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << 2)
-#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << 3)
-#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << 4)
-#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << 5)
-#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << 6)
-#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << 7)
-#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << 8)
-#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << 9)
-#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << 10)
-#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << 11)
-#define CPU_BOOT_ERR0_PLL_FAIL (1 << 12)
-#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << 13)
-#define CPU_BOOT_ERR0_ARC0_HALT_ACK_NOT_RCVD (1 << 14)
-#define CPU_BOOT_ERR0_ARC1_HALT_ACK_NOT_RCVD (1 << 15)
-#define CPU_BOOT_ERR0_ARC0_RUN_ACK_NOT_RCVD (1 << 16)
-#define CPU_BOOT_ERR0_ARC1_RUN_ACK_NOT_RCVD (1 << 17)
-#define CPU_BOOT_ERR0_ENABLED (1 << 31)
-#define CPU_BOOT_ERR1_ENABLED (1 << 31)
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
/*
* BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
@@ -233,7 +274,7 @@
* was not served before.
* Initialized in: linux
*
- * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
* prevent IRQs overriding each other.
* Initialized in: linux
*
@@ -252,6 +293,11 @@
* where a bit is set if the engine is not idle.
* Initialized in: linux
*
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ * If set, means f/w supports proprietary
+ * HWMON enum mapping to cpucp enums.
+ * Initialized in: linux
+ *
* CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
* This is a main indication that the
* running FW populates the device status
@@ -261,34 +307,35 @@
* Initialized in: preboot
*
*/
-#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << 0)
-#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << 1)
-#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << 2)
-#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << 3)
-#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << 4)
-#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << 5)
-#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << 6)
-#define CPU_BOOT_DEV_STS0_RL_EN (1 << 7)
-#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << 8)
-#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << 9)
-#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << 10)
-#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << 11)
-#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << 12)
-#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << 13)
-#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << 14)
-#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << 15)
-#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << 16)
-#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << 17)
-#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << 18)
-#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << 19)
-#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << 20)
-#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << 21)
-#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << 22)
-#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << 23)
-#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << 24)
-#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << 25)
-#define CPU_BOOT_DEV_STS0_ENABLED (1 << 31)
-#define CPU_BOOT_DEV_STS1_ENABLED (1 << 31)
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
enum cpu_boot_status {
CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
@@ -405,6 +452,8 @@ struct cpu_dyn_regs {
enum comms_msg_type {
HL_COMMS_DESC_TYPE = 0,
HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
};
/* TODO: remove this struct after the code is updated to use message */
@@ -464,6 +513,9 @@ struct lkd_fw_comms_msg {
struct {
__u8 reset_cause;
};
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
};
};
@@ -507,8 +559,6 @@ struct lkd_fw_comms_msg {
* COMMS_SKIP_BMC Perform actions required for BMC-less servers.
* Do not wait for BMC response.
*
- * COMMS_LOW_PLL_OPP Initialize PLLs for low OPP.
- *
* COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC only that the memory
* space is allocated in a ELBI access only
* address range.
@@ -524,7 +574,6 @@ enum comms_cmd {
COMMS_RST_DEV = 6,
COMMS_GOTO_WFE = 7,
COMMS_SKIP_BMC = 8,
- COMMS_LOW_PLL_OPP = 9,
COMMS_PREP_DESC_ELBI = 10,
COMMS_INVLD_LAST
};
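With the masks above now derived from enum bit positions, a status word read from one of the two BOOT_ERROR registers can be decoded by position instead of a hand-written chain of tests. A minimal sketch of such a decoder, assuming only the hl_boot_if.h definitions from this hunk; the name table and helper below are illustrative and not part of the driver:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/types.h>

/* Illustrative name table; only a few of the CPU_BOOT_ERR_* entries shown. */
static const char * const boot_err_str[CPU_BOOT_ERR_LAST] = {
	[CPU_BOOT_ERR_DRAM_INIT_FAIL]   = "DRAM init failed",
	[CPU_BOOT_ERR_FIT_CORRUPTED]    = "FIT corrupted",
	[CPU_BOOT_ERR_BOOT_FW_CRIT_ERR] = "critical boot f/w error",
	[CPU_BOOT_ERR_BINNING_FAIL]     = "binning settings failed",
};

/* Hypothetical helper: report set bits only when the f/w marked the
 * register as valid via CPU_BOOT_ERR0_ENABLED.
 */
static void report_boot_errors(struct device *dev, u32 err0)
{
	unsigned int i;

	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
		return; /* error bits are garbage, not actual statuses */

	for (i = 0; i < 32; i++)
		if ((err0 & BIT(i)) && i != CPU_BOOT_ERR_ENABLED && boot_err_str[i])
			dev_err(dev, "boot error: %s\n", boot_err_str[i]);
}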
diff --git a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
index ffdfbd9b3220..1a6576666794 100644
--- a/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
+++ b/drivers/misc/habanalabs/include/gaudi/asic_reg/gaudi_regs.h
@@ -308,6 +308,8 @@
#define mmPCIE_AUX_FLR_CTRL 0xC07394
#define mmPCIE_AUX_DBI 0xC07490
+#define mmPCIE_CORE_MSI_REQ 0xC04100
+
#define mmPSOC_PCI_PLL_NR 0xC72100
#define mmSRAM_W_PLL_NR 0x4C8100
#define mmPSOC_HBM_PLL_NR 0xC74100
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
index 34ca4fe50d91..2dba02757d37 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_fw_if.h
@@ -8,8 +8,6 @@
#ifndef GAUDI_FW_IF_H
#define GAUDI_FW_IF_H
-#include <linux/types.h>
-
#define GAUDI_EVENT_QUEUE_MSI_IDX 8
#define GAUDI_NIC_PORT1_MSI_IDX 10
#define GAUDI_NIC_PORT3_MSI_IDX 12
@@ -78,13 +76,13 @@ struct gaudi_nic_status {
__u32 high_ber_cnt;
};
-struct gaudi_flops_2_data {
+struct gaudi_cold_rst_data {
union {
struct {
- __u32 spsram_init_done : 1;
- __u32 reserved : 31;
+ u32 spsram_init_done : 1;
+ u32 reserved : 31;
};
- __u32 data;
+ __le32 data;
};
};
diff --git a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
index b9bd5a7f71eb..92f25c2ae083 100644
--- a/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
+++ b/drivers/misc/habanalabs/include/gaudi/gaudi_reg_map.h
@@ -33,6 +33,7 @@
#define mmRDWR_TEST mmPSOC_GLOBAL_CONF_SCRATCHPAD_30
#define mmBTL_ID mmPSOC_GLOBAL_CONF_SCRATCHPAD_31
#define mmPREBOOT_PCIE_EN mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_1
+#define mmCOLD_RST_DATA mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_2
#define mmUPD_PENDING_STS mmPSOC_GLOBAL_CONF_COLD_RST_FLOPS_3
#endif /* GAUDI_REG_MAP_H_ */
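The gaudi_cold_rst_data union added above, together with the new mmCOLD_RST_DATA alias for COLD_RST_FLOPS_2, gives the driver a typed view of a scratchpad shared with firmware as a little-endian word. A minimal read-modify-write sketch, assuming the habanalabs driver's RREG32()/WREG32() MMIO accessors and an hl_device pointer in scope; the function name is illustrative:

static void gaudi_mark_spsram_init_done(struct hl_device *hdev)
{
	struct gaudi_cold_rst_data cold_rst_data;

	cold_rst_data.data = cpu_to_le32(RREG32(mmCOLD_RST_DATA));
	cold_rst_data.spsram_init_done = 1;
	WREG32(mmCOLD_RST_DATA, le32_to_cpu(cold_rst_data.data));
}

Keeping the raw word as __le32 while the bitfield members stay u32 documents that only the packed register value crosses the host/firmware boundary.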
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index 989d7d129469..2165ec35a343 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -34,7 +34,6 @@ struct hisi_hikey_usb {
struct device *dev;
struct gpio_desc *otg_switch;
struct gpio_desc *typec_vbus;
- struct gpio_desc *hub_vbus;
struct gpio_desc *reset;
struct regulator *regulator;
@@ -54,9 +53,6 @@ static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value)
{
int ret, status;
- if (hisi_hikey_usb->hub_vbus)
- gpiod_set_value_cansleep(hisi_hikey_usb->hub_vbus, value);
-
if (!hisi_hikey_usb->regulator)
return;
@@ -147,75 +143,50 @@ static int hub_usb_role_switch_set(struct usb_role_switch *sw, enum usb_role rol
return 0;
}
-static int hisi_hikey_usb_parse_kirin970(struct platform_device *pdev,
+static int hisi_hikey_usb_of_role_switch(struct platform_device *pdev,
struct hisi_hikey_usb *hisi_hikey_usb)
{
- struct regulator *regulator;
-
- regulator = devm_regulator_get(&pdev->dev, "hub-vdd");
- if (IS_ERR(regulator)) {
- if (PTR_ERR(regulator) == -EPROBE_DEFER) {
- dev_info(&pdev->dev,
- "waiting for hub-vdd-supply to be probed\n");
- return PTR_ERR(regulator);
- }
- dev_err(&pdev->dev,
- "get hub-vdd-supply failed with error %ld\n",
- PTR_ERR(regulator));
- return PTR_ERR(regulator);
- }
- hisi_hikey_usb->regulator = regulator;
-
- hisi_hikey_usb->reset = devm_gpiod_get(&pdev->dev, "hub_reset_en_gpio",
- GPIOD_OUT_HIGH);
- return PTR_ERR_OR_ZERO(hisi_hikey_usb->reset);
-}
-
-static int hisi_hikey_usb_probe(struct platform_device *pdev)
-{
struct device *dev = &pdev->dev;
- struct hisi_hikey_usb *hisi_hikey_usb;
struct usb_role_switch_desc hub_role_switch = {NULL};
- int ret;
- hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL);
- if (!hisi_hikey_usb)
- return -ENOMEM;
-
- hisi_hikey_usb->dev = &pdev->dev;
+ if (!device_property_read_bool(dev, "usb-role-switch"))
+ return 0;
hisi_hikey_usb->otg_switch = devm_gpiod_get(dev, "otg-switch",
GPIOD_OUT_HIGH);
- if (IS_ERR(hisi_hikey_usb->otg_switch))
+ if (IS_ERR(hisi_hikey_usb->otg_switch)) {
+ dev_err(dev, "get otg-switch failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->otg_switch));
return PTR_ERR(hisi_hikey_usb->otg_switch);
+ }
hisi_hikey_usb->typec_vbus = devm_gpiod_get(dev, "typec-vbus",
GPIOD_OUT_LOW);
- if (IS_ERR(hisi_hikey_usb->typec_vbus))
+ if (IS_ERR(hisi_hikey_usb->typec_vbus)) {
+ dev_err(dev, "get typec-vbus failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->typec_vbus));
return PTR_ERR(hisi_hikey_usb->typec_vbus);
+ }
- /* Parse Kirin 970-specific OF data */
- if (of_device_is_compatible(pdev->dev.of_node,
- "hisilicon,kirin970_hikey_usbhub")) {
- ret = hisi_hikey_usb_parse_kirin970(pdev, hisi_hikey_usb);
- if (ret)
- return ret;
- } else {
- /* hub-vdd33-en is optional */
- hisi_hikey_usb->hub_vbus = devm_gpiod_get_optional(dev, "hub-vdd33-en",
- GPIOD_OUT_HIGH);
- if (IS_ERR(hisi_hikey_usb->hub_vbus))
- return PTR_ERR(hisi_hikey_usb->hub_vbus);
+ hisi_hikey_usb->reset = devm_gpiod_get_optional(dev,
+ "hub-reset-en",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(hisi_hikey_usb->reset)) {
+ dev_err(dev, "get hub-reset-en failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->reset));
+ return PTR_ERR(hisi_hikey_usb->reset);
}
hisi_hikey_usb->dev_role_sw = usb_role_switch_get(dev);
if (!hisi_hikey_usb->dev_role_sw)
return -EPROBE_DEFER;
- if (IS_ERR(hisi_hikey_usb->dev_role_sw))
+ if (IS_ERR(hisi_hikey_usb->dev_role_sw)) {
+ dev_err(dev, "get device role switch failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->dev_role_sw));
return PTR_ERR(hisi_hikey_usb->dev_role_sw);
+ }
INIT_WORK(&hisi_hikey_usb->work, relay_set_role_switch);
- mutex_init(&hisi_hikey_usb->lock);
hub_role_switch.fwnode = dev_fwnode(dev);
hub_role_switch.set = hub_usb_role_switch_set;
@@ -225,10 +196,44 @@ static int hisi_hikey_usb_probe(struct platform_device *pdev)
&hub_role_switch);
if (IS_ERR(hisi_hikey_usb->hub_role_sw)) {
+ dev_err(dev,
+ "failed to register hub role with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->hub_role_sw));
usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
return PTR_ERR(hisi_hikey_usb->hub_role_sw);
}
+ return 0;
+}
+
+static int hisi_hikey_usb_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hisi_hikey_usb *hisi_hikey_usb;
+ int ret;
+
+ hisi_hikey_usb = devm_kzalloc(dev, sizeof(*hisi_hikey_usb), GFP_KERNEL);
+ if (!hisi_hikey_usb)
+ return -ENOMEM;
+
+ hisi_hikey_usb->dev = &pdev->dev;
+ mutex_init(&hisi_hikey_usb->lock);
+
+ hisi_hikey_usb->regulator = devm_regulator_get(dev, "hub-vdd");
+ if (IS_ERR(hisi_hikey_usb->regulator)) {
+ if (PTR_ERR(hisi_hikey_usb->regulator) == -EPROBE_DEFER) {
+ dev_info(dev, "waiting for hub-vdd-supply\n");
+ return PTR_ERR(hisi_hikey_usb->regulator);
+ }
+ dev_err(dev, "get hub-vdd-supply failed with error %ld\n",
+ PTR_ERR(hisi_hikey_usb->regulator));
+ return PTR_ERR(hisi_hikey_usb->regulator);
+ }
+
+ ret = hisi_hikey_usb_of_role_switch(pdev, hisi_hikey_usb);
+ if (ret)
+ return ret;
+
platform_set_drvdata(pdev, hisi_hikey_usb);
return 0;
@@ -238,18 +243,20 @@ static int hisi_hikey_usb_remove(struct platform_device *pdev)
{
struct hisi_hikey_usb *hisi_hikey_usb = platform_get_drvdata(pdev);
- if (hisi_hikey_usb->hub_role_sw)
+ if (hisi_hikey_usb->hub_role_sw) {
usb_role_switch_unregister(hisi_hikey_usb->hub_role_sw);
- if (hisi_hikey_usb->dev_role_sw)
- usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
+ if (hisi_hikey_usb->dev_role_sw)
+ usb_role_switch_put(hisi_hikey_usb->dev_role_sw);
+ } else {
+ hub_power_ctrl(hisi_hikey_usb, HUB_VBUS_POWER_OFF);
+ }
return 0;
}
static const struct of_device_id id_table_hisi_hikey_usb[] = {
- { .compatible = "hisilicon,gpio_hubv1" },
- { .compatible = "hisilicon,kirin970_hikey_usbhub" },
+ { .compatible = "hisilicon,usbhub" },
{}
};
MODULE_DEVICE_TABLE(of, id_table_hisi_hikey_usb);
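The probe rework above always requests the hub-vdd supply (propagating -EPROBE_DEFER) and only parses the role-switch GPIOs when the firmware node carries the usb-role-switch property. A stripped-down sketch of that probe pattern for an arbitrary platform driver, assuming the standard regulator, gpiod and device-property APIs; the supply and GPIO names are reused from the hunk purely for illustration:

#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>

static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct regulator *supply;
	struct gpio_desc *reset;

	supply = devm_regulator_get(dev, "hub-vdd");
	if (IS_ERR(supply))
		return PTR_ERR(supply); /* includes -EPROBE_DEFER */

	if (!device_property_read_bool(dev, "usb-role-switch"))
		return 0; /* the role-switch block is optional */

	reset = devm_gpiod_get_optional(dev, "hub-reset-en", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	return 0;
}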
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 70c5bb1e6f49..3a7808b796b1 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -878,7 +878,7 @@ static int lis3lv02d_add_fs(struct lis3lv02d *lis3)
return sysfs_create_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
}
-int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
+void lis3lv02d_remove_fs(struct lis3lv02d *lis3)
{
sysfs_remove_group(&lis3->pdev->dev.kobj, &lis3lv02d_attribute_group);
platform_device_unregister(lis3->pdev);
@@ -894,7 +894,6 @@ int lis3lv02d_remove_fs(struct lis3lv02d *lis3)
pm_runtime_set_suspended(lis3->pm_dev);
}
kfree(lis3->reg_cache);
- return 0;
}
EXPORT_SYMBOL_GPL(lis3lv02d_remove_fs);
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
index c394c0b08519..195bd2fd2eb5 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.h
+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
@@ -312,7 +312,7 @@ int lis3lv02d_joystick_enable(struct lis3lv02d *lis3);
void lis3lv02d_joystick_disable(struct lis3lv02d *lis3);
void lis3lv02d_poweroff(struct lis3lv02d *lis3);
int lis3lv02d_poweron(struct lis3lv02d *lis3);
-int lis3lv02d_remove_fs(struct lis3lv02d *lis3);
+void lis3lv02d_remove_fs(struct lis3lv02d *lis3);
int lis3lv02d_init_dt(struct lis3lv02d *lis3);
extern struct lis3lv02d lis3_dev;
diff --git a/drivers/misc/lis3lv02d/lis3lv02d_spi.c b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
index f664ed123730..9e40dfb60742 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d_spi.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d_spi.c
@@ -102,7 +102,9 @@ static int lis302dl_spi_remove(struct spi_device *spi)
lis3lv02d_joystick_disable(lis3);
lis3lv02d_poweroff(lis3);
- return lis3lv02d_remove_fs(&lis3_dev);
+ lis3lv02d_remove_fs(&lis3_dev);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c
index 4282b625200f..f4cb94a9aa9c 100644
--- a/drivers/misc/lkdtm/bugs.c
+++ b/drivers/misc/lkdtm/bugs.c
@@ -151,6 +151,83 @@ void lkdtm_REPORT_STACK(void)
pr_info("Stack offset: %d\n", (int)(stack_addr - (uintptr_t)&magic));
}
+static pid_t stack_canary_pid;
+static unsigned long stack_canary;
+static unsigned long stack_canary_offset;
+
+static noinline void __lkdtm_REPORT_STACK_CANARY(void *stack)
+{
+ int i = 0;
+ pid_t pid = task_pid_nr(current);
+ unsigned long *canary = (unsigned long *)stack;
+ unsigned long current_offset = 0, init_offset = 0;
+
+ /* Do our best to find the canary in a 16 word window ... */
+ for (i = 1; i < 16; i++) {
+ canary = (unsigned long *)stack + i;
+#ifdef CONFIG_STACKPROTECTOR
+ if (*canary == current->stack_canary)
+ current_offset = i;
+ if (*canary == init_task.stack_canary)
+ init_offset = i;
+#endif
+ }
+
+ if (current_offset == 0) {
+ /*
+ * If the canary doesn't match what's in the task_struct,
+ * we're either using a global canary or the stack frame
+ * layout changed.
+ */
+ if (init_offset != 0) {
+ pr_err("FAIL: global stack canary found at offset %ld (canary for pid %d matches init_task's)!\n",
+ init_offset, pid);
+ } else {
+ pr_warn("FAIL: did not correctly locate stack canary :(\n");
+ pr_expected_config(CONFIG_STACKPROTECTOR);
+ }
+
+ return;
+ } else if (init_offset != 0) {
+ pr_warn("WARNING: found both current and init_task canaries nearby?!\n");
+ }
+
+ canary = (unsigned long *)stack + current_offset;
+ if (stack_canary_pid == 0) {
+ stack_canary = *canary;
+ stack_canary_pid = pid;
+ stack_canary_offset = current_offset;
+ pr_info("Recorded stack canary for pid %d at offset %ld\n",
+ stack_canary_pid, stack_canary_offset);
+ } else if (pid == stack_canary_pid) {
+ pr_warn("ERROR: saw pid %d again -- please use a new pid\n", pid);
+ } else {
+ if (current_offset != stack_canary_offset) {
+ pr_warn("ERROR: canary offset changed from %ld to %ld!?\n",
+ stack_canary_offset, current_offset);
+ return;
+ }
+
+ if (*canary == stack_canary) {
+ pr_warn("FAIL: canary identical for pid %d and pid %d at offset %ld!\n",
+ stack_canary_pid, pid, current_offset);
+ } else {
+ pr_info("ok: stack canaries differ between pid %d and pid %d at offset %ld.\n",
+ stack_canary_pid, pid, current_offset);
+ /* Reset the test. */
+ stack_canary_pid = 0;
+ }
+ }
+}
+
+void lkdtm_REPORT_STACK_CANARY(void)
+{
+ /* Use default char array length that triggers stack protection. */
+ char data[8] __aligned(sizeof(void *)) = { };
+
+ __lkdtm_REPORT_STACK_CANARY((void *)&data);
+}
+
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
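REPORT_STACK_CANARY records the canary value and its frame offset for the first pid that runs it, then compares against a later pid; identical values at the same offset indicate a global rather than per-task canary. A userspace trigger sketch, assuming CONFIG_LKDTM is enabled and debugfs is mounted at /sys/kernel/debug; run it from two different processes so the pids differ:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/provoke-crash/DIRECT", "w");

	if (!f) {
		perror("open DIRECT");
		return 1;
	}
	fputs("REPORT_STACK_CANARY", f);
	fclose(f);
	return 0;
}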
diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c
index fe6fd34b8caf..609d9ee2acc0 100644
--- a/drivers/misc/lkdtm/core.c
+++ b/drivers/misc/lkdtm/core.c
@@ -111,6 +111,7 @@ static const struct crashtype crashtypes[] = {
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(CORRUPT_STACK_STRONG),
CRASHTYPE(REPORT_STACK),
+ CRASHTYPE(REPORT_STACK_CANARY),
CRASHTYPE(CORRUPT_LIST_ADD),
CRASHTYPE(CORRUPT_LIST_DEL),
CRASHTYPE(STACK_GUARD_PAGE_LEADING),
diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h
index c212a253edde..d6137c70ebbe 100644
--- a/drivers/misc/lkdtm/lkdtm.h
+++ b/drivers/misc/lkdtm/lkdtm.h
@@ -69,6 +69,7 @@ void lkdtm_EXHAUST_STACK(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_CORRUPT_STACK_STRONG(void);
void lkdtm_REPORT_STACK(void);
+void lkdtm_REPORT_STACK_CANARY(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index f5fd5b786607..0e0bcd0da852 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -47,3 +47,5 @@ config INTEL_MEI_TXE
Intel Bay Trail
source "drivers/misc/mei/hdcp/Kconfig"
+source "drivers/misc/mei/pxp/Kconfig"
+
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index f1c76f7ee804..d8e5165917f2 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -26,3 +26,4 @@ mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
+obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 99b5c1ecc444..be41843df75b 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -1298,7 +1298,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_STARTING) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
return 0;
}
@@ -1381,7 +1382,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_DR_SETUP) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
return 0;
}
@@ -1448,7 +1450,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
return 0;
}
@@ -1490,7 +1493,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->dev_state != MEI_DEV_INIT_CLIENTS ||
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
- if (dev->dev_state == MEI_DEV_POWER_DOWN) {
+ if (dev->dev_state == MEI_DEV_POWER_DOWN ||
+ dev->dev_state == MEI_DEV_POWERING_DOWN) {
dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
return 0;
}
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index cb34925e10f1..67bb6a25fd0a 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -92,6 +92,7 @@
#define MEI_DEV_ID_CDF 0x18D3 /* Cedar Fork */
#define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */
+#define MEI_DEV_ID_ICP_N 0x38E0 /* Ice Lake Point N */
#define MEI_DEV_ID_JSP_N 0x4DE0 /* Jasper Lake Point N */
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index c3393b383e59..3a45aaf002ac 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -96,6 +96,7 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
{MEI_PCI_DEVICE(MEI_DEV_ID_CMP_H_3, MEI_ME_PCH8_ITOUCH_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_N, MEI_ME_PCH12_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_LP, MEI_ME_PCH15_CFG)},
{MEI_PCI_DEVICE(MEI_DEV_ID_TGP_H, MEI_ME_PCH15_SPS_CFG)},
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index aec0483b8e72..fa20d9a27813 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -69,9 +69,9 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(36));
if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No suitable DMA available.\n");
goto end;
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
new file mode 100644
index 000000000000..4029b96afc04
--- /dev/null
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -0,0 +1,13 @@
+
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020, Intel Corporation. All rights reserved.
+#
+config INTEL_MEI_PXP
+ tristate "Intel PXP services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI Support for PXP Services on Intel platforms.
+
+ Enables the ME FW services required for PXP support through
+	  Intel's i915 display driver.
diff --git a/drivers/misc/mei/pxp/Makefile b/drivers/misc/mei/pxp/Makefile
new file mode 100644
index 000000000000..0329950d5794
--- /dev/null
+++ b/drivers/misc/mei/pxp/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2020, Intel Corporation. All rights reserved.
+#
+# Makefile - PXP client driver for Intel MEI Bus Driver.
+
+obj-$(CONFIG_INTEL_MEI_PXP) += mei_pxp.o
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
new file mode 100644
index 000000000000..f7380d387bab
--- /dev/null
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 - 2021 Intel Corporation
+ */
+
+/**
+ * DOC: MEI_PXP Client Driver
+ *
+ * The mei_pxp driver acts as a translation layer between PXP
+ * protocol implementer (I915) and ME FW by translating PXP
+ * negotiation messages to ME FW command payloads and vice versa.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/component.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_component.h>
+#include <drm/i915_pxp_tee_interface.h>
+
+#include "mei_pxp.h"
+
+/**
+ * mei_pxp_send_message() - Sends a PXP message to ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @message: a message buffer to send
+ * @size: size of the message
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_pxp_send_message(struct device *dev, const void *message, size_t size)
+{
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !message)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+	/* temporarily drop the const qualifier until the API is fixed */
+ byte = mei_cldev_send(cldev, (u8 *)message, size);
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_pxp_receive_message() - Receives a PXP message from ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @buffer: a message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on Success, <0 on Failure
+ */
+static int
+mei_pxp_receive_message(struct device *dev, void *buffer, size_t size)
+{
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !buffer)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ byte = mei_cldev_recv(cldev, buffer, size);
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ return byte;
+}
+
+static const struct i915_pxp_component_ops mei_pxp_ops = {
+ .owner = THIS_MODULE,
+ .send = mei_pxp_send_message,
+ .recv = mei_pxp_receive_message,
+};
+
+static int mei_component_master_bind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ comp_master->ops = &mei_pxp_ops;
+ comp_master->tee_dev = dev;
+ ret = component_bind_all(dev, comp_master);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_component_master_unbind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+
+ component_unbind_all(dev, comp_master);
+}
+
+static const struct component_master_ops mei_component_master_ops = {
+ .bind = mei_component_master_bind,
+ .unbind = mei_component_master_unbind,
+};
+
+/**
+ * mei_pxp_component_match - compare function for matching mei pxp.
+ *
+ * The function checks if the driver is i915, the subcomponent is PXP
+ * and the grandparent of pxp and the parent of i915 are the same
+ * PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_PXP)
+ * @data: compare data (mei pxp device)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
+static int mei_pxp_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ struct device *base = data;
+
+ if (strcmp(dev->driver->name, "i915") ||
+ subcomponent != I915_COMPONENT_PXP)
+ return 0;
+
+ base = base->parent;
+ if (!base)
+ return 0;
+
+ base = base->parent;
+ dev = dev->parent;
+
+ return (base && dev && dev == base);
+}
+
+static int mei_pxp_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct i915_pxp_component *comp_master;
+ struct component_match *master_match;
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
+ goto enable_err_exit;
+ }
+
+ comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
+ if (!comp_master) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ master_match = NULL;
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_pxp_component_match, &cldev->dev);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mei_cldev_set_drvdata(cldev, comp_master);
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_component_master_ops,
+ master_match);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ mei_cldev_set_drvdata(cldev, NULL);
+ kfree(comp_master);
+ mei_cldev_disable(cldev);
+enable_err_exit:
+ return ret;
+}
+
+static void mei_pxp_remove(struct mei_cl_device *cldev)
+{
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ component_master_del(&cldev->dev, &mei_component_master_ops);
+ kfree(comp_master);
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ ret = mei_cldev_disable(cldev);
+ if (ret)
+ dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
+}
+
+/* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID */
+#define MEI_GUID_PXP GUID_INIT(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+ 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+
+static struct mei_cl_device_id mei_pxp_tbl[] = {
+ { .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_pxp_tbl);
+
+static struct mei_cl_driver mei_pxp_driver = {
+ .id_table = mei_pxp_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = mei_pxp_probe,
+ .remove = mei_pxp_remove,
+};
+
+module_mei_cl_driver(mei_pxp_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI PXP");
diff --git a/drivers/misc/mei/pxp/mei_pxp.h b/drivers/misc/mei/pxp/mei_pxp.h
new file mode 100644
index 000000000000..e7b15373fefd
--- /dev/null
+++ b/drivers/misc/mei/pxp/mei_pxp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Authors:
+ * Vitaly Lubart <vitaly.lubart@intel.com>
+ */
+
+#ifndef __MEI_PXP_H__
+#define __MEI_PXP_H__
+
+/* me_pxp_status: Enumeration of all PXP Status Codes */
+enum me_pxp_status {
+ ME_PXP_STATUS_SUCCESS = 0x0000,
+
+};
+
+#endif /* __MEI_PXP_H__ */
diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c
index a68738f38252..e401a51596b9 100644
--- a/drivers/misc/ocxl/config.c
+++ b/drivers/misc/ocxl/config.c
@@ -33,18 +33,7 @@
static int find_dvsec(struct pci_dev *dev, int dvsec_id)
{
- int vsec = 0;
- u16 vendor, id;
-
- while ((vsec = pci_find_next_ext_capability(dev, vsec,
- OCXL_EXT_CAP_ID_DVSEC))) {
- pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET,
- &vendor);
- pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id);
- if (vendor == PCI_VENDOR_ID_IBM && id == dvsec_id)
- return vsec;
- }
- return 0;
+ return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id);
}
static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx)
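find_dvsec() is now a thin wrapper around pci_find_dvsec_capability(), which walks the extended capability list and matches the vendor and DVSEC ID internally. A short usage sketch for reading a field out of a located DVSEC; the offset macro and helper name are made up for illustration:

#include <linux/errno.h>
#include <linux/pci.h>

#define EXAMPLE_DVSEC_FIELD_OFFSET 0x0C /* hypothetical per-DVSEC offset */

static int read_dvsec_field(struct pci_dev *dev, int dvsec_id, u16 *val)
{
	int pos = pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id);

	if (!pos)
		return -ENODEV;

	if (pci_read_config_word(dev, pos + EXAMPLE_DVSEC_FIELD_OFFSET, val))
		return -EIO;

	return 0;
}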
diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c
index be4016084979..eb97167c03fb 100644
--- a/drivers/misc/pvpanic/pvpanic-mmio.c
+++ b/drivers/misc/pvpanic/pvpanic-mmio.c
@@ -24,8 +24,7 @@ MODULE_AUTHOR("Hu Tao <hutao@cn.fujitsu.com>");
MODULE_DESCRIPTION("pvpanic-mmio device driver");
MODULE_LICENSE("GPL");
-static ssize_t capability_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -33,14 +32,14 @@ static ssize_t capability_show(struct device *dev,
}
static DEVICE_ATTR_RO(capability);
-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->events);
}
-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -100,7 +99,7 @@ static int pvpanic_mmio_probe(struct platform_device *pdev)
pi->base = base;
pi->capability = PVPANIC_PANICKED | PVPANIC_CRASH_LOADED;
- /* initlize capability by RDPT */
+ /* initialize capability by RDPT */
pi->capability &= ioread8(base);
pi->events = pi->capability;
diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c
index 741116b3d995..07eddb5ea30f 100644
--- a/drivers/misc/pvpanic/pvpanic-pci.c
+++ b/drivers/misc/pvpanic/pvpanic-pci.c
@@ -19,16 +19,10 @@
#define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011
MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
-MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
-static const struct pci_device_id pvpanic_pci_id_tbl[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
- {}
-};
-
-static ssize_t capability_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t capability_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -36,14 +30,14 @@ static ssize_t capability_show(struct device *dev,
}
static DEVICE_ATTR_RO(capability);
-static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t events_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
return sysfs_emit(buf, "%x\n", pi->events);
}
-static ssize_t events_store(struct device *dev, struct device_attribute *attr,
+static ssize_t events_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct pvpanic_instance *pi = dev_get_drvdata(dev);
@@ -70,8 +64,7 @@ static struct attribute *pvpanic_pci_dev_attrs[] = {
};
ATTRIBUTE_GROUPS(pvpanic_pci_dev);
-static int pvpanic_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pvpanic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct pvpanic_instance *pi;
void __iomem *base;
@@ -99,6 +92,12 @@ static int pvpanic_pci_probe(struct pci_dev *pdev,
return devm_pvpanic_probe(&pdev->dev, pi);
}
+static const struct pci_device_id pvpanic_pci_id_tbl[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REDHAT, PCI_DEVICE_ID_REDHAT_PVPANIC)},
+ {}
+};
+MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
+
static struct pci_driver pvpanic_pci_driver = {
.name = "pvpanic-pci",
.id_table = pvpanic_pci_id_tbl,
@@ -107,7 +106,4 @@ static struct pci_driver pvpanic_pci_driver = {
.dev_groups = pvpanic_pci_dev_groups,
},
};
-
-MODULE_DEVICE_TABLE(pci, pvpanic_pci_id_tbl);
-
module_pci_driver(pvpanic_pci_driver);
diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c
index bb7aa6368538..4b8f1c7d726d 100644
--- a/drivers/misc/pvpanic/pvpanic.c
+++ b/drivers/misc/pvpanic/pvpanic.c
@@ -23,7 +23,7 @@
#include "pvpanic.h"
MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>");
-MODULE_DESCRIPTION("pvpanic device driver ");
+MODULE_DESCRIPTION("pvpanic device driver");
MODULE_LICENSE("GPL");
static struct list_head pvpanic_list;
@@ -43,8 +43,7 @@ pvpanic_send_event(unsigned int event)
}
static int
-pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
- void *unused)
+pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
unsigned int event = PVPANIC_PANICKED;
@@ -58,7 +57,7 @@ pvpanic_panic_notify(struct notifier_block *nb, unsigned long code,
static struct notifier_block pvpanic_panic_nb = {
.notifier_call = pvpanic_panic_notify,
- .priority = 1, /* let this called before broken drm_fb_helper */
+	.priority = 1, /* let this be called before broken drm_fb_helper() */
};
static void pvpanic_remove(void *param)
@@ -96,18 +95,15 @@ static int pvpanic_init(void)
INIT_LIST_HEAD(&pvpanic_list);
spin_lock_init(&pvpanic_lock);
- atomic_notifier_chain_register(&panic_notifier_list,
- &pvpanic_panic_nb);
+ atomic_notifier_chain_register(&panic_notifier_list, &pvpanic_panic_nb);
return 0;
}
+module_init(pvpanic_init);
static void pvpanic_exit(void)
{
- atomic_notifier_chain_unregister(&panic_notifier_list,
- &pvpanic_panic_nb);
+ atomic_notifier_chain_unregister(&panic_notifier_list, &pvpanic_panic_nb);
}
-
-module_init(pvpanic_init);
module_exit(pvpanic_exit);
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 2508f83bdc3f..dab7b92db790 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -514,6 +514,7 @@ static const struct net_device_ops xpnet_netdev_ops = {
static int __init
xpnet_init(void)
{
+ u8 addr[ETH_ALEN];
int result;
if (!is_uv_system())
@@ -545,15 +546,17 @@ xpnet_init(void)
xpnet_device->min_mtu = XPNET_MIN_MTU;
xpnet_device->max_mtu = XPNET_MAX_MTU;
+ memset(addr, 0, sizeof(addr));
/*
* Multicast assumes the LSB of the first octet is set for multicast
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
- xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+ addr[0] = 0x02; /* locally administered, no OUI */
- xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
- xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+ addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
+ addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+ eth_hw_addr_set(xpnet_device, addr);
/*
* ether_setup() sets this to a multicast device. We are
diff --git a/drivers/misc/tifm_7xx1.c b/drivers/misc/tifm_7xx1.c
index 228f2eb1d476..017c2f7d6287 100644
--- a/drivers/misc/tifm_7xx1.c
+++ b/drivers/misc/tifm_7xx1.c
@@ -311,7 +311,7 @@ static int tifm_7xx1_probe(struct pci_dev *dev,
int pci_dev_busy = 0;
int rc;
- rc = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
if (rc)
return rc;
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 52656fc87e99..a3098fea3bf7 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -176,8 +176,7 @@ struct tifm_adapter *tifm_alloc_adapter(unsigned int num_sockets,
{
struct tifm_adapter *fm;
- fm = kzalloc(sizeof(struct tifm_adapter)
- + sizeof(struct tifm_dev*) * num_sockets, GFP_KERNEL);
+ fm = kzalloc(struct_size(fm, sockets, num_sockets), GFP_KERNEL);
if (fm) {
fm->dev.class = &tifm_adapter_class;
fm->dev.parent = dev;
@@ -293,14 +292,15 @@ EXPORT_SYMBOL(tifm_has_ms_pif);
int tifm_map_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
int direction)
{
- return pci_map_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+ return dma_map_sg(&to_pci_dev(sock->dev.parent)->dev, sg, nents,
+ direction);
}
EXPORT_SYMBOL(tifm_map_sg);
void tifm_unmap_sg(struct tifm_dev *sock, struct scatterlist *sg, int nents,
int direction)
{
- pci_unmap_sg(to_pci_dev(sock->dev.parent), sg, nents, direction);
+ dma_unmap_sg(&to_pci_dev(sock->dev.parent)->dev, sg, nents, direction);
}
EXPORT_SYMBOL(tifm_unmap_sg);
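The tifm_alloc_adapter() change swaps the open-coded size arithmetic for struct_size(), which computes the size of a structure plus its trailing flexible array with overflow checking. A self-contained sketch of the same pattern on a made-up structure:

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_adapter {
	unsigned int num_sockets;
	void *sockets[]; /* flexible array member */
};

static struct example_adapter *example_alloc(unsigned int num_sockets)
{
	struct example_adapter *fm;

	/* struct_size() == sizeof(*fm) + num_sockets * sizeof(fm->sockets[0]),
	 * saturating instead of wrapping on overflow.
	 */
	fm = kzalloc(struct_size(fm, sockets, num_sockets), GFP_KERNEL);
	if (fm)
		fm->num_sockets = num_sockets;
	return fm;
}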