Diffstat (limited to 'drivers/gpu/drm/nouveau/nvkm/falcon')
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild  |   6
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/base.c  | 245
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c  |   2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/fw.c    | 354
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c |  62
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c | 148
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c | 345
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c |  82
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c  |  10
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/priv.h  |   8
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h  |   9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/falcon/v1.c    | 210
12 files changed, 1187 insertions, 294 deletions
diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild index d79d783904ee..9ffe7b921ccb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/Kbuild @@ -1,6 +1,12 @@ # SPDX-License-Identifier: MIT nvkm-y += nvkm/falcon/base.o nvkm-y += nvkm/falcon/cmdq.o +nvkm-y += nvkm/falcon/fw.o nvkm-y += nvkm/falcon/msgq.o nvkm-y += nvkm/falcon/qmgr.o nvkm-y += nvkm/falcon/v1.o + +nvkm-y += nvkm/falcon/gm200.o +nvkm-y += nvkm/falcon/gp102.o +nvkm-y += nvkm/falcon/ga100.o +nvkm-y += nvkm/falcon/ga102.o diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c index f3f90c1063dd..235149f73a69 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/base.c @@ -22,119 +22,217 @@ #include "priv.h" #include <subdev/mc.h> +#include <subdev/timer.h> #include <subdev/top.h> -void -nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, - u32 size, u16 tag, u8 port, bool secure) +static const struct nvkm_falcon_func_dma * +nvkm_falcon_dma(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base) { - if (secure && !falcon->secret) { - nvkm_warn(falcon->user, - "writing with secure tag on a non-secure falcon!\n"); - return; + switch (*mem_type) { + case IMEM: return falcon->func->imem_dma; + case DMEM: return falcon->func->dmem_dma; + default: + return NULL; } - - falcon->func->load_imem(falcon, data, start, size, tag, port, - secure); } -void -nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, - u32 size, u8 port) +int +nvkm_falcon_dma_wr(struct nvkm_falcon *falcon, const u8 *img, u64 dma_addr, u32 dma_base, + enum nvkm_falcon_mem mem_type, u32 mem_base, int len, bool sec) { - mutex_lock(&falcon->dmem_mutex); - - falcon->func->load_dmem(falcon, data, start, size, port); + const struct nvkm_falcon_func_dma *dma = nvkm_falcon_dma(falcon, &mem_type, &mem_base); + const char *type = nvkm_falcon_mem(mem_type); + const int dmalen = 256; + u32 dma_start = 0; + u32 dst, src, cmd; + int ret, i; + + if (WARN_ON(!dma->xfer)) + return -EINVAL; + + if (mem_type == DMEM) { + dma_start = dma_base; + dma_addr += dma_base; + } - mutex_unlock(&falcon->dmem_mutex); -} + FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x (%010llx %08x)", + type, mem_base, len, dma_base, dma_addr - dma_base, dma_start); + if (WARN_ON(!len || (len & (dmalen - 1)))) + return -EINVAL; -void -nvkm_falcon_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, u8 port, - void *data) -{ - mutex_lock(&falcon->dmem_mutex); + ret = dma->init(falcon, dma_addr, dmalen, mem_type, sec, &cmd); + if (ret) + return ret; - falcon->func->read_dmem(falcon, start, size, port, data); + dst = mem_base; + src = dma_base; + if (len) { + while (len >= dmalen) { + dma->xfer(falcon, dst, src - dma_start, cmd); + + if (img && nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) { + for (i = 0; i < dmalen; i += 4, mem_base += 4) { + const int w = 8, x = (i / 4) % w; + + if (x == 0) + printk(KERN_INFO "%s %08x <-", type, mem_base); + printk(KERN_CONT " %08x", *(u32 *)(img + src + i)); + if (x == (w - 1) || ((i + 4) == dmalen)) + printk(KERN_CONT " <- %08x+%08x", dma_base, + src + i - dma_base - (x * 4)); + if (i == (7 * 4)) + printk(KERN_CONT " *"); + } + } + + if (nvkm_msec(falcon->owner->device, 2000, + if (dma->done(falcon)) + break; + ) < 0) + return -ETIMEDOUT; + + src += dmalen; + dst += dmalen; + len -= dmalen; + } + WARN_ON(len); + } - 
mutex_unlock(&falcon->dmem_mutex); + return 0; } -void -nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *inst) +static const struct nvkm_falcon_func_pio * +nvkm_falcon_pio(struct nvkm_falcon *falcon, enum nvkm_falcon_mem *mem_type, u32 *mem_base) { - if (!falcon->func->bind_context) { - nvkm_error(falcon->user, - "Context binding not supported on this falcon!\n"); - return; + switch (*mem_type) { + case IMEM: + return falcon->func->imem_pio; + case DMEM: + if (!falcon->func->emem_addr || *mem_base < falcon->func->emem_addr) + return falcon->func->dmem_pio; + + *mem_base -= falcon->func->emem_addr; + fallthrough; + case EMEM: + return falcon->func->emem_pio; + default: + return NULL; } - - falcon->func->bind_context(falcon, inst); } -void -nvkm_falcon_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) +int +nvkm_falcon_pio_rd(struct nvkm_falcon *falcon, u8 port, enum nvkm_falcon_mem mem_type, u32 mem_base, + const u8 *img, u32 img_base, int len) { - falcon->func->set_start_addr(falcon, start_addr); -} + const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base); + const char *type = nvkm_falcon_mem(mem_type); + int xfer_len; + + if (WARN_ON(!pio || !pio->rd)) + return -EINVAL; + + FLCN_DBG(falcon, "%s %08x -> %08x bytes at %08x", type, mem_base, len, img_base); + if (WARN_ON(!len || (len & (pio->min - 1)))) + return -EINVAL; + + pio->rd_init(falcon, port, mem_base); + do { + xfer_len = min(len, pio->max); + pio->rd(falcon, port, img, xfer_len); + + if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) { + for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) { + if (((img_base / 4) % 8) == 0) + printk(KERN_INFO "%s %08x ->", type, mem_base); + printk(KERN_CONT " %08x", *(u32 *)(img + img_base)); + } + } + + img += xfer_len; + len -= xfer_len; + } while (len); -void -nvkm_falcon_start(struct nvkm_falcon *falcon) -{ - falcon->func->start(falcon); + return 0; } int -nvkm_falcon_enable(struct nvkm_falcon *falcon) +nvkm_falcon_pio_wr(struct nvkm_falcon *falcon, const u8 *img, u32 img_base, u8 port, + enum nvkm_falcon_mem mem_type, u32 mem_base, int len, u16 tag, bool sec) { - struct nvkm_device *device = falcon->owner->device; - int ret; - - nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst); - ret = falcon->func->enable(falcon); - if (ret) { - nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst); - return ret; - } + const struct nvkm_falcon_func_pio *pio = nvkm_falcon_pio(falcon, &mem_type, &mem_base); + const char *type = nvkm_falcon_mem(mem_type); + int xfer_len; + + if (WARN_ON(!pio || !pio->wr)) + return -EINVAL; + + FLCN_DBG(falcon, "%s %08x <- %08x bytes at %08x", type, mem_base, len, img_base); + if (WARN_ON(!len || (len & (pio->min - 1)))) + return -EINVAL; + + pio->wr_init(falcon, port, sec, mem_base); + do { + xfer_len = min(len, pio->max); + pio->wr(falcon, port, img, xfer_len, tag++); + + if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) { + for (img_base = 0; img_base < xfer_len; img_base += 4, mem_base += 4) { + if (((img_base / 4) % 8) == 0) + printk(KERN_INFO "%s %08x <-", type, mem_base); + printk(KERN_CONT " %08x", *(u32 *)(img + img_base)); + if ((img_base / 4) == 7 && mem_type == IMEM) + printk(KERN_CONT " %04x", tag - 1); + } + } + + img += xfer_len; + len -= xfer_len; + } while (len); return 0; } void -nvkm_falcon_disable(struct nvkm_falcon *falcon) +nvkm_falcon_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, + u32 size, u16 tag, u8 
port, bool secure) { - struct nvkm_device *device = falcon->owner->device; - - /* already disabled, return or wait_idle will timeout */ - if (!nvkm_mc_enabled(device, falcon->owner->type, falcon->owner->inst)) + if (secure && !falcon->secret) { + nvkm_warn(falcon->user, + "writing with secure tag on a non-secure falcon!\n"); return; + } - falcon->func->disable(falcon); - - nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst); + falcon->func->load_imem(falcon, data, start, size, tag, port, + secure); } -int -nvkm_falcon_reset(struct nvkm_falcon *falcon) +void +nvkm_falcon_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, + u32 size, u8 port) { - if (!falcon->func->reset) { - nvkm_falcon_disable(falcon); - return nvkm_falcon_enable(falcon); - } + mutex_lock(&falcon->dmem_mutex); - return falcon->func->reset(falcon); + falcon->func->load_dmem(falcon, data, start, size, port); + + mutex_unlock(&falcon->dmem_mutex); } -int -nvkm_falcon_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) +void +nvkm_falcon_start(struct nvkm_falcon *falcon) { - return falcon->func->wait_for_halt(falcon, ms); + falcon->func->start(falcon); } int -nvkm_falcon_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) +nvkm_falcon_reset(struct nvkm_falcon *falcon) { - return falcon->func->clear_interrupt(falcon, mask); + int ret; + + ret = falcon->func->disable(falcon); + if (WARN_ON(ret)) + return ret; + + return nvkm_falcon_enable(falcon); } static int @@ -169,7 +267,7 @@ nvkm_falcon_oneinit(struct nvkm_falcon *falcon) } void -nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) +nvkm_falcon_put(struct nvkm_falcon *falcon, struct nvkm_subdev *user) { if (unlikely(!falcon)) return; @@ -183,7 +281,7 @@ nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) } int -nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) +nvkm_falcon_get(struct nvkm_falcon *falcon, struct nvkm_subdev *user) { int ret = 0; @@ -217,6 +315,7 @@ nvkm_falcon_ctor(const struct nvkm_falcon_func *func, falcon->owner = subdev; falcon->name = name; falcon->addr = addr; + falcon->addr2 = func->addr2; mutex_init(&falcon->mutex); mutex_init(&falcon->dmem_mutex); return 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c index 44cf6a8862e1..211ebe7afac6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/cmdq.c @@ -51,7 +51,7 @@ static void nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size) { struct nvkm_falcon *falcon = cmdq->qmgr->falcon; - nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0); + nvkm_falcon_pio_wr(falcon, data, 0, 0, DMEM, cmdq->position, size, 0, false); cmdq->position += ALIGN(size, QUEUE_ALIGNMENT); } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c new file mode 100644 index 000000000000..80a480b12174 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/fw.c @@ -0,0 +1,354 @@ +/* + * Copyright 2022 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/memory.h> +#include <subdev/mmu.h> + +#include <nvfw/fw.h> +#include <nvfw/hs.h> + +int +nvkm_falcon_fw_patch(struct nvkm_falcon_fw *fw) +{ + struct nvkm_falcon *falcon = fw->falcon; + u32 sig_base_src = fw->sig_base_prd; + u32 src, dst, len, i; + int idx = 0; + + FLCNFW_DBG(fw, "patching sigs:%d size:%d", fw->sig_nr, fw->sig_size); + if (fw->func->signature) { + idx = fw->func->signature(fw, &sig_base_src); + if (idx < 0) + return idx; + } + + src = idx * fw->sig_size; + dst = fw->sig_base_img; + len = fw->sig_size / 4; + FLCNFW_DBG(fw, "patch idx:%d src:%08x dst:%08x", idx, sig_base_src + src, dst); + for (i = 0; i < len; i++) { + u32 sig = *(u32 *)(fw->sigs + src); + + if (nvkm_printk_ok(falcon->owner, falcon->user, NV_DBG_TRACE)) { + if (i % 8 == 0) + printk(KERN_INFO "sig -> %08x:", dst); + printk(KERN_CONT " %08x", sig); + } + + *(u32 *)(fw->fw.img + dst) = sig; + src += 4; + dst += 4; + } + + return 0; +} + +static void +nvkm_falcon_fw_dtor_sigs(struct nvkm_falcon_fw *fw) +{ + kfree(fw->sigs); + fw->sigs = NULL; +} + +int +nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user, + bool release, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr) +{ + struct nvkm_falcon *falcon = fw->falcon; + int ret; + + ret = nvkm_falcon_get(falcon, user); + if (ret) + return ret; + + if (fw->sigs) { + ret = nvkm_falcon_fw_patch(fw); + if (ret) + goto done; + + nvkm_falcon_fw_dtor_sigs(fw); + } + + FLCNFW_DBG(fw, "resetting"); + fw->func->reset(fw); + + FLCNFW_DBG(fw, "loading"); + if (fw->func->setup) { + ret = fw->func->setup(fw); + if (ret) + goto done; + } + + ret = fw->func->load(fw); + if (ret) + goto done; + + FLCNFW_DBG(fw, "booting"); + ret = fw->func->boot(fw, pmbox0, pmbox1, mbox0_ok, irqsclr); + if (ret) + FLCNFW_ERR(fw, "boot failed: %d", ret); + else + FLCNFW_DBG(fw, "booted"); + +done: + if (ret || release) + nvkm_falcon_put(falcon, user); + return ret; +} + +int +nvkm_falcon_fw_oneinit(struct nvkm_falcon_fw *fw, struct nvkm_falcon *falcon, + struct nvkm_vmm *vmm, struct nvkm_memory *inst) +{ + int ret; + + fw->falcon = falcon; + fw->vmm = nvkm_vmm_ref(vmm); + fw->inst = nvkm_memory_ref(inst); + + if (fw->boot) { + FLCN_DBG(falcon, "mapping %s fw", fw->fw.name); + ret = nvkm_vmm_get(fw->vmm, 12, nvkm_memory_size(&fw->fw.mem.memory), &fw->vma); + if (ret) { + FLCN_ERR(falcon, "get %d", ret); + return ret; + } + + ret = 
nvkm_memory_map(&fw->fw.mem.memory, 0, fw->vmm, fw->vma, NULL, 0); + if (ret) { + FLCN_ERR(falcon, "map %d", ret); + return ret; + } + } + + return 0; +} + +void +nvkm_falcon_fw_dtor(struct nvkm_falcon_fw *fw) +{ + nvkm_vmm_put(fw->vmm, &fw->vma); + nvkm_vmm_unref(&fw->vmm); + nvkm_memory_unref(&fw->inst); + nvkm_falcon_fw_dtor_sigs(fw); + nvkm_firmware_dtor(&fw->fw); +} + +static const struct nvkm_firmware_func +nvkm_falcon_fw_dma = { + .type = NVKM_FIRMWARE_IMG_DMA, +}; + +static const struct nvkm_firmware_func +nvkm_falcon_fw = { + .type = NVKM_FIRMWARE_IMG_RAM, +}; + +int +nvkm_falcon_fw_sign(struct nvkm_falcon_fw *fw, u32 sig_base_img, u32 sig_size, const u8 *sigs, + int sig_nr_prd, u32 sig_base_prd, int sig_nr_dbg, u32 sig_base_dbg) +{ + fw->sig_base_prd = sig_base_prd; + fw->sig_base_dbg = sig_base_dbg; + fw->sig_base_img = sig_base_img; + fw->sig_size = sig_size; + fw->sig_nr = sig_nr_prd + sig_nr_dbg; + + fw->sigs = kmalloc_array(fw->sig_nr, fw->sig_size, GFP_KERNEL); + if (!fw->sigs) + return -ENOMEM; + + memcpy(fw->sigs, sigs + sig_base_prd, sig_nr_prd * fw->sig_size); + if (sig_nr_dbg) + memcpy(fw->sigs + sig_size, sigs + sig_base_dbg, sig_nr_dbg * fw->sig_size); + + return 0; +} + +int +nvkm_falcon_fw_ctor(const struct nvkm_falcon_fw_func *func, const char *name, + struct nvkm_device *device, bool dma, const void *src, u32 len, + struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) +{ + const struct nvkm_firmware_func *type = dma ? &nvkm_falcon_fw_dma : &nvkm_falcon_fw; + int ret; + + fw->func = func; + + ret = nvkm_firmware_ctor(type, name, device, src, len, &fw->fw); + if (ret) + return ret; + + return falcon ? nvkm_falcon_fw_oneinit(fw, falcon, NULL, NULL) : 0; +} + +int +nvkm_falcon_fw_ctor_hs(const struct nvkm_falcon_fw_func *func, const char *name, + struct nvkm_subdev *subdev, const char *bl, const char *img, int ver, + struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) +{ + const struct firmware *blob; + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header *hshdr; + const struct nvfw_hs_load_header *lhdr; + const struct nvfw_bl_desc *desc; + u32 loc, sig; + int ret; + + ret = nvkm_firmware_load_name(subdev, img, "", ver, &blob); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, blob->data); + hshdr = nvfw_hs_header(subdev, blob->data + hdr->header_offset); + + ret = nvkm_falcon_fw_ctor(func, name, subdev->device, bl != NULL, + blob->data + hdr->data_offset, hdr->data_size, falcon, fw); + if (ret) + goto done; + + /* Earlier FW releases by NVIDIA for Nouveau's use aren't in NVIDIA's + * standard format, and don't have the indirection seen in the 0x10de + * case. 
+ */ + switch (hdr->bin_magic) { + case 0x000010de: + loc = *(u32 *)(blob->data + hshdr->patch_loc); + sig = *(u32 *)(blob->data + hshdr->patch_sig); + break; + case 0x3b1d14f0: + loc = hshdr->patch_loc; + sig = hshdr->patch_sig; + break; + default: + WARN_ON(1); + ret = -EINVAL; + goto done; + } + + ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size, blob->data, + 1, hshdr->sig_prod_offset + sig, + 1, hshdr->sig_dbg_offset + sig); + if (ret) + goto done; + + lhdr = nvfw_hs_load_header(subdev, blob->data + hshdr->hdr_offset); + + fw->nmem_base_img = 0; + fw->nmem_base = lhdr->non_sec_code_off; + fw->nmem_size = lhdr->non_sec_code_size; + + fw->imem_base_img = lhdr->apps[0]; + fw->imem_base = ALIGN(lhdr->apps[0], 0x100); + fw->imem_size = lhdr->apps[lhdr->num_apps + 0]; + + fw->dmem_base_img = lhdr->data_dma_base; + fw->dmem_base = 0; + fw->dmem_size = lhdr->data_size; + fw->dmem_sign = loc - lhdr->data_dma_base; + + if (bl) { + nvkm_firmware_put(blob); + + ret = nvkm_firmware_load_name(subdev, bl, "", ver, &blob); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, blob->data); + desc = nvfw_bl_desc(subdev, blob->data + hdr->header_offset); + + fw->boot_addr = desc->start_tag << 8; + fw->boot_size = desc->code_size; + fw->boot = kmemdup(blob->data + hdr->data_offset + desc->code_off, + fw->boot_size, GFP_KERNEL); + if (!fw->boot) + ret = -ENOMEM; + } else { + fw->boot_addr = fw->nmem_base; + } + +done: + if (ret) + nvkm_falcon_fw_dtor(fw); + + nvkm_firmware_put(blob); + return ret; +} + +int +nvkm_falcon_fw_ctor_hs_v2(const struct nvkm_falcon_fw_func *func, const char *name, + struct nvkm_subdev *subdev, const char *img, int ver, + struct nvkm_falcon *falcon, struct nvkm_falcon_fw *fw) +{ + const struct nvfw_bin_hdr *hdr; + const struct nvfw_hs_header_v2 *hshdr; + const struct nvfw_hs_load_header_v2 *lhdr; + const struct firmware *blob; + u32 loc, sig, cnt, *meta; + int ret; + + ret = nvkm_firmware_load_name(subdev, img, "", ver, &blob); + if (ret) + return ret; + + hdr = nvfw_bin_hdr(subdev, blob->data); + hshdr = nvfw_hs_header_v2(subdev, blob->data + hdr->header_offset); + meta = (u32 *)(blob->data + hshdr->meta_data_offset); + loc = *(u32 *)(blob->data + hshdr->patch_loc); + sig = *(u32 *)(blob->data + hshdr->patch_sig); + cnt = *(u32 *)(blob->data + hshdr->num_sig); + + ret = nvkm_falcon_fw_ctor(func, name, subdev->device, true, + blob->data + hdr->data_offset, hdr->data_size, falcon, fw); + if (ret) + goto done; + + ret = nvkm_falcon_fw_sign(fw, loc, hshdr->sig_prod_size / cnt, blob->data, + cnt, hshdr->sig_prod_offset + sig, 0, 0); + if (ret) + goto done; + + lhdr = nvfw_hs_load_header_v2(subdev, blob->data + hshdr->header_offset); + + fw->imem_base_img = lhdr->app[0].offset; + fw->imem_base = 0; + fw->imem_size = lhdr->app[0].size; + + fw->dmem_base_img = lhdr->os_data_offset; + fw->dmem_base = 0; + fw->dmem_size = lhdr->os_data_size; + fw->dmem_sign = loc - lhdr->os_data_offset; + + fw->boot_addr = lhdr->app[0].offset; + + fw->fuse_ver = meta[0]; + fw->engine_id = meta[1]; + fw->ucode_id = meta[2]; + +done: + if (ret) + nvkm_falcon_fw_dtor(fw); + + nvkm_firmware_put(blob); + return ret; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c new file mode 100644 index 000000000000..49fd32943916 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga100.c @@ -0,0 +1,62 @@ +/* + * Copyright 2021 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +int +ga100_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *src_base_src) +{ + struct nvkm_falcon *falcon = fw->falcon; + struct nvkm_device *device = falcon->owner->device; + u32 reg_fuse_version; + int idx; + + FLCN_DBG(falcon, "brom: %08x %08x", fw->engine_id, fw->ucode_id); + FLCN_DBG(falcon, "fuse_version: %d", fw->fuse_ver); + + if (fw->engine_id & 0x00000001) { + reg_fuse_version = nvkm_rd32(device, 0x824140 + (fw->ucode_id - 1) * 4); + } else + if (fw->engine_id & 0x00000004) { + reg_fuse_version = nvkm_rd32(device, 0x824100 + (fw->ucode_id - 1) * 4); + } else + if (fw->engine_id & 0x00000400) { + reg_fuse_version = nvkm_rd32(device, 0x8241c0 + (fw->ucode_id - 1) * 4); + } else { + WARN_ON(1); + return -ENOSYS; + } + + FLCN_DBG(falcon, "reg_fuse_version: %08x", reg_fuse_version); + if (reg_fuse_version) { + reg_fuse_version = fls(reg_fuse_version); + FLCN_DBG(falcon, "reg_fuse_version: %d", reg_fuse_version); + + if (WARN_ON(fw->fuse_ver < reg_fuse_version)) + return -EINVAL; + + idx = fw->fuse_ver - reg_fuse_version; + } else { + idx = fw->sig_nr - 1; + } + + return idx; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c new file mode 100644 index 000000000000..0ff450fe3590 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/ga102.c @@ -0,0 +1,148 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <subdev/mc.h> +#include <subdev/timer.h> + +static bool +ga102_flcn_dma_done(struct nvkm_falcon *falcon) +{ + return !!(nvkm_falcon_rd32(falcon, 0x118) & 0x00000002); +} + +static void +ga102_flcn_dma_xfer(struct nvkm_falcon *falcon, u32 mem_base, u32 dma_base, u32 cmd) +{ + nvkm_falcon_wr32(falcon, 0x114, mem_base); + nvkm_falcon_wr32(falcon, 0x11c, dma_base); + nvkm_falcon_wr32(falcon, 0x118, cmd); +} + +static int +ga102_flcn_dma_init(struct nvkm_falcon *falcon, u64 dma_addr, int xfer_len, + enum nvkm_falcon_mem mem_type, bool sec, u32 *cmd) +{ + *cmd = (ilog2(xfer_len) - 2) << 8; + if (mem_type == IMEM) + *cmd |= 0x00000010; + if (sec) + *cmd |= 0x00000004; + + nvkm_falcon_wr32(falcon, 0x110, dma_addr >> 8); + nvkm_falcon_wr32(falcon, 0x128, 0x00000000); + return 0; +} + +const struct nvkm_falcon_func_dma +ga102_flcn_dma = { + .init = ga102_flcn_dma_init, + .xfer = ga102_flcn_dma_xfer, + .done = ga102_flcn_dma_done, +}; + +int +ga102_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon) +{ + nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000); + + if (nvkm_msec(falcon->owner->device, 20, + if (!(nvkm_falcon_rd32(falcon, 0x0f4) & 0x00001000)) + break; + ) < 0) + return -ETIMEDOUT; + + return 0; +} + +int +ga102_flcn_reset_prep(struct nvkm_falcon *falcon) +{ + nvkm_falcon_rd32(falcon, 0x0f4); + + nvkm_usec(falcon->owner->device, 150, + if (nvkm_falcon_rd32(falcon, 0x0f4) & 0x80000000) + break; + _warn = false; + ); + + return 0; +} + +int +ga102_flcn_select(struct nvkm_falcon *falcon) +{ + if ((nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000010) != 0x00000000) { + nvkm_falcon_wr32(falcon, falcon->addr2 + 0x668, 0x00000000); + if (nvkm_msec(falcon->owner->device, 10, + if (nvkm_falcon_rd32(falcon, falcon->addr2 + 0x668) & 0x00000001) + break; + ) < 0) + return -ETIMEDOUT; + } + + return 0; +} + +int +ga102_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *mbox0, u32 *mbox1, u32 mbox0_ok, u32 irqsclr) +{ + struct nvkm_falcon *falcon = fw->falcon; + + nvkm_falcon_wr32(falcon, falcon->addr2 + 0x210, fw->dmem_sign); + nvkm_falcon_wr32(falcon, falcon->addr2 + 0x19c, fw->engine_id); + nvkm_falcon_wr32(falcon, falcon->addr2 + 0x198, fw->ucode_id); + nvkm_falcon_wr32(falcon, falcon->addr2 + 0x180, 0x00000001); + + return gm200_flcn_fw_boot(fw, mbox0, mbox1, mbox0_ok, irqsclr); +} + +int +ga102_flcn_fw_load(struct nvkm_falcon_fw *fw) +{ + struct nvkm_falcon *falcon = fw->falcon; + int ret = 0; + + nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080); + nvkm_falcon_wr32(falcon, 0x10c, 0x00000000); + nvkm_falcon_mask(falcon, 0x600, 0x00010007, (0 << 16) | (1 << 2) | 1); + + ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->imem_base_img, + IMEM, fw->imem_base, fw->imem_size, true); + if (ret) + return ret; + + ret = nvkm_falcon_dma_wr(falcon, fw->fw.img, fw->fw.phys, fw->dmem_base_img, + DMEM, fw->dmem_base, fw->dmem_size, false); + if (ret) + return ret; + + return 0; +} + +const struct nvkm_falcon_fw_func +ga102_flcn_fw = { + .signature = ga100_flcn_fw_signature, + .reset = gm200_flcn_fw_reset, + .load = ga102_flcn_fw_load, + .boot = ga102_flcn_fw_boot, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c 
new file mode 100644 index 000000000000..393ade9f7e6c --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c @@ -0,0 +1,345 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +#include <core/memory.h> +#include <subdev/mc.h> +#include <subdev/timer.h> + +void +gm200_flcn_tracepc(struct nvkm_falcon *falcon) +{ + u32 sctl = nvkm_falcon_rd32(falcon, 0x240); + u32 tidx = nvkm_falcon_rd32(falcon, 0x148); + int nr = (tidx & 0x00ff0000) >> 16, sp, ip; + + FLCN_ERR(falcon, "TRACEPC SCTL %08x TIDX %08x", sctl, tidx); + for (sp = 0; sp < nr; sp++) { + nvkm_falcon_wr32(falcon, 0x148, sp); + ip = nvkm_falcon_rd32(falcon, 0x14c); + FLCN_ERR(falcon, "TRACEPC: %08x", ip); + } +} + +static void +gm200_flcn_pio_dmem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len) +{ + while (len >= 4) { + *(u32 *)img = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8)); + img += 4; + len -= 4; + } +} + +static void +gm200_flcn_pio_dmem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base) +{ + nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(25) | dmem_base); +} + +static void +gm200_flcn_pio_dmem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag) +{ + while (len >= 4) { + nvkm_falcon_wr32(falcon, 0x1c4 + (port * 8), *(u32 *)img); + img += 4; + len -= 4; + } +} + +static void +gm200_flcn_pio_dmem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 dmem_base) +{ + nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), BIT(24) | dmem_base); +} + +const struct nvkm_falcon_func_pio +gm200_flcn_dmem_pio = { + .min = 4, + .max = 0x100, + .wr_init = gm200_flcn_pio_dmem_wr_init, + .wr = gm200_flcn_pio_dmem_wr, + .rd_init = gm200_flcn_pio_dmem_rd_init, + .rd = gm200_flcn_pio_dmem_rd, +}; + +static void +gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 imem_base) +{ + nvkm_falcon_wr32(falcon, 0x180 + (port * 0x10), (sec ? 
BIT(28) : 0) | BIT(24) | imem_base); +} + +static void +gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag) +{ + nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++); + while (len >= 4) { + nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img); + img += 4; + len -= 4; + } +} + +const struct nvkm_falcon_func_pio +gm200_flcn_imem_pio = { + .min = 0x100, + .max = 0x100, + .wr_init = gm200_flcn_pio_imem_wr_init, + .wr = gm200_flcn_pio_imem_wr, +}; + +int +gm200_flcn_bind_stat(struct nvkm_falcon *falcon, bool intr) +{ + if (intr && !(nvkm_falcon_rd32(falcon, 0x008) & 0x00000008)) + return -1; + + return (nvkm_falcon_rd32(falcon, 0x0dc) & 0x00007000) >> 12; +} + +void +gm200_flcn_bind_inst(struct nvkm_falcon *falcon, int target, u64 addr) +{ + nvkm_falcon_mask(falcon, 0x604, 0x00000007, 0x00000000); /* DMAIDX_VIRT */ + nvkm_falcon_wr32(falcon, 0x054, (1 << 30) | (target << 28) | (addr >> 12)); + nvkm_falcon_mask(falcon, 0x090, 0x00010000, 0x00010000); + nvkm_falcon_mask(falcon, 0x0a4, 0x00000008, 0x00000008); +} + +int +gm200_flcn_reset_wait_mem_scrubbing(struct nvkm_falcon *falcon) +{ + nvkm_falcon_mask(falcon, 0x040, 0x00000000, 0x00000000); + + if (nvkm_msec(falcon->owner->device, 10, + if (!(nvkm_falcon_rd32(falcon, 0x10c) & 0x00000006)) + break; + ) < 0) + return -ETIMEDOUT; + + return 0; +} + +int +gm200_flcn_enable(struct nvkm_falcon *falcon) +{ + struct nvkm_device *device = falcon->owner->device; + int ret; + + if (falcon->func->reset_eng) { + ret = falcon->func->reset_eng(falcon); + if (ret) + return ret; + } + + if (falcon->func->select) { + ret = falcon->func->select(falcon); + if (ret) + return ret; + } + + if (falcon->func->reset_pmc) + nvkm_mc_enable(device, falcon->owner->type, falcon->owner->inst); + + ret = falcon->func->reset_wait_mem_scrubbing(falcon); + if (ret) + return ret; + + nvkm_falcon_wr32(falcon, 0x084, nvkm_rd32(device, 0x000000)); + return 0; +} + +int +gm200_flcn_disable(struct nvkm_falcon *falcon) +{ + struct nvkm_device *device = falcon->owner->device; + int ret; + + if (falcon->func->select) { + ret = falcon->func->select(falcon); + if (ret) + return ret; + } + + nvkm_falcon_mask(falcon, 0x048, 0x00000003, 0x00000000); + nvkm_falcon_wr32(falcon, 0x014, 0xffffffff); + + if (falcon->func->reset_pmc) { + if (falcon->func->reset_prep) { + ret = falcon->func->reset_prep(falcon); + if (ret) + return ret; + } + + nvkm_mc_disable(device, falcon->owner->type, falcon->owner->inst); + } + + if (falcon->func->reset_eng) { + ret = falcon->func->reset_eng(falcon); + if (ret) + return ret; + } + + return 0; +} + +int +gm200_flcn_fw_boot(struct nvkm_falcon_fw *fw, u32 *pmbox0, u32 *pmbox1, u32 mbox0_ok, u32 irqsclr) +{ + struct nvkm_falcon *falcon = fw->falcon; + u32 mbox0, mbox1; + int ret = 0; + + nvkm_falcon_wr32(falcon, 0x040, pmbox0 ? 
*pmbox0 : 0xcafebeef); + if (pmbox1) + nvkm_falcon_wr32(falcon, 0x044, *pmbox1); + + nvkm_falcon_wr32(falcon, 0x104, fw->boot_addr); + nvkm_falcon_wr32(falcon, 0x100, 0x00000002); + + if (nvkm_msec(falcon->owner->device, 2000, + if (nvkm_falcon_rd32(falcon, 0x100) & 0x00000010) + break; + ) < 0) + ret = -ETIMEDOUT; + + mbox0 = nvkm_falcon_rd32(falcon, 0x040); + mbox1 = nvkm_falcon_rd32(falcon, 0x044); + if (FLCN_ERRON(falcon, ret || mbox0 != mbox0_ok, "mbox %08x %08x", mbox0, mbox1)) + ret = ret ?: -EIO; + + if (irqsclr) + nvkm_falcon_mask(falcon, 0x004, 0xffffffff, irqsclr); + + return ret; +} + +int +gm200_flcn_fw_load(struct nvkm_falcon_fw *fw) +{ + struct nvkm_falcon *falcon = fw->falcon; + int target, ret; + + if (fw->inst) { + nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001); + + switch (nvkm_memory_target(fw->inst)) { + case NVKM_MEM_TARGET_VRAM: target = 0; break; + case NVKM_MEM_TARGET_HOST: target = 2; break; + case NVKM_MEM_TARGET_NCOH: target = 3; break; + default: + WARN_ON(1); + return -EINVAL; + } + + falcon->func->bind_inst(falcon, target, nvkm_memory_addr(fw->inst)); + + if (nvkm_msec(falcon->owner->device, 10, + if (falcon->func->bind_stat(falcon, falcon->func->bind_intr) == 5) + break; + ) < 0) + return -ETIMEDOUT; + + nvkm_falcon_mask(falcon, 0x004, 0x00000008, 0x00000008); + nvkm_falcon_mask(falcon, 0x058, 0x00000002, 0x00000002); + + if (nvkm_msec(falcon->owner->device, 10, + if (falcon->func->bind_stat(falcon, false) == 0) + break; + ) < 0) + return -ETIMEDOUT; + } else { + nvkm_falcon_mask(falcon, 0x624, 0x00000080, 0x00000080); + nvkm_falcon_wr32(falcon, 0x10c, 0x00000000); + } + + if (fw->boot) { + switch (nvkm_memory_target(&fw->fw.mem.memory)) { + case NVKM_MEM_TARGET_VRAM: target = 4; break; + case NVKM_MEM_TARGET_HOST: target = 5; break; + case NVKM_MEM_TARGET_NCOH: target = 6; break; + default: + WARN_ON(1); + return -EINVAL; + } + + ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0, + IMEM, falcon->code.limit - fw->boot_size, fw->boot_size, + fw->boot_addr >> 8, false); + if (ret) + return ret; + + return fw->func->load_bld(fw); + } + + ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->nmem_base_img, fw->nmem_base_img, 0, + IMEM, fw->nmem_base, fw->nmem_size, fw->nmem_base >> 8, false); + if (ret) + return ret; + + ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->imem_base_img, fw->imem_base_img, 0, + IMEM, fw->imem_base, fw->imem_size, fw->imem_base >> 8, true); + if (ret) + return ret; + + ret = nvkm_falcon_pio_wr(falcon, fw->fw.img + fw->dmem_base_img, fw->dmem_base_img, 0, + DMEM, fw->dmem_base, fw->dmem_size, 0, false); + if (ret) + return ret; + + return 0; +} + +int +gm200_flcn_fw_reset(struct nvkm_falcon_fw *fw) +{ + return nvkm_falcon_reset(fw->falcon); +} + +int +gm200_flcn_fw_signature(struct nvkm_falcon_fw *fw, u32 *sig_base_src) +{ + struct nvkm_falcon *falcon = fw->falcon; + u32 addr = falcon->func->debug; + int ret = 0; + + if (addr) { + ret = nvkm_falcon_enable(falcon); + if (ret) + return ret; + + if (nvkm_falcon_rd32(falcon, addr) & 0x00100000) { + *sig_base_src = fw->sig_base_dbg; + return 1; + } + } + + return ret; +} + +const struct nvkm_falcon_fw_func +gm200_flcn_fw = { + .signature = gm200_flcn_fw_signature, + .reset = gm200_flcn_fw_reset, + .load = gm200_flcn_fw_load, + .boot = gm200_flcn_fw_boot, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c new file mode 100644 index 000000000000..c774935f3077 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gp102.c @@ -0,0 
+1,82 @@ +/* + * Copyright 2022 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#include "priv.h" + +static void +gp102_flcn_pio_emem_rd(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len) +{ + while (len >= 4) { + *(u32 *)img = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8)); + img += 4; + len -= 4; + } +} + +static void +gp102_flcn_pio_emem_rd_init(struct nvkm_falcon *falcon, u8 port, u32 dmem_base) +{ + nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), BIT(25) | dmem_base); +} + +static void +gp102_flcn_pio_emem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag) +{ + while (len >= 4) { + nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), *(u32 *)img); + img += 4; + len -= 4; + } +} + +static void +gp102_flcn_pio_emem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 emem_base) +{ + nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), BIT(24) | emem_base); +} + +const struct nvkm_falcon_func_pio +gp102_flcn_emem_pio = { + .min = 4, + .max = 0x100, + .wr_init = gp102_flcn_pio_emem_wr_init, + .wr = gp102_flcn_pio_emem_wr, + .rd_init = gp102_flcn_pio_emem_rd_init, + .rd = gp102_flcn_pio_emem_rd, +}; + +int +gp102_flcn_reset_eng(struct nvkm_falcon *falcon) +{ + int ret; + + if (falcon->func->reset_prep) { + ret = falcon->func->reset_prep(falcon); + if (ret) + return ret; + } + + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000001); + udelay(10); + nvkm_falcon_mask(falcon, 0x3c0, 0x00000001, 0x00000000); + + return falcon->func->reset_wait_mem_scrubbing(falcon); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c index e74371dffc76..16b246fda666 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/msgq.c @@ -25,7 +25,7 @@ static void nvkm_falcon_msgq_open(struct nvkm_falcon_msgq *msgq) { - mutex_lock(&msgq->mutex); + spin_lock(&msgq->lock); msgq->position = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->tail_reg); } @@ -37,10 +37,10 @@ nvkm_falcon_msgq_close(struct nvkm_falcon_msgq *msgq, bool commit) if (commit) nvkm_falcon_wr32(falcon, msgq->tail_reg, msgq->position); - mutex_unlock(&msgq->mutex); + spin_unlock(&msgq->lock); } -static bool +bool nvkm_falcon_msgq_empty(struct nvkm_falcon_msgq *msgq) { u32 head = nvkm_falcon_rd32(msgq->qmgr->falcon, msgq->head_reg); @@ -68,7 +68,7 @@ nvkm_falcon_msgq_pop(struct nvkm_falcon_msgq *msgq, void *data, u32 size) return -EINVAL; } - nvkm_falcon_read_dmem(falcon, tail, size, 0, data); + 
nvkm_falcon_pio_rd(falcon, 0, DMEM, tail, data, 0, size); msgq->position += ALIGN(size, QUEUE_ALIGNMENT); return 0; } @@ -208,6 +208,6 @@ nvkm_falcon_msgq_new(struct nvkm_falcon_qmgr *qmgr, const char *name, msgq->qmgr = qmgr; msgq->name = name; - mutex_init(&msgq->mutex); + spin_lock_init(&msgq->lock); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h index 466188752eb0..11a24b9c8569 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/priv.h @@ -2,4 +2,12 @@ #ifndef __NVKM_FALCON_PRIV_H__ #define __NVKM_FALCON_PRIV_H__ #include <core/falcon.h> + +static inline int +nvkm_falcon_enable(struct nvkm_falcon *falcon) +{ + if (falcon->func->enable) + return falcon->func->enable(falcon); + return 0; +} #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h index 976cb7b7aa99..79f0da9e749f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/qmgr.h @@ -73,7 +73,7 @@ struct nvkm_falcon_cmdq { struct nvkm_falcon_msgq { struct nvkm_falcon_qmgr *qmgr; const char *name; - struct mutex mutex; + spinlock_t lock; u32 head_reg; u32 tail_reg; @@ -82,8 +82,7 @@ struct nvkm_falcon_msgq { u32 position; }; -#define FLCNQ_PRINTK(t,q,f,a...) \ - FLCN_PRINTK(t, (q)->qmgr->falcon, "%s: "f, (q)->name, ##a) -#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK(debug, (q), f, ##a) -#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK(error, (q), f, ##a) +#define FLCNQ_PRINTK(q,l,p,f,a...) FLCN_PRINTK((q)->qmgr->falcon, l, p, "%s: "f, (q)->name, ##a) +#define FLCNQ_DBG(q,f,a...) FLCNQ_PRINTK((q), DEBUG, info, f, ##a) +#define FLCNQ_ERR(q,f,a...) FLCNQ_PRINTK((q), ERROR, err, f, ##a) #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c index b0ee4c31414c..dd2ddc54ac60 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/v1.c @@ -64,44 +64,13 @@ nvkm_falcon_v1_load_imem(struct nvkm_falcon *falcon, void *data, u32 start, nvkm_falcon_wr32(falcon, 0x184 + (port * 16), 0); } -static void -nvkm_falcon_v1_load_emem(struct nvkm_falcon *falcon, void *data, u32 start, - u32 size, u8 port) -{ - u8 rem = size % 4; - int i; - - size -= rem; - - nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 24)); - for (i = 0; i < size / 4; i++) - nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), ((u32 *)data)[i]); - - /* - * If size is not a multiple of 4, mask the last word to ensure garbage - * does not get written - */ - if (rem) { - u32 extra = ((u32 *)data)[i]; - - nvkm_falcon_wr32(falcon, 0xac4 + (port * 8), - extra & (BIT(rem * 8) - 1)); - } -} - void nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, u32 size, u8 port) { - const struct nvkm_falcon_func *func = falcon->func; u8 rem = size % 4; int i; - if (func->emem_addr && start >= func->emem_addr) - return nvkm_falcon_v1_load_emem(falcon, data, - start - func->emem_addr, size, - port); - size -= rem; nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 24)); @@ -120,113 +89,6 @@ nvkm_falcon_v1_load_dmem(struct nvkm_falcon *falcon, void *data, u32 start, } } -static void -nvkm_falcon_v1_read_emem(struct nvkm_falcon *falcon, u32 start, u32 size, - u8 port, void *data) -{ - u8 rem = size % 4; - int i; - - size -= rem; - - nvkm_falcon_wr32(falcon, 0xac0 + (port * 8), start | (0x1 << 25)); - for (i = 0; i < size / 4; i++) - ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0xac4 + (port 
* 8)); - - /* - * If size is not a multiple of 4, mask the last word to ensure garbage - * does not get read - */ - if (rem) { - u32 extra = nvkm_falcon_rd32(falcon, 0xac4 + (port * 8)); - - for (i = size; i < size + rem; i++) { - ((u8 *)data)[i] = (u8)(extra & 0xff); - extra >>= 8; - } - } -} - -void -nvkm_falcon_v1_read_dmem(struct nvkm_falcon *falcon, u32 start, u32 size, - u8 port, void *data) -{ - const struct nvkm_falcon_func *func = falcon->func; - u8 rem = size % 4; - int i; - - if (func->emem_addr && start >= func->emem_addr) - return nvkm_falcon_v1_read_emem(falcon, start - func->emem_addr, - size, port, data); - - size -= rem; - - nvkm_falcon_wr32(falcon, 0x1c0 + (port * 8), start | (0x1 << 25)); - for (i = 0; i < size / 4; i++) - ((u32 *)data)[i] = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8)); - - /* - * If size is not a multiple of 4, mask the last word to ensure garbage - * does not get read - */ - if (rem) { - u32 extra = nvkm_falcon_rd32(falcon, 0x1c4 + (port * 8)); - - for (i = size; i < size + rem; i++) { - ((u8 *)data)[i] = (u8)(extra & 0xff); - extra >>= 8; - } - } -} - -void -nvkm_falcon_v1_bind_context(struct nvkm_falcon *falcon, struct nvkm_memory *ctx) -{ - const u32 fbif = falcon->func->fbif; - u32 inst_loc; - - /* disable instance block binding */ - if (ctx == NULL) { - nvkm_falcon_wr32(falcon, 0x10c, 0x0); - return; - } - - nvkm_falcon_wr32(falcon, 0x10c, 0x1); - - /* setup apertures - virtual */ - nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_UCODE, 0x4); - nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_VIRT, 0x0); - /* setup apertures - physical */ - nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_VID, 0x4); - nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_COH, 0x5); - nvkm_falcon_wr32(falcon, fbif + 4 * FALCON_DMAIDX_PHYS_SYS_NCOH, 0x6); - - /* Set context */ - switch (nvkm_memory_target(ctx)) { - case NVKM_MEM_TARGET_VRAM: inst_loc = 0; break; - case NVKM_MEM_TARGET_HOST: inst_loc = 2; break; - case NVKM_MEM_TARGET_NCOH: inst_loc = 3; break; - default: - WARN_ON(1); - return; - } - - /* Enable context */ - nvkm_falcon_mask(falcon, 0x048, 0x1, 0x1); - nvkm_falcon_wr32(falcon, 0x054, - ((nvkm_memory_addr(ctx) >> 12) & 0xfffffff) | - (inst_loc << 28) | (1 << 30)); - - nvkm_falcon_mask(falcon, 0x090, 0x10000, 0x10000); - nvkm_falcon_mask(falcon, 0x0a4, 0x8, 0x8); -} - -void -nvkm_falcon_v1_set_start_addr(struct nvkm_falcon *falcon, u32 start_addr) -{ - nvkm_falcon_wr32(falcon, 0x104, start_addr); -} - void nvkm_falcon_v1_start(struct nvkm_falcon *falcon) { @@ -237,75 +99,3 @@ nvkm_falcon_v1_start(struct nvkm_falcon *falcon) else nvkm_falcon_wr32(falcon, 0x100, 0x2); } - -int -nvkm_falcon_v1_wait_for_halt(struct nvkm_falcon *falcon, u32 ms) -{ - struct nvkm_device *device = falcon->owner->device; - int ret; - - ret = nvkm_wait_msec(device, ms, falcon->addr + 0x100, 0x10, 0x10); - if (ret < 0) - return ret; - - return 0; -} - -int -nvkm_falcon_v1_clear_interrupt(struct nvkm_falcon *falcon, u32 mask) -{ - struct nvkm_device *device = falcon->owner->device; - int ret; - - /* clear interrupt(s) */ - nvkm_falcon_mask(falcon, 0x004, mask, mask); - /* wait until interrupts are cleared */ - ret = nvkm_wait_msec(device, 10, falcon->addr + 0x008, mask, 0x0); - if (ret < 0) - return ret; - - return 0; -} - -static int -falcon_v1_wait_idle(struct nvkm_falcon *falcon) -{ - struct nvkm_device *device = falcon->owner->device; - int ret; - - ret = nvkm_wait_msec(device, 10, falcon->addr + 0x04c, 0xffff, 0x0); - if (ret < 0) - return ret; - - return 0; -} 
- -int -nvkm_falcon_v1_enable(struct nvkm_falcon *falcon) -{ - struct nvkm_device *device = falcon->owner->device; - int ret; - - ret = nvkm_wait_msec(device, 10, falcon->addr + 0x10c, 0x6, 0x0); - if (ret < 0) { - nvkm_error(falcon->user, "Falcon mem scrubbing timeout\n"); - return ret; - } - - ret = falcon_v1_wait_idle(falcon); - if (ret) - return ret; - - /* enable IRQs */ - nvkm_falcon_wr32(falcon, 0x010, 0xff); - - return 0; -} - -void -nvkm_falcon_v1_disable(struct nvkm_falcon *falcon) -{ - /* disable IRQs and wait for any previous code to complete */ - nvkm_falcon_wr32(falcon, 0x014, 0xff); - falcon_v1_wait_idle(falcon); -} |