Diffstat (limited to 'drivers/crypto/ccp')
 drivers/crypto/ccp/Makefile      |   3
 drivers/crypto/ccp/ccp-debugfs.c |   3
 drivers/crypto/ccp/ccp-ops.c     | 163
 drivers/crypto/ccp/psp-dev.c     |  20
 drivers/crypto/ccp/psp-dev.h     |   8
 drivers/crypto/ccp/sev-dev.c     | 210
 drivers/crypto/ccp/sev-dev.h     |   3
 drivers/crypto/ccp/sfs.c         | 311
 drivers/crypto/ccp/sfs.h         |  47
 drivers/crypto/ccp/sp-pci.c      |   1
 10 files changed, 680 insertions(+), 89 deletions(-)
diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile
index 394484929dae..a9626b30044a 100644
--- a/drivers/crypto/ccp/Makefile
+++ b/drivers/crypto/ccp/Makefile
@@ -13,7 +13,8 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \
 				   tee-dev.o \
 				   platform-access.o \
 				   dbc.o \
-				   hsti.o
+				   hsti.o \
+				   sfs.o
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
diff --git a/drivers/crypto/ccp/ccp-debugfs.c b/drivers/crypto/ccp/ccp-debugfs.c
index a1055554b47a..dc26bc22c91d 100644
--- a/drivers/crypto/ccp/ccp-debugfs.c
+++ b/drivers/crypto/ccp/ccp-debugfs.c
@@ -319,5 +319,8 @@ void ccp5_debugfs_setup(struct ccp_device *ccp)
 
 void ccp5_debugfs_destroy(void)
 {
+	mutex_lock(&ccp_debugfs_lock);
 	debugfs_remove_recursive(ccp_debugfs_dir);
+	ccp_debugfs_dir = NULL;
+	mutex_unlock(&ccp_debugfs_lock);
 }
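The ccp-debugfs.c hunk serializes teardown against a concurrent ccp5_debugfs_setup() and clears the cached dentry, so a later setup re-creates the directory instead of reusing a stale pointer. A minimal sketch of the resulting pattern, assuming the module-local ccp_debugfs_lock and ccp_debugfs_dir that ccp-debugfs.c already declares:

#include <linux/debugfs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(ccp_debugfs_lock);
static struct dentry *ccp_debugfs_dir;

/* Sketch only: debugfs_remove_recursive(NULL) is a no-op, so repeated
 * teardown calls are safe once the pointer is cleared under the lock. */
static void ccp_debugfs_teardown(void)
{
	mutex_lock(&ccp_debugfs_lock);
	debugfs_remove_recursive(ccp_debugfs_dir);
	ccp_debugfs_dir = NULL;	/* let a later setup start clean */
	mutex_unlock(&ccp_debugfs_lock);
}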
diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c
index 109b5aef4034..d78865d9d5f0 100644
--- a/drivers/crypto/ccp/ccp-ops.c
+++ b/drivers/crypto/ccp/ccp-ops.c
@@ -633,10 +633,16 @@ static noinline_for_stack int
 ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 {
 	struct ccp_aes_engine *aes = &cmd->u.aes;
-	struct ccp_dm_workarea key, ctx, final_wa, tag;
-	struct ccp_data src, dst;
-	struct ccp_data aad;
-	struct ccp_op op;
+	struct {
+		struct ccp_dm_workarea key;
+		struct ccp_dm_workarea ctx;
+		struct ccp_dm_workarea final;
+		struct ccp_dm_workarea tag;
+		struct ccp_data src;
+		struct ccp_data dst;
+		struct ccp_data aad;
+		struct ccp_op op;
+	} *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);
 	unsigned int dm_offset;
 	unsigned int authsize;
 	unsigned int jobid;
@@ -650,6 +656,9 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	struct scatterlist *p_outp, sg_outp[2];
 	struct scatterlist *p_aad;
 
+	if (!wa)
+		return -ENOMEM;
+
 	if (!aes->iv)
 		return -EINVAL;
 
@@ -696,26 +705,26 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
 	jobid = CCP_NEW_JOBID(cmd_q->ccp);
 
-	memset(&op, 0, sizeof(op));
-	op.cmd_q = cmd_q;
-	op.jobid = jobid;
-	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
-	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
-	op.init = 1;
-	op.u.aes.type = aes->type;
+	memset(&wa->op, 0, sizeof(wa->op));
+	wa->op.cmd_q = cmd_q;
+	wa->op.jobid = jobid;
+	wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	wa->op.init = 1;
+	wa->op.u.aes.type = aes->type;
 
 	/* Copy the key to the LSB */
-	ret = ccp_init_dm_workarea(&key, cmd_q,
+	ret = ccp_init_dm_workarea(&wa->key, cmd_q,
 				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_TO_DEVICE);
 	if (ret)
 		return ret;
 
 	dm_offset = CCP_SB_BYTES - aes->key_len;
-	ret = ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
+	ret = ccp_set_dm_area(&wa->key, dm_offset, aes->key, 0, aes->key_len);
 	if (ret)
 		goto e_key;
-	ret = ccp_copy_to_sb(cmd_q, &key, op.jobid, op.sb_key,
+	ret = ccp_copy_to_sb(cmd_q, &wa->key, wa->op.jobid, wa->op.sb_key,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
@@ -726,58 +735,58 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	 * There is an assumption here that the IV is 96 bits in length, plus
 	 * a nonce of 32 bits. If no IV is present, use a zeroed buffer.
 	 */
-	ret = ccp_init_dm_workarea(&ctx, cmd_q,
+	ret = ccp_init_dm_workarea(&wa->ctx, cmd_q,
 				   CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_key;
 
 	dm_offset = CCP_AES_CTX_SB_COUNT * CCP_SB_BYTES - aes->iv_len;
-	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	if (ret)
 		goto e_ctx;
 
-	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_ctx;
 	}
 
-	op.init = 1;
+	wa->op.init = 1;
 	if (aes->aad_len > 0) {
 		/* Step 1: Run a GHASH over the Additional Authenticated Data */
-		ret = ccp_init_data(&aad, cmd_q, p_aad, aes->aad_len,
+		ret = ccp_init_data(&wa->aad, cmd_q, p_aad, aes->aad_len,
 				    AES_BLOCK_SIZE,
 				    DMA_TO_DEVICE);
 		if (ret)
 			goto e_ctx;
 
-		op.u.aes.mode = CCP_AES_MODE_GHASH;
-		op.u.aes.action = CCP_AES_GHASHAAD;
+		wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+		wa->op.u.aes.action = CCP_AES_GHASHAAD;
 
-		while (aad.sg_wa.bytes_left) {
-			ccp_prepare_data(&aad, NULL, &op, AES_BLOCK_SIZE, true);
+		while (wa->aad.sg_wa.bytes_left) {
+			ccp_prepare_data(&wa->aad, NULL, &wa->op, AES_BLOCK_SIZE, true);
 
-			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_aad;
 			}
 
-			ccp_process_data(&aad, NULL, &op);
-			op.init = 0;
+			ccp_process_data(&wa->aad, NULL, &wa->op);
+			wa->op.init = 0;
 		}
 	}
 
-	op.u.aes.mode = CCP_AES_MODE_GCTR;
-	op.u.aes.action = aes->action;
+	wa->op.u.aes.mode = CCP_AES_MODE_GCTR;
+	wa->op.u.aes.action = aes->action;
 
 	if (ilen > 0) {
 		/* Step 2: Run a GCTR over the plaintext */
 		in_place = (sg_virt(p_inp) == sg_virt(p_outp)) ? true : false;
 
-		ret = ccp_init_data(&src, cmd_q, p_inp, ilen,
+		ret = ccp_init_data(&wa->src, cmd_q, p_inp, ilen,
 				    AES_BLOCK_SIZE,
 				    in_place ? DMA_BIDIRECTIONAL
 					     : DMA_TO_DEVICE);
@@ -785,52 +794,52 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 			goto e_aad;
 
 		if (in_place) {
-			dst = src;
+			wa->dst = wa->src;
 		} else {
-			ret = ccp_init_data(&dst, cmd_q, p_outp, ilen,
+			ret = ccp_init_data(&wa->dst, cmd_q, p_outp, ilen,
 					    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
 			if (ret)
 				goto e_src;
 		}
 
-		op.soc = 0;
-		op.eom = 0;
-		op.init = 1;
-		while (src.sg_wa.bytes_left) {
-			ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
-			if (!src.sg_wa.bytes_left) {
+		wa->op.soc = 0;
+		wa->op.eom = 0;
+		wa->op.init = 1;
+		while (wa->src.sg_wa.bytes_left) {
+			ccp_prepare_data(&wa->src, &wa->dst, &wa->op, AES_BLOCK_SIZE, true);
+			if (!wa->src.sg_wa.bytes_left) {
 				unsigned int nbytes = ilen % AES_BLOCK_SIZE;
 
 				if (nbytes) {
-					op.eom = 1;
-					op.u.aes.size = (nbytes * 8) - 1;
+					wa->op.eom = 1;
+					wa->op.u.aes.size = (nbytes * 8) - 1;
 				}
 			}
 
-			ret = cmd_q->ccp->vdata->perform->aes(&op);
+			ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 			if (ret) {
 				cmd->engine_error = cmd_q->cmd_error;
 				goto e_dst;
 			}
 
-			ccp_process_data(&src, &dst, &op);
-			op.init = 0;
+			ccp_process_data(&wa->src, &wa->dst, &wa->op);
+			wa->op.init = 0;
 		}
 	}
 
 	/* Step 3: Update the IV portion of the context with the original IV */
-	ret = ccp_copy_from_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_from_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			       CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
 		goto e_dst;
 	}
 
-	ret = ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
+	ret = ccp_set_dm_area(&wa->ctx, dm_offset, aes->iv, 0, aes->iv_len);
 	if (ret)
 		goto e_dst;
 
-	ret = ccp_copy_to_sb(cmd_q, &ctx, op.jobid, op.sb_ctx,
+	ret = ccp_copy_to_sb(cmd_q, &wa->ctx, wa->op.jobid, wa->op.sb_ctx,
 			     CCP_PASSTHRU_BYTESWAP_256BIT);
 	if (ret) {
 		cmd->engine_error = cmd_q->cmd_error;
@@ -840,75 +849,75 @@ ccp_run_aes_gcm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 	/* Step 4: Concatenate the lengths of the AAD and source, and
 	 * hash that 16 byte buffer.
 	 */
-	ret = ccp_init_dm_workarea(&final_wa, cmd_q, AES_BLOCK_SIZE,
+	ret = ccp_init_dm_workarea(&wa->final, cmd_q, AES_BLOCK_SIZE,
 				   DMA_BIDIRECTIONAL);
 	if (ret)
 		goto e_dst;
-	final = (__be64 *)final_wa.address;
+	final = (__be64 *)wa->final.address;
 	final[0] = cpu_to_be64(aes->aad_len * 8);
 	final[1] = cpu_to_be64(ilen * 8);
 
-	memset(&op, 0, sizeof(op));
-	op.cmd_q = cmd_q;
-	op.jobid = jobid;
-	op.sb_key = cmd_q->sb_key; /* Pre-allocated */
-	op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
-	op.init = 1;
-	op.u.aes.type = aes->type;
-	op.u.aes.mode = CCP_AES_MODE_GHASH;
-	op.u.aes.action = CCP_AES_GHASHFINAL;
-	op.src.type = CCP_MEMTYPE_SYSTEM;
-	op.src.u.dma.address = final_wa.dma.address;
-	op.src.u.dma.length = AES_BLOCK_SIZE;
-	op.dst.type = CCP_MEMTYPE_SYSTEM;
-	op.dst.u.dma.address = final_wa.dma.address;
-	op.dst.u.dma.length = AES_BLOCK_SIZE;
-	op.eom = 1;
-	op.u.aes.size = 0;
-	ret = cmd_q->ccp->vdata->perform->aes(&op);
+	memset(&wa->op, 0, sizeof(wa->op));
+	wa->op.cmd_q = cmd_q;
+	wa->op.jobid = jobid;
+	wa->op.sb_key = cmd_q->sb_key; /* Pre-allocated */
+	wa->op.sb_ctx = cmd_q->sb_ctx; /* Pre-allocated */
+	wa->op.init = 1;
+	wa->op.u.aes.type = aes->type;
+	wa->op.u.aes.mode = CCP_AES_MODE_GHASH;
+	wa->op.u.aes.action = CCP_AES_GHASHFINAL;
+	wa->op.src.type = CCP_MEMTYPE_SYSTEM;
+	wa->op.src.u.dma.address = wa->final.dma.address;
+	wa->op.src.u.dma.length = AES_BLOCK_SIZE;
+	wa->op.dst.type = CCP_MEMTYPE_SYSTEM;
+	wa->op.dst.u.dma.address = wa->final.dma.address;
+	wa->op.dst.u.dma.length = AES_BLOCK_SIZE;
+	wa->op.eom = 1;
+	wa->op.u.aes.size = 0;
+	ret = cmd_q->ccp->vdata->perform->aes(&wa->op);
 	if (ret)
 		goto e_final_wa;
 
 	if (aes->action == CCP_AES_ACTION_ENCRYPT) {
 		/* Put the ciphered tag after the ciphertext. */
-		ccp_get_dm_area(&final_wa, 0, p_tag, 0, authsize);
+		ccp_get_dm_area(&wa->final, 0, p_tag, 0, authsize);
 	} else {
 		/* Does this ciphered tag match the input? */
-		ret = ccp_init_dm_workarea(&tag, cmd_q, authsize,
+		ret = ccp_init_dm_workarea(&wa->tag, cmd_q, authsize,
 					   DMA_BIDIRECTIONAL);
 		if (ret)
 			goto e_final_wa;
-		ret = ccp_set_dm_area(&tag, 0, p_tag, 0, authsize);
+		ret = ccp_set_dm_area(&wa->tag, 0, p_tag, 0, authsize);
 		if (ret) {
-			ccp_dm_free(&tag);
+			ccp_dm_free(&wa->tag);
 			goto e_final_wa;
 		}
 
-		ret = crypto_memneq(tag.address, final_wa.address,
+		ret = crypto_memneq(wa->tag.address, wa->final.address,
 				    authsize) ? -EBADMSG : 0;
-		ccp_dm_free(&tag);
+		ccp_dm_free(&wa->tag);
 	}
 
 e_final_wa:
-	ccp_dm_free(&final_wa);
+	ccp_dm_free(&wa->final);
 
 e_dst:
 	if (ilen > 0 && !in_place)
-		ccp_free_data(&dst, cmd_q);
+		ccp_free_data(&wa->dst, cmd_q);
 
 e_src:
 	if (ilen > 0)
-		ccp_free_data(&src, cmd_q);
+		ccp_free_data(&wa->src, cmd_q);
 
 e_aad:
 	if (aes->aad_len)
-		ccp_free_data(&aad, cmd_q);
+		ccp_free_data(&wa->aad, cmd_q);
 
 e_ctx:
-	ccp_dm_free(&ctx);
+	ccp_dm_free(&wa->ctx);
 
 e_key:
-	ccp_dm_free(&key);
+	ccp_dm_free(&wa->key);
 
 	return ret;
 }
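The ccp-ops.c conversion moves eight sizeable work areas off the kernel stack into one heap allocation whose lifetime is tied to the function scope with __free(kfree) from linux/cleanup.h, so every early return (including the !wa check) frees the buffer without any new error label. A reduced sketch of the idiom, with an illustrative function and fields that are not the driver's:

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo(void)
{
	struct {
		int a, b;
	} *wa __free(kfree) = kzalloc(sizeof(*wa), GFP_KERNEL);

	if (!wa)
		return -ENOMEM;	/* cleanup handler tolerates NULL */

	wa->a = 1;
	wa->b = 2;

	return 0;		/* wa is kfree()d automatically on scope exit */
}

The existing e_key/e_ctx/... unwind labels stay as they are: they release DMA mappings, while the scope-based cleanup only owns the kzalloc'd container.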
diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c
index 1c5a7189631e..9e21da0e298a 100644
--- a/drivers/crypto/ccp/psp-dev.c
+++ b/drivers/crypto/ccp/psp-dev.c
@@ -17,6 +17,7 @@
 #include "psp-dev.h"
 #include "sev-dev.h"
 #include "tee-dev.h"
+#include "sfs.h"
 #include "platform-access.h"
 #include "dbc.h"
 #include "hsti.h"
@@ -182,6 +183,17 @@ static int psp_check_tee_support(struct psp_device *psp)
 	return 0;
 }
 
+static int psp_check_sfs_support(struct psp_device *psp)
+{
+	/* Check if device supports SFS feature */
+	if (!psp->capability.sfs) {
+		dev_dbg(psp->dev, "psp does not support SFS\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 static int psp_init(struct psp_device *psp)
 {
 	int ret;
@@ -198,6 +210,12 @@ static int psp_init(struct psp_device *psp)
 			return ret;
 	}
 
+	if (!psp_check_sfs_support(psp)) {
+		ret = sfs_dev_init(psp);
+		if (ret)
+			return ret;
+	}
+
 	if (psp->vdata->platform_access) {
 		ret = platform_access_dev_init(psp);
 		if (ret)
@@ -302,6 +320,8 @@ void psp_dev_destroy(struct sp_device *sp)
 
 	tee_dev_destroy(psp);
 
+	sfs_dev_destroy(psp);
+
 	dbc_dev_destroy(psp);
 
 	platform_access_dev_destroy(psp);
diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h
index e43ce87ede76..268c83f298cb 100644
--- a/drivers/crypto/ccp/psp-dev.h
+++ b/drivers/crypto/ccp/psp-dev.h
@@ -32,7 +32,8 @@ union psp_cap_register {
 		unsigned int sev			:1,
 			     tee			:1,
 			     dbc_thru_ext		:1,
-			     rsvd1			:4,
+			     sfs			:1,
+			     rsvd1			:3,
 			     security_reporting		:1,
 			     fused_part			:1,
 			     rsvd2			:1,
@@ -68,6 +69,7 @@ struct psp_device {
 	void *tee_data;
 	void *platform_access_data;
 	void *dbc_data;
+	void *sfs_data;
 
 	union psp_cap_register capability;
 };
@@ -118,12 +120,16 @@ struct psp_ext_request {
  * @PSP_SUB_CMD_DBC_SET_UID:       Set UID for DBC
  * @PSP_SUB_CMD_DBC_GET_PARAMETER: Get parameter from DBC
  * @PSP_SUB_CMD_DBC_SET_PARAMETER: Set parameter for DBC
+ * @PSP_SUB_CMD_SFS_GET_FW_VERS:   Get firmware versions for ASP and other MP
+ * @PSP_SUB_CMD_SFS_UPDATE:        Command to load, verify and execute SFS package
 */
 enum psp_sub_cmd {
 	PSP_SUB_CMD_DBC_GET_NONCE     = PSP_DYNAMIC_BOOST_GET_NONCE,
 	PSP_SUB_CMD_DBC_SET_UID       = PSP_DYNAMIC_BOOST_SET_UID,
 	PSP_SUB_CMD_DBC_GET_PARAMETER = PSP_DYNAMIC_BOOST_GET_PARAMETER,
 	PSP_SUB_CMD_DBC_SET_PARAMETER = PSP_DYNAMIC_BOOST_SET_PARAMETER,
+	PSP_SUB_CMD_SFS_GET_FW_VERS   = PSP_SFS_GET_FW_VERSIONS,
+	PSP_SUB_CMD_SFS_UPDATE        = PSP_SFS_UPDATE,
 };
 
 int psp_extended_mailbox_cmd(struct psp_device *psp, unsigned int timeout_msecs,
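The capability hunk carves the new sfs bit (bit 3) out of the old rsvd1:4 field, so every later bit keeps its position in the register. An illustrative decode of that word; the union below reproduces only the low bits from the hunk and is not the driver's full definition:

#include <linux/types.h>

union psp_cap_demo {
	u32 raw;
	struct {
		u32 sev		 :1,	/* bit 0 */
		    tee		 :1,	/* bit 1 */
		    dbc_thru_ext :1,	/* bit 2 */
		    sfs		 :1,	/* bit 3, formerly reserved */
		    rsvd1	 :3,
		    rest	 :25;	/* remaining fields, collapsed here */
	};
};

static bool psp_has_sfs(u32 feature_word)
{
	union psp_cap_demo cap = { .raw = feature_word };

	return cap.sfs;	/* mirrors the psp->capability.sfs test above */
}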
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 3451bada884e..65d6d0af140a 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -82,6 +82,21 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */
 static bool psp_dead;
 static int psp_timeout;
 
+enum snp_hv_fixed_pages_state {
+	ALLOCATED,
+	HV_FIXED,
+};
+
+struct snp_hv_fixed_pages_entry {
+	struct list_head list;
+	struct page *page;
+	unsigned int order;
+	bool free;
+	enum snp_hv_fixed_pages_state page_state;
+};
+
+static LIST_HEAD(snp_hv_fixed_pages);
+
 /* Trusted Memory Region (TMR):
  *   The TMR is a 1MB area that must be 1MB aligned. Use the page allocator
  *   to allocate the memory, which will return aligned memory for the specified
@@ -434,7 +449,7 @@ cleanup:
 	return rc;
 }
 
-static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
+static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order, bool locked)
 {
 	unsigned long npages = 1ul << order, paddr;
 	struct sev_device *sev;
@@ -453,7 +468,7 @@ static struct page *__snp_alloc_firmware_pages(gfp_t gfp_mask, int order)
 		return page;
 
 	paddr = __pa((unsigned long)page_address(page));
-	if (rmp_mark_pages_firmware(paddr, npages, false))
+	if (rmp_mark_pages_firmware(paddr, npages, locked))
 		return NULL;
 
 	return page;
@@ -463,7 +478,7 @@ void *snp_alloc_firmware_page(gfp_t gfp_mask)
 {
 	struct page *page;
 
-	page = __snp_alloc_firmware_pages(gfp_mask, 0);
+	page = __snp_alloc_firmware_pages(gfp_mask, 0, false);
 
 	return page ? page_address(page) : NULL;
 }
@@ -498,7 +513,7 @@ static void *sev_fw_alloc(unsigned long len)
 {
 	struct page *page;
 
-	page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len));
+	page = __snp_alloc_firmware_pages(GFP_KERNEL, get_order(len), true);
 	if (!page)
 		return NULL;
 
@@ -1073,6 +1088,165 @@ static void snp_set_hsave_pa(void *arg)
 	wrmsrq(MSR_VM_HSAVE_PA, 0);
 }
 
+/* Hypervisor Fixed pages API interface */
+static void snp_hv_fixed_pages_state_update(struct sev_device *sev,
+					    enum snp_hv_fixed_pages_state page_state)
+{
+	struct snp_hv_fixed_pages_entry *entry;
+
+	/* List is protected by sev_cmd_mutex */
+	lockdep_assert_held(&sev_cmd_mutex);
+
+	if (list_empty(&snp_hv_fixed_pages))
+		return;
+
+	list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+		entry->page_state = page_state;
+}
+
+/*
+ * Allocate HV_FIXED pages in 2MB aligned sizes to ensure that whole
+ * 2MB pages are marked as HV_FIXED.
+ */
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages)
+{
+	struct psp_device *psp_master = psp_get_master_device();
+	struct snp_hv_fixed_pages_entry *entry;
+	struct sev_device *sev;
+	unsigned int order;
+	struct page *page;
+
+	if (!psp_master || !psp_master->sev_data)
+		return NULL;
+
+	sev = psp_master->sev_data;
+
+	order = get_order(PMD_SIZE * num_2mb_pages);
+
+	/*
+	 * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+	 * also needs to be protected using the same mutex.
+	 */
+	guard(mutex)(&sev_cmd_mutex);
+
+	/*
+	 * This API uses SNP_INIT_EX to transition allocated pages to HV_Fixed
+	 * page state, so fail if SNP is already initialized.
+	 */
+	if (sev->snp_initialized)
+		return NULL;
+
+	/* Re-use freed pages that match the request */
+	list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+		/* Hypervisor fixed page allocator implements exact fit policy */
+		if (entry->order == order && entry->free) {
+			entry->free = false;
+			memset(page_address(entry->page), 0,
+			       (1 << entry->order) * PAGE_SIZE);
+			return entry->page;
+		}
+	}
+
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
+		return NULL;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		__free_pages(page, order);
+		return NULL;
+	}
+	entry->page = page;
+	entry->order = order;
+	list_add_tail(&entry->list, &snp_hv_fixed_pages);
+
+	return page;
+}
+
+void snp_free_hv_fixed_pages(struct page *page)
+{
+	struct psp_device *psp_master = psp_get_master_device();
+	struct snp_hv_fixed_pages_entry *entry, *nentry;
+
+	if (!psp_master || !psp_master->sev_data)
+		return;
+
+	/*
+	 * SNP_INIT_EX is protected by sev_cmd_mutex, therefore this list
+	 * also needs to be protected using the same mutex.
+	 */
+	guard(mutex)(&sev_cmd_mutex);
+
+	list_for_each_entry_safe(entry, nentry, &snp_hv_fixed_pages, list) {
+		if (entry->page != page)
+			continue;
+
+		/*
+		 * HV_FIXED page state cannot be changed until reboot
+		 * and the pages cannot be used by an SNP guest, so they
+		 * cannot be returned back to the page allocator.
+		 * Mark the pages as free internally to allow possible re-use.
+		 */
+		if (entry->page_state == HV_FIXED) {
+			entry->free = true;
+		} else {
+			__free_pages(page, entry->order);
+			list_del(&entry->list);
+			kfree(entry);
+		}
+		return;
+	}
+}
+
+static void snp_add_hv_fixed_pages(struct sev_device *sev, struct sev_data_range_list *range_list)
+{
+	struct snp_hv_fixed_pages_entry *entry;
+	struct sev_data_range *range;
+	int num_elements;
+
+	lockdep_assert_held(&sev_cmd_mutex);
+
+	if (list_empty(&snp_hv_fixed_pages))
+		return;
+
+	num_elements = list_count_nodes(&snp_hv_fixed_pages) +
+		       range_list->num_elements;
+
+	/*
+	 * Ensure the list of HV_FIXED pages that will be passed to firmware
+	 * does not exceed the page-sized argument buffer.
+	 */
+	if (num_elements * sizeof(*range) + sizeof(*range_list) > PAGE_SIZE) {
+		dev_warn(sev->dev, "Additional HV_Fixed pages cannot be accommodated, omitting\n");
+		return;
+	}
+
+	range = &range_list->ranges[range_list->num_elements];
+	list_for_each_entry(entry, &snp_hv_fixed_pages, list) {
+		range->base = page_to_pfn(entry->page) << PAGE_SHIFT;
+		range->page_count = 1 << entry->order;
+		range++;
+	}
+	range_list->num_elements = num_elements;
+}
+
+static void snp_leak_hv_fixed_pages(void)
+{
+	struct snp_hv_fixed_pages_entry *entry;
+
+	/* List is protected by sev_cmd_mutex */
+	lockdep_assert_held(&sev_cmd_mutex);
+
+	if (list_empty(&snp_hv_fixed_pages))
+		return;
+
+	list_for_each_entry(entry, &snp_hv_fixed_pages, list)
+		if (entry->page_state == HV_FIXED)
+			__snp_leak_pages(page_to_pfn(entry->page),
+					 1 << entry->order, false);
+}
+
 static int snp_filter_reserved_mem_regions(struct resource *rs, void *arg)
 {
 	struct sev_data_range_list *range_list = arg;
@@ -1163,6 +1337,12 @@ static int __sev_snp_init_locked(int *error)
 		return rc;
 	}
 
+	/*
+	 * Add HV_Fixed pages from other PSP sub-devices, such as SFS, to the
+	 * HV_Fixed page list.
+	 */
+	snp_add_hv_fixed_pages(sev, snp_range_list);
+
 	memset(&data, 0, sizeof(data));
 	data.init_rmp = 1;
 	data.list_paddr_en = 1;
@@ -1202,6 +1382,7 @@ static int __sev_snp_init_locked(int *error)
 		return rc;
 	}
 
+	snp_hv_fixed_pages_state_update(sev, HV_FIXED);
 	sev->snp_initialized = true;
 	dev_dbg(sev->dev, "SEV-SNP firmware initialized\n");
 
@@ -1276,9 +1457,11 @@ static int __sev_platform_init_handle_init_ex_path(struct sev_device *sev)
 
 static int __sev_platform_init_locked(int *error)
 {
-	int rc, psp_ret = SEV_RET_NO_FW_CALL;
+	int rc, psp_ret, dfflush_error;
 	struct sev_device *sev;
 
+	psp_ret = dfflush_error = SEV_RET_NO_FW_CALL;
+
 	if (!psp_master || !psp_master->sev_data)
 		return -ENODEV;
 
@@ -1320,10 +1503,10 @@ static int __sev_platform_init_locked(int *error)
 
 	/* Prepare for first SEV guest launch after INIT */
 	wbinvd_on_all_cpus();
-	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, error);
+	rc = __sev_do_cmd_locked(SEV_CMD_DF_FLUSH, NULL, &dfflush_error);
 	if (rc) {
 		dev_err(sev->dev, "SEV: DF_FLUSH failed %#x, rc %d\n",
-			*error, rc);
+			dfflush_error, rc);
 		return rc;
 	}
 
@@ -1782,11 +1965,18 @@ static int __sev_snp_shutdown_locked(int *error, bool panic)
 		return ret;
 	}
 
+	snp_leak_hv_fixed_pages();
 	sev->snp_initialized = false;
 	dev_dbg(sev->dev, "SEV-SNP firmware shutdown\n");
 
-	atomic_notifier_chain_unregister(&panic_notifier_list,
-					 &snp_panic_notifier);
+	/*
+	 * __sev_snp_shutdown_locked() deadlocks when it tries to unregister
+	 * itself during panic as the panic notifier is called with RCU read
+	 * lock held and notifier unregistration does RCU synchronization.
+	 */
+	if (!panic)
+		atomic_notifier_chain_unregister(&panic_notifier_list,
+						 &snp_panic_notifier);
 
 	/* Reset TMR size back to default */
 	sev_es_tmr_size = SEV_TMR_SIZE;
@@ -2422,7 +2612,7 @@ static void __sev_firmware_shutdown(struct sev_device *sev, bool panic)
 {
 	int error;
 
-	__sev_platform_shutdown_locked(NULL);
+	__sev_platform_shutdown_locked(&error);
 
 	if (sev_es_tmr) {
 		/*
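The allocator added above hands out 2MB-aligned blocks that SNP_INIT_EX later transitions to the HV_Fixed state: allocation must happen before SNP is initialized, and once a block is HV_Fixed it can only be recycled through the exact-fit free list, never returned to the page allocator (shutdown deliberately leaks such pages via __snp_leak_pages()). A sketch of the intended caller pattern under those assumptions; the demo names are illustrative, and sfs.c below is the real in-tree user:

#include "sev-dev.h"

static struct page *demo_page;

/* Must run before SNP_INIT_EX; asks for one 2MB HV_Fixed-eligible unit. */
static int demo_setup(void)
{
	demo_page = snp_alloc_hv_fixed_pages(1);
	return demo_page ? 0 : -ENOMEM;
}

/* Frees pre-init pages outright; HV_Fixed pages are only parked for
 * exact-fit re-use by a later snp_alloc_hv_fixed_pages() call. */
static void demo_teardown(void)
{
	snp_free_hv_fixed_pages(demo_page);
}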
diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h
index 3e4e5574e88a..28021abc85ad 100644
--- a/drivers/crypto/ccp/sev-dev.h
+++ b/drivers/crypto/ccp/sev-dev.h
@@ -65,4 +65,7 @@ void sev_dev_destroy(struct psp_device *psp);
 void sev_pci_init(void);
 void sev_pci_exit(void);
 
+struct page *snp_alloc_hv_fixed_pages(unsigned int num_2mb_pages);
+void snp_free_hv_fixed_pages(struct page *page);
+
 #endif /* __SEV_DEV_H */
diff --git a/drivers/crypto/ccp/sfs.c b/drivers/crypto/ccp/sfs.c
new file mode 100644
index 000000000000..2f4beaafe7ec
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.c
@@ -0,0 +1,311 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * AMD Secure Processor Seamless Firmware Servicing support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#include <linux/firmware.h>
+
+#include "sfs.h"
+#include "sev-dev.h"
+
+#define SFS_DEFAULT_TIMEOUT		(10 * MSEC_PER_SEC)
+#define SFS_MAX_PAYLOAD_SIZE		(2 * 1024 * 1024)
+#define SFS_NUM_2MB_PAGES_CMDBUF	(SFS_MAX_PAYLOAD_SIZE / PMD_SIZE)
+#define SFS_NUM_PAGES_CMDBUF		(SFS_MAX_PAYLOAD_SIZE / PAGE_SIZE)
+
+static DEFINE_MUTEX(sfs_ioctl_mutex);
+
+static struct sfs_misc_dev *misc_dev;
+
+static int send_sfs_cmd(struct sfs_device *sfs_dev, int msg)
+{
+	int ret;
+
+	sfs_dev->command_buf->hdr.status = 0;
+	sfs_dev->command_buf->hdr.sub_cmd_id = msg;
+
+	ret = psp_extended_mailbox_cmd(sfs_dev->psp,
+				       SFS_DEFAULT_TIMEOUT,
+				       (struct psp_ext_request *)sfs_dev->command_buf);
+	if (ret == -EIO) {
+		dev_dbg(sfs_dev->dev,
+			"msg 0x%x failed with PSP error: 0x%x, extended status: 0x%x\n",
+			msg, sfs_dev->command_buf->hdr.status,
+			*(u32 *)sfs_dev->command_buf->buf);
+	}
+
+	return ret;
+}
+
+static int send_sfs_get_fw_versions(struct sfs_device *sfs_dev)
+{
+	/*
+	 * SFS_GET_FW_VERSIONS command needs the output buffer to be
+	 * initialized to 0xC7 in every byte.
+	 */
+	memset(sfs_dev->command_buf->sfs_buffer, 0xc7, PAGE_SIZE);
+	sfs_dev->command_buf->hdr.payload_size = 2 * PAGE_SIZE;
+
+	return send_sfs_cmd(sfs_dev, PSP_SFS_GET_FW_VERSIONS);
+}
+
+static int send_sfs_update_package(struct sfs_device *sfs_dev, const char *payload_name)
+{
+	char payload_path[PAYLOAD_NAME_SIZE + sizeof("amd/")];
+	const struct firmware *firmware;
+	unsigned long package_size;
+	int ret;
+
+	/* Sanitize userspace provided payload name */
+	if (!strnchr(payload_name, PAYLOAD_NAME_SIZE, '\0'))
+		return -EINVAL;
+
+	snprintf(payload_path, sizeof(payload_path), "amd/%s", payload_name);
+
+	ret = firmware_request_nowarn(&firmware, payload_path, sfs_dev->dev);
+	if (ret < 0) {
+		dev_warn_ratelimited(sfs_dev->dev, "firmware request failed for %s (%d)\n",
+				     payload_path, ret);
+		return -ENOENT;
+	}
+
+	/*
+	 * SFS Update Package command's input buffer contains TEE_EXT_CMD_BUFFER
+	 * followed by the Update Package and it should be 64KB aligned.
+	 */
+	package_size = ALIGN(firmware->size + PAGE_SIZE, 0x10000U);
+
+	/*
+	 * SFS command buffer is a pre-allocated 2MB buffer; fail the update
+	 * package if the SFS payload is larger than the pre-allocated command
+	 * buffer.
+	 */
+	if (package_size > SFS_MAX_PAYLOAD_SIZE) {
+		dev_warn_ratelimited(sfs_dev->dev,
+				     "SFS payload size %ld larger than maximum supported payload size of %u\n",
+				     package_size, SFS_MAX_PAYLOAD_SIZE);
+		release_firmware(firmware);
+		return -E2BIG;
+	}
+
+	/*
+	 * Copy firmware data to a HV_Fixed memory region.
+	 */
+	memcpy(sfs_dev->command_buf->sfs_buffer, firmware->data, firmware->size);
+	sfs_dev->command_buf->hdr.payload_size = package_size;
+
+	release_firmware(firmware);
+
+	return send_sfs_cmd(sfs_dev, PSP_SFS_UPDATE);
+}
+
+static long sfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct sfs_user_get_fw_versions __user *sfs_get_fw_versions;
+	struct sfs_user_update_package __user *sfs_update_package;
+	struct psp_device *psp_master = psp_get_master_device();
+	char payload_name[PAYLOAD_NAME_SIZE];
+	struct sfs_device *sfs_dev;
+	int ret = 0;
+
+	if (!psp_master || !psp_master->sfs_data)
+		return -ENODEV;
+
+	sfs_dev = psp_master->sfs_data;
+
+	guard(mutex)(&sfs_ioctl_mutex);
+
+	switch (cmd) {
+	case SFSIOCFWVERS:
+		dev_dbg(sfs_dev->dev, "in SFSIOCFWVERS\n");
+
+		sfs_get_fw_versions = (struct sfs_user_get_fw_versions __user *)arg;
+
+		ret = send_sfs_get_fw_versions(sfs_dev);
+		if (ret && ret != -EIO)
+			return ret;
+
+		/*
+		 * Return SFS status and extended status back to userspace
+		 * if PSP status indicated success or command error.
+		 */
+		if (copy_to_user(&sfs_get_fw_versions->blob, sfs_dev->command_buf->sfs_buffer,
+				 PAGE_SIZE))
+			return -EFAULT;
+		if (copy_to_user(&sfs_get_fw_versions->sfs_status,
+				 &sfs_dev->command_buf->hdr.status,
+				 sizeof(sfs_get_fw_versions->sfs_status)))
+			return -EFAULT;
+		if (copy_to_user(&sfs_get_fw_versions->sfs_extended_status,
+				 &sfs_dev->command_buf->buf,
+				 sizeof(sfs_get_fw_versions->sfs_extended_status)))
+			return -EFAULT;
+		break;
+	case SFSIOCUPDATEPKG:
+		dev_dbg(sfs_dev->dev, "in SFSIOCUPDATEPKG\n");
+
+		sfs_update_package = (struct sfs_user_update_package __user *)arg;
+
+		if (copy_from_user(payload_name, sfs_update_package->payload_name,
+				   PAYLOAD_NAME_SIZE))
+			return -EFAULT;
+
+		ret = send_sfs_update_package(sfs_dev, payload_name);
+		if (ret && ret != -EIO)
+			return ret;
+
+		/*
+		 * Return SFS status and extended status back to userspace
+		 * if PSP status indicated success or command error.
+		 */
+		if (copy_to_user(&sfs_update_package->sfs_status,
+				 &sfs_dev->command_buf->hdr.status,
+				 sizeof(sfs_update_package->sfs_status)))
+			return -EFAULT;
+		if (copy_to_user(&sfs_update_package->sfs_extended_status,
+				 &sfs_dev->command_buf->buf,
+				 sizeof(sfs_update_package->sfs_extended_status)))
+			return -EFAULT;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static const struct file_operations sfs_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = sfs_ioctl,
+};
+
+static void sfs_exit(struct kref *ref)
+{
+	misc_deregister(&misc_dev->misc);
+	kfree(misc_dev);
+	misc_dev = NULL;
+}
+
+void sfs_dev_destroy(struct psp_device *psp)
+{
+	struct sfs_device *sfs_dev = psp->sfs_data;
+
+	if (!sfs_dev)
+		return;
+
+	/*
+	 * Change SFS command buffer back to the default "Write-Back" type.
+	 */
+	set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+	snp_free_hv_fixed_pages(sfs_dev->page);
+
+	if (sfs_dev->misc)
+		kref_put(&misc_dev->refcount, sfs_exit);
+
+	psp->sfs_data = NULL;
+}
+
+/* Based on sev_misc_init() */
+static int sfs_misc_init(struct sfs_device *sfs)
+{
+	struct device *dev = sfs->dev;
+	int ret;
+
+	/*
+	 * SFS feature support can be detected on multiple devices but the SFS
+	 * FW commands must be issued on the master. During probe, we do not
+	 * know the master hence we create /dev/sfs on the first device probe.
+	 */
+	if (!misc_dev) {
+		struct miscdevice *misc;
+
+		misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
+		if (!misc_dev)
+			return -ENOMEM;
+
+		misc = &misc_dev->misc;
+		misc->minor = MISC_DYNAMIC_MINOR;
+		misc->name = "sfs";
+		misc->fops = &sfs_fops;
+		misc->mode = 0600;
+
+		ret = misc_register(misc);
+		if (ret)
+			return ret;
+
+		kref_init(&misc_dev->refcount);
+	} else {
+		kref_get(&misc_dev->refcount);
+	}
+
+	sfs->misc = misc_dev;
+	dev_dbg(dev, "registered SFS device\n");
+
+	return 0;
+}
+
+int sfs_dev_init(struct psp_device *psp)
+{
+	struct device *dev = psp->dev;
+	struct sfs_device *sfs_dev;
+	struct page *page;
+	int ret = -ENOMEM;
+
+	sfs_dev = devm_kzalloc(dev, sizeof(*sfs_dev), GFP_KERNEL);
+	if (!sfs_dev)
+		return -ENOMEM;
+
+	/*
+	 * Pre-allocate 2MB command buffer for all SFS commands using
+	 * SNP HV_Fixed page allocator which also transitions the
+	 * SFS command buffer to HV_Fixed page state if SNP is enabled.
+	 */
+	page = snp_alloc_hv_fixed_pages(SFS_NUM_2MB_PAGES_CMDBUF);
+	if (!page) {
+		dev_dbg(dev, "Command Buffer HV-Fixed page allocation failed\n");
+		goto cleanup_dev;
+	}
+	sfs_dev->page = page;
+	sfs_dev->command_buf = page_address(page);
+
+	dev_dbg(dev, "Command buffer 0x%px to be marked as HV_Fixed\n", sfs_dev->command_buf);
+
+	/*
+	 * SFS command buffer must be mapped as non-cacheable.
+	 */
+	ret = set_memory_uc((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+	if (ret) {
+		dev_dbg(dev, "Set memory uc failed\n");
+		goto cleanup_cmd_buf;
+	}
+
+	dev_dbg(dev, "Command buffer 0x%px marked uncacheable\n", sfs_dev->command_buf);
+
+	psp->sfs_data = sfs_dev;
+	sfs_dev->dev = dev;
+	sfs_dev->psp = psp;
+
+	ret = sfs_misc_init(sfs_dev);
+	if (ret)
+		goto cleanup_mem_attr;
+
+	dev_notice(sfs_dev->dev, "SFS support is available\n");
+
+	return 0;
+
+cleanup_mem_attr:
+	set_memory_wb((unsigned long)sfs_dev->command_buf, SFS_NUM_PAGES_CMDBUF);
+
+cleanup_cmd_buf:
+	snp_free_hv_fixed_pages(page);
+
+cleanup_dev:
+	psp->sfs_data = NULL;
+	devm_kfree(dev, sfs_dev);
+
+	return ret;
+}
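The ioctl handler above gives userspace a thin, mutex-serialized window onto the two PSP SFS commands. A hypothetical userspace sketch, assuming the uapi header <linux/psp-sfs.h> exports SFSIOCFWVERS and the sfs_user_get_fw_versions layout the handler references (blob, sfs_status, sfs_extended_status):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/psp-sfs.h>

int main(void)
{
	struct sfs_user_get_fw_versions vers = { 0 };
	int fd = open("/dev/sfs", O_RDWR);	/* node is 0600, root only */

	if (fd < 0)
		return 1;

	/* Query ASP/MP firmware versions; the raw result lands in vers.blob. */
	if (ioctl(fd, SFSIOCFWVERS, &vers) == 0)
		printf("sfs status %u, extended status %u\n",
		       (unsigned int)vers.sfs_status,
		       (unsigned int)vers.sfs_extended_status);

	close(fd);
	return 0;
}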
diff --git a/drivers/crypto/ccp/sfs.h b/drivers/crypto/ccp/sfs.h
new file mode 100644
index 000000000000..97704c210efd
--- /dev/null
+++ b/drivers/crypto/ccp/sfs.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * AMD Platform Security Processor (PSP) Seamless Firmware Servicing (SFS)
+ * support.
+ *
+ * Copyright (C) 2025 Advanced Micro Devices, Inc.
+ *
+ * Author: Ashish Kalra <ashish.kalra@amd.com>
+ */
+
+#ifndef __SFS_H__
+#define __SFS_H__
+
+#include <uapi/linux/psp-sfs.h>
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/psp-sev.h>
+#include <linux/psp-platform-access.h>
+#include <linux/set_memory.h>
+
+#include "psp-dev.h"
+
+struct sfs_misc_dev {
+	struct kref refcount;
+	struct miscdevice misc;
+};
+
+struct sfs_command {
+	struct psp_ext_req_buffer_hdr hdr;
+	u8 buf[PAGE_SIZE - sizeof(struct psp_ext_req_buffer_hdr)];
+	u8 sfs_buffer[];
+} __packed;
+
+struct sfs_device {
+	struct device *dev;
+	struct psp_device *psp;
+
+	struct page *page;
+	struct sfs_command *command_buf;
+
+	struct sfs_misc_dev *misc;
+};
+
+void sfs_dev_destroy(struct psp_device *psp);
+int sfs_dev_init(struct psp_device *psp);
+
+#endif /* __SFS_H__ */
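Because struct sfs_command is __packed and buf pads the extended-request header out to exactly one page, sfs_buffer always begins at the second page of the 2MB command buffer; that is why send_sfs_get_fw_versions() sets payload_size to 2 * PAGE_SIZE for a one-page result. An illustrative compile-time check of that invariant (not part of the patch):

#include <linux/build_bug.h>
#include <linux/stddef.h>

static inline void sfs_command_layout_check(void)
{
	/* Header page plus payload area: sfs_buffer must begin at page 1. */
	BUILD_BUG_ON(offsetof(struct sfs_command, sfs_buffer) != PAGE_SIZE);
}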
diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c
index e1be2072d680..e7bb803912a6 100644
--- a/drivers/crypto/ccp/sp-pci.c
+++ b/drivers/crypto/ccp/sp-pci.c
@@ -453,6 +453,7 @@ static const struct psp_vdata pspv6 = {
 	.cmdresp_reg		= 0x10944,	/* C2PMSG_17 */
 	.cmdbuff_addr_lo_reg	= 0x10948,	/* C2PMSG_18 */
 	.cmdbuff_addr_hi_reg	= 0x1094c,	/* C2PMSG_19 */
+	.bootloader_info_reg	= 0x109ec,	/* C2PMSG_59 */
 	.feature_reg		= 0x109fc,	/* C2PMSG_63 */
 	.inten_reg		= 0x10510,	/* P2CMSG_INTEN */
 	.intsts_reg		= 0x10514,	/* P2CMSG_INTSTS */