Diffstat (limited to 'drivers')
924 files changed, 33308 insertions, 8951 deletions
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index 7c138a4edc03..26dd208a0d63 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -73,6 +73,18 @@ const guid_t *to_nfit_uuid(enum nfit_uuids id) } EXPORT_SYMBOL(to_nfit_uuid); +static const guid_t *to_nfit_bus_uuid(int family) +{ + if (WARN_ONCE(family == NVDIMM_BUS_FAMILY_NFIT, + "only secondary bus families can be translated\n")) + return NULL; + /* + * The index of bus UUIDs starts immediately following the last + * NVDIMM/leaf family. + */ + return to_nfit_uuid(family + NVDIMM_FAMILY_MAX); +} + static struct acpi_device *to_acpi_dev(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; @@ -362,24 +374,8 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) { static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { [NVDIMM_FAMILY_INTEL] = { - [NVDIMM_INTEL_GET_MODES] = 2, - [NVDIMM_INTEL_GET_FWINFO] = 2, - [NVDIMM_INTEL_START_FWUPDATE] = 2, - [NVDIMM_INTEL_SEND_FWUPDATE] = 2, - [NVDIMM_INTEL_FINISH_FWUPDATE] = 2, - [NVDIMM_INTEL_QUERY_FWUPDATE] = 2, - [NVDIMM_INTEL_SET_THRESHOLD] = 2, - [NVDIMM_INTEL_INJECT_ERROR] = 2, - [NVDIMM_INTEL_GET_SECURITY_STATE] = 2, - [NVDIMM_INTEL_SET_PASSPHRASE] = 2, - [NVDIMM_INTEL_DISABLE_PASSPHRASE] = 2, - [NVDIMM_INTEL_UNLOCK_UNIT] = 2, - [NVDIMM_INTEL_FREEZE_LOCK] = 2, - [NVDIMM_INTEL_SECURE_ERASE] = 2, - [NVDIMM_INTEL_OVERWRITE] = 2, - [NVDIMM_INTEL_QUERY_OVERWRITE] = 2, - [NVDIMM_INTEL_SET_MASTER_PASSPHRASE] = 2, - [NVDIMM_INTEL_MASTER_SECURE_ERASE] = 2, + [NVDIMM_INTEL_GET_MODES ... + NVDIMM_INTEL_FW_ACTIVATE_ARM] = 2, }, }; u8 id; @@ -406,7 +402,7 @@ static bool payload_dumpable(struct nvdimm *nvdimm, unsigned int func) } static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, - struct nd_cmd_pkg *call_pkg) + struct nd_cmd_pkg *call_pkg, int *family) { if (call_pkg) { int i; @@ -417,6 +413,7 @@ static int cmd_to_func(struct nfit_mem *nfit_mem, unsigned int cmd, for (i = 0; i < ARRAY_SIZE(call_pkg->nd_reserved2); i++) if (call_pkg->nd_reserved2[i]) return -EINVAL; + *family = call_pkg->nd_family; return call_pkg->nd_command; } @@ -450,13 +447,14 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, acpi_handle handle; const guid_t *guid; int func, rc, i; + int family = 0; if (cmd_rc) *cmd_rc = -EINVAL; if (cmd == ND_CMD_CALL) call_pkg = buf; - func = cmd_to_func(nfit_mem, cmd, call_pkg); + func = cmd_to_func(nfit_mem, cmd, call_pkg, &family); if (func < 0) return func; @@ -478,9 +476,17 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, cmd_name = nvdimm_bus_cmd_name(cmd); cmd_mask = nd_desc->cmd_mask; - dsm_mask = nd_desc->bus_dsm_mask; + if (cmd == ND_CMD_CALL && call_pkg->nd_family) { + family = call_pkg->nd_family; + if (!test_bit(family, &nd_desc->bus_family_mask)) + return -EINVAL; + dsm_mask = acpi_desc->family_dsm_mask[family]; + guid = to_nfit_bus_uuid(family); + } else { + dsm_mask = acpi_desc->bus_dsm_mask; + guid = to_nfit_uuid(NFIT_DEV_BUS); + } desc = nd_cmd_bus_desc(cmd); - guid = to_nfit_uuid(NFIT_DEV_BUS); handle = adev->handle; dimm_name = "bus"; } @@ -516,8 +522,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, in_buf.buffer.length = call_pkg->nd_size_in; } - dev_dbg(dev, "%s cmd: %d: func: %d input length: %d\n", - dimm_name, cmd, func, in_buf.buffer.length); + dev_dbg(dev, "%s cmd: %d: family: %d func: %d input length: %d\n", + dimm_name, cmd, family, func, 
in_buf.buffer.length); if (payload_dumpable(nvdimm, func)) print_hex_dump_debug("nvdimm in ", DUMP_PREFIX_OFFSET, 4, 4, in_buf.buffer.pointer, @@ -1238,8 +1244,9 @@ static ssize_t bus_dsm_mask_show(struct device *dev, { struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); - return sprintf(buf, "%#lx\n", nd_desc->bus_dsm_mask); + return sprintf(buf, "%#lx\n", acpi_desc->bus_dsm_mask); } static struct device_attribute dev_attr_bus_dsm_mask = __ATTR(dsm_mask, 0444, bus_dsm_mask_show, NULL); @@ -1385,8 +1392,12 @@ static umode_t nfit_visible(struct kobject *kobj, struct attribute *a, int n) struct device *dev = container_of(kobj, struct device, kobj); struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); - if (a == &dev_attr_scrub.attr && !ars_supported(nvdimm_bus)) - return 0; + if (a == &dev_attr_scrub.attr) + return ars_supported(nvdimm_bus) ? a->mode : 0; + + if (a == &dev_attr_firmware_activate_noidle.attr) + return intel_fwa_supported(nvdimm_bus) ? a->mode : 0; + return a->mode; } @@ -1395,6 +1406,7 @@ static struct attribute *acpi_nfit_attributes[] = { &dev_attr_scrub.attr, &dev_attr_hw_error_scrub.attr, &dev_attr_bus_dsm_mask.attr, + &dev_attr_firmware_activate_noidle.attr, NULL, }; @@ -1823,6 +1835,7 @@ static void populate_shutdown_status(struct nfit_mem *nfit_mem) static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, struct nfit_mem *nfit_mem, u32 device_handle) { + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; struct acpi_device *adev, *adev_dimm; struct device *dev = acpi_desc->dev; unsigned long dsm_mask, label_mask; @@ -1834,6 +1847,7 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, /* nfit test assumes 1:1 relationship between commands and dsms */ nfit_mem->dsm_mask = acpi_desc->dimm_cmd_force_en; nfit_mem->family = NVDIMM_FAMILY_INTEL; + set_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); if (dcr->valid_fields & ACPI_NFIT_CONTROL_MFG_INFO_VALID) sprintf(nfit_mem->id, "%04x-%02x-%04x-%08x", @@ -1886,10 +1900,13 @@ static int acpi_nfit_add_dimm(struct acpi_nfit_desc *acpi_desc, * Note, that checking for function0 (bit0) tells us if any commands * are reachable through this GUID. 
*/ + clear_bit(NVDIMM_FAMILY_INTEL, &nd_desc->dimm_family_mask); for (i = 0; i <= NVDIMM_FAMILY_MAX; i++) - if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) + if (acpi_check_dsm(adev_dimm->handle, to_nfit_uuid(i), 1, 1)) { + set_bit(i, &nd_desc->dimm_family_mask); if (family < 0 || i == default_dsm_family) family = i; + } /* limit the supported commands to those that are publicly documented */ nfit_mem->family = family; @@ -2007,6 +2024,26 @@ static const struct nvdimm_security_ops *acpi_nfit_get_security_ops(int family) } } +static const struct nvdimm_fw_ops *acpi_nfit_get_fw_ops( + struct nfit_mem *nfit_mem) +{ + unsigned long mask; + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; + + if (!nd_desc->fw_ops) + return NULL; + + if (nfit_mem->family != NVDIMM_FAMILY_INTEL) + return NULL; + + mask = nfit_mem->dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK; + if (mask != NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + return NULL; + + return intel_fw_ops; +} + static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) { struct nfit_mem *nfit_mem; @@ -2083,7 +2120,8 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) acpi_nfit_dimm_attribute_groups, flags, cmd_mask, flush ? flush->hint_count : 0, nfit_mem->flush_wpq, &nfit_mem->id[0], - acpi_nfit_get_security_ops(nfit_mem->family)); + acpi_nfit_get_security_ops(nfit_mem->family), + acpi_nfit_get_fw_ops(nfit_mem)); if (!nvdimm) return -ENOMEM; @@ -2147,12 +2185,23 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) { struct nvdimm_bus_descriptor *nd_desc = &acpi_desc->nd_desc; const guid_t *guid = to_nfit_uuid(NFIT_DEV_BUS); + unsigned long dsm_mask, *mask; struct acpi_device *adev; - unsigned long dsm_mask; int i; - nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; - nd_desc->bus_dsm_mask = acpi_desc->bus_nfit_cmd_force_en; + set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); + set_bit(NVDIMM_BUS_FAMILY_NFIT, &nd_desc->bus_family_mask); + + /* enable nfit_test to inject bus command emulation */ + if (acpi_desc->bus_cmd_force_en) { + nd_desc->cmd_mask = acpi_desc->bus_cmd_force_en; + mask = &nd_desc->bus_family_mask; + if (acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]) { + set_bit(NVDIMM_BUS_FAMILY_INTEL, mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } + } + adev = to_acpi_dev(acpi_desc); if (!adev) return; @@ -2160,7 +2209,6 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) for (i = ND_CMD_ARS_CAP; i <= ND_CMD_CLEAR_ERROR; i++) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) set_bit(i, &nd_desc->cmd_mask); - set_bit(ND_CMD_CALL, &nd_desc->cmd_mask); dsm_mask = (1 << ND_CMD_ARS_CAP) | @@ -2173,7 +2221,20 @@ static void acpi_nfit_init_dsms(struct acpi_nfit_desc *acpi_desc) (1 << NFIT_CMD_ARS_INJECT_GET); for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) - set_bit(i, &nd_desc->bus_dsm_mask); + set_bit(i, &acpi_desc->bus_dsm_mask); + + /* Enumerate allowed NVDIMM_BUS_FAMILY_INTEL commands */ + dsm_mask = NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; + guid = to_nfit_bus_uuid(NVDIMM_BUS_FAMILY_INTEL); + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + for_each_set_bit(i, &dsm_mask, BITS_PER_LONG) + if (acpi_check_dsm(adev->handle, guid, 1, 1ULL << i)) + set_bit(i, mask); + + if (*mask == dsm_mask) { + set_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask); + nd_desc->fw_ops = intel_bus_fw_ops; + } } static ssize_t range_index_show(struct device *dev, 
@@ -3273,7 +3334,7 @@ static void acpi_nfit_init_ars(struct acpi_nfit_desc *acpi_desc, static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) { struct nfit_spa *nfit_spa; - int rc; + int rc, do_sched_ars = 0; set_bit(ARS_VALID, &acpi_desc->scrub_flags); list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { @@ -3285,7 +3346,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) } } - list_for_each_entry(nfit_spa, &acpi_desc->spas, list) + list_for_each_entry(nfit_spa, &acpi_desc->spas, list) { switch (nfit_spa_type(nfit_spa->spa)) { case NFIT_SPA_VOLATILE: case NFIT_SPA_PM: @@ -3293,6 +3354,13 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) rc = ars_register(acpi_desc, nfit_spa); if (rc) return rc; + + /* + * Kick off background ARS if at least one + * region successfully registered ARS + */ + if (!test_bit(ARS_FAILED, &nfit_spa->ars_state)) + do_sched_ars++; break; case NFIT_SPA_BDW: /* nothing to register */ @@ -3311,8 +3379,10 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc) /* don't register unknown regions */ break; } + } - sched_ars(acpi_desc); + if (do_sched_ars) + sched_ars(acpi_desc); return 0; } @@ -3485,7 +3555,10 @@ static int __acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, return 0; } -/* prevent security commands from being issued via ioctl */ +/* + * Prevent security and firmware activate commands from being issued via + * ioctl. + */ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf) { @@ -3496,10 +3569,15 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { func = call_pkg->nd_command; if (func > NVDIMM_CMD_MAX || - (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) + (1 << func) & NVDIMM_INTEL_DENY_CMDMASK) return -EOPNOTSUPP; } + /* block all non-nfit bus commands */ + if (!nvdimm && cmd == ND_CMD_CALL && + call_pkg->nd_family != NVDIMM_BUS_FAMILY_NFIT) + return -EOPNOTSUPP; + return __acpi_nfit_clear_to_send(nd_desc, nvdimm, cmd); } @@ -3791,6 +3869,7 @@ static __init int nfit_init(void) guid_parse(UUID_NFIT_DIMM_N_HPE2, &nfit_uuid[NFIT_DEV_DIMM_N_HPE2]); guid_parse(UUID_NFIT_DIMM_N_MSFT, &nfit_uuid[NFIT_DEV_DIMM_N_MSFT]); guid_parse(UUID_NFIT_DIMM_N_HYPERV, &nfit_uuid[NFIT_DEV_DIMM_N_HYPERV]); + guid_parse(UUID_INTEL_BUS, &nfit_uuid[NFIT_BUS_INTEL]); nfit_wq = create_singlethread_workqueue("nfit"); if (!nfit_wq) diff --git a/drivers/acpi/nfit/intel.c b/drivers/acpi/nfit/intel.c index 1113b679cd7b..8dd792a55730 100644 --- a/drivers/acpi/nfit/intel.c +++ b/drivers/acpi/nfit/intel.c @@ -7,6 +7,48 @@ #include "intel.h" #include "nfit.h" +static ssize_t firmware_activate_noidle_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + return sprintf(buf, "%s\n", acpi_desc->fwa_noidle ? 
"Y" : "N"); +} + +static ssize_t firmware_activate_noidle_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t size) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + ssize_t rc; + bool val; + + rc = kstrtobool(buf, &val); + if (rc) + return rc; + if (val != acpi_desc->fwa_noidle) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_INVALID; + acpi_desc->fwa_noidle = val; + return size; +} +DEVICE_ATTR_RW(firmware_activate_noidle); + +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus) +{ + struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus); + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + unsigned long *mask; + + if (!test_bit(NVDIMM_BUS_FAMILY_INTEL, &nd_desc->bus_family_mask)) + return false; + + mask = &acpi_desc->family_dsm_mask[NVDIMM_BUS_FAMILY_INTEL]; + return *mask == NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK; +} + static unsigned long intel_security_flags(struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype) { @@ -389,3 +431,347 @@ static const struct nvdimm_security_ops __intel_security_ops = { }; const struct nvdimm_security_ops *intel_security_ops = &__intel_security_ops; + +static int intel_bus_fwa_businfo(struct nvdimm_bus_descriptor *nd_desc, + struct nd_intel_bus_fw_activate_businfo *info) +{ + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_bus_fw_activate_businfo cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate_businfo), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate_businfo), + }, + }; + int rc; + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + *info = nd_cmd.cmd; + return rc; +} + +/* The fw_ops expect to be called with the nvdimm_bus_lock() held */ +static enum nvdimm_fwa_state intel_bus_fwa_state( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct nd_intel_bus_fw_activate_businfo info; + struct device *dev = acpi_desc->dev; + enum nvdimm_fwa_state state; + int rc; + + /* + * It should not be possible for platform firmware to return + * busy because activate is a synchronous operation. Treat it + * similar to invalid, i.e. always refresh / poll the status. + */ + switch (acpi_desc->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* check if capability needs to be refreshed */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) + break; + return acpi_desc->fwa_state; + } + + /* Refresh with platform firmware */ + rc = intel_bus_fwa_businfo(nd_desc, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + if (info.activate_tmo > info.max_quiesce_tmo) + state = NVDIMM_FWA_ARM_OVERFLOW; + else + state = NVDIMM_FWA_ARMED; + break; + default: + dev_err_once(dev, "invalid firmware activate state %d\n", + info.state); + return NVDIMM_FWA_INVALID; + } + + /* + * Capability data is available in the same payload as state. It + * is expected to be static. 
+ */ + if (acpi_desc->fwa_cap == NVDIMM_FWA_CAP_INVALID) { + if (info.capability & ND_INTEL_BUS_FWA_CAP_FWQUIESCE) + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_QUIESCE; + else if (info.capability & ND_INTEL_BUS_FWA_CAP_OSQUIESCE) { + /* + * Skip hibernate cycle by default if platform + * indicates that it does not need devices to be + * quiesced. + */ + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_LIVE; + } else + acpi_desc->fwa_cap = NVDIMM_FWA_CAP_NONE; + } + + acpi_desc->fwa_state = state; + + return state; +} + +static enum nvdimm_fwa_capability intel_bus_fwa_capability( + struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + + if (acpi_desc->fwa_cap > NVDIMM_FWA_CAP_INVALID) + return acpi_desc->fwa_cap; + + if (intel_bus_fwa_state(nd_desc) > NVDIMM_FWA_INVALID) + return acpi_desc->fwa_cap; + + return NVDIMM_FWA_CAP_INVALID; +} + +static int intel_bus_fwa_activate(struct nvdimm_bus_descriptor *nd_desc) +{ + struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc); + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_bus_fw_activate cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_BUS_INTEL_FW_ACTIVATE, + .nd_family = NVDIMM_BUS_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.iodev_state), + .nd_size_out = + sizeof(struct nd_intel_bus_fw_activate), + .nd_fw_size = + sizeof(struct nd_intel_bus_fw_activate), + }, + /* + * Even though activate is run from a suspended context, + * for safety, still ask platform firmware to force + * quiesce devices by default. Let a module + * parameter override that policy. + */ + .cmd = { + .iodev_state = acpi_desc->fwa_noidle + ? ND_INTEL_BUS_FWA_IODEV_OS_IDLE + : ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE, + }, + }; + int rc; + + switch (intel_bus_fwa_state(nd_desc)) { + case NVDIMM_FWA_ARMED: + case NVDIMM_FWA_ARM_OVERFLOW: + break; + default: + return -ENXIO; + } + + rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), + NULL); + + /* + * Whether the command succeeded, or failed, the agent checking + * for the result needs to query the DIMMs individually. 
+ * Increment the activation count to invalidate all the DIMM + * states at once (it's otherwise not possible to take + * acpi_desc->init_mutex in this context) + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + acpi_desc->fwa_count++; + + dev_dbg(acpi_desc->dev, "result: %d\n", rc); + + return rc; +} + +static const struct nvdimm_bus_fw_ops __intel_bus_fw_ops = { + .activate_state = intel_bus_fwa_state, + .capability = intel_bus_fwa_capability, + .activate = intel_bus_fwa_activate, +}; + +const struct nvdimm_bus_fw_ops *intel_bus_fw_ops = &__intel_bus_fw_ops; + +static int intel_fwa_dimminfo(struct nvdimm *nvdimm, + struct nd_intel_fw_activate_dimminfo *info) +{ + struct { + struct nd_cmd_pkg pkg; + struct nd_intel_fw_activate_dimminfo cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_out = + sizeof(struct nd_intel_fw_activate_dimminfo), + .nd_fw_size = + sizeof(struct nd_intel_fw_activate_dimminfo), + }, + }; + int rc; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + *info = nd_cmd.cmd; + return rc; +} + +static enum nvdimm_fwa_state intel_fwa_state(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct nd_intel_fw_activate_dimminfo info; + int rc; + + /* + * Similar to the bus state, since activate is synchronous the + * busy state should resolve within the context of 'activate'. + */ + switch (nfit_mem->fwa_state) { + case NVDIMM_FWA_INVALID: + case NVDIMM_FWA_BUSY: + break; + default: + /* If no activations occurred the old state is still valid */ + if (nfit_mem->fwa_count == acpi_desc->fwa_count) + return nfit_mem->fwa_state; + } + + rc = intel_fwa_dimminfo(nvdimm, &info); + if (rc) + return NVDIMM_FWA_INVALID; + + switch (info.state) { + case ND_INTEL_FWA_IDLE: + nfit_mem->fwa_state = NVDIMM_FWA_IDLE; + break; + case ND_INTEL_FWA_BUSY: + nfit_mem->fwa_state = NVDIMM_FWA_BUSY; + break; + case ND_INTEL_FWA_ARMED: + nfit_mem->fwa_state = NVDIMM_FWA_ARMED; + break; + default: + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + break; + } + + switch (info.result) { + case ND_INTEL_DIMM_FWA_NONE: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NONE; + break; + case ND_INTEL_DIMM_FWA_SUCCESS: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_SUCCESS; + break; + case ND_INTEL_DIMM_FWA_NOTSTAGED: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NOTSTAGED; + break; + case ND_INTEL_DIMM_FWA_NEEDRESET: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_NEEDRESET; + break; + case ND_INTEL_DIMM_FWA_MEDIAFAILED: + case ND_INTEL_DIMM_FWA_ABORT: + case ND_INTEL_DIMM_FWA_NOTSUPP: + case ND_INTEL_DIMM_FWA_ERROR: + default: + nfit_mem->fwa_result = NVDIMM_FWA_RESULT_FAIL; + break; + } + + nfit_mem->fwa_count = acpi_desc->fwa_count; + + return nfit_mem->fwa_state; +} + +static enum nvdimm_fwa_result intel_fwa_result(struct nvdimm *nvdimm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + + if (nfit_mem->fwa_count == acpi_desc->fwa_count + && nfit_mem->fwa_result > NVDIMM_FWA_RESULT_INVALID) + return nfit_mem->fwa_result; + + if (intel_fwa_state(nvdimm) > NVDIMM_FWA_INVALID) + return nfit_mem->fwa_result; + + return NVDIMM_FWA_RESULT_INVALID; +} + +static int intel_fwa_arm(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arm) +{ + struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); + struct acpi_nfit_desc *acpi_desc = nfit_mem->acpi_desc; + struct { + struct 
nd_cmd_pkg pkg; + struct nd_intel_fw_activate_arm cmd; + } nd_cmd = { + .pkg = { + .nd_command = NVDIMM_INTEL_FW_ACTIVATE_ARM, + .nd_family = NVDIMM_FAMILY_INTEL, + .nd_size_in = sizeof(nd_cmd.cmd.activate_arm), + .nd_size_out = + sizeof(struct nd_intel_fw_activate_arm), + .nd_fw_size = + sizeof(struct nd_intel_fw_activate_arm), + }, + .cmd = { + .activate_arm = arm == NVDIMM_FWA_ARM + ? ND_INTEL_DIMM_FWA_ARM + : ND_INTEL_DIMM_FWA_DISARM, + }, + }; + int rc; + + switch (intel_fwa_state(nvdimm)) { + case NVDIMM_FWA_INVALID: + return -ENXIO; + case NVDIMM_FWA_BUSY: + return -EBUSY; + case NVDIMM_FWA_IDLE: + if (arm == NVDIMM_FWA_DISARM) + return 0; + break; + case NVDIMM_FWA_ARMED: + if (arm == NVDIMM_FWA_ARM) + return 0; + break; + default: + return -ENXIO; + } + + /* + * Invalidate the bus-level state, now that we're committed to + * changing the 'arm' state. + */ + acpi_desc->fwa_state = NVDIMM_FWA_INVALID; + nfit_mem->fwa_state = NVDIMM_FWA_INVALID; + + rc = nvdimm_ctl(nvdimm, ND_CMD_CALL, &nd_cmd, sizeof(nd_cmd), NULL); + + dev_dbg(acpi_desc->dev, "%s result: %d\n", arm == NVDIMM_FWA_ARM + ? "arm" : "disarm", rc); + return rc; +} + +static const struct nvdimm_fw_ops __intel_fw_ops = { + .activate_state = intel_fwa_state, + .activate_result = intel_fwa_result, + .arm = intel_fwa_arm, +}; + +const struct nvdimm_fw_ops *intel_fw_ops = &__intel_fw_ops; diff --git a/drivers/acpi/nfit/intel.h b/drivers/acpi/nfit/intel.h index 0aca682ab9d7..b768234ccebc 100644 --- a/drivers/acpi/nfit/intel.h +++ b/drivers/acpi/nfit/intel.h @@ -111,4 +111,65 @@ struct nd_intel_master_secure_erase { u8 passphrase[ND_INTEL_PASSPHRASE_SIZE]; u32 status; } __packed; + +#define ND_INTEL_FWA_IDLE 0 +#define ND_INTEL_FWA_ARMED 1 +#define ND_INTEL_FWA_BUSY 2 + +#define ND_INTEL_DIMM_FWA_NONE 0 +#define ND_INTEL_DIMM_FWA_NOTSTAGED 1 +#define ND_INTEL_DIMM_FWA_SUCCESS 2 +#define ND_INTEL_DIMM_FWA_NEEDRESET 3 +#define ND_INTEL_DIMM_FWA_MEDIAFAILED 4 +#define ND_INTEL_DIMM_FWA_ABORT 5 +#define ND_INTEL_DIMM_FWA_NOTSUPP 6 +#define ND_INTEL_DIMM_FWA_ERROR 7 + +struct nd_intel_fw_activate_dimminfo { + u32 status; + u16 result; + u8 state; + u8 reserved[7]; +} __packed; + +#define ND_INTEL_DIMM_FWA_ARM 1 +#define ND_INTEL_DIMM_FWA_DISARM 0 + +struct nd_intel_fw_activate_arm { + u8 activate_arm; + u32 status; +} __packed; + +/* Root device command payloads */ +#define ND_INTEL_BUS_FWA_CAP_FWQUIESCE (1 << 0) +#define ND_INTEL_BUS_FWA_CAP_OSQUIESCE (1 << 1) +#define ND_INTEL_BUS_FWA_CAP_RESET (1 << 2) + +struct nd_intel_bus_fw_activate_businfo { + u32 status; + u16 reserved; + u8 state; + u8 capability; + u64 activate_tmo; + u64 cpu_quiesce_tmo; + u64 io_quiesce_tmo; + u64 max_quiesce_tmo; +} __packed; + +#define ND_INTEL_BUS_FWA_STATUS_NOARM (6 | 1 << 16) +#define ND_INTEL_BUS_FWA_STATUS_BUSY (6 | 2 << 16) +#define ND_INTEL_BUS_FWA_STATUS_NOFW (6 | 3 << 16) +#define ND_INTEL_BUS_FWA_STATUS_TMO (6 | 4 << 16) +#define ND_INTEL_BUS_FWA_STATUS_NOIDLE (6 | 5 << 16) +#define ND_INTEL_BUS_FWA_STATUS_ABORT (6 | 6 << 16) + +#define ND_INTEL_BUS_FWA_IODEV_FORCE_IDLE (0) +#define ND_INTEL_BUS_FWA_IODEV_OS_IDLE (1) +struct nd_intel_bus_fw_activate { + u8 iodev_state; + u32 status; +} __packed; + +extern const struct nvdimm_fw_ops *intel_fw_ops; +extern const struct nvdimm_bus_fw_ops *intel_bus_fw_ops; #endif diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index a303f0123394..c674f3df9be7 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -18,6 +18,7 @@ /* 
https://pmem.io/documents/NVDIMM_DSM_Interface-V1.6.pdf */ #define UUID_NFIT_DIMM "4309ac30-0d11-11e4-9191-0800200c9a66" +#define UUID_INTEL_BUS "c7d8acd4-2df8-4b82-9f65-a325335af149" /* https://github.com/HewlettPackard/hpe-nvm/blob/master/Documentation/ */ #define UUID_NFIT_DIMM_N_HPE1 "9002c334-acf3-4c0e-9642-a235f0d53bc6" @@ -33,7 +34,6 @@ | ACPI_NFIT_MEM_RESTORE_FAILED | ACPI_NFIT_MEM_FLUSH_FAILED \ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) -#define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV #define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ @@ -66,6 +66,13 @@ enum nvdimm_family_cmds { NVDIMM_INTEL_QUERY_OVERWRITE = 26, NVDIMM_INTEL_SET_MASTER_PASSPHRASE = 27, NVDIMM_INTEL_MASTER_SECURE_ERASE = 28, + NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO = 29, + NVDIMM_INTEL_FW_ACTIVATE_ARM = 30, +}; + +enum nvdimm_bus_family_cmds { + NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO = 1, + NVDIMM_BUS_INTEL_FW_ACTIVATE = 2, }; #define NVDIMM_INTEL_SECURITY_CMDMASK \ @@ -76,13 +83,22 @@ enum nvdimm_family_cmds { | 1 << NVDIMM_INTEL_SET_MASTER_PASSPHRASE \ | 1 << NVDIMM_INTEL_MASTER_SECURE_ERASE) +#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \ +(1 << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | 1 << NVDIMM_INTEL_FW_ACTIVATE_ARM) + +#define NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK \ +(1 << NVDIMM_BUS_INTEL_FW_ACTIVATE_BUSINFO | 1 << NVDIMM_BUS_INTEL_FW_ACTIVATE) + #define NVDIMM_INTEL_CMDMASK \ (NVDIMM_STANDARD_CMDMASK | 1 << NVDIMM_INTEL_GET_MODES \ | 1 << NVDIMM_INTEL_GET_FWINFO | 1 << NVDIMM_INTEL_START_FWUPDATE \ | 1 << NVDIMM_INTEL_SEND_FWUPDATE | 1 << NVDIMM_INTEL_FINISH_FWUPDATE \ | 1 << NVDIMM_INTEL_QUERY_FWUPDATE | 1 << NVDIMM_INTEL_SET_THRESHOLD \ | 1 << NVDIMM_INTEL_INJECT_ERROR | 1 << NVDIMM_INTEL_LATCH_SHUTDOWN \ - | NVDIMM_INTEL_SECURITY_CMDMASK) + | NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) + +#define NVDIMM_INTEL_DENY_CMDMASK \ +(NVDIMM_INTEL_SECURITY_CMDMASK | NVDIMM_INTEL_FW_ACTIVATE_CMDMASK) enum nfit_uuids { /* for simplicity alias the uuid index with the family id */ @@ -91,6 +107,11 @@ enum nfit_uuids { NFIT_DEV_DIMM_N_HPE2 = NVDIMM_FAMILY_HPE2, NFIT_DEV_DIMM_N_MSFT = NVDIMM_FAMILY_MSFT, NFIT_DEV_DIMM_N_HYPERV = NVDIMM_FAMILY_HYPERV, + /* + * to_nfit_bus_uuid() expects to translate bus uuid family ids + * to a UUID index using NVDIMM_FAMILY_MAX as an offset + */ + NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL, NFIT_SPA_VOLATILE, NFIT_SPA_PM, NFIT_SPA_DCR, @@ -199,6 +220,9 @@ struct nfit_mem { struct list_head list; struct acpi_device *adev; struct acpi_nfit_desc *acpi_desc; + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_result fwa_result; + int fwa_count; char id[NFIT_DIMM_ID_LEN+1]; struct resource *flush_wpq; unsigned long dsm_mask; @@ -238,11 +262,17 @@ struct acpi_nfit_desc { unsigned long scrub_flags; unsigned long dimm_cmd_force_en; unsigned long bus_cmd_force_en; - unsigned long bus_nfit_cmd_force_en; + unsigned long bus_dsm_mask; + unsigned long family_dsm_mask[NVDIMM_BUS_FAMILY_MAX + 1]; unsigned int platform_cap; unsigned int scrub_tmo; int (*blk_do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, void *iobuf, u64 len, int rw); + enum nvdimm_fwa_state fwa_state; + enum nvdimm_fwa_capability fwa_cap; + int fwa_count; + bool fwa_noidle; + bool fwa_nosuspend; }; enum scrub_mode { @@ -345,4 +375,6 @@ void __acpi_nvdimm_notify(struct device *dev, u32 event); int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); void acpi_nfit_desc_init(struct acpi_nfit_desc 
*acpi_desc, struct device *dev); +bool intel_fwa_supported(struct nvdimm_bus *nvdimm_bus); +extern struct device_attribute dev_attr_firmware_activate_noidle; #endif /* __NFIT_H__ */ diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c index 01333af13bbb..7e261224ff10 100644 --- a/drivers/block/rsxx/core.c +++ b/drivers/block/rsxx/core.c @@ -627,7 +627,7 @@ static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card) } static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev, - enum pci_channel_state error) + pci_channel_state_t error) { int st; diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e1b22fe0916c..737c0b6b24ea 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -3080,8 +3080,6 @@ static int __ipmi_bmc_register(struct ipmi_smi *intf, rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { - kfree(intf->my_dev_name); - intf->my_dev_name = NULL; dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", rv); goto out_free_my_dev_name; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 198b65d45c5e..0416b9c9d410 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -22,11 +22,6 @@ * and drives the real SSIF state machine. */ -/* - * TODO: Figure out how to use SMB alerts. This will require a new - * interface into the I2C driver, I believe. - */ - #define pr_fmt(fmt) "ipmi_ssif: " fmt #define dev_fmt(fmt) "ipmi_ssif: " fmt diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 55986e10a124..f78156d93c3f 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -232,12 +232,17 @@ static int set_param_str(const char *val, const struct kernel_param *kp) static int get_param_str(char *buffer, const struct kernel_param *kp) { action_fn fn = (action_fn) kp->arg; - int rv; + int rv, len; rv = fn(NULL, buffer); if (rv) return rv; - return strlen(buffer); + + len = strlen(buffer); + buffer[len++] = '\n'; + buffer[len] = 0; + + return len; } diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 326f91b2dda9..4026fac9fac3 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -50,7 +50,7 @@ source "drivers/clk/versatile/Kconfig" config CLK_HSDK bool "PLL Driver for HSDK platform" depends on OF || COMPILE_TEST - depends on IOMEM + depends on HAS_IOMEM help This driver supports the HSDK core, system, ddr, tunnel and hdmi PLLs control. @@ -121,7 +121,6 @@ config COMMON_CLK_SI5351 tristate "Clock driver for SiLabs 5351A/B/C" depends on I2C select REGMAP_I2C - select RATIONAL help This driver supports Silicon Labs 5351A/B/C programmable clock generators. @@ -163,7 +162,6 @@ config COMMON_CLK_CDCE706 tristate "Clock driver for TI CDCE706 clock synthesizer" depends on I2C select REGMAP_I2C - select RATIONAL help This driver supports TI CDCE706 programmable 3-PLL clock synthesizer. 
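The nfit hunks above introduce two small mechanisms worth seeing in isolation: to_nfit_bus_uuid() translates a secondary bus family id into a UUID-table index by offsetting with NVDIMM_FAMILY_MAX (mirrored by NFIT_BUS_INTEL = NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL in nfit.h), and acpi_nfit_get_fw_ops() only exposes the Intel firmware-activate ops when both activate DSMs were discovered on the DIMM. The following is a standalone sketch, not kernel code: the numeric values chosen for NVDIMM_FAMILY_MAX and NVDIMM_BUS_FAMILY_INTEL are illustrative stand-ins for the UAPI definitions, while the FW_ACTIVATE function numbers come from the nfit.h hunk itself.

/*
 * Sketch of the bus-family -> UUID-index translation and the
 * firmware-activate command-mask gate.  NVDIMM_FAMILY_MAX and
 * NVDIMM_BUS_FAMILY_* values below are assumed for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define NVDIMM_FAMILY_MAX        4   /* assumed: last DIMM/leaf family id */
#define NVDIMM_BUS_FAMILY_NFIT   0
#define NVDIMM_BUS_FAMILY_INTEL  1   /* assumed value */

/* Bus UUIDs are indexed immediately after the last leaf family. */
#define NFIT_BUS_INTEL (NVDIMM_FAMILY_MAX + NVDIMM_BUS_FAMILY_INTEL)

static int to_nfit_bus_uuid_index(int family)
{
	/* Only secondary bus families can be translated. */
	if (family == NVDIMM_BUS_FAMILY_NFIT)
		return -1;
	return family + NVDIMM_FAMILY_MAX;
}

/* DIMM-level firmware-activate functions, as defined in the nfit.h hunk. */
#define NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO 29
#define NVDIMM_INTEL_FW_ACTIVATE_ARM      30
#define NVDIMM_INTEL_FW_ACTIVATE_CMDMASK \
	(1UL << NVDIMM_INTEL_FW_ACTIVATE_DIMMINFO | \
	 1UL << NVDIMM_INTEL_FW_ACTIVATE_ARM)

/* fw_ops are offered only when *both* activate DSMs were discovered. */
static int fw_activate_supported(unsigned long dimm_dsm_mask)
{
	unsigned long mask = dimm_dsm_mask & NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;

	return mask == NVDIMM_INTEL_FW_ACTIVATE_CMDMASK;
}

int main(void)
{
	assert(to_nfit_bus_uuid_index(NVDIMM_BUS_FAMILY_INTEL) == NFIT_BUS_INTEL);
	printf("Intel bus UUID index: %d\n", NFIT_BUS_INTEL);
	printf("activate supported (ARM bit only): %d\n",
	       fw_activate_supported(1UL << NVDIMM_INTEL_FW_ACTIVATE_ARM));
	return 0;
}

The same all-bits-present check is what intel_fwa_supported() applies at the bus level against NVDIMM_BUS_INTEL_FW_ACTIVATE_CMDMASK before advertising the firmware_activate_noidle attribute.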
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index ca9af11d3391..da8fcf147eb1 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o obj-$(CONFIG_COMMON_CLK_CS2000_CP) += clk-cs2000-cp.o obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o +obj-$(CONFIG_ARCH_SPARX5) += clk-sparx5.o obj-$(CONFIG_COMMON_CLK_FIXED_MMIO) += clk-fixed-mmio.o obj-$(CONFIG_COMMON_CLK_FSL_SAI) += clk-fsl-sai.o obj-$(CONFIG_COMMON_CLK_GEMINI) += clk-gemini.o diff --git a/drivers/clk/actions/owl-s500.c b/drivers/clk/actions/owl-s500.c index e2007ac4d235..61bb224f6330 100644 --- a/drivers/clk/actions/owl-s500.c +++ b/drivers/clk/actions/owl-s500.c @@ -23,8 +23,10 @@ #include "owl-gate.h" #include "owl-mux.h" #include "owl-pll.h" +#include "owl-reset.h" #include <dt-bindings/clock/actions,s500-cmu.h> +#include <dt-bindings/reset/actions,s500-reset.h> #define CMU_COREPLL (0x0000) #define CMU_DEVPLL (0x0004) @@ -175,6 +177,8 @@ static OWL_MUX(dev_clk, "dev_clk", dev_clk_mux_p, CMU_DEVPLL, 12, 1, CLK_SET_RAT static OWL_MUX(ahbprediv_clk, "ahbprediv_clk", ahbprediv_clk_mux_p, CMU_BUSCLK1, 8, 3, CLK_SET_RATE_PARENT); /* gate clocks */ +static OWL_GATE(gpio_clk, "gpio_clk", "apb_clk", CMU_DEVCLKEN0, 18, 0, 0); +static OWL_GATE(dmac_clk, "dmac_clk", "h_clk", CMU_DEVCLKEN0, 1, 0, 0); static OWL_GATE(spi0_clk, "spi0_clk", "ahb_clk", CMU_DEVCLKEN1, 10, 0, CLK_IGNORE_UNUSED); static OWL_GATE(spi1_clk, "spi1_clk", "ahb_clk", CMU_DEVCLKEN1, 11, 0, CLK_IGNORE_UNUSED); static OWL_GATE(spi2_clk, "spi2_clk", "ahb_clk", CMU_DEVCLKEN1, 12, 0, CLK_IGNORE_UNUSED); @@ -183,7 +187,8 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0); static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0); /* divider clocks */ -static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); +static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0); +static OWL_DIVIDER(apb_clk, "apb_clk", "ahb_clk", CMU_BUSCLK1, 14, 2, NULL, 0, 0); static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0); /* factor clocks */ @@ -428,6 +433,9 @@ static struct owl_clk_common *s500_clks[] = { &spdif_clk.common, &nand_clk.common, &ecc_clk.common, + &apb_clk.common, + &dmac_clk.common, + &gpio_clk.common, }; static struct clk_hw_onecell_data s500_hw_clks = { @@ -484,24 +492,103 @@ static struct clk_hw_onecell_data s500_hw_clks = { [CLK_SPDIF] = &spdif_clk.common.hw, [CLK_NAND] = &nand_clk.common.hw, [CLK_ECC] = &ecc_clk.common.hw, + [CLK_APB] = &apb_clk.common.hw, + [CLK_DMAC] = &dmac_clk.common.hw, + [CLK_GPIO] = &gpio_clk.common.hw, }, .num = CLK_NR_CLKS, }; +static const struct owl_reset_map s500_resets[] = { + [RESET_DMAC] = { CMU_DEVRST0, BIT(0) }, + [RESET_NORIF] = { CMU_DEVRST0, BIT(1) }, + [RESET_DDR] = { CMU_DEVRST0, BIT(2) }, + [RESET_NANDC] = { CMU_DEVRST0, BIT(3) }, + [RESET_SD0] = { CMU_DEVRST0, BIT(4) }, + [RESET_SD1] = { CMU_DEVRST0, BIT(5) }, + [RESET_PCM1] = { CMU_DEVRST0, BIT(6) }, + [RESET_DE] = { CMU_DEVRST0, BIT(7) }, + [RESET_LCD] = { CMU_DEVRST0, BIT(8) }, + [RESET_SD2] = { CMU_DEVRST0, BIT(9) }, + [RESET_DSI] = { CMU_DEVRST0, BIT(10) }, + [RESET_CSI] = { CMU_DEVRST0, BIT(11) }, + [RESET_BISP] = { CMU_DEVRST0, BIT(12) }, + [RESET_KEY] = { CMU_DEVRST0, BIT(14) }, + [RESET_GPIO] = { CMU_DEVRST0, BIT(15) }, + [RESET_AUDIO] = { CMU_DEVRST0, BIT(17) }, + [RESET_PCM0] = { CMU_DEVRST0, BIT(18) }, + 
[RESET_VDE] = { CMU_DEVRST0, BIT(19) }, + [RESET_VCE] = { CMU_DEVRST0, BIT(20) }, + [RESET_GPU3D] = { CMU_DEVRST0, BIT(22) }, + [RESET_NIC301] = { CMU_DEVRST0, BIT(23) }, + [RESET_LENS] = { CMU_DEVRST0, BIT(26) }, + [RESET_PERIPHRESET] = { CMU_DEVRST0, BIT(27) }, + [RESET_USB2_0] = { CMU_DEVRST1, BIT(0) }, + [RESET_TVOUT] = { CMU_DEVRST1, BIT(1) }, + [RESET_HDMI] = { CMU_DEVRST1, BIT(2) }, + [RESET_HDCP2TX] = { CMU_DEVRST1, BIT(3) }, + [RESET_UART6] = { CMU_DEVRST1, BIT(4) }, + [RESET_UART0] = { CMU_DEVRST1, BIT(5) }, + [RESET_UART1] = { CMU_DEVRST1, BIT(6) }, + [RESET_UART2] = { CMU_DEVRST1, BIT(7) }, + [RESET_SPI0] = { CMU_DEVRST1, BIT(8) }, + [RESET_SPI1] = { CMU_DEVRST1, BIT(9) }, + [RESET_SPI2] = { CMU_DEVRST1, BIT(10) }, + [RESET_SPI3] = { CMU_DEVRST1, BIT(11) }, + [RESET_I2C0] = { CMU_DEVRST1, BIT(12) }, + [RESET_I2C1] = { CMU_DEVRST1, BIT(13) }, + [RESET_USB3] = { CMU_DEVRST1, BIT(14) }, + [RESET_UART3] = { CMU_DEVRST1, BIT(15) }, + [RESET_UART4] = { CMU_DEVRST1, BIT(16) }, + [RESET_UART5] = { CMU_DEVRST1, BIT(17) }, + [RESET_I2C2] = { CMU_DEVRST1, BIT(18) }, + [RESET_I2C3] = { CMU_DEVRST1, BIT(19) }, + [RESET_ETHERNET] = { CMU_DEVRST1, BIT(20) }, + [RESET_CHIPID] = { CMU_DEVRST1, BIT(21) }, + [RESET_USB2_1] = { CMU_DEVRST1, BIT(22) }, + [RESET_WD0RESET] = { CMU_DEVRST1, BIT(24) }, + [RESET_WD1RESET] = { CMU_DEVRST1, BIT(25) }, + [RESET_WD2RESET] = { CMU_DEVRST1, BIT(26) }, + [RESET_WD3RESET] = { CMU_DEVRST1, BIT(27) }, + [RESET_DBG0RESET] = { CMU_DEVRST1, BIT(28) }, + [RESET_DBG1RESET] = { CMU_DEVRST1, BIT(29) }, + [RESET_DBG2RESET] = { CMU_DEVRST1, BIT(30) }, + [RESET_DBG3RESET] = { CMU_DEVRST1, BIT(31) }, +}; + static struct owl_clk_desc s500_clk_desc = { .clks = s500_clks, .num_clks = ARRAY_SIZE(s500_clks), .hw_clks = &s500_hw_clks, + + .resets = s500_resets, + .num_resets = ARRAY_SIZE(s500_resets), }; static int s500_clk_probe(struct platform_device *pdev) { struct owl_clk_desc *desc; + struct owl_reset *reset; + int ret; desc = &s500_clk_desc; owl_clk_regmap_init(pdev, desc); + reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL); + if (!reset) + return -ENOMEM; + + reset->rcdev.of_node = pdev->dev.of_node; + reset->rcdev.ops = &owl_reset_ops; + reset->rcdev.nr_resets = desc->num_resets; + reset->reset_map = desc->resets; + reset->regmap = desc->regmap; + + ret = devm_reset_controller_register(&pdev->dev, &reset->rcdev); + if (ret) + dev_err(&pdev->dev, "Failed to register reset controller\n"); + return owl_clk_probe(&pdev->dev, desc->hw_clks); } diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index 8b90357f2a93..79301e1c1c36 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -23,3 +23,4 @@ obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o +obj-$(CONFIG_SOC_SAMA7G5) += sama7g5.o diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c index 38bdb4981315..2c3d8e6ca63c 100644 --- a/drivers/clk/at91/at91rm9200.c +++ b/drivers/clk/at91/at91rm9200.c @@ -160,7 +160,8 @@ static void __init at91rm9200_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 4, i, - &at91rm9200_programmable_layout); + &at91rm9200_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c index 6d0723aa8b13..bb81ff731ad8 100644 --- a/drivers/clk/at91/at91sam9260.c +++ b/drivers/clk/at91/at91sam9260.c @@ -436,7 
+436,8 @@ static void __init at91sam926x_pmc_setup(struct device_node *np, hw = at91_clk_register_programmable(regmap, name, parent_names, 4, i, - &at91rm9200_programmable_layout); + &at91rm9200_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c index 9873b583c260..c88ee20bee31 100644 --- a/drivers/clk/at91/at91sam9g45.c +++ b/drivers/clk/at91/at91sam9g45.c @@ -111,7 +111,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; @@ -181,7 +181,8 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 5, i, - &at91sam9g45_programmable_layout); + &at91sam9g45_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c index 630dc5d87171..93f7eb216122 100644 --- a/drivers/clk/at91/at91sam9n12.c +++ b/drivers/clk/at91/at91sam9n12.c @@ -124,7 +124,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; @@ -199,7 +199,8 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 5, i, - &at91sam9x5_programmable_layout); + &at91sam9x5_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; @@ -222,7 +223,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np) at91sam9n12_periphck[i].n, "masterck", at91sam9n12_periphck[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c index 0d1cc44b056f..a343eb69bb35 100644 --- a/drivers/clk/at91/at91sam9rl.c +++ b/drivers/clk/at91/at91sam9rl.c @@ -137,7 +137,8 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 5, i, - &at91rm9200_programmable_layout); + &at91rm9200_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 0ce3da080287..22b9aad9efb8 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c @@ -226,7 +226,8 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, hw = at91_clk_register_programmable(regmap, name, parent_names, 5, i, - &at91sam9x5_programmable_layout); + &at91sam9x5_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; @@ -257,7 +258,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, at91sam9x5_periphck[i].n, "masterck", at91sam9x5_periphck[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; @@ -270,7 +271,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, extra_pcks[i].n, "masterck", extra_pcks[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c index 44a46dcc0518..b4fc8d71daf2 100644 --- a/drivers/clk/at91/clk-generated.c +++ b/drivers/clk/at91/clk-generated.c @@ -18,18 +18,17 @@ #define GENERATED_MAX_DIV 255 -#define GCK_INDEX_DT_AUDIO_PLL 5 - struct clk_generated { struct clk_hw hw; struct regmap *regmap; struct clk_range range; 
spinlock_t *lock; + u32 *mux_table; u32 id; u32 gckdiv; const struct clk_pcr_layout *layout; u8 parent_id; - bool audio_pll_allowed; + int chg_pid; }; #define to_clk_generated(hw) \ @@ -83,7 +82,7 @@ static int clk_generated_is_enabled(struct clk_hw *hw) regmap_read(gck->regmap, gck->layout->offset, &status); spin_unlock_irqrestore(gck->lock, flags); - return status & AT91_PMC_PCR_GCKEN ? 1 : 0; + return !!(status & AT91_PMC_PCR_GCKEN); } static unsigned long @@ -109,7 +108,7 @@ static void clk_generated_best_diff(struct clk_rate_request *req, tmp_rate = parent_rate / div; tmp_diff = abs(req->rate - tmp_rate); - if (*best_diff < 0 || *best_diff > tmp_diff) { + if (*best_diff < 0 || *best_diff >= tmp_diff) { *best_rate = tmp_rate; *best_diff = tmp_diff; req->best_parent_rate = parent_rate; @@ -129,7 +128,10 @@ static int clk_generated_determine_rate(struct clk_hw *hw, int i; u32 div; - for (i = 0; i < clk_hw_get_num_parents(hw) - 1; i++) { + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + if (gck->chg_pid == i) + continue; + parent = clk_hw_get_parent_by_index(hw, i); if (!parent) continue; @@ -161,16 +163,17 @@ static int clk_generated_determine_rate(struct clk_hw *hw, * that the only clks able to modify gck rate are those of audio IPs. */ - if (!gck->audio_pll_allowed) + if (gck->chg_pid < 0) goto end; - parent = clk_hw_get_parent_by_index(hw, GCK_INDEX_DT_AUDIO_PLL); + parent = clk_hw_get_parent_by_index(hw, gck->chg_pid); if (!parent) goto end; for (div = 1; div < GENERATED_MAX_DIV + 2; div++) { req_parent.rate = req->rate * div; - __clk_determine_rate(parent, &req_parent); + if (__clk_determine_rate(parent, &req_parent)) + continue; clk_generated_best_diff(req, parent, req_parent.rate, div, &best_diff, &best_rate); @@ -184,8 +187,8 @@ end: __clk_get_name((req->best_parent_hw)->clk), req->best_parent_rate); - if (best_rate < 0) - return best_rate; + if (best_rate < 0 || (gck->range.max && best_rate > gck->range.max)) + return -EINVAL; req->rate = best_rate; return 0; @@ -199,7 +202,11 @@ static int clk_generated_set_parent(struct clk_hw *hw, u8 index) if (index >= clk_hw_get_num_parents(hw)) return -EINVAL; - gck->parent_id = index; + if (gck->mux_table) + gck->parent_id = clk_mux_index_to_val(gck->mux_table, 0, index); + else + gck->parent_id = index; + return 0; } @@ -271,8 +278,9 @@ struct clk_hw * __init at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const struct clk_pcr_layout *layout, const char *name, const char **parent_names, - u8 num_parents, u8 id, bool pll_audio, - const struct clk_range *range) + u32 *mux_table, u8 num_parents, u8 id, + const struct clk_range *range, + int chg_pid) { struct clk_generated *gck; struct clk_init_data init; @@ -287,16 +295,18 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, init.ops = &generated_ops; init.parent_names = parent_names; init.num_parents = num_parents; - init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | - CLK_SET_RATE_PARENT; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + if (chg_pid >= 0) + init.flags |= CLK_SET_RATE_PARENT; gck->id = id; gck->hw.init = &init; gck->regmap = regmap; gck->lock = lock; gck->range = *range; - gck->audio_pll_allowed = pll_audio; + gck->chg_pid = chg_pid; gck->layout = layout; + gck->mux_table = mux_table; clk_generated_startup(gck); hw = &gck->hw; diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c index 37c22667e831..5c83e899084f 100644 --- a/drivers/clk/at91/clk-main.c +++ b/drivers/clk/at91/clk-main.c @@ -175,7 
+175,7 @@ static bool clk_main_rc_osc_ready(struct regmap *regmap) regmap_read(regmap, AT91_PMC_SR, &status); - return status & AT91_PMC_MOSCRCS; + return !!(status & AT91_PMC_MOSCRCS); } static int clk_main_rc_osc_prepare(struct clk_hw *hw) @@ -336,7 +336,7 @@ static int clk_rm9200_main_is_prepared(struct clk_hw *hw) regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status); - return status & AT91_PMC_MAINRDY ? 1 : 0; + return !!(status & AT91_PMC_MAINRDY); } static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw, @@ -398,7 +398,7 @@ static inline bool clk_sam9x5_main_ready(struct regmap *regmap) regmap_read(regmap, AT91_PMC_SR, &status); - return status & AT91_PMC_MOSCSELS ? 1 : 0; + return !!(status & AT91_PMC_MOSCSELS); } static int clk_sam9x5_main_prepare(struct clk_hw *hw) diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c index e7e0ba652de1..bd0d8a69a2cf 100644 --- a/drivers/clk/at91/clk-master.c +++ b/drivers/clk/at91/clk-master.c @@ -17,30 +17,49 @@ #define MASTER_DIV_SHIFT 8 #define MASTER_DIV_MASK 0x3 +#define PMC_MCR 0x30 +#define PMC_MCR_ID_MSK GENMASK(3, 0) +#define PMC_MCR_CMD BIT(7) +#define PMC_MCR_DIV GENMASK(10, 8) +#define PMC_MCR_CSS GENMASK(20, 16) +#define PMC_MCR_CSS_SHIFT (16) +#define PMC_MCR_EN BIT(28) + +#define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK) + +#define MASTER_MAX_ID 4 + #define to_clk_master(hw) container_of(hw, struct clk_master, hw) struct clk_master { struct clk_hw hw; struct regmap *regmap; + spinlock_t *lock; const struct clk_master_layout *layout; const struct clk_master_characteristics *characteristics; + u32 *mux_table; u32 mckr; + int chg_pid; + u8 id; + u8 parent; + u8 div; }; -static inline bool clk_master_ready(struct regmap *regmap) +static inline bool clk_master_ready(struct clk_master *master) { + unsigned int bit = master->id ? AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY; unsigned int status; - regmap_read(regmap, AT91_PMC_SR, &status); + regmap_read(master->regmap, AT91_PMC_SR, &status); - return status & AT91_PMC_MCKRDY ? 
1 : 0; + return !!(status & bit); } static int clk_master_prepare(struct clk_hw *hw) { struct clk_master *master = to_clk_master(hw); - while (!clk_master_ready(master->regmap)) + while (!clk_master_ready(master)) cpu_relax(); return 0; @@ -50,7 +69,7 @@ static int clk_master_is_prepared(struct clk_hw *hw) { struct clk_master *master = to_clk_master(hw); - return clk_master_ready(master->regmap); + return clk_master_ready(master); } static unsigned long clk_master_recalc_rate(struct clk_hw *hw, @@ -143,6 +162,287 @@ at91_clk_register_master(struct regmap *regmap, return hw; } +static unsigned long +clk_sama7g5_master_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_master *master = to_clk_master(hw); + + return DIV_ROUND_CLOSEST_ULL(parent_rate, (1 << master->div)); +} + +static void clk_sama7g5_master_best_diff(struct clk_rate_request *req, + struct clk_hw *parent, + unsigned long parent_rate, + long *best_rate, + long *best_diff, + u32 div) +{ + unsigned long tmp_rate, tmp_diff; + + if (div == MASTER_PRES_MAX) + tmp_rate = parent_rate / 3; + else + tmp_rate = parent_rate >> div; + + tmp_diff = abs(req->rate - tmp_rate); + + if (*best_diff < 0 || *best_diff >= tmp_diff) { + *best_rate = tmp_rate; + *best_diff = tmp_diff; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } +} + +static int clk_sama7g5_master_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_master *master = to_clk_master(hw); + struct clk_rate_request req_parent = *req; + struct clk_hw *parent; + long best_rate = LONG_MIN, best_diff = LONG_MIN; + unsigned long parent_rate; + unsigned int div, i; + + /* First: check the dividers of MCR. */ + for (i = 0; i < clk_hw_get_num_parents(hw); i++) { + parent = clk_hw_get_parent_by_index(hw, i); + if (!parent) + continue; + + parent_rate = clk_hw_get_rate(parent); + if (!parent_rate) + continue; + + for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + clk_sama7g5_master_best_diff(req, parent, parent_rate, + &best_rate, &best_diff, + div); + if (!best_diff) + break; + } + + if (!best_diff) + break; + } + + /* Second: try to request rate form changeable parent. 
*/ + if (master->chg_pid < 0) + goto end; + + parent = clk_hw_get_parent_by_index(hw, master->chg_pid); + if (!parent) + goto end; + + for (div = 0; div < MASTER_PRES_MAX + 1; div++) { + if (div == MASTER_PRES_MAX) + req_parent.rate = req->rate * 3; + else + req_parent.rate = req->rate << div; + + if (__clk_determine_rate(parent, &req_parent)) + continue; + + clk_sama7g5_master_best_diff(req, parent, req_parent.rate, + &best_rate, &best_diff, div); + + if (!best_diff) + break; + } + +end: + pr_debug("MCK: %s, best_rate = %ld, parent clk: %s @ %ld\n", + __func__, best_rate, + __clk_get_name((req->best_parent_hw)->clk), + req->best_parent_rate); + + if (best_rate < 0) + return -EINVAL; + + req->rate = best_rate; + + return 0; +} + +static u8 clk_sama7g5_master_get_parent(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + u8 index; + + spin_lock_irqsave(master->lock, flags); + index = clk_mux_val_to_index(&master->hw, master->mux_table, 0, + master->parent); + spin_unlock_irqrestore(master->lock, flags); + + return index; +} + +static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + + if (index >= clk_hw_get_num_parents(hw)) + return -EINVAL; + + spin_lock_irqsave(master->lock, flags); + master->parent = clk_mux_index_to_val(master->mux_table, 0, index); + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static int clk_sama7g5_master_enable(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + unsigned int val, cparent; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id)); + regmap_read(master->regmap, PMC_MCR, &val); + regmap_update_bits(master->regmap, PMC_MCR, + PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV | + PMC_MCR_CMD | PMC_MCR_ID_MSK, + PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) | + (master->div << MASTER_DIV_SHIFT) | + PMC_MCR_CMD | PMC_MCR_ID(master->id)); + + cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; + + /* Wait here only if parent is being changed. 
*/ + while ((cparent != master->parent) && !clk_master_ready(master)) + cpu_relax(); + + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static void clk_sama7g5_master_disable(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_update_bits(master->regmap, PMC_MCR, + PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK, + PMC_MCR_CMD | PMC_MCR_ID(master->id)); + + spin_unlock_irqrestore(master->lock, flags); +} + +static int clk_sama7g5_master_is_enabled(struct clk_hw *hw) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(master->lock, flags); + + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_read(master->regmap, PMC_MCR, &val); + + spin_unlock_irqrestore(master->lock, flags); + + return !!(val & PMC_MCR_EN); +} + +static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_master *master = to_clk_master(hw); + unsigned long div, flags; + + div = DIV_ROUND_CLOSEST(parent_rate, rate); + if ((div > (1 << (MASTER_PRES_MAX - 1))) || (div & (div - 1))) + return -EINVAL; + + if (div == 3) + div = MASTER_PRES_MAX; + else + div = ffs(div) - 1; + + spin_lock_irqsave(master->lock, flags); + master->div = div; + spin_unlock_irqrestore(master->lock, flags); + + return 0; +} + +static const struct clk_ops sama7g5_master_ops = { + .enable = clk_sama7g5_master_enable, + .disable = clk_sama7g5_master_disable, + .is_enabled = clk_sama7g5_master_is_enabled, + .recalc_rate = clk_sama7g5_master_recalc_rate, + .determine_rate = clk_sama7g5_master_determine_rate, + .set_rate = clk_sama7g5_master_set_rate, + .get_parent = clk_sama7g5_master_get_parent, + .set_parent = clk_sama7g5_master_set_parent, +}; + +struct clk_hw * __init +at91_clk_sama7g5_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, + u32 *mux_table, + spinlock_t *lock, u8 id, + bool critical, int chg_pid) +{ + struct clk_master *master; + struct clk_hw *hw; + struct clk_init_data init; + unsigned long flags; + unsigned int val; + int ret; + + if (!name || !num_parents || !parent_names || !mux_table || + !lock || id > MASTER_MAX_ID) + return ERR_PTR(-EINVAL); + + master = kzalloc(sizeof(*master), GFP_KERNEL); + if (!master) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &sama7g5_master_ops; + init.parent_names = parent_names; + init.num_parents = num_parents; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + if (chg_pid >= 0) + init.flags |= CLK_SET_RATE_PARENT; + if (critical) + init.flags |= CLK_IS_CRITICAL; + + master->hw.init = &init; + master->regmap = regmap; + master->id = id; + master->chg_pid = chg_pid; + master->lock = lock; + master->mux_table = mux_table; + + spin_lock_irqsave(master->lock, flags); + regmap_write(master->regmap, PMC_MCR, master->id); + regmap_read(master->regmap, PMC_MCR, &val); + master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT; + master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT; + spin_unlock_irqrestore(master->lock, flags); + + hw = &master->hw; + ret = clk_hw_register(NULL, &master->hw); + if (ret) { + kfree(master); + hw = ERR_PTR(ret); + } + + return hw; +} + const struct clk_master_layout at91rm9200_master_layout = { .mask = 0x31F, .pres_shift = 2, diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c 
index c2ab4860a2bf..7867eaf0447f 100644 --- a/drivers/clk/at91/clk-peripheral.c +++ b/drivers/clk/at91/clk-peripheral.c @@ -38,6 +38,7 @@ struct clk_sam9x5_peripheral { u32 div; const struct clk_pcr_layout *layout; bool auto_div; + int chg_pid; }; #define to_clk_sam9x5_peripheral(hw) \ @@ -208,7 +209,7 @@ static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw) regmap_read(periph->regmap, periph->layout->offset, &status); spin_unlock_irqrestore(periph->lock, flags); - return status & AT91_PMC_PCR_EN ? 1 : 0; + return !!(status & AT91_PMC_PCR_EN); } static unsigned long @@ -238,6 +239,87 @@ clk_sam9x5_peripheral_recalc_rate(struct clk_hw *hw, return parent_rate >> periph->div; } +static void clk_sam9x5_peripheral_best_diff(struct clk_rate_request *req, + struct clk_hw *parent, + unsigned long parent_rate, + u32 shift, long *best_diff, + long *best_rate) +{ + unsigned long tmp_rate = parent_rate >> shift; + unsigned long tmp_diff = abs(req->rate - tmp_rate); + + if (*best_diff < 0 || *best_diff >= tmp_diff) { + *best_rate = tmp_rate; + *best_diff = tmp_diff; + req->best_parent_rate = parent_rate; + req->best_parent_hw = parent; + } +} + +static int clk_sam9x5_peripheral_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); + struct clk_hw *parent = clk_hw_get_parent(hw); + struct clk_rate_request req_parent = *req; + unsigned long parent_rate = clk_hw_get_rate(parent); + unsigned long tmp_rate; + long best_rate = LONG_MIN; + long best_diff = LONG_MIN; + u32 shift; + + if (periph->id < PERIPHERAL_ID_MIN || !periph->range.max) + return parent_rate; + + /* Fist step: check the available dividers. */ + for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { + tmp_rate = parent_rate >> shift; + + if (periph->range.max && tmp_rate > periph->range.max) + continue; + + clk_sam9x5_peripheral_best_diff(req, parent, parent_rate, + shift, &best_diff, &best_rate); + + if (!best_diff || best_rate <= req->rate) + break; + } + + if (periph->chg_pid < 0) + goto end; + + /* Step two: try to request rate from parent. 
*/ + parent = clk_hw_get_parent_by_index(hw, periph->chg_pid); + if (!parent) + goto end; + + for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) { + req_parent.rate = req->rate << shift; + + if (__clk_determine_rate(parent, &req_parent)) + continue; + + clk_sam9x5_peripheral_best_diff(req, parent, req_parent.rate, + shift, &best_diff, &best_rate); + + if (!best_diff) + break; + } +end: + if (best_rate < 0 || + (periph->range.max && best_rate > periph->range.max)) + return -EINVAL; + + pr_debug("PCK: %s, best_rate = %ld, parent clk: %s @ %ld\n", + __func__, best_rate, + __clk_get_name((req->best_parent_hw)->clk), + req->best_parent_rate); + + req->rate = best_rate; + + return 0; +} + static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) @@ -320,11 +402,21 @@ static const struct clk_ops sam9x5_peripheral_ops = { .set_rate = clk_sam9x5_peripheral_set_rate, }; +static const struct clk_ops sam9x5_peripheral_chg_ops = { + .enable = clk_sam9x5_peripheral_enable, + .disable = clk_sam9x5_peripheral_disable, + .is_enabled = clk_sam9x5_peripheral_is_enabled, + .recalc_rate = clk_sam9x5_peripheral_recalc_rate, + .determine_rate = clk_sam9x5_peripheral_determine_rate, + .set_rate = clk_sam9x5_peripheral_set_rate, +}; + struct clk_hw * __init at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, const struct clk_pcr_layout *layout, const char *name, const char *parent_name, - u32 id, const struct clk_range *range) + u32 id, const struct clk_range *range, + int chg_pid) { struct clk_sam9x5_peripheral *periph; struct clk_init_data init; @@ -339,10 +431,16 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &sam9x5_peripheral_ops; - init.parent_names = (parent_name ? &parent_name : NULL); - init.num_parents = (parent_name ? 
1 : 0); - init.flags = 0; + init.parent_names = &parent_name; + init.num_parents = 1; + if (chg_pid < 0) { + init.flags = 0; + init.ops = &sam9x5_peripheral_ops; + } else { + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT; + init.ops = &sam9x5_peripheral_chg_ops; + } periph->id = id; periph->hw.init = &init; @@ -353,6 +451,7 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, periph->auto_div = true; periph->layout = layout; periph->range = *range; + periph->chg_pid = chg_pid; hw = &periph->hw; ret = clk_hw_register(NULL, &periph->hw); diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index 8ee66fbee3d9..fcf8f6a1c2c6 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -21,6 +21,7 @@ struct clk_programmable { struct clk_hw hw; struct regmap *regmap; + u32 *mux_table; u8 id; const struct clk_programmable_layout *layout; }; @@ -108,6 +109,9 @@ static int clk_programmable_set_parent(struct clk_hw *hw, u8 index) if (layout->have_slck_mck) mask |= AT91_PMC_CSSMCK_MCK; + if (prog->mux_table) + pckr = clk_mux_index_to_val(prog->mux_table, 0, index); + if (index > layout->css_mask) { if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck) return -EINVAL; @@ -134,6 +138,9 @@ static u8 clk_programmable_get_parent(struct clk_hw *hw) if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret) ret = PROG_MAX_RM9200_CSS + 1; + if (prog->mux_table) + ret = clk_mux_val_to_index(&prog->hw, prog->mux_table, 0, ret); + return ret; } @@ -182,7 +189,8 @@ struct clk_hw * __init at91_clk_register_programmable(struct regmap *regmap, const char *name, const char **parent_names, u8 num_parents, u8 id, - const struct clk_programmable_layout *layout) + const struct clk_programmable_layout *layout, + u32 *mux_table) { struct clk_programmable *prog; struct clk_hw *hw; @@ -206,6 +214,7 @@ at91_clk_register_programmable(struct regmap *regmap, prog->layout = layout; prog->hw.init = &init; prog->regmap = regmap; + prog->mux_table = mux_table; hw = &prog->hw; ret = clk_hw_register(NULL, &prog->hw); diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c index e699803986e5..b473298ef7e6 100644 --- a/drivers/clk/at91/clk-sam9x60-pll.c +++ b/drivers/clk/at91/clk-sam9x60-pll.c @@ -15,26 +15,41 @@ #include "pmc.h" #define PMC_PLL_CTRL0_DIV_MSK GENMASK(7, 0) -#define PMC_PLL_CTRL1_MUL_MSK GENMASK(30, 24) +#define PMC_PLL_CTRL1_MUL_MSK GENMASK(31, 24) +#define PMC_PLL_CTRL1_FRACR_MSK GENMASK(21, 0) #define PLL_DIV_MAX (FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, UINT_MAX) + 1) #define UPLL_DIV 2 #define PLL_MUL_MAX (FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, UINT_MAX) + 1) -#define PLL_MAX_ID 1 +#define FCORE_MIN (600000000) +#define FCORE_MAX (1200000000) -struct sam9x60_pll { - struct clk_hw hw; +#define PLL_MAX_ID 7 + +struct sam9x60_pll_core { struct regmap *regmap; spinlock_t *lock; const struct clk_pll_characteristics *characteristics; - u32 frac; + const struct clk_pll_layout *layout; + struct clk_hw hw; u8 id; - u8 div; +}; + +struct sam9x60_frac { + struct sam9x60_pll_core core; + u32 frac; u16 mul; }; -#define to_sam9x60_pll(hw) container_of(hw, struct sam9x60_pll, hw) +struct sam9x60_div { + struct sam9x60_pll_core core; + u8 div; +}; + +#define to_sam9x60_pll_core(hw) container_of(hw, struct sam9x60_pll_core, hw) +#define to_sam9x60_frac(core) container_of(core, struct sam9x60_frac, core) +#define to_sam9x60_div(core) container_of(core, struct sam9x60_div, core) 
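/*
 * A note on the accessor macros above: the rework embeds a shared
 * sam9x60_pll_core inside the per-flavour wrappers (sam9x60_frac,
 * sam9x60_div) and uses container_of() to walk back from the embedded
 * member to its wrapper. Below is a minimal, self-contained sketch of
 * that idiom with hypothetical struct names; it only illustrates the
 * pattern and is not part of the driver.
 */
#include <stddef.h>

struct pll_core { int id; };					/* shared state */
struct pll_frac { struct pll_core core; unsigned int mul; };	/* wrapper */

/* container_of() reduced to its essence: step back by the member offset. */
#define pll_core_to_frac(c) \
	((struct pll_frac *)((char *)(c) - offsetof(struct pll_frac, core)))

static inline struct pll_frac *frac_from_core(struct pll_core *c)
{
	return pll_core_to_frac(c);	/* e.g. &my_frac.core -> &my_frac */
}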
static inline bool sam9x60_pll_ready(struct regmap *regmap, int id) { @@ -45,41 +60,53 @@ static inline bool sam9x60_pll_ready(struct regmap *regmap, int id) return !!(status & BIT(id)); } -static int sam9x60_pll_prepare(struct clk_hw *hw) +static bool sam9x60_frac_pll_ready(struct regmap *regmap, u8 id) { - struct sam9x60_pll *pll = to_sam9x60_pll(hw); - struct regmap *regmap = pll->regmap; - unsigned long flags; - u8 div; - u16 mul; - u32 val; + return sam9x60_pll_ready(regmap, id); +} - spin_lock_irqsave(pll->lock, flags); - regmap_write(regmap, AT91_PMC_PLL_UPDT, pll->id); +static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct sam9x60_frac *frac = to_sam9x60_frac(core); - regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); - div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val); + return (parent_rate * (frac->mul + 1) + + ((u64)parent_rate * frac->frac >> 22)); +} +static int sam9x60_frac_pll_prepare(struct clk_hw *hw) +{ + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct sam9x60_frac *frac = to_sam9x60_frac(core); + struct regmap *regmap = core->regmap; + unsigned int val, cfrac, cmul; + unsigned long flags; + + spin_lock_irqsave(core->lock, flags); + + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, core->id); regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val); - mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val); + cmul = (val & core->layout->mul_mask) >> core->layout->mul_shift; + cfrac = (val & core->layout->frac_mask) >> core->layout->frac_shift; - if (sam9x60_pll_ready(regmap, pll->id) && - (div == pll->div && mul == pll->mul)) { - spin_unlock_irqrestore(pll->lock, flags); - return 0; - } + if (sam9x60_frac_pll_ready(regmap, core->id) && + (cmul == frac->mul && cfrac == frac->frac)) + goto unlock; - /* Recommended value for AT91_PMC_PLL_ACR */ - if (pll->characteristics->upll) + /* Recommended value for PMC_PLL_ACR */ + if (core->characteristics->upll) val = AT91_PMC_PLL_ACR_DEFAULT_UPLL; else val = AT91_PMC_PLL_ACR_DEFAULT_PLLA; regmap_write(regmap, AT91_PMC_PLL_ACR, val); regmap_write(regmap, AT91_PMC_PLL_CTRL1, - FIELD_PREP(PMC_PLL_CTRL1_MUL_MSK, pll->mul)); + (frac->mul << core->layout->mul_shift) | + (frac->frac << core->layout->frac_shift)); - if (pll->characteristics->upll) { + if (core->characteristics->upll) { /* Enable the UTMI internal bandgap */ val |= AT91_PMC_PLL_ACR_UTMIBG; regmap_write(regmap, AT91_PMC_PLL_ACR, val); @@ -94,221 +121,409 @@ static int sam9x60_pll_prepare(struct clk_hw *hw) } regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, - AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); + AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, + AT91_PMC_PLL_UPDT_UPDATE | core->id); - regmap_write(regmap, AT91_PMC_PLL_CTRL0, - AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL | - AT91_PMC_PLL_CTRL0_ENPLLCK | pll->div); + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, + AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL, + AT91_PMC_PLL_CTRL0_ENLOCK | AT91_PMC_PLL_CTRL0_ENPLL); regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, - AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); + AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, + AT91_PMC_PLL_UPDT_UPDATE | core->id); - while (!sam9x60_pll_ready(regmap, pll->id)) + while (!sam9x60_pll_ready(regmap, core->id)) cpu_relax(); - spin_unlock_irqrestore(pll->lock, flags); +unlock: + spin_unlock_irqrestore(core->lock, flags); return 0; } -static int sam9x60_pll_is_prepared(struct clk_hw 
*hw) +static void sam9x60_frac_pll_unprepare(struct clk_hw *hw) { - struct sam9x60_pll *pll = to_sam9x60_pll(hw); + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct regmap *regmap = core->regmap; + unsigned long flags; + + spin_lock_irqsave(core->lock, flags); - return sam9x60_pll_ready(pll->regmap, pll->id); + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, core->id); + + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, AT91_PMC_PLL_CTRL0_ENPLL, 0); + + if (core->characteristics->upll) + regmap_update_bits(regmap, AT91_PMC_PLL_ACR, + AT91_PMC_PLL_ACR_UTMIBG | AT91_PMC_PLL_ACR_UTMIVR, 0); + + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, + AT91_PMC_PLL_UPDT_UPDATE | core->id); + + spin_unlock_irqrestore(core->lock, flags); } -static void sam9x60_pll_unprepare(struct clk_hw *hw) +static int sam9x60_frac_pll_is_prepared(struct clk_hw *hw) { - struct sam9x60_pll *pll = to_sam9x60_pll(hw); - unsigned long flags; + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); - spin_lock_irqsave(pll->lock, flags); + return sam9x60_pll_ready(core->regmap, core->id); +} - regmap_write(pll->regmap, AT91_PMC_PLL_UPDT, pll->id); +static long sam9x60_frac_pll_compute_mul_frac(struct sam9x60_pll_core *core, + unsigned long rate, + unsigned long parent_rate, + bool update) +{ + struct sam9x60_frac *frac = to_sam9x60_frac(core); + unsigned long tmprate, remainder; + unsigned long nmul = 0; + unsigned long nfrac = 0; - regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0, - AT91_PMC_PLL_CTRL0_ENPLLCK, 0); + if (rate < FCORE_MIN || rate > FCORE_MAX) + return -ERANGE; - regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT, - AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); + /* + * Calculate the multiplier associated with the current + * divider that provides the closest rate to the requested one. + */ + nmul = mult_frac(rate, 1, parent_rate); + tmprate = mult_frac(parent_rate, nmul, 1); + remainder = rate - tmprate; - regmap_update_bits(pll->regmap, AT91_PMC_PLL_CTRL0, - AT91_PMC_PLL_CTRL0_ENPLL, 0); + if (remainder) { + nfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * (1 << 22), + parent_rate); - if (pll->characteristics->upll) - regmap_update_bits(pll->regmap, AT91_PMC_PLL_ACR, - AT91_PMC_PLL_ACR_UTMIBG | - AT91_PMC_PLL_ACR_UTMIVR, 0); + tmprate += DIV_ROUND_CLOSEST_ULL((u64)nfrac * parent_rate, + (1 << 22)); + } - regmap_update_bits(pll->regmap, AT91_PMC_PLL_UPDT, - AT91_PMC_PLL_UPDT_UPDATE, AT91_PMC_PLL_UPDT_UPDATE); + /* Check if the resulting rate is valid. 
*/ + if (tmprate < FCORE_MIN || tmprate > FCORE_MAX) + return -ERANGE; - spin_unlock_irqrestore(pll->lock, flags); + if (update) { + frac->mul = nmul - 1; + frac->frac = nfrac; + } + + return tmprate; } -static unsigned long sam9x60_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +static long sam9x60_frac_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) { - struct sam9x60_pll *pll = to_sam9x60_pll(hw); + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); - return (parent_rate * (pll->mul + 1)) / (pll->div + 1); + return sam9x60_frac_pll_compute_mul_frac(core, rate, *parent_rate, false); } -static long sam9x60_pll_get_best_div_mul(struct sam9x60_pll *pll, - unsigned long rate, - unsigned long parent_rate, - bool update) +static int sam9x60_frac_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { - const struct clk_pll_characteristics *characteristics = - pll->characteristics; - unsigned long bestremainder = ULONG_MAX; - unsigned long maxdiv, mindiv, tmpdiv; - long bestrate = -ERANGE; - unsigned long bestdiv = 0; - unsigned long bestmul = 0; - unsigned long bestfrac = 0; + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); - if (rate < characteristics->output[0].min || - rate > characteristics->output[0].max) - return -ERANGE; + return sam9x60_frac_pll_compute_mul_frac(core, rate, parent_rate, true); +} - if (!pll->characteristics->upll) { - mindiv = parent_rate / rate; - if (mindiv < 2) - mindiv = 2; +static const struct clk_ops sam9x60_frac_pll_ops = { + .prepare = sam9x60_frac_pll_prepare, + .unprepare = sam9x60_frac_pll_unprepare, + .is_prepared = sam9x60_frac_pll_is_prepared, + .recalc_rate = sam9x60_frac_pll_recalc_rate, + .round_rate = sam9x60_frac_pll_round_rate, + .set_rate = sam9x60_frac_pll_set_rate, +}; - maxdiv = DIV_ROUND_UP(parent_rate * PLL_MUL_MAX, rate); - if (maxdiv > PLL_DIV_MAX) - maxdiv = PLL_DIV_MAX; - } else { - mindiv = maxdiv = UPLL_DIV; - } +static int sam9x60_div_pll_prepare(struct clk_hw *hw) +{ + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct sam9x60_div *div = to_sam9x60_div(core); + struct regmap *regmap = core->regmap; + unsigned long flags; + unsigned int val, cdiv; - for (tmpdiv = mindiv; tmpdiv <= maxdiv; tmpdiv++) { - unsigned long remainder; - unsigned long tmprate; - unsigned long tmpmul; - unsigned long tmpfrac = 0; + spin_lock_irqsave(core->lock, flags); + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, core->id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); + cdiv = (val & core->layout->div_mask) >> core->layout->div_shift; - /* - * Calculate the multiplier associated with the current - * divider that provide the closest rate to the requested one. - */ - tmpmul = mult_frac(rate, tmpdiv, parent_rate); - tmprate = mult_frac(parent_rate, tmpmul, tmpdiv); - remainder = rate - tmprate; + /* Stop if enabled and nothing changed. 
*/ + if (!!(val & core->layout->endiv_mask) && cdiv == div->div) + goto unlock; - if (remainder) { - tmpfrac = DIV_ROUND_CLOSEST_ULL((u64)remainder * tmpdiv * (1 << 22), - parent_rate); + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, + core->layout->div_mask | core->layout->endiv_mask, + (div->div << core->layout->div_shift) | + (1 << core->layout->endiv_shift)); - tmprate += DIV_ROUND_CLOSEST_ULL((u64)tmpfrac * parent_rate, - tmpdiv * (1 << 22)); + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, + AT91_PMC_PLL_UPDT_UPDATE | core->id); - if (tmprate > rate) - remainder = tmprate - rate; - else - remainder = rate - tmprate; - } + while (!sam9x60_pll_ready(regmap, core->id)) + cpu_relax(); - /* - * Compare the remainder with the best remainder found until - * now and elect a new best multiplier/divider pair if the - * current remainder is smaller than the best one. - */ - if (remainder < bestremainder) { - bestremainder = remainder; - bestdiv = tmpdiv; - bestmul = tmpmul; - bestrate = tmprate; - bestfrac = tmpfrac; +unlock: + spin_unlock_irqrestore(core->lock, flags); + + return 0; +} + +static void sam9x60_div_pll_unprepare(struct clk_hw *hw) +{ + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct regmap *regmap = core->regmap; + unsigned long flags; + + spin_lock_irqsave(core->lock, flags); + + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, core->id); + + regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0, + core->layout->endiv_mask, 0); + + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK, + AT91_PMC_PLL_UPDT_UPDATE | core->id); + + spin_unlock_irqrestore(core->lock, flags); +} + +static int sam9x60_div_pll_is_prepared(struct clk_hw *hw) +{ + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct regmap *regmap = core->regmap; + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(core->lock, flags); + + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, core->id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); + + spin_unlock_irqrestore(core->lock, flags); + + return !!(val & core->layout->endiv_mask); +} + +static unsigned long sam9x60_div_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct sam9x60_div *div = to_sam9x60_div(core); + + return DIV_ROUND_CLOSEST_ULL(parent_rate, (div->div + 1)); +} + +static long sam9x60_div_pll_compute_div(struct sam9x60_pll_core *core, + unsigned long *parent_rate, + unsigned long rate) +{ + const struct clk_pll_characteristics *characteristics = + core->characteristics; + struct clk_hw *parent = clk_hw_get_parent(&core->hw); + unsigned long tmp_rate, tmp_parent_rate, tmp_diff; + long best_diff = -1, best_rate = -EINVAL; + u32 divid, best_div; + + if (!rate) + return 0; + + if (rate < characteristics->output[0].min || + rate > characteristics->output[0].max) + return -ERANGE; + + for (divid = 1; divid < core->layout->div_mask; divid++) { + tmp_parent_rate = clk_hw_round_rate(parent, rate * divid); + if (!tmp_parent_rate) + continue; + + tmp_rate = DIV_ROUND_CLOSEST_ULL(tmp_parent_rate, divid); + tmp_diff = abs(rate - tmp_rate); + + if (best_diff < 0 || best_diff > tmp_diff) { + *parent_rate = tmp_parent_rate; + best_rate = tmp_rate; + best_diff = tmp_diff; + best_div = divid; } - /* We've found a perfect match! 
*/ - if (!remainder) + if (!best_diff) break; } - /* Check if bestrate is a valid output rate */ - if (bestrate < characteristics->output[0].min && - bestrate > characteristics->output[0].max) + if (best_rate < characteristics->output[0].min || + best_rate > characteristics->output[0].max) return -ERANGE; - if (update) { - pll->div = bestdiv - 1; - pll->mul = bestmul - 1; - pll->frac = bestfrac; - } - - return bestrate; + return best_rate; } -static long sam9x60_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static long sam9x60_div_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) { - struct sam9x60_pll *pll = to_sam9x60_pll(hw); + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); - return sam9x60_pll_get_best_div_mul(pll, rate, *parent_rate, false); + return sam9x60_div_pll_compute_div(core, parent_rate, rate); } -static int sam9x60_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +static int sam9x60_div_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { - struct sam9x60_pll *pll = to_sam9x60_pll(hw); + struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw); + struct sam9x60_div *div = to_sam9x60_div(core); + + div->div = DIV_ROUND_CLOSEST(parent_rate, rate) - 1; - return sam9x60_pll_get_best_div_mul(pll, rate, parent_rate, true); + return 0; } -static const struct clk_ops pll_ops = { - .prepare = sam9x60_pll_prepare, - .unprepare = sam9x60_pll_unprepare, - .is_prepared = sam9x60_pll_is_prepared, - .recalc_rate = sam9x60_pll_recalc_rate, - .round_rate = sam9x60_pll_round_rate, - .set_rate = sam9x60_pll_set_rate, +static const struct clk_ops sam9x60_div_pll_ops = { + .prepare = sam9x60_div_pll_prepare, + .unprepare = sam9x60_div_pll_unprepare, + .is_prepared = sam9x60_div_pll_is_prepared, + .recalc_rate = sam9x60_div_pll_recalc_rate, + .round_rate = sam9x60_div_pll_round_rate, + .set_rate = sam9x60_div_pll_set_rate, }; struct clk_hw * __init -sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock, - const char *name, const char *parent_name, u8 id, - const struct clk_pll_characteristics *characteristics) +sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock, + const char *name, const char *parent_name, + struct clk_hw *parent_hw, u8 id, + const struct clk_pll_characteristics *characteristics, + const struct clk_pll_layout *layout, bool critical) { - struct sam9x60_pll *pll; + struct sam9x60_frac *frac; struct clk_hw *hw; struct clk_init_data init; - unsigned int pllr; + unsigned long parent_rate, flags; + unsigned int val; int ret; - if (id > PLL_MAX_ID) + if (id > PLL_MAX_ID || !lock || !parent_hw) return ERR_PTR(-EINVAL); - pll = kzalloc(sizeof(*pll), GFP_KERNEL); - if (!pll) + frac = kzalloc(sizeof(*frac), GFP_KERNEL); + if (!frac) return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &pll_ops; init.parent_names = &parent_name; init.num_parents = 1; + init.ops = &sam9x60_frac_pll_ops; init.flags = CLK_SET_RATE_GATE; + if (critical) + init.flags |= CLK_IS_CRITICAL; + + frac->core.id = id; + frac->core.hw.init = &init; + frac->core.characteristics = characteristics; + frac->core.layout = layout; + frac->core.regmap = regmap; + frac->core.lock = lock; + + spin_lock_irqsave(frac->core.lock, flags); + if (sam9x60_pll_ready(regmap, id)) { + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, id); + regmap_read(regmap, AT91_PMC_PLL_CTRL1, &val); + frac->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, val); + 
frac->frac = FIELD_GET(PMC_PLL_CTRL1_FRACR_MSK, val); + } else { + /* + * This means the PLL is not setup by bootloaders. In this + * case we need to set the minimum rate for it. Otherwise + * a clock child of this PLL may be enabled before setting + * its rate leading to enabling this PLL with unsupported + * rate. This will lead to PLL not being locked at all. + */ + parent_rate = clk_hw_get_rate(parent_hw); + if (!parent_rate) { + hw = ERR_PTR(-EINVAL); + goto free; + } + + ret = sam9x60_frac_pll_compute_mul_frac(&frac->core, FCORE_MIN, + parent_rate, true); + if (ret <= 0) { + hw = ERR_PTR(ret); + goto free; + } + } + spin_unlock_irqrestore(frac->core.lock, flags); + + hw = &frac->core.hw; + ret = clk_hw_register(NULL, hw); + if (ret) { + kfree(frac); + hw = ERR_PTR(ret); + } - pll->id = id; - pll->hw.init = &init; - pll->characteristics = characteristics; - pll->regmap = regmap; - pll->lock = lock; + return hw; + +free: + spin_unlock_irqrestore(frac->core.lock, flags); + kfree(frac); + return hw; +} + +struct clk_hw * __init +sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock, + const char *name, const char *parent_name, u8 id, + const struct clk_pll_characteristics *characteristics, + const struct clk_pll_layout *layout, bool critical) +{ + struct sam9x60_div *div; + struct clk_hw *hw; + struct clk_init_data init; + unsigned long flags; + unsigned int val; + int ret; + + if (id > PLL_MAX_ID || !lock) + return ERR_PTR(-EINVAL); + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.parent_names = &parent_name; + init.num_parents = 1; + init.ops = &sam9x60_div_pll_ops; + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE | + CLK_SET_RATE_PARENT; + if (critical) + init.flags |= CLK_IS_CRITICAL; + + div->core.id = id; + div->core.hw.init = &init; + div->core.characteristics = characteristics; + div->core.layout = layout; + div->core.regmap = regmap; + div->core.lock = lock; + + spin_lock_irqsave(div->core.lock, flags); + + regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, + AT91_PMC_PLL_UPDT_ID_MSK, id); + regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val); + div->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, val); - regmap_write(regmap, AT91_PMC_PLL_UPDT, id); - regmap_read(regmap, AT91_PMC_PLL_CTRL0, &pllr); - pll->div = FIELD_GET(PMC_PLL_CTRL0_DIV_MSK, pllr); - regmap_read(regmap, AT91_PMC_PLL_CTRL1, &pllr); - pll->mul = FIELD_GET(PMC_PLL_CTRL1_MUL_MSK, pllr); + spin_unlock_irqrestore(div->core.lock, flags); - hw = &pll->hw; + hw = &div->core.hw; ret = clk_hw_register(NULL, hw); if (ret) { - kfree(pll); + kfree(div); hw = ERR_PTR(ret); } diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c index c4b3877aa445..f83ec0de86c3 100644 --- a/drivers/clk/at91/clk-system.c +++ b/drivers/clk/at91/clk-system.c @@ -34,7 +34,7 @@ static inline bool clk_system_ready(struct regmap *regmap, int id) regmap_read(regmap, AT91_PMC_SR, &status); - return status & (1 << id) ? 1 : 0; + return !!(status & (1 << id)); } static int clk_system_prepare(struct clk_hw *hw) @@ -74,7 +74,7 @@ static int clk_system_is_prepared(struct clk_hw *hw) regmap_read(sys->regmap, AT91_PMC_SR, &status); - return status & (1 << sys->id) ? 
1 : 0; + return !!(status & (1 << sys->id)); } static const struct clk_ops system_ops = { diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c index f1ef4e1f41a9..df9f3fc3b6a6 100644 --- a/drivers/clk/at91/clk-utmi.c +++ b/drivers/clk/at91/clk-utmi.c @@ -120,9 +120,11 @@ static const struct clk_ops utmi_ops = { .recalc_rate = clk_utmi_recalc_rate, }; -struct clk_hw * __init -at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, - const char *name, const char *parent_name) +static struct clk_hw * __init +at91_clk_register_utmi_internal(struct regmap *regmap_pmc, + struct regmap *regmap_sfr, + const char *name, const char *parent_name, + const struct clk_ops *ops, unsigned long flags) { struct clk_utmi *utmi; struct clk_hw *hw; @@ -134,10 +136,10 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return ERR_PTR(-ENOMEM); init.name = name; - init.ops = &utmi_ops; + init.ops = ops; init.parent_names = parent_name ? &parent_name : NULL; init.num_parents = parent_name ? 1 : 0; - init.flags = CLK_SET_RATE_GATE; + init.flags = flags; utmi->hw.init = &init; utmi->regmap_pmc = regmap_pmc; @@ -152,3 +154,94 @@ at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, return hw; } + +struct clk_hw * __init +at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, + const char *name, const char *parent_name) +{ + return at91_clk_register_utmi_internal(regmap_pmc, regmap_sfr, name, + parent_name, &utmi_ops, CLK_SET_RATE_GATE); +} + +static int clk_utmi_sama7g5_prepare(struct clk_hw *hw) +{ + struct clk_utmi *utmi = to_clk_utmi(hw); + struct clk_hw *hw_parent; + unsigned long parent_rate; + unsigned int val; + + hw_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(hw_parent); + + switch (parent_rate) { + case 16000000: + val = 0; + break; + case 20000000: + val = 2; + break; + case 24000000: + val = 3; + break; + case 32000000: + val = 5; + break; + default: + pr_err("UTMICK: unsupported main_xtal rate\n"); + return -EINVAL; + } + + regmap_write(utmi->regmap_pmc, AT91_PMC_XTALF, val); + + return 0; + +} + +static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw) +{ + struct clk_utmi *utmi = to_clk_utmi(hw); + struct clk_hw *hw_parent; + unsigned long parent_rate; + unsigned int val; + + hw_parent = clk_hw_get_parent(hw); + parent_rate = clk_hw_get_rate(hw_parent); + + regmap_read(utmi->regmap_pmc, AT91_PMC_XTALF, &val); + switch (val & 0x7) { + case 0: + if (parent_rate == 16000000) + return 1; + break; + case 2: + if (parent_rate == 20000000) + return 1; + break; + case 3: + if (parent_rate == 24000000) + return 1; + break; + case 5: + if (parent_rate == 32000000) + return 1; + break; + default: + break; + } + + return 0; +} + +static const struct clk_ops sama7g5_utmi_ops = { + .prepare = clk_utmi_sama7g5_prepare, + .is_prepared = clk_utmi_sama7g5_is_prepared, + .recalc_rate = clk_utmi_recalc_rate, +}; + +struct clk_hw * __init +at91_clk_sama7g5_register_utmi(struct regmap *regmap_pmc, const char *name, + const char *parent_name) +{ + return at91_clk_register_utmi_internal(regmap_pmc, NULL, name, + parent_name, &sama7g5_utmi_ops, 0); +} diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c index aa1754eac59f..a50084de97d4 100644 --- a/drivers/clk/at91/dt-compat.c +++ b/drivers/clk/at91/dt-compat.c @@ -22,6 +22,8 @@ #define SYSTEM_MAX_ID 31 +#define GCK_INDEX_DT_AUDIO_PLL 5 + #ifdef CONFIG_HAVE_AT91_AUDIO_PLL static void __init 
of_sama5d2_clk_audio_pll_frac_setup(struct device_node *np) { @@ -135,7 +137,7 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) return; for_each_child_of_node(np, gcknp) { - bool pll_audio = false; + int chg_pid = INT_MIN; if (of_property_read_u32(gcknp, "reg", &id)) continue; @@ -152,12 +154,13 @@ static void __init of_sama5d2_clk_generated_setup(struct device_node *np) if (of_device_is_compatible(np, "atmel,sama5d2-clk-generated") && (id == GCK_ID_I2S0 || id == GCK_ID_I2S1 || id == GCK_ID_CLASSD)) - pll_audio = true; + chg_pid = GCK_INDEX_DT_AUDIO_PLL; hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, &dt_pcr_layout, name, - parent_names, num_parents, - id, pll_audio, &range); + parent_names, NULL, + num_parents, id, &range, + chg_pid); if (IS_ERR(hw)) continue; @@ -460,7 +463,8 @@ of_at91_clk_periph_setup(struct device_node *np, u8 type) &dt_pcr_layout, name, parent_name, - id, &range); + id, &range, + INT_MIN); } if (IS_ERR(hw)) @@ -673,7 +677,8 @@ CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv", static void __init of_at91_clk_prog_setup(struct device_node *np, - const struct clk_programmable_layout *layout) + const struct clk_programmable_layout *layout, + u32 *mux_table) { int num; u32 id; @@ -707,7 +712,7 @@ of_at91_clk_prog_setup(struct device_node *np, hw = at91_clk_register_programmable(regmap, name, parent_names, num_parents, - id, layout); + id, layout, mux_table); if (IS_ERR(hw)) continue; @@ -717,21 +722,21 @@ of_at91_clk_prog_setup(struct device_node *np, static void __init of_at91rm9200_clk_prog_setup(struct device_node *np) { - of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout); + of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout, NULL); } CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable", of_at91rm9200_clk_prog_setup); static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np) { - of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout); + of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout, NULL); } CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable", of_at91sam9g45_clk_prog_setup); static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np) { - of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout); + of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout, NULL); } CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable", of_at91sam9x5_clk_prog_setup); diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h index df616f2937e7..7b86affc6d7c 100644 --- a/drivers/clk/at91/pmc.h +++ b/drivers/clk/at91/pmc.h @@ -54,8 +54,14 @@ struct clk_master_characteristics { struct clk_pll_layout { u32 pllr_mask; - u16 mul_mask; + u32 mul_mask; + u32 frac_mask; + u32 div_mask; + u32 endiv_mask; u8 mul_shift; + u8 frac_shift; + u8 div_shift; + u8 endiv_shift; }; extern const struct clk_pll_layout at91rm9200_pll_layout; @@ -122,8 +128,8 @@ struct clk_hw * __init at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const struct clk_pcr_layout *layout, const char *name, const char **parent_names, - u8 num_parents, u8 id, bool pll_audio, - const struct clk_range *range); + u32 *mux_table, u8 num_parents, u8 id, + const struct clk_range *range, int chg_pid); struct clk_hw * __init at91_clk_register_h32mx(struct regmap *regmap, const char *name, @@ -155,13 +161,21 @@ at91_clk_register_master(struct regmap *regmap, const char *name, const struct clk_master_characteristics *characteristics); struct 
clk_hw * __init +at91_clk_sama7g5_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, u32 *mux_table, + spinlock_t *lock, u8 id, bool critical, + int chg_pid); + +struct clk_hw * __init at91_clk_register_peripheral(struct regmap *regmap, const char *name, const char *parent_name, u32 id); struct clk_hw * __init at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, const struct clk_pcr_layout *layout, const char *name, const char *parent_name, - u32 id, const struct clk_range *range); + u32 id, const struct clk_range *range, + int chg_pid); struct clk_hw * __init at91_clk_register_pll(struct regmap *regmap, const char *name, @@ -173,14 +187,23 @@ at91_clk_register_plldiv(struct regmap *regmap, const char *name, const char *parent_name); struct clk_hw * __init -sam9x60_clk_register_pll(struct regmap *regmap, spinlock_t *lock, - const char *name, const char *parent_name, u8 id, - const struct clk_pll_characteristics *characteristics); +sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock, + const char *name, const char *parent_name, u8 id, + const struct clk_pll_characteristics *characteristics, + const struct clk_pll_layout *layout, bool critical); + +struct clk_hw * __init +sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock, + const char *name, const char *parent_name, + struct clk_hw *parent_hw, u8 id, + const struct clk_pll_characteristics *characteristics, + const struct clk_pll_layout *layout, bool critical); struct clk_hw * __init at91_clk_register_programmable(struct regmap *regmap, const char *name, const char **parent_names, u8 num_parents, u8 id, - const struct clk_programmable_layout *layout); + const struct clk_programmable_layout *layout, + u32 *mux_table); struct clk_hw * __init at91_clk_register_sam9260_slow(struct regmap *regmap, @@ -213,6 +236,10 @@ struct clk_hw * __init at91_clk_register_utmi(struct regmap *regmap_pmc, struct regmap *regmap_sfr, const char *name, const char *parent_name); +struct clk_hw * __init +at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name, + const char *parent_name); + #ifdef CONFIG_PM void pmc_register_id(u8 id); void pmc_register_pck(u8 pck); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index 3e20aa68259f..ab6318c0589e 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -22,7 +22,7 @@ static const struct clk_master_layout sam9x60_master_layout = { }; static const struct clk_range plla_outputs[] = { - { .min = 300000000, .max = 600000000 }, + { .min = 2343750, .max = 1200000000 }, }; static const struct clk_pll_characteristics plla_characteristics = { @@ -42,6 +42,20 @@ static const struct clk_pll_characteristics upll_characteristics = { .upll = true, }; +static const struct clk_pll_layout pll_frac_layout = { + .mul_mask = GENMASK(31, 24), + .frac_mask = GENMASK(21, 0), + .mul_shift = 24, + .frac_shift = 0, +}; + +static const struct clk_pll_layout pll_div_layout = { + .div_mask = GENMASK(7, 0), + .endiv_mask = BIT(29), + .div_shift = 0, + .endiv_shift = 29, +}; + static const struct clk_programmable_layout sam9x60_programmable_layout = { .pres_mask = 0xff, .pres_shift = 8, @@ -156,6 +170,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) const char *td_slck_name, *md_slck_name, *mainxtal_name; struct pmc_data *sam9x60_pmc; const char *parent_names[6]; + struct clk_hw *main_osc_hw; struct regmap *regmap; struct clk_hw *hw; int i; @@ -178,7 +193,7 @@ static 
void __init sam9x60_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; @@ -189,7 +204,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) if (!sam9x60_pmc) return; - hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 24000000, + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, 50000000); if (IS_ERR(hw)) goto err_free; @@ -200,6 +215,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) bypass); if (IS_ERR(hw)) goto err_free; + main_osc_hw = hw; parent_names[0] = "main_rc_osc"; parent_names[1] = "main_osc"; @@ -209,15 +225,31 @@ static void __init sam9x60_pmc_setup(struct device_node *np) sam9x60_pmc->chws[PMC_MAIN] = hw; - hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "pllack", - "mainck", 0, &plla_characteristics); + hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "pllack_fracck", + "mainck", sam9x60_pmc->chws[PMC_MAIN], + 0, &plla_characteristics, + &pll_frac_layout, true); + if (IS_ERR(hw)) + goto err_free; + + hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "pllack_divck", + "pllack_fracck", 0, &plla_characteristics, + &pll_div_layout, true); if (IS_ERR(hw)) goto err_free; sam9x60_pmc->chws[PMC_PLLACK] = hw; - hw = sam9x60_clk_register_pll(regmap, &pmc_pll_lock, "upllck", - "main_osc", 1, &upll_characteristics); + hw = sam9x60_clk_register_frac_pll(regmap, &pmc_pll_lock, "upllck_fracck", + "main_osc", main_osc_hw, 1, + &upll_characteristics, + &pll_frac_layout, false); + if (IS_ERR(hw)) + goto err_free; + + hw = sam9x60_clk_register_div_pll(regmap, &pmc_pll_lock, "upllck_divck", + "upllck_fracck", 1, &upll_characteristics, + &pll_div_layout, false); if (IS_ERR(hw)) goto err_free; @@ -225,7 +257,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) parent_names[0] = md_slck_name; parent_names[1] = "mainck"; - parent_names[2] = "pllack"; + parent_names[2] = "pllack_divck"; hw = at91_clk_register_master(regmap, "masterck", 3, parent_names, &sam9x60_master_layout, &mck_characteristics); @@ -234,8 +266,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) sam9x60_pmc->chws[PMC_MCK] = hw; - parent_names[0] = "pllack"; - parent_names[1] = "upllck"; + parent_names[0] = "pllack_divck"; + parent_names[1] = "upllck_divck"; parent_names[2] = "main_osc"; hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3); if (IS_ERR(hw)) @@ -245,8 +277,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) parent_names[1] = td_slck_name; parent_names[2] = "mainck"; parent_names[3] = "masterck"; - parent_names[4] = "pllack"; - parent_names[5] = "upllck"; + parent_names[4] = "pllack_divck"; + parent_names[5] = "upllck_divck"; for (i = 0; i < 8; i++) { char name[6]; @@ -254,7 +286,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 6, i, - &sam9x60_programmable_layout); + &sam9x60_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; @@ -277,7 +310,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) sam9x60_periphck[i].n, "masterck", sam9x60_periphck[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; @@ -288,10 +321,9 @@ static void __init sam9x60_pmc_setup(struct device_node *np) hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, &sam9x60_pcr_layout, sam9x60_gck[i].n, - parent_names, 6, + parent_names, NULL, 6, sam9x60_gck[i].id, - 
false, - &sam9x60_gck[i].r); + &sam9x60_gck[i].r, INT_MIN); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index d69421d71daf..8b220762941a 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c @@ -116,21 +116,20 @@ static const struct { char *n; u8 id; struct clk_range r; - bool pll; + int chg_pid; } sama5d2_gck[] = { - { .n = "sdmmc0_gclk", .id = 31, }, - { .n = "sdmmc1_gclk", .id = 32, }, - { .n = "tcb0_gclk", .id = 35, .r = { .min = 0, .max = 83000000 }, }, - { .n = "tcb1_gclk", .id = 36, .r = { .min = 0, .max = 83000000 }, }, - { .n = "pwm_gclk", .id = 38, .r = { .min = 0, .max = 83000000 }, }, - { .n = "isc_gclk", .id = 46, }, - { .n = "pdmic_gclk", .id = 48, }, - { .n = "i2s0_gclk", .id = 54, .pll = true }, - { .n = "i2s1_gclk", .id = 55, .pll = true }, - { .n = "can0_gclk", .id = 56, .r = { .min = 0, .max = 80000000 }, }, - { .n = "can1_gclk", .id = 57, .r = { .min = 0, .max = 80000000 }, }, - { .n = "classd_gclk", .id = 59, .r = { .min = 0, .max = 100000000 }, - .pll = true }, + { .n = "sdmmc0_gclk", .id = 31, .chg_pid = INT_MIN, }, + { .n = "sdmmc1_gclk", .id = 32, .chg_pid = INT_MIN, }, + { .n = "tcb0_gclk", .id = 35, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, }, + { .n = "tcb1_gclk", .id = 36, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, }, + { .n = "pwm_gclk", .id = 38, .chg_pid = INT_MIN, .r = { .min = 0, .max = 83000000 }, }, + { .n = "isc_gclk", .id = 46, .chg_pid = INT_MIN, }, + { .n = "pdmic_gclk", .id = 48, .chg_pid = INT_MIN, }, + { .n = "i2s0_gclk", .id = 54, .chg_pid = 5, }, + { .n = "i2s1_gclk", .id = 55, .chg_pid = 5, }, + { .n = "can0_gclk", .id = 56, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, }, + { .n = "can1_gclk", .id = 57, .chg_pid = INT_MIN, .r = { .min = 0, .max = 80000000 }, }, + { .n = "classd_gclk", .id = 59, .chg_pid = 5, .r = { .min = 0, .max = 100000000 }, }, }; static const struct clk_programmable_layout sama5d2_programmable_layout = { @@ -269,7 +268,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 6, i, - &sama5d2_programmable_layout); + &sama5d2_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; @@ -292,7 +292,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) sama5d2_periphck[i].n, "masterck", sama5d2_periphck[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; @@ -305,7 +305,8 @@ static void __init sama5d2_pmc_setup(struct device_node *np) sama5d2_periph32ck[i].n, "h32mxck", sama5d2_periph32ck[i].id, - &sama5d2_periph32ck[i].r); + &sama5d2_periph32ck[i].r, + INT_MIN); if (IS_ERR(hw)) goto err_free; @@ -322,10 +323,10 @@ static void __init sama5d2_pmc_setup(struct device_node *np) hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, &sama5d2_pcr_layout, sama5d2_gck[i].n, - parent_names, 6, + parent_names, NULL, 6, sama5d2_gck[i].id, - sama5d2_gck[i].pll, - &sama5d2_gck[i].r); + &sama5d2_gck[i].r, + sama5d2_gck[i].chg_pid); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c index 5e4e44dd4c37..7c6e0a5b9dc8 100644 --- a/drivers/clk/at91/sama5d3.c +++ b/drivers/clk/at91/sama5d3.c @@ -121,7 +121,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np) return; mainxtal_name = of_clk_get_parent_name(np, i); - regmap = syscon_node_to_regmap(np); + regmap = device_node_to_regmap(np); if (IS_ERR(regmap)) return; @@ -200,7 +200,8 @@ static void __init 
sama5d3_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 5, i, - &at91sam9x5_programmable_layout); + &at91sam9x5_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; @@ -223,7 +224,8 @@ static void __init sama5d3_pmc_setup(struct device_node *np) sama5d3_periphck[i].n, "masterck", sama5d3_periphck[i].id, - &sama5d3_periphck[i].r); + &sama5d3_periphck[i].r, + INT_MIN); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index 662ff5fa6e98..92d8d4141b43 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c @@ -223,7 +223,8 @@ static void __init sama5d4_pmc_setup(struct device_node *np) hw = at91_clk_register_programmable(regmap, name, parent_names, 5, i, - &at91sam9x5_programmable_layout); + &at91sam9x5_programmable_layout, + NULL); if (IS_ERR(hw)) goto err_free; @@ -246,7 +247,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) sama5d4_periphck[i].n, "masterck", sama5d4_periphck[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; @@ -259,7 +260,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) sama5d4_periph32ck[i].n, "h32mxck", sama5d4_periph32ck[i].id, - &range); + &range, INT_MIN); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c new file mode 100644 index 000000000000..0db2ab3eca14 --- /dev/null +++ b/drivers/clk/at91/sama7g5.c @@ -0,0 +1,1059 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SAMA7G5 PMC code. + * + * Copyright (C) 2020 Microchip Technology Inc. and its subsidiaries + * + * Author: Claudiu Beznea <claudiu.beznea@microchip.com> + * + */ +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +#define SAMA7G5_INIT_TABLE(_table, _count) \ + do { \ + u8 _i; \ + for (_i = 0; _i < (_count); _i++) \ + (_table)[_i] = _i; \ + } while (0) + +#define SAMA7G5_FILL_TABLE(_to, _from, _count) \ + do { \ + u8 _i; \ + for (_i = 0; _i < (_count); _i++) { \ + (_to)[_i] = (_from)[_i]; \ + } \ + } while (0) + +static DEFINE_SPINLOCK(pmc_pll_lock); +static DEFINE_SPINLOCK(pmc_mckX_lock); + +/** + * PLL clocks identifiers + * @PLL_ID_CPU: CPU PLL identifier + * @PLL_ID_SYS: System PLL identifier + * @PLL_ID_DDR: DDR PLL identifier + * @PLL_ID_IMG: Image subsystem PLL identifier + * @PLL_ID_BAUD: Baud PLL identifier + * @PLL_ID_AUDIO: Audio PLL identifier + * @PLL_ID_ETH: Ethernet PLL identifier + */ +enum pll_ids { + PLL_ID_CPU, + PLL_ID_SYS, + PLL_ID_DDR, + PLL_ID_IMG, + PLL_ID_BAUD, + PLL_ID_AUDIO, + PLL_ID_ETH, + PLL_ID_MAX, +}; + +/** + * PLL type identifiers + * @PLL_TYPE_FRAC: fractional PLL identifier + * @PLL_TYPE_DIV: divider PLL identifier + */ +enum pll_type { + PLL_TYPE_FRAC, + PLL_TYPE_DIV, +}; + +/* Layout for fractional PLLs. */ +static const struct clk_pll_layout pll_layout_frac = { + .mul_mask = GENMASK(31, 24), + .frac_mask = GENMASK(21, 0), + .mul_shift = 24, + .frac_shift = 0, +}; + +/* Layout for DIVPMC dividers. */ +static const struct clk_pll_layout pll_layout_divpmc = { + .div_mask = GENMASK(7, 0), + .endiv_mask = BIT(29), + .div_shift = 0, + .endiv_shift = 29, +}; + +/* Layout for DIVIO dividers. 
*/ +static const struct clk_pll_layout pll_layout_divio = { + .div_mask = GENMASK(19, 12), + .endiv_mask = BIT(30), + .div_shift = 12, + .endiv_shift = 30, +}; + +/** + * PLL clocks description + * @n: clock name + * @p: clock parent + * @l: clock layout + * @t: clock type + * @c: true if clock is critical and cannot be disabled + * @eid: export index in sama7g5->chws[] array + */ +static const struct { + const char *n; + const char *p; + const struct clk_pll_layout *l; + u8 t; + u8 c; + u8 eid; +} sama7g5_plls[][PLL_ID_MAX] = { + [PLL_ID_CPU] = { + { .n = "cpupll_fracck", + .p = "mainck", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, + .c = 1, }, + + { .n = "cpupll_divpmcck", + .p = "cpupll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, + .c = 1, }, + }, + + [PLL_ID_SYS] = { + { .n = "syspll_fracck", + .p = "mainck", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, + .c = 1, }, + + { .n = "syspll_divpmcck", + .p = "syspll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, + .c = 1, }, + }, + + [PLL_ID_DDR] = { + { .n = "ddrpll_fracck", + .p = "mainck", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, + .c = 1, }, + + { .n = "ddrpll_divpmcck", + .p = "ddrpll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, + .c = 1, }, + }, + + [PLL_ID_IMG] = { + { .n = "imgpll_fracck", + .p = "mainck", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, }, + + { .n = "imgpll_divpmcck", + .p = "imgpll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, }, + }, + + [PLL_ID_BAUD] = { + { .n = "baudpll_fracck", + .p = "mainck", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, }, + + { .n = "baudpll_divpmcck", + .p = "baudpll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, }, + }, + + [PLL_ID_AUDIO] = { + { .n = "audiopll_fracck", + .p = "main_xtal", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, }, + + { .n = "audiopll_divpmcck", + .p = "audiopll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, + .eid = PMC_I2S0_MUX, }, + + { .n = "audiopll_diviock", + .p = "audiopll_fracck", + .l = &pll_layout_divio, + .t = PLL_TYPE_DIV, + .eid = PMC_I2S1_MUX, }, + }, + + [PLL_ID_ETH] = { + { .n = "ethpll_fracck", + .p = "main_xtal", + .l = &pll_layout_frac, + .t = PLL_TYPE_FRAC, }, + + { .n = "ethpll_divpmcck", + .p = "ethpll_fracck", + .l = &pll_layout_divpmc, + .t = PLL_TYPE_DIV, }, + }, +}; + +/** + * Master clock (MCK[1..4]) description + * @n: clock name + * @ep: extra parents names array + * @ep_chg_id: index in parents array that specifies the changeable + * parent + * @ep_count: extra parents count + * @ep_mux_table: mux table for extra parents + * @id: clock id + * @c: true if clock is critical and cannot be disabled + */ +static const struct { + const char *n; + const char *ep[4]; + int ep_chg_id; + u8 ep_count; + u8 ep_mux_table[4]; + u8 id; + u8 c; +} sama7g5_mckx[] = { + { .n = "mck1", + .id = 1, + .ep = { "syspll_divpmcck", }, + .ep_mux_table = { 5, }, + .ep_count = 1, + .ep_chg_id = INT_MIN, + .c = 1, }, + + { .n = "mck2", + .id = 2, + .ep = { "ddrpll_divpmcck", }, + .ep_mux_table = { 6, }, + .ep_count = 1, + .ep_chg_id = INT_MIN, + .c = 1, }, + + { .n = "mck3", + .id = 3, + .ep = { "syspll_divpmcck", "ddrpll_divpmcck", "imgpll_divpmcck", }, + .ep_mux_table = { 5, 6, 7, }, + .ep_count = 3, + .ep_chg_id = 6, }, + + { .n = "mck4", + .id = 4, + .ep = { "syspll_divpmcck", }, + .ep_mux_table = { 5, }, + .ep_count = 1, + .ep_chg_id = INT_MIN, + .c = 1, }, +}; + +/** + * System clock description + * @n: clock name + * @p: clock parent name + * @id: clock id + */ +static const 
struct { + const char *n; + const char *p; + u8 id; +} sama7g5_systemck[] = { + { .n = "pck0", .p = "prog0", .id = 8, }, + { .n = "pck1", .p = "prog1", .id = 9, }, + { .n = "pck2", .p = "prog2", .id = 10, }, + { .n = "pck3", .p = "prog3", .id = 11, }, + { .n = "pck4", .p = "prog4", .id = 12, }, + { .n = "pck5", .p = "prog5", .id = 13, }, + { .n = "pck6", .p = "prog6", .id = 14, }, + { .n = "pck7", .p = "prog7", .id = 15, }, +}; + +/* Mux table for programmable clocks. */ +static u32 sama7g5_prog_mux_table[] = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, }; + +/** + * Peripheral clock description + * @n: clock name + * @p: clock parent name + * @r: clock range values + * @id: clock id + * @chgp: index in parent array of the changeable parent + */ +static const struct { + const char *n; + const char *p; + struct clk_range r; + u8 chgp; + u8 id; +} sama7g5_periphck[] = { + { .n = "pioA_clk", .p = "mck0", .id = 11, }, + { .n = "sfr_clk", .p = "mck1", .id = 19, }, + { .n = "hsmc_clk", .p = "mck1", .id = 21, }, + { .n = "xdmac0_clk", .p = "mck1", .id = 22, }, + { .n = "xdmac1_clk", .p = "mck1", .id = 23, }, + { .n = "xdmac2_clk", .p = "mck1", .id = 24, }, + { .n = "acc_clk", .p = "mck1", .id = 25, }, + { .n = "aes_clk", .p = "mck1", .id = 27, }, + { .n = "tzaesbasc_clk", .p = "mck1", .id = 28, }, + { .n = "asrc_clk", .p = "mck1", .id = 30, .r = { .max = 200000000, }, }, + { .n = "cpkcc_clk", .p = "mck0", .id = 32, }, + { .n = "csi_clk", .p = "mck3", .id = 33, .r = { .max = 266000000, }, .chgp = 1, }, + { .n = "csi2dc_clk", .p = "mck3", .id = 34, .r = { .max = 266000000, }, .chgp = 1, }, + { .n = "eic_clk", .p = "mck1", .id = 37, }, + { .n = "flex0_clk", .p = "mck1", .id = 38, }, + { .n = "flex1_clk", .p = "mck1", .id = 39, }, + { .n = "flex2_clk", .p = "mck1", .id = 40, }, + { .n = "flex3_clk", .p = "mck1", .id = 41, }, + { .n = "flex4_clk", .p = "mck1", .id = 42, }, + { .n = "flex5_clk", .p = "mck1", .id = 43, }, + { .n = "flex6_clk", .p = "mck1", .id = 44, }, + { .n = "flex7_clk", .p = "mck1", .id = 45, }, + { .n = "flex8_clk", .p = "mck1", .id = 46, }, + { .n = "flex9_clk", .p = "mck1", .id = 47, }, + { .n = "flex10_clk", .p = "mck1", .id = 48, }, + { .n = "flex11_clk", .p = "mck1", .id = 49, }, + { .n = "gmac0_clk", .p = "mck1", .id = 51, }, + { .n = "gmac1_clk", .p = "mck1", .id = 52, }, + { .n = "icm_clk", .p = "mck1", .id = 55, }, + { .n = "isc_clk", .p = "mck3", .id = 56, .r = { .max = 266000000, }, .chgp = 1, }, + { .n = "i2smcc0_clk", .p = "mck1", .id = 57, .r = { .max = 200000000, }, }, + { .n = "i2smcc1_clk", .p = "mck1", .id = 58, .r = { .max = 200000000, }, }, + { .n = "matrix_clk", .p = "mck1", .id = 60, }, + { .n = "mcan0_clk", .p = "mck1", .id = 61, .r = { .max = 200000000, }, }, + { .n = "mcan1_clk", .p = "mck1", .id = 62, .r = { .max = 200000000, }, }, + { .n = "mcan2_clk", .p = "mck1", .id = 63, .r = { .max = 200000000, }, }, + { .n = "mcan3_clk", .p = "mck1", .id = 64, .r = { .max = 200000000, }, }, + { .n = "mcan4_clk", .p = "mck1", .id = 65, .r = { .max = 200000000, }, }, + { .n = "mcan5_clk", .p = "mck1", .id = 66, .r = { .max = 200000000, }, }, + { .n = "pdmc0_clk", .p = "mck1", .id = 68, .r = { .max = 200000000, }, }, + { .n = "pdmc1_clk", .p = "mck1", .id = 69, .r = { .max = 200000000, }, }, + { .n = "pit64b0_clk", .p = "mck1", .id = 70, }, + { .n = "pit64b1_clk", .p = "mck1", .id = 71, }, + { .n = "pit64b2_clk", .p = "mck1", .id = 72, }, + { .n = "pit64b3_clk", .p = "mck1", .id = 73, }, + { .n = "pit64b4_clk", .p = "mck1", .id = 74, }, + { .n = "pit64b5_clk", .p = "mck1", .id = 
75, }, + { .n = "pwm_clk", .p = "mck1", .id = 77, }, + { .n = "qspi0_clk", .p = "mck1", .id = 78, }, + { .n = "qspi1_clk", .p = "mck1", .id = 79, }, + { .n = "sdmmc0_clk", .p = "mck1", .id = 80, }, + { .n = "sdmmc1_clk", .p = "mck1", .id = 81, }, + { .n = "sdmmc2_clk", .p = "mck1", .id = 82, }, + { .n = "sha_clk", .p = "mck1", .id = 83, }, + { .n = "spdifrx_clk", .p = "mck1", .id = 84, .r = { .max = 200000000, }, }, + { .n = "spdiftx_clk", .p = "mck1", .id = 85, .r = { .max = 200000000, }, }, + { .n = "ssc0_clk", .p = "mck1", .id = 86, .r = { .max = 200000000, }, }, + { .n = "ssc1_clk", .p = "mck1", .id = 87, .r = { .max = 200000000, }, }, + { .n = "tcb0_ch0_clk", .p = "mck1", .id = 88, .r = { .max = 200000000, }, }, + { .n = "tcb0_ch1_clk", .p = "mck1", .id = 89, .r = { .max = 200000000, }, }, + { .n = "tcb0_ch2_clk", .p = "mck1", .id = 90, .r = { .max = 200000000, }, }, + { .n = "tcb1_ch0_clk", .p = "mck1", .id = 91, .r = { .max = 200000000, }, }, + { .n = "tcb1_ch1_clk", .p = "mck1", .id = 92, .r = { .max = 200000000, }, }, + { .n = "tcb1_ch2_clk", .p = "mck1", .id = 93, .r = { .max = 200000000, }, }, + { .n = "tcpca_clk", .p = "mck1", .id = 94, }, + { .n = "tcpcb_clk", .p = "mck1", .id = 95, }, + { .n = "tdes_clk", .p = "mck1", .id = 96, }, + { .n = "trng_clk", .p = "mck1", .id = 97, }, + { .n = "udphsa_clk", .p = "mck1", .id = 104, }, + { .n = "udphsb_clk", .p = "mck1", .id = 105, }, + { .n = "uhphs_clk", .p = "mck1", .id = 106, }, +}; + +/** + * Generic clock description + * @n: clock name + * @pp: PLL parents + * @pp_mux_table: PLL parents mux table + * @r: clock output range + * @pp_chg_id: id in parent array of changeable PLL parent + * @pp_count: PLL parents count + * @id: clock id + */ +static const struct { + const char *n; + const char *pp[8]; + const char pp_mux_table[8]; + struct clk_range r; + int pp_chg_id; + u8 pp_count; + u8 id; +} sama7g5_gck[] = { + { .n = "adc_gclk", + .id = 26, + .r = { .max = 100000000, }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 7, 9, }, + .pp_count = 3, + .pp_chg_id = INT_MIN, }, + + { .n = "asrc_gclk", + .id = 30, + .r = { .max = 200000000 }, + .pp = { "audiopll_divpmcck", }, + .pp_mux_table = { 9, }, + .pp_count = 1, + .pp_chg_id = 4, }, + + { .n = "csi_gclk", + .id = 33, + .r = { .max = 27000000 }, + .pp = { "ddrpll_divpmcck", "imgpll_divpmcck", }, + .pp_mux_table = { 6, 7, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex0_gclk", + .id = 38, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex1_gclk", + .id = 39, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex2_gclk", + .id = 40, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex3_gclk", + .id = 41, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex4_gclk", + .id = 42, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex5_gclk", + .id = 43, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = 
{ 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex6_gclk", + .id = 44, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex7_gclk", + .id = 45, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex8_gclk", + .id = 46, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex9_gclk", + .id = 47, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex10_gclk", + .id = 48, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "flex11_gclk", + .id = 49, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "gmac0_gclk", + .id = 51, + .r = { .max = 125000000 }, + .pp = { "ethpll_divpmcck", }, + .pp_mux_table = { 10, }, + .pp_count = 1, + .pp_chg_id = 4, }, + + { .n = "gmac1_gclk", + .id = 52, + .r = { .max = 50000000 }, + .pp = { "ethpll_divpmcck", }, + .pp_mux_table = { 10, }, + .pp_count = 1, + .pp_chg_id = INT_MIN, }, + + { .n = "gmac0_tsu_gclk", + .id = 53, + .r = { .max = 300000000 }, + .pp = { "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 9, 10, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "gmac1_tsu_gclk", + .id = 54, + .r = { .max = 300000000 }, + .pp = { "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 9, 10, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "i2smcc0_gclk", + .id = 57, + .r = { .max = 100000000 }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "i2smcc1_gclk", + .id = 58, + .r = { .max = 100000000 }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "mcan0_gclk", + .id = 61, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "mcan1_gclk", + .id = 62, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "mcan2_gclk", + .id = 63, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "mcan3_gclk", + .id = 64, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "mcan4_gclk", + .id = 65, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "mcan5_gclk", + .id = 66, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "pdmc0_gclk", + .id = 68, + .r = { .max = 50000000 }, + .pp = { "syspll_divpmcck", 
"baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "pdmc1_gclk", + .id = 69, + .r = { .max = 50000000, }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "pit64b0_gclk", + .id = 70, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "pit64b1_gclk", + .id = 71, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "pit64b2_gclk", + .id = 72, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "pit64b3_gclk", + .id = 73, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "pit64b4_gclk", + .id = 74, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "pit64b5_gclk", + .id = 75, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "qspi0_gclk", + .id = 78, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "qspi1_gclk", + .id = 79, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = INT_MIN, }, + + { .n = "sdmmc0_gclk", + .id = 80, + .r = { .max = 208000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "sdmmc1_gclk", + .id = 81, + .r = { .max = 208000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "sdmmc2_gclk", + .id = 82, + .r = { .max = 208000000 }, + .pp = { "syspll_divpmcck", "baudpll_divpmcck", }, + .pp_mux_table = { 5, 8, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "spdifrx_gclk", + .id = 84, + .r = { .max = 150000000 }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "spdiftx_gclk", + .id = 85, + .r = { .max = 25000000 }, + .pp = { "syspll_divpmcck", "audiopll_divpmcck", }, + .pp_mux_table = { 5, 9, }, + .pp_count = 2, + .pp_chg_id = 5, }, + + { .n = "tcb0_ch0_gclk", + .id = 88, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", "imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "tcb1_ch0_gclk", + .id = 91, + .r = { .max = 200000000 }, + .pp = { "syspll_divpmcck", 
"imgpll_divpmcck", "baudpll_divpmcck", + "audiopll_divpmcck", "ethpll_divpmcck", }, + .pp_mux_table = { 5, 7, 8, 9, 10, }, + .pp_count = 5, + .pp_chg_id = INT_MIN, }, + + { .n = "tcpca_gclk", + .id = 94, + .r = { .max = 32768, }, + .pp_chg_id = INT_MIN, }, + + { .n = "tcpcb_gclk", + .id = 95, + .r = { .max = 32768, }, + .pp_chg_id = INT_MIN, }, +}; + +/* PLL output range. */ +static const struct clk_range pll_outputs[] = { + { .min = 2343750, .max = 1200000000 }, +}; + +/* PLL characteristics. */ +static const struct clk_pll_characteristics pll_characteristics = { + .input = { .min = 12000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(pll_outputs), + .output = pll_outputs, +}; + +/* MCK0 characteristics. */ +static const struct clk_master_characteristics mck0_characteristics = { + .output = { .min = 140000000, .max = 200000000 }, + .divisors = { 1, 2, 4, 3 }, + .have_div3_pres = 1, +}; + +/* MCK0 layout. */ +static const struct clk_master_layout mck0_layout = { + .mask = 0x373, + .pres_shift = 4, + .offset = 0x28, +}; + +/* Programmable clock layout. */ +static const struct clk_programmable_layout programmable_layout = { + .pres_mask = 0xff, + .pres_shift = 8, + .css_mask = 0x1f, + .have_slck_mck = 0, + .is_pres_direct = 1, +}; + +/* Peripheral clock layout. */ +static const struct clk_pcr_layout sama7g5_pcr_layout = { + .offset = 0x88, + .cmd = BIT(31), + .gckcss_mask = GENMASK(12, 8), + .pid_mask = GENMASK(6, 0), +}; + +static void __init sama7g5_pmc_setup(struct device_node *np) +{ + const char *td_slck_name, *md_slck_name, *mainxtal_name; + struct pmc_data *sama7g5_pmc; + const char *parent_names[10]; + void **alloc_mem = NULL; + int alloc_mem_size = 0; + struct regmap *regmap; + struct clk_hw *hw; + bool bypass; + int i, j; + + i = of_property_match_string(np, "clock-names", "td_slck"); + if (i < 0) + return; + + td_slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "md_slck"); + if (i < 0) + return; + + md_slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = device_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + sama7g5_pmc = pmc_data_allocate(PMC_I2S1_MUX + 1, + nck(sama7g5_systemck), + nck(sama7g5_periphck), + nck(sama7g5_gck)); + if (!sama7g5_pmc) + return; + + alloc_mem = kmalloc(sizeof(void *) * + (ARRAY_SIZE(sama7g5_mckx) + ARRAY_SIZE(sama7g5_gck)), + GFP_KERNEL); + if (!alloc_mem) + goto err_free; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 50000000); + if (IS_ERR(hw)) + goto err_free; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + sama7g5_pmc->chws[PMC_MAIN] = hw; + + for (i = 0; i < PLL_ID_MAX; i++) { + for (j = 0; j < 3; j++) { + struct clk_hw *parent_hw; + + if (!sama7g5_plls[i][j].n) + continue; + + switch (sama7g5_plls[i][j].t) { + case PLL_TYPE_FRAC: + if (!strcmp(sama7g5_plls[i][j].p, "mainck")) + parent_hw = sama7g5_pmc->chws[PMC_MAIN]; + else + parent_hw = __clk_get_hw(of_clk_get_by_name(np, + sama7g5_plls[i][j].p)); + + hw = sam9x60_clk_register_frac_pll(regmap, + &pmc_pll_lock, sama7g5_plls[i][j].n, + sama7g5_plls[i][j].p, parent_hw, 
i, + &pll_characteristics, + sama7g5_plls[i][j].l, + sama7g5_plls[i][j].c); + break; + + case PLL_TYPE_DIV: + hw = sam9x60_clk_register_div_pll(regmap, + &pmc_pll_lock, sama7g5_plls[i][j].n, + sama7g5_plls[i][j].p, i, + &pll_characteristics, + sama7g5_plls[i][j].l, + sama7g5_plls[i][j].c); + break; + + default: + continue; + } + + if (IS_ERR(hw)) + goto err_free; + + if (sama7g5_plls[i][j].eid) + sama7g5_pmc->chws[sama7g5_plls[i][j].eid] = hw; + } + } + + parent_names[0] = md_slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "cpupll_divpmcck"; + parent_names[3] = "syspll_divpmcck"; + hw = at91_clk_register_master(regmap, "mck0", 4, parent_names, + &mck0_layout, &mck0_characteristics); + if (IS_ERR(hw)) + goto err_free; + + sama7g5_pmc->chws[PMC_MCK] = hw; + + parent_names[0] = md_slck_name; + parent_names[1] = td_slck_name; + parent_names[2] = "mainck"; + parent_names[3] = "mck0"; + for (i = 0; i < ARRAY_SIZE(sama7g5_mckx); i++) { + u8 num_parents = 4 + sama7g5_mckx[i].ep_count; + u32 *mux_table; + + mux_table = kmalloc_array(num_parents, sizeof(*mux_table), + GFP_KERNEL); + if (!mux_table) + goto err_free; + + SAMA7G5_INIT_TABLE(mux_table, 4); + SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_mckx[i].ep_mux_table, + sama7g5_mckx[i].ep_count); + SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_mckx[i].ep, + sama7g5_mckx[i].ep_count); + + hw = at91_clk_sama7g5_register_master(regmap, sama7g5_mckx[i].n, + num_parents, parent_names, mux_table, + &pmc_mckX_lock, sama7g5_mckx[i].id, + sama7g5_mckx[i].c, + sama7g5_mckx[i].ep_chg_id); + if (IS_ERR(hw)) + goto err_free; + + alloc_mem[alloc_mem_size++] = mux_table; + } + + hw = at91_clk_sama7g5_register_utmi(regmap, "utmick", "main_xtal"); + if (IS_ERR(hw)) + goto err_free; + + sama7g5_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = md_slck_name; + parent_names[1] = td_slck_name; + parent_names[2] = "mainck"; + parent_names[3] = "mck0"; + parent_names[4] = "syspll_divpmcck"; + parent_names[5] = "ddrpll_divpmcck"; + parent_names[6] = "imgpll_divpmcck"; + parent_names[7] = "baudpll_divpmcck"; + parent_names[8] = "audiopll_divpmcck"; + parent_names[9] = "ethpll_divpmcck"; + for (i = 0; i < 8; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, parent_names, + 10, i, + &programmable_layout, + sama7g5_prog_mux_table); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(sama7g5_systemck); i++) { + hw = at91_clk_register_system(regmap, sama7g5_systemck[i].n, + sama7g5_systemck[i].p, + sama7g5_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + sama7g5_pmc->shws[sama7g5_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama7g5_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + &sama7g5_pcr_layout, + sama7g5_periphck[i].n, + sama7g5_periphck[i].p, + sama7g5_periphck[i].id, + &sama7g5_periphck[i].r, + sama7g5_periphck[i].chgp ? 
0 : + INT_MIN); + if (IS_ERR(hw)) + goto err_free; + + sama7g5_pmc->phws[sama7g5_periphck[i].id] = hw; + } + + parent_names[0] = md_slck_name; + parent_names[1] = td_slck_name; + parent_names[2] = "mainck"; + parent_names[3] = "mck0"; + for (i = 0; i < ARRAY_SIZE(sama7g5_gck); i++) { + u8 num_parents = 4 + sama7g5_gck[i].pp_count; + u32 *mux_table; + + mux_table = kmalloc_array(num_parents, sizeof(*mux_table), + GFP_KERNEL); + if (!mux_table) + goto err_free; + + SAMA7G5_INIT_TABLE(mux_table, 4); + SAMA7G5_FILL_TABLE(&mux_table[4], sama7g5_gck[i].pp_mux_table, + sama7g5_gck[i].pp_count); + SAMA7G5_FILL_TABLE(&parent_names[4], sama7g5_gck[i].pp, + sama7g5_gck[i].pp_count); + + hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, + &sama7g5_pcr_layout, + sama7g5_gck[i].n, + parent_names, mux_table, + num_parents, + sama7g5_gck[i].id, + &sama7g5_gck[i].r, + sama7g5_gck[i].pp_chg_id); + if (IS_ERR(hw)) + goto err_free; + + sama7g5_pmc->ghws[sama7g5_gck[i].id] = hw; + alloc_mem[alloc_mem_size++] = mux_table; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama7g5_pmc); + + return; + +err_free: + if (alloc_mem) { + for (i = 0; i < alloc_mem_size; i++) + kfree(alloc_mem[i]); + kfree(alloc_mem); + } + + pmc_data_free(sama7g5_pmc); +} + +/* Some clks are used for a clocksource */ +CLK_OF_DECLARE(sama7g5_pmc, "microchip,sama7g5-pmc", sama7g5_pmc_setup); diff --git a/drivers/clk/at91/sckc.c b/drivers/clk/at91/sckc.c index 15dc4cd86d76..2d65770d8665 100644 --- a/drivers/clk/at91/sckc.c +++ b/drivers/clk/at91/sckc.c @@ -471,8 +471,9 @@ static void __init of_sam9x60_sckc_setup(struct device_node *np) if (!regbase) return; - slow_rc = clk_hw_register_fixed_rate(NULL, parent_names[0], NULL, 0, - 32768); + slow_rc = clk_hw_register_fixed_rate_with_accuracy(NULL, parent_names[0], + NULL, 0, 32768, + 93750000); if (IS_ERR(slow_rc)) return; diff --git a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig index 8c83977a7dc4..784f12c72365 100644 --- a/drivers/clk/bcm/Kconfig +++ b/drivers/clk/bcm/Kconfig @@ -1,4 +1,15 @@ # SPDX-License-Identifier: GPL-2.0-only + +config CLK_BCM2711_DVP + tristate "Broadcom BCM2711 DVP support" + depends on ARCH_BCM2835 ||COMPILE_TEST + depends on COMMON_CLK + default ARCH_BCM2835 + select RESET_SIMPLE + help + Enable common clock framework support for the Broadcom BCM2711 + DVP Controller. 
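As an aside on the sama7g5 PMC setup above: the generated-clock registration loop always places md_slck, td_slck, mainck and mck0 in the first four parent slots (mux values 0..3) and then appends each clock's PLL parents from its .pp/.pp_mux_table pair. Below is a minimal, self-contained sketch of that assembly for "adc_gclk" - illustrative only, not the kernel code - under the assumption that SAMA7G5_INIT_TABLE() fills the first n slots with 0..n-1 and SAMA7G5_FILL_TABLE() copies entries, which is what the loop relies on.

#include <stdio.h>

int main(void)
{
	/* Fixed parents common to every generated clock (mux values 0..3). */
	const char *parent_names[8] = { "md_slck", "td_slck", "mainck", "mck0" };
	/* Per-clock PLL parents for adc_gclk, taken from the sama7g5_gck[] table. */
	const char *pp[] = { "syspll_divpmcck", "imgpll_divpmcck", "audiopll_divpmcck" };
	const unsigned int pp_mux_table[] = { 5, 7, 9 };
	unsigned int mux_table[8], i, num_parents = 4 + 3;

	for (i = 0; i < 4; i++)		/* mimics SAMA7G5_INIT_TABLE(mux_table, 4) */
		mux_table[i] = i;
	for (i = 0; i < 3; i++) {	/* mimics SAMA7G5_FILL_TABLE() for both arrays */
		mux_table[4 + i] = pp_mux_table[i];
		parent_names[4 + i] = pp[i];
	}

	for (i = 0; i < num_parents; i++)
		printf("parent %u: %-18s -> mux value %u\n",
		       i, parent_names[i], mux_table[i]);
	return 0;
}

The resulting num_parents/parent_names/mux_table triple is what at91_clk_register_generated() receives for each entry of the table.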
+ config CLK_BCM2835 bool "Broadcom BCM2835 clock support" depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST diff --git a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile index 0070ddf6cdd2..edb66b44cb27 100644 --- a/drivers/clk/bcm/Makefile +++ b/drivers/clk/bcm/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_CLK_BCM_KONA) += clk-kona-setup.o obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm281xx.o obj-$(CONFIG_CLK_BCM_KONA) += clk-bcm21664.o obj-$(CONFIG_COMMON_CLK_IPROC) += clk-iproc-armpll.o clk-iproc-pll.o clk-iproc-asiu.o +obj-$(CONFIG_CLK_BCM2711_DVP) += clk-bcm2711-dvp.o obj-$(CONFIG_CLK_BCM2835) += clk-bcm2835.o obj-$(CONFIG_CLK_BCM2835) += clk-bcm2835-aux.o obj-$(CONFIG_CLK_RASPBERRYPI) += clk-raspberrypi.o diff --git a/drivers/clk/bcm/clk-bcm2711-dvp.c b/drivers/clk/bcm/clk-bcm2711-dvp.c new file mode 100644 index 000000000000..8333e20dc9d2 --- /dev/null +++ b/drivers/clk/bcm/clk-bcm2711-dvp.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// Copyright 2020 Cerno + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/reset/reset-simple.h> + +#define DVP_HT_RPI_SW_INIT 0x04 +#define DVP_HT_RPI_MISC_CONFIG 0x08 + +#define NR_CLOCKS 2 +#define NR_RESETS 6 + +struct clk_dvp { + struct clk_hw_onecell_data *data; + struct reset_simple_data reset; +}; + +static const struct clk_parent_data clk_dvp_parent = { + .index = 0, +}; + +static int clk_dvp_probe(struct platform_device *pdev) +{ + struct clk_hw_onecell_data *data; + struct resource *res; + struct clk_dvp *dvp; + void __iomem *base; + int ret; + + dvp = devm_kzalloc(&pdev->dev, sizeof(*dvp), GFP_KERNEL); + if (!dvp) + return -ENOMEM; + platform_set_drvdata(pdev, dvp); + + dvp->data = devm_kzalloc(&pdev->dev, + struct_size(dvp->data, hws, NR_CLOCKS), + GFP_KERNEL); + if (!dvp->data) + return -ENOMEM; + data = dvp->data; + + base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); + if (IS_ERR(base)) + return PTR_ERR(base); + + dvp->reset.rcdev.owner = THIS_MODULE; + dvp->reset.rcdev.nr_resets = NR_RESETS; + dvp->reset.rcdev.ops = &reset_simple_ops; + dvp->reset.rcdev.of_node = pdev->dev.of_node; + dvp->reset.membase = base + DVP_HT_RPI_SW_INIT; + spin_lock_init(&dvp->reset.lock); + + ret = devm_reset_controller_register(&pdev->dev, &dvp->reset.rcdev); + if (ret) + return ret; + + data->hws[0] = clk_hw_register_gate_parent_data(&pdev->dev, + "hdmi0-108MHz", + &clk_dvp_parent, 0, + base + DVP_HT_RPI_MISC_CONFIG, 3, + CLK_GATE_SET_TO_DISABLE, + &dvp->reset.lock); + if (IS_ERR(data->hws[0])) + return PTR_ERR(data->hws[0]); + + data->hws[1] = clk_hw_register_gate_parent_data(&pdev->dev, + "hdmi1-108MHz", + &clk_dvp_parent, 0, + base + DVP_HT_RPI_MISC_CONFIG, 4, + CLK_GATE_SET_TO_DISABLE, + &dvp->reset.lock); + if (IS_ERR(data->hws[1])) { + ret = PTR_ERR(data->hws[1]); + goto unregister_clk0; + } + + data->num = NR_CLOCKS; + ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get, + data); + if (ret) + goto unregister_clk1; + + return 0; + +unregister_clk1: + clk_hw_unregister_gate(data->hws[1]); + +unregister_clk0: + clk_hw_unregister_gate(data->hws[0]); + return ret; +}; + +static int clk_dvp_remove(struct platform_device *pdev) +{ + struct clk_dvp *dvp = platform_get_drvdata(pdev); + struct clk_hw_onecell_data *data = dvp->data; + + clk_hw_unregister_gate(data->hws[1]); + clk_hw_unregister_gate(data->hws[0]); + + return 0; +} + +static const struct of_device_id clk_dvp_dt_ids[] = { + { .compatible = 
"brcm,brcm2711-dvp", }, + { /* sentinel */ } +}; + +static struct platform_driver clk_dvp_driver = { + .probe = clk_dvp_probe, + .remove = clk_dvp_remove, + .driver = { + .name = "brcm2711-dvp", + .of_match_table = clk_dvp_dt_ids, + }, +}; +module_platform_driver(clk_dvp_driver); + +MODULE_AUTHOR("Maxime Ripard <maxime@cerno.tech>"); +MODULE_DESCRIPTION("BCM2711 DVP clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index 6bb7efa12037..3439bc65bb4e 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -314,6 +314,7 @@ struct bcm2835_cprman { struct device *dev; void __iomem *regs; spinlock_t regs_lock; /* spinlock for all clocks */ + unsigned int soc; /* * Real names of cprman clock parents looked up through @@ -421,6 +422,7 @@ struct bcm2835_pll_data { u32 reference_enable_mask; /* Bit in CM_LOCK to indicate when the PLL has locked. */ u32 lock_mask; + u32 flags; const struct bcm2835_pll_ana_bits *ana; @@ -525,6 +527,20 @@ static int bcm2835_pll_is_on(struct clk_hw *hw) A2W_PLL_CTRL_PRST_DISABLE; } +static u32 bcm2835_pll_get_prediv_mask(struct bcm2835_cprman *cprman, + const struct bcm2835_pll_data *data) +{ + /* + * On BCM2711 there isn't a pre-divisor available in the PLL feedback + * loop. Bits 13:14 of ANA1 (PLLA,PLLB,PLLC,PLLD) have been re-purposed + * for to for VCO RANGE bits. + */ + if (cprman->soc & SOC_BCM2711) + return 0; + + return data->ana->fb_prediv_mask; +} + static void bcm2835_pll_choose_ndiv_and_fdiv(unsigned long rate, unsigned long parent_rate, u32 *ndiv, u32 *fdiv) @@ -582,7 +598,7 @@ static unsigned long bcm2835_pll_get_rate(struct clk_hw *hw, ndiv = (a2wctrl & A2W_PLL_CTRL_NDIV_MASK) >> A2W_PLL_CTRL_NDIV_SHIFT; pdiv = (a2wctrl & A2W_PLL_CTRL_PDIV_MASK) >> A2W_PLL_CTRL_PDIV_SHIFT; using_prediv = cprman_read(cprman, data->ana_reg_base + 4) & - data->ana->fb_prediv_mask; + bcm2835_pll_get_prediv_mask(cprman, data); if (using_prediv) { ndiv *= 2; @@ -665,6 +681,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, struct bcm2835_pll *pll = container_of(hw, struct bcm2835_pll, hw); struct bcm2835_cprman *cprman = pll->cprman; const struct bcm2835_pll_data *data = pll->data; + u32 prediv_mask = bcm2835_pll_get_prediv_mask(cprman, data); bool was_using_prediv, use_fb_prediv, do_ana_setup_first; u32 ndiv, fdiv, a2w_ctl; u32 ana[4]; @@ -682,7 +699,7 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, for (i = 3; i >= 0; i--) ana[i] = cprman_read(cprman, data->ana_reg_base + i * 4); - was_using_prediv = ana[1] & data->ana->fb_prediv_mask; + was_using_prediv = ana[1] & prediv_mask; ana[0] &= ~data->ana->mask0; ana[0] |= data->ana->set0; @@ -692,10 +709,10 @@ static int bcm2835_pll_set_rate(struct clk_hw *hw, ana[3] |= data->ana->set3; if (was_using_prediv && !use_fb_prediv) { - ana[1] &= ~data->ana->fb_prediv_mask; + ana[1] &= ~prediv_mask; do_ana_setup_first = true; } else if (!was_using_prediv && use_fb_prediv) { - ana[1] |= data->ana->fb_prediv_mask; + ana[1] |= prediv_mask; do_ana_setup_first = false; } else { do_ana_setup_first = true; @@ -1310,7 +1327,7 @@ static struct clk_hw *bcm2835_register_pll(struct bcm2835_cprman *cprman, init.num_parents = 1; init.name = pll_data->name; init.ops = &bcm2835_pll_clk_ops; - init.flags = CLK_IGNORE_UNUSED; + init.flags = pll_data->flags | CLK_IGNORE_UNUSED; pll = kzalloc(sizeof(*pll), GFP_KERNEL); if (!pll) @@ -1684,10 +1701,33 @@ static const struct bcm2835_clk_desc clk_desc_array[] = { .fixed_divider = 1, .flags = 
CLK_SET_RATE_PARENT), - /* - * PLLB is used for the ARM's clock. Controlled by firmware, see - * clk-raspberrypi.c. - */ + /* PLLB is used for the ARM's clock. */ + [BCM2835_PLLB] = REGISTER_PLL( + SOC_ALL, + .name = "pllb", + .cm_ctrl_reg = CM_PLLB, + .a2w_ctrl_reg = A2W_PLLB_CTRL, + .frac_reg = A2W_PLLB_FRAC, + .ana_reg_base = A2W_PLLB_ANA0, + .reference_enable_mask = A2W_XOSC_CTRL_PLLB_ENABLE, + .lock_mask = CM_LOCK_FLOCKB, + + .ana = &bcm2835_ana_default, + + .min_rate = 600000000u, + .max_rate = 3000000000u, + .max_fb_rate = BCM2835_MAX_FB_RATE, + .flags = CLK_GET_RATE_NOCACHE), + [BCM2835_PLLB_ARM] = REGISTER_PLL_DIV( + SOC_ALL, + .name = "pllb_arm", + .source_pll = "pllb", + .cm_reg = CM_PLLB, + .a2w_reg = A2W_PLLB_ARM, + .load_mask = CM_PLLB_LOADARM, + .hold_mask = CM_PLLB_HOLDARM, + .fixed_divider = 1, + .flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE), /* * PLLC is the core PLL, used to drive the core VPU clock. @@ -2238,6 +2278,7 @@ static int bcm2835_clk_probe(struct platform_device *pdev) platform_set_drvdata(pdev, cprman); cprman->onecell.num = asize; + cprman->soc = pdata->soc; hws = cprman->onecell.hws; for (i = 0; i < asize; i++) { diff --git a/drivers/clk/bcm/clk-bcm63xx-gate.c b/drivers/clk/bcm/clk-bcm63xx-gate.c index 98e884957db8..89297c57881e 100644 --- a/drivers/clk/bcm/clk-bcm63xx-gate.c +++ b/drivers/clk/bcm/clk-bcm63xx-gate.c @@ -6,6 +6,14 @@ #include <linux/of_device.h> #include <linux/platform_device.h> +#include <dt-bindings/clock/bcm3368-clock.h> +#include <dt-bindings/clock/bcm6318-clock.h> +#include <dt-bindings/clock/bcm6328-clock.h> +#include <dt-bindings/clock/bcm6358-clock.h> +#include <dt-bindings/clock/bcm6362-clock.h> +#include <dt-bindings/clock/bcm6368-clock.h> +#include <dt-bindings/clock/bcm63268-clock.h> + struct clk_bcm63xx_table_entry { const char * const name; u8 bit; @@ -20,126 +28,458 @@ struct clk_bcm63xx_hw { }; static const struct clk_bcm63xx_table_entry bcm3368_clocks[] = { - { .name = "mac", .bit = 3, }, - { .name = "tc", .bit = 5, }, - { .name = "us_top", .bit = 6, }, - { .name = "ds_top", .bit = 7, }, - { .name = "acm", .bit = 8, }, - { .name = "spi", .bit = 9, }, - { .name = "usbs", .bit = 10, }, - { .name = "bmu", .bit = 11, }, - { .name = "pcm", .bit = 12, }, - { .name = "ntp", .bit = 13, }, - { .name = "acp_b", .bit = 14, }, - { .name = "acp_a", .bit = 15, }, - { .name = "emusb", .bit = 17, }, - { .name = "enet0", .bit = 18, }, - { .name = "enet1", .bit = 19, }, - { .name = "usbsu", .bit = 20, }, - { .name = "ephy", .bit = 21, }, - { }, + { + .name = "mac", + .bit = BCM3368_CLK_MAC, + }, { + .name = "tc", + .bit = BCM3368_CLK_TC, + }, { + .name = "us_top", + .bit = BCM3368_CLK_US_TOP, + }, { + .name = "ds_top", + .bit = BCM3368_CLK_DS_TOP, + }, { + .name = "acm", + .bit = BCM3368_CLK_ACM, + }, { + .name = "spi", + .bit = BCM3368_CLK_SPI, + }, { + .name = "usbs", + .bit = BCM3368_CLK_USBS, + }, { + .name = "bmu", + .bit = BCM3368_CLK_BMU, + }, { + .name = "pcm", + .bit = BCM3368_CLK_PCM, + }, { + .name = "ntp", + .bit = BCM3368_CLK_NTP, + }, { + .name = "acp_b", + .bit = BCM3368_CLK_ACP_B, + }, { + .name = "acp_a", + .bit = BCM3368_CLK_ACP_A, + }, { + .name = "emusb", + .bit = BCM3368_CLK_EMUSB, + }, { + .name = "enet0", + .bit = BCM3368_CLK_ENET0, + }, { + .name = "enet1", + .bit = BCM3368_CLK_ENET1, + }, { + .name = "usbsu", + .bit = BCM3368_CLK_USBSU, + }, { + .name = "ephy", + .bit = BCM3368_CLK_EPHY, + }, { + /* sentinel */ + }, +}; + +static const struct clk_bcm63xx_table_entry bcm6318_clocks[] = { + { + .name = 
"adsl_asb", + .bit = BCM6318_CLK_ADSL_ASB, + }, { + .name = "usb_asb", + .bit = BCM6318_CLK_USB_ASB, + }, { + .name = "mips_asb", + .bit = BCM6318_CLK_MIPS_ASB, + }, { + .name = "pcie_asb", + .bit = BCM6318_CLK_PCIE_ASB, + }, { + .name = "phymips_asb", + .bit = BCM6318_CLK_PHYMIPS_ASB, + }, { + .name = "robosw_asb", + .bit = BCM6318_CLK_ROBOSW_ASB, + }, { + .name = "sar_asb", + .bit = BCM6318_CLK_SAR_ASB, + }, { + .name = "sdr_asb", + .bit = BCM6318_CLK_SDR_ASB, + }, { + .name = "swreg_asb", + .bit = BCM6318_CLK_SWREG_ASB, + }, { + .name = "periph_asb", + .bit = BCM6318_CLK_PERIPH_ASB, + }, { + .name = "cpubus160", + .bit = BCM6318_CLK_CPUBUS160, + }, { + .name = "adsl", + .bit = BCM6318_CLK_ADSL, + }, { + .name = "sar125", + .bit = BCM6318_CLK_SAR125, + }, { + .name = "mips", + .bit = BCM6318_CLK_MIPS, + .flags = CLK_IS_CRITICAL, + }, { + .name = "pcie", + .bit = BCM6318_CLK_PCIE, + }, { + .name = "robosw250", + .bit = BCM6318_CLK_ROBOSW250, + }, { + .name = "robosw025", + .bit = BCM6318_CLK_ROBOSW025, + }, { + .name = "sdr", + .bit = BCM6318_CLK_SDR, + .flags = CLK_IS_CRITICAL, + }, { + .name = "usbd", + .bit = BCM6318_CLK_USBD, + }, { + .name = "hsspi", + .bit = BCM6318_CLK_HSSPI, + }, { + .name = "pcie25", + .bit = BCM6318_CLK_PCIE25, + }, { + .name = "phymips", + .bit = BCM6318_CLK_PHYMIPS, + }, { + .name = "afe", + .bit = BCM6318_CLK_AFE, + }, { + .name = "qproc", + .bit = BCM6318_CLK_QPROC, + }, { + /* sentinel */ + }, +}; + +static const struct clk_bcm63xx_table_entry bcm6318_ubus_clocks[] = { + { + .name = "adsl-ubus", + .bit = BCM6318_UCLK_ADSL, + }, { + .name = "arb-ubus", + .bit = BCM6318_UCLK_ARB, + .flags = CLK_IS_CRITICAL, + }, { + .name = "mips-ubus", + .bit = BCM6318_UCLK_MIPS, + .flags = CLK_IS_CRITICAL, + }, { + .name = "pcie-ubus", + .bit = BCM6318_UCLK_PCIE, + }, { + .name = "periph-ubus", + .bit = BCM6318_UCLK_PERIPH, + .flags = CLK_IS_CRITICAL, + }, { + .name = "phymips-ubus", + .bit = BCM6318_UCLK_PHYMIPS, + }, { + .name = "robosw-ubus", + .bit = BCM6318_UCLK_ROBOSW, + }, { + .name = "sar-ubus", + .bit = BCM6318_UCLK_SAR, + }, { + .name = "sdr-ubus", + .bit = BCM6318_UCLK_SDR, + }, { + .name = "usb-ubus", + .bit = BCM6318_UCLK_USB, + }, { + /* sentinel */ + }, }; static const struct clk_bcm63xx_table_entry bcm6328_clocks[] = { - { .name = "phy_mips", .bit = 0, }, - { .name = "adsl_qproc", .bit = 1, }, - { .name = "adsl_afe", .bit = 2, }, - { .name = "adsl", .bit = 3, }, - { .name = "mips", .bit = 4, .flags = CLK_IS_CRITICAL, }, - { .name = "sar", .bit = 5, }, - { .name = "pcm", .bit = 6, }, - { .name = "usbd", .bit = 7, }, - { .name = "usbh", .bit = 8, }, - { .name = "hsspi", .bit = 9, }, - { .name = "pcie", .bit = 10, }, - { .name = "robosw", .bit = 11, }, - { }, + { + .name = "phy_mips", + .bit = BCM6328_CLK_PHYMIPS, + }, { + .name = "adsl_qproc", + .bit = BCM6328_CLK_ADSL_QPROC, + }, { + .name = "adsl_afe", + .bit = BCM6328_CLK_ADSL_AFE, + }, { + .name = "adsl", + .bit = BCM6328_CLK_ADSL, + }, { + .name = "mips", + .bit = BCM6328_CLK_MIPS, + .flags = CLK_IS_CRITICAL, + }, { + .name = "sar", + .bit = BCM6328_CLK_SAR, + }, { + .name = "pcm", + .bit = BCM6328_CLK_PCM, + }, { + .name = "usbd", + .bit = BCM6328_CLK_USBD, + }, { + .name = "usbh", + .bit = BCM6328_CLK_USBH, + }, { + .name = "hsspi", + .bit = BCM6328_CLK_HSSPI, + }, { + .name = "pcie", + .bit = BCM6328_CLK_PCIE, + }, { + .name = "robosw", + .bit = BCM6328_CLK_ROBOSW, + }, { + /* sentinel */ + }, }; static const struct clk_bcm63xx_table_entry bcm6358_clocks[] = { - { .name = "enet", .bit = 4, }, - { .name 
= "adslphy", .bit = 5, }, - { .name = "pcm", .bit = 8, }, - { .name = "spi", .bit = 9, }, - { .name = "usbs", .bit = 10, }, - { .name = "sar", .bit = 11, }, - { .name = "emusb", .bit = 17, }, - { .name = "enet0", .bit = 18, }, - { .name = "enet1", .bit = 19, }, - { .name = "usbsu", .bit = 20, }, - { .name = "ephy", .bit = 21, }, - { }, + { + .name = "enet", + .bit = BCM6358_CLK_ENET, + }, { + .name = "adslphy", + .bit = BCM6358_CLK_ADSLPHY, + }, { + .name = "pcm", + .bit = BCM6358_CLK_PCM, + }, { + .name = "spi", + .bit = BCM6358_CLK_SPI, + }, { + .name = "usbs", + .bit = BCM6358_CLK_USBS, + }, { + .name = "sar", + .bit = BCM6358_CLK_SAR, + }, { + .name = "emusb", + .bit = BCM6358_CLK_EMUSB, + }, { + .name = "enet0", + .bit = BCM6358_CLK_ENET0, + }, { + .name = "enet1", + .bit = BCM6358_CLK_ENET1, + }, { + .name = "usbsu", + .bit = BCM6358_CLK_USBSU, + }, { + .name = "ephy", + .bit = BCM6358_CLK_EPHY, + }, { + /* sentinel */ + }, }; static const struct clk_bcm63xx_table_entry bcm6362_clocks[] = { - { .name = "adsl_qproc", .bit = 1, }, - { .name = "adsl_afe", .bit = 2, }, - { .name = "adsl", .bit = 3, }, - { .name = "mips", .bit = 4, .flags = CLK_IS_CRITICAL, }, - { .name = "wlan_ocp", .bit = 5, }, - { .name = "swpkt_usb", .bit = 7, }, - { .name = "swpkt_sar", .bit = 8, }, - { .name = "sar", .bit = 9, }, - { .name = "robosw", .bit = 10, }, - { .name = "pcm", .bit = 11, }, - { .name = "usbd", .bit = 12, }, - { .name = "usbh", .bit = 13, }, - { .name = "ipsec", .bit = 14, }, - { .name = "spi", .bit = 15, }, - { .name = "hsspi", .bit = 16, }, - { .name = "pcie", .bit = 17, }, - { .name = "fap", .bit = 18, }, - { .name = "phymips", .bit = 19, }, - { .name = "nand", .bit = 20, }, - { }, + { + .name = "adsl_qproc", + .bit = BCM6362_CLK_ADSL_QPROC, + }, { + .name = "adsl_afe", + .bit = BCM6362_CLK_ADSL_AFE, + }, { + .name = "adsl", + .bit = BCM6362_CLK_ADSL, + }, { + .name = "mips", + .bit = BCM6362_CLK_MIPS, + .flags = CLK_IS_CRITICAL, + }, { + .name = "wlan_ocp", + .bit = BCM6362_CLK_WLAN_OCP, + }, { + .name = "swpkt_usb", + .bit = BCM6362_CLK_SWPKT_USB, + }, { + .name = "swpkt_sar", + .bit = BCM6362_CLK_SWPKT_SAR, + }, { + .name = "sar", + .bit = BCM6362_CLK_SAR, + }, { + .name = "robosw", + .bit = BCM6362_CLK_ROBOSW, + }, { + .name = "pcm", + .bit = BCM6362_CLK_PCM, + }, { + .name = "usbd", + .bit = BCM6362_CLK_USBD, + }, { + .name = "usbh", + .bit = BCM6362_CLK_USBH, + }, { + .name = "ipsec", + .bit = BCM6362_CLK_IPSEC, + }, { + .name = "spi", + .bit = BCM6362_CLK_SPI, + }, { + .name = "hsspi", + .bit = BCM6362_CLK_HSSPI, + }, { + .name = "pcie", + .bit = BCM6362_CLK_PCIE, + }, { + .name = "fap", + .bit = BCM6362_CLK_FAP, + }, { + .name = "phymips", + .bit = BCM6362_CLK_PHYMIPS, + }, { + .name = "nand", + .bit = BCM6362_CLK_NAND, + }, { + /* sentinel */ + }, }; static const struct clk_bcm63xx_table_entry bcm6368_clocks[] = { - { .name = "vdsl_qproc", .bit = 2, }, - { .name = "vdsl_afe", .bit = 3, }, - { .name = "vdsl_bonding", .bit = 4, }, - { .name = "vdsl", .bit = 5, }, - { .name = "phymips", .bit = 6, }, - { .name = "swpkt_usb", .bit = 7, }, - { .name = "swpkt_sar", .bit = 8, }, - { .name = "spi", .bit = 9, }, - { .name = "usbd", .bit = 10, }, - { .name = "sar", .bit = 11, }, - { .name = "robosw", .bit = 12, }, - { .name = "utopia", .bit = 13, }, - { .name = "pcm", .bit = 14, }, - { .name = "usbh", .bit = 15, }, - { .name = "disable_gless", .bit = 16, }, - { .name = "nand", .bit = 17, }, - { .name = "ipsec", .bit = 18, }, - { }, + { + .name = "vdsl_qproc", + .bit = BCM6368_CLK_VDSL_QPROC, 
+ }, { + .name = "vdsl_afe", + .bit = BCM6368_CLK_VDSL_AFE, + }, { + .name = "vdsl_bonding", + .bit = BCM6368_CLK_VDSL_BONDING, + }, { + .name = "vdsl", + .bit = BCM6368_CLK_VDSL, + }, { + .name = "phymips", + .bit = BCM6368_CLK_PHYMIPS, + }, { + .name = "swpkt_usb", + .bit = BCM6368_CLK_SWPKT_USB, + }, { + .name = "swpkt_sar", + .bit = BCM6368_CLK_SWPKT_SAR, + }, { + .name = "spi", + .bit = BCM6368_CLK_SPI, + }, { + .name = "usbd", + .bit = BCM6368_CLK_USBD, + }, { + .name = "sar", + .bit = BCM6368_CLK_SAR, + }, { + .name = "robosw", + .bit = BCM6368_CLK_ROBOSW, + }, { + .name = "utopia", + .bit = BCM6368_CLK_UTOPIA, + }, { + .name = "pcm", + .bit = BCM6368_CLK_PCM, + }, { + .name = "usbh", + .bit = BCM6368_CLK_USBH, + }, { + .name = "disable_gless", + .bit = BCM6368_CLK_DIS_GLESS, + }, { + .name = "nand", + .bit = BCM6368_CLK_NAND, + }, { + .name = "ipsec", + .bit = BCM6368_CLK_IPSEC, + }, { + /* sentinel */ + }, }; static const struct clk_bcm63xx_table_entry bcm63268_clocks[] = { - { .name = "disable_gless", .bit = 0, }, - { .name = "vdsl_qproc", .bit = 1, }, - { .name = "vdsl_afe", .bit = 2, }, - { .name = "vdsl", .bit = 3, }, - { .name = "mips", .bit = 4, .flags = CLK_IS_CRITICAL, }, - { .name = "wlan_ocp", .bit = 5, }, - { .name = "dect", .bit = 6, }, - { .name = "fap0", .bit = 7, }, - { .name = "fap1", .bit = 8, }, - { .name = "sar", .bit = 9, }, - { .name = "robosw", .bit = 10, }, - { .name = "pcm", .bit = 11, }, - { .name = "usbd", .bit = 12, }, - { .name = "usbh", .bit = 13, }, - { .name = "ipsec", .bit = 14, }, - { .name = "spi", .bit = 15, }, - { .name = "hsspi", .bit = 16, }, - { .name = "pcie", .bit = 17, }, - { .name = "phymips", .bit = 18, }, - { .name = "gmac", .bit = 19, }, - { .name = "nand", .bit = 20, }, - { .name = "tbus", .bit = 27, }, - { .name = "robosw250", .bit = 31, }, - { }, + { + .name = "disable_gless", + .bit = BCM63268_CLK_DIS_GLESS, + }, { + .name = "vdsl_qproc", + .bit = BCM63268_CLK_VDSL_QPROC, + }, { + .name = "vdsl_afe", + .bit = BCM63268_CLK_VDSL_AFE, + }, { + .name = "vdsl", + .bit = BCM63268_CLK_VDSL, + }, { + .name = "mips", + .bit = BCM63268_CLK_MIPS, + .flags = CLK_IS_CRITICAL, + }, { + .name = "wlan_ocp", + .bit = BCM63268_CLK_WLAN_OCP, + }, { + .name = "dect", + .bit = BCM63268_CLK_DECT, + }, { + .name = "fap0", + .bit = BCM63268_CLK_FAP0, + }, { + .name = "fap1", + .bit = BCM63268_CLK_FAP1, + }, { + .name = "sar", + .bit = BCM63268_CLK_SAR, + }, { + .name = "robosw", + .bit = BCM63268_CLK_ROBOSW, + }, { + .name = "pcm", + .bit = BCM63268_CLK_PCM, + }, { + .name = "usbd", + .bit = BCM63268_CLK_USBD, + }, { + .name = "usbh", + .bit = BCM63268_CLK_USBH, + }, { + .name = "ipsec", + .bit = BCM63268_CLK_IPSEC, + }, { + .name = "spi", + .bit = BCM63268_CLK_SPI, + }, { + .name = "hsspi", + .bit = BCM63268_CLK_HSSPI, + }, { + .name = "pcie", + .bit = BCM63268_CLK_PCIE, + }, { + .name = "phymips", + .bit = BCM63268_CLK_PHYMIPS, + }, { + .name = "gmac", + .bit = BCM63268_CLK_GMAC, + }, { + .name = "nand", + .bit = BCM63268_CLK_NAND, + }, { + .name = "tbus", + .bit = BCM63268_CLK_TBUS, + }, { + .name = "robosw250", + .bit = BCM63268_CLK_ROBOSW250, + }, { + /* sentinel */ + }, }; static int clk_bcm63xx_probe(struct platform_device *pdev) @@ -155,6 +495,7 @@ static int clk_bcm63xx_probe(struct platform_device *pdev) for (entry = table; entry->name; entry++) maxbit = max_t(u8, maxbit, entry->bit); + maxbit++; hw = devm_kzalloc(&pdev->dev, struct_size(hw, data.hws, maxbit), GFP_KERNEL); @@ -217,6 +558,8 @@ static int clk_bcm63xx_remove(struct platform_device 
*pdev) static const struct of_device_id clk_bcm63xx_dt_ids[] = { { .compatible = "brcm,bcm3368-clocks", .data = &bcm3368_clocks, }, + { .compatible = "brcm,bcm6318-clocks", .data = &bcm6318_clocks, }, + { .compatible = "brcm,bcm6318-ubus-clocks", .data = &bcm6318_ubus_clocks, }, { .compatible = "brcm,bcm6328-clocks", .data = &bcm6328_clocks, }, { .compatible = "brcm,bcm6358-clocks", .data = &bcm6358_clocks, }, { .compatible = "brcm,bcm6362-clocks", .data = &bcm6362_clocks, }, diff --git a/drivers/clk/bcm/clk-iproc-asiu.c b/drivers/clk/bcm/clk-iproc-asiu.c index 6fb8af506777..e062dd4992ea 100644 --- a/drivers/clk/bcm/clk-iproc-asiu.c +++ b/drivers/clk/bcm/clk-iproc-asiu.c @@ -119,7 +119,7 @@ static long iproc_asiu_clk_round_rate(struct clk_hw *hw, unsigned long rate, if (rate == *parent_rate) return *parent_rate; - div = DIV_ROUND_UP(*parent_rate, rate); + div = DIV_ROUND_CLOSEST(*parent_rate, rate); if (div < 2) return *parent_rate; @@ -145,7 +145,7 @@ static int iproc_asiu_clk_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } - div = DIV_ROUND_UP(parent_rate, rate); + div = DIV_ROUND_CLOSEST(parent_rate, rate); if (div < 2) return -EINVAL; diff --git a/drivers/clk/bcm/clk-raspberrypi.c b/drivers/clk/bcm/clk-raspberrypi.c index 1654fd0eedc9..5cc82954e1ce 100644 --- a/drivers/clk/bcm/clk-raspberrypi.c +++ b/drivers/clk/bcm/clk-raspberrypi.c @@ -18,30 +18,56 @@ #include <soc/bcm2835/raspberrypi-firmware.h> -#define RPI_FIRMWARE_ARM_CLK_ID 0x00000003 +enum rpi_firmware_clk_id { + RPI_FIRMWARE_EMMC_CLK_ID = 1, + RPI_FIRMWARE_UART_CLK_ID, + RPI_FIRMWARE_ARM_CLK_ID, + RPI_FIRMWARE_CORE_CLK_ID, + RPI_FIRMWARE_V3D_CLK_ID, + RPI_FIRMWARE_H264_CLK_ID, + RPI_FIRMWARE_ISP_CLK_ID, + RPI_FIRMWARE_SDRAM_CLK_ID, + RPI_FIRMWARE_PIXEL_CLK_ID, + RPI_FIRMWARE_PWM_CLK_ID, + RPI_FIRMWARE_HEVC_CLK_ID, + RPI_FIRMWARE_EMMC2_CLK_ID, + RPI_FIRMWARE_M2MC_CLK_ID, + RPI_FIRMWARE_PIXEL_BVB_CLK_ID, + RPI_FIRMWARE_NUM_CLK_ID, +}; + +static char *rpi_firmware_clk_names[] = { + [RPI_FIRMWARE_EMMC_CLK_ID] = "emmc", + [RPI_FIRMWARE_UART_CLK_ID] = "uart", + [RPI_FIRMWARE_ARM_CLK_ID] = "arm", + [RPI_FIRMWARE_CORE_CLK_ID] = "core", + [RPI_FIRMWARE_V3D_CLK_ID] = "v3d", + [RPI_FIRMWARE_H264_CLK_ID] = "h264", + [RPI_FIRMWARE_ISP_CLK_ID] = "isp", + [RPI_FIRMWARE_SDRAM_CLK_ID] = "sdram", + [RPI_FIRMWARE_PIXEL_CLK_ID] = "pixel", + [RPI_FIRMWARE_PWM_CLK_ID] = "pwm", + [RPI_FIRMWARE_HEVC_CLK_ID] = "hevc", + [RPI_FIRMWARE_EMMC2_CLK_ID] = "emmc2", + [RPI_FIRMWARE_M2MC_CLK_ID] = "m2mc", + [RPI_FIRMWARE_PIXEL_BVB_CLK_ID] = "pixel-bvb", +}; #define RPI_FIRMWARE_STATE_ENABLE_BIT BIT(0) #define RPI_FIRMWARE_STATE_WAIT_BIT BIT(1) -/* - * Even though the firmware interface alters 'pllb' the frequencies are - * provided as per 'pllb_arm'. We need to scale before passing them trough. 
- */ -#define RPI_FIRMWARE_PLLB_ARM_DIV_RATE 2 - -#define A2W_PLL_FRAC_BITS 20 - struct raspberrypi_clk { struct device *dev; struct rpi_firmware *firmware; struct platform_device *cpufreq; +}; - unsigned long min_rate; - unsigned long max_rate; +struct raspberrypi_clk_data { + struct clk_hw hw; - struct clk_hw pllb; - struct clk_hw *pllb_arm; - struct clk_lookup *pllb_arm_lookup; + unsigned int id; + + struct raspberrypi_clk *rpi; }; /* @@ -64,11 +90,12 @@ struct raspberrypi_firmware_prop { __le32 disable_turbo; } __packed; -static int raspberrypi_clock_property(struct rpi_firmware *firmware, u32 tag, - u32 clk, u32 *val) +static int raspberrypi_clock_property(struct rpi_firmware *firmware, + const struct raspberrypi_clk_data *data, + u32 tag, u32 *val) { struct raspberrypi_firmware_prop msg = { - .id = cpu_to_le32(clk), + .id = cpu_to_le32(data->id), .val = cpu_to_le32(*val), .disable_turbo = cpu_to_le32(1), }; @@ -83,16 +110,16 @@ static int raspberrypi_clock_property(struct rpi_firmware *firmware, u32 tag, return 0; } -static int raspberrypi_fw_pll_is_on(struct clk_hw *hw) +static int raspberrypi_fw_is_prepared(struct clk_hw *hw) { - struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, - pllb); + struct raspberrypi_clk_data *data = + container_of(hw, struct raspberrypi_clk_data, hw); + struct raspberrypi_clk *rpi = data->rpi; u32 val = 0; int ret; - ret = raspberrypi_clock_property(rpi->firmware, - RPI_FIRMWARE_GET_CLOCK_STATE, - RPI_FIRMWARE_ARM_CLK_ID, &val); + ret = raspberrypi_clock_property(rpi->firmware, data, + RPI_FIRMWARE_GET_CLOCK_STATE, &val); if (ret) return 0; @@ -100,36 +127,34 @@ static int raspberrypi_fw_pll_is_on(struct clk_hw *hw) } -static unsigned long raspberrypi_fw_pll_get_rate(struct clk_hw *hw, - unsigned long parent_rate) +static unsigned long raspberrypi_fw_get_rate(struct clk_hw *hw, + unsigned long parent_rate) { - struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, - pllb); + struct raspberrypi_clk_data *data = + container_of(hw, struct raspberrypi_clk_data, hw); + struct raspberrypi_clk *rpi = data->rpi; u32 val = 0; int ret; - ret = raspberrypi_clock_property(rpi->firmware, - RPI_FIRMWARE_GET_CLOCK_RATE, - RPI_FIRMWARE_ARM_CLK_ID, - &val); + ret = raspberrypi_clock_property(rpi->firmware, data, + RPI_FIRMWARE_GET_CLOCK_RATE, &val); if (ret) return ret; - return val * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + return val; } -static int raspberrypi_fw_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +static int raspberrypi_fw_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { - struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, - pllb); - u32 new_rate = rate / RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + struct raspberrypi_clk_data *data = + container_of(hw, struct raspberrypi_clk_data, hw); + struct raspberrypi_clk *rpi = data->rpi; + u32 _rate = rate; int ret; - ret = raspberrypi_clock_property(rpi->firmware, - RPI_FIRMWARE_SET_CLOCK_RATE, - RPI_FIRMWARE_ARM_CLK_ID, - &new_rate); + ret = raspberrypi_clock_property(rpi->firmware, data, + RPI_FIRMWARE_SET_CLOCK_RATE, &_rate); if (ret) dev_err_ratelimited(rpi->dev, "Failed to change %s frequency: %d", clk_hw_get_name(hw), ret); @@ -137,111 +162,128 @@ static int raspberrypi_fw_pll_set_rate(struct clk_hw *hw, unsigned long rate, return ret; } -/* - * Sadly there is no firmware rate rounding interface. We borrowed it from - * clk-bcm2835. 
- */ -static int raspberrypi_pll_determine_rate(struct clk_hw *hw, - struct clk_rate_request *req) +static int raspberrypi_fw_dumb_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - struct raspberrypi_clk *rpi = container_of(hw, struct raspberrypi_clk, - pllb); - u64 div, final_rate; - u32 ndiv, fdiv; - - /* We can't use req->rate directly as it would overflow */ - final_rate = clamp(req->rate, rpi->min_rate, rpi->max_rate); - - div = (u64)final_rate << A2W_PLL_FRAC_BITS; - do_div(div, req->best_parent_rate); - - ndiv = div >> A2W_PLL_FRAC_BITS; - fdiv = div & ((1 << A2W_PLL_FRAC_BITS) - 1); - - final_rate = ((u64)req->best_parent_rate * - ((ndiv << A2W_PLL_FRAC_BITS) + fdiv)); - - req->rate = final_rate >> A2W_PLL_FRAC_BITS; - + /* + * The firmware will do the rounding but that isn't part of + * the interface with the firmware, so we just do our best + * here. + */ + req->rate = clamp(req->rate, req->min_rate, req->max_rate); return 0; } -static const struct clk_ops raspberrypi_firmware_pll_clk_ops = { - .is_prepared = raspberrypi_fw_pll_is_on, - .recalc_rate = raspberrypi_fw_pll_get_rate, - .set_rate = raspberrypi_fw_pll_set_rate, - .determine_rate = raspberrypi_pll_determine_rate, +static const struct clk_ops raspberrypi_firmware_clk_ops = { + .is_prepared = raspberrypi_fw_is_prepared, + .recalc_rate = raspberrypi_fw_get_rate, + .determine_rate = raspberrypi_fw_dumb_determine_rate, + .set_rate = raspberrypi_fw_set_rate, }; -static int raspberrypi_register_pllb(struct raspberrypi_clk *rpi) +static struct clk_hw *raspberrypi_clk_register(struct raspberrypi_clk *rpi, + unsigned int parent, + unsigned int id) { - u32 min_rate = 0, max_rate = 0; - struct clk_init_data init; + struct raspberrypi_clk_data *data; + struct clk_init_data init = {}; + u32 min_rate, max_rate; int ret; - memset(&init, 0, sizeof(init)); + data = devm_kzalloc(rpi->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + data->rpi = rpi; + data->id = id; - /* All of the PLLs derive from the external oscillator. 
*/ - init.parent_names = (const char *[]){ "osc" }; - init.num_parents = 1; - init.name = "pllb"; - init.ops = &raspberrypi_firmware_pll_clk_ops; - init.flags = CLK_GET_RATE_NOCACHE | CLK_IGNORE_UNUSED; + init.name = devm_kasprintf(rpi->dev, GFP_KERNEL, + "fw-clk-%s", + rpi_firmware_clk_names[id]); + init.ops = &raspberrypi_firmware_clk_ops; + init.flags = CLK_GET_RATE_NOCACHE; - /* Get min & max rates set by the firmware */ - ret = raspberrypi_clock_property(rpi->firmware, + data->hw.init = &init; + + ret = raspberrypi_clock_property(rpi->firmware, data, RPI_FIRMWARE_GET_MIN_CLOCK_RATE, - RPI_FIRMWARE_ARM_CLK_ID, &min_rate); if (ret) { - dev_err(rpi->dev, "Failed to get %s min freq: %d\n", - init.name, ret); - return ret; + dev_err(rpi->dev, "Failed to get clock %d min freq: %d", + id, ret); + return ERR_PTR(ret); } - ret = raspberrypi_clock_property(rpi->firmware, + ret = raspberrypi_clock_property(rpi->firmware, data, RPI_FIRMWARE_GET_MAX_CLOCK_RATE, - RPI_FIRMWARE_ARM_CLK_ID, &max_rate); if (ret) { - dev_err(rpi->dev, "Failed to get %s max freq: %d\n", - init.name, ret); - return ret; - } - - if (!min_rate || !max_rate) { - dev_err(rpi->dev, "Unexpected frequency range: min %u, max %u\n", - min_rate, max_rate); - return -EINVAL; + dev_err(rpi->dev, "Failed to get clock %d max freq: %d\n", + id, ret); + return ERR_PTR(ret); } - dev_info(rpi->dev, "CPU frequency range: min %u, max %u\n", - min_rate, max_rate); + ret = devm_clk_hw_register(rpi->dev, &data->hw); + if (ret) + return ERR_PTR(ret); - rpi->min_rate = min_rate * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; - rpi->max_rate = max_rate * RPI_FIRMWARE_PLLB_ARM_DIV_RATE; + clk_hw_set_rate_range(&data->hw, min_rate, max_rate); - rpi->pllb.init = &init; + if (id == RPI_FIRMWARE_ARM_CLK_ID) { + ret = devm_clk_hw_register_clkdev(rpi->dev, &data->hw, + NULL, "cpu0"); + if (ret) { + dev_err(rpi->dev, "Failed to initialize clkdev\n"); + return ERR_PTR(ret); + } + } - return devm_clk_hw_register(rpi->dev, &rpi->pllb); + return &data->hw; } -static int raspberrypi_register_pllb_arm(struct raspberrypi_clk *rpi) +struct rpi_firmware_get_clocks_response { + u32 parent; + u32 id; +}; + +static int raspberrypi_discover_clocks(struct raspberrypi_clk *rpi, + struct clk_hw_onecell_data *data) { - rpi->pllb_arm = clk_hw_register_fixed_factor(rpi->dev, - "pllb_arm", "pllb", - CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE, - 1, 2); - if (IS_ERR(rpi->pllb_arm)) { - dev_err(rpi->dev, "Failed to initialize pllb_arm\n"); - return PTR_ERR(rpi->pllb_arm); - } + struct rpi_firmware_get_clocks_response *clks; + int ret; - rpi->pllb_arm_lookup = clkdev_hw_create(rpi->pllb_arm, NULL, "cpu0"); - if (!rpi->pllb_arm_lookup) { - dev_err(rpi->dev, "Failed to initialize pllb_arm_lookup\n"); - clk_hw_unregister_fixed_factor(rpi->pllb_arm); + clks = devm_kcalloc(rpi->dev, + sizeof(*clks), RPI_FIRMWARE_NUM_CLK_ID, + GFP_KERNEL); + if (!clks) return -ENOMEM; + + ret = rpi_firmware_property(rpi->firmware, RPI_FIRMWARE_GET_CLOCKS, + clks, + sizeof(*clks) * RPI_FIRMWARE_NUM_CLK_ID); + if (ret) + return ret; + + while (clks->id) { + struct clk_hw *hw; + + switch (clks->id) { + case RPI_FIRMWARE_ARM_CLK_ID: + case RPI_FIRMWARE_CORE_CLK_ID: + case RPI_FIRMWARE_M2MC_CLK_ID: + case RPI_FIRMWARE_V3D_CLK_ID: + hw = raspberrypi_clk_register(rpi, clks->parent, + clks->id); + if (IS_ERR(hw)) + return PTR_ERR(hw); + + data->hws[clks->id] = hw; + data->num = clks->id + 1; + fallthrough; + + default: + clks++; + break; + } } return 0; @@ -249,14 +291,23 @@ static int raspberrypi_register_pllb_arm(struct 
raspberrypi_clk *rpi) static int raspberrypi_clk_probe(struct platform_device *pdev) { + struct clk_hw_onecell_data *clk_data; struct device_node *firmware_node; struct device *dev = &pdev->dev; struct rpi_firmware *firmware; struct raspberrypi_clk *rpi; int ret; - firmware_node = of_find_compatible_node(NULL, NULL, - "raspberrypi,bcm2835-firmware"); + /* + * We can be probed either through the an old-fashioned + * platform device registration or through a DT node that is a + * child of the firmware node. Handle both cases. + */ + if (dev->of_node) + firmware_node = of_get_parent(dev->of_node); + else + firmware_node = of_find_compatible_node(NULL, NULL, + "raspberrypi,bcm2835-firmware"); if (!firmware_node) { dev_err(dev, "Missing firmware node\n"); return -ENOENT; @@ -275,13 +326,18 @@ static int raspberrypi_clk_probe(struct platform_device *pdev) rpi->firmware = firmware; platform_set_drvdata(pdev, rpi); - ret = raspberrypi_register_pllb(rpi); - if (ret) { - dev_err(dev, "Failed to initialize pllb, %d\n", ret); + clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, + RPI_FIRMWARE_NUM_CLK_ID), + GFP_KERNEL); + if (!clk_data) + return -ENOMEM; + + ret = raspberrypi_discover_clocks(rpi, clk_data); + if (ret) return ret; - } - ret = raspberrypi_register_pllb_arm(rpi); + ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + clk_data); if (ret) return ret; @@ -300,9 +356,16 @@ static int raspberrypi_clk_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id raspberrypi_clk_match[] = { + { .compatible = "raspberrypi,firmware-clocks" }, + { }, +}; +MODULE_DEVICE_TABLE(of, raspberrypi_clk_match); + static struct platform_driver raspberrypi_clk_driver = { .driver = { .name = "raspberrypi-clk", + .of_match_table = raspberrypi_clk_match, }, .probe = raspberrypi_clk_probe, .remove = raspberrypi_clk_remove, diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c index 239102e37e2f..c91e9096b070 100644 --- a/drivers/clk/clk-cdce706.c +++ b/drivers/clk/clk-cdce706.c @@ -4,7 +4,7 @@ * * Copyright (c) 2014 Cadence Design Systems Inc. 
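Stepping back to the Raspberry Pi firmware clock rework above: raspberrypi_discover_clocks() relies on RPI_FIRMWARE_GET_CLOCKS returning an array of { parent, id } pairs terminated by an entry whose id is 0, and it only registers the ids the driver understands (arm, core, m2mc, v3d). A small illustrative sketch of that walk follows; the parent values here are made up, and the ids follow the rpi_firmware_clk_id enum above.

#include <stdio.h>

struct rpi_clocks_entry {
	unsigned int parent;
	unsigned int id;
};

int main(void)
{
	/* Hypothetical firmware reply: arm, core, v3d, m2mc, then the terminator. */
	const struct rpi_clocks_entry resp[] = {
		{ 1, 3 }, { 1, 4 }, { 1, 5 }, { 1, 13 }, { 0, 0 },
	};
	const struct rpi_clocks_entry *clk;

	for (clk = resp; clk->id; clk++)
		printf("would register firmware clock id %u (parent %u)\n",
		       clk->id, clk->parent);
	return 0;
}

Each registered clock is stored in the clk_hw_onecell_data at its firmware id, so consumers address clocks by that id through the "raspberrypi,firmware-clocks" provider.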
* - * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf + * Reference: https://www.ti.com/lit/ds/symlink/cdce706.pdf */ #include <linux/clk.h> diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c index 70397b4b5ffe..38755a241ab7 100644 --- a/drivers/clk/clk-gpio.c +++ b/drivers/clk/clk-gpio.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2013 - 2014 Texas Instruments Incorporated - https://www.ti.com * * Authors: * Jyri Sarha <jsarha@ti.com> diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c index 87fe0b0e01a3..86f2e2d3fc02 100644 --- a/drivers/clk/clk-pwm.c +++ b/drivers/clk/clk-pwm.c @@ -89,7 +89,12 @@ static int clk_pwm_probe(struct platform_device *pdev) } if (of_property_read_u32(node, "clock-frequency", &clk_pwm->fixed_rate)) - clk_pwm->fixed_rate = NSEC_PER_SEC / pargs.period; + clk_pwm->fixed_rate = div64_u64(NSEC_PER_SEC, pargs.period); + + if (!clk_pwm->fixed_rate) { + dev_err(&pdev->dev, "fixed_rate cannot be zero\n"); + return -EINVAL; + } if (pargs.period != NSEC_PER_SEC / clk_pwm->fixed_rate && pargs.period != DIV_ROUND_UP(NSEC_PER_SEC, clk_pwm->fixed_rate)) { diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 374afcab89af..5942e9874bc0 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -244,6 +244,14 @@ static const struct clockgen_muxinfo clockgen2_cmux_cgb = { }, }; +static const struct clockgen_muxinfo ls1021a_cmux = { + { + { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, + { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, + } +}; + static const struct clockgen_muxinfo ls1028a_hwa1 = { { { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, @@ -577,7 +585,7 @@ static const struct clockgen_chipinfo chipinfo[] = { { .compat = "fsl,ls1021a-clockgen", .cmux_groups = { - &t1023_cmux + &ls1021a_cmux }, .cmux_to_group = { 0, -1 diff --git a/drivers/clk/clk-si5351.c b/drivers/clk/clk-si5351.c index 71de3618e508..1e1702e609cb 100644 --- a/drivers/clk/clk-si5351.c +++ b/drivers/clk/clk-si5351.c @@ -7,9 +7,9 @@ * * References: * [1] "Si5351A/B/C Data Sheet" - * http://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf + * https://www.silabs.com/Support%20Documents/TechnicalDocs/Si5351.pdf * [2] "Manually Generating an Si5351 Register Map" - * http://www.silabs.com/Support%20Documents/TechnicalDocs/AN619.pdf + * https://www.silabs.com/Support%20Documents/TechnicalDocs/AN619.pdf */ #include <linux/module.h> diff --git a/drivers/clk/clk-sparx5.c b/drivers/clk/clk-sparx5.c new file mode 100644 index 000000000000..0fad0c1a0186 --- /dev/null +++ b/drivers/clk/clk-sparx5.c @@ -0,0 +1,295 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Microchip Sparx5 SoC Clock driver. + * + * Copyright (c) 2019 Microchip Inc. 
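On the clk-pwm change a few hunks up: deriving the fixed rate with div64_u64() keeps the 64-bit PWM period safe to divide on 32-bit builds, and the added check rejects periods longer than one second, which would otherwise truncate to a 0 Hz clock. A short worked example in plain C (not driver code; the periods are arbitrary):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	/* Example PWM periods in nanoseconds (last one is longer than 1 s). */
	const uint64_t periods[] = { 25000, 1000000, 2000000000 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		uint64_t rate = NSEC_PER_SEC / periods[i]; /* div64_u64() in the driver */

		if (!rate)
			printf("period %" PRIu64 " ns -> rejected, rate would be 0 Hz\n",
			       periods[i]);
		else
			printf("period %" PRIu64 " ns -> fixed rate %" PRIu64 " Hz\n",
			       periods[i], rate);
	}
	return 0;
}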
+ * + * Author: Lars Povlsen <lars.povlsen@microchip.com> + */ + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/clk-provider.h> +#include <linux/bitfield.h> +#include <linux/of.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <dt-bindings/clock/microchip,sparx5.h> + +#define PLL_DIV GENMASK(7, 0) +#define PLL_PRE_DIV GENMASK(10, 8) +#define PLL_ROT_DIR BIT(11) +#define PLL_ROT_SEL GENMASK(13, 12) +#define PLL_ROT_ENA BIT(14) +#define PLL_CLK_ENA BIT(15) + +#define MAX_SEL 4 +#define MAX_PRE BIT(3) + +static const u8 sel_rates[MAX_SEL] = { 0, 2*8, 2*4, 2*2 }; + +static const char *clk_names[N_CLOCKS] = { + "core", "ddr", "cpu2", "arm2", + "aux1", "aux2", "aux3", "aux4", + "synce", +}; + +struct s5_hw_clk { + struct clk_hw hw; + void __iomem *reg; +}; + +struct s5_clk_data { + void __iomem *base; + struct s5_hw_clk s5_hw[N_CLOCKS]; +}; + +struct s5_pll_conf { + unsigned long freq; + u8 div; + bool rot_ena; + u8 rot_sel; + u8 rot_dir; + u8 pre_div; +}; + +#define to_s5_pll(hw) container_of(hw, struct s5_hw_clk, hw) + +static unsigned long s5_calc_freq(unsigned long parent_rate, + const struct s5_pll_conf *conf) +{ + unsigned long rate = parent_rate / conf->div; + + if (conf->rot_ena) { + int sign = conf->rot_dir ? -1 : 1; + int divt = sel_rates[conf->rot_sel] * (1 + conf->pre_div); + int divb = divt + sign; + + rate = mult_frac(rate, divt, divb); + rate = roundup(rate, 1000); + } + + return rate; +} + +static void s5_search_fractional(unsigned long rate, + unsigned long parent_rate, + int div, + struct s5_pll_conf *conf) +{ + struct s5_pll_conf best; + ulong cur_offset, best_offset = rate; + int d, i, j; + + memset(conf, 0, sizeof(*conf)); + conf->div = div; + conf->rot_ena = 1; /* Fractional rate */ + + for (d = 0; best_offset > 0 && d <= 1 ; d++) { + conf->rot_dir = !!d; + for (i = 0; best_offset > 0 && i < MAX_PRE; i++) { + conf->pre_div = i; + for (j = 1; best_offset > 0 && j < MAX_SEL; j++) { + conf->rot_sel = j; + conf->freq = s5_calc_freq(parent_rate, conf); + cur_offset = abs(rate - conf->freq); + if (cur_offset < best_offset) { + best_offset = cur_offset; + best = *conf; + } + } + } + } + + /* Best match */ + *conf = best; +} + +static unsigned long s5_calc_params(unsigned long rate, + unsigned long parent_rate, + struct s5_pll_conf *conf) +{ + if (parent_rate % rate) { + struct s5_pll_conf alt1, alt2; + int div; + + div = DIV_ROUND_CLOSEST_ULL(parent_rate, rate); + s5_search_fractional(rate, parent_rate, div, &alt1); + + /* Straight match? 
*/ + if (alt1.freq == rate) { + *conf = alt1; + } else { + /* Try without rounding divider */ + div = parent_rate / rate; + if (div != alt1.div) { + s5_search_fractional(rate, parent_rate, div, + &alt2); + /* Select the better match */ + if (abs(rate - alt1.freq) < + abs(rate - alt2.freq)) + *conf = alt1; + else + *conf = alt2; + } + } + } else { + /* Straight fit */ + memset(conf, 0, sizeof(*conf)); + conf->div = parent_rate / rate; + } + + return conf->freq; +} + +static int s5_pll_enable(struct clk_hw *hw) +{ + struct s5_hw_clk *pll = to_s5_pll(hw); + u32 val = readl(pll->reg); + + val |= PLL_CLK_ENA; + writel(val, pll->reg); + + return 0; +} + +static void s5_pll_disable(struct clk_hw *hw) +{ + struct s5_hw_clk *pll = to_s5_pll(hw); + u32 val = readl(pll->reg); + + val &= ~PLL_CLK_ENA; + writel(val, pll->reg); +} + +static int s5_pll_set_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long parent_rate) +{ + struct s5_hw_clk *pll = to_s5_pll(hw); + struct s5_pll_conf conf; + unsigned long eff_rate; + u32 val; + + eff_rate = s5_calc_params(rate, parent_rate, &conf); + if (eff_rate != rate) + return -EOPNOTSUPP; + + val = readl(pll->reg) & PLL_CLK_ENA; + val |= FIELD_PREP(PLL_DIV, conf.div); + if (conf.rot_ena) { + val |= PLL_ROT_ENA; + val |= FIELD_PREP(PLL_ROT_SEL, conf.rot_sel); + val |= FIELD_PREP(PLL_PRE_DIV, conf.pre_div); + if (conf.rot_dir) + val |= PLL_ROT_DIR; + } + writel(val, pll->reg); + + return 0; +} + +static unsigned long s5_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct s5_hw_clk *pll = to_s5_pll(hw); + struct s5_pll_conf conf; + u32 val; + + val = readl(pll->reg); + + if (val & PLL_CLK_ENA) { + conf.div = FIELD_GET(PLL_DIV, val); + conf.pre_div = FIELD_GET(PLL_PRE_DIV, val); + conf.rot_ena = FIELD_GET(PLL_ROT_ENA, val); + conf.rot_dir = FIELD_GET(PLL_ROT_DIR, val); + conf.rot_sel = FIELD_GET(PLL_ROT_SEL, val); + + conf.freq = s5_calc_freq(parent_rate, &conf); + } else { + conf.freq = 0; + } + + return conf.freq; +} + +static long s5_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct s5_pll_conf conf; + + return s5_calc_params(rate, *parent_rate, &conf); +} + +static const struct clk_ops s5_pll_ops = { + .enable = s5_pll_enable, + .disable = s5_pll_disable, + .set_rate = s5_pll_set_rate, + .round_rate = s5_pll_round_rate, + .recalc_rate = s5_pll_recalc_rate, +}; + +static struct clk_hw *s5_clk_hw_get(struct of_phandle_args *clkspec, void *data) +{ + struct s5_clk_data *s5_clk = data; + unsigned int idx = clkspec->args[0]; + + if (idx >= N_CLOCKS) { + pr_err("%s: invalid index %u\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + return &s5_clk->s5_hw[idx].hw; +} + +static int s5_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + int i, ret; + struct s5_clk_data *s5_clk; + struct clk_parent_data pdata = { .index = 0 }; + struct clk_init_data init = { + .ops = &s5_pll_ops, + .num_parents = 1, + .parent_data = &pdata, + }; + + s5_clk = devm_kzalloc(dev, sizeof(*s5_clk), GFP_KERNEL); + if (!s5_clk) + return -ENOMEM; + + s5_clk->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(s5_clk->base)) + return PTR_ERR(s5_clk->base); + + for (i = 0; i < N_CLOCKS; i++) { + struct s5_hw_clk *s5_hw = &s5_clk->s5_hw[i]; + + init.name = clk_names[i]; + s5_hw->reg = s5_clk->base + (i * 4); + s5_hw->hw.init = &init; + ret = devm_clk_hw_register(dev, &s5_hw->hw); + if (ret) { + dev_err(dev, "failed to register %s clock\n", + init.name); + return ret; + } + } + + return 
devm_of_clk_add_hw_provider(dev, s5_clk_hw_get, s5_clk); +} + +static const struct of_device_id s5_clk_dt_ids[] = { + { .compatible = "microchip,sparx5-dpll", }, + { } +}; +MODULE_DEVICE_TABLE(of, s5_clk_dt_ids); + +static struct platform_driver s5_clk_driver = { + .probe = s5_clk_probe, + .driver = { + .name = "sparx5-clk", + .of_match_table = s5_clk_dt_ids, + }, +}; +builtin_platform_driver(s5_clk_driver); diff --git a/drivers/clk/clk-versaclock5.c b/drivers/clk/clk-versaclock5.c index fa96659f8023..c90460e7ef21 100644 --- a/drivers/clk/clk-versaclock5.c +++ b/drivers/clk/clk-versaclock5.c @@ -24,6 +24,8 @@ #include <linux/regmap.h> #include <linux/slab.h> +#include <dt-bindings/clk/versaclock.h> + /* VersaClock5 registers */ #define VC5_OTP_CONTROL 0x00 @@ -89,6 +91,28 @@ /* Clock control register for clock 1,2 */ #define VC5_CLK_OUTPUT_CFG(idx, n) (0x60 + ((idx) * 0x2) + (n)) +#define VC5_CLK_OUTPUT_CFG0_CFG_SHIFT 5 +#define VC5_CLK_OUTPUT_CFG0_CFG_MASK GENMASK(7, VC5_CLK_OUTPUT_CFG0_CFG_SHIFT) + +#define VC5_CLK_OUTPUT_CFG0_CFG_LVPECL (VC5_LVPECL) +#define VC5_CLK_OUTPUT_CFG0_CFG_CMOS (VC5_CMOS) +#define VC5_CLK_OUTPUT_CFG0_CFG_HCSL33 (VC5_HCSL33) +#define VC5_CLK_OUTPUT_CFG0_CFG_LVDS (VC5_LVDS) +#define VC5_CLK_OUTPUT_CFG0_CFG_CMOS2 (VC5_CMOS2) +#define VC5_CLK_OUTPUT_CFG0_CFG_CMOSD (VC5_CMOSD) +#define VC5_CLK_OUTPUT_CFG0_CFG_HCSL25 (VC5_HCSL25) + +#define VC5_CLK_OUTPUT_CFG0_PWR_SHIFT 3 +#define VC5_CLK_OUTPUT_CFG0_PWR_MASK GENMASK(4, VC5_CLK_OUTPUT_CFG0_PWR_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_PWR_18 (0<<VC5_CLK_OUTPUT_CFG0_PWR_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_PWR_25 (2<<VC5_CLK_OUTPUT_CFG0_PWR_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_PWR_33 (3<<VC5_CLK_OUTPUT_CFG0_PWR_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT 0 +#define VC5_CLK_OUTPUT_CFG0_SLEW_MASK GENMASK(1, VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_SLEW_80 (0<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_SLEW_85 (1<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_SLEW_90 (2<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT) +#define VC5_CLK_OUTPUT_CFG0_SLEW_100 (3<<VC5_CLK_OUTPUT_CFG0_SLEW_SHIFT) #define VC5_CLK_OUTPUT_CFG1_EN_CLKBUF BIT(0) #define VC5_CLK_OE_SHDN 0x68 @@ -145,6 +169,14 @@ struct vc5_hw_data { unsigned int num; }; +struct vc5_out_data { + struct clk_hw hw; + struct vc5_driver_data *vc5; + unsigned int num; + unsigned int clk_output_cfg0; + unsigned int clk_output_cfg0_mask; +}; + struct vc5_driver_data { struct i2c_client *client; struct regmap *regmap; @@ -158,31 +190,7 @@ struct vc5_driver_data { struct clk_hw clk_pfd; struct vc5_hw_data clk_pll; struct vc5_hw_data clk_fod[VC5_MAX_FOD_NUM]; - struct vc5_hw_data clk_out[VC5_MAX_CLK_OUT_NUM]; -}; - -static const char * const vc5_mux_names[] = { - "mux" -}; - -static const char * const vc5_dbl_names[] = { - "dbl" -}; - -static const char * const vc5_pfd_names[] = { - "pfd" -}; - -static const char * const vc5_pll_names[] = { - "pll" -}; - -static const char * const vc5_fod_names[] = { - "fod0", "fod1", "fod2", "fod3", -}; - -static const char * const vc5_clk_out_names[] = { - "out0_sel_i2cb", "out1", "out2", "out3", "out4", + struct vc5_out_data clk_out[VC5_MAX_CLK_OUT_NUM]; }; /* @@ -565,7 +573,7 @@ static const struct clk_ops vc5_fod_ops = { static int vc5_clk_out_prepare(struct clk_hw *hw) { - struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw); struct vc5_driver_data *vc5 = hwdata->vc5; const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM | 
VC5_OUT_DIV_CONTROL_SEL_EXT | @@ -591,12 +599,23 @@ static int vc5_clk_out_prepare(struct clk_hw *hw) regmap_update_bits(vc5->regmap, VC5_CLK_OUTPUT_CFG(hwdata->num, 1), VC5_CLK_OUTPUT_CFG1_EN_CLKBUF, VC5_CLK_OUTPUT_CFG1_EN_CLKBUF); + if (hwdata->clk_output_cfg0_mask) { + dev_dbg(&vc5->client->dev, "Update output %d mask 0x%0X val 0x%0X\n", + hwdata->num, hwdata->clk_output_cfg0_mask, + hwdata->clk_output_cfg0); + + regmap_update_bits(vc5->regmap, + VC5_CLK_OUTPUT_CFG(hwdata->num, 0), + hwdata->clk_output_cfg0_mask, + hwdata->clk_output_cfg0); + } + return 0; } static void vc5_clk_out_unprepare(struct clk_hw *hw) { - struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw); struct vc5_driver_data *vc5 = hwdata->vc5; /* Disable the clock buffer */ @@ -606,7 +625,7 @@ static void vc5_clk_out_unprepare(struct clk_hw *hw) static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw) { - struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw); struct vc5_driver_data *vc5 = hwdata->vc5; const u8 mask = VC5_OUT_DIV_CONTROL_SELB_NORM | VC5_OUT_DIV_CONTROL_SEL_EXT | @@ -636,7 +655,7 @@ static unsigned char vc5_clk_out_get_parent(struct clk_hw *hw) static int vc5_clk_out_set_parent(struct clk_hw *hw, u8 index) { - struct vc5_hw_data *hwdata = container_of(hw, struct vc5_hw_data, hw); + struct vc5_out_data *hwdata = container_of(hw, struct vc5_out_data, hw); struct vc5_driver_data *vc5 = hwdata->vc5; const u8 mask = VC5_OUT_DIV_CONTROL_RESET | VC5_OUT_DIV_CONTROL_SELB_NORM | @@ -690,10 +709,125 @@ static int vc5_map_index_to_output(const enum vc5_model model, } } +static int vc5_update_mode(struct device_node *np_output, + struct vc5_out_data *clk_out) +{ + u32 value; + + if (!of_property_read_u32(np_output, "idt,mode", &value)) { + clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_CFG_MASK; + switch (value) { + case VC5_CLK_OUTPUT_CFG0_CFG_LVPECL: + case VC5_CLK_OUTPUT_CFG0_CFG_CMOS: + case VC5_CLK_OUTPUT_CFG0_CFG_HCSL33: + case VC5_CLK_OUTPUT_CFG0_CFG_LVDS: + case VC5_CLK_OUTPUT_CFG0_CFG_CMOS2: + case VC5_CLK_OUTPUT_CFG0_CFG_CMOSD: + case VC5_CLK_OUTPUT_CFG0_CFG_HCSL25: + clk_out->clk_output_cfg0 |= + value << VC5_CLK_OUTPUT_CFG0_CFG_SHIFT; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static int vc5_update_power(struct device_node *np_output, + struct vc5_out_data *clk_out) +{ + u32 value; + + if (!of_property_read_u32(np_output, + "idt,voltage-microvolts", &value)) { + clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_PWR_MASK; + switch (value) { + case 1800000: + clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_18; + break; + case 2500000: + clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_25; + break; + case 3300000: + clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_PWR_33; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static int vc5_update_slew(struct device_node *np_output, + struct vc5_out_data *clk_out) +{ + u32 value; + + if (!of_property_read_u32(np_output, "idt,slew-percent", &value)) { + clk_out->clk_output_cfg0_mask |= VC5_CLK_OUTPUT_CFG0_SLEW_MASK; + switch (value) { + case 80: + clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_80; + break; + case 85: + clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_85; + break; + case 90: + clk_out->clk_output_cfg0 |= VC5_CLK_OUTPUT_CFG0_SLEW_90; + break; + case 100: + clk_out->clk_output_cfg0 |= + 
VC5_CLK_OUTPUT_CFG0_SLEW_100; + break; + default: + return -EINVAL; + } + } + return 0; +} + +static int vc5_get_output_config(struct i2c_client *client, + struct vc5_out_data *clk_out) +{ + struct device_node *np_output; + char *child_name; + int ret = 0; + + child_name = kasprintf(GFP_KERNEL, "OUT%d", clk_out->num + 1); + if (!child_name) + return -ENOMEM; + + np_output = of_get_child_by_name(client->dev.of_node, child_name); + kfree(child_name); + if (!np_output) + return 0; + + ret = vc5_update_mode(np_output, clk_out); + if (ret) + goto output_error; + + ret = vc5_update_power(np_output, clk_out); + if (ret) + goto output_error; + + ret = vc5_update_slew(np_output, clk_out); + +output_error: + if (ret) { + dev_err(&client->dev, + "Invalid clock output configuration OUT%d\n", + clk_out->num + 1); + } + + of_node_put(np_output); + + return ret; +} + static const struct of_device_id clk_vc5_of_match[]; -static int vc5_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int vc5_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct vc5_driver_data *vc5; struct clk_init_data init; @@ -702,7 +836,7 @@ static int vc5_probe(struct i2c_client *client, int ret; vc5 = devm_kzalloc(&client->dev, sizeof(*vc5), GFP_KERNEL); - if (vc5 == NULL) + if (!vc5) return -ENOMEM; i2c_set_clientdata(client, vc5); @@ -742,7 +876,7 @@ static int vc5_probe(struct i2c_client *client, if (!IS_ERR(vc5->pin_clkin)) { vc5->clk_mux_ins |= VC5_MUX_IN_CLKIN; parent_names[init.num_parents++] = - __clk_get_name(vc5->pin_clkin); + __clk_get_name(vc5->pin_clkin); } if (!init.num_parents) { @@ -750,115 +884,116 @@ static int vc5_probe(struct i2c_client *client, return -EINVAL; } - init.name = vc5_mux_names[0]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.mux", client->dev.of_node); init.ops = &vc5_mux_ops; init.flags = 0; init.parent_names = parent_names; vc5->clk_mux.init = &init; ret = devm_clk_hw_register(&client->dev, &vc5->clk_mux); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", init.name); - goto err_clk; - } + if (ret) + goto err_clk_register; + kfree(init.name); /* clock framework made a copy of the name */ if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) { /* Register frequency doubler */ memset(&init, 0, sizeof(init)); - init.name = vc5_dbl_names[0]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.dbl", + client->dev.of_node); init.ops = &vc5_dbl_ops; init.flags = CLK_SET_RATE_PARENT; - init.parent_names = vc5_mux_names; + init.parent_names = parent_names; + parent_names[0] = clk_hw_get_name(&vc5->clk_mux); init.num_parents = 1; vc5->clk_mul.init = &init; ret = devm_clk_hw_register(&client->dev, &vc5->clk_mul); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", - init.name); - goto err_clk; - } + if (ret) + goto err_clk_register; + kfree(init.name); /* clock framework made a copy of the name */ } /* Register PFD */ memset(&init, 0, sizeof(init)); - init.name = vc5_pfd_names[0]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.pfd", client->dev.of_node); init.ops = &vc5_pfd_ops; init.flags = CLK_SET_RATE_PARENT; + init.parent_names = parent_names; if (vc5->chip_info->flags & VC5_HAS_PFD_FREQ_DBL) - init.parent_names = vc5_dbl_names; + parent_names[0] = clk_hw_get_name(&vc5->clk_mul); else - init.parent_names = vc5_mux_names; + parent_names[0] = clk_hw_get_name(&vc5->clk_mux); init.num_parents = 1; vc5->clk_pfd.init = &init; ret = devm_clk_hw_register(&client->dev, &vc5->clk_pfd); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", init.name); - 
goto err_clk; - } + if (ret) + goto err_clk_register; + kfree(init.name); /* clock framework made a copy of the name */ /* Register PLL */ memset(&init, 0, sizeof(init)); - init.name = vc5_pll_names[0]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.pll", client->dev.of_node); init.ops = &vc5_pll_ops; init.flags = CLK_SET_RATE_PARENT; - init.parent_names = vc5_pfd_names; + init.parent_names = parent_names; + parent_names[0] = clk_hw_get_name(&vc5->clk_pfd); init.num_parents = 1; vc5->clk_pll.num = 0; vc5->clk_pll.vc5 = vc5; vc5->clk_pll.hw.init = &init; ret = devm_clk_hw_register(&client->dev, &vc5->clk_pll.hw); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", init.name); - goto err_clk; - } + if (ret) + goto err_clk_register; + kfree(init.name); /* clock framework made a copy of the name */ /* Register FODs */ for (n = 0; n < vc5->chip_info->clk_fod_cnt; n++) { idx = vc5_map_index_to_output(vc5->chip_info->model, n); memset(&init, 0, sizeof(init)); - init.name = vc5_fod_names[idx]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.fod%d", + client->dev.of_node, idx); init.ops = &vc5_fod_ops; init.flags = CLK_SET_RATE_PARENT; - init.parent_names = vc5_pll_names; + init.parent_names = parent_names; + parent_names[0] = clk_hw_get_name(&vc5->clk_pll.hw); init.num_parents = 1; vc5->clk_fod[n].num = idx; vc5->clk_fod[n].vc5 = vc5; vc5->clk_fod[n].hw.init = &init; ret = devm_clk_hw_register(&client->dev, &vc5->clk_fod[n].hw); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", - init.name); - goto err_clk; - } + if (ret) + goto err_clk_register; + kfree(init.name); /* clock framework made a copy of the name */ } /* Register MUX-connected OUT0_I2C_SELB output */ memset(&init, 0, sizeof(init)); - init.name = vc5_clk_out_names[0]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.out0_sel_i2cb", + client->dev.of_node); init.ops = &vc5_clk_out_ops; init.flags = CLK_SET_RATE_PARENT; - init.parent_names = vc5_mux_names; + init.parent_names = parent_names; + parent_names[0] = clk_hw_get_name(&vc5->clk_mux); init.num_parents = 1; vc5->clk_out[0].num = idx; vc5->clk_out[0].vc5 = vc5; vc5->clk_out[0].hw.init = &init; ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[0].hw); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", - init.name); - goto err_clk; - } + if (ret) + goto err_clk_register; + kfree(init.name); /* clock framework made a copy of the name */ /* Register FOD-connected OUTx outputs */ for (n = 1; n < vc5->chip_info->clk_out_cnt; n++) { idx = vc5_map_index_to_output(vc5->chip_info->model, n - 1); - parent_names[0] = vc5_fod_names[idx]; + parent_names[0] = clk_hw_get_name(&vc5->clk_fod[idx].hw); if (n == 1) - parent_names[1] = vc5_mux_names[0]; + parent_names[1] = clk_hw_get_name(&vc5->clk_mux); else - parent_names[1] = vc5_clk_out_names[n - 1]; + parent_names[1] = + clk_hw_get_name(&vc5->clk_out[n - 1].hw); memset(&init, 0, sizeof(init)); - init.name = vc5_clk_out_names[idx + 1]; + init.name = kasprintf(GFP_KERNEL, "%pOFn.out%d", + client->dev.of_node, idx + 1); init.ops = &vc5_clk_out_ops; init.flags = CLK_SET_RATE_PARENT; init.parent_names = parent_names; @@ -866,13 +1001,15 @@ static int vc5_probe(struct i2c_client *client, vc5->clk_out[n].num = idx; vc5->clk_out[n].vc5 = vc5; vc5->clk_out[n].hw.init = &init; - ret = devm_clk_hw_register(&client->dev, - &vc5->clk_out[n].hw); - if (ret) { - dev_err(&client->dev, "unable to register %s\n", - init.name); + ret = devm_clk_hw_register(&client->dev, &vc5->clk_out[n].hw); + if (ret) + goto err_clk_register; + 
kfree(init.name); /* clock framework made a copy of the name */ + + /* Fetch Clock Output configuration from DT (if specified) */ + ret = vc5_get_output_config(client, &vc5->clk_out[n]); + if (ret) goto err_clk; - } } ret = of_clk_add_hw_provider(client->dev.of_node, vc5_of_clk_get, vc5); @@ -883,6 +1020,9 @@ static int vc5_probe(struct i2c_client *client, return 0; +err_clk_register: + dev_err(&client->dev, "unable to register %s\n", init.name); + kfree(init.name); /* clock framework made a copy of the name */ err_clk: if (vc5->chip_info->flags & VC5_HAS_INTERNAL_XTAL) clk_unregister_fixed_rate(vc5->pin_xin); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 3f588ed06ce3..0a9261a099bd 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -500,12 +500,6 @@ static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) return core->accuracy; } -unsigned long __clk_get_flags(struct clk *clk) -{ - return !clk ? 0 : clk->core->flags; -} -EXPORT_SYMBOL_GPL(__clk_get_flags); - unsigned long clk_hw_get_flags(const struct clk_hw *hw) { return hw->core->flags; @@ -1400,6 +1394,21 @@ int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req) } EXPORT_SYMBOL_GPL(__clk_determine_rate); +/** + * clk_hw_round_rate() - round the given rate for a hw clk + * @hw: the hw clk for which we are rounding a rate + * @rate: the rate which is to be rounded + * + * Takes in a rate as input and rounds it to a rate that the clk can actually + * use. + * + * Context: prepare_lock must be held. + * For clk providers to call from within clk_ops such as .round_rate, + * .determine_rate. + * + * Return: returns rounded rate of hw clk if clk supports round_rate operation + * else returns the parent rate. + */ unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate) { int ret; @@ -3039,6 +3048,31 @@ static int clk_rate_set(void *data, u64 val) } #define clk_rate_mode 0644 + +static int clk_prepare_enable_set(void *data, u64 val) +{ + struct clk_core *core = data; + int ret = 0; + + if (val) + ret = clk_prepare_enable(core->hw->clk); + else + clk_disable_unprepare(core->hw->clk); + + return ret; +} + +static int clk_prepare_enable_get(void *data, u64 *val) +{ + struct clk_core *core = data; + + *val = core->enable_count && core->prepare_count; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get, + clk_prepare_enable_set, "%llu\n"); + #else #define clk_rate_set NULL #define clk_rate_mode 0444 @@ -3216,6 +3250,10 @@ static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry) debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count); debugfs_create_file("clk_duty_cycle", 0444, root, core, &clk_duty_cycle_fops); +#ifdef CLOCK_ALLOW_WRITE_DEBUGFS + debugfs_create_file("clk_prepare_enable", 0644, root, core, + &clk_prepare_enable_fops); +#endif if (core->num_parents > 0) debugfs_create_file("clk_parent", 0444, root, core, @@ -4120,6 +4158,7 @@ static int devm_clk_hw_match(struct device *dev, void *res, void *data) /** * devm_clk_unregister - resource managed clk_unregister() + * @dev: device that is unregistering the clock data * @clk: clock to unregister * * Deallocate a clock allocated with devm_clk_register(). Normally @@ -4309,6 +4348,8 @@ static void clk_core_reparent_orphans(void) * @node: Pointer to device tree node of clock provider * @get: Get clock callback. Returns NULL or a struct clk for the * given clock specifier + * @get_hw: Get clk_hw callback. 
Returns NULL, ERR_PTR or a + * struct clk_hw for the given clock specifier * @data: context pointer to be passed into @get callback */ struct of_clk_provider { diff --git a/drivers/clk/davinci/pll.c b/drivers/clk/davinci/pll.c index 8a23d5dfd1f8..6c35e4bb7940 100644 --- a/drivers/clk/davinci/pll.c +++ b/drivers/clk/davinci/pll.c @@ -651,7 +651,7 @@ static int davinci_pll_sysclk_rate_change(struct notifier_block *nb, pllcmd = readl(pll->base + PLLCMD); pllcmd |= PLLCMD_GOSET; writel(pllcmd, pll->base + PLLCMD); - /* fallthrough */ + fallthrough; case PRE_RATE_CHANGE: /* Wait until for outstanding changes to take effect */ do { diff --git a/drivers/clk/imx/clk-pllv3.c b/drivers/clk/imx/clk-pllv3.c index a7db93030e02..b20cdea3e9cc 100644 --- a/drivers/clk/imx/clk-pllv3.c +++ b/drivers/clk/imx/clk-pllv3.c @@ -433,7 +433,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name, break; case IMX_PLLV3_USB_VF610: pll->div_shift = 1; - /* fall through */ + fallthrough; case IMX_PLLV3_USB: ops = &clk_pllv3_ops; pll->powerup_set = true; @@ -441,7 +441,7 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name, case IMX_PLLV3_AV_IMX7: pll->num_offset = PLL_IMX7_NUM_OFFSET; pll->denom_offset = PLL_IMX7_DENOM_OFFSET; - /* fall through */ + fallthrough; case IMX_PLLV3_AV: ops = &clk_pllv3_av_ops; break; diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c index 6c5b8029cc8a..0268d23ebe2e 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -4,6 +4,7 @@ * * Copyright (c) 2013-2015 Imagination Technologies * Author: Paul Burton <paul.burton@mips.com> + * Copyright (c) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com> */ #include <linux/clk-provider.h> @@ -19,49 +20,50 @@ /* CGU register offsets */ #define CGU_REG_CLOCKCONTROL 0x00 -#define CGU_REG_LCR 0x04 -#define CGU_REG_APLL 0x10 -#define CGU_REG_MPLL 0x14 -#define CGU_REG_EPLL 0x18 -#define CGU_REG_VPLL 0x1c -#define CGU_REG_CLKGR0 0x20 -#define CGU_REG_OPCR 0x24 -#define CGU_REG_CLKGR1 0x28 -#define CGU_REG_DDRCDR 0x2c -#define CGU_REG_VPUCDR 0x30 -#define CGU_REG_USBPCR 0x3c -#define CGU_REG_USBRDT 0x40 -#define CGU_REG_USBVBFIL 0x44 -#define CGU_REG_USBPCR1 0x48 -#define CGU_REG_LP0CDR 0x54 -#define CGU_REG_I2SCDR 0x60 -#define CGU_REG_LP1CDR 0x64 -#define CGU_REG_MSC0CDR 0x68 -#define CGU_REG_UHCCDR 0x6c -#define CGU_REG_SSICDR 0x74 -#define CGU_REG_CIMCDR 0x7c -#define CGU_REG_PCMCDR 0x84 -#define CGU_REG_GPUCDR 0x88 -#define CGU_REG_HDMICDR 0x8c -#define CGU_REG_MSC1CDR 0xa4 -#define CGU_REG_MSC2CDR 0xa8 -#define CGU_REG_BCHCDR 0xac -#define CGU_REG_CLOCKSTATUS 0xd4 +#define CGU_REG_LCR 0x04 +#define CGU_REG_APLL 0x10 +#define CGU_REG_MPLL 0x14 +#define CGU_REG_EPLL 0x18 +#define CGU_REG_VPLL 0x1c +#define CGU_REG_CLKGR0 0x20 +#define CGU_REG_OPCR 0x24 +#define CGU_REG_CLKGR1 0x28 +#define CGU_REG_DDRCDR 0x2c +#define CGU_REG_VPUCDR 0x30 +#define CGU_REG_USBPCR 0x3c +#define CGU_REG_USBRDT 0x40 +#define CGU_REG_USBVBFIL 0x44 +#define CGU_REG_USBPCR1 0x48 +#define CGU_REG_LP0CDR 0x54 +#define CGU_REG_I2SCDR 0x60 +#define CGU_REG_LP1CDR 0x64 +#define CGU_REG_MSC0CDR 0x68 +#define CGU_REG_UHCCDR 0x6c +#define CGU_REG_SSICDR 0x74 +#define CGU_REG_CIMCDR 0x7c +#define CGU_REG_PCMCDR 0x84 +#define CGU_REG_GPUCDR 0x88 +#define CGU_REG_HDMICDR 0x8c +#define CGU_REG_MSC1CDR 0xa4 +#define CGU_REG_MSC2CDR 0xa8 +#define CGU_REG_BCHCDR 0xac +#define CGU_REG_CLOCKSTATUS 0xd4 /* bits within the OPCR register */ -#define OPCR_SPENDN0 BIT(7) -#define 
OPCR_SPENDN1 BIT(6) +#define OPCR_SPENDN0 BIT(7) +#define OPCR_SPENDN1 BIT(6) /* bits within the USBPCR register */ -#define USBPCR_USB_MODE BIT(31) +#define USBPCR_USB_MODE BIT(31) #define USBPCR_IDPULLUP_MASK (0x3 << 28) -#define USBPCR_COMMONONN BIT(25) -#define USBPCR_VBUSVLDEXT BIT(24) +#define USBPCR_COMMONONN BIT(25) +#define USBPCR_VBUSVLDEXT BIT(24) #define USBPCR_VBUSVLDEXTSEL BIT(23) -#define USBPCR_POR BIT(22) -#define USBPCR_OTG_DISABLE BIT(20) +#define USBPCR_POR BIT(22) +#define USBPCR_SIDDQ BIT(21) +#define USBPCR_OTG_DISABLE BIT(20) #define USBPCR_COMPDISTUNE_MASK (0x7 << 17) -#define USBPCR_OTGTUNE_MASK (0x7 << 14) +#define USBPCR_OTGTUNE_MASK (0x7 << 14) #define USBPCR_SQRXTUNE_MASK (0x7 << 11) #define USBPCR_TXFSLSTUNE_MASK (0xf << 7) #define USBPCR_TXPREEMPHTUNE BIT(6) @@ -78,13 +80,13 @@ #define USBPCR1_REFCLKDIV_48 (0x2 << USBPCR1_REFCLKDIV_SHIFT) #define USBPCR1_REFCLKDIV_24 (0x1 << USBPCR1_REFCLKDIV_SHIFT) #define USBPCR1_REFCLKDIV_12 (0x0 << USBPCR1_REFCLKDIV_SHIFT) -#define USBPCR1_USB_SEL BIT(28) -#define USBPCR1_WORD_IF0 BIT(19) -#define USBPCR1_WORD_IF1 BIT(18) +#define USBPCR1_USB_SEL BIT(28) +#define USBPCR1_WORD_IF0 BIT(19) +#define USBPCR1_WORD_IF1 BIT(18) /* bits within the USBRDT register */ -#define USBRDT_VBFIL_LD_EN BIT(25) -#define USBRDT_USBRDT_MASK 0x7fffff +#define USBRDT_VBFIL_LD_EN BIT(25) +#define USBRDT_USBRDT_MASK 0x7fffff /* bits within the USBVBFIL register */ #define USBVBFIL_IDDIGFIL_SHIFT 16 @@ -92,40 +94,14 @@ #define USBVBFIL_USBVBFIL_MASK (0xffff) /* bits within the LCR register */ -#define LCR_PD_SCPU BIT(31) -#define LCR_SCPUS BIT(27) +#define LCR_PD_SCPU BIT(31) +#define LCR_SCPUS BIT(27) /* bits within the CLKGR1 register */ -#define CLKGR1_CORE1 BIT(15) +#define CLKGR1_CORE1 BIT(15) static struct ingenic_cgu *cgu; -static u8 jz4780_otg_phy_get_parent(struct clk_hw *hw) -{ - /* we only use CLKCORE, revisit if that ever changes */ - return 0; -} - -static int jz4780_otg_phy_set_parent(struct clk_hw *hw, u8 idx) -{ - unsigned long flags; - u32 usbpcr1; - - if (idx > 0) - return -EINVAL; - - spin_lock_irqsave(&cgu->lock, flags); - - usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1); - usbpcr1 &= ~USBPCR1_REFCLKSEL_MASK; - /* we only use CLKCORE */ - usbpcr1 |= USBPCR1_REFCLKSEL_CORE; - writel(usbpcr1, cgu->base + CGU_REG_USBPCR1); - - spin_unlock_irqrestore(&cgu->lock, flags); - return 0; -} - static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { @@ -149,7 +125,6 @@ static unsigned long jz4780_otg_phy_recalc_rate(struct clk_hw *hw, return 19200000; } - BUG(); return parent_rate; } @@ -206,13 +181,43 @@ static int jz4780_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate, return 0; } -static const struct clk_ops jz4780_otg_phy_ops = { - .get_parent = jz4780_otg_phy_get_parent, - .set_parent = jz4780_otg_phy_set_parent, +static int jz4780_otg_phy_enable(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + writel(readl(reg_opcr) | OPCR_SPENDN0, reg_opcr); + writel(readl(reg_usbpcr) & ~USBPCR_OTG_DISABLE & ~USBPCR_SIDDQ, reg_usbpcr); + return 0; +} +static void jz4780_otg_phy_disable(struct clk_hw *hw) +{ + void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + writel(readl(reg_opcr) & ~OPCR_SPENDN0, reg_opcr); + writel(readl(reg_usbpcr) | USBPCR_OTG_DISABLE | USBPCR_SIDDQ, reg_usbpcr); +} + +static int jz4780_otg_phy_is_enabled(struct clk_hw *hw) +{ + 
void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; + void __iomem *reg_usbpcr = cgu->base + CGU_REG_USBPCR; + + return (readl(reg_opcr) & OPCR_SPENDN0) && + !(readl(reg_usbpcr) & USBPCR_SIDDQ) && + !(readl(reg_usbpcr) & USBPCR_OTG_DISABLE); +} + +static const struct clk_ops jz4780_otg_phy_ops = { .recalc_rate = jz4780_otg_phy_recalc_rate, .round_rate = jz4780_otg_phy_round_rate, .set_rate = jz4780_otg_phy_set_rate, + + .enable = jz4780_otg_phy_enable, + .disable = jz4780_otg_phy_disable, + .is_enabled = jz4780_otg_phy_is_enabled, }; static int jz4780_core1_enable(struct clk_hw *hw) @@ -516,6 +521,18 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = { .gate = { CGU_REG_CLKGR0, 1 }, }, + [JZ4780_CLK_EXCLK_DIV512] = { + "exclk_div512", CGU_CLK_FIXDIV, + .parents = { JZ4780_CLK_EXCLK }, + .fixdiv = { 512 }, + }, + + [JZ4780_CLK_RTC] = { + "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE, + .parents = { JZ4780_CLK_EXCLK_DIV512, JZ4780_CLK_RTCLK }, + .mux = { CGU_REG_OPCR, 2, 1}, + }, + /* Gate-only clocks */ [JZ4780_CLK_NEMC] = { diff --git a/drivers/clk/ingenic/x1000-cgu.c b/drivers/clk/ingenic/x1000-cgu.c index 453f3323cb99..9aa20b52e1c3 100644 --- a/drivers/clk/ingenic/x1000-cgu.c +++ b/drivers/clk/ingenic/x1000-cgu.c @@ -48,8 +48,87 @@ #define USBPCR_SIDDQ BIT(21) #define USBPCR_OTG_DISABLE BIT(20) +/* bits within the USBPCR1 register */ +#define USBPCR1_REFCLKSEL_SHIFT 26 +#define USBPCR1_REFCLKSEL_MASK (0x3 << USBPCR1_REFCLKSEL_SHIFT) +#define USBPCR1_REFCLKSEL_CORE (0x2 << USBPCR1_REFCLKSEL_SHIFT) +#define USBPCR1_REFCLKDIV_SHIFT 24 +#define USBPCR1_REFCLKDIV_MASK (0x3 << USBPCR1_REFCLKDIV_SHIFT) +#define USBPCR1_REFCLKDIV_48 (0x2 << USBPCR1_REFCLKDIV_SHIFT) +#define USBPCR1_REFCLKDIV_24 (0x1 << USBPCR1_REFCLKDIV_SHIFT) +#define USBPCR1_REFCLKDIV_12 (0x0 << USBPCR1_REFCLKDIV_SHIFT) + static struct ingenic_cgu *cgu; +static unsigned long x1000_otg_phy_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + u32 usbpcr1; + unsigned refclk_div; + + usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1); + refclk_div = usbpcr1 & USBPCR1_REFCLKDIV_MASK; + + switch (refclk_div) { + case USBPCR1_REFCLKDIV_12: + return 12000000; + + case USBPCR1_REFCLKDIV_24: + return 24000000; + + case USBPCR1_REFCLKDIV_48: + return 48000000; + } + + return parent_rate; +} + +static long x1000_otg_phy_round_rate(struct clk_hw *hw, unsigned long req_rate, + unsigned long *parent_rate) +{ + if (req_rate < 18000000) + return 12000000; + + if (req_rate < 36000000) + return 24000000; + + return 48000000; +} + +static int x1000_otg_phy_set_rate(struct clk_hw *hw, unsigned long req_rate, + unsigned long parent_rate) +{ + unsigned long flags; + u32 usbpcr1, div_bits; + + switch (req_rate) { + case 12000000: + div_bits = USBPCR1_REFCLKDIV_12; + break; + + case 24000000: + div_bits = USBPCR1_REFCLKDIV_24; + break; + + case 48000000: + div_bits = USBPCR1_REFCLKDIV_48; + break; + + default: + return -EINVAL; + } + + spin_lock_irqsave(&cgu->lock, flags); + + usbpcr1 = readl(cgu->base + CGU_REG_USBPCR1); + usbpcr1 &= ~USBPCR1_REFCLKDIV_MASK; + usbpcr1 |= div_bits; + writel(usbpcr1, cgu->base + CGU_REG_USBPCR1); + + spin_unlock_irqrestore(&cgu->lock, flags); + return 0; +} + static int x1000_usb_phy_enable(struct clk_hw *hw) { void __iomem *reg_opcr = cgu->base + CGU_REG_OPCR; @@ -80,6 +159,10 @@ static int x1000_usb_phy_is_enabled(struct clk_hw *hw) } static const struct clk_ops x1000_otg_phy_ops = { + .recalc_rate = x1000_otg_phy_recalc_rate, + .round_rate = x1000_otg_phy_round_rate, + .set_rate = 
x1000_otg_phy_set_rate, + .enable = x1000_usb_phy_enable, .disable = x1000_usb_phy_disable, .is_enabled = x1000_usb_phy_is_enabled, @@ -144,7 +227,6 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { }, }, - /* Custom (SoC-specific) OTG PHY */ [X1000_CLK_OTGPHY] = { @@ -278,6 +360,19 @@ static const struct ingenic_cgu_clk_info x1000_cgu_clocks[] = { .mux = { CGU_REG_SSICDR, 30, 1 }, }, + [X1000_CLK_EXCLK_DIV512] = { + "exclk_div512", CGU_CLK_FIXDIV, + .parents = { X1000_CLK_EXCLK }, + .fixdiv = { 512 }, + }, + + [X1000_CLK_RTC] = { + "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE, + .parents = { X1000_CLK_EXCLK_DIV512, X1000_CLK_RTCLK }, + .mux = { CGU_REG_OPCR, 2, 1}, + .gate = { CGU_REG_CLKGR, 27 }, + }, + /* Gate-only clocks */ [X1000_CLK_EMC] = { diff --git a/drivers/clk/ingenic/x1830-cgu.c b/drivers/clk/ingenic/x1830-cgu.c index a1b2ff0ee487..950aee243364 100644 --- a/drivers/clk/ingenic/x1830-cgu.c +++ b/drivers/clk/ingenic/x1830-cgu.c @@ -329,6 +329,19 @@ static const struct ingenic_cgu_clk_info x1830_cgu_clocks[] = { .mux = { CGU_REG_SSICDR, 29, 1 }, }, + [X1830_CLK_EXCLK_DIV512] = { + "exclk_div512", CGU_CLK_FIXDIV, + .parents = { X1830_CLK_EXCLK }, + .fixdiv = { 512 }, + }, + + [X1830_CLK_RTC] = { + "rtc_ercs", CGU_CLK_MUX | CGU_CLK_GATE, + .parents = { X1830_CLK_EXCLK_DIV512, X1830_CLK_RTCLK }, + .mux = { CGU_REG_OPCR, 2, 1}, + .gate = { CGU_REG_CLKGR0, 29 }, + }, + /* Gate-only clocks */ [X1830_CLK_EMC] = { diff --git a/drivers/clk/keystone/sci-clk.c b/drivers/clk/keystone/sci-clk.c index 7edf8c8432b6..2ad26cb927fd 100644 --- a/drivers/clk/keystone/sci-clk.c +++ b/drivers/clk/keystone/sci-clk.c @@ -1,7 +1,7 @@ /* * SCI Clock driver for keystone based devices * - * Copyright (C) 2015-2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2016 Texas Instruments Incorporated - https://www.ti.com/ * Tero Kristo <t-kristo@ti.com> * * This program is free software; you can redistribute it and/or modify diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c index 8d7dbea3bd30..5b3d36462174 100644 --- a/drivers/clk/keystone/syscon-clk.c +++ b/drivers/clk/keystone/syscon-clk.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/ */ #include <linux/clk-provider.h> diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index 30c15766ebb1..9803d44bb157 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -3981,6 +3981,113 @@ static struct clk_regmap g12a_spicc1_sclk = { }, }; +/* Neural Network Accelerator source clock */ + +static const struct clk_parent_data nna_clk_parent_data[] = { + { .fw_name = "xtal", }, + { .hw = &g12a_gp0_pll.hw, }, + { .hw = &g12a_hifi_pll.hw, }, + { .hw = &g12a_fclk_div2p5.hw, }, + { .hw = &g12a_fclk_div3.hw, }, + { .hw = &g12a_fclk_div4.hw, }, + { .hw = &g12a_fclk_div5.hw, }, + { .hw = &g12a_fclk_div7.hw }, +}; + +static struct clk_regmap sm1_nna_axi_clk_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_NNA_CLK_CNTL, + .mask = 7, + .shift = 9, + }, + .hw.init = &(struct clk_init_data){ + .name = "nna_axi_clk_sel", + .ops = &clk_regmap_mux_ops, + .parent_data = nna_clk_parent_data, + .num_parents = ARRAY_SIZE(nna_clk_parent_data), + }, +}; + +static struct clk_regmap sm1_nna_axi_clk_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_NNA_CLK_CNTL, + .shift = 0, + .width = 7, + }, + .hw.init = 
&(struct clk_init_data){ + .name = "nna_axi_clk_div", + .ops = &clk_regmap_divider_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_nna_axi_clk_sel.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap sm1_nna_axi_clk = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_NNA_CLK_CNTL, + .bit_idx = 8, + }, + .hw.init = &(struct clk_init_data){ + .name = "nna_axi_clk", + .ops = &clk_regmap_gate_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_nna_axi_clk_div.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap sm1_nna_core_clk_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_NNA_CLK_CNTL, + .mask = 7, + .shift = 25, + }, + .hw.init = &(struct clk_init_data){ + .name = "nna_core_clk_sel", + .ops = &clk_regmap_mux_ops, + .parent_data = nna_clk_parent_data, + .num_parents = ARRAY_SIZE(nna_clk_parent_data), + }, +}; + +static struct clk_regmap sm1_nna_core_clk_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_NNA_CLK_CNTL, + .shift = 16, + .width = 7, + }, + .hw.init = &(struct clk_init_data){ + .name = "nna_core_clk_div", + .ops = &clk_regmap_divider_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_nna_core_clk_sel.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap sm1_nna_core_clk = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_NNA_CLK_CNTL, + .bit_idx = 24, + }, + .hw.init = &(struct clk_init_data){ + .name = "nna_core_clk", + .ops = &clk_regmap_gate_ops, + .parent_hws = (const struct clk_hw *[]) { + &sm1_nna_core_clk_div.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + #define MESON_GATE(_name, _reg, _bit) \ MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw) @@ -4779,6 +4886,12 @@ static struct clk_hw_onecell_data sm1_hw_onecell_data = { [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw, [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw, [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw, + [CLKID_NNA_AXI_CLK_SEL] = &sm1_nna_axi_clk_sel.hw, + [CLKID_NNA_AXI_CLK_DIV] = &sm1_nna_axi_clk_div.hw, + [CLKID_NNA_AXI_CLK] = &sm1_nna_axi_clk.hw, + [CLKID_NNA_CORE_CLK_SEL] = &sm1_nna_core_clk_sel.hw, + [CLKID_NNA_CORE_CLK_DIV] = &sm1_nna_core_clk_div.hw, + [CLKID_NNA_CORE_CLK] = &sm1_nna_core_clk.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -5020,6 +5133,12 @@ static struct clk_regmap *const g12a_clk_regmaps[] = { &g12a_spicc1_sclk_sel, &g12a_spicc1_sclk_div, &g12a_spicc1_sclk, + &sm1_nna_axi_clk_sel, + &sm1_nna_axi_clk_div, + &sm1_nna_axi_clk, + &sm1_nna_core_clk_sel, + &sm1_nna_core_clk_div, + &sm1_nna_core_clk, }; static const struct reg_sequence g12a_init_regs[] = { diff --git a/drivers/clk/meson/g12a.h b/drivers/clk/meson/g12a.h index a8852556836e..69b6a69549c7 100644 --- a/drivers/clk/meson/g12a.h +++ b/drivers/clk/meson/g12a.h @@ -70,6 +70,7 @@ #define HHI_MALI_CLK_CNTL 0x1b0 #define HHI_VPU_CLKC_CNTL 0x1b4 #define HHI_VPU_CLK_CNTL 0x1bC +#define HHI_NNA_CLK_CNTL 0x1C8 #define HHI_HDMI_CLK_CNTL 0x1CC #define HHI_VDEC_CLK_CNTL 0x1E0 #define HHI_VDEC2_CLK_CNTL 0x1E4 @@ -259,8 +260,12 @@ #define CLKID_SPICC0_SCLK_DIV 257 #define CLKID_SPICC1_SCLK_SEL 259 #define CLKID_SPICC1_SCLK_DIV 260 +#define CLKID_NNA_AXI_CLK_SEL 262 +#define CLKID_NNA_AXI_CLK_DIV 263 +#define CLKID_NNA_CORE_CLK_SEL 265 +#define CLKID_NNA_CORE_CLK_DIV 266 -#define NR_CLKS 262 +#define NR_CLKS 268 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/g12a-clkc.h> diff 
--git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c index edc09d050ecf..862f0756b50f 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -293,13 +293,6 @@ static struct clk_regmap meson8b_fclk_div2 = { &meson8b_fclk_div2_div.hw }, .num_parents = 1, - /* - * FIXME: Ethernet with a RGMII PHYs is not working if - * fclk_div2 is disabled. it is currently unclear why this - * is. keep it enabled until the Ethernet driver knows how - * to manage this clock. - */ - .flags = CLK_IS_CRITICAL, }, }; @@ -1211,6 +1204,22 @@ static struct clk_regmap meson8b_vclk_in_en = { }, }; +static struct clk_regmap meson8b_vclk_en = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VID_CLK_CNTL, + .bit_idx = 19, + }, + .hw.init = &(struct clk_init_data){ + .name = "vclk_en", + .ops = &clk_regmap_gate_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &meson8b_vclk_in_en.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + static struct clk_regmap meson8b_vclk_div1_gate = { .data = &(struct clk_regmap_gate_data){ .offset = HHI_VID_CLK_CNTL, @@ -1220,7 +1229,7 @@ static struct clk_regmap meson8b_vclk_div1_gate = { .name = "vclk_div1_en", .ops = &clk_regmap_gate_ro_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk_in_en.hw + &meson8b_vclk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1234,7 +1243,7 @@ static struct clk_fixed_factor meson8b_vclk_div2_div = { .name = "vclk_div2", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk_in_en.hw + &meson8b_vclk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1264,7 +1273,7 @@ static struct clk_fixed_factor meson8b_vclk_div4_div = { .name = "vclk_div4", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk_in_en.hw + &meson8b_vclk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1294,7 +1303,7 @@ static struct clk_fixed_factor meson8b_vclk_div6_div = { .name = "vclk_div6", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk_in_en.hw + &meson8b_vclk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1324,7 +1333,7 @@ static struct clk_fixed_factor meson8b_vclk_div12_div = { .name = "vclk_div12", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk_in_en.hw + &meson8b_vclk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1378,6 +1387,22 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = { }, }; +static struct clk_regmap meson8b_vclk2_clk_en = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_VIID_CLK_DIV, + .bit_idx = 19, + }, + .hw.init = &(struct clk_init_data){ + .name = "vclk2_en", + .ops = &clk_regmap_gate_ro_ops, + .parent_hws = (const struct clk_hw *[]) { + &meson8b_vclk2_clk_in_en.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + static struct clk_regmap meson8b_vclk2_div1_gate = { .data = &(struct clk_regmap_gate_data){ .offset = HHI_VIID_CLK_DIV, @@ -1387,7 +1412,7 @@ static struct clk_regmap meson8b_vclk2_div1_gate = { .name = "vclk2_div1_en", .ops = &clk_regmap_gate_ro_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk2_clk_in_en.hw + &meson8b_vclk2_clk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1401,7 +1426,7 @@ static struct clk_fixed_factor meson8b_vclk2_div2_div = { .name = "vclk2_div2", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk2_clk_in_en.hw + &meson8b_vclk2_clk_en.hw }, 
.num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1431,7 +1456,7 @@ static struct clk_fixed_factor meson8b_vclk2_div4_div = { .name = "vclk2_div4", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk2_clk_in_en.hw + &meson8b_vclk2_clk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1461,7 +1486,7 @@ static struct clk_fixed_factor meson8b_vclk2_div6_div = { .name = "vclk2_div6", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk2_clk_in_en.hw + &meson8b_vclk2_clk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -1491,7 +1516,7 @@ static struct clk_fixed_factor meson8b_vclk2_div12_div = { .name = "vclk2_div12", .ops = &clk_fixed_factor_ops, .parent_hws = (const struct clk_hw *[]) { - &meson8b_vclk2_clk_in_en.hw + &meson8b_vclk2_clk_en.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, @@ -2827,6 +2852,7 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = { [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw, [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw, [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw, + [CLKID_VCLK_EN] = &meson8b_vclk_en.hw, [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw, [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw, [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw, @@ -2838,6 +2864,7 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = { [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw, [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw, [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw, + [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw, [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw, [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw, [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw, @@ -3032,6 +3059,7 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw, [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw, [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw, + [CLKID_VCLK_EN] = &meson8b_vclk_en.hw, [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw, [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw, [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw, @@ -3043,6 +3071,7 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = { [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw, [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw, [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw, + [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw, [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw, [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw, [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw, @@ -3248,6 +3277,7 @@ static struct clk_hw_onecell_data meson8m2_hw_onecell_data = { [CLKID_VID_PLL_FINAL_DIV] = &meson8b_vid_pll_final_div.hw, [CLKID_VCLK_IN_SEL] = &meson8b_vclk_in_sel.hw, [CLKID_VCLK_IN_EN] = &meson8b_vclk_in_en.hw, + [CLKID_VCLK_EN] = &meson8b_vclk_en.hw, [CLKID_VCLK_DIV1] = &meson8b_vclk_div1_gate.hw, [CLKID_VCLK_DIV2_DIV] = &meson8b_vclk_div2_div.hw, [CLKID_VCLK_DIV2] = &meson8b_vclk_div2_div_gate.hw, @@ -3259,6 +3289,7 @@ static struct clk_hw_onecell_data meson8m2_hw_onecell_data = { [CLKID_VCLK_DIV12] = &meson8b_vclk_div12_div_gate.hw, [CLKID_VCLK2_IN_SEL] = &meson8b_vclk2_in_sel.hw, [CLKID_VCLK2_IN_EN] = &meson8b_vclk2_clk_in_en.hw, + [CLKID_VCLK2_EN] = &meson8b_vclk2_clk_en.hw, [CLKID_VCLK2_DIV1] = &meson8b_vclk2_div1_gate.hw, [CLKID_VCLK2_DIV2_DIV] = &meson8b_vclk2_div2_div.hw, [CLKID_VCLK2_DIV2] = &meson8b_vclk2_div2_div_gate.hw, @@ -3450,6 +3481,7 @@ static struct clk_regmap 
*const meson8b_clk_regmaps[] = { &meson8b_vid_pll_final_div, &meson8b_vclk_in_sel, &meson8b_vclk_in_en, + &meson8b_vclk_en, &meson8b_vclk_div1_gate, &meson8b_vclk_div2_div_gate, &meson8b_vclk_div4_div_gate, @@ -3457,6 +3489,7 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = { &meson8b_vclk_div12_div_gate, &meson8b_vclk2_in_sel, &meson8b_vclk2_clk_in_en, + &meson8b_vclk2_clk_en, &meson8b_vclk2_div1_gate, &meson8b_vclk2_div2_div_gate, &meson8b_vclk2_div4_div_gate, diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h index cd38ae2a9cb5..b1a5074cf148 100644 --- a/drivers/clk/meson/meson8b.h +++ b/drivers/clk/meson/meson8b.h @@ -17,7 +17,7 @@ * blocks below. Those offsets must be multiplied by 4 before adding them to * the base address to get the right value * - * [0] http://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf + * [0] https://dn.odroid.com/S805/Datasheet/S805_Datasheet%20V0.8%2020150126.pdf */ #define HHI_GP_PLL_CNTL 0x40 /* 0x10 offset in data sheet */ #define HHI_GP_PLL_CNTL2 0x44 /* 0x11 offset in data sheet */ @@ -180,8 +180,10 @@ #define CLKID_CTS_AMCLK_DIV 208 #define CLKID_CTS_MCLK_I958_SEL 210 #define CLKID_CTS_MCLK_I958_DIV 211 +#define CLKID_VCLK_EN 214 +#define CLKID_VCLK2_EN 215 -#define CLK_NR_CLKS 214 +#define CLK_NR_CLKS 216 /* * include the CLKID and RESETID that have diff --git a/drivers/clk/mmp/clk-pxa168.c b/drivers/clk/mmp/clk-pxa168.c index 8e2551ab8462..b351039cac09 100644 --- a/drivers/clk/mmp/clk-pxa168.c +++ b/drivers/clk/mmp/clk-pxa168.c @@ -10,6 +10,7 @@ */ #include <linux/clk.h> +#include <linux/clk/mmp.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> diff --git a/drivers/clk/mmp/clk-pxa910.c b/drivers/clk/mmp/clk-pxa910.c index 7a7965141918..f254ceff3ea7 100644 --- a/drivers/clk/mmp/clk-pxa910.c +++ b/drivers/clk/mmp/clk-pxa910.c @@ -10,6 +10,7 @@ */ #include <linux/clk.h> +#include <linux/clk/mmp.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/spinlock.h> diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index cde6ca90a06b..058327310c25 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -37,6 +37,15 @@ config QCOM_CLK_APCS_MSM8916 Say Y if you want to support CPU frequency scaling on devices such as msm8916. +config QCOM_CLK_APCC_MSM8996 + tristate "MSM8996 CPU Clock Controller" + select QCOM_KRYO_L2_ACCESSORS + depends on ARM64 + help + Support for the CPU clock controller on msm8996 devices. + Say Y if you want to support CPU clock scaling using CPUfreq + drivers for dynamic power management. + config QCOM_CLK_RPM tristate "RPM based Clock Controller" depends on MFD_QCOM_RPM @@ -89,6 +98,25 @@ config APQ_MMCC_8084 Say Y if you want to support multimedia devices such as display, graphics, video encode/decode, camera, etc. +config IPQ_APSS_PLL + tristate "IPQ APSS PLL" + help + Support for APSS PLL on ipq devices. The APSS PLL is the main + clock that feeds the CPUs on ipq based devices. + Say Y if you want to support CPU frequency scaling on ipq based + devices. + +config IPQ_APSS_6018 + tristate "IPQ APSS Clock Controller" + select IPQ_APSS_PLL + depends on QCOM_APCS_IPC || COMPILE_TEST + help + Support for APSS clock controller on IPQ platforms. The + APSS clock controller manages the Mux and enable block that feeds the + CPUs. + Say Y if you want to support CPU frequency scaling on + ipq based devices.
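The APSS Kconfig entries above exist so that a cpufreq-style driver can scale the CPU cores through the generic clk API. A minimal consumer sketch, assuming the CPU device references the APSS core clock; the helper name and the NULL con_id are illustrative and not defined by this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: scale a CPU core clock provided by the APSS
 * clock controller. CLK_SET_RATE_PARENT on the APSS mux/branch lets
 * the rate request propagate up to the APSS PLL. */
static int example_scale_cpu(struct device *cpu_dev, unsigned long target_hz)
{
	struct clk *cpu_clk;
	int ret;

	cpu_clk = devm_clk_get(cpu_dev, NULL);
	if (IS_ERR(cpu_clk))
		return PTR_ERR(cpu_clk);

	ret = clk_prepare_enable(cpu_clk);
	if (ret)
		return ret;

	/* The clk framework rounds the rate and reprograms the PLL */
	return clk_set_rate(cpu_clk, target_hz);
}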
+ config IPQ_GCC_4019 tristate "IPQ4019 Global Clock Controller" help @@ -280,6 +308,15 @@ config SC_GCC_7180 Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, UFS, SDCC, etc. +config SC_LPASS_CORECC_7180 + tristate "SC7180 LPASS Core Clock Controller" + select SC_GCC_7180 + help + Support for the LPASS(Low Power Audio Subsystem) core clock controller + on SC7180 devices. + Say Y if you want to use LPASS clocks and power domains of the LPASS + core clock controller. + config SC_GPUCC_7180 tristate "SC7180 Graphics Clock Controller" select SC_GCC_7180 @@ -391,6 +428,22 @@ config SM_GCC_8250 Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, SD/UFS, PCIe etc. +config SM_GPUCC_8150 + tristate "SM8150 Graphics Clock Controller" + select SM_GCC_8150 + help + Support for the graphics clock controller on SM8150 devices. + Say Y if you want to support graphics controller devices and + functionality such as 3D graphics. + +config SM_GPUCC_8250 + tristate "SM8250 Graphics Clock Controller" + select SM_GCC_8250 + help + Support for the graphics clock controller on SM8250 devices. + Say Y if you want to support graphics controller devices and + functionality such as 3D graphics. + config SPMI_PMIC_CLKDIV tristate "SPMI PMIC clkdiv Support" depends on SPMI || COMPILE_TEST diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 7ec8561a1270..9677e769e7e9 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -19,6 +19,8 @@ clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o # Keep alphabetically sorted by config obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o +obj-$(CONFIG_IPQ_APSS_PLL) += apss-ipq-pll.o +obj-$(CONFIG_IPQ_APSS_6018) += apss-ipq6018.o obj-$(CONFIG_IPQ_GCC_4019) += gcc-ipq4019.o obj-$(CONFIG_IPQ_GCC_6018) += gcc-ipq6018.o obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o @@ -42,6 +44,7 @@ obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o obj-$(CONFIG_MSM_MMCC_8998) += mmcc-msm8998.o obj-$(CONFIG_QCOM_A53PLL) += a53-pll.o obj-$(CONFIG_QCOM_CLK_APCS_MSM8916) += apcs-msm8916.o +obj-$(CONFIG_QCOM_CLK_APCC_MSM8996) += clk-cpu-8996.o obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o @@ -51,6 +54,7 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o +obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o @@ -62,6 +66,8 @@ obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o +obj-$(CONFIG_SM_GPUCC_8150) += gpucc-sm8150.o +obj-$(CONFIG_SM_GPUCC_8250) += gpucc-sm8250.o obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o obj-$(CONFIG_QCOM_HFPLL) += hfpll.o diff --git a/drivers/clk/qcom/apss-ipq-pll.c b/drivers/clk/qcom/apss-ipq-pll.c new file mode 100644 index 000000000000..30be87fb222a --- /dev/null +++ b/drivers/clk/qcom/apss-ipq-pll.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "clk-alpha-pll.h" + +static const u8 ipq_pll_offsets[] = { + [PLL_OFF_L_VAL] = 0x08, + [PLL_OFF_ALPHA_VAL] = 0x10, + [PLL_OFF_USER_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL] = 0x20, + [PLL_OFF_CONFIG_CTL_U] = 0x24, + [PLL_OFF_STATUS] = 0x28, + [PLL_OFF_TEST_CTL] = 0x30, + [PLL_OFF_TEST_CTL_U] = 0x34, +}; + +static struct clk_alpha_pll ipq_pll = { + .offset = 0x0, + .regs = ipq_pll_offsets, + .flags = SUPPORTS_DYNAMIC_UPDATE, + .clkr = { + .enable_reg = 0x0, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "a53pll", + .parent_data = &(const struct clk_parent_data) { + .fw_name = "xo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_huayra_ops, + }, + }, +}; + +static const struct alpha_pll_config ipq_pll_config = { + .l = 0x37, + .config_ctl_val = 0x04141200, + .config_ctl_hi_val = 0x0, + .early_output_mask = BIT(3), + .main_output_mask = BIT(0), +}; + +static const struct regmap_config ipq_pll_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x40, + .fast_io = true, +}; + +static int apss_ipq_pll_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regmap *regmap; + void __iomem *base; + int ret; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio(dev, base, &ipq_pll_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_alpha_pll_configure(&ipq_pll, regmap, &ipq_pll_config); + + ret = devm_clk_register_regmap(dev, &ipq_pll.clkr); + if (ret) + return ret; + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, + &ipq_pll.clkr.hw); +} + +static const struct of_device_id apss_ipq_pll_match_table[] = { + { .compatible = "qcom,ipq6018-a53pll" }, + { } +}; + +static struct platform_driver apss_ipq_pll_driver = { + .probe = apss_ipq_pll_probe, + .driver = { + .name = "qcom-ipq-apss-pll", + .of_match_table = apss_ipq_pll_match_table, + }, +}; +module_platform_driver(apss_ipq_pll_driver); + +MODULE_DESCRIPTION("Qualcomm technology Inc APSS ALPHA PLL Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/apss-ipq6018.c b/drivers/clk/qcom/apss-ipq6018.c new file mode 100644 index 000000000000..d78ff2f310bf --- /dev/null +++ b/drivers/clk/qcom/apss-ipq6018.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/kernel.h> +#include <linux/err.h> +#include <linux/platform_device.h> +#include <linux/clk-provider.h> +#include <linux/regmap.h> +#include <linux/module.h> + +#include <dt-bindings/clock/qcom,apss-ipq.h> + +#include "common.h" +#include "clk-regmap.h" +#include "clk-branch.h" +#include "clk-alpha-pll.h" +#include "clk-regmap-mux.h" + +enum { + P_XO, + P_APSS_PLL_EARLY, +}; + +static const struct clk_parent_data parents_apcs_alias0_clk_src[] = { + { .fw_name = "xo" }, + { .fw_name = "pll" }, +}; + +static const struct parent_map parents_apcs_alias0_clk_src_map[] = { + { P_XO, 0 }, + { P_APSS_PLL_EARLY, 5 }, +}; + +static struct clk_regmap_mux apcs_alias0_clk_src = { + .reg = 0x0050, + .width = 3, + .shift = 7, + .parent_map = parents_apcs_alias0_clk_src_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "apcs_alias0_clk_src", + .parent_data = parents_apcs_alias0_clk_src, + .num_parents = 2, + .ops = &clk_regmap_mux_closest_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_branch apcs_alias0_core_clk = { + .halt_reg = 0x0058, + .clkr = { + .enable_reg = 0x0058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "apcs_alias0_core_clk", + .parent_hws = (const struct clk_hw *[]){ + &apcs_alias0_clk_src.clkr.hw }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static const struct regmap_config apss_ipq6018_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1000, + .fast_io = true, +}; + +static struct clk_regmap *apss_ipq6018_clks[] = { + [APCS_ALIAS0_CLK_SRC] = &apcs_alias0_clk_src.clkr, + [APCS_ALIAS0_CORE_CLK] = &apcs_alias0_core_clk.clkr, +}; + +static const struct qcom_cc_desc apss_ipq6018_desc = { + .config = &apss_ipq6018_regmap_config, + .clks = apss_ipq6018_clks, + .num_clks = ARRAY_SIZE(apss_ipq6018_clks), +}; + +static int apss_ipq6018_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + + regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!regmap) + return -ENODEV; + + return qcom_cc_really_probe(pdev, &apss_ipq6018_desc, regmap); +} + +static struct platform_driver apss_ipq6018_driver = { + .probe = apss_ipq6018_probe, + .driver = { + .name = "qcom,apss-ipq6018-clk", + }, +}; + +module_platform_driver(apss_ipq6018_driver); + +MODULE_DESCRIPTION("QCOM APSS IPQ 6018 CLK Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 9b2dfa08acb2..26139ef005e4 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -56,7 +56,6 @@ #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS]) #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE]) #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC]) -#define PLL_CAL_VAL(p) ((p)->offset + (p)->regs[PLL_OFF_CAL_VAL]) const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [CLK_ALPHA_PLL_TYPE_DEFAULT] = { @@ -112,22 +111,6 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_CONFIG_CTL_U1] = 0x20, [PLL_OFF_TEST_CTL] = 0x24, [PLL_OFF_TEST_CTL_U] = 0x28, - [PLL_OFF_STATUS] = 0x30, - [PLL_OFF_OPMODE] = 0x38, - [PLL_OFF_ALPHA_VAL] = 0x40, - [PLL_OFF_CAL_VAL] = 0x44, - }, - [CLK_ALPHA_PLL_TYPE_LUCID] = { - [PLL_OFF_L_VAL] = 0x04, - [PLL_OFF_CAL_L_VAL] = 0x08, - [PLL_OFF_USER_CTL] = 0x0c, - [PLL_OFF_USER_CTL_U] = 0x10, - [PLL_OFF_USER_CTL_U1] = 0x14, - [PLL_OFF_CONFIG_CTL] = 0x18, - [PLL_OFF_CONFIG_CTL_U] = 0x1c, - [PLL_OFF_CONFIG_CTL_U1] = 0x20, - 
[PLL_OFF_TEST_CTL] = 0x24, - [PLL_OFF_TEST_CTL_U] = 0x28, [PLL_OFF_TEST_CTL_U1] = 0x2c, [PLL_OFF_STATUS] = 0x30, [PLL_OFF_OPMODE] = 0x38, @@ -156,9 +139,12 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); #define PLL_OUT_MASK 0x7 #define PLL_RATE_MARGIN 500 +/* TRION PLL specific settings and offsets */ +#define TRION_PLL_CAL_VAL 0x44 +#define TRION_PCAL_DONE BIT(26) + /* LUCID PLL specific settings and offsets */ -#define LUCID_PLL_CAL_VAL 0x44 -#define LUCID_PCAL_DONE BIT(26) +#define LUCID_PCAL_DONE BIT(27) #define pll_alpha_width(p) \ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? \ @@ -912,14 +898,14 @@ const struct clk_ops clk_alpha_pll_hwfsm_ops = { }; EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops); -const struct clk_ops clk_trion_fixed_pll_ops = { +const struct clk_ops clk_alpha_pll_fixed_trion_ops = { .enable = clk_trion_pll_enable, .disable = clk_trion_pll_disable, .is_enabled = clk_trion_pll_is_enabled, .recalc_rate = clk_trion_pll_recalc_rate, .round_rate = clk_alpha_pll_round_rate, }; -EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops); +EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_trion_ops); static unsigned long clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) @@ -1339,12 +1325,12 @@ clk_trion_pll_postdiv_set_rate(struct clk_hw *hw, unsigned long rate, val << PLL_POST_DIV_SHIFT); } -const struct clk_ops clk_trion_pll_postdiv_ops = { +const struct clk_ops clk_alpha_pll_postdiv_trion_ops = { .recalc_rate = clk_trion_pll_postdiv_recalc_rate, .round_rate = clk_trion_pll_postdiv_round_rate, .set_rate = clk_trion_pll_postdiv_set_rate, }; -EXPORT_SYMBOL_GPL(clk_trion_pll_postdiv_ops); +EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_trion_ops); static long clk_alpha_pll_postdiv_fabia_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) @@ -1399,13 +1385,13 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops); * @regmap: register map * @config: configuration to apply for pll */ -void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, +void clk_trion_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config) { if (config->l) regmap_write(regmap, PLL_L_VAL(pll), config->l); - regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL); + regmap_write(regmap, PLL_CAL_L_VAL(pll), TRION_PLL_CAL_VAL); if (config->alpha) regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha); @@ -1458,13 +1444,13 @@ void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, /* Place the PLL in STANDBY mode */ regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); } -EXPORT_SYMBOL_GPL(clk_lucid_pll_configure); +EXPORT_SYMBOL_GPL(clk_trion_pll_configure); /* - * The Lucid PLL requires a power-on self-calibration which happens when the + * The TRION PLL requires a power-on self-calibration which happens when the * PLL comes out of reset. Calibrate in case it is not completed. */ -static int alpha_pll_lucid_prepare(struct clk_hw *hw) +static int __alpha_pll_trion_prepare(struct clk_hw *hw, u32 pcal_done) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 regval; @@ -1472,7 +1458,7 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw) /* Return early if calibration is not needed. 
*/ regmap_read(pll->clkr.regmap, PLL_STATUS(pll), ®val); - if (regval & LUCID_PCAL_DONE) + if (regval & pcal_done) return 0; /* On/off to calibrate */ @@ -1483,7 +1469,17 @@ static int alpha_pll_lucid_prepare(struct clk_hw *hw) return ret; } -static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate, +static int alpha_pll_trion_prepare(struct clk_hw *hw) +{ + return __alpha_pll_trion_prepare(hw, TRION_PCAL_DONE); +} + +static int alpha_pll_lucid_prepare(struct clk_hw *hw) +{ + return __alpha_pll_trion_prepare(hw, LUCID_PCAL_DONE); +} + +static int alpha_pll_trion_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long prate) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); @@ -1537,25 +1533,27 @@ static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -const struct clk_ops clk_alpha_pll_lucid_ops = { - .prepare = alpha_pll_lucid_prepare, +const struct clk_ops clk_alpha_pll_trion_ops = { + .prepare = alpha_pll_trion_prepare, .enable = clk_trion_pll_enable, .disable = clk_trion_pll_disable, .is_enabled = clk_trion_pll_is_enabled, .recalc_rate = clk_trion_pll_recalc_rate, .round_rate = clk_alpha_pll_round_rate, - .set_rate = alpha_pll_lucid_set_rate, + .set_rate = alpha_pll_trion_set_rate, }; -EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops); +EXPORT_SYMBOL_GPL(clk_alpha_pll_trion_ops); -const struct clk_ops clk_alpha_pll_fixed_lucid_ops = { +const struct clk_ops clk_alpha_pll_lucid_ops = { + .prepare = alpha_pll_lucid_prepare, .enable = clk_trion_pll_enable, .disable = clk_trion_pll_disable, .is_enabled = clk_trion_pll_is_enabled, .recalc_rate = clk_trion_pll_recalc_rate, .round_rate = clk_alpha_pll_round_rate, + .set_rate = alpha_pll_trion_set_rate, }; -EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_ops); +EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops); const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = { .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate, diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index 704674a153b6..d3201b87c0cd 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -14,7 +14,7 @@ enum { CLK_ALPHA_PLL_TYPE_BRAMMO, CLK_ALPHA_PLL_TYPE_FABIA, CLK_ALPHA_PLL_TYPE_TRION, - CLK_ALPHA_PLL_TYPE_LUCID, + CLK_ALPHA_PLL_TYPE_LUCID = CLK_ALPHA_PLL_TYPE_TRION, CLK_ALPHA_PLL_TYPE_MAX, }; @@ -47,6 +47,12 @@ struct pll_vco { u32 val; }; +#define VCO(a, b, c) { \ + .val = a,\ + .min_freq = b,\ + .max_freq = c,\ +} + /** * struct clk_alpha_pll - phase locked loop (PLL) * @offset: base address of registers @@ -128,18 +134,23 @@ extern const struct clk_ops clk_alpha_pll_fabia_ops; extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops; extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops; +extern const struct clk_ops clk_alpha_pll_trion_ops; +extern const struct clk_ops clk_alpha_pll_fixed_trion_ops; +extern const struct clk_ops clk_alpha_pll_postdiv_trion_ops; + extern const struct clk_ops clk_alpha_pll_lucid_ops; -extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops; +#define clk_alpha_pll_fixed_lucid_ops clk_alpha_pll_fixed_trion_ops extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops; void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); -void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, +void clk_trion_pll_configure(struct clk_alpha_pll 
*pll, struct regmap *regmap, const struct alpha_pll_config *config); +#define clk_lucid_pll_configure(pll, regmap, config) \ + clk_trion_pll_configure(pll, regmap, config) + -extern const struct clk_ops clk_trion_fixed_pll_ops; -extern const struct clk_ops clk_trion_pll_postdiv_ops; #endif diff --git a/drivers/clk/qcom/clk-cpu-8996.c b/drivers/clk/qcom/clk-cpu-8996.c new file mode 100644 index 000000000000..4a4fde8dd12d --- /dev/null +++ b/drivers/clk/qcom/clk-cpu-8996.c @@ -0,0 +1,538 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +/* + * Each of the CPU clusters (Power and Perf) on msm8996 are + * clocked via 2 PLLs, a primary and alternate. There are also + * 2 Mux'es, a primary and secondary all connected together + * as shown below + * + * +-------+ + * XO | | + * +------------------>0 | + * | | + * PLL/2 | SMUX +----+ + * +------->1 | | + * | | | | + * | +-------+ | +-------+ + * | +---->0 | + * | | | + * +---------------+ | +----------->1 | CPU clk + * |Primary PLL +----+ PLL_EARLY | | +------> + * | +------+-----------+ +------>2 PMUX | + * +---------------+ | | | | + * | +------+ | +-->3 | + * +--^+ ACD +-----+ | +-------+ + * +---------------+ +------+ | + * |Alt PLL | | + * | +---------------------------+ + * +---------------+ PLL_EARLY + * + * The primary PLL is what drives the CPU clk, except for times + * when we are reprogramming the PLL itself (for rate changes) when + * we temporarily switch to an alternate PLL. + * + * The primary PLL operates on a single VCO range, between 600MHz + * and 3GHz. However the CPUs do support OPPs with frequencies + * between 300MHz and 600MHz. In order to support running the CPUs + * at those frequencies we end up having to lock the PLL at twice + * the rate and drive the CPU clk via the PLL/2 output and SMUX. + * + * So for frequencies above 600MHz we follow the following path + * Primary PLL --> PLL_EARLY --> PMUX(1) --> CPU clk + * and for frequencies between 300MHz and 600MHz we follow + * Primary PLL --> PLL/2 --> SMUX(1) --> PMUX(0) --> CPU clk + * + * ACD stands for Adaptive Clock Distribution and is used to + * detect voltage droops. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <soc/qcom/kryo-l2-accessors.h> + +#include "clk-alpha-pll.h" +#include "clk-regmap.h" + +enum _pmux_input { + DIV_2_INDEX = 0, + PLL_INDEX, + ACD_INDEX, + ALT_INDEX, + NUM_OF_PMUX_INPUTS +}; + +#define DIV_2_THRESHOLD 600000000 +#define PWRCL_REG_OFFSET 0x0 +#define PERFCL_REG_OFFSET 0x80000 +#define MUX_OFFSET 0x40 +#define ALT_PLL_OFFSET 0x100 +#define SSSCTL_OFFSET 0x160 + +static const u8 prim_pll_regs[PLL_OFF_MAX_REGS] = { + [PLL_OFF_L_VAL] = 0x04, + [PLL_OFF_ALPHA_VAL] = 0x08, + [PLL_OFF_USER_CTL] = 0x10, + [PLL_OFF_CONFIG_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL_U] = 0x1c, + [PLL_OFF_TEST_CTL] = 0x20, + [PLL_OFF_TEST_CTL_U] = 0x24, + [PLL_OFF_STATUS] = 0x28, +}; + +static const u8 alt_pll_regs[PLL_OFF_MAX_REGS] = { + [PLL_OFF_L_VAL] = 0x04, + [PLL_OFF_ALPHA_VAL] = 0x08, + [PLL_OFF_ALPHA_VAL_U] = 0x0c, + [PLL_OFF_USER_CTL] = 0x10, + [PLL_OFF_USER_CTL_U] = 0x14, + [PLL_OFF_CONFIG_CTL] = 0x18, + [PLL_OFF_TEST_CTL] = 0x20, + [PLL_OFF_TEST_CTL_U] = 0x24, + [PLL_OFF_STATUS] = 0x28, +}; + +/* PLLs */ + +static const struct alpha_pll_config hfpll_config = { + .l = 60, + .config_ctl_val = 0x200d4aa8, + .config_ctl_hi_val = 0x006, + .pre_div_mask = BIT(12), + .post_div_mask = 0x3 << 8, + .post_div_val = 0x1 << 8, + .main_output_mask = BIT(0), + .early_output_mask = BIT(3), +}; + +static struct clk_alpha_pll perfcl_pll = { + .offset = PERFCL_REG_OFFSET, + .regs = prim_pll_regs, + .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE, + .clkr.hw.init = &(struct clk_init_data){ + .name = "perfcl_pll", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_huayra_ops, + }, +}; + +static struct clk_alpha_pll pwrcl_pll = { + .offset = PWRCL_REG_OFFSET, + .regs = prim_pll_regs, + .flags = SUPPORTS_DYNAMIC_UPDATE | SUPPORTS_FSM_MODE, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pwrcl_pll", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_huayra_ops, + }, +}; + +static const struct pll_vco alt_pll_vco_modes[] = { + VCO(3, 250000000, 500000000), + VCO(2, 500000000, 750000000), + VCO(1, 750000000, 1000000000), + VCO(0, 1000000000, 2150400000), +}; + +static const struct alpha_pll_config altpll_config = { + .l = 16, + .vco_val = 0x3 << 20, + .vco_mask = 0x3 << 20, + .config_ctl_val = 0x4001051b, + .post_div_mask = 0x3 << 8, + .post_div_val = 0x1 << 8, + .main_output_mask = BIT(0), + .early_output_mask = BIT(3), +}; + +static struct clk_alpha_pll perfcl_alt_pll = { + .offset = PERFCL_REG_OFFSET + ALT_PLL_OFFSET, + .regs = alt_pll_regs, + .vco_table = alt_pll_vco_modes, + .num_vco = ARRAY_SIZE(alt_pll_vco_modes), + .flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE, + .clkr.hw.init = &(struct clk_init_data) { + .name = "perfcl_alt_pll", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_hwfsm_ops, + }, +}; + +static struct clk_alpha_pll pwrcl_alt_pll = { + .offset = PWRCL_REG_OFFSET + ALT_PLL_OFFSET, + .regs = alt_pll_regs, + .vco_table = alt_pll_vco_modes, + .num_vco = ARRAY_SIZE(alt_pll_vco_modes), + .flags = SUPPORTS_OFFLINE_REQ | SUPPORTS_FSM_MODE, + .clkr.hw.init = &(struct clk_init_data) { + .name = "pwrcl_alt_pll", + .parent_names = (const char *[]){ "xo" }, + .num_parents = 1, + .ops = &clk_alpha_pll_hwfsm_ops, + }, +}; + +struct clk_cpu_8996_mux { + u32 reg; + u8 shift; + u8 width; + struct notifier_block 
nb; + struct clk_hw *pll; + struct clk_hw *pll_div_2; + struct clk_regmap clkr; +}; + +static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event, + void *data); + +#define to_clk_cpu_8996_mux_nb(_nb) \ + container_of(_nb, struct clk_cpu_8996_mux, nb) + +static inline struct clk_cpu_8996_mux *to_clk_cpu_8996_mux_hw(struct clk_hw *hw) +{ + return container_of(to_clk_regmap(hw), struct clk_cpu_8996_mux, clkr); +} + +static u8 clk_cpu_8996_mux_get_parent(struct clk_hw *hw) +{ + struct clk_regmap *clkr = to_clk_regmap(hw); + struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw); + u32 mask = GENMASK(cpuclk->width - 1, 0); + u32 val; + + regmap_read(clkr->regmap, cpuclk->reg, &val); + val >>= cpuclk->shift; + + return val & mask; +} + +static int clk_cpu_8996_mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_regmap *clkr = to_clk_regmap(hw); + struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw); + u32 mask = GENMASK(cpuclk->width + cpuclk->shift - 1, cpuclk->shift); + u32 val; + + val = index; + val <<= cpuclk->shift; + + return regmap_update_bits(clkr->regmap, cpuclk->reg, mask, val); +} + +static int clk_cpu_8996_mux_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + struct clk_cpu_8996_mux *cpuclk = to_clk_cpu_8996_mux_hw(hw); + struct clk_hw *parent = cpuclk->pll; + + if (cpuclk->pll_div_2 && req->rate < DIV_2_THRESHOLD) { + if (req->rate < (DIV_2_THRESHOLD / 2)) + return -EINVAL; + + parent = cpuclk->pll_div_2; + } + + req->best_parent_rate = clk_hw_round_rate(parent, req->rate); + req->best_parent_hw = parent; + + return 0; +} + +static const struct clk_ops clk_cpu_8996_mux_ops = { + .set_parent = clk_cpu_8996_mux_set_parent, + .get_parent = clk_cpu_8996_mux_get_parent, + .determine_rate = clk_cpu_8996_mux_determine_rate, +}; + +static struct clk_cpu_8996_mux pwrcl_smux = { + .reg = PWRCL_REG_OFFSET + MUX_OFFSET, + .shift = 2, + .width = 2, + .clkr.hw.init = &(struct clk_init_data) { + .name = "pwrcl_smux", + .parent_names = (const char *[]){ + "xo", + "pwrcl_pll_main", + }, + .num_parents = 2, + .ops = &clk_cpu_8996_mux_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_cpu_8996_mux perfcl_smux = { + .reg = PERFCL_REG_OFFSET + MUX_OFFSET, + .shift = 2, + .width = 2, + .clkr.hw.init = &(struct clk_init_data) { + .name = "perfcl_smux", + .parent_names = (const char *[]){ + "xo", + "perfcl_pll_main", + }, + .num_parents = 2, + .ops = &clk_cpu_8996_mux_ops, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_cpu_8996_mux pwrcl_pmux = { + .reg = PWRCL_REG_OFFSET + MUX_OFFSET, + .shift = 0, + .width = 2, + .pll = &pwrcl_pll.clkr.hw, + .pll_div_2 = &pwrcl_smux.clkr.hw, + .nb.notifier_call = cpu_clk_notifier_cb, + .clkr.hw.init = &(struct clk_init_data) { + .name = "pwrcl_pmux", + .parent_names = (const char *[]){ + "pwrcl_smux", + "pwrcl_pll", + "pwrcl_pll_acd", + "pwrcl_alt_pll", + }, + .num_parents = 4, + .ops = &clk_cpu_8996_mux_ops, + /* CPU clock is critical and should never be gated */ + .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + }, +}; + +static struct clk_cpu_8996_mux perfcl_pmux = { + .reg = PERFCL_REG_OFFSET + MUX_OFFSET, + .shift = 0, + .width = 2, + .pll = &perfcl_pll.clkr.hw, + .pll_div_2 = &perfcl_smux.clkr.hw, + .nb.notifier_call = cpu_clk_notifier_cb, + .clkr.hw.init = &(struct clk_init_data) { + .name = "perfcl_pmux", + .parent_names = (const char *[]){ + "perfcl_smux", + "perfcl_pll", + "perfcl_pll_acd", + "perfcl_alt_pll", + }, + .num_parents = 4, + .ops = &clk_cpu_8996_mux_ops, 
+ /* CPU clock is critical and should never be gated */ + .flags = CLK_SET_RATE_PARENT | CLK_IS_CRITICAL, + }, +}; + +static const struct regmap_config cpu_msm8996_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x80210, + .fast_io = true, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static struct clk_regmap *cpu_msm8996_clks[] = { + &perfcl_pll.clkr, + &pwrcl_pll.clkr, + &perfcl_alt_pll.clkr, + &pwrcl_alt_pll.clkr, + &perfcl_smux.clkr, + &pwrcl_smux.clkr, + &perfcl_pmux.clkr, + &pwrcl_pmux.clkr, +}; + +static int qcom_cpu_clk_msm8996_register_clks(struct device *dev, + struct regmap *regmap) +{ + int i, ret; + + perfcl_smux.pll = clk_hw_register_fixed_factor(dev, "perfcl_pll_main", + "perfcl_pll", + CLK_SET_RATE_PARENT, + 1, 2); + if (IS_ERR(perfcl_smux.pll)) { + dev_err(dev, "Failed to initialize perfcl_pll_main\n"); + return PTR_ERR(perfcl_smux.pll); + } + + pwrcl_smux.pll = clk_hw_register_fixed_factor(dev, "pwrcl_pll_main", + "pwrcl_pll", + CLK_SET_RATE_PARENT, + 1, 2); + if (IS_ERR(pwrcl_smux.pll)) { + dev_err(dev, "Failed to initialize pwrcl_pll_main\n"); + clk_hw_unregister(perfcl_smux.pll); + return PTR_ERR(pwrcl_smux.pll); + } + + for (i = 0; i < ARRAY_SIZE(cpu_msm8996_clks); i++) { + ret = devm_clk_register_regmap(dev, cpu_msm8996_clks[i]); + if (ret) { + clk_hw_unregister(perfcl_smux.pll); + clk_hw_unregister(pwrcl_smux.pll); + return ret; + } + } + + clk_alpha_pll_configure(&perfcl_pll, regmap, &hfpll_config); + clk_alpha_pll_configure(&pwrcl_pll, regmap, &hfpll_config); + clk_alpha_pll_configure(&perfcl_alt_pll, regmap, &altpll_config); + clk_alpha_pll_configure(&pwrcl_alt_pll, regmap, &altpll_config); + + /* Enable alt PLLs */ + clk_prepare_enable(pwrcl_alt_pll.clkr.hw.clk); + clk_prepare_enable(perfcl_alt_pll.clkr.hw.clk); + + clk_notifier_register(pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb); + clk_notifier_register(perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb); + + return ret; +} + +static int qcom_cpu_clk_msm8996_unregister_clks(void) +{ + int ret = 0; + + ret = clk_notifier_unregister(pwrcl_pmux.clkr.hw.clk, &pwrcl_pmux.nb); + if (ret) + return ret; + + ret = clk_notifier_unregister(perfcl_pmux.clkr.hw.clk, &perfcl_pmux.nb); + if (ret) + return ret; + + clk_hw_unregister(perfcl_smux.pll); + clk_hw_unregister(pwrcl_smux.pll); + + return 0; +} + +#define CPU_AFINITY_MASK 0xFFF +#define PWRCL_CPU_REG_MASK 0x3 +#define PERFCL_CPU_REG_MASK 0x103 + +#define L2ACDCR_REG 0x580ULL +#define L2ACDTD_REG 0x581ULL +#define L2ACDDVMRC_REG 0x584ULL +#define L2ACDSSCR_REG 0x589ULL + +static DEFINE_SPINLOCK(qcom_clk_acd_lock); +static void __iomem *base; + +static void qcom_cpu_clk_msm8996_acd_init(void __iomem *base) +{ + u64 hwid; + unsigned long flags; + + spin_lock_irqsave(&qcom_clk_acd_lock, flags); + + hwid = read_cpuid_mpidr() & CPU_AFINITY_MASK; + + kryo_l2_set_indirect_reg(L2ACDTD_REG, 0x00006a11); + kryo_l2_set_indirect_reg(L2ACDDVMRC_REG, 0x000e0f0f); + kryo_l2_set_indirect_reg(L2ACDSSCR_REG, 0x00000601); + + if (PWRCL_CPU_REG_MASK == (hwid | PWRCL_CPU_REG_MASK)) { + writel(0xf, base + PWRCL_REG_OFFSET + SSSCTL_OFFSET); + kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd); + } + + if (PERFCL_CPU_REG_MASK == (hwid | PERFCL_CPU_REG_MASK)) { + kryo_l2_set_indirect_reg(L2ACDCR_REG, 0x002c5ffd); + writel(0xf, base + PERFCL_REG_OFFSET + SSSCTL_OFFSET); + } + + spin_unlock_irqrestore(&qcom_clk_acd_lock, flags); +} + +static int cpu_clk_notifier_cb(struct notifier_block *nb, unsigned long event, + void *data) +{ + struct clk_cpu_8996_mux 
*cpuclk = to_clk_cpu_8996_mux_nb(nb); + struct clk_notifier_data *cnd = data; + int ret; + + switch (event) { + case PRE_RATE_CHANGE: + ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw, ALT_INDEX); + qcom_cpu_clk_msm8996_acd_init(base); + break; + case POST_RATE_CHANGE: + if (cnd->new_rate < DIV_2_THRESHOLD) + ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw, + DIV_2_INDEX); + else + ret = clk_cpu_8996_mux_set_parent(&cpuclk->clkr.hw, + ACD_INDEX); + break; + default: + ret = 0; + break; + } + + return notifier_from_errno(ret); +}; + +static int qcom_cpu_clk_msm8996_driver_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + struct clk_hw_onecell_data *data; + struct device *dev = &pdev->dev; + int ret; + + data = devm_kzalloc(dev, struct_size(data, hws, 2), GFP_KERNEL); + if (!data) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + regmap = devm_regmap_init_mmio(dev, base, &cpu_msm8996_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + ret = qcom_cpu_clk_msm8996_register_clks(dev, regmap); + if (ret) + return ret; + + qcom_cpu_clk_msm8996_acd_init(base); + + data->hws[0] = &pwrcl_pmux.clkr.hw; + data->hws[1] = &perfcl_pmux.clkr.hw; + data->num = 2; + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, data); +} + +static int qcom_cpu_clk_msm8996_driver_remove(struct platform_device *pdev) +{ + return qcom_cpu_clk_msm8996_unregister_clks(); +} + +static const struct of_device_id qcom_cpu_clk_msm8996_match_table[] = { + { .compatible = "qcom,msm8996-apcc" }, + {} +}; +MODULE_DEVICE_TABLE(of, qcom_cpu_clk_msm8996_match_table); + +static struct platform_driver qcom_cpu_clk_msm8996_driver = { + .probe = qcom_cpu_clk_msm8996_driver_probe, + .remove = qcom_cpu_clk_msm8996_driver_remove, + .driver = { + .name = "qcom-msm8996-apcc", + .of_match_table = qcom_cpu_clk_msm8996_match_table, + }, +}; +module_platform_driver(qcom_cpu_clk_msm8996_driver); + +MODULE_DESCRIPTION("QCOM MSM8996 CPU Clock Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 52f63ad787ba..0e1dfa89489e 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -452,6 +452,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8916 = { .num_clks = ARRAY_SIZE(msm8916_clks), }; +/* msm8936 */ +DEFINE_CLK_SMD_RPM(msm8936, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8936, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8936, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8936, sysmmnoc_clk, sysmmnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(msm8936, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8936, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, bb_clk2_pin, bb_clk2_a_pin, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8936, rf_clk2_pin, rf_clk2_a_pin, 5); + +static struct clk_smd_rpm *msm8936_clks[] = { + [RPM_SMD_PCNOC_CLK] = &msm8936_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8936_pcnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8936_snoc_clk, + 
[RPM_SMD_SNOC_A_CLK] = &msm8936_snoc_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8936_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8936_bimc_a_clk, + [RPM_SMD_SYSMMNOC_CLK] = &msm8936_sysmmnoc_clk, + [RPM_SMD_SYSMMNOC_A_CLK] = &msm8936_sysmmnoc_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8936_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8936_qdss_a_clk, + [RPM_SMD_BB_CLK1] = &msm8936_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8936_bb_clk1_a, + [RPM_SMD_BB_CLK2] = &msm8936_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8936_bb_clk2_a, + [RPM_SMD_RF_CLK1] = &msm8936_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8936_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8936_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8936_rf_clk2_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8936_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8936_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2_PIN] = &msm8936_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8936_bb_clk2_a_pin, + [RPM_SMD_RF_CLK1_PIN] = &msm8936_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8936_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8936_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8936_rf_clk2_a_pin, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8936 = { + .clks = msm8936_clks, + .num_clks = ARRAY_SIZE(msm8936_clks), +}; + /* msm8974 */ DEFINE_CLK_SMD_RPM(msm8974, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8974, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); @@ -574,6 +623,175 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8976 = { .num_clks = ARRAY_SIZE(msm8976_clks), }; +/* msm8992 */ +DEFINE_CLK_SMD_RPM(msm8992, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8992, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8992, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8992, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8992, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8992, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, bb_clk2_pin, bb_clk2_a_pin, 2); + +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk2, div_clk2_a, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk3, div_clk3_a, 13); +DEFINE_CLK_SMD_RPM(msm8992, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8); +DEFINE_CLK_SMD_RPM(msm8992, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, + QCOM_SMD_RPM_BUS_CLK, 3); +DEFINE_CLK_SMD_RPM_QDSS(msm8992, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8992, rf_clk2_pin, rf_clk2_a_pin, 5); + +DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1); + +static struct clk_smd_rpm *msm8992_clks[] = { + [RPM_SMD_PNOC_CLK] = &msm8992_pnoc_clk, + [RPM_SMD_PNOC_A_CLK] = &msm8992_pnoc_a_clk, + [RPM_SMD_OCMEMGX_CLK] = &msm8992_ocmemgx_clk, + [RPM_SMD_OCMEMGX_A_CLK] = &msm8992_ocmemgx_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8992_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8992_bimc_a_clk, + 
[RPM_SMD_CNOC_CLK] = &msm8992_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &msm8992_cnoc_a_clk, + [RPM_SMD_GFX3D_CLK_SRC] = &msm8992_gfx3d_clk_src, + [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8992_gfx3d_a_clk_src, + [RPM_SMD_SNOC_CLK] = &msm8992_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8992_snoc_a_clk, + [RPM_SMD_BB_CLK1] = &msm8992_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8992_bb_clk1_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8992_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8992_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2] = &msm8992_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8992_bb_clk2_a, + [RPM_SMD_BB_CLK2_PIN] = &msm8992_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8992_bb_clk2_a_pin, + [RPM_SMD_DIV_CLK1] = &msm8992_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &msm8992_div_clk1_a, + [RPM_SMD_DIV_CLK2] = &msm8992_div_clk2, + [RPM_SMD_DIV_A_CLK2] = &msm8992_div_clk2_a, + [RPM_SMD_DIV_CLK3] = &msm8992_div_clk3, + [RPM_SMD_DIV_A_CLK3] = &msm8992_div_clk3_a, + [RPM_SMD_IPA_CLK] = &msm8992_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &msm8992_ipa_a_clk, + [RPM_SMD_LN_BB_CLK] = &msm8992_ln_bb_clk, + [RPM_SMD_LN_BB_A_CLK] = &msm8992_ln_bb_a_clk, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8992_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8992_mmssnoc_ahb_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8992_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8992_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &msm8992_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8992_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8992_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8992_rf_clk2_a, + [RPM_SMD_RF_CLK1_PIN] = &msm8992_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8992_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8992_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8992_rf_clk2_a_pin, + [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk, + [RPM_SMD_CE2_CLK] = &msm8992_ce2_clk, + [RPM_SMD_CE2_A_CLK] = &msm8992_ce2_a_clk, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8992 = { + .clks = msm8992_clks, + .num_clks = ARRAY_SIZE(msm8992_clks), +}; + +/* msm8994 */ +DEFINE_CLK_SMD_RPM(msm8994, pnoc_clk, pnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8994, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8994, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8994, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8994, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8994, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, bb_clk2_pin, bb_clk2_a_pin, 2); + +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, div_clk2, div_clk2_a, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, div_clk3, div_clk3_a, 13); +DEFINE_CLK_SMD_RPM(msm8994, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, ln_bb_clk, ln_bb_a_clk, 8); +DEFINE_CLK_SMD_RPM(msm8994, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, + QCOM_SMD_RPM_BUS_CLK, 3); +DEFINE_CLK_SMD_RPM_QDSS(msm8994, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8994, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8994, rf_clk2_pin, rf_clk2_a_pin, 5); + 
+DEFINE_CLK_SMD_RPM(msm8994, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8994, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8994, ce3_clk, ce3_a_clk, QCOM_SMD_RPM_CE_CLK, 2); + +static struct clk_smd_rpm *msm8994_clks[] = { + [RPM_SMD_PNOC_CLK] = &msm8994_pnoc_clk, + [RPM_SMD_PNOC_A_CLK] = &msm8994_pnoc_a_clk, + [RPM_SMD_OCMEMGX_CLK] = &msm8994_ocmemgx_clk, + [RPM_SMD_OCMEMGX_A_CLK] = &msm8994_ocmemgx_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8994_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8994_bimc_a_clk, + [RPM_SMD_CNOC_CLK] = &msm8994_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &msm8994_cnoc_a_clk, + [RPM_SMD_GFX3D_CLK_SRC] = &msm8994_gfx3d_clk_src, + [RPM_SMD_GFX3D_A_CLK_SRC] = &msm8994_gfx3d_a_clk_src, + [RPM_SMD_SNOC_CLK] = &msm8994_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8994_snoc_a_clk, + [RPM_SMD_BB_CLK1] = &msm8994_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8994_bb_clk1_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8994_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8994_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2] = &msm8994_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8994_bb_clk2_a, + [RPM_SMD_BB_CLK2_PIN] = &msm8994_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8994_bb_clk2_a_pin, + [RPM_SMD_DIV_CLK1] = &msm8994_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &msm8994_div_clk1_a, + [RPM_SMD_DIV_CLK2] = &msm8994_div_clk2, + [RPM_SMD_DIV_A_CLK2] = &msm8994_div_clk2_a, + [RPM_SMD_DIV_CLK3] = &msm8994_div_clk3, + [RPM_SMD_DIV_A_CLK3] = &msm8994_div_clk3_a, + [RPM_SMD_IPA_CLK] = &msm8994_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &msm8994_ipa_a_clk, + [RPM_SMD_LN_BB_CLK] = &msm8994_ln_bb_clk, + [RPM_SMD_LN_BB_A_CLK] = &msm8994_ln_bb_a_clk, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8994_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8994_mmssnoc_ahb_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8994_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8994_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &msm8994_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &msm8994_rf_clk1_a, + [RPM_SMD_RF_CLK2] = &msm8994_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8994_rf_clk2_a, + [RPM_SMD_RF_CLK1_PIN] = &msm8994_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &msm8994_rf_clk1_a_pin, + [RPM_SMD_RF_CLK2_PIN] = &msm8994_rf_clk2_pin, + [RPM_SMD_RF_CLK2_A_PIN] = &msm8994_rf_clk2_a_pin, + [RPM_SMD_CE1_CLK] = &msm8994_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &msm8994_ce1_a_clk, + [RPM_SMD_CE2_CLK] = &msm8994_ce2_clk, + [RPM_SMD_CE2_A_CLK] = &msm8994_ce2_a_clk, + [RPM_SMD_CE3_CLK] = &msm8994_ce3_clk, + [RPM_SMD_CE3_A_CLK] = &msm8994_ce3_a_clk, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8994 = { + .clks = msm8994_clks, + .num_clks = ARRAY_SIZE(msm8994_clks), +}; + /* msm8996 */ DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); @@ -766,13 +984,92 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = { .num_clks = ARRAY_SIZE(msm8998_clks), }; +/* sdm660 */ +DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0, + 19200000); +DEFINE_CLK_SMD_RPM(sdm660, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(sdm660, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(sdm660, cnoc_periph_clk, cnoc_periph_a_clk, + QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, mmssnoc_axi_clk, mmssnoc_axi_a_clk, + QCOM_SMD_RPM_MMAXI_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM(sdm660, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0); 
+DEFINE_CLK_SMD_RPM(sdm660, aggre2_noc_clk, aggre2_noc_a_clk, + QCOM_SMD_RPM_AGGR_CLK, 2); +DEFINE_CLK_SMD_RPM_QDSS(sdm660, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, rf_clk1, rf_clk1_a, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, div_clk1, div_clk1_a, 11); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk1, ln_bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk2, ln_bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3); + +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, rf_clk1_pin, rf_clk1_a_pin, 4); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk1_pin, + ln_bb_clk1_pin_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk2_pin, + ln_bb_clk2_pin_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, + ln_bb_clk3_pin_a, 3); +static struct clk_smd_rpm *sdm660_clks[] = { + [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo, + [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a, + [RPM_SMD_SNOC_CLK] = &sdm660_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &sdm660_snoc_a_clk, + [RPM_SMD_CNOC_CLK] = &sdm660_cnoc_clk, + [RPM_SMD_CNOC_A_CLK] = &sdm660_cnoc_a_clk, + [RPM_SMD_CNOC_PERIPH_CLK] = &sdm660_cnoc_periph_clk, + [RPM_SMD_CNOC_PERIPH_A_CLK] = &sdm660_cnoc_periph_a_clk, + [RPM_SMD_BIMC_CLK] = &sdm660_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &sdm660_bimc_a_clk, + [RPM_SMD_MMSSNOC_AXI_CLK] = &sdm660_mmssnoc_axi_clk, + [RPM_SMD_MMSSNOC_AXI_CLK_A] = &sdm660_mmssnoc_axi_a_clk, + [RPM_SMD_IPA_CLK] = &sdm660_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &sdm660_ipa_a_clk, + [RPM_SMD_CE1_CLK] = &sdm660_ce1_clk, + [RPM_SMD_CE1_A_CLK] = &sdm660_ce1_a_clk, + [RPM_SMD_AGGR2_NOC_CLK] = &sdm660_aggre2_noc_clk, + [RPM_SMD_AGGR2_NOC_A_CLK] = &sdm660_aggre2_noc_a_clk, + [RPM_SMD_QDSS_CLK] = &sdm660_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &sdm660_qdss_a_clk, + [RPM_SMD_RF_CLK1] = &sdm660_rf_clk1, + [RPM_SMD_RF_CLK1_A] = &sdm660_rf_clk1_a, + [RPM_SMD_DIV_CLK1] = &sdm660_div_clk1, + [RPM_SMD_DIV_A_CLK1] = &sdm660_div_clk1_a, + [RPM_SMD_LN_BB_CLK] = &sdm660_ln_bb_clk1, + [RPM_SMD_LN_BB_A_CLK] = &sdm660_ln_bb_clk1_a, + [RPM_SMD_LN_BB_CLK2] = &sdm660_ln_bb_clk2, + [RPM_SMD_LN_BB_CLK2_A] = &sdm660_ln_bb_clk2_a, + [RPM_SMD_LN_BB_CLK3] = &sdm660_ln_bb_clk3, + [RPM_SMD_LN_BB_CLK3_A] = &sdm660_ln_bb_clk3_a, + [RPM_SMD_RF_CLK1_PIN] = &sdm660_rf_clk1_pin, + [RPM_SMD_RF_CLK1_A_PIN] = &sdm660_rf_clk1_a_pin, + [RPM_SMD_LN_BB_CLK1_PIN] = &sdm660_ln_bb_clk1_pin, + [RPM_SMD_LN_BB_CLK1_A_PIN] = &sdm660_ln_bb_clk1_pin_a, + [RPM_SMD_LN_BB_CLK2_PIN] = &sdm660_ln_bb_clk2_pin, + [RPM_SMD_LN_BB_CLK2_A_PIN] = &sdm660_ln_bb_clk2_pin_a, + [RPM_SMD_LN_BB_CLK3_PIN] = &sdm660_ln_bb_clk3_pin, + [RPM_SMD_LN_BB_CLK3_A_PIN] = &sdm660_ln_bb_clk3_pin_a, +}; + +static const struct rpm_smd_clk_desc rpm_clk_sdm660 = { + .clks = sdm660_clks, + .num_clks = ARRAY_SIZE(sdm660_clks), +}; + static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, + { .compatible = "qcom,rpmcc-msm8936", .data = &rpm_clk_msm8936 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 }, + { .compatible = "qcom,rpmcc-msm8992", .data = &rpm_clk_msm8992 }, + { .compatible = "qcom,rpmcc-msm8994", .data = &rpm_clk_msm8994 }, { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 }, { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, + { .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 
}, { } }; MODULE_DEVICE_TABLE(of, rpm_smd_clk_match_table); diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index a8456e09c44d..d6b7adb4be38 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -3089,7 +3089,7 @@ static int gcc_ipq806x_probe(struct platform_device *pdev) regmap_write(regmap, 0x3cf8, 8); regmap_write(regmap, 0x3d18, 8); - return 0; + return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); } static struct platform_driver gcc_ipq806x_driver = { diff --git a/drivers/clk/qcom/gcc-ipq8074.c b/drivers/clk/qcom/gcc-ipq8074.c index e01f5f591d1e..ef2c9c4cf9ab 100644 --- a/drivers/clk/qcom/gcc-ipq8074.c +++ b/drivers/clk/qcom/gcc-ipq8074.c @@ -4316,6 +4316,62 @@ static struct clk_branch gcc_gp3_clk = { }, }; +static const struct freq_tbl ftbl_pcie_rchng_clk_src[] = { + F(19200000, P_XO, 1, 0, 0), + F(100000000, P_GPLL0, 8, 0, 0), + { } +}; + +struct clk_rcg2 pcie0_rchng_clk_src = { + .cmd_rcgr = 0x75070, + .freq_tbl = ftbl_pcie_rchng_clk_src, + .hid_width = 5, + .parent_map = gcc_xo_gpll0_map, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pcie0_rchng_clk_src", + .parent_hws = (const struct clk_hw *[]) { + &gpll0.clkr.hw }, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gcc_pcie0_rchng_clk = { + .halt_reg = 0x75070, + .halt_bit = 31, + .clkr = { + .enable_reg = 0x75070, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie0_rchng_clk", + .parent_hws = (const struct clk_hw *[]){ + &pcie0_rchng_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_axi_s_bridge_clk = { + .halt_reg = 0x75048, + .halt_bit = 31, + .clkr = { + .enable_reg = 0x75048, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie0_axi_s_bridge_clk", + .parent_hws = (const struct clk_hw *[]){ + &pcie0_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_hw *gcc_ipq8074_hws[] = { &gpll0_out_main_div2.hw, &gpll6_out_main_div2.hw, @@ -4551,6 +4607,9 @@ static struct clk_regmap *gcc_ipq8074_clks[] = { [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_PCIE0_RCHNG_CLK_SRC] = &pcie0_rchng_clk_src.clkr, + [GCC_PCIE0_RCHNG_CLK] = &gcc_pcie0_rchng_clk.clkr, + [GCC_PCIE0_AXI_S_BRIDGE_CLK] = &gcc_pcie0_axi_s_bridge_clk.clkr, }; static const struct qcom_reset_map gcc_ipq8074_resets[] = { @@ -4678,6 +4737,7 @@ static const struct qcom_reset_map gcc_ipq8074_resets[] = { [GCC_PCIE0_AXI_SLAVE_ARES] = { 0x75040, 4 }, [GCC_PCIE0_AHB_ARES] = { 0x75040, 5 }, [GCC_PCIE0_AXI_MASTER_STICKY_ARES] = { 0x75040, 6 }, + [GCC_PCIE0_AXI_SLAVE_STICKY_ARES] = { 0x75040, 7 }, [GCC_PCIE1_PIPE_ARES] = { 0x76040, 0 }, [GCC_PCIE1_SLEEP_ARES] = { 0x76040, 1 }, [GCC_PCIE1_CORE_STICKY_ARES] = { 0x76040, 2 }, diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index ca4383e3a02a..68d8f7aaf64e 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -1061,7 +1061,7 @@ static struct clk_branch gcc_disp_gpll0_clk_src = { .hw = &gpll0.clkr.hw, }, .num_parents = 1, - .ops = &clk_branch2_ops, + .ops = &clk_branch2_aon_ops, }, }, }; @@ -2251,6 +2251,19 @@ static struct clk_branch gcc_mss_q6_memnoc_axi_clk = { }, }; +static struct clk_branch gcc_lpass_cfg_noc_sway_clk = { + .halt_reg = 0x47018, + 
.halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x47018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_lpass_cfg_noc_sway_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc ufs_phy_gdsc = { .gdscr = 0x77004, .pd = { @@ -2428,6 +2441,7 @@ static struct clk_regmap *gcc_sc7180_clocks[] = { [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr, [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, [GCC_SEC_CTRL_CLK_SRC] = &gcc_sec_ctrl_clk_src.clkr, + [GCC_LPASS_CFG_NOC_SWAY_CLK] = &gcc_lpass_cfg_noc_sway_clk.clkr, }; static const struct qcom_reset_map gcc_sc7180_resets[] = { diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c index bf5730832ef3..f0b47b7d50ca 100644 --- a/drivers/clk/qcom/gcc-sdm660.c +++ b/drivers/clk/qcom/gcc-sdm660.c @@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = { static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = { .halt_reg = 0x8a004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x8a004, + .hwcg_bit = 1, .clkr = { .enable_reg = 0x8a004, .enable_mask = BIT(0), @@ -2402,6 +2405,7 @@ static const struct qcom_reset_map gcc_sdm660_resets[] = { [GCC_USB_20_BCR] = { 0x2f000 }, [GCC_USB_30_BCR] = { 0xf000 }, [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, + [GCC_MSS_RESTART] = { 0x79000 }, }; static const struct regmap_config gcc_sdm660_regmap_config = { diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index f6ce888098be..90f7febaf528 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * Copyright (c) 2018, 2020, The Linux Foundation. All rights reserved. 
*/ #include <linux/kernel.h> @@ -1344,7 +1344,7 @@ static struct clk_branch gcc_disp_gpll0_clk_src = { "gpll0", }, .num_parents = 1, - .ops = &clk_branch2_ops, + .ops = &clk_branch2_aon_ops, }, }, }; diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 72524cf11048..8e9b5b3cceaf 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ b/drivers/clk/qcom/gcc-sm8150.c @@ -34,14 +34,8 @@ enum { P_SLEEP_CLK, }; -static const struct pll_vco trion_vco[] = { - { 249600000, 2000000000, 0 }, -}; - static struct clk_alpha_pll gpll0 = { .offset = 0x0, - .vco_table = trion_vco, - .num_vco = ARRAY_SIZE(trion_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], .clkr = { .enable_reg = 0x52000, @@ -53,7 +47,7 @@ static struct clk_alpha_pll gpll0 = { .name = "bi_tcxo", }, .num_parents = 1, - .ops = &clk_trion_fixed_pll_ops, + .ops = &clk_alpha_pll_fixed_trion_ops, }, }, }; @@ -79,14 +73,12 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = { .hw = &gpll0.clkr.hw, }, .num_parents = 1, - .ops = &clk_trion_pll_postdiv_ops, + .ops = &clk_alpha_pll_postdiv_trion_ops, }, }; static struct clk_alpha_pll gpll7 = { .offset = 0x1a000, - .vco_table = trion_vco, - .num_vco = ARRAY_SIZE(trion_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], .clkr = { .enable_reg = 0x52000, @@ -98,15 +90,13 @@ static struct clk_alpha_pll gpll7 = { .name = "bi_tcxo", }, .num_parents = 1, - .ops = &clk_trion_fixed_pll_ops, + .ops = &clk_alpha_pll_fixed_trion_ops, }, }, }; static struct clk_alpha_pll gpll9 = { .offset = 0x1c000, - .vco_table = trion_vco, - .num_vco = ARRAY_SIZE(trion_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], .clkr = { .enable_reg = 0x52000, @@ -118,7 +108,7 @@ static struct clk_alpha_pll gpll9 = { .name = "bi_tcxo", }, .num_parents = 1, - .ops = &clk_trion_fixed_pll_ops, + .ops = &clk_alpha_pll_fixed_trion_ops, }, }, }; @@ -1617,6 +1607,7 @@ static struct clk_branch gcc_gpu_cfg_ahb_clk = { }; static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(15), @@ -1632,13 +1623,14 @@ static struct clk_branch gcc_gpu_gpll0_clk_src = { }; static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(16), .hw.init = &(struct clk_init_data){ .name = "gcc_gpu_gpll0_div_clk_src", .parent_hws = (const struct clk_hw *[]){ - &gcc_gpu_gpll0_clk_src.clkr.hw }, + &gpll0_out_even.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, @@ -1729,6 +1721,7 @@ static struct clk_branch gcc_npu_cfg_ahb_clk = { }; static struct clk_branch gcc_npu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(18), @@ -1744,13 +1737,14 @@ static struct clk_branch gcc_npu_gpll0_clk_src = { }; static struct clk_branch gcc_npu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_SKIP, .clkr = { .enable_reg = 0x52004, .enable_mask = BIT(19), .hw.init = &(struct clk_init_data){ .name = "gcc_npu_gpll0_div_clk_src", .parent_hws = (const struct clk_hw *[]){ - &gcc_npu_gpll0_clk_src.clkr.hw }, + &gpll0_out_even.clkr.hw }, .num_parents = 1, .flags = CLK_SET_RATE_PARENT, .ops = &clk_branch2_ops, diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c index 04944f11659b..bfc4ac02f9ea 100644 --- a/drivers/clk/qcom/gdsc.c +++ b/drivers/clk/qcom/gdsc.c @@ -6,6 +6,7 @@ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/err.h> +#include <linux/export.h> #include 
<linux/jiffies.h> #include <linux/kernel.h> #include <linux/ktime.h> @@ -29,6 +30,7 @@ /* CFG_GDSCR */ #define GDSC_POWER_UP_COMPLETE BIT(16) #define GDSC_POWER_DOWN_COMPLETE BIT(15) +#define GDSC_RETAIN_FF_ENABLE BIT(11) #define CFG_GDSCR_OFFSET 0x4 /* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */ @@ -216,6 +218,14 @@ static inline void gdsc_assert_reset_aon(struct gdsc *sc) regmap_update_bits(sc->regmap, sc->clamp_io_ctrl, GMEM_RESET_MASK, 0); } + +static void gdsc_retain_ff_on(struct gdsc *sc) +{ + u32 mask = GDSC_RETAIN_FF_ENABLE; + + regmap_update_bits(sc->regmap, sc->gdscr, mask, mask); +} + static int gdsc_enable(struct generic_pm_domain *domain) { struct gdsc *sc = domain_to_gdsc(domain); @@ -268,6 +278,9 @@ static int gdsc_enable(struct generic_pm_domain *domain) udelay(1); } + if (sc->flags & RETAIN_FF_ENABLE) + gdsc_retain_ff_on(sc); + return 0; } @@ -433,3 +446,29 @@ void gdsc_unregister(struct gdsc_desc *desc) } of_genpd_del_provider(dev->of_node); } + +/* + * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU + * running in the CX domain so the CPU doesn't need to know anything about the + * GX domain EXCEPT.... + * + * Hardware constraints dictate that the GX be powered down before the CX. If + * the GMU crashes it could leave the GX on. In order to successfully bring back + * the device the CPU needs to disable the GX headswitch. There being no sane + * way to reach in and touch that register from deep inside the GPU driver we + * need to set up the infrastructure to be able to ensure that the GPU can + * ensure that the GX is off during this super special case. We do this by + * defining a GX gdsc with a dummy enable function and a "default" disable + * function. + * + * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU + * driver. During power up, nothing will happen from the CPU (and the GMU will + * power up normally but during power down this will ensure that the GX domain + * is *really* off - this gives us a semi standard way of doing what we need. + */ +int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain) +{ + /* Do nothing but give genpd the impression that we were successful */ + return 0; +} +EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable); diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h index c36fc26dcdff..bd537438c793 100644 --- a/drivers/clk/qcom/gdsc.h +++ b/drivers/clk/qcom/gdsc.h @@ -50,6 +50,7 @@ struct gdsc { #define AON_RESET BIT(4) #define POLL_CFG_GDSCR BIT(5) #define ALWAYS_ON BIT(6) +#define RETAIN_FF_ENABLE BIT(7) struct reset_controller_dev *rcdev; unsigned int *resets; unsigned int reset_count; @@ -68,6 +69,7 @@ struct gdsc_desc { int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *, struct regmap *); void gdsc_unregister(struct gdsc_desc *desc); +int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain); #else static inline int gdsc_register(struct gdsc_desc *desc, struct reset_controller_dev *rcdev, diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c index 7b656b6aeced..88a739b6fec3 100644 --- a/drivers/clk/qcom/gpucc-sc7180.c +++ b/drivers/clk/qcom/gpucc-sc7180.c @@ -170,37 +170,12 @@ static struct gdsc cx_gdsc = { .flags = VOTABLE, }; -/* - * On SC7180 the GPU GX domain is *almost* entirely controlled by the GMU - * running in the CX domain so the CPU doesn't need to know anything about the - * GX domain EXCEPT.... - * - * Hardware constraints dictate that the GX be powered down before the CX. 
If - * the GMU crashes it could leave the GX on. In order to successfully bring back - * the device the CPU needs to disable the GX headswitch. There being no sane - * way to reach in and touch that register from deep inside the GPU driver we - * need to set up the infrastructure to be able to ensure that the GPU can - * ensure that the GX is off during this super special case. We do this by - * defining a GX gdsc with a dummy enable function and a "default" disable - * function. - * - * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU - * driver. During power up, nothing will happen from the CPU (and the GMU will - * power up normally but during power down this will ensure that the GX domain - * is *really* off - this gives us a semi standard way of doing what we need. - */ -static int gx_gdsc_enable(struct generic_pm_domain *domain) -{ - /* Do nothing but give genpd the impression that we were successful */ - return 0; -} - static struct gdsc gx_gdsc = { .gdscr = 0x100c, .clamp_io_ctrl = 0x1508, .pd = { .name = "gx_gdsc", - .power_on = gx_gdsc_enable, + .power_on = gdsc_gx_do_nothing_enable, }, .pwrsts = PWRSTS_OFF_ON, .flags = CLAMP_IO, diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c index e40efba1bf7d..5663698b306b 100644 --- a/drivers/clk/qcom/gpucc-sdm845.c +++ b/drivers/clk/qcom/gpucc-sdm845.c @@ -131,37 +131,12 @@ static struct gdsc gpu_cx_gdsc = { .flags = VOTABLE, }; -/* - * On SDM845 the GPU GX domain is *almost* entirely controlled by the GMU - * running in the CX domain so the CPU doesn't need to know anything about the - * GX domain EXCEPT.... - * - * Hardware constraints dictate that the GX be powered down before the CX. If - * the GMU crashes it could leave the GX on. In order to successfully bring back - * the device the CPU needs to disable the GX headswitch. There being no sane - * way to reach in and touch that register from deep inside the GPU driver we - * need to set up the infrastructure to be able to ensure that the GPU can - * ensure that the GX is off during this super special case. We do this by - * defining a GX gdsc with a dummy enable function and a "default" disable - * function. - * - * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU - * driver. During power up, nothing will happen from the CPU (and the GMU will - * power up normally but during power down this will ensure that the GX domain - * is *really* off - this gives us a semi standard way of doing what we need. - */ -static int gx_gdsc_enable(struct generic_pm_domain *domain) -{ - /* Do nothing but give genpd the impression that we were successful */ - return 0; -} - static struct gdsc gpu_gx_gdsc = { .gdscr = 0x100c, .clamp_io_ctrl = 0x1508, .pd = { .name = "gpu_gx_gdsc", - .power_on = gx_gdsc_enable, + .power_on = gdsc_gx_do_nothing_enable, }, .pwrsts = PWRSTS_OFF_ON, .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR, diff --git a/drivers/clk/qcom/gpucc-sm8150.c b/drivers/clk/qcom/gpucc-sm8150.c new file mode 100644 index 000000000000..27c40754b2c7 --- /dev/null +++ b/drivers/clk/qcom/gpucc-sm8150.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,gpucc-sm8150.h> + +#include "common.h" +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "reset.h" +#include "gdsc.h" + +enum { + P_BI_TCXO, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL1_OUT_MAIN, +}; + +static const struct pll_vco trion_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static struct alpha_pll_config gpu_cc_pll1_config = { + .l = 0x1a, + .alpha = 0xaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002267, + .config_ctl_hi1_val = 0x00000024, + .test_ctl_val = 0x00000000, + .test_ctl_hi_val = 0x00000002, + .test_ctl_hi1_val = 0x00000000, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x000000d0, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x100, + .vco_table = trion_vco, + .num_vco = ARRAY_SIZE(trion_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_TRION], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_trion_ops, + }, + }, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .fw_name = "gcc_gpu_gpll0_clk_src" }, + { .fw_name = "gcc_gpu_gpll0_div_clk_src" }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0), + F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x1120, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_gmu_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gmu_clk_src", + .parent_data = gpu_cc_parent_data_0, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x1078, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x107c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x107c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_crc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_apb_clk = { + .halt_reg = 0x1088, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_apb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x1098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gmu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = 
CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { + .halt_reg = 0x108c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x108c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_snoc_dvm_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_aon_clk = { + .halt_reg = 0x1004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_aon_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + .halt_reg = 0x109c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x109c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gmu_clk = { + .halt_reg = 0x1064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gmu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc gpu_cx_gdsc = { + .gdscr = 0x106c, + .gds_hw_ctrl = 0x1540, + .pd = { + .name = "gpu_cx_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc gpu_gx_gdsc = { + .gdscr = 0x100c, + .clamp_io_ctrl = 0x1508, + .pd = { + .name = "gpu_gx_gdsc", + .power_on = gdsc_gx_do_nothing_enable, + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR, +}; + +static struct clk_regmap *gpu_cc_sm8150_clocks[] = { + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr, + [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_GMU_CLK_SRC] = &gpu_cc_gmu_clk_src.clkr, + [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, +}; + +static const struct qcom_reset_map gpu_cc_sm8150_resets[] = { + [GPUCC_GPU_CC_CX_BCR] = { 0x1068 }, + [GPUCC_GPU_CC_GMU_BCR] = { 0x111c }, + [GPUCC_GPU_CC_GX_BCR] = { 0x1008 }, + [GPUCC_GPU_CC_SPDM_BCR] = { 0x1110 }, + [GPUCC_GPU_CC_XO_BCR] = { 0x1000 }, +}; + +static struct gdsc *gpu_cc_sm8150_gdscs[] = { + [GPU_CX_GDSC] = &gpu_cx_gdsc, + [GPU_GX_GDSC] = &gpu_gx_gdsc, +}; + +static const struct regmap_config gpu_cc_sm8150_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x8008, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_sm8150_desc = { + .config = &gpu_cc_sm8150_regmap_config, + .clks = gpu_cc_sm8150_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_sm8150_clocks), + .resets = gpu_cc_sm8150_resets, + .num_resets = ARRAY_SIZE(gpu_cc_sm8150_resets), + .gdscs = gpu_cc_sm8150_gdscs, + .num_gdscs = ARRAY_SIZE(gpu_cc_sm8150_gdscs), +}; + +static const struct of_device_id gpu_cc_sm8150_match_table[] = { + { .compatible = "qcom,sm8150-gpucc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpu_cc_sm8150_match_table); + +static int gpu_cc_sm8150_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &gpu_cc_sm8150_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + 
clk_trion_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + + return qcom_cc_really_probe(pdev, &gpu_cc_sm8150_desc, regmap); +} + +static struct platform_driver gpu_cc_sm8150_driver = { + .probe = gpu_cc_sm8150_probe, + .driver = { + .name = "sm8150-gpucc", + .of_match_table = gpu_cc_sm8150_match_table, + }, +}; + +static int __init gpu_cc_sm8150_init(void) +{ + return platform_driver_register(&gpu_cc_sm8150_driver); +} +subsys_initcall(gpu_cc_sm8150_init); + +static void __exit gpu_cc_sm8150_exit(void) +{ + platform_driver_unregister(&gpu_cc_sm8150_driver); +} +module_exit(gpu_cc_sm8150_exit); + +MODULE_DESCRIPTION("QTI GPUCC SM8150 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/gpucc-sm8250.c b/drivers/clk/qcom/gpucc-sm8250.c new file mode 100644 index 000000000000..3fa7d1f9ff98 --- /dev/null +++ b/drivers/clk/qcom/gpucc-sm8250.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,gpucc-sm8250.h> + +#include "common.h" +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "reset.h" +#include "gdsc.h" + +#define CX_GMU_CBCR_SLEEP_MASK 0xf +#define CX_GMU_CBCR_SLEEP_SHIFT 4 +#define CX_GMU_CBCR_WAKE_MASK 0xf +#define CX_GMU_CBCR_WAKE_SHIFT 8 + +enum { + P_BI_TCXO, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_MAIN, + P_GPLL0_OUT_MAIN_DIV, + P_GPU_CC_PLL0_OUT_MAIN, + P_GPU_CC_PLL1_OUT_MAIN, +}; + +static struct pll_vco lucid_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static const struct alpha_pll_config gpu_cc_pll1_config = { + .l = 0x1a, + .alpha = 0xaaa, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002261, + .config_ctl_hi1_val = 0x029a699c, + .user_ctl_val = 0x00000000, + .user_ctl_hi_val = 0x00000805, + .user_ctl_hi1_val = 0x00000000, +}; + +static struct clk_alpha_pll gpu_cc_pll1 = { + .offset = 0x100, + .vco_table = lucid_vco, + .num_vco = ARRAY_SIZE(lucid_vco), + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_pll1", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_lucid_ops, + }, + }, +}; + +static const struct parent_map gpu_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPU_CC_PLL1_OUT_MAIN, 3 }, + { P_GPLL0_OUT_MAIN, 5 }, + { P_GPLL0_OUT_MAIN_DIV, 6 }, +}; + +static const struct clk_parent_data gpu_cc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpu_cc_pll1.clkr.hw }, + { .fw_name = "gcc_gpu_gpll0_clk_src" }, + { .fw_name = "gcc_gpu_gpll0_div_clk_src" }, +}; + +static const struct freq_tbl ftbl_gpu_cc_gmu_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN_DIV, 1.5, 0, 0), + F(500000000, P_GPU_CC_PLL1_OUT_MAIN, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gpu_cc_gmu_clk_src = { + .cmd_rcgr = 0x1120, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gpu_cc_parent_map_0, + .freq_tbl = ftbl_gpu_cc_gmu_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gmu_clk_src", + .parent_data = gpu_cc_parent_data_0, + .num_parents = ARRAY_SIZE(gpu_cc_parent_data_0), + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch gpu_cc_ahb_clk = { + .halt_reg = 0x1078, + .halt_check = 
BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1078, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_crc_ahb_clk = { + .halt_reg = 0x107c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x107c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_crc_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_apb_clk = { + .halt_reg = 0x1088, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1088, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_apb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_gmu_clk = { + .halt_reg = 0x1098, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1098, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_gmu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cx_snoc_dvm_clk = { + .halt_reg = 0x108c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x108c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cx_snoc_dvm_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_aon_clk = { + .halt_reg = 0x1004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_aon_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_cxo_clk = { + .halt_reg = 0x109c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x109c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_cxo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_gx_gmu_clk = { + .halt_reg = 0x1064, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_gx_gmu_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpu_cc_gmu_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gpu_cc_hlos1_vote_gpu_smmu_clk = { + .halt_reg = 0x5000, + .halt_check = BRANCH_VOTED, + .clkr = { + .enable_reg = 0x5000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpu_cc_hlos1_vote_gpu_smmu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc gpu_cx_gdsc = { + .gdscr = 0x106c, + .gds_hw_ctrl = 0x1540, + .pd = { + .name = "gpu_cx_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc gpu_gx_gdsc = { + .gdscr = 0x100c, + .clamp_io_ctrl = 0x1508, + .pd = { + .name = "gpu_gx_gdsc", + .power_on = gdsc_gx_do_nothing_enable, + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = CLAMP_IO | AON_RESET | POLL_CFG_GDSCR, +}; + +static struct clk_regmap *gpu_cc_sm8250_clocks[] = { + [GPU_CC_AHB_CLK] = &gpu_cc_ahb_clk.clkr, + [GPU_CC_CRC_AHB_CLK] = &gpu_cc_crc_ahb_clk.clkr, + [GPU_CC_CX_APB_CLK] = &gpu_cc_cx_apb_clk.clkr, + [GPU_CC_CX_GMU_CLK] = &gpu_cc_cx_gmu_clk.clkr, + [GPU_CC_CX_SNOC_DVM_CLK] = &gpu_cc_cx_snoc_dvm_clk.clkr, + [GPU_CC_CXO_AON_CLK] = &gpu_cc_cxo_aon_clk.clkr, + [GPU_CC_CXO_CLK] = &gpu_cc_cxo_clk.clkr, + [GPU_CC_GMU_CLK_SRC] = 
&gpu_cc_gmu_clk_src.clkr, + [GPU_CC_GX_GMU_CLK] = &gpu_cc_gx_gmu_clk.clkr, + [GPU_CC_PLL1] = &gpu_cc_pll1.clkr, + [GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK] = &gpu_cc_hlos1_vote_gpu_smmu_clk.clkr, +}; + +static const struct qcom_reset_map gpu_cc_sm8250_resets[] = { + [GPUCC_GPU_CC_ACD_BCR] = { 0x1160 }, + [GPUCC_GPU_CC_CX_BCR] = { 0x1068 }, + [GPUCC_GPU_CC_GFX3D_AON_BCR] = { 0x10a0 }, + [GPUCC_GPU_CC_GMU_BCR] = { 0x111c }, + [GPUCC_GPU_CC_GX_BCR] = { 0x1008 }, + [GPUCC_GPU_CC_XO_BCR] = { 0x1000 }, +}; + +static struct gdsc *gpu_cc_sm8250_gdscs[] = { + [GPU_CX_GDSC] = &gpu_cx_gdsc, + [GPU_GX_GDSC] = &gpu_gx_gdsc, +}; + +static const struct regmap_config gpu_cc_sm8250_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x8008, + .fast_io = true, +}; + +static const struct qcom_cc_desc gpu_cc_sm8250_desc = { + .config = &gpu_cc_sm8250_regmap_config, + .clks = gpu_cc_sm8250_clocks, + .num_clks = ARRAY_SIZE(gpu_cc_sm8250_clocks), + .resets = gpu_cc_sm8250_resets, + .num_resets = ARRAY_SIZE(gpu_cc_sm8250_resets), + .gdscs = gpu_cc_sm8250_gdscs, + .num_gdscs = ARRAY_SIZE(gpu_cc_sm8250_gdscs), +}; + +static const struct of_device_id gpu_cc_sm8250_match_table[] = { + { .compatible = "qcom,sm8250-gpucc" }, + { } +}; +MODULE_DEVICE_TABLE(of, gpu_cc_sm8250_match_table); + +static int gpu_cc_sm8250_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + unsigned int value, mask; + + regmap = qcom_cc_map(pdev, &gpu_cc_sm8250_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + clk_lucid_pll_configure(&gpu_cc_pll1, regmap, &gpu_cc_pll1_config); + + /* + * Configure gpu_cc_cx_gmu_clk with recommended + * wakeup/sleep settings + */ + mask = CX_GMU_CBCR_WAKE_MASK << CX_GMU_CBCR_WAKE_SHIFT; + mask |= CX_GMU_CBCR_SLEEP_MASK << CX_GMU_CBCR_SLEEP_SHIFT; + value = 0xf << CX_GMU_CBCR_WAKE_SHIFT | 0xf << CX_GMU_CBCR_SLEEP_SHIFT; + regmap_update_bits(regmap, 0x1098, mask, value); + + return qcom_cc_really_probe(pdev, &gpu_cc_sm8250_desc, regmap); +} + +static struct platform_driver gpu_cc_sm8250_driver = { + .probe = gpu_cc_sm8250_probe, + .driver = { + .name = "sm8250-gpucc", + .of_match_table = gpu_cc_sm8250_match_table, + }, +}; + +static int __init gpu_cc_sm8250_init(void) +{ + return platform_driver_register(&gpu_cc_sm8250_driver); +} +subsys_initcall(gpu_cc_sm8250_init); + +static void __exit gpu_cc_sm8250_exit(void) +{ + platform_driver_unregister(&gpu_cc_sm8250_driver); +} +module_exit(gpu_cc_sm8250_exit); + +MODULE_DESCRIPTION("QTI GPU_CC SM8250 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/lpasscorecc-sc7180.c b/drivers/clk/qcom/lpasscorecc-sc7180.c new file mode 100644 index 000000000000..d4c1864e1ee9 --- /dev/null +++ b/drivers/clk/qcom/lpasscorecc-sc7180.c @@ -0,0 +1,476 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <linux/of.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,lpasscorecc-sc7180.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "common.h" +#include "gdsc.h" + +enum { + P_BI_TCXO, + P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, + P_SLEEP_CLK, +}; + +static struct pll_vco fabia_vco[] = { + { 249600000, 2000000000, 0 }, +}; + +static const struct alpha_pll_config lpass_lpaaudio_dig_pll_config = { + .l = 0x20, + .alpha = 0x0, + .config_ctl_val = 0x20485699, + .config_ctl_hi_val = 0x00002067, + .test_ctl_val = 0x40000000, + .test_ctl_hi_val = 0x00000000, + .user_ctl_val = 0x00005105, + .user_ctl_hi_val = 0x00004805, +}; + +static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = { + [CLK_ALPHA_PLL_TYPE_FABIA] = { + [PLL_OFF_L_VAL] = 0x04, + [PLL_OFF_CAL_L_VAL] = 0x8, + [PLL_OFF_USER_CTL] = 0x0c, + [PLL_OFF_USER_CTL_U] = 0x10, + [PLL_OFF_USER_CTL_U1] = 0x14, + [PLL_OFF_CONFIG_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL_U] = 0x1C, + [PLL_OFF_CONFIG_CTL_U1] = 0x20, + [PLL_OFF_TEST_CTL] = 0x24, + [PLL_OFF_TEST_CTL_U] = 0x28, + [PLL_OFF_STATUS] = 0x30, + [PLL_OFF_OPMODE] = 0x38, + [PLL_OFF_FRAC] = 0x40, + }, +}; + +static struct clk_alpha_pll lpass_lpaaudio_dig_pll = { + .offset = 0x1000, + .vco_table = fabia_vco, + .num_vco = ARRAY_SIZE(fabia_vco), + .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "lpass_lpaaudio_dig_pll", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fabia_ops, + }, + }, +}; + +static const struct clk_div_table + post_div_table_lpass_lpaaudio_dig_pll_out_odd[] = { + { 0x5, 5 }, + { } +}; + +static struct clk_alpha_pll_postdiv lpass_lpaaudio_dig_pll_out_odd = { + .offset = 0x1000, + .post_div_shift = 12, + .post_div_table = post_div_table_lpass_lpaaudio_dig_pll_out_odd, + .num_post_div = + ARRAY_SIZE(post_div_table_lpass_lpaaudio_dig_pll_out_odd), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA], + .clkr.hw.init = &(struct clk_init_data){ + .name = "lpass_lpaaudio_dig_pll_out_odd", + .parent_data = &(const struct clk_parent_data){ + .hw = &lpass_lpaaudio_dig_pll.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_alpha_pll_postdiv_fabia_ops, + }, +}; + +static const struct parent_map lpass_core_cc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 5 }, +}; + +static const struct clk_parent_data lpass_core_cc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &lpass_lpaaudio_dig_pll_out_odd.clkr.hw }, +}; + +static const struct parent_map lpass_core_cc_parent_map_2[] = { + { P_BI_TCXO, 0 }, +}; + +static struct clk_rcg2 core_clk_src = { + .cmd_rcgr = 0x1d000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_2, + .clkr.hw.init = &(struct clk_init_data){ + .name = "core_clk_src", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_ext_mclk0_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static const struct freq_tbl ftbl_ext_lpaif_clk_src[] = { + F(256000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 32), + F(512000, 
P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 16), + F(768000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 16), + F(1024000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 8), + F(1536000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 8), + F(2048000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 4), + F(3072000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 4), + F(4096000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 1, 2), + F(6144000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 1, 2), + F(8192000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 15, 0, 0), + F(9600000, P_BI_TCXO, 2, 0, 0), + F(12288000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 10, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(24576000, P_LPASS_LPAAUDIO_DIG_PLL_OUT_ODD, 5, 0, 0), + { } +}; + +static struct clk_rcg2 ext_mclk0_clk_src = { + .cmd_rcgr = 0x20000, + .mnd_width = 8, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_0, + .freq_tbl = ftbl_ext_mclk0_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "ext_mclk0_clk_src", + .parent_data = lpass_core_cc_parent_data_0, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 lpaif_pri_clk_src = { + .cmd_rcgr = 0x10000, + .mnd_width = 16, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_0, + .freq_tbl = ftbl_ext_lpaif_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "lpaif_pri_clk_src", + .parent_data = lpass_core_cc_parent_data_0, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 lpaif_sec_clk_src = { + .cmd_rcgr = 0x11000, + .mnd_width = 16, + .hid_width = 5, + .parent_map = lpass_core_cc_parent_map_0, + .freq_tbl = ftbl_ext_lpaif_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "lpaif_sec_clk_src", + .parent_data = lpass_core_cc_parent_data_0, + .num_parents = 2, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_branch lpass_audio_core_ext_mclk0_clk = { + .halt_reg = 0x20014, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x20014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x20014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "lpass_audio_core_ext_mclk0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &ext_mclk0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_core_lpaif_pri_ibit_clk = { + .halt_reg = 0x10018, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x10018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x10018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "lpass_audio_core_lpaif_pri_ibit_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &lpaif_pri_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_core_lpaif_sec_ibit_clk = { + .halt_reg = 0x11018, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x11018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x11018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "lpass_audio_core_lpaif_sec_ibit_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &lpaif_sec_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch lpass_audio_core_sysnoc_mport_core_clk = { + .halt_reg = 0x23000, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x23000, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 
0x23000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "lpass_audio_core_sysnoc_mport_core_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_regmap *lpass_core_cc_sc7180_clocks[] = { + [EXT_MCLK0_CLK_SRC] = &ext_mclk0_clk_src.clkr, + [LPAIF_PRI_CLK_SRC] = &lpaif_pri_clk_src.clkr, + [LPAIF_SEC_CLK_SRC] = &lpaif_sec_clk_src.clkr, + [CORE_CLK_SRC] = &core_clk_src.clkr, + [LPASS_AUDIO_CORE_EXT_MCLK0_CLK] = &lpass_audio_core_ext_mclk0_clk.clkr, + [LPASS_AUDIO_CORE_LPAIF_PRI_IBIT_CLK] = + &lpass_audio_core_lpaif_pri_ibit_clk.clkr, + [LPASS_AUDIO_CORE_LPAIF_SEC_IBIT_CLK] = + &lpass_audio_core_lpaif_sec_ibit_clk.clkr, + [LPASS_AUDIO_CORE_SYSNOC_MPORT_CORE_CLK] = + &lpass_audio_core_sysnoc_mport_core_clk.clkr, + [LPASS_LPAAUDIO_DIG_PLL] = &lpass_lpaaudio_dig_pll.clkr, + [LPASS_LPAAUDIO_DIG_PLL_OUT_ODD] = &lpass_lpaaudio_dig_pll_out_odd.clkr, +}; + +static struct gdsc lpass_pdc_hm_gdsc = { + .gdscr = 0x3090, + .pd = { + .name = "lpass_pdc_hm_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc lpass_audio_hm_gdsc = { + .gdscr = 0x9090, + .pd = { + .name = "lpass_audio_hm_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc lpass_core_hm_gdsc = { + .gdscr = 0x0, + .pd = { + .name = "lpass_core_hm_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = RETAIN_FF_ENABLE, +}; + +static struct gdsc *lpass_core_hm_sc7180_gdscs[] = { + [LPASS_CORE_HM_GDSCR] = &lpass_core_hm_gdsc, +}; + +static struct gdsc *lpass_audio_hm_sc7180_gdscs[] = { + [LPASS_PDC_HM_GDSCR] = &lpass_pdc_hm_gdsc, + [LPASS_AUDIO_HM_GDSCR] = &lpass_audio_hm_gdsc, +}; + +static struct regmap_config lpass_core_cc_sc7180_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, +}; + +static const struct qcom_cc_desc lpass_core_hm_sc7180_desc = { + .config = &lpass_core_cc_sc7180_regmap_config, + .gdscs = lpass_core_hm_sc7180_gdscs, + .num_gdscs = ARRAY_SIZE(lpass_core_hm_sc7180_gdscs), +}; + +static const struct qcom_cc_desc lpass_core_cc_sc7180_desc = { + .config = &lpass_core_cc_sc7180_regmap_config, + .clks = lpass_core_cc_sc7180_clocks, + .num_clks = ARRAY_SIZE(lpass_core_cc_sc7180_clocks), +}; + +static const struct qcom_cc_desc lpass_audio_hm_sc7180_desc = { + .config = &lpass_core_cc_sc7180_regmap_config, + .gdscs = lpass_audio_hm_sc7180_gdscs, + .num_gdscs = ARRAY_SIZE(lpass_audio_hm_sc7180_gdscs), +}; + +static int lpass_core_cc_sc7180_probe(struct platform_device *pdev) +{ + const struct qcom_cc_desc *desc; + struct regmap *regmap; + int ret; + + lpass_core_cc_sc7180_regmap_config.name = "lpass_audio_cc"; + desc = &lpass_audio_hm_sc7180_desc; + ret = qcom_cc_probe_by_index(pdev, 1, desc); + if (ret) + return ret; + + lpass_core_cc_sc7180_regmap_config.name = "lpass_core_cc"; + regmap = qcom_cc_map(pdev, &lpass_core_cc_sc7180_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Keep the CLK always-ON + * LPASS_AUDIO_CORE_SYSNOC_SWAY_CORE_CLK + */ + regmap_update_bits(regmap, 0x24000, BIT(0), BIT(0)); + + /* PLL settings */ + regmap_write(regmap, 0x1008, 0x20); + regmap_update_bits(regmap, 0x1014, BIT(0), BIT(0)); + + clk_fabia_pll_configure(&lpass_lpaaudio_dig_pll, regmap, + &lpass_lpaaudio_dig_pll_config); + + return qcom_cc_really_probe(pdev, &lpass_core_cc_sc7180_desc, regmap); +} + +static int lpass_hm_core_probe(struct platform_device *pdev) +{ + const struct 
qcom_cc_desc *desc; + + lpass_core_cc_sc7180_regmap_config.name = "lpass_hm_core"; + desc = &lpass_core_hm_sc7180_desc; + + return qcom_cc_probe_by_index(pdev, 0, desc); +} + +static const struct of_device_id lpass_core_cc_sc7180_match_table[] = { + { + .compatible = "qcom,sc7180-lpasshm", + .data = lpass_hm_core_probe, + }, + { + .compatible = "qcom,sc7180-lpasscorecc", + .data = lpass_core_cc_sc7180_probe, + }, + { } +}; +MODULE_DEVICE_TABLE(of, lpass_core_cc_sc7180_match_table); + +static int lpass_core_sc7180_probe(struct platform_device *pdev) +{ + int (*clk_probe)(struct platform_device *p); + int ret; + + pm_runtime_enable(&pdev->dev); + ret = pm_clk_create(&pdev->dev); + if (ret) + return ret; + + ret = pm_clk_add(&pdev->dev, "iface"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to acquire iface clock\n"); + goto disable_pm_runtime; + } + + clk_probe = of_device_get_match_data(&pdev->dev); + if (!clk_probe) + return -EINVAL; + + ret = clk_probe(pdev); + if (ret) + goto destroy_pm_clk; + + return 0; + +destroy_pm_clk: + pm_clk_destroy(&pdev->dev); + +disable_pm_runtime: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static const struct dev_pm_ops lpass_core_cc_pm_ops = { + SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL) +}; + +static struct platform_driver lpass_core_cc_sc7180_driver = { + .probe = lpass_core_sc7180_probe, + .driver = { + .name = "lpass_core_cc-sc7180", + .of_match_table = lpass_core_cc_sc7180_match_table, + .pm = &lpass_core_cc_pm_ops, + }, +}; + +static int __init lpass_core_cc_sc7180_init(void) +{ + return platform_driver_register(&lpass_core_cc_sc7180_driver); +} +subsys_initcall(lpass_core_cc_sc7180_init); + +static void __exit lpass_core_cc_sc7180_exit(void) +{ + platform_driver_unregister(&lpass_core_cc_sc7180_driver); +} +module_exit(lpass_core_cc_sc7180_exit); + +MODULE_DESCRIPTION("QTI LPASS_CORE_CC SC7180 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 9eb79bf90643..28e8730ce263 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -15,6 +15,7 @@ config CLK_RENESAS select CLK_R8A774A1 if ARCH_R8A774A1 select CLK_R8A774B1 if ARCH_R8A774B1 select CLK_R8A774C0 if ARCH_R8A774C0 + select CLK_R8A774E1 if ARCH_R8A774E1 select CLK_R8A7778 if ARCH_R8A7778 select CLK_R8A7779 if ARCH_R8A7779 select CLK_R8A7790 if ARCH_R8A7790 @@ -84,6 +85,10 @@ config CLK_R8A774C0 bool "RZ/G2E clock support" if COMPILE_TEST select CLK_RCAR_GEN3_CPG +config CLK_R8A774E1 + bool "RZ/G2H clock support" if COMPILE_TEST + select CLK_RCAR_GEN3_CPG + config CLK_R8A7778 bool "R-Car M1A clock support" if COMPILE_TEST select CLK_RENESAS_CPG_MSTP diff --git a/drivers/clk/renesas/Makefile b/drivers/clk/renesas/Makefile index a4066f9b34ef..c7c03ab9a6a3 100644 --- a/drivers/clk/renesas/Makefile +++ b/drivers/clk/renesas/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_CLK_R8A77470) += r8a77470-cpg-mssr.o obj-$(CONFIG_CLK_R8A774A1) += r8a774a1-cpg-mssr.o obj-$(CONFIG_CLK_R8A774B1) += r8a774b1-cpg-mssr.o obj-$(CONFIG_CLK_R8A774C0) += r8a774c0-cpg-mssr.o +obj-$(CONFIG_CLK_R8A774E1) += r8a774e1-cpg-mssr.o obj-$(CONFIG_CLK_R8A7778) += clk-r8a7778.o obj-$(CONFIG_CLK_R8A7779) += clk-r8a7779.o obj-$(CONFIG_CLK_R8A7790) += r8a7790-cpg-mssr.o diff --git a/drivers/clk/renesas/r8a774a1-cpg-mssr.c b/drivers/clk/renesas/r8a774a1-cpg-mssr.c index e05bfa200480..fd54b9f625da 100644 --- a/drivers/clk/renesas/r8a774a1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774a1-cpg-mssr.c @@ -237,6 +237,7 @@ static const struct 
mssr_mod_clk r8a774a1_mod_clks[] __initconst = { }; static const unsigned int r8a774a1_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; diff --git a/drivers/clk/renesas/r8a774b1-cpg-mssr.c b/drivers/clk/renesas/r8a774b1-cpg-mssr.c index c9af70917312..f436691271ec 100644 --- a/drivers/clk/renesas/r8a774b1-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774b1-cpg-mssr.c @@ -233,6 +233,7 @@ static const struct mssr_mod_clk r8a774b1_mod_clks[] __initconst = { }; static const unsigned int r8a774b1_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; diff --git a/drivers/clk/renesas/r8a774c0-cpg-mssr.c b/drivers/clk/renesas/r8a774c0-cpg-mssr.c index f91e7a484753..9fc9fa9e531a 100644 --- a/drivers/clk/renesas/r8a774c0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a774c0-cpg-mssr.c @@ -238,6 +238,7 @@ static const struct mssr_mod_clk r8a774c0_mod_clks[] __initconst = { }; static const unsigned int r8a774c0_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; diff --git a/drivers/clk/renesas/r8a774e1-cpg-mssr.c b/drivers/clk/renesas/r8a774e1-cpg-mssr.c new file mode 100644 index 000000000000..b96c486abb44 --- /dev/null +++ b/drivers/clk/renesas/r8a774e1-cpg-mssr.c @@ -0,0 +1,349 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * r8a774e1 Clock Pulse Generator / Module Standby and Software Reset + * + * Copyright (C) 2020 Renesas Electronics Corp. + * + * Based on r8a7795-cpg-mssr.c + * + * Copyright (C) 2015 Glider bvba + */ + +#include <linux/device.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/soc/renesas/rcar-rst.h> + +#include <dt-bindings/clock/r8a774e1-cpg-mssr.h> + +#include "renesas-cpg-mssr.h" +#include "rcar-gen3-cpg.h" + +enum clk_ids { + /* Core Clock Outputs exported to DT */ + LAST_DT_CORE_CLK = R8A774E1_CLK_CANFD, + + /* External Input Clocks */ + CLK_EXTAL, + CLK_EXTALR, + + /* Internal Core Clocks */ + CLK_MAIN, + CLK_PLL0, + CLK_PLL1, + CLK_PLL2, + CLK_PLL3, + CLK_PLL4, + CLK_PLL1_DIV2, + CLK_PLL1_DIV4, + CLK_S0, + CLK_S1, + CLK_S2, + CLK_S3, + CLK_SDSRC, + CLK_RPCSRC, + CLK_RINT, + + /* Module Clocks */ + MOD_CLK_BASE +}; + +static const struct cpg_core_clk r8a774e1_core_clks[] __initconst = { + /* External Clock Inputs */ + DEF_INPUT("extal", CLK_EXTAL), + DEF_INPUT("extalr", CLK_EXTALR), + + /* Internal Core Clocks */ + DEF_BASE(".main", CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL), + DEF_BASE(".pll0", CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN), + DEF_BASE(".pll1", CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN), + DEF_BASE(".pll2", CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN), + DEF_BASE(".pll3", CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN), + DEF_BASE(".pll4", CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN), + + DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2, CLK_PLL1, 2, 1), + DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s0", CLK_S0, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".s1", CLK_S1, CLK_PLL1_DIV2, 3, 1), + DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), + + DEF_BASE("rpc", R8A774E1_CLK_RPC, CLK_TYPE_GEN3_RPC, + CLK_RPCSRC), + DEF_BASE("rpcd2", R8A774E1_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, + R8A774E1_CLK_RPC), + + DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), + + /* Core Clock Outputs */ + DEF_GEN3_Z("z", R8A774E1_CLK_Z, CLK_TYPE_GEN3_Z, CLK_PLL0, 2, 8), + 
DEF_GEN3_Z("z2", R8A774E1_CLK_Z2, CLK_TYPE_GEN3_Z, CLK_PLL2, 2, 0), + DEF_FIXED("ztr", R8A774E1_CLK_ZTR, CLK_PLL1_DIV2, 6, 1), + DEF_FIXED("ztrd2", R8A774E1_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1), + DEF_FIXED("zt", R8A774E1_CLK_ZT, CLK_PLL1_DIV2, 4, 1), + DEF_FIXED("zx", R8A774E1_CLK_ZX, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED("s0d1", R8A774E1_CLK_S0D1, CLK_S0, 1, 1), + DEF_FIXED("s0d2", R8A774E1_CLK_S0D2, CLK_S0, 2, 1), + DEF_FIXED("s0d3", R8A774E1_CLK_S0D3, CLK_S0, 3, 1), + DEF_FIXED("s0d4", R8A774E1_CLK_S0D4, CLK_S0, 4, 1), + DEF_FIXED("s0d6", R8A774E1_CLK_S0D6, CLK_S0, 6, 1), + DEF_FIXED("s0d8", R8A774E1_CLK_S0D8, CLK_S0, 8, 1), + DEF_FIXED("s0d12", R8A774E1_CLK_S0D12, CLK_S0, 12, 1), + DEF_FIXED("s1d2", R8A774E1_CLK_S1D2, CLK_S1, 2, 1), + DEF_FIXED("s1d4", R8A774E1_CLK_S1D4, CLK_S1, 4, 1), + DEF_FIXED("s2d1", R8A774E1_CLK_S2D1, CLK_S2, 1, 1), + DEF_FIXED("s2d2", R8A774E1_CLK_S2D2, CLK_S2, 2, 1), + DEF_FIXED("s2d4", R8A774E1_CLK_S2D4, CLK_S2, 4, 1), + DEF_FIXED("s3d1", R8A774E1_CLK_S3D1, CLK_S3, 1, 1), + DEF_FIXED("s3d2", R8A774E1_CLK_S3D2, CLK_S3, 2, 1), + DEF_FIXED("s3d4", R8A774E1_CLK_S3D4, CLK_S3, 4, 1), + + DEF_GEN3_SD("sd0", R8A774E1_CLK_SD0, CLK_SDSRC, 0x074), + DEF_GEN3_SD("sd1", R8A774E1_CLK_SD1, CLK_SDSRC, 0x078), + DEF_GEN3_SD("sd2", R8A774E1_CLK_SD2, CLK_SDSRC, 0x268), + DEF_GEN3_SD("sd3", R8A774E1_CLK_SD3, CLK_SDSRC, 0x26c), + + DEF_FIXED("cl", R8A774E1_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cr", R8A774E1_CLK_CR, CLK_PLL1_DIV4, 2, 1), + DEF_FIXED("cp", R8A774E1_CLK_CP, CLK_EXTAL, 2, 1), + DEF_FIXED("cpex", R8A774E1_CLK_CPEX, CLK_EXTAL, 2, 1), + + DEF_DIV6P1("canfd", R8A774E1_CLK_CANFD, CLK_PLL1_DIV4, 0x244), + DEF_DIV6P1("csi0", R8A774E1_CLK_CSI0, CLK_PLL1_DIV4, 0x00c), + DEF_DIV6P1("mso", R8A774E1_CLK_MSO, CLK_PLL1_DIV4, 0x014), + DEF_DIV6P1("hdmi", R8A774E1_CLK_HDMI, CLK_PLL1_DIV4, 0x250), + + DEF_GEN3_OSC("osc", R8A774E1_CLK_OSC, CLK_EXTAL, 8), + + DEF_BASE("r", R8A774E1_CLK_R, CLK_TYPE_GEN3_R, CLK_RINT), +}; + +static const struct mssr_mod_clk r8a774e1_mod_clks[] __initconst = { + DEF_MOD("fdp1-1", 118, R8A774E1_CLK_S0D1), + DEF_MOD("fdp1-0", 119, R8A774E1_CLK_S0D1), + DEF_MOD("tmu4", 121, R8A774E1_CLK_S0D6), + DEF_MOD("tmu3", 122, R8A774E1_CLK_S3D2), + DEF_MOD("tmu2", 123, R8A774E1_CLK_S3D2), + DEF_MOD("tmu1", 124, R8A774E1_CLK_S3D2), + DEF_MOD("tmu0", 125, R8A774E1_CLK_CP), + DEF_MOD("vcplf", 130, R8A774E1_CLK_S2D1), + DEF_MOD("vdpb", 131, R8A774E1_CLK_S2D1), + DEF_MOD("scif5", 202, R8A774E1_CLK_S3D4), + DEF_MOD("scif4", 203, R8A774E1_CLK_S3D4), + DEF_MOD("scif3", 204, R8A774E1_CLK_S3D4), + DEF_MOD("scif1", 206, R8A774E1_CLK_S3D4), + DEF_MOD("scif0", 207, R8A774E1_CLK_S3D4), + DEF_MOD("msiof3", 208, R8A774E1_CLK_MSO), + DEF_MOD("msiof2", 209, R8A774E1_CLK_MSO), + DEF_MOD("msiof1", 210, R8A774E1_CLK_MSO), + DEF_MOD("msiof0", 211, R8A774E1_CLK_MSO), + DEF_MOD("sys-dmac2", 217, R8A774E1_CLK_S3D1), + DEF_MOD("sys-dmac1", 218, R8A774E1_CLK_S3D1), + DEF_MOD("sys-dmac0", 219, R8A774E1_CLK_S0D3), + DEF_MOD("cmt3", 300, R8A774E1_CLK_R), + DEF_MOD("cmt2", 301, R8A774E1_CLK_R), + DEF_MOD("cmt1", 302, R8A774E1_CLK_R), + DEF_MOD("cmt0", 303, R8A774E1_CLK_R), + DEF_MOD("tpu0", 304, R8A774E1_CLK_S3D4), + DEF_MOD("scif2", 310, R8A774E1_CLK_S3D4), + DEF_MOD("sdif3", 311, R8A774E1_CLK_SD3), + DEF_MOD("sdif2", 312, R8A774E1_CLK_SD2), + DEF_MOD("sdif1", 313, R8A774E1_CLK_SD1), + DEF_MOD("sdif0", 314, R8A774E1_CLK_SD0), + DEF_MOD("pcie1", 318, R8A774E1_CLK_S3D1), + DEF_MOD("pcie0", 319, R8A774E1_CLK_S3D1), + DEF_MOD("usb3-if0", 328, R8A774E1_CLK_S3D1), + DEF_MOD("usb-dmac0", 330, 
R8A774E1_CLK_S3D1), + DEF_MOD("usb-dmac1", 331, R8A774E1_CLK_S3D1), + DEF_MOD("rwdt", 402, R8A774E1_CLK_R), + DEF_MOD("intc-ex", 407, R8A774E1_CLK_CP), + DEF_MOD("intc-ap", 408, R8A774E1_CLK_S0D3), + DEF_MOD("audmac1", 501, R8A774E1_CLK_S1D2), + DEF_MOD("audmac0", 502, R8A774E1_CLK_S1D2), + DEF_MOD("hscif4", 516, R8A774E1_CLK_S3D1), + DEF_MOD("hscif3", 517, R8A774E1_CLK_S3D1), + DEF_MOD("hscif2", 518, R8A774E1_CLK_S3D1), + DEF_MOD("hscif1", 519, R8A774E1_CLK_S3D1), + DEF_MOD("hscif0", 520, R8A774E1_CLK_S3D1), + DEF_MOD("thermal", 522, R8A774E1_CLK_CP), + DEF_MOD("pwm", 523, R8A774E1_CLK_S0D12), + DEF_MOD("fcpvd1", 602, R8A774E1_CLK_S0D2), + DEF_MOD("fcpvd0", 603, R8A774E1_CLK_S0D2), + DEF_MOD("fcpvb1", 606, R8A774E1_CLK_S0D1), + DEF_MOD("fcpvb0", 607, R8A774E1_CLK_S0D1), + DEF_MOD("fcpvi1", 610, R8A774E1_CLK_S0D1), + DEF_MOD("fcpvi0", 611, R8A774E1_CLK_S0D1), + DEF_MOD("fcpf1", 614, R8A774E1_CLK_S0D1), + DEF_MOD("fcpf0", 615, R8A774E1_CLK_S0D1), + DEF_MOD("fcpcs", 619, R8A774E1_CLK_S0D1), + DEF_MOD("vspd1", 622, R8A774E1_CLK_S0D2), + DEF_MOD("vspd0", 623, R8A774E1_CLK_S0D2), + DEF_MOD("vspbc", 624, R8A774E1_CLK_S0D1), + DEF_MOD("vspbd", 626, R8A774E1_CLK_S0D1), + DEF_MOD("vspi1", 630, R8A774E1_CLK_S0D1), + DEF_MOD("vspi0", 631, R8A774E1_CLK_S0D1), + DEF_MOD("ehci1", 702, R8A774E1_CLK_S3D2), + DEF_MOD("ehci0", 703, R8A774E1_CLK_S3D2), + DEF_MOD("hsusb", 704, R8A774E1_CLK_S3D2), + DEF_MOD("csi20", 714, R8A774E1_CLK_CSI0), + DEF_MOD("csi40", 716, R8A774E1_CLK_CSI0), + DEF_MOD("du3", 721, R8A774E1_CLK_S2D1), + DEF_MOD("du1", 723, R8A774E1_CLK_S2D1), + DEF_MOD("du0", 724, R8A774E1_CLK_S2D1), + DEF_MOD("lvds", 727, R8A774E1_CLK_S0D4), + DEF_MOD("hdmi0", 729, R8A774E1_CLK_HDMI), + DEF_MOD("vin7", 804, R8A774E1_CLK_S0D2), + DEF_MOD("vin6", 805, R8A774E1_CLK_S0D2), + DEF_MOD("vin5", 806, R8A774E1_CLK_S0D2), + DEF_MOD("vin4", 807, R8A774E1_CLK_S0D2), + DEF_MOD("vin3", 808, R8A774E1_CLK_S0D2), + DEF_MOD("vin2", 809, R8A774E1_CLK_S0D2), + DEF_MOD("vin1", 810, R8A774E1_CLK_S0D2), + DEF_MOD("vin0", 811, R8A774E1_CLK_S0D2), + DEF_MOD("etheravb", 812, R8A774E1_CLK_S0D6), + DEF_MOD("sata0", 815, R8A774E1_CLK_S3D2), + DEF_MOD("gpio7", 905, R8A774E1_CLK_S3D4), + DEF_MOD("gpio6", 906, R8A774E1_CLK_S3D4), + DEF_MOD("gpio5", 907, R8A774E1_CLK_S3D4), + DEF_MOD("gpio4", 908, R8A774E1_CLK_S3D4), + DEF_MOD("gpio3", 909, R8A774E1_CLK_S3D4), + DEF_MOD("gpio2", 910, R8A774E1_CLK_S3D4), + DEF_MOD("gpio1", 911, R8A774E1_CLK_S3D4), + DEF_MOD("gpio0", 912, R8A774E1_CLK_S3D4), + DEF_MOD("can-fd", 914, R8A774E1_CLK_S3D2), + DEF_MOD("can-if1", 915, R8A774E1_CLK_S3D4), + DEF_MOD("can-if0", 916, R8A774E1_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A774E1_CLK_RPCD2), + DEF_MOD("i2c6", 918, R8A774E1_CLK_S0D6), + DEF_MOD("i2c5", 919, R8A774E1_CLK_S0D6), + DEF_MOD("adg", 922, R8A774E1_CLK_S0D1), + DEF_MOD("i2c-dvfs", 926, R8A774E1_CLK_CP), + DEF_MOD("i2c4", 927, R8A774E1_CLK_S0D6), + DEF_MOD("i2c3", 928, R8A774E1_CLK_S0D6), + DEF_MOD("i2c2", 929, R8A774E1_CLK_S3D2), + DEF_MOD("i2c1", 930, R8A774E1_CLK_S3D2), + DEF_MOD("i2c0", 931, R8A774E1_CLK_S3D2), + DEF_MOD("ssi-all", 1005, R8A774E1_CLK_S3D4), + DEF_MOD("ssi9", 1006, MOD_CLK_ID(1005)), + DEF_MOD("ssi8", 1007, MOD_CLK_ID(1005)), + DEF_MOD("ssi7", 1008, MOD_CLK_ID(1005)), + DEF_MOD("ssi6", 1009, MOD_CLK_ID(1005)), + DEF_MOD("ssi5", 1010, MOD_CLK_ID(1005)), + DEF_MOD("ssi4", 1011, MOD_CLK_ID(1005)), + DEF_MOD("ssi3", 1012, MOD_CLK_ID(1005)), + DEF_MOD("ssi2", 1013, MOD_CLK_ID(1005)), + DEF_MOD("ssi1", 1014, MOD_CLK_ID(1005)), + DEF_MOD("ssi0", 1015, MOD_CLK_ID(1005)), + 
DEF_MOD("scu-all", 1017, R8A774E1_CLK_S3D4), + DEF_MOD("scu-dvc1", 1018, MOD_CLK_ID(1017)), + DEF_MOD("scu-dvc0", 1019, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu1-mix1", 1020, MOD_CLK_ID(1017)), + DEF_MOD("scu-ctu0-mix0", 1021, MOD_CLK_ID(1017)), + DEF_MOD("scu-src9", 1022, MOD_CLK_ID(1017)), + DEF_MOD("scu-src8", 1023, MOD_CLK_ID(1017)), + DEF_MOD("scu-src7", 1024, MOD_CLK_ID(1017)), + DEF_MOD("scu-src6", 1025, MOD_CLK_ID(1017)), + DEF_MOD("scu-src5", 1026, MOD_CLK_ID(1017)), + DEF_MOD("scu-src4", 1027, MOD_CLK_ID(1017)), + DEF_MOD("scu-src3", 1028, MOD_CLK_ID(1017)), + DEF_MOD("scu-src2", 1029, MOD_CLK_ID(1017)), + DEF_MOD("scu-src1", 1030, MOD_CLK_ID(1017)), + DEF_MOD("scu-src0", 1031, MOD_CLK_ID(1017)), +}; + +static const unsigned int r8a774e1_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ + MOD_CLK_ID(408), /* INTC-AP (GIC) */ +}; + +/* + * CPG Clock Data + */ + +/* + * MD EXTAL PLL0 PLL1 PLL2 PLL3 PLL4 OSC + * 14 13 19 17 (MHz) + *------------------------------------------------------------------------- + * 0 0 0 0 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 0 0 1 16.66 x 1 x180 x192 x144 x128 x144 /16 + * 0 0 1 0 Prohibited setting + * 0 0 1 1 16.66 x 1 x180 x192 x144 x192 x144 /16 + * 0 1 0 0 20 x 1 x150 x160 x120 x160 x120 /19 + * 0 1 0 1 20 x 1 x150 x160 x120 x106 x120 /19 + * 0 1 1 0 Prohibited setting + * 0 1 1 1 20 x 1 x150 x160 x120 x160 x120 /19 + * 1 0 0 0 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 0 0 1 25 x 1 x120 x128 x96 x84 x96 /24 + * 1 0 1 0 Prohibited setting + * 1 0 1 1 25 x 1 x120 x128 x96 x128 x96 /24 + * 1 1 0 0 33.33 / 2 x180 x192 x144 x192 x144 /32 + * 1 1 0 1 33.33 / 2 x180 x192 x144 x128 x144 /32 + * 1 1 1 0 Prohibited setting + * 1 1 1 1 33.33 / 2 x180 x192 x144 x192 x144 /32 + */ +#define CPG_PLL_CONFIG_INDEX(md) ((((md) & BIT(14)) >> 11) | \ + (((md) & BIT(13)) >> 11) | \ + (((md) & BIT(19)) >> 18) | \ + (((md) & BIT(17)) >> 17)) + +static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = { + /* EXTAL div PLL1 mult/div PLL3 mult/div OSC prediv */ + { 1, 192, 1, 192, 1, 16, }, + { 1, 192, 1, 128, 1, 16, }, + { 0, /* Prohibited setting */ }, + { 1, 192, 1, 192, 1, 16, }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 160, 1, 106, 1, 19, }, + { 0, /* Prohibited setting */ }, + { 1, 160, 1, 160, 1, 19, }, + { 1, 128, 1, 128, 1, 24, }, + { 1, 128, 1, 84, 1, 24, }, + { 0, /* Prohibited setting */ }, + { 1, 128, 1, 128, 1, 24, }, + { 2, 192, 1, 192, 1, 32, }, + { 2, 192, 1, 128, 1, 32, }, + { 0, /* Prohibited setting */ }, + { 2, 192, 1, 192, 1, 32, }, +}; + +static int __init r8a774e1_cpg_mssr_init(struct device *dev) +{ + const struct rcar_gen3_cpg_pll_config *cpg_pll_config; + u32 cpg_mode; + int error; + + error = rcar_rst_read_mode_pins(&cpg_mode); + if (error) + return error; + + cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)]; + if (!cpg_pll_config->extal_div) { + dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode); + return -EINVAL; + } + + return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode); +} + +const struct cpg_mssr_info r8a774e1_cpg_mssr_info __initconst = { + /* Core Clocks */ + .core_clks = r8a774e1_core_clks, + .num_core_clks = ARRAY_SIZE(r8a774e1_core_clks), + .last_dt_core_clk = LAST_DT_CORE_CLK, + .num_total_core_clks = MOD_CLK_BASE, + + /* Module Clocks */ + .mod_clks = r8a774e1_mod_clks, + .num_mod_clks = ARRAY_SIZE(r8a774e1_mod_clks), + .num_hw_mod_clks = 12 * 32, + + /* Critical Module Clocks */ + .crit_mod_clks = r8a774e1_crit_mod_clks, + .num_crit_mod_clks = 
ARRAY_SIZE(r8a774e1_crit_mod_clks), + + /* Callbacks */ + .init = r8a774e1_cpg_mssr_init, + .cpg_clk_register = rcar_gen3_cpg_clk_register, +}; diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index ff5b3020cb03..068018ae3c6e 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -287,10 +287,10 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { }; static const unsigned int r8a7795_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; - /* * CPG Clock Data */ diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index e8d466dbc7f9..2cd6e3876fbd 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -262,10 +262,10 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = { }; static const unsigned int r8a7796_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; - /* * CPG Clock Data */ diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c index 7a05a2fc1cc6..2b55a06ac5cf 100644 --- a/drivers/clk/renesas/r8a77965-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c @@ -263,6 +263,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { }; static const unsigned int r8a77965_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; diff --git a/drivers/clk/renesas/r8a77970-cpg-mssr.c b/drivers/clk/renesas/r8a77970-cpg-mssr.c index cbed3769a100..0f59c84229a8 100644 --- a/drivers/clk/renesas/r8a77970-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77970-cpg-mssr.c @@ -165,10 +165,10 @@ static const struct mssr_mod_clk r8a77970_mod_clks[] __initconst = { }; static const unsigned int r8a77970_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; - /* * CPG Clock Data */ diff --git a/drivers/clk/renesas/r8a77980-cpg-mssr.c b/drivers/clk/renesas/r8a77980-cpg-mssr.c index 7227f675e61f..9fe372286c1e 100644 --- a/drivers/clk/renesas/r8a77980-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77980-cpg-mssr.c @@ -180,10 +180,10 @@ static const struct mssr_mod_clk r8a77980_mod_clks[] __initconst = { }; static const unsigned int r8a77980_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; - /* * CPG Clock Data */ diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index 8eda2e3e2480..2b97ab61d044 100644 --- a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -245,6 +245,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { }; static const unsigned int r8a77990_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index 056ebf3e70e2..5b4691117b47 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -183,10 +183,10 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { }; static const unsigned int r8a77995_crit_mod_clks[] __initconst = { + MOD_CLK_ID(402), /* RWDT */ MOD_CLK_ID(408), /* INTC-AP (GIC) */ }; - /* * CPG Clock Data */ diff --git a/drivers/clk/renesas/renesas-cpg-mssr.c b/drivers/clk/renesas/renesas-cpg-mssr.c index 
dcb6e2706d37..5a306d28738c 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.c +++ b/drivers/clk/renesas/renesas-cpg-mssr.c @@ -416,14 +416,6 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod, init.name = mod->name; init.ops = &cpg_mstp_clock_ops; init.flags = CLK_SET_RATE_PARENT; - for (i = 0; i < info->num_crit_mod_clks; i++) - if (id == info->crit_mod_clks[i]) { - dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n", - mod->name); - init.flags |= CLK_IS_CRITICAL; - break; - } - parent_name = __clk_get_name(parent); init.parent_names = &parent_name; init.num_parents = 1; @@ -432,6 +424,15 @@ static void __init cpg_mssr_register_mod_clk(const struct mssr_mod_clk *mod, clock->priv = priv; clock->hw.init = &init; + for (i = 0; i < info->num_crit_mod_clks; i++) + if (id == info->crit_mod_clks[i] && + cpg_mstp_clock_is_enabled(&clock->hw)) { + dev_dbg(dev, "MSTP %s setting CLK_IS_CRITICAL\n", + mod->name); + init.flags |= CLK_IS_CRITICAL; + break; + } + clk = clk_register(NULL, &clock->hw); if (IS_ERR(clk)) goto fail; @@ -720,6 +721,12 @@ static const struct of_device_id cpg_mssr_match[] = { .data = &r8a774c0_cpg_mssr_info, }, #endif +#ifdef CONFIG_CLK_R8A774E1 + { + .compatible = "renesas,r8a774e1-cpg-mssr", + .data = &r8a774e1_cpg_mssr_info, + }, +#endif #ifdef CONFIG_CLK_R8A7790 { .compatible = "renesas,r8a7790-cpg-mssr", diff --git a/drivers/clk/renesas/renesas-cpg-mssr.h b/drivers/clk/renesas/renesas-cpg-mssr.h index 55a18ef0efaf..1cc569484250 100644 --- a/drivers/clk/renesas/renesas-cpg-mssr.h +++ b/drivers/clk/renesas/renesas-cpg-mssr.h @@ -162,6 +162,7 @@ extern const struct cpg_mssr_info r8a77470_cpg_mssr_info; extern const struct cpg_mssr_info r8a774a1_cpg_mssr_info; extern const struct cpg_mssr_info r8a774b1_cpg_mssr_info; extern const struct cpg_mssr_info r8a774c0_cpg_mssr_info; +extern const struct cpg_mssr_info r8a774e1_cpg_mssr_info; extern const struct cpg_mssr_info r8a7790_cpg_mssr_info; extern const struct cpg_mssr_info r8a7791_cpg_mssr_info; extern const struct cpg_mssr_info r8a7792_cpg_mssr_info; diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c index 10560d963baf..4c6c9167ef50 100644 --- a/drivers/clk/rockchip/clk-pll.c +++ b/drivers/clk/rockchip/clk-pll.c @@ -12,6 +12,7 @@ #include <linux/io.h> #include <linux/delay.h> #include <linux/clk-provider.h> +#include <linux/iopoll.h> #include <linux/regmap.h> #include <linux/clk.h> #include "clk.h" @@ -86,23 +87,14 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll) { struct regmap *grf = pll->ctx->grf; unsigned int val; - int delay = 24000000, ret; - - while (delay > 0) { - ret = regmap_read(grf, pll->lock_offset, &val); - if (ret) { - pr_err("%s: failed to read pll lock status: %d\n", - __func__, ret); - return ret; - } + int ret; - if (val & BIT(pll->lock_shift)) - return 0; - delay--; - } + ret = regmap_read_poll_timeout(grf, pll->lock_offset, val, + val & BIT(pll->lock_shift), 0, 1000); + if (ret) + pr_err("%s: timeout waiting for pll to lock\n", __func__); - pr_err("%s: timeout waiting for pll to lock\n", __func__); - return -ETIMEDOUT; + return ret; } /** @@ -118,12 +110,31 @@ static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll) #define RK3036_PLLCON1_REFDIV_SHIFT 0 #define RK3036_PLLCON1_POSTDIV2_MASK 0x7 #define RK3036_PLLCON1_POSTDIV2_SHIFT 6 +#define RK3036_PLLCON1_LOCK_STATUS BIT(10) #define RK3036_PLLCON1_DSMPD_MASK 0x1 #define RK3036_PLLCON1_DSMPD_SHIFT 12 +#define RK3036_PLLCON1_PWRDOWN BIT(13) #define RK3036_PLLCON2_FRAC_MASK 0xffffff 
#define RK3036_PLLCON2_FRAC_SHIFT 0 -#define RK3036_PLLCON1_PWRDOWN (1 << 13) +static int rockchip_rk3036_pll_wait_lock(struct rockchip_clk_pll *pll) +{ + u32 pllcon; + int ret; + + /* + * Lock time typical 250, max 500 input clock cycles @24MHz + * So define a very safe maximum of 1000us, meaning 24000 cycles. + */ + ret = readl_relaxed_poll_timeout(pll->reg_base + RK3036_PLLCON(1), + pllcon, + pllcon & RK3036_PLLCON1_LOCK_STATUS, + 0, 1000); + if (ret) + pr_err("%s: timeout waiting for pll to lock\n", __func__); + + return ret; +} static void rockchip_rk3036_pll_get_params(struct rockchip_clk_pll *pll, struct rockchip_pll_rate_table *rate) @@ -221,7 +232,7 @@ static int rockchip_rk3036_pll_set_params(struct rockchip_clk_pll *pll, writel_relaxed(pllcon, pll->reg_base + RK3036_PLLCON(2)); /* wait for the pll to lock */ - ret = rockchip_pll_wait_lock(pll); + ret = rockchip_rk3036_pll_wait_lock(pll); if (ret) { pr_warn("%s: pll update unsuccessful, trying to restore old params\n", __func__); @@ -260,7 +271,7 @@ static int rockchip_rk3036_pll_enable(struct clk_hw *hw) writel(HIWORD_UPDATE(0, RK3036_PLLCON1_PWRDOWN, 0), pll->reg_base + RK3036_PLLCON(1)); - rockchip_pll_wait_lock(pll); + rockchip_rk3036_pll_wait_lock(pll); return 0; } @@ -589,19 +600,20 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = { static int rockchip_rk3399_pll_wait_lock(struct rockchip_clk_pll *pll) { u32 pllcon; - int delay = 24000000; - - /* poll check the lock status in rk3399 xPLLCON2 */ - while (delay > 0) { - pllcon = readl_relaxed(pll->reg_base + RK3399_PLLCON(2)); - if (pllcon & RK3399_PLLCON2_LOCK_STATUS) - return 0; + int ret; - delay--; - } + /* + * Lock time typical 250, max 500 input clock cycles @24MHz + * So define a very safe maximum of 1000us, meaning 24000 cycles. 
+ */ + ret = readl_relaxed_poll_timeout(pll->reg_base + RK3399_PLLCON(2), + pllcon, + pllcon & RK3399_PLLCON2_LOCK_STATUS, + 0, 1000); + if (ret) + pr_err("%s: timeout waiting for pll to lock\n", __func__); - pr_err("%s: timeout waiting for pll to lock\n", __func__); - return -ETIMEDOUT; + return ret; } static void rockchip_rk3399_pll_get_params(struct rockchip_clk_pll *pll, diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index 77aebfb1d6d5..730020fcc7fe 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -751,6 +751,7 @@ static const char *const rk3188_critical_clocks[] __initconst = { "pclk_peri", "hclk_cpubus", "hclk_vio_bus", + "sclk_mac_lbtest", }; static struct rockchip_clk_provider *__init rk3188_common_clk_init(struct device_node *np) diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index cc2a177bbdbf..93c794695c46 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -15,6 +15,11 @@ #define RK3288_GRF_SOC_CON(x) (0x244 + x * 4) #define RK3288_GRF_SOC_STATUS1 0x284 +enum rk3288_variant { + RK3288_CRU, + RK3288W_CRU, +}; + enum rk3288_plls { apll, dpll, cpll, gpll, npll, }; @@ -425,8 +430,6 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS, RK3288_CLKGATE_CON(3), 0, GFLAGS), - DIV(0, "hclk_vio", "aclk_vio0", 0, - RK3288_CLKSEL_CON(28), 8, 5, DFLAGS), COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED, RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS, RK3288_CLKGATE_CON(3), 2, GFLAGS), @@ -819,6 +822,16 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { INVERTER(0, "pclk_isp", "pclk_isp_in", RK3288_CLKSEL_CON(29), 3, IFLAGS), }; +static struct rockchip_clk_branch rk3288w_hclkvio_branch[] __initdata = { + DIV(0, "hclk_vio", "aclk_vio1", 0, + RK3288_CLKSEL_CON(28), 8, 5, DFLAGS), +}; + +static struct rockchip_clk_branch rk3288_hclkvio_branch[] __initdata = { + DIV(0, "hclk_vio", "aclk_vio0", 0, + RK3288_CLKSEL_CON(28), 8, 5, DFLAGS), +}; + static const char *const rk3288_critical_clocks[] __initconst = { "aclk_cpu", "aclk_peri", @@ -914,7 +927,8 @@ static struct syscore_ops rk3288_clk_syscore_ops = { .resume = rk3288_clk_resume, }; -static void __init rk3288_clk_init(struct device_node *np) +static void __init rk3288_common_init(struct device_node *np, + enum rk3288_variant soc) { struct rockchip_clk_provider *ctx; @@ -936,6 +950,14 @@ static void __init rk3288_clk_init(struct device_node *np) RK3288_GRF_SOC_STATUS1); rockchip_clk_register_branches(ctx, rk3288_clk_branches, ARRAY_SIZE(rk3288_clk_branches)); + + if (soc == RK3288W_CRU) + rockchip_clk_register_branches(ctx, rk3288w_hclkvio_branch, + ARRAY_SIZE(rk3288w_hclkvio_branch)); + else + rockchip_clk_register_branches(ctx, rk3288_hclkvio_branch, + ARRAY_SIZE(rk3288_hclkvio_branch)); + rockchip_clk_protect_critical(rk3288_critical_clocks, ARRAY_SIZE(rk3288_critical_clocks)); @@ -954,4 +976,15 @@ static void __init rk3288_clk_init(struct device_node *np) rockchip_clk_of_add_provider(np, ctx); } + +static void __init rk3288_clk_init(struct device_node *np) +{ + rk3288_common_init(np, RK3288_CRU); +} CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init); + +static void __init rk3288w_clk_init(struct device_node *np) +{ + rk3288_common_init(np, RK3288W_CRU); +} 
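Note on the Rockchip clk-pll hunks above: the conversion from an iteration-counted busy loop (delay = 24000000) to regmap_read_poll_timeout()/readl_relaxed_poll_timeout() bounds the wait in wall-clock time rather than loop iterations; with a 0 us poll interval and a 1000 us timeout it comfortably covers the documented 250-500 input-clock-cycle lock time at 24 MHz. The sketch below is only an open-coded, user-space illustration of that poll-with-timeout pattern, not the kernel macro itself; fake_pll_read(), LOCK_BIT and now_us() are invented for the example.

/*
 * Illustrative only: roughly what readl_relaxed_poll_timeout() /
 * regmap_read_poll_timeout() do for the PLL lock wait. The register
 * accessor and lock bit below are stand-ins, not kernel symbols.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define LOCK_BIT (1u << 10)		/* cf. RK3036_PLLCON1_LOCK_STATUS = BIT(10) */

static uint32_t fake_pll_read(void)	/* stand-in for readl_relaxed() */
{
	static int calls;
	return ++calls > 3 ? LOCK_BIT : 0;	/* "locks" after a few reads */
}

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Poll until the lock bit is set or timeout_us of wall-clock time passes. */
static int wait_for_lock(int64_t timeout_us)
{
	int64_t deadline = now_us() + timeout_us;
	uint32_t val;

	do {
		val = fake_pll_read();
		if (val & LOCK_BIT)
			return 0;
	} while (now_us() < deadline);

	/* one final read after the deadline, as the kernel helpers do */
	val = fake_pll_read();
	return (val & LOCK_BIT) ? 0 : -ETIMEDOUT;
}

int main(void)
{
	printf("wait_for_lock: %d\n", wait_for_lock(1000));
	return 0;
}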
+CLK_OF_DECLARE(rk3288w_cru, "rockchip,rk3288w-cru", rk3288w_clk_init); diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c index c186a1985bf4..2429b7c2a8b3 100644 --- a/drivers/clk/rockchip/clk-rk3328.c +++ b/drivers/clk/rockchip/clk-rk3328.c @@ -808,22 +808,22 @@ static struct rockchip_clk_branch rk3328_clk_branches[] __initdata = { MMC(SCLK_SDMMC_DRV, "sdmmc_drv", "clk_sdmmc", RK3328_SDMMC_CON0, 1), MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "clk_sdmmc", - RK3328_SDMMC_CON1, 0), + RK3328_SDMMC_CON1, 1), MMC(SCLK_SDIO_DRV, "sdio_drv", "clk_sdio", RK3328_SDIO_CON0, 1), MMC(SCLK_SDIO_SAMPLE, "sdio_sample", "clk_sdio", - RK3328_SDIO_CON1, 0), + RK3328_SDIO_CON1, 1), MMC(SCLK_EMMC_DRV, "emmc_drv", "clk_emmc", RK3328_EMMC_CON0, 1), MMC(SCLK_EMMC_SAMPLE, "emmc_sample", "clk_emmc", - RK3328_EMMC_CON1, 0), + RK3328_EMMC_CON1, 1), MMC(SCLK_SDMMC_EXT_DRV, "sdmmc_ext_drv", "clk_sdmmc_ext", RK3328_SDMMC_EXT_CON0, 1), MMC(SCLK_SDMMC_EXT_SAMPLE, "sdmmc_ext_sample", "clk_sdmmc_ext", - RK3328_SDMMC_EXT_CON1, 0), + RK3328_SDMMC_EXT_CON1, 1), }; static const char *const rk3328_critical_clocks[] __initconst = { diff --git a/drivers/clk/sirf/clk-atlas6.c b/drivers/clk/sirf/clk-atlas6.c index c84d5bab7ac2..b95483bb6a5e 100644 --- a/drivers/clk/sirf/clk-atlas6.c +++ b/drivers/clk/sirf/clk-atlas6.c @@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np) for (i = pll1; i < maxclk; i++) { atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]); - BUG_ON(!atlas6_clks[i]); + BUG_ON(IS_ERR(atlas6_clks[i])); } clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu"); clk_register_clkdev(atlas6_clks[io], NULL, "io"); diff --git a/drivers/clk/socfpga/clk-agilex.c b/drivers/clk/socfpga/clk-agilex.c index 699527f7e764..8fb12cbe0208 100644 --- a/drivers/clk/socfpga/clk-agilex.c +++ b/drivers/clk/socfpga/clk-agilex.c @@ -252,7 +252,7 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = { 0, 0, 0, 0, 0x30, 0, 0}, { AGILEX_MPU_PERIPH_CLK, "mpu_periph_clk", "mpu_clk", NULL, 1, 0, 0x24, 0, 0, 0, 0, 0, 0, 4}, - { AGILEX_MPU_L2RAM_CLK, "mpu_l2ram_clk", "mpu_clk", NULL, 1, 0, 0x24, + { AGILEX_MPU_CCU_CLK, "mpu_ccu_clk", "mpu_clk", NULL, 1, 0, 0x24, 0, 0, 0, 0, 0, 0, 2}, { AGILEX_L4_MAIN_CLK, "l4_main_clk", "noc_clk", NULL, 1, 0, 0x24, 1, 0x44, 0, 2, 0, 0, 0}, @@ -294,8 +294,12 @@ static const struct stratix10_gate_clock agilex_gate_clks[] = { 8, 0, 0, 0, 0, 0, 0}, { AGILEX_SPI_M_CLK, "spi_m_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, 9, 0, 0, 0, 0, 0, 0}, - { AGILEX_NAND_CLK, "nand_clk", "l4_main_clk", NULL, 1, 0, 0x7C, + { AGILEX_NAND_X_CLK, "nand_x_clk", "l4_mp_clk", NULL, 1, 0, 0x7C, 10, 0, 0, 0, 0, 0, 0}, + { AGILEX_NAND_CLK, "nand_clk", "nand_x_clk", NULL, 1, 0, 0x7C, + 10, 0, 0, 0, 0, 0, 4}, + { AGILEX_NAND_ECC_CLK, "nand_ecc_clk", "nand_x_clk", NULL, 1, 0, 0x7C, + 10, 0, 0, 0, 0, 0, 4}, }; static int agilex_clk_register_c_perip(const struct stratix10_perip_c_clock *clks, diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index 0b212cf2e794..f180c055d33f 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -327,16 +327,26 @@ int tegra_pll_wait_for_lock(struct tegra_clk_pll *pll) return clk_pll_wait_for_lock(pll); } +static bool pllm_clk_is_gated_by_pmc(struct tegra_clk_pll *pll) +{ + u32 val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE); + + return (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) && + !(val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE); +} + static int clk_pll_is_enabled(struct clk_hw *hw) { struct tegra_clk_pll *pll = 
to_clk_pll(hw); u32 val; - if (pll->params->flags & TEGRA_PLLM) { - val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE); - if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) - return val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE ? 1 : 0; - } + /* + * Power Management Controller (PMC) can override the PLLM clock + * settings, including the enable-state. The PLLM is enabled when + * PLLM's CaR state is ON and when PLLM isn't gated by PMC. + */ + if ((pll->params->flags & TEGRA_PLLM) && pllm_clk_is_gated_by_pmc(pll)) + return 0; val = pll_readl_base(pll); diff --git a/drivers/clk/versatile/icst.c b/drivers/clk/versatile/icst.c index ba4b2d22ec97..307cb3774f87 100644 --- a/drivers/clk/versatile/icst.c +++ b/drivers/clk/versatile/icst.c @@ -5,7 +5,7 @@ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. * * Support functions for calculating clocks/divisors for the ICST307 - * clock generators. See http://www.idt.com/ for more information + * clock generators. See https://www.idt.com/ for more information * on these devices. * * This is an almost identical implementation to the ICST525 clock generator. diff --git a/drivers/clk/versatile/icst.h b/drivers/clk/versatile/icst.h index 73a3062b4535..29622768b02a 100644 --- a/drivers/clk/versatile/icst.h +++ b/drivers/clk/versatile/icst.h @@ -3,7 +3,7 @@ * Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. * * Support functions for calculating clocks/divisors for the ICST - * clock generators. See http://www.idt.com/ for more information + * clock generators. See https://www.idt.com/ for more information * on these devices. */ #ifndef ICST_H diff --git a/drivers/clk/x86/clk-cgu-pll.c b/drivers/clk/x86/clk-cgu-pll.c index c03cc6b85b9f..3179557b5f78 100644 --- a/drivers/clk/x86/clk-cgu-pll.c +++ b/drivers/clk/x86/clk-cgu-pll.c @@ -128,7 +128,7 @@ lgm_clk_register_pll(struct lgm_clk_provider *ctx, pll->hw.init = &init; hw = &pll->hw; - ret = clk_hw_register(dev, hw); + ret = devm_clk_hw_register(dev, hw); if (ret) return ERR_PTR(ret); diff --git a/drivers/clk/x86/clk-cgu.c b/drivers/clk/x86/clk-cgu.c index 56af0e04ec1e..33de600e0c38 100644 --- a/drivers/clk/x86/clk-cgu.c +++ b/drivers/clk/x86/clk-cgu.c @@ -119,7 +119,7 @@ lgm_clk_register_mux(struct lgm_clk_provider *ctx, mux->hw.init = &init; hw = &mux->hw; - ret = clk_hw_register(dev, hw); + ret = devm_clk_hw_register(dev, hw); if (ret) return ERR_PTR(ret); @@ -247,7 +247,7 @@ lgm_clk_register_divider(struct lgm_clk_provider *ctx, div->hw.init = &init; hw = &div->hw; - ret = clk_hw_register(dev, hw); + ret = devm_clk_hw_register(dev, hw); if (ret) return ERR_PTR(ret); @@ -361,7 +361,7 @@ lgm_clk_register_gate(struct lgm_clk_provider *ctx, gate->hw.init = &init; hw = &gate->hw; - ret = clk_hw_register(dev, hw); + ret = devm_clk_hw_register(dev, hw); if (ret) return ERR_PTR(ret); @@ -420,18 +420,14 @@ lgm_clk_ddiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct lgm_clk_ddiv *ddiv = to_lgm_clk_ddiv(hw); unsigned int div0, div1, exdiv; - unsigned long flags; u64 prate; - spin_lock_irqsave(&ddiv->lock, flags); div0 = lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift0, ddiv->width0) + 1; div1 = lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift1, ddiv->width1) + 1; exdiv = lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, ddiv->width2); - spin_unlock_irqrestore(&ddiv->lock, flags); - prate = (u64)parent_rate; do_div(prate, div0); do_div(prate, div1); @@ -548,24 +544,21 @@ lgm_clk_ddiv_round_rate(struct clk_hw *hw, unsigned long rate, div = div * 2; div = 
DIV_ROUND_CLOSEST_ULL((u64)div, 5); } + spin_unlock_irqrestore(&ddiv->lock, flags); - if (div <= 0) { - spin_unlock_irqrestore(&ddiv->lock, flags); + if (div <= 0) return *prate; - } - if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) { - if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) { - spin_unlock_irqrestore(&ddiv->lock, flags); + if (lgm_clk_get_ddiv_val(div, &ddiv1, &ddiv2) != 0) + if (lgm_clk_get_ddiv_val(div + 1, &ddiv1, &ddiv2) != 0) return -EINVAL; - } - } rate64 = *prate; do_div(rate64, ddiv1); do_div(rate64, ddiv2); /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */ + spin_lock_irqsave(&ddiv->lock, flags); if (lgm_get_clk_val(ddiv->membase, ddiv->reg, ddiv->shift2, 1)) { rate64 = rate64 * 2; rate64 = DIV_ROUND_CLOSEST_ULL(rate64, 5); @@ -588,19 +581,18 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, unsigned int nr_clk) { struct device *dev = ctx->dev; - struct clk_init_data init = {}; - struct lgm_clk_ddiv *ddiv; struct clk_hw *hw; unsigned int idx; int ret; for (idx = 0; idx < nr_clk; idx++, list++) { - ddiv = NULL; + struct clk_init_data init = {}; + struct lgm_clk_ddiv *ddiv; + ddiv = devm_kzalloc(dev, sizeof(*ddiv), GFP_KERNEL); if (!ddiv) return -ENOMEM; - memset(&init, 0, sizeof(init)); init.name = list->name; init.ops = &lgm_clk_ddiv_ops; init.flags = list->flags; @@ -624,7 +616,7 @@ int lgm_clk_register_ddiv(struct lgm_clk_provider *ctx, ddiv->hw.init = &init; hw = &ddiv->hw; - ret = clk_hw_register(dev, hw); + ret = devm_clk_hw_register(dev, hw); if (ret) { dev_err(dev, "register clk: %s failed!\n", list->name); return ret; diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 2ed8b4361d95..3576ad7bd380 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -291,6 +291,10 @@ config CLKSRC_STM32 select CLKSRC_MMIO select TIMER_OF +config CLKSRC_STM32_LP + bool "Low power clocksource for STM32 SoCs" + depends on MFD_STM32_LPTIMER || COMPILE_TEST + config CLKSRC_MPS2 bool "Clocksource for MPS2 SoCs" if COMPILE_TEST depends on GENERIC_SCHED_CLOCK diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 3994e221e262..eaedb7240ae7 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -45,6 +45,7 @@ obj-$(CONFIG_BCM_KONA_TIMER) += bcm_kona_timer.o obj-$(CONFIG_CADENCE_TTC_TIMER) += timer-cadence-ttc.o obj-$(CONFIG_CLKSRC_EFM32) += timer-efm32.o obj-$(CONFIG_CLKSRC_STM32) += timer-stm32.o +obj-$(CONFIG_CLKSRC_STM32_LP) += timer-stm32-lp.o obj-$(CONFIG_CLKSRC_EXYNOS_MCT) += exynos_mct.o obj-$(CONFIG_CLKSRC_LPC32XX) += timer-lpc32xx.o obj-$(CONFIG_CLKSRC_MPS2) += mps2-timer.o diff --git a/drivers/clocksource/timer-stm32-lp.c b/drivers/clocksource/timer-stm32-lp.c new file mode 100644 index 000000000000..db2841d0beb8 --- /dev/null +++ b/drivers/clocksource/timer-stm32-lp.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2019 - All Rights Reserved + * Authors: Benjamin Gaignard <benjamin.gaignard@st.com> for STMicroelectronics. + * Pascal Paillet <p.paillet@st.com> for STMicroelectronics. 
+ */ + +#include <linux/clk.h> +#include <linux/clockchips.h> +#include <linux/interrupt.h> +#include <linux/mfd/stm32-lptimer.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/pm_wakeirq.h> + +#define CFGR_PSC_OFFSET 9 +#define STM32_LP_RATING 1000 +#define STM32_TARGET_CLKRATE (32000 * HZ) +#define STM32_LP_MAX_PSC 7 + +struct stm32_lp_private { + struct regmap *reg; + struct clock_event_device clkevt; + unsigned long period; + struct device *dev; +}; + +static struct stm32_lp_private* +to_priv(struct clock_event_device *clkevt) +{ + return container_of(clkevt, struct stm32_lp_private, clkevt); +} + +static int stm32_clkevent_lp_shutdown(struct clock_event_device *clkevt) +{ + struct stm32_lp_private *priv = to_priv(clkevt); + + regmap_write(priv->reg, STM32_LPTIM_CR, 0); + regmap_write(priv->reg, STM32_LPTIM_IER, 0); + /* clear pending flags */ + regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF); + + return 0; +} + +static int stm32_clkevent_lp_set_timer(unsigned long evt, + struct clock_event_device *clkevt, + int is_periodic) +{ + struct stm32_lp_private *priv = to_priv(clkevt); + + /* disable LPTIMER to be able to write into IER register*/ + regmap_write(priv->reg, STM32_LPTIM_CR, 0); + /* enable ARR interrupt */ + regmap_write(priv->reg, STM32_LPTIM_IER, STM32_LPTIM_ARRMIE); + /* enable LPTIMER to be able to write into ARR register */ + regmap_write(priv->reg, STM32_LPTIM_CR, STM32_LPTIM_ENABLE); + /* set next event counter */ + regmap_write(priv->reg, STM32_LPTIM_ARR, evt); + + /* start counter */ + if (is_periodic) + regmap_write(priv->reg, STM32_LPTIM_CR, + STM32_LPTIM_CNTSTRT | STM32_LPTIM_ENABLE); + else + regmap_write(priv->reg, STM32_LPTIM_CR, + STM32_LPTIM_SNGSTRT | STM32_LPTIM_ENABLE); + + return 0; +} + +static int stm32_clkevent_lp_set_next_event(unsigned long evt, + struct clock_event_device *clkevt) +{ + return stm32_clkevent_lp_set_timer(evt, clkevt, + clockevent_state_periodic(clkevt)); +} + +static int stm32_clkevent_lp_set_periodic(struct clock_event_device *clkevt) +{ + struct stm32_lp_private *priv = to_priv(clkevt); + + return stm32_clkevent_lp_set_timer(priv->period, clkevt, true); +} + +static int stm32_clkevent_lp_set_oneshot(struct clock_event_device *clkevt) +{ + struct stm32_lp_private *priv = to_priv(clkevt); + + return stm32_clkevent_lp_set_timer(priv->period, clkevt, false); +} + +static irqreturn_t stm32_clkevent_lp_irq_handler(int irq, void *dev_id) +{ + struct clock_event_device *clkevt = (struct clock_event_device *)dev_id; + struct stm32_lp_private *priv = to_priv(clkevt); + + regmap_write(priv->reg, STM32_LPTIM_ICR, STM32_LPTIM_ARRMCF); + + if (clkevt->event_handler) + clkevt->event_handler(clkevt); + + return IRQ_HANDLED; +} + +static void stm32_clkevent_lp_set_prescaler(struct stm32_lp_private *priv, + unsigned long *rate) +{ + int i; + + for (i = 0; i <= STM32_LP_MAX_PSC; i++) { + if (DIV_ROUND_CLOSEST(*rate, 1 << i) < STM32_TARGET_CLKRATE) + break; + } + + regmap_write(priv->reg, STM32_LPTIM_CFGR, i << CFGR_PSC_OFFSET); + + /* Adjust rate and period given the prescaler value */ + *rate = DIV_ROUND_CLOSEST(*rate, (1 << i)); + priv->period = DIV_ROUND_UP(*rate, HZ); +} + +static void stm32_clkevent_lp_init(struct stm32_lp_private *priv, + struct device_node *np, unsigned long rate) +{ + priv->clkevt.name = np->full_name; + priv->clkevt.cpumask = cpu_possible_mask; + priv->clkevt.features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT; + 
priv->clkevt.set_state_shutdown = stm32_clkevent_lp_shutdown; + priv->clkevt.set_state_periodic = stm32_clkevent_lp_set_periodic; + priv->clkevt.set_state_oneshot = stm32_clkevent_lp_set_oneshot; + priv->clkevt.set_next_event = stm32_clkevent_lp_set_next_event; + priv->clkevt.rating = STM32_LP_RATING; + + clockevents_config_and_register(&priv->clkevt, rate, 0x1, + STM32_LPTIM_MAX_ARR); +} + +static int stm32_clkevent_lp_probe(struct platform_device *pdev) +{ + struct stm32_lptimer *ddata = dev_get_drvdata(pdev->dev.parent); + struct stm32_lp_private *priv; + unsigned long rate; + int ret, irq; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->reg = ddata->regmap; + ret = clk_prepare_enable(ddata->clk); + if (ret) + return -EINVAL; + + rate = clk_get_rate(ddata->clk); + if (!rate) { + ret = -EINVAL; + goto out_clk_disable; + } + + irq = platform_get_irq(to_platform_device(pdev->dev.parent), 0); + if (irq <= 0) { + ret = irq; + goto out_clk_disable; + } + + if (of_property_read_bool(pdev->dev.parent->of_node, "wakeup-source")) { + ret = device_init_wakeup(&pdev->dev, true); + if (ret) + goto out_clk_disable; + + ret = dev_pm_set_wake_irq(&pdev->dev, irq); + if (ret) + goto out_clk_disable; + } + + ret = devm_request_irq(&pdev->dev, irq, stm32_clkevent_lp_irq_handler, + IRQF_TIMER, pdev->name, &priv->clkevt); + if (ret) + goto out_clk_disable; + + stm32_clkevent_lp_set_prescaler(priv, &rate); + + stm32_clkevent_lp_init(priv, pdev->dev.parent->of_node, rate); + + priv->dev = &pdev->dev; + + return 0; + +out_clk_disable: + clk_disable_unprepare(ddata->clk); + return ret; +} + +static int stm32_clkevent_lp_remove(struct platform_device *pdev) +{ + return -EBUSY; /* cannot unregister clockevent */ +} + +static const struct of_device_id stm32_clkevent_lp_of_match[] = { + { .compatible = "st,stm32-lptimer-timer", }, + {}, +}; +MODULE_DEVICE_TABLE(of, stm32_clkevent_lp_of_match); + +static struct platform_driver stm32_clkevent_lp_driver = { + .probe = stm32_clkevent_lp_probe, + .remove = stm32_clkevent_lp_remove, + .driver = { + .name = "stm32-lptimer-timer", + .of_match_table = of_match_ptr(stm32_clkevent_lp_of_match), + }, +}; +module_platform_driver(stm32_clkevent_lp_driver); + +MODULE_ALIAS("platform:stm32-lptimer-timer"); +MODULE_DESCRIPTION("STMicroelectronics STM32 clockevent low power driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/crypto/virtio/virtio_crypto_core.c b/drivers/crypto/virtio/virtio_crypto_core.c index 0c66d6193ca2..080955a1dd9c 100644 --- a/drivers/crypto/virtio/virtio_crypto_core.c +++ b/drivers/crypto/virtio/virtio_crypto_core.c @@ -204,8 +204,8 @@ static int virtcrypto_update_status(struct virtio_crypto *vcrypto) u32 status; int err; - virtio_cread(vcrypto->vdev, - struct virtio_crypto_config, status, &status); + virtio_cread_le(vcrypto->vdev, + struct virtio_crypto_config, status, &status); /* * Unknown status bits would be a host error and the driver @@ -323,31 +323,31 @@ static int virtcrypto_probe(struct virtio_device *vdev) if (!vcrypto) return -ENOMEM; - virtio_cread(vdev, struct virtio_crypto_config, + virtio_cread_le(vdev, struct virtio_crypto_config, max_dataqueues, &max_data_queues); if (max_data_queues < 1) max_data_queues = 1; - virtio_cread(vdev, struct virtio_crypto_config, - max_cipher_key_len, &max_cipher_key_len); - virtio_cread(vdev, struct virtio_crypto_config, - max_auth_key_len, &max_auth_key_len); - virtio_cread(vdev, struct virtio_crypto_config, - max_size, &max_size); - 
virtio_cread(vdev, struct virtio_crypto_config, - crypto_services, &crypto_services); - virtio_cread(vdev, struct virtio_crypto_config, - cipher_algo_l, &cipher_algo_l); - virtio_cread(vdev, struct virtio_crypto_config, - cipher_algo_h, &cipher_algo_h); - virtio_cread(vdev, struct virtio_crypto_config, - hash_algo, &hash_algo); - virtio_cread(vdev, struct virtio_crypto_config, - mac_algo_l, &mac_algo_l); - virtio_cread(vdev, struct virtio_crypto_config, - mac_algo_h, &mac_algo_h); - virtio_cread(vdev, struct virtio_crypto_config, - aead_algo, &aead_algo); + virtio_cread_le(vdev, struct virtio_crypto_config, + max_cipher_key_len, &max_cipher_key_len); + virtio_cread_le(vdev, struct virtio_crypto_config, + max_auth_key_len, &max_auth_key_len); + virtio_cread_le(vdev, struct virtio_crypto_config, + max_size, &max_size); + virtio_cread_le(vdev, struct virtio_crypto_config, + crypto_services, &crypto_services); + virtio_cread_le(vdev, struct virtio_crypto_config, + cipher_algo_l, &cipher_algo_l); + virtio_cread_le(vdev, struct virtio_crypto_config, + cipher_algo_h, &cipher_algo_h); + virtio_cread_le(vdev, struct virtio_crypto_config, + hash_algo, &hash_algo); + virtio_cread_le(vdev, struct virtio_crypto_config, + mac_algo_l, &mac_algo_l); + virtio_cread_le(vdev, struct virtio_crypto_config, + mac_algo_h, &mac_algo_h); + virtio_cread_le(vdev, struct virtio_crypto_config, + aead_algo, &aead_algo); /* Add virtio crypto device to global table */ err = virtcrypto_devmgr_add_dev(vcrypto); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index f50828526331..c82cbcb64202 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -80,14 +80,14 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, int err, id; if (blocksize != PAGE_SIZE) { - pr_debug("%s: error: unsupported blocksize for dax\n", + pr_info("%s: error: unsupported blocksize for dax\n", bdevname(bdev, buf)); return false; } err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff); if (err) { - pr_debug("%s: error: unaligned partition for dax\n", + pr_info("%s: error: unaligned partition for dax\n", bdevname(bdev, buf)); return false; } @@ -95,7 +95,7 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512; err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end); if (err) { - pr_debug("%s: error: unaligned partition for dax\n", + pr_info("%s: error: unaligned partition for dax\n", bdevname(bdev, buf)); return false; } @@ -103,11 +103,11 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, id = dax_read_lock(); len = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn); len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn); - dax_read_unlock(id); if (len < 1 || len2 < 1) { - pr_debug("%s: error: dax access failed (%ld)\n", + pr_info("%s: error: dax access failed (%ld)\n", bdevname(bdev, buf), len < 1 ? 
len : len2); + dax_read_unlock(id); return false; } @@ -137,9 +137,10 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev, put_dev_pagemap(end_pgmap); } + dax_read_unlock(id); if (!dax_enabled) { - pr_debug("%s: error: dax support not enabled\n", + pr_info("%s: error: dax support not enabled\n", bdevname(bdev, buf)); return false; } diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 07f5273207e7..434a3314fb0e 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -52,12 +52,6 @@ DEFINE_WD_CLASS(reservation_ww_class); EXPORT_SYMBOL(reservation_ww_class); -struct lock_class_key reservation_seqcount_class; -EXPORT_SYMBOL(reservation_seqcount_class); - -const char reservation_seqcount_string[] = "reservation_seqcount"; -EXPORT_SYMBOL(reservation_seqcount_string); - /** * dma_resv_list_alloc - allocate fence list * @shared_max: number of fences we need space for @@ -143,9 +137,8 @@ subsys_initcall(dma_resv_lockdep); void dma_resv_init(struct dma_resv *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); + seqcount_ww_mutex_init(&obj->seq, &obj->lock); - __seqcount_init(&obj->seq, reservation_seqcount_string, - &reservation_seqcount_class); RCU_INIT_POINTER(obj->fence, NULL); RCU_INIT_POINTER(obj->fence_excl, NULL); } @@ -275,7 +268,6 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence) fobj = dma_resv_get_list(obj); count = fobj->shared_count; - preempt_disable(); write_seqcount_begin(&obj->seq); for (i = 0; i < count; ++i) { @@ -297,7 +289,6 @@ replace: smp_store_mb(fobj->shared_count, count); write_seqcount_end(&obj->seq); - preempt_enable(); dma_fence_put(old); } EXPORT_SYMBOL(dma_resv_add_shared_fence); @@ -324,14 +315,12 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) if (fence) dma_fence_get(fence); - preempt_disable(); write_seqcount_begin(&obj->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(obj->fence_excl, fence); if (old) old->shared_count = 0; write_seqcount_end(&obj->seq); - preempt_enable(); /* inplace update, no shared fences */ while (i--) @@ -409,13 +398,11 @@ retry: src_list = dma_resv_get_list(dst); old = dma_resv_get_excl(dst); - preempt_disable(); write_seqcount_begin(&dst->seq); /* write_seqcount_begin provides the necessary memory barrier */ RCU_INIT_POINTER(dst->fence_excl, new); RCU_INIT_POINTER(dst->fence, dst_list); write_seqcount_end(&dst->seq); - preempt_enable(); dma_resv_list_free(src_list); dma_fence_put(old); diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index df0935a0021a..8a53f5c96b16 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -1195,13 +1195,13 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) /* disable relaxed ordering */ err = pcie_capability_read_word(pdev, IOAT_DEVCTRL_OFFSET, &val16); if (err) - return err; + return pcibios_err_to_errno(err); /* clear relaxed ordering enable */ val16 &= ~IOAT_DEVCTRL_ROE; err = pcie_capability_write_word(pdev, IOAT_DEVCTRL_OFFSET, val16); if (err) - return err; + return pcibios_err_to_errno(err); if (ioat_dma->cap & IOAT_CAP_DPS) writeb(ioat_pending_level + 1, @@ -1267,7 +1267,7 @@ static void ioat_resume(struct ioatdma_device *ioat_dma) #define DRV_NAME "ioatdma" static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev, - enum pci_channel_state error) + pci_channel_state_t error) { dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error); diff --git a/drivers/dma/sf-pdma/sf-pdma.c 
b/drivers/dma/sf-pdma/sf-pdma.c index 5c118c7e02bd..6e530dca6d9e 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -20,6 +20,7 @@ #include <linux/mod_devicetable.h> #include <linux/dma-mapping.h> #include <linux/of.h> +#include <linux/slab.h> #include "sf-pdma.h" diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c index 67087dbe2f9f..962b6e05287b 100644 --- a/drivers/dma/st_fdma.c +++ b/drivers/dma/st_fdma.c @@ -15,6 +15,7 @@ #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/remoteproc.h> +#include <linux/slab.h> #include "st_fdma.h" diff --git a/drivers/dma/uniphier-xdmac.c b/drivers/dma/uniphier-xdmac.c index 7b2f8a8c2d31..16b19654873d 100644 --- a/drivers/dma/uniphier-xdmac.c +++ b/drivers/dma/uniphier-xdmac.c @@ -12,6 +12,7 @@ #include <linux/of.h> #include <linux/of_dma.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include "dmaengine.h" #include "virt-dma.h" diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c index e7e36aab2386..b4b9ce97f415 100644 --- a/drivers/firmware/arm_sdei.c +++ b/drivers/firmware/arm_sdei.c @@ -1136,15 +1136,14 @@ int sdei_event_handler(struct pt_regs *regs, * access kernel memory. * Do the same here because this doesn't come via the same entry code. */ - orig_addr_limit = get_fs(); - set_fs(USER_DS); + orig_addr_limit = force_uaccess_begin(); err = arg->callback(event_num, regs, arg->callback_arg); if (err) pr_err_ratelimited("event %u on CPU %u failed with error: %d\n", event_num, smp_processor_id(), err); - set_fs(orig_addr_limit); + force_uaccess_end(orig_addr_limit); return err; } diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index 75daaf20374e..296b18fbd7a2 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,8 +28,8 @@ cflags-$(CONFIG_EFI_GENERIC_STUB) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -Os -DDISABLE_BRANCH_PROFILING \ -include $(srctree)/drivers/firmware/efi/libstub/hidden.h \ -D__NO_FORTIFY \ - $(call cc-option,-ffreestanding) \ - $(call cc-option,-fno-stack-protector) \ + -ffreestanding \ + -fno-stack-protector \ $(call cc-option,-fno-addrsig) \ -D__DISABLE_EXPORTS diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 625c8fdceabf..8f2fb4c562da 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -211,6 +211,20 @@ rpi_register_hwmon_driver(struct device *dev, struct rpi_firmware *fw) static void rpi_register_clk_driver(struct device *dev) { + struct device_node *firmware; + + /* + * Earlier DTs don't have a node for the firmware clocks but + * rely on us creating a platform device by hand. If we do + * have a node for the firmware clocks, just bail out here. 
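In isolation, the decision this comment describes is just a presence test: register the clock platform device by hand only when the device tree does not already carry a "raspberrypi,firmware-clocks" node. A rough user-space sketch of that logic, where dt_describes_firmware_clocks() and register_clk_device_by_hand() are hypothetical stand-ins for of_get_compatible_child()/of_node_put() and platform_device_register_data():

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for of_get_compatible_child() + of_node_put() */
static bool dt_describes_firmware_clocks(void)
{
	/* pretend this DT predates the "raspberrypi,firmware-clocks" node */
	return false;
}

/* hypothetical stand-in for platform_device_register_data() */
static void register_clk_device_by_hand(void)
{
	printf("registering raspberrypi-clk platform device by hand\n");
}

static void rpi_register_clk_sketch(void)
{
	/* newer DTs describe the clocks themselves, so there is nothing to do */
	if (dt_describes_firmware_clocks())
		return;

	/* older DTs rely on the firmware driver creating the device */
	register_clk_device_by_hand();
}

int main(void)
{
	rpi_register_clk_sketch();
	return 0;
}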
+ */ + firmware = of_get_compatible_child(dev->of_node, + "raspberrypi,firmware-clocks"); + if (firmware) { + of_node_put(firmware); + return; + } + rpi_clk = platform_device_register_data(dev, "raspberrypi-clk", -1, NULL, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e5a5ba869eb4..a58af513c952 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -258,11 +258,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, new->shared_count = k; /* Install the new fence list, seqcount provides the barriers */ - preempt_disable(); write_seqcount_begin(&resv->seq); RCU_INIT_POINTER(resv->fence, new); write_seqcount_end(&resv->seq); - preempt_enable(); /* Drop the references to the removed fences or move them to ef_list */ for (i = j, k = 0; i < old->shared_count; ++i) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index aa5b54e5a1d7..eb7cfe87042e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2574,6 +2574,9 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) AMD_IP_BLOCK_TYPE_IH, }; + for (i = 0; i < adev->num_ip_blocks; i++) + adev->ip_blocks[i].status.hw = false; + for (i = 0; i < ARRAY_SIZE(ip_order); i++) { int j; struct amdgpu_ip_block *block; @@ -2581,7 +2584,6 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev) for (j = 0; j < adev->num_ip_blocks; j++) { block = &adev->ip_blocks[j]; - block->status.hw = false; if (block->version->type != ip_order[i] || !block->status.valid) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 5f20cadee343..e4dbf14320b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -3212,6 +3212,12 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) return 0; + /* Skip crit temp on APU */ + if ((adev->flags & AMD_IS_APU) && (adev->family >= AMDGPU_FAMILY_CZ) && + (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || + attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr)) + return 0; + /* Skip limit attributes if DPM is not enabled */ if (!adev->pm.dpm_enabled && (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index fe7d39bb975d..7fe564275457 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -193,12 +193,18 @@ static int psp_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; psp_memory_training_fini(&adev->psp); - release_firmware(adev->psp.sos_fw); - adev->psp.sos_fw = NULL; - release_firmware(adev->psp.asd_fw); - adev->psp.asd_fw = NULL; - release_firmware(adev->psp.ta_fw); - adev->psp.ta_fw = NULL; + if (adev->psp.sos_fw) { + release_firmware(adev->psp.sos_fw); + adev->psp.sos_fw = NULL; + } + if (adev->psp.asd_fw) { + release_firmware(adev->psp.asd_fw); + adev->psp.asd_fw = NULL; + } + if (adev->psp.ta_fw) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + } if (adev->asic_type == CHIP_NAVI10) psp_sysfs_fini(adev); @@ -409,11 +415,28 @@ static int psp_clear_vf_fw(struct psp_context *psp) return ret; } +static bool psp_skip_tmr(struct psp_context *psp) +{ + switch 
(psp->adev->asic_type) { + case CHIP_NAVI12: + case CHIP_SIENNA_CICHLID: + return true; + default: + return false; + } +} + static int psp_tmr_load(struct psp_context *psp) { int ret; struct psp_gfx_cmd_resp *cmd; + /* For Navi12 and CHIP_SIENNA_CICHLID SRIOV, do not set up TMR. + * Already set up by host driver. + */ + if (amdgpu_sriov_vf(psp->adev) && psp_skip_tmr(psp)) + return 0; + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!cmd) return -ENOMEM; @@ -1987,7 +2010,7 @@ static int psp_suspend(void *handle) ret = psp_tmr_terminate(psp); if (ret) { - DRM_ERROR("Falied to terminate tmr\n"); + DRM_ERROR("Failed to terminate tmr\n"); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index e10f02ed3f65..bcce4c0be462 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1618,7 +1618,7 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) data = con->eh_data; save_count = data->count - control->num_recs; /* only new entries are saved */ - if (save_count > 0) + if (save_count > 0) { if (amdgpu_ras_eeprom_process_recods(control, &data->bps[control->num_recs], true, @@ -1627,6 +1627,9 @@ static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) return -EIO; } + dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count); + } + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 61e89247faf3..65997ffaed45 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3082,7 +3082,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), @@ -3127,7 +3127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA0_CLK_CTRL, 0xff7f0fff, 0x30000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCGTT_SPI_RA1_CLK_CTRL, 0xff7f0fff, 0x7e000100), SOC15_REG_GOLDEN_VALUE(GC, 0, mmCPF_GCR_CNTL, 0x0007ffff, 0x0000c000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000200), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG3, 0xffffffff, 0x00000280), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Sienna_Cichlid, 0x1ff1ffff, 0x00000500), @@ -3158,7 +3158,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER7_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER8_SELECT, 0xf0f001ff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_PERFCOUNTER9_SELECT, 0xf0f001ff, 0x00000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xffffffff, 0x010b0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffbfffff, 
0x00a00000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff) }; @@ -7529,6 +7529,7 @@ static int gfx_v10_0_set_powergating_state(void *handle, case CHIP_NAVI14: case CHIP_NAVI12: case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: amdgpu_gfx_off_ctrl(adev, enable); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 42f1a516005e..c41e5590a701 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -49,12 +49,11 @@ static int jpeg_v3_0_set_powergating_state(void *handle, static int jpeg_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type == CHIP_SIENNA_CICHLID) { - u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); + u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); + + if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) + return -ENOENT; - if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) - return -ENOENT; - } adev->jpeg.num_jpeg_inst = 1; jpeg_v3_0_set_dec_ring_funcs(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index ea69ae76773e..da8024c2826e 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -97,6 +97,49 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); } +static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags, address, data; + u64 r; + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + /* read low 32 bit */ + WREG32(address, reg); + (void)RREG32(address); + r = RREG32(data); + + /* read high 32 bit*/ + WREG32(address, reg + 4); + (void)RREG32(address); + r |= ((u64)RREG32(data) << 32); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) +{ + unsigned long flags, address, data; + + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + /* write low 32 bit */ + WREG32(address, reg); + (void)RREG32(address); + WREG32(data, (u32)(v & 0xffffffffULL)); + (void)RREG32(data); + + /* write high 32 bit */ + WREG32(address, reg + 4); + (void)RREG32(address); + WREG32(data, (u32)(v >> 32)); + (void)RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} + static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -319,10 +362,15 @@ nv_asic_reset_method(struct amdgpu_device *adev) dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", amdgpu_reset_method); - if (smu_baco_is_support(smu)) - return AMD_RESET_METHOD_BACO; - else + switch (adev->asic_type) { + case CHIP_SIENNA_CICHLID: return AMD_RESET_METHOD_MODE1; + default: + if (smu_baco_is_support(smu)) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_MODE1; + } } static int nv_asic_reset(struct amdgpu_device *adev) @@ -673,6 +721,8 @@ static int nv_common_early_init(void *handle) adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; adev->pcie_wreg = &nv_pcie_wreg; + adev->pcie_rreg64 = &nv_pcie_rreg64; + adev->pcie_wreg64 = &nv_pcie_wreg64; /* TODO: will add them during VCN v2 implementation */ adev->uvd_ctx_rreg = NULL; diff --git 
a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 910a4a32ff78..63e5547cfb16 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1659,7 +1659,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { .emit_ib = vcn_v2_0_dec_ring_emit_ib, .emit_fence = vcn_v2_0_dec_ring_emit_fence, .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, - .test_ring = amdgpu_vcn_dec_ring_test_ring, + .test_ring = vcn_v2_0_dec_ring_test_ring, .test_ib = amdgpu_vcn_dec_ring_test_ib, .insert_nop = vcn_v2_0_dec_ring_insert_nop, .insert_start = vcn_v2_0_dec_ring_insert_start, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 407065cd8d57..e4b33c67b634 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -97,6 +97,8 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); #if defined(CONFIG_DRM_AMD_DC_DCN3_0) #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); +#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); #endif #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" @@ -1185,10 +1187,13 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) break; #if defined(CONFIG_DRM_AMD_DC_DCN3_0) case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: dmub_asic = DMUB_ASIC_DCN30; fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; break; + case CHIP_NAVY_FLOUNDER: + dmub_asic = DMUB_ASIC_DCN30; + fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; + break; #endif default: @@ -8544,6 +8549,29 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, if (ret) goto fail; + /* Check connector changes */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + + /* Skip connectors that are disabled or part of modeset already. 
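The nv_pcie_rreg64()/nv_pcie_wreg64() helpers added above split each 64-bit register into two 32-bit accesses at reg and reg + 4, low word first. A stand-alone sketch of just the word composition, with a plain regs[] array standing in for the real index/data register pair:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2];	/* fake backing store for reg and reg + 4 */

static uint32_t read32(unsigned int idx)           { return regs[idx]; }
static void write32(unsigned int idx, uint32_t v)  { regs[idx] = v; }

static uint64_t read64(unsigned int idx)
{
	uint64_t lo = read32(idx);	/* low 32 bits first */
	uint64_t hi = read32(idx + 1);	/* then the high 32 bits */

	return lo | (hi << 32);
}

static void write64(unsigned int idx, uint64_t v)
{
	write32(idx, (uint32_t)(v & 0xffffffffULL));	/* low word */
	write32(idx + 1, (uint32_t)(v >> 32));		/* high word */
}

int main(void)
{
	write64(0, 0x123456789abcdef0ULL);
	printf("0x%llx\n", (unsigned long long)read64(0));
	return 0;
}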
*/ + if (!old_con_state->crtc && !new_con_state->crtc) + continue; + + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); + if (IS_ERR(new_crtc_state)) { + ret = PTR_ERR(new_crtc_state); + goto fail; + } + + if (dm_old_con_state->abm_level != + dm_new_con_state->abm_level) + new_crtc_state->connectors_changed = true; + } + #if defined(CONFIG_DRM_AMD_DC_DCN) if (adev->asic_type >= CHIP_NAVI10) { for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 998f729976bf..e5a6d9115949 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -35,6 +35,7 @@ #include "dmub/dmub_srv.h" #include "resource.h" #include "dsc.h" +#include "dc_link_dp.h" struct dmub_debugfs_trace_header { uint32_t entry_count; @@ -1150,7 +1151,7 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, return result; } -static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf, +static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { char *rd_buf = NULL; @@ -1186,7 +1187,7 @@ static ssize_t dp_dsc_bytes_per_pixel_read(struct file *f, char __user *buf, snprintf(rd_buf_ptr, str_len, "%d\n", - dsc_state.dsc_bytes_per_pixel); + dsc_state.dsc_bits_per_pixel); rd_buf_ptr += str_len; while (size) { @@ -1460,9 +1461,9 @@ static const struct file_operations dp_dsc_slice_height_debugfs_fops = { .llseek = default_llseek }; -static const struct file_operations dp_dsc_bytes_per_pixel_debugfs_fops = { +static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = { .owner = THIS_MODULE, - .read = dp_dsc_bytes_per_pixel_read, + .read = dp_dsc_bits_per_pixel_read, .llseek = default_llseek }; @@ -1552,7 +1553,7 @@ static const struct { {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, - {"dsc_bytes_per_pixel", &dp_dsc_bytes_per_pixel_debugfs_fops}, + {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops}, {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops}, {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 008d4d11339d..ad394aefa5d9 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2834,6 +2834,8 @@ static const struct dc_vbios_funcs vbios_funcs = { .bios_parser_destroy = bios_parser_destroy, .get_board_layout_info = bios_get_board_layout_info, + + .get_atom_dc_golden_table = NULL }; static bool bios_parser_construct( diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index b8684131151d..078b7e344185 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2079,6 +2079,85 @@ static uint16_t bios_parser_pack_data_tables( return 0; } +static struct atom_dc_golden_table_v1 *bios_get_golden_table( + struct bios_parser *bp, + uint32_t rev_major, + uint32_t rev_minor, + uint16_t *dc_golden_table_ver) +{ + struct atom_display_controller_info_v4_4 *disp_cntl_tbl_4_4 = 
NULL; + uint32_t dc_golden_offset = 0; + *dc_golden_table_ver = 0; + + if (!DATA_TABLES(dce_info)) + return NULL; + + /* ver.4.4 or higher */ + switch (rev_major) { + case 4: + switch (rev_minor) { + case 4: + disp_cntl_tbl_4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4, + DATA_TABLES(dce_info)); + if (!disp_cntl_tbl_4_4) + return NULL; + dc_golden_offset = DATA_TABLES(dce_info) + disp_cntl_tbl_4_4->dc_golden_table_offset; + *dc_golden_table_ver = disp_cntl_tbl_4_4->dc_golden_table_ver; + break; + } + break; + } + + if (!dc_golden_offset) + return NULL; + + if (*dc_golden_table_ver != 1) + return NULL; + + return GET_IMAGE(struct atom_dc_golden_table_v1, + dc_golden_offset); +} + +static enum bp_result bios_get_atom_dc_golden_table( + struct dc_bios *dcb) +{ + struct bios_parser *bp = BP_FROM_DCB(dcb); + enum bp_result result = BP_RESULT_OK; + struct atom_dc_golden_table_v1 *atom_dc_golden_table = NULL; + struct atom_common_table_header *header; + struct atom_data_revision tbl_revision; + uint16_t dc_golden_table_ver = 0; + + header = GET_IMAGE(struct atom_common_table_header, + DATA_TABLES(dce_info)); + if (!header) + return BP_RESULT_UNSUPPORTED; + + get_atom_data_table_revision(header, &tbl_revision); + + atom_dc_golden_table = bios_get_golden_table(bp, + tbl_revision.major, + tbl_revision.minor, + &dc_golden_table_ver); + + if (!atom_dc_golden_table) + return BP_RESULT_UNSUPPORTED; + + dcb->golden_table.dc_golden_table_ver = dc_golden_table_ver; + dcb->golden_table.aux_dphy_rx_control0_val = atom_dc_golden_table->aux_dphy_rx_control0_val; + dcb->golden_table.aux_dphy_rx_control1_val = atom_dc_golden_table->aux_dphy_rx_control1_val; + dcb->golden_table.aux_dphy_tx_control_val = atom_dc_golden_table->aux_dphy_tx_control_val; + dcb->golden_table.dc_gpio_aux_ctrl_0_val = atom_dc_golden_table->dc_gpio_aux_ctrl_0_val; + dcb->golden_table.dc_gpio_aux_ctrl_1_val = atom_dc_golden_table->dc_gpio_aux_ctrl_1_val; + dcb->golden_table.dc_gpio_aux_ctrl_2_val = atom_dc_golden_table->dc_gpio_aux_ctrl_2_val; + dcb->golden_table.dc_gpio_aux_ctrl_3_val = atom_dc_golden_table->dc_gpio_aux_ctrl_3_val; + dcb->golden_table.dc_gpio_aux_ctrl_4_val = atom_dc_golden_table->dc_gpio_aux_ctrl_4_val; + dcb->golden_table.dc_gpio_aux_ctrl_5_val = atom_dc_golden_table->dc_gpio_aux_ctrl_5_val; + + return result; +} + + static const struct dc_vbios_funcs vbios_funcs = { .get_connectors_number = bios_parser_get_connectors_number, @@ -2128,6 +2207,8 @@ static const struct dc_vbios_funcs vbios_funcs = { .get_board_layout_info = bios_get_board_layout_info, .pack_data_tables = bios_parser_pack_data_tables, + + .get_atom_dc_golden_table = bios_get_atom_dc_golden_table }; static bool bios_parser2_construct( diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 3fab9296918a..e133edc587d3 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -85,12 +85,77 @@ static int rv1_determine_dppclk_threshold(struct clk_mgr_internal *clk_mgr, stru return disp_clk_threshold; } -static void ramp_up_dispclk_with_dpp(struct clk_mgr_internal *clk_mgr, struct dc *dc, struct dc_clocks *new_clocks) +static void ramp_up_dispclk_with_dpp( + struct clk_mgr_internal *clk_mgr, + struct dc *dc, + struct dc_clocks *new_clocks, + bool safe_to_lower) { int i; int dispclk_to_dpp_threshold = rv1_determine_dppclk_threshold(clk_mgr, new_clocks); bool request_dpp_div = 
new_clocks->dispclk_khz > new_clocks->dppclk_khz; + /* This function changes dispclk, dppclk and dprefclk according to the + * bandwidth requirement. Its call stack is rv1_update_clocks --> + * update_clocks --> dcn10_prepare_bandwidth / dcn10_optimize_bandwidth + * --> prepare_bandwidth / optimize_bandwidth. Before the DCN hardware is + * changed, prepare_bandwidth is called first to allow enough clock and + * watermark for the change; after the DCN hardware change is done, + * optimize_bandwidth is executed to lower the clocks and save power with + * the new DCN hardware settings. + * + * The sequence in commit_planes_for_stream is: + * + * step 1: prepare_bandwidth - raise clock to have enough bandwidth + * step 2: lock_doublebuffer_enable + * step 3: pipe_control_lock(true) - make sure dchubp register changes + * do not take effect right away + * step 4: apply_ctx_for_surface - program dchubp + * step 5: pipe_control_lock(false) - dchubp register changes take effect + * step 6: optimize_bandwidth --> dc_post_update_surfaces_to_stream + * for a full update, optimize clocks to save power + * + * At the end of step 1, the dcn clocks (dprefclk, dispclk, dppclk) may + * already be changed for the new dchubp configuration, but the real dcn + * hub dchubps keep running with the old configuration until the end of + * step 5, so the clock settings at step 1 must not be lower than those + * before step 1. This is checked by two conditions: + * 1. if (should_set_clock(safe_to_lower, + * new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) || + * new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) + * 2. request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz + * + * The second condition is based on the new dchubp configuration; dppclk + * for the new dchubp may differ from dppclk before step 1. + * For example, before step 1 the dchubps are: + * pipe 0: recout=(0,40,1920,980) viewport=(0,0,1920,979) + * pipe 1: recout=(0,0,1920,1080) viewport=(0,0,1920,1080) + * and pipe 0 needs dppclk = dispclk. + * + * With the new dchubp pipe-split configuration: + * pipe 0: recout=(0,0,960,1080) viewport=(0,0,960,1080) + * pipe 1: recout=(960,0,960,1080) viewport=(960,0,960,1080) + * dppclk = dispclk / 2 is enough. + * + * dispclk and dppclk are not protected by the otg master lock; they take + * effect right after step 1. During this transition dispclk stays the + * same, but dppclk would be halved while the old dchubp configuration is + * still active between step 1 and step 6, which may cause intermittent + * p-state warnings. + * + * For new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz we must + * make sure dppclk is not lowered between step 1 and step 6. + * For new_clocks->dispclk_khz > clk_mgr_base->clks.dispclk_khz the + * display clock is raised, but we do not know the ratio of + * new_clocks->dispclk_khz to clk_mgr_base->clks.dispclk_khz, so + * new_clocks->dispclk_khz / 2 is not guaranteed to be equal to or higher + * than the old dppclk. We can ignore the power-saving difference between + * dppclk = dispclk and dppclk = dispclk / 2 between step 1 and step 6. + * As long as safe_to_lower is false, set dppclk = dispclk to simplify + * the condition check. + * TODO: review this change for other ASICs. 
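The configurations quoted above make the ratio concrete: a single pipe spanning the full 1920-wide timing needs dppclk equal to dispclk, while after a 2:1 split each DPP covers only half the width and dispclk / 2 is enough. A small sketch of that bookkeeping, deliberately simplified to "required dppclk is dispclk divided by the split factor"; the real calculation also folds in scaling ratios and blanking:

#include <stdio.h>

/*
 * Simplified model: each DPP processes (timing width / split) pixels per
 * line, so the clock it needs scales down with the number of split pipes.
 */
static unsigned int required_dppclk_khz(unsigned int dispclk_khz,
					unsigned int split)
{
	return dispclk_khz / split;
}

int main(void)
{
	unsigned int dispclk = 540000;	/* example display clock in kHz */

	/* one pipe covering a 1920-wide recout: dppclk must equal dispclk */
	printf("no split:  %u kHz\n", required_dppclk_khz(dispclk, 1));

	/* 2:1 pipe split, two 960-wide recouts: half of dispclk is enough */
	printf("2:1 split: %u kHz\n", required_dppclk_khz(dispclk, 2));

	return 0;
}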
+ **/ + if (!safe_to_lower) + request_dpp_div = false; + /* set disp clk to dpp clk threshold */ clk_mgr->funcs->set_dispclk(clk_mgr, dispclk_to_dpp_threshold); @@ -209,7 +274,7 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, /* program dispclk on = as a w/a for sleep resume clock ramping issues */ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) || new_clocks->dispclk_khz == clk_mgr_base->clks.dispclk_khz) { - ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks); + ramp_up_dispclk_with_dpp(clk_mgr, dc, new_clocks, safe_to_lower); clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; send_request_to_lower = true; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index d94fdc52be37..9133646f6d5f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -323,9 +323,10 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, /* if clock is being raised, increase refclk before lowering DTO */ if (update_dppclk || update_dispclk) dcn20_update_clocks_update_dentist(clk_mgr); - /* always update dtos unless clock is lowered and not safe to lower */ - if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) - dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); + /* There is a check inside dcn20_update_clocks_update_dpp_dto which ensures + * that we do not lower dto when it is not safe to lower. We do not need to + * compare the current and new dppclk before calling this function.*/ + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); } } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index ef0b5941bc50..92eb1ca1634f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1250,6 +1250,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c int i, k, l; struct dc_stream_state *dc_streams[MAX_STREAMS] = {0}; +#if defined(CONFIG_DRM_AMD_DC_DCN3_0) + dc_allow_idle_optimizations(dc, false); +#endif for (i = 0; i < context->stream_count; i++) dc_streams[i] = context->streams[i]; @@ -1838,6 +1841,11 @@ static enum surface_update_type check_update_surfaces_for_stream( int i; enum surface_update_type overall_type = UPDATE_TYPE_FAST; +#if defined(CONFIG_DRM_AMD_DC_DCN3_0) + if (dc->idle_optimizations_allowed) + overall_type = UPDATE_TYPE_FULL; + +#endif if (stream_status == NULL || stream_status->plane_count != surface_count) overall_type = UPDATE_TYPE_FULL; @@ -2306,8 +2314,14 @@ static void commit_planes_for_stream(struct dc *dc, } } - if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) { - dc->hwss.prepare_bandwidth(dc, context); + if (update_type == UPDATE_TYPE_FULL) { +#if defined(CONFIG_DRM_AMD_DC_DCN3_0) + dc_allow_idle_optimizations(dc, false); + +#endif + if (dc->optimize_seamless_boot_streams == 0) + dc->hwss.prepare_bandwidth(dc, context); + context_clock_trace(dc, context); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 02742cca4d84..4bd6e03a7ef3 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1540,6 +1540,9 @@ static bool dc_link_construct(struct dc_link *link, } } + if (bios->funcs->get_atom_dc_golden_table) + 
bios->funcs->get_atom_dc_golden_table(bios); + /* * TODO check if GPIO programmed correctly * @@ -3102,6 +3105,9 @@ void core_link_enable_stream( struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; enum dc_status status; +#if defined(CONFIG_DRM_AMD_DC_DCN3_0) + enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; +#endif DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); if (!IS_DIAG_DC(dc->ctx->dce_environment) && @@ -3136,8 +3142,8 @@ void core_link_enable_stream( pipe_ctx->stream->link->link_state_valid = true; #if defined(CONFIG_DRM_AMD_DC_DCN3_0) - if (pipe_ctx->stream_res.tg->funcs->set_out_mux) - pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); #endif if (dc_is_dvi_signal(pipe_ctx->stream->signal)) @@ -3276,7 +3282,7 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) dc_is_virtual_signal(pipe_ctx->stream->signal)) return; - if (pipe_ctx->stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { core_link_set_avmute(pipe_ctx, true); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 5cb7b834e459..9bc03f26efda 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1133,6 +1133,44 @@ static inline enum link_training_result perform_link_training_int( return status; } +static enum link_training_result check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_status lane_status; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + + core_link_read_dpcd( + link, + DP_SINK_COUNT, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* + * check lanes status + */ + lane_status.raw = get_nibble_at_index(&dpcd_buf[2], lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + status = LINK_TRAINING_LINK_LOSS; + break; + } + } + + return status; +} + static void initialize_training_settings( struct dc_link *link, const struct dc_link_settings *link_setting, @@ -1372,6 +1410,9 @@ static void print_status_message( case LINK_TRAINING_LQA_FAIL: lt_result = "LQA failed"; break; + case LINK_TRAINING_LINK_LOSS: + lt_result = "Link loss"; + break; default: break; } @@ -1531,6 +1572,14 @@ enum link_training_result dc_link_dp_perform_link_training( status); } + /* delay 5ms after Main Link output idle pattern and then check + * DPCD 0202h. + */ + if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { + msleep(5); + status = check_link_loss_status(link, <_settings); + } + /* 6. 
print status message*/ print_status_message(link, <_settings, status); @@ -4290,22 +4339,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) void dpcd_set_source_specific_data(struct dc_link *link) { - uint8_t dspc = 0; - enum dc_status ret; - - ret = core_link_read_dpcd(link, DP_DOWN_STREAM_PORT_COUNT, &dspc, - sizeof(dspc)); - - if (ret != DC_OK) { - DC_LOG_ERROR("Error in DP aux read transaction," - " not writing source specific data\n"); - return; - } - - /* Return if OUI unsupported */ - if (!(dspc & DP_OUI_SUPPORT)) - return; - if (!link->dc->vendor_signature.is_valid) { struct dpcd_amd_signature amd_signature; amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index 10d69ada88e3..0257a900fe2b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -246,20 +246,18 @@ struct dc_stream_status *dc_stream_get_status( #ifndef TRIM_FSFT /** - * dc_optimize_timing() - dc to optimize timing + * dc_optimize_timing_for_fsft() - dc to optimize timing */ -bool dc_optimize_timing( - struct dc_crtc_timing *timing, +bool dc_optimize_timing_for_fsft( + struct dc_stream_state *pStream, unsigned int max_input_rate_in_khz) { - //optimization is expected to assing a value to these: - //timing->pix_clk_100hz - //timing->v_front_porch - //timing->v_total - //timing->fast_transport_output_rate_100hz; - timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz; + struct dc *dc; - return true; + dc = pStream->ctx->dc; + + return (dc->hwss.optimize_timing_for_fsft && + dc->hwss.optimize_timing_for_fsft(dc, &pStream->timing, max_input_rate_in_khz)); } #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h index 845a3054f21f..d06d07042a12 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h @@ -133,6 +133,9 @@ struct dc_vbios_funcs { uint16_t (*pack_data_tables)( struct dc_bios *dcb, void *dst); + + enum bp_result (*get_atom_dc_golden_table)( + struct dc_bios *dcb); }; struct bios_registers { @@ -154,6 +157,7 @@ struct dc_bios { struct dc_firmware_info fw_info; bool fw_info_valid; struct dc_vram_info vram_info; + struct dc_golden_table golden_table; }; #endif /* DC_BIOS_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e4e85a159462..633442bc7ef2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -424,8 +424,8 @@ struct dc_stream_status *dc_stream_get_status( struct dc_stream_state *dc_stream); #ifndef TRIM_FSFT -bool dc_optimize_timing( - struct dc_crtc_timing *timing, +bool dc_optimize_timing_for_fsft( + struct dc_stream_state *pStream, unsigned int max_input_rate_in_khz); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 29fe5389f973..946ba929c6f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -890,6 +890,20 @@ struct dsc_dec_dpcd_caps { uint32_t branch_max_line_width; }; +struct dc_golden_table { + uint16_t dc_golden_table_ver; + uint32_t aux_dphy_rx_control0_val; + uint32_t aux_dphy_tx_control_val; + uint32_t aux_dphy_rx_control1_val; + uint32_t dc_gpio_aux_ctrl_0_val; + uint32_t dc_gpio_aux_ctrl_1_val; + uint32_t dc_gpio_aux_ctrl_2_val; + uint32_t 
dc_gpio_aux_ctrl_3_val; + uint32_t dc_gpio_aux_ctrl_4_val; + uint32_t dc_gpio_aux_ctrl_5_val; +}; + + #if defined(CONFIG_DRM_AMD_DC_DCN3_0) enum dc_gpu_mem_alloc_type { DC_MEM_ALLOC_TYPE_GART, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h index 384389f0e2c3..66027d496778 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.h @@ -38,7 +38,8 @@ #define AUX_REG_LIST(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ - SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id) + SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id) #define HPD_REG_LIST(id)\ SRI(DC_HPD_CONTROL, HPD, id) @@ -107,6 +108,7 @@ struct dce110_link_enc_aux_registers { uint32_t AUX_CONTROL; uint32_t AUX_DPHY_RX_CONTROL0; + uint32_t AUX_DPHY_RX_CONTROL1; }; struct dce110_link_enc_hpd_registers { diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 82e67bd81f2d..5167d6b8a48d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -233,8 +233,8 @@ static bool dmub_psr_copy_settings(struct dmub_psr *dmub, copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq; copy_settings_data->debug.bitfields.visual_confirm = dc->dc->debug.visual_confirm == VISUAL_CONFIRM_PSR ? true : false; + copy_settings_data->debug.bitfields.use_hw_lock_mgr = 1; copy_settings_data->init_sdp_deadline = psr_context->sdpTransmitLineNumDeadline; - copy_settings_data->debug.bitfields.use_hw_lock_mgr = 0; dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd); dc_dmub_srv_cmd_execute(dc->dmub_srv); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index da0897fe3b54..a643927e272b 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -390,6 +390,8 @@ void dcn10_log_hw_state(struct dc *dc, } DTN_INFO("\n"); + // dcn_dsc_state struct field bytes_per_pixel was renamed to bits_per_pixel + // TODO: Update golden log header to reflect this name change DTN_INFO("DSC: CLOCK_EN SLICE_WIDTH Bytes_pp\n"); for (i = 0; i < pool->res_cap->num_dsc; i++) { struct display_stream_compressor *dsc = pool->dscs[i]; @@ -400,7 +402,7 @@ void dcn10_log_hw_state(struct dc *dc, dsc->inst, s.dsc_clock_en, s.dsc_slice_width, - s.dsc_bytes_per_pixel); + s.dsc_bits_per_pixel); DTN_INFO("\n"); } DTN_INFO("\n"); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index cf59ab0034dc..04dabed5f1c5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -31,10 +31,10 @@ #define TO_DCN10_LINK_ENC(link_encoder)\ container_of(link_encoder, struct dcn10_link_encoder, base) - #define AUX_REG_LIST(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ - SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id) + SRI(AUX_DPHY_RX_CONTROL0, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id) #define HPD_REG_LIST(id)\ SRI(DC_HPD_CONTROL, HPD, id) @@ -73,6 +73,7 @@ struct dcn10_link_enc_aux_registers { uint32_t AUX_CONTROL; uint32_t AUX_DPHY_RX_CONTROL0; uint32_t AUX_DPHY_TX_CONTROL; + uint32_t AUX_DPHY_RX_CONTROL1; }; struct dcn10_link_enc_hpd_registers { @@ -443,7 +444,10 @@ struct dcn10_link_enc_registers { type AUX_TX_PRECHARGE_LEN; \ type 
AUX_TX_PRECHARGE_SYMBOLS; \ type AUX_MODE_DET_CHECK_DELAY;\ - type DPCS_DBG_CBUS_DIS + type DPCS_DBG_CBUS_DIS;\ + type AUX_RX_PRECHARGE_SKIP;\ + type AUX_RX_TIMEOUT_LEN;\ + type AUX_RX_TIMEOUT_LEN_MUL struct dcn10_link_enc_shift { DCN_LINK_ENCODER_REG_FIELD_LIST(uint8_t); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index ba50214d6c32..79b640e202eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -156,7 +156,7 @@ static void dsc2_read_state(struct display_stream_compressor *dsc, struct dcn_ds REG_GET(DSC_TOP_CONTROL, DSC_CLOCK_EN, &s->dsc_clock_en); REG_GET(DSCC_PPS_CONFIG3, SLICE_WIDTH, &s->dsc_slice_width); - REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bytes_per_pixel); + REG_GET(DSCC_PPS_CONFIG1, BITS_PER_PIXEL, &s->dsc_bits_per_pixel); REG_GET(DSCC_PPS_CONFIG3, SLICE_HEIGHT, &s->dsc_slice_height); REG_GET(DSCC_PPS_CONFIG1, CHUNK_SIZE, &s->dsc_chunk_size); REG_GET(DSCC_PPS_CONFIG2, PIC_WIDTH, &s->dsc_pic_width); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 7725a406c16e..66180b4332f1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -2498,3 +2498,30 @@ void dcn20_fpga_init_hw(struct dc *dc) tg->funcs->tg_init(tg); } } +#ifndef TRIM_FSFT +bool dcn20_optimize_timing_for_fsft(struct dc *dc, + struct dc_crtc_timing *timing, + unsigned int max_input_rate_in_khz) +{ + unsigned int old_v_front_porch; + unsigned int old_v_total; + unsigned int max_input_rate_in_100hz; + unsigned long long new_v_total; + + max_input_rate_in_100hz = max_input_rate_in_khz * 10; + if (max_input_rate_in_100hz < timing->pix_clk_100hz) + return false; + + old_v_total = timing->v_total; + old_v_front_porch = timing->v_front_porch; + + timing->fast_transport_output_rate_100hz = timing->pix_clk_100hz; + timing->pix_clk_100hz = max_input_rate_in_100hz; + + new_v_total = div_u64((unsigned long long)old_v_total * max_input_rate_in_100hz, timing->pix_clk_100hz); + + timing->v_total = new_v_total; + timing->v_front_porch = old_v_front_porch + (timing->v_total - old_v_total); + return true; +} +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 63ce763f148e..83220e34c1a9 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -132,5 +132,10 @@ int dcn20_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config); +#ifndef TRIM_FSFT +bool dcn20_optimize_timing_for_fsft(struct dc *dc, + struct dc_crtc_timing *timing, + unsigned int max_input_rate_in_khz); +#endif #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c index 2380392b916e..3dde6f26de47 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c @@ -88,6 +88,9 @@ static const struct hw_sequencer_funcs dcn20_funcs = { .set_backlight_level = dce110_set_backlight_level, .set_abm_immediate_disable = dce110_set_abm_immediate_disable, .set_pipe = dce110_set_pipe, +#ifndef TRIM_FSFT + .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft, +#endif }; static const struct hwseq_private_funcs dcn20_private_funcs = { diff --git 
a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c index 8d209dae66e6..15c2ff264ff6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.c @@ -309,7 +309,6 @@ bool dcn20_link_encoder_is_in_alt_mode(struct link_encoder *enc) void enc2_hw_init(struct link_encoder *enc) { struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); - /* 00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2 01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4 @@ -333,9 +332,18 @@ void enc2_hw_init(struct link_encoder *enc) AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 AUX_RX_DETECTION_THRESHOLD [30:28] = 1 */ - AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); + if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) { + AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val); + + AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val); + + AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1, enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val); + } else { + AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); + + AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d); - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); + } //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index db09f40075c2..bf0044f7417e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -191,7 +191,10 @@ LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL0, AUX_RX_DETECTION_THRESHOLD, mask_sh), \ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_LEN, mask_sh),\ LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_TX_PRECHARGE_SYMBOLS, mask_sh),\ - LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh) + LE_SF(DP_AUX0_AUX_DPHY_TX_CONTROL, AUX_MODE_DET_CHECK_DELAY, mask_sh),\ + LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_PRECHARGE_SKIP, mask_sh),\ + LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\ + LE_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh) #define UNIPHY_DCN2_REG_LIST(id) \ SRI(CLOCK_ENABLE, SYMCLK, id), \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 968a89bbcf24..790baf552695 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -2223,7 +2223,7 @@ int dcn20_populate_dml_pipes_from_context( if (!res_ctx->pipe_ctx[i].plane_state) { pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled; pipes[pipe_cnt].pipe.src.source_scan = dm_horz; - pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear; + pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_4kb_s; pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile; pipes[pipe_cnt].pipe.src.viewport_width = timing->h_addressable; if (pipes[pipe_cnt].pipe.src.viewport_width > 1920) @@ -2235,7 +2235,7 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.surface_width_y = pipes[pipe_cnt].pipe.src.viewport_width; pipes[pipe_cnt].pipe.src.surface_height_c = pipes[pipe_cnt].pipe.src.viewport_height; pipes[pipe_cnt].pipe.src.surface_width_c = pipes[pipe_cnt].pipe.src.viewport_width; - 
pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 63) / 64) * 64; /* linear sw only */ + pipes[pipe_cnt].pipe.src.data_pitch = ((pipes[pipe_cnt].pipe.src.viewport_width + 255) / 256) * 256; pipes[pipe_cnt].pipe.src.source_format = dm_444_32; pipes[pipe_cnt].pipe.dest.recout_width = pipes[pipe_cnt].pipe.src.viewport_width; /*vp_width/hratio*/ pipes[pipe_cnt].pipe.dest.recout_height = pipes[pipe_cnt].pipe.src.viewport_height; /*vp_height/vratio*/ @@ -3069,8 +3069,7 @@ void dcn20_calculate_dlg_params( int pipe_cnt, int vlevel) { - int i, j, pipe_idx, pipe_idx_unsplit; - bool visited[MAX_PIPES] = { 0 }; + int i, pipe_idx; /* Writeback MCIF_WB arbitration parameters */ dc->res_pool->funcs->set_mcif_arb_params(dc, context, pipes, pipe_cnt); @@ -3089,55 +3088,17 @@ void dcn20_calculate_dlg_params( if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz) context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz; - /* - * An artifact of dml pipe split/odm is that pipes get merged back together for - * calculation. Therefore we need to only extract for first pipe in ascending index order - * and copy into the other split half. - */ - for (i = 0, pipe_idx = 0, pipe_idx_unsplit = 0; i < dc->res_pool->pipe_count; i++) { - if (!context->res_ctx.pipe_ctx[i].stream) - continue; - - if (!visited[pipe_idx]) { - display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src; - display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest; - - dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; - dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; - dst->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; - dst->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; - /* - * j iterates inside pipes array, unlike i which iterates inside - * pipe_ctx array - */ - if (src->is_hsplit) - for (j = pipe_idx + 1; j < pipe_cnt; j++) { - display_pipe_source_params_st *src_j = &pipes[j].pipe.src; - display_pipe_dest_params_st *dst_j = &pipes[j].pipe.dest; - - if (src_j->is_hsplit && !visited[j] - && src->hsplit_grp == src_j->hsplit_grp) { - dst_j->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; - dst_j->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; - dst_j->vupdate_width = context->bw_ctx.dml.vba.VUpdateWidthPix[pipe_idx_unsplit]; - dst_j->vready_offset = context->bw_ctx.dml.vba.VReadyOffsetPix[pipe_idx_unsplit]; - visited[j] = true; - } - } - visited[pipe_idx] = true; - pipe_idx_unsplit++; - } - pipe_idx++; - } - for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; + pipes[pipe_idx].pipe.dest.vstartup_start = get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_offset = get_vupdate_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); + pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx); if (context->bw_ctx.bw.dcn.clk.dppclk_khz < pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000) context->bw_ctx.bw.dcn.clk.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz = pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000; - ASSERT(visited[pipe_idx]); context->res_ctx.pipe_ctx[i].pipe_dlg_param = 
pipes[pipe_idx].pipe.dest; pipe_idx++; } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c index 177d0dc8927a..b187f71afa65 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c @@ -92,6 +92,9 @@ static const struct hw_sequencer_funcs dcn21_funcs = { .set_backlight_level = dcn21_set_backlight_level, .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, .set_pipe = dcn21_set_pipe, +#ifndef TRIM_FSFT + .optimize_timing_for_fsft = dcn20_optimize_timing_for_fsft, +#endif }; static const struct hwseq_private_funcs dcn21_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c index c29326e9856a..2ae159e2dd6e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c @@ -62,7 +62,7 @@ static const struct link_encoder_funcs dcn30_link_enc_funcs = { .read_state = link_enc2_read_state, .validate_output_with_stream = dcn30_link_encoder_validate_output_with_stream, - .hw_init = enc2_hw_init, + .hw_init = enc3_hw_init, .setup = dcn10_link_encoder_setup, .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, .enable_dp_output = dcn20_link_encoder_enable_dp_output, @@ -203,3 +203,54 @@ void dcn30_link_encoder_construct( enc10->base.features.flags.bits.HDMI_6GB_EN = 0; } } + +#define AUX_REG(reg)\ + (enc10->aux_regs->reg) + +#define AUX_REG_READ(reg_name) \ + dm_read_reg(CTX, AUX_REG(reg_name)) + +#define AUX_REG_WRITE(reg_name, val) \ + dm_write_reg(CTX, AUX_REG(reg_name), val) +void enc3_hw_init(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + +/* + 00 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__1to2 : 1/2 + 01 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__3to4 : 3/4 + 02 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__7to8 : 7/8 + 03 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__15to16 : 15/16 + 04 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__31to32 : 31/32 + 05 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__63to64 : 63/64 + 06 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__127to128 : 127/128 + 07 - DP_AUX_DPHY_RX_DETECTION_THRESHOLD__255to256 : 255/256 +*/ + +/* + AUX_REG_UPDATE_5(AUX_DPHY_RX_CONTROL0, + AUX_RX_START_WINDOW = 1 [6:4] + AUX_RX_RECEIVE_WINDOW = 1 default is 2 [10:8] + AUX_RX_HALF_SYM_DETECT_LEN = 1 [13:12] default is 1 + AUX_RX_TRANSITION_FILTER_EN = 1 [16] default is 1 + AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT [17] is 0 default is 0 + AUX_RX_ALLOW_BELOW_THRESHOLD_START [18] is 1 default is 1 + AUX_RX_ALLOW_BELOW_THRESHOLD_STOP [19] is 1 default is 1 + AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 + AUX_RX_DETECTION_THRESHOLD [30:28] = 1 +*/ + AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); + + AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); + + //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; + // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk + // 27MHz -> 0xd + // 100MHz -> 0x32 + // 48MHz -> 0x18 + + // Set TMDS_CTL0 to 1. This is a legacy setting. 
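/*
 * Editor's note, not part of the patch: the enc2_hw_init() hunk earlier in
 * this series makes DCN2.x take its AUX DPHY register values from the VBIOS
 * dc_golden_table when one is reported (dc_golden_table_ver > 0) and only
 * fall back to the historical hard-coded writes otherwise, while the new
 * DCN3 enc3_hw_init() shown here keeps the hard-coded defaults.  The values
 * themselves come from the atom_dc_golden_table_v1 structure added to
 * atomfirmware.h later in this series.  A minimal sketch of that selection,
 * using only fields that appear in the diff:
 *
 *	if (enc->ctx->dc_bios->golden_table.dc_golden_table_ver > 0) {
 *		// board-specific tuning supplied by firmware
 *		AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0,
 *			enc->ctx->dc_bios->golden_table.aux_dphy_rx_control0_val);
 *		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL,
 *			enc->ctx->dc_bios->golden_table.aux_dphy_tx_control_val);
 *		AUX_REG_WRITE(AUX_DPHY_RX_CONTROL1,
 *			enc->ctx->dc_bios->golden_table.aux_dphy_rx_control1_val);
 *	} else {
 *		// legacy defaults, unchanged from the pre-golden-table code
 *		AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110);
 *		AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c4d);
 *	}
 */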
+ REG_UPDATE(TMDS_CTL_BITS, TMDS_CTL0, 1); + + dcn10_aux_initialize(enc10); +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h index 585d1ce63db1..8e9fd59ccde8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.h @@ -73,4 +73,6 @@ void dcn30_link_encoder_construct( const struct dcn10_link_enc_shift *link_shift, const struct dcn10_link_enc_mask *link_mask); +void enc3_hw_init(struct link_encoder *enc); + #endif /* __DC_LINK_ENCODER__DCN30_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 1b354c219d0a..9afee7160490 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -26,6 +26,7 @@ #include "dce110/dce110_hw_sequencer.h" #include "dcn10/dcn10_hw_sequencer.h" #include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "dcn30_hwseq.h" static const struct hw_sequencer_funcs dcn30_funcs = { @@ -87,8 +88,8 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .set_flip_control_gsl = dcn20_set_flip_control_gsl, .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync, .apply_idle_power_optimizations = dcn30_apply_idle_power_optimizations, - .set_backlight_level = dce110_set_backlight_level, - .set_abm_immediate_disable = dce110_set_abm_immediate_disable, + .set_backlight_level = dcn21_set_backlight_level, + .set_abm_immediate_disable = dcn21_set_abm_immediate_disable, }; static const struct hwseq_private_funcs dcn30_private_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 7916a7ea9336..afdd4f0d9d71 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -154,23 +154,11 @@ dml_get_pipe_attr_func(refcyc_per_meta_chunk_vblank_c_in_us, mode_lib->vba.TimeP dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_l_in_us, mode_lib->vba.TimePerMetaChunkFlip); dml_get_pipe_attr_func(refcyc_per_meta_chunk_flip_c_in_us, mode_lib->vba.TimePerChromaMetaChunkFlip); +dml_get_pipe_attr_func(vstartup, mode_lib->vba.VStartup); dml_get_pipe_attr_func(vupdate_offset, mode_lib->vba.VUpdateOffsetPix); dml_get_pipe_attr_func(vupdate_width, mode_lib->vba.VUpdateWidthPix); dml_get_pipe_attr_func(vready_offset, mode_lib->vba.VReadyOffsetPix); -unsigned int get_vstartup_calculated( - struct display_mode_lib *mode_lib, - const display_e2e_pipe_params_st *pipes, - unsigned int num_pipes, - unsigned int which_pipe) -{ - unsigned int which_plane; - - recalculate_params(mode_lib, pipes, num_pipes); - which_plane = mode_lib->vba.pipe_plane[which_pipe]; - return mode_lib->vba.VStartup[which_plane]; -} - double get_total_immediate_flip_bytes( struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, @@ -479,7 +467,8 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0; - mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable; + mode_lib->vba.DSCEnabled[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable;; + mode_lib->vba.DSCEnable[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_enable; 
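/*
 * Editor's note, not part of the patch: the display_mode_vba.c hunk above
 * drops the hand-written get_vstartup_calculated() and instead declares
 * vstartup through dml_get_pipe_attr_func(), the same generator already
 * used for vupdate_offset/vupdate_width/vready_offset.  That is what lets
 * dcn20_calculate_dlg_params() fetch the per-pipe values directly and
 * delete its "visited[]" split-pipe merge loop.  Assuming the macro expands
 * to the same shape as the helper it replaces, the generated accessor
 * behaves roughly like this sketch (body taken from the removed code):
 *
 *	unsigned int get_vstartup(struct display_mode_lib *mode_lib,
 *			const display_e2e_pipe_params_st *pipes,
 *			unsigned int num_pipes, unsigned int which_pipe)
 *	{
 *		unsigned int which_plane;
 *
 *		// re-run DML so the vba.* outputs match the pipes passed in
 *		recalculate_params(mode_lib, pipes, num_pipes);
 *		// split/ODM pipes sharing a plane map to the same VBA entry,
 *		// which is why the manual merge bookkeeping can go away
 *		which_plane = mode_lib->vba.pipe_plane[which_pipe];
 *		return mode_lib->vba.VStartup[which_plane];
 *	}
 */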
mode_lib->vba.NumberOfDSCSlices[mode_lib->vba.NumberOfActivePlanes] = dout->dsc_slices; mode_lib->vba.DSCInputBitPerComponent[mode_lib->vba.NumberOfActivePlanes] = diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 756d8eb1221c..21e5111ea7a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -98,16 +98,11 @@ dml_get_pipe_attr_decl(refcyc_per_meta_chunk_vblank_c_in_us); dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_l_in_us); dml_get_pipe_attr_decl(refcyc_per_meta_chunk_flip_c_in_us); +dml_get_pipe_attr_decl(vstartup); dml_get_pipe_attr_decl(vupdate_offset); dml_get_pipe_attr_decl(vupdate_width); dml_get_pipe_attr_decl(vready_offset); -unsigned int get_vstartup_calculated( - struct display_mode_lib *mode_lib, - const display_e2e_pipe_params_st *pipes, - unsigned int num_pipes, - unsigned int which_pipe); - double get_total_immediate_flip_bytes( struct display_mode_lib *mode_lib, const display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 4e6e18bbef5d..72743058836d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -71,8 +71,9 @@ enum dentist_divider_range { #define CTX \ clk_mgr->base.ctx + #define DC_LOGGER \ - clk_mgr->ctx->logger + clk_mgr->base.ctx->logger diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 5915994f9eb8..f520e13aee4c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -55,7 +55,7 @@ struct dsc_optc_config { struct dcn_dsc_state { uint32_t dsc_clock_en; uint32_t dsc_slice_width; - uint32_t dsc_bytes_per_pixel; + uint32_t dsc_bits_per_pixel; uint32_t dsc_slice_height; uint32_t dsc_pic_width; uint32_t dsc_pic_height; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 720ce5e458d8..3c986717dcd5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -116,6 +116,11 @@ struct hw_sequencer_funcs { void (*set_static_screen_control)(struct pipe_ctx **pipe_ctx, int num_pipes, const struct dc_static_screen_params *events); +#ifndef TRIM_FSFT + bool (*optimize_timing_for_fsft)(struct dc *dc, + struct dc_crtc_timing *timing, + unsigned int max_input_rate_in_khz); +#endif /* Stream Related */ void (*enable_stream)(struct pipe_ctx *pipe_ctx); diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 4869d4562e4d..550f46e9b95f 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -66,6 +66,8 @@ enum link_training_result { /* other failure during EQ step */ LINK_TRAINING_EQ_FAIL_EQ, LINK_TRAINING_LQA_FAIL, + /* one of the CR,EQ or symbol lock is dropped */ + LINK_TRAINING_LINK_LOSS, }; struct link_training_settings { diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 7a2500fbf3f2..81820f3d6b3b 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -829,10 +829,13 @@ void 
mod_freesync_build_vrr_infopacket(struct mod_freesync *mod_freesync, switch (packet_type) { case PACKET_TYPE_FS_V3: #ifndef TRIM_FSFT + // always populate with pixel rate. build_vrr_infopacket_v3( stream->signal, vrr, stream->timing.flags.FAST_TRANSPORT, - stream->timing.fast_transport_output_rate_100hz, + (stream->timing.flags.FAST_TRANSPORT) ? + stream->timing.fast_transport_output_rate_100hz : + stream->timing.pix_clk_100hz, app_tf, infopacket); #else build_vrr_infopacket_v3(stream->signal, vrr, app_tf, infopacket); diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index c2544c81dfb2..3e526c394f6c 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -941,7 +941,6 @@ struct atom_display_controller_info_v4_1 uint8_t reserved3[8]; }; - struct atom_display_controller_info_v4_2 { struct atom_common_table_header table_header; @@ -976,6 +975,59 @@ struct atom_display_controller_info_v4_2 uint8_t reserved3[8]; }; +struct atom_display_controller_info_v4_4 { + struct atom_common_table_header table_header; + uint32_t display_caps; + uint32_t bootup_dispclk_10khz; + uint16_t dce_refclk_10khz; + uint16_t i2c_engine_refclk_10khz; + uint16_t dvi_ss_percentage; // in unit of 0.001% + uint16_t dvi_ss_rate_10hz; + uint16_t hdmi_ss_percentage; // in unit of 0.001% + uint16_t hdmi_ss_rate_10hz; + uint16_t dp_ss_percentage; // in unit of 0.001% + uint16_t dp_ss_rate_10hz; + uint8_t dvi_ss_mode; // enum of atom_spread_spectrum_mode + uint8_t hdmi_ss_mode; // enum of atom_spread_spectrum_mode + uint8_t dp_ss_mode; // enum of atom_spread_spectrum_mode + uint8_t ss_reserved; + uint8_t dfp_hardcode_mode_num; // DFP hardcode mode number defined in StandardVESA_TimingTable when EDID is not available + uint8_t dfp_hardcode_refreshrate;// DFP hardcode mode refreshrate defined in StandardVESA_TimingTable when EDID is not available + uint8_t vga_hardcode_mode_num; // VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable + uint8_t vga_hardcode_refreshrate;// VGA hardcode mode number defined in StandardVESA_TimingTable when EDID is not avablable + uint16_t dpphy_refclk_10khz; + uint16_t hw_chip_id; + uint8_t dcnip_min_ver; + uint8_t dcnip_max_ver; + uint8_t max_disp_pipe_num; + uint8_t max_vbios_active_disp_pipum; + uint8_t max_ppll_num; + uint8_t max_disp_phy_num; + uint8_t max_aux_pairs; + uint8_t remotedisplayconfig; + uint32_t dispclk_pll_vco_freq; + uint32_t dp_ref_clk_freq; + uint32_t max_mclk_chg_lat; // Worst case blackout duration for a memory clock frequency (p-state) change, units of 100s of ns (0.1 us) + uint32_t max_sr_exit_lat; // Worst case memory self refresh exit time, units of 100ns of ns (0.1us) + uint32_t max_sr_enter_exit_lat; // Worst case memory self refresh entry followed by immediate exit time, units of 100ns of ns (0.1us) + uint16_t dc_golden_table_offset; // point of struct of atom_dc_golden_table_vxx + uint16_t dc_golden_table_ver; + uint32_t reserved3[3]; +}; + +struct atom_dc_golden_table_v1 +{ + uint32_t aux_dphy_rx_control0_val; + uint32_t aux_dphy_tx_control_val; + uint32_t aux_dphy_rx_control1_val; + uint32_t dc_gpio_aux_ctrl_0_val; + uint32_t dc_gpio_aux_ctrl_1_val; + uint32_t dc_gpio_aux_ctrl_2_val; + uint32_t dc_gpio_aux_ctrl_3_val; + uint32_t dc_gpio_aux_ctrl_4_val; + uint32_t dc_gpio_aux_ctrl_5_val; + uint32_t reserved[23]; +}; enum dce_info_caps_def { diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c 
b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 838a369c9ec3..0826625573dc 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -133,6 +133,78 @@ int smu_get_dpm_freq_range(struct smu_context *smu, return ret; } +static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (!smu->ppt_funcs->dpm_set_vcn_enable) + return 0; + + if (atomic_read(&power_gate->vcn_gated) ^ enable) + return 0; + + ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable); + if (!ret) + atomic_set(&power_gate->vcn_gated, !enable); + + return ret; +} + +static int smu_dpm_set_vcn_enable(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + mutex_lock(&power_gate->vcn_gate_lock); + + ret = smu_dpm_set_vcn_enable_locked(smu, enable); + + mutex_unlock(&power_gate->vcn_gate_lock); + + return ret; +} + +static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (!smu->ppt_funcs->dpm_set_jpeg_enable) + return 0; + + if (atomic_read(&power_gate->jpeg_gated) ^ enable) + return 0; + + ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable); + if (!ret) + atomic_set(&power_gate->jpeg_gated, !enable); + + return ret; +} + +static int smu_dpm_set_jpeg_enable(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + mutex_lock(&power_gate->jpeg_gate_lock); + + ret = smu_dpm_set_jpeg_enable_locked(smu, enable); + + mutex_unlock(&power_gate->jpeg_gate_lock); + + return ret; +} + /** * smu_dpm_set_power_gate - power gate/ungate the specific IP block * @@ -353,6 +425,45 @@ static int smu_early_init(void *handle) return smu_set_funcs(adev); } +static int smu_set_default_dpm_table(struct smu_context *smu) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int vcn_gate, jpeg_gate; + int ret = 0; + + if (!smu->ppt_funcs->set_default_dpm_table) + return 0; + + mutex_lock(&power_gate->vcn_gate_lock); + mutex_lock(&power_gate->jpeg_gate_lock); + + vcn_gate = atomic_read(&power_gate->vcn_gated); + jpeg_gate = atomic_read(&power_gate->jpeg_gated); + + ret = smu_dpm_set_vcn_enable_locked(smu, true); + if (ret) + goto err0_out; + + ret = smu_dpm_set_jpeg_enable_locked(smu, true); + if (ret) + goto err1_out; + + ret = smu->ppt_funcs->set_default_dpm_table(smu); + if (ret) + dev_err(smu->adev->dev, + "Failed to setup default dpm clock tables!\n"); + + smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate); +err1_out: + smu_dpm_set_vcn_enable_locked(smu, !vcn_gate); +err0_out: + mutex_unlock(&power_gate->jpeg_gate_lock); + mutex_unlock(&power_gate->vcn_gate_lock); + + return ret; +} + static int smu_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -579,6 +690,10 @@ static int smu_smc_table_sw_init(struct smu_context *smu) if (ret) return ret; + ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c); + if (ret) + return ret; + return 0; } @@ -586,6 +701,8 @@ static int smu_smc_table_sw_fini(struct smu_context *smu) { int ret; 
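/*
 * Editor's note, not part of the patch: the amdgpu_smu.c hunks above turn
 * vcn_gated/jpeg_gated into atomic_t, protect them with per-IP mutexes, and
 * add *_locked helpers so smu_set_default_dpm_table() can ungate VCN/JPEG
 * while the default DPM tables are built and then restore the saved state.
 * The terse early-return in those helpers simply means "do nothing if the
 * block is already in the requested power state":
 *
 *	gated (1 = powered off)  enable  gated ^ enable  action
 *	          1                1           0         power up,   gated <- 0
 *	          1                0           1         already gated, return 0
 *	          0                1           1         already running, return 0
 *	          0                0           0         power down, gated <- 1
 *
 * A minimal sketch of the pattern (helper name invented for illustration):
 *
 *	static int smu_gate_toggle_sketch(struct smu_context *smu,
 *			atomic_t *gated, bool enable,
 *			int (*set_enable)(struct smu_context *smu, bool enable))
 *	{
 *		int ret;
 *
 *		if (atomic_read(gated) ^ enable)
 *			return 0;	// already in the requested state
 *
 *		ret = set_enable(smu, enable);
 *		if (!ret)
 *			atomic_set(gated, !enable);
 *		return ret;
 *	}
 */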
+ smu_i2c_fini(smu, &smu->adev->pm.smu_i2c); + ret = smu_free_memory_pool(smu); if (ret) return ret; @@ -643,6 +760,11 @@ static int smu_sw_init(void *handle) smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; + atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); + atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); + mutex_init(&smu->smu_power.power_gate.vcn_gate_lock); + mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock); + smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1; @@ -734,7 +856,7 @@ static int smu_smc_hw_setup(struct smu_context *smu) uint32_t pcie_gen = 0, pcie_width = 0; int ret; - if (smu_is_dpm_running(smu) && adev->in_suspend) { + if (adev->in_suspend && smu_is_dpm_running(smu)) { dev_info(adev->dev, "dpm has been enabled\n"); return 0; } @@ -844,10 +966,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } - ret = smu_i2c_init(smu, &adev->pm.smu_i2c); - if (ret) - return ret; - ret = smu_disable_umc_cdr_12gbps_workaround(smu); if (ret) { dev_err(adev->dev, "Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n"); @@ -1046,8 +1164,6 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; - smu_i2c_fini(smu, &adev->pm.smu_i2c); - cancel_work_sync(&smu->throttling_logging_work); ret = smu_disable_thermal_alert(smu); @@ -1590,6 +1706,9 @@ int smu_set_mp1_state(struct smu_context *smu, } ret = smu_send_smc_msg(smu, msg, NULL); + /* some asics may not support those messages */ + if (ret == -EINVAL) + ret = 0; if (ret) dev_err(smu->adev->dev, "[PrepareMp1] Failed!\n"); @@ -1944,6 +2063,10 @@ int smu_read_sensor(struct smu_context *smu, mutex_lock(&smu->mutex); + if (smu->ppt_funcs->read_sensor) + if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) + goto unlock; + switch (sensor) { case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK: *((uint32_t *)data) = pstate_table->gfxclk_pstate.standard * 100; @@ -1966,7 +2089,7 @@ int smu_read_sensor(struct smu_context *smu, *size = 4; break; case AMDGPU_PP_SENSOR_VCN_POWER_STATE: - *(uint32_t *)data = smu->smu_power.power_gate.vcn_gated ? 0 : 1; + *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 
0: 1; *size = 4; break; case AMDGPU_PP_SENSOR_MIN_FAN_RPM: @@ -1974,11 +2097,12 @@ int smu_read_sensor(struct smu_context *smu, *size = 4; break; default: - if (smu->ppt_funcs->read_sensor) - ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); + *size = 0; + ret = -EOPNOTSUPP; break; } +unlock: mutex_unlock(&smu->mutex); return ret; diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index 3b9182c8c53f..6c991de8f371 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -1849,8 +1849,6 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { @@ -1861,7 +1859,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) return ret; } } - power_gate->vcn_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0); @@ -1870,7 +1867,6 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) return ret; } } - power_gate->vcn_gated = true; } return ret; @@ -2080,22 +2076,11 @@ static const struct i2c_algorithm arcturus_i2c_algo = { .functionality = arcturus_i2c_func, }; -static bool arcturus_i2c_adapter_is_added(struct i2c_adapter *control) -{ - struct amdgpu_device *adev = to_amdgpu_device(control); - - return control->dev.parent == &adev->pdev->dev; -} - static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); int res; - /* smu_i2c_eeprom_init may be called twice in sriov */ - if (arcturus_i2c_adapter_is_added(control)) - return 0; - control->owner = THIS_MODULE; control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; @@ -2111,9 +2096,6 @@ static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) { - if (!arcturus_i2c_adapter_is_added(control)) - return; - i2c_del_adapter(control); } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 28312d6dc187..074458eb5407 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -292,8 +292,10 @@ struct smu_dpm_context { struct smu_power_gate { bool uvd_gated; bool vce_gated; - bool vcn_gated; - bool jpeg_gated; + atomic_t vcn_gated; + atomic_t jpeg_gated; + struct mutex vcn_gate_lock; + struct mutex jpeg_gate_lock; }; struct smu_power_context { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h index b2232e24d82f..aa2708fccb6d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_sienna_cichlid.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x33 +#define SMU11_DRIVER_IF_VERSION 0x34 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 5 @@ -968,9 +968,15 @@ typedef struct { typedef struct { uint32_t CurrClock[PPCLK_COUNT]; - uint16_t 
AverageGfxclkFrequency; - uint16_t AverageFclkFrequency; - uint16_t AverageUclkFrequency ; + + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageUclkFrequencyPreDs ; + uint16_t AverageUclkFrequencyPostDs ; + + uint16_t AverageGfxActivity ; uint16_t AverageUclkActivity ; uint8_t CurrSocVoltageOffset ; @@ -988,6 +994,7 @@ typedef struct { uint16_t TemperatureLiquid0 ; uint16_t TemperatureLiquid1 ; uint16_t TemperaturePlx ; + uint16_t Padding16 ; uint32_t ThrottlerStatus ; uint8_t LinkDpmLevel; @@ -1006,8 +1013,10 @@ typedef struct { uint16_t AverageDclk0Frequency ; uint16_t AverageVclk1Frequency ; uint16_t AverageDclk1Frequency ; - uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence - uint16_t padding16_2; + uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence + uint8_t PcieRate ; + uint8_t PcieWidth ; + } SmuMetrics_t; typedef struct { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 429f5aa8924a..6a42331aba8a 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -30,8 +30,8 @@ #define SMU11_DRIVER_IF_VERSION_NV10 0x36 #define SMU11_DRIVER_IF_VERSION_NV12 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x36 -#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x33 -#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x2 +#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x34 +#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0x3 /* MP Apertures */ #define MP0_Public 0x03800000 diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 6aaf483858a0..9f62af9abd23 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -785,8 +785,6 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { @@ -796,14 +794,12 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) if (ret) return ret; } - power_gate->vcn_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } - power_gate->vcn_gated = true; } return ret; @@ -811,8 +807,6 @@ static int navi10_dpm_set_vcn_enable(struct smu_context *smu, bool enable) static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { @@ -821,14 +815,12 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (ret) return ret; } - power_gate->jpeg_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL); if (ret) return ret; } - power_gate->jpeg_gated = true; } return ret; @@ -2457,22 +2449,11 @@ static const struct i2c_algorithm navi10_i2c_algo = { .functionality = navi10_i2c_func, }; -static bool navi10_i2c_adapter_is_added(struct i2c_adapter *control) -{ - struct amdgpu_device *adev = to_amdgpu_device(control); - - return control->dev.parent == 
&adev->pdev->dev; -} - static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); int res; - /* smu_i2c_eeprom_init may be called twice in sriov */ - if (navi10_i2c_adapter_is_added(control)) - return 0; - control->owner = THIS_MODULE; control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; @@ -2488,9 +2469,6 @@ static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter * static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) { - if (!navi10_i2c_adapter_is_added(control)) - return; - i2c_del_adapter(control); } diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index 575ae4be98a2..dbb676c482fd 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -459,8 +459,6 @@ static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { @@ -470,14 +468,12 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) if (ret) return ret; } - power_gate->vcn_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL); if (ret) return ret; } - power_gate->vcn_gated = true; } return ret; @@ -485,8 +481,6 @@ static int renoir_dpm_set_vcn_enable(struct smu_context *smu, bool enable) static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { @@ -495,14 +489,12 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) if (ret) return ret; } - power_gate->jpeg_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); if (ret) return ret; } - power_gate->jpeg_gated = true; } return ret; diff --git a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c index 59da3ca2a4ca..3865dbed5f93 100644 --- a/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/sienna_cichlid_ppt.c @@ -70,14 +70,16 @@ FEATURE_MASK(FEATURE_DPM_FCLK_BIT) | \ FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT)) +#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15 + static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] = { MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1), MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1), MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1), - MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 1), - MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 1), - MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 1), - MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 1), + MSG_MAP(SetAllowedFeaturesMaskLow, PPSMC_MSG_SetAllowedFeaturesMaskLow, 0), + MSG_MAP(SetAllowedFeaturesMaskHigh, PPSMC_MSG_SetAllowedFeaturesMaskHigh, 0), + MSG_MAP(EnableAllSmuFeatures, PPSMC_MSG_EnableAllSmuFeatures, 0), + MSG_MAP(DisableAllSmuFeatures, PPSMC_MSG_DisableAllSmuFeatures, 0), 
MSG_MAP(EnableSmuFeaturesLow, PPSMC_MSG_EnableSmuFeaturesLow, 1), MSG_MAP(EnableSmuFeaturesHigh, PPSMC_MSG_EnableSmuFeaturesHigh, 1), MSG_MAP(DisableSmuFeaturesLow, PPSMC_MSG_DisableSmuFeaturesLow, 1), @@ -85,42 +87,43 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetRunningSmuFeaturesLow, 1), MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetRunningSmuFeaturesHigh, 1), MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1), - MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1), - MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1), - MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1), - MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 1), - MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 1), - MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1), - MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1), - MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 1), - MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 1), - MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1), - MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1), + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), + MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0), + MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0), + MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0), + MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0), + MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0), + MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0), + MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0), + MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0), + MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0), + MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0), MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1), - MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 1), + MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0), MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), - MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 1), - MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 1), - MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 1), - MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 1), - MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 1), - MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 1), - MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 1), - MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 1), + MSG_MAP(SetGeminiMode, PPSMC_MSG_SetGeminiMode, 0), + MSG_MAP(SetGeminiApertureHigh, PPSMC_MSG_SetGeminiApertureHigh, 0), + MSG_MAP(SetGeminiApertureLow, PPSMC_MSG_SetGeminiApertureLow, 0), + MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0), + MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0), + MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), + MSG_MAP(SetUclkFastSwitch, PPSMC_MSG_SetUclkFastSwitch, 0), + MSG_MAP(SetVideoFps, PPSMC_MSG_SetVideoFps, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), - MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1), - MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1), - MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), + MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), + MSG_MAP(DisallowGfxOff, 
PPSMC_MSG_DisallowGfxOff, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), - MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 1), - MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1), - MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1), - MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1), - MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1), - MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 1), - MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 1), + MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), + MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), + MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0), + MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0), + MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0), + MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), + MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), }; static struct cmn2asic_mapping sienna_cichlid_clk_map[SMU_CLK_COUNT] = { @@ -442,13 +445,16 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, *value = metrics->CurrClock[PPCLK_DCEFCLK]; break; case METRICS_AVERAGE_GFXCLK: - *value = metrics->AverageGfxclkFrequency; + if (metrics->AverageGfxActivity <= SMU_11_0_7_GFX_BUSY_THRESHOLD) + *value = metrics->AverageGfxclkFrequencyPostDs; + else + *value = metrics->AverageGfxclkFrequencyPreDs; break; case METRICS_AVERAGE_FCLK: - *value = metrics->AverageFclkFrequency; + *value = metrics->AverageFclkFrequencyPostDs; break; case METRICS_AVERAGE_UCLK: - *value = metrics->AverageUclkFrequency; + *value = metrics->AverageUclkFrequencyPostDs; break; case METRICS_AVERAGE_GFXACTIVITY: *value = metrics->AverageGfxActivity; @@ -760,10 +766,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu) static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; struct amdgpu_device *adev = smu->adev; - int ret = 0; if (enable) { @@ -779,7 +782,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl return ret; } } - power_gate->vcn_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL); @@ -792,7 +794,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl return ret; } } - power_gate->vcn_gated = true; } return ret; @@ -800,8 +801,6 @@ static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enabl static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enable) { - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; int ret = 0; if (enable) { @@ -810,14 +809,12 @@ static int sienna_cichlid_dpm_set_jpeg_enable(struct smu_context *smu, bool enab if (ret) return ret; } - power_gate->jpeg_gated = false; } else { if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) { ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL); if (ret) return ret; } - power_gate->jpeg_gated = true; } return ret; @@ -2624,22 +2621,11 @@ static const struct i2c_algorithm sienna_cichlid_i2c_algo = { .functionality = sienna_cichlid_i2c_func, }; -static bool sienna_cichlid_i2c_adapter_is_added(struct i2c_adapter *control) -{ - struct amdgpu_device *adev = to_amdgpu_device(control); - - return control->dev.parent == &adev->pdev->dev; -} - static int 
sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) { struct amdgpu_device *adev = to_amdgpu_device(control); int res; - /* smu_i2c_eeprom_init may be called twice in sriov */ - if (sienna_cichlid_i2c_adapter_is_added(control)) - return 0; - control->owner = THIS_MODULE; control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; @@ -2655,9 +2641,6 @@ static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_a static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) { - if (!sienna_cichlid_i2c_adapter_is_added(control)) - return; - i2c_del_adapter(control); } diff --git a/drivers/gpu/drm/amd/powerplay/smu_cmn.c b/drivers/gpu/drm/amd/powerplay/smu_cmn.c index be4b678d0e60..5c23c44c33bd 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_cmn.c +++ b/drivers/gpu/drm/amd/powerplay/smu_cmn.c @@ -166,7 +166,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, switch (type) { case CMN2ASIC_MAPPING_MSG: - if (index > SMU_MSG_MAX_COUNT || + if (index >= SMU_MSG_MAX_COUNT || !smu->message_map) return -EINVAL; @@ -181,7 +181,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, return msg_mapping.map_to; case CMN2ASIC_MAPPING_CLK: - if (index > SMU_CLK_COUNT || + if (index >= SMU_CLK_COUNT || !smu->clock_map) return -EINVAL; @@ -192,7 +192,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, return mapping.map_to; case CMN2ASIC_MAPPING_FEATURE: - if (index > SMU_FEATURE_COUNT || + if (index >= SMU_FEATURE_COUNT || !smu->feature_map) return -EINVAL; @@ -203,7 +203,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, return mapping.map_to; case CMN2ASIC_MAPPING_TABLE: - if (index > SMU_TABLE_COUNT || + if (index >= SMU_TABLE_COUNT || !smu->table_map) return -EINVAL; @@ -214,7 +214,7 @@ int smu_cmn_to_asic_specific_index(struct smu_context *smu, return mapping.map_to; case CMN2ASIC_MAPPING_PWR: - if (index > SMU_POWER_SOURCE_COUNT || + if (index >= SMU_POWER_SOURCE_COUNT || !smu->pwr_src_map) return -EINVAL; diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h index d0deaefd3feb..264073d4e263 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_internal.h +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -60,7 +60,6 @@ #define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask) #define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu) #define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu) -#define smu_set_default_dpm_table(smu) smu_ppt_funcs(set_default_dpm_table, 0, smu) #define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu) #define smu_set_default_od8_settings(smu) smu_ppt_funcs(set_default_od8_settings, 0, smu) #define smu_enable_thermal_alert(smu) smu_ppt_funcs(enable_thermal_alert, 0, smu) @@ -77,8 +76,6 @@ #define smu_get_dal_power_level(smu, clocks) smu_ppt_funcs(get_dal_power_level, 0, smu, clocks) #define smu_get_perf_level(smu, designation, level) smu_ppt_funcs(get_perf_level, 0, smu, designation, level) #define smu_get_current_shallow_sleep_clocks(smu, clocks) smu_ppt_funcs(get_current_shallow_sleep_clocks, 0, smu, clocks) -#define smu_dpm_set_vcn_enable(smu, enable) smu_ppt_funcs(dpm_set_vcn_enable, 0, smu, enable) -#define smu_dpm_set_jpeg_enable(smu, enable) smu_ppt_funcs(dpm_set_jpeg_enable, 0, smu, enable) #define smu_set_watermarks_table(smu, clock_ranges) 
smu_ppt_funcs(set_watermarks_table, 0, smu, clock_ranges) #define smu_thermal_temperature_range_update(smu, range, rw) smu_ppt_funcs(thermal_temperature_range_update, 0, smu, range, rw) #define smu_register_irq_handler(smu) smu_ppt_funcs(register_irq_handler, 0, smu) diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index fd82402065e6..7b950a582a28 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -1029,6 +1029,7 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case CHIP_NAVI14: case CHIP_NAVI12: case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c index 02159ca29fa2..c18169aa59ce 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/ci_smumgr.c @@ -2725,7 +2725,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr) static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr) { - return ci_is_smc_ram_running(hwmgr); + return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device, + CGS_IND_REG__SMC, FEATURE_STATUS, + VOLTAGE_CONTROLLER_ON)) + ? true : false; } static int ci_smu_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 09b32289497e..b23cb2fec3f3 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -4308,11 +4308,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, { int ret; - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) + if (slots < 0) return false; - if (slots < 0) + port = drm_dp_mst_topology_get_port_validated(mgr, port); + if (!port) return false; if (port->vcpi.vcpi > 0) { @@ -4328,6 +4328,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, if (ret) { DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), ret); + drm_dp_mst_topology_put_port(port); goto out; } DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n", diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index bc38322f306e..13068fdf4331 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -815,8 +815,7 @@ static void drm_dev_release(struct kref *ref) drm_managed_release(dev); - if (dev->managed.final_kfree) - kfree(dev->managed.final_kfree); + kfree(dev->managed.final_kfree); } /** diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index d4e7c8370565..19d73868490e 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -879,6 +879,9 @@ err: * @file_priv: drm file-private structure * * Open an object using the global name, returning a handle and the size. + * + * This handle (of course) holds a reference to the object, so the object + * will not go away until the handle is deleted. 
*/ int drm_gem_open_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index d00ea384dcbf..58f5dc2f6dd5 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Asus T103HAF */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* GPD MicroPC (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index aaed9eb3b56c..bbde3b12c311 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -1929,7 +1929,7 @@ static int pwm_setup_backlight(struct intel_connector *connector, return retval; } - level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100, + level = DIV_ROUND_UP_ULL(pwm_get_duty_cycle(panel->backlight.pwm) * 100, CRC_PMIC_PWM_PERIOD_NS); panel->backlight.level = intel_panel_compute_brightness(connector, level); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index e946032b13e4..2c2bf24140c9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -469,7 +469,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) locked = 1; } ret = pin_user_pages_remote - (work->task, mm, + (mm, obj->userptr.ptr + pinned * PAGE_SIZE, npages - pinned, flags, diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 9a46be05425a..b9810bf156c3 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -24,6 +24,7 @@ #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/iommu.h> #include <drm/drm_managed.h> @@ -118,6 +119,9 @@ struct drm_i915_private *mock_gem_device(void) { struct drm_i915_private *i915; struct pci_dev *pdev; +#if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) + struct dev_iommu iommu; +#endif int err; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); @@ -136,8 +140,10 @@ struct drm_i915_private *mock_gem_device(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - /* hack to disable iommu for the fake device; force identity mapping */ - pdev->dev.archdata.iommu = (void *)-1; + /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */ + memset(&iommu, 0, sizeof(iommu)); + iommu.priv = (void *)-1; + pdev->dev.iommu = &iommu; #endif pci_set_drvdata(pdev, i915); diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 6639ee9b05d3..48593932bddf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -4915,6 +4915,7 @@ static int dispc_runtime_resume(struct device *dev) static const struct dev_pm_ops dispc_pm_ops = { .runtime_suspend = dispc_runtime_suspend, .runtime_resume = dispc_runtime_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, 
pm_runtime_force_resume) }; struct platform_driver omap_dispchw_driver = { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 79ddfbfd1b58..eeccf40bae41 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -5467,6 +5467,7 @@ static int dsi_runtime_resume(struct device *dev) static const struct dev_pm_ops dsi_pm_ops = { .runtime_suspend = dsi_runtime_suspend, .runtime_resume = dsi_runtime_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; struct platform_driver omap_dsihw_driver = { diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 4d5739fa4a5d..6ccbc29c4ce4 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1614,6 +1614,7 @@ static int dss_runtime_resume(struct device *dev) static const struct dev_pm_ops dss_pm_ops = { .runtime_suspend = dss_runtime_suspend, .runtime_resume = dss_runtime_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; struct platform_driver omap_dsshw_driver = { diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 4406ce2a08b4..e0817934ee16 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -903,6 +903,7 @@ static int venc_runtime_resume(struct device *dev) static const struct dev_pm_ops venc_pm_ops = { .runtime_suspend = venc_runtime_suspend, .runtime_resume = venc_runtime_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static const struct of_device_id venc_of_match[] = { diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c index 528764566b17..de95dc1b861c 100644 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ b/drivers/gpu/drm/omapdrm/omap_connector.c @@ -89,7 +89,7 @@ static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *conn struct drm_display_mode *mode) { struct omap_connector *omap_connector = to_omap_connector(connector); - struct drm_display_mode new_mode = { { 0 } }; + struct drm_display_mode new_mode = {}; enum drm_mode_status status; status = omap_connector_mode_fixup(omap_connector->output, mode, diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 1a49e619aacf..e8f7b11352d2 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, while (len) { size_t pgsize = get_pgsize(iova | paddr, len); - ops->map(ops, iova, paddr, pgsize, prot); + ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL); iova += pgsize; paddr += pgsize; len -= pgsize; diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h index a0ee41632d7e..a7bc31f6d565 100644 --- a/drivers/gpu/drm/qxl/qxl_dev.h +++ b/drivers/gpu/drm/qxl/qxl_dev.h @@ -131,8 +131,6 @@ enum SpiceCursorType { #pragma pack(push, 1) -#define REDHAT_PCI_VENDOR_ID 0x1b36 - /* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */ #define QXL_DEVICE_ID_STABLE 0x0100 diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index 808c8af58fd5..09485c7f0d6f 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -154,7 +154,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss) break; case DISPC_VP_DPI: enc_type = 
DRM_MODE_ENCODER_DPI; - conn_type = DRM_MODE_CONNECTOR_LVDS; + conn_type = DRM_MODE_CONNECTOR_DPI; break; default: WARN_ON(1); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f297fd5e02d4..cc6a4e7551e3 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -287,11 +287,12 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, */ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) { - bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); - - ret = ttm_tt_create(bo, zero); - if (ret) - goto out_err; + if (bo->ttm == NULL) { + bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED); + ret = ttm_tt_create(bo, zero); + if (ret) + goto out_err; + } ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); if (ret) @@ -652,8 +653,13 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, placement.num_busy_placement = 0; bdev->driver->evict_flags(bo, &placement); - if (!placement.num_placement && !placement.num_busy_placement) - return ttm_bo_pipeline_gutting(bo); + if (!placement.num_placement && !placement.num_busy_placement) { + ret = ttm_bo_pipeline_gutting(bo); + if (ret) + return ret; + + return ttm_tt_create(bo, false); + } evict_mem = bo->mem; evict_mem.mm_node = NULL; @@ -1192,8 +1198,13 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, /* * Remove the backing store if no placement is given. */ - if (!placement->num_placement && !placement->num_busy_placement) - return ttm_bo_pipeline_gutting(bo); + if (!placement->num_placement && !placement->num_busy_placement) { + ret = ttm_bo_pipeline_gutting(bo); + if (ret) + return ret; + + return ttm_tt_create(bo, false); + } /* * Check whether we need to move buffer. @@ -1210,6 +1221,14 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, ttm_flag_masked(&bo->mem.placement, new_flags, ~TTM_PL_MASK_MEMTYPE); } + /* + * We might need to add a TTM. 
+ */ + if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { + ret = ttm_tt_create(bo, true); + if (ret) + return ret; + } return 0; } EXPORT_SYMBOL(ttm_bo_validate); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7fb3e0bcbab4..e6c8bd254055 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -531,15 +531,12 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, .interruptible = false, .no_wait_gpu = false }; - struct ttm_tt *ttm; + struct ttm_tt *ttm = bo->ttm; pgprot_t prot; int ret; - ret = ttm_tt_create(bo, true); - if (ret) - return ret; + BUG_ON(!ttm); - ttm = bo->ttm; ret = ttm_tt_populate(ttm, &ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index d7a6537dd6ee..33526c5df0e8 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -351,11 +351,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, }; - if (ttm_tt_create(bo, true)) { - ret = VM_FAULT_OOM; - goto out_io_unlock; - } - ttm = bo->ttm; if (ttm_tt_populate(bo->ttm, &ctx)) { ret = VM_FAULT_OOM; @@ -510,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo, int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write) { - unsigned long offset = (addr) - vma->vm_start; struct ttm_buffer_object *bo = vma->vm_private_data; + unsigned long offset = (addr) - vma->vm_start + + ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node)) + << PAGE_SHIFT); int ret; if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages) diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 9d1c7177384c..3437711ddb43 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -50,9 +50,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) dma_resv_assert_held(bo->base.resv); - if (bo->ttm) - return 0; - if (bdev->need_dma32) page_flags |= TTM_PAGE_FLAG_DMA32; @@ -70,6 +67,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc) page_flags |= TTM_PAGE_FLAG_SG; break; default: + bo->ttm = NULL; pr_err("Illegal buffer object type\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index 0a5c8cf409fb..4d944a0dff3e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -39,8 +39,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) u32 events_read, events_clear = 0; /* read the config space */ - virtio_cread(vgdev->vdev, struct virtio_gpu_config, - events_read, &events_read); + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + events_read, &events_read); if (events_read & VIRTIO_GPU_EVENT_DISPLAY) { if (vgdev->has_edid) virtio_gpu_cmd_get_edids(vgdev); @@ -49,8 +49,8 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work) drm_helper_hpd_irq_event(vgdev->ddev); events_clear |= VIRTIO_GPU_EVENT_DISPLAY; } - virtio_cwrite(vgdev->vdev, struct virtio_gpu_config, - events_clear, &events_clear); + virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config, + events_clear, &events_clear); } static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, @@ -165,8 +165,8 @@ int virtio_gpu_init(struct drm_device *dev) } /* get display info */ - virtio_cread(vgdev->vdev, struct virtio_gpu_config, - num_scanouts, &num_scanouts); + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + num_scanouts, 
&num_scanouts); vgdev->num_scanouts = min_t(uint32_t, num_scanouts, VIRTIO_GPU_MAX_SCANOUTS); if (!vgdev->num_scanouts) { @@ -176,8 +176,8 @@ int virtio_gpu_init(struct drm_device *dev) } DRM_INFO("number of scanouts: %d\n", num_scanouts); - virtio_cread(vgdev->vdev, struct virtio_gpu_config, - num_capsets, &num_capsets); + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + num_capsets, &num_capsets); DRM_INFO("number of cap sets: %d\n", num_capsets); virtio_gpu_modeset_init(vgdev); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 346cef5ce251..2cdd3cd9ce75 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -141,7 +141,7 @@ static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, struct virtio_gpu_mem_entry **ents, unsigned int *nents) { - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); struct scatterlist *sg; int si, ret; diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 9e663a5d9952..53af60d484a4 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -599,7 +599,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) @@ -1015,7 +1015,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; - bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo); if (use_dma_api) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 4284c4bd444d..e67e2e8f6e6f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -3037,7 +3037,7 @@ static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv, res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx), cmd->body.soid); if (IS_ERR(res)) { - DRM_ERROR("Cound not find streamoutput to bind.\n"); + DRM_ERROR("Could not find streamoutput to bind.\n"); return PTR_ERR(res); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index bbce45d142aa..312ed0881a99 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -186,7 +186,7 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf, /* TODO handle none page aligned offsets */ /* TODO handle more dst & src != 0 */ /* TODO handle more then one copy */ - DRM_ERROR("Cant snoop dma request for cursor!\n"); + DRM_ERROR("Can't snoop dma request for cursor!\n"); DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", box->srcx, box->srcy, box->srcz, box->x, box->y, box->z, @@ -2575,7 +2575,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, ++i; } - if (i != unit) { + if (&con->head == &dev_priv->dev->mode_config.connector_list) { DRM_ERROR("Could not 
find initial display unit.\n"); ret = -EINVAL; goto out_unlock; @@ -2599,13 +2599,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, break; } - if (mode->type & DRM_MODE_TYPE_PREFERRED) - *p_mode = mode; - else { + if (&mode->head == &con->modes) { WARN_ONCE(true, "Could not find initial preferred mode.\n"); *p_mode = list_first_entry(&con->modes, struct drm_display_mode, head); + } else { + *p_mode = mode; } out_unlock: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 16dafff5cab1..c4017c7a24db 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -81,7 +81,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) struct vmw_legacy_display_unit *entry; struct drm_framebuffer *fb = NULL; struct drm_crtc *crtc = NULL; - int i = 0; + int i; /* If there is no display topology the host just assumes * that the guest will set the same layout as the host. @@ -92,12 +92,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) crtc = &entry->base.crtc; w = max(w, crtc->x + crtc->mode.hdisplay); h = max(h, crtc->y + crtc->mode.vdisplay); - i++; } if (crtc == NULL) return 0; - fb = entry->base.crtc.primary->state->fb; + fb = crtc->primary->state->fb; return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], fb->format->cpp[0] * 8, @@ -388,8 +387,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) ldu->base.is_implicit = true; /* Initialize primary plane */ - vmw_du_plane_reset(primary); - ret = drm_universal_plane_init(dev, &ldu->base.primary, 0, &vmw_ldu_plane_funcs, vmw_primary_plane_formats, @@ -403,8 +400,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs); /* Initialize cursor plane */ - vmw_du_plane_reset(cursor); - ret = drm_universal_plane_init(dev, &ldu->base.cursor, 0, &vmw_ldu_cursor_funcs, vmw_cursor_plane_formats, @@ -418,7 +413,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs); - vmw_du_connector_reset(connector); ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { @@ -446,7 +440,6 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_encoder; } - vmw_du_crtc_reset(crtc); ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary, &ldu->base.cursor, &vmw_legacy_crtc_funcs, NULL); @@ -521,6 +514,8 @@ int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) dev_priv->active_display_unit = vmw_du_legacy; + drm_mode_config_reset(dev); + DRM_INFO("Legacy Display Unit initialized\n"); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 32a22e4eddb1..4bf0f5ec4fc2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -859,8 +859,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) sou->base.is_implicit = false; /* Initialize primary plane */ - vmw_du_plane_reset(primary); - ret = drm_universal_plane_init(dev, &sou->base.primary, 0, &vmw_sou_plane_funcs, vmw_primary_plane_formats, @@ -875,8 +873,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) drm_plane_enable_fb_damage_clips(primary); /* Initialize cursor plane */ - vmw_du_plane_reset(cursor); - ret = drm_universal_plane_init(dev, &sou->base.cursor, 0, &vmw_sou_cursor_funcs, vmw_cursor_plane_formats, @@ 
-890,7 +886,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) drm_plane_helper_add(cursor, &vmw_sou_cursor_plane_helper_funcs); - vmw_du_connector_reset(connector); ret = drm_connector_init(dev, connector, &vmw_sou_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { @@ -918,8 +913,6 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_encoder; } - - vmw_du_crtc_reset(crtc); ret = drm_crtc_init_with_planes(dev, crtc, &sou->base.primary, &sou->base.cursor, &vmw_screen_object_crtc_funcs, NULL); @@ -973,6 +966,8 @@ int vmw_kms_sou_init_display(struct vmw_private *dev_priv) dev_priv->active_display_unit = vmw_du_screen_object; + drm_mode_config_reset(dev); + DRM_INFO("Screen Objects Display Unit initialized\n"); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 16b385629688..cf3aafd00837 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1738,8 +1738,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) stdu->base.is_implicit = false; /* Initialize primary plane */ - vmw_du_plane_reset(primary); - ret = drm_universal_plane_init(dev, primary, 0, &vmw_stdu_plane_funcs, vmw_primary_plane_formats, @@ -1754,8 +1752,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) drm_plane_enable_fb_damage_clips(primary); /* Initialize cursor plane */ - vmw_du_plane_reset(cursor); - ret = drm_universal_plane_init(dev, cursor, 0, &vmw_stdu_cursor_funcs, vmw_cursor_plane_formats, @@ -1769,8 +1765,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) drm_plane_helper_add(cursor, &vmw_stdu_cursor_plane_helper_funcs); - vmw_du_connector_reset(connector); - ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); if (ret) { @@ -1798,7 +1792,6 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) goto err_free_encoder; } - vmw_du_crtc_reset(crtc); ret = drm_crtc_init_with_planes(dev, crtc, &stdu->base.primary, &stdu->base.cursor, &vmw_stdu_crtc_funcs, NULL); @@ -1894,6 +1887,8 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) } } + drm_mode_config_reset(dev); + DRM_INFO("Screen Target Display device initialized\n"); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 126f93c0b0b8..3914bfee0533 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1969,7 +1969,7 @@ static int vmw_surface_dirty_alloc(struct vmw_resource *res) num_mip = 1; num_subres = num_layers * num_mip; - dirty_size = sizeof(*dirty) + num_subres * sizeof(dirty->boxes[0]); + dirty_size = struct_size(dirty, boxes, num_subres); acc_size = ttm_round_pot(dirty_size); ret = ttm_mem_global_alloc(vmw_mem_glob(res->dev_priv), acc_size, &ctx); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 3e660fb111b3..013c9e0e412c 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -157,7 +157,8 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, u64 dbuf_cookie, u32 width, u32 height, - u32 bpp, u64 size, struct page **pages) + u32 bpp, u64 size, u32 offset, + struct page **pages) { struct xen_drm_front_evtchnl *evtchnl; struct xen_drm_front_dbuf *dbuf; @@ -194,6 +195,7 @@ int 
xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, req->op.dbuf_create.gref_directory = xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf); req->op.dbuf_create.buffer_sz = size; + req->op.dbuf_create.data_ofs = offset; req->op.dbuf_create.dbuf_cookie = dbuf_cookie; req->op.dbuf_create.width = width; req->op.dbuf_create.height = height; @@ -400,15 +402,15 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, args->size = args->pitch * args->height; obj = xen_drm_front_gem_create(dev, args->size); - if (IS_ERR_OR_NULL(obj)) { - ret = PTR_ERR_OR_ZERO(obj); + if (IS_ERR(obj)) { + ret = PTR_ERR(obj); goto fail; } ret = xen_drm_front_dbuf_create(drm_info->front_info, xen_drm_front_dbuf_to_cookie(obj), args->width, args->height, args->bpp, - args->size, + args->size, 0, xen_drm_front_gem_get_pages(obj)); if (ret) goto fail_backend; diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h index f92c258350ca..54486d89650e 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.h +++ b/drivers/gpu/drm/xen/xen_drm_front.h @@ -145,7 +145,7 @@ int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline, int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info, u64 dbuf_cookie, u32 width, u32 height, - u32 bpp, u64 size, struct page **pages); + u32 bpp, u64 size, u32 offset, struct page **pages); int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info, u64 dbuf_cookie, u64 fb_cookie, u32 width, diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c index 459702fa990e..44f1f70c0aed 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_conn.c +++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c @@ -33,6 +33,7 @@ static const u32 plane_formats[] = { DRM_FORMAT_ARGB4444, DRM_FORMAT_XRGB1555, DRM_FORMAT_ARGB1555, + DRM_FORMAT_YUYV, }; const u32 *xen_drm_front_conn_get_formats(int *format_count) diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index f0b85e094111..39ff95b75357 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) size = round_up(size, PAGE_SIZE); xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return xen_obj; if (drm_info->front_info->cfg.be_alloc) { @@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) */ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); xen_obj->pages = drm_gem_get_pages(&xen_obj->base); - if (IS_ERR_OR_NULL(xen_obj->pages)) { + if (IS_ERR(xen_obj->pages)) { ret = PTR_ERR(xen_obj->pages); xen_obj->pages = NULL; goto fail; @@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, struct xen_gem_object *xen_obj; xen_obj = gem_create(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); return &xen_obj->base; @@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, size = attach->dmabuf->size; xen_obj = gem_create_obj(dev, size); - if (IS_ERR_OR_NULL(xen_obj)) + if (IS_ERR(xen_obj)) return ERR_CAST(xen_obj); ret = gem_alloc_pages_array(xen_obj, size); @@ -210,7 +210,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, ret = xen_drm_front_dbuf_create(drm_info->front_info, xen_drm_front_dbuf_to_cookie(&xen_obj->base), - 0, 0, 0, size, xen_obj->pages); + 0, 0, 0, size, sgt->sgl->offset, + xen_obj->pages); if 
(ret < 0) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 78096bbcd226..ef11b1e4de39 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp, int ret; fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs); - if (IS_ERR_OR_NULL(fb)) + if (IS_ERR(fb)) return fb; gem_obj = fb->obj[0]; diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index 821f7a71e182..99158ee67d02 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -44,7 +44,7 @@ MODULE_PARM_DESC(aux_timeout_ms, "DP aux timeout value in msec (default: 50)"); */ static uint zynqmp_dp_power_on_delay_ms = 4; module_param_named(power_on_delay_ms, zynqmp_dp_power_on_delay_ms, uint, 0444); -MODULE_PARM_DESC(aux_timeout_ms, "DP power on delay in msec (default: 4)"); +MODULE_PARM_DESC(power_on_delay_ms, "DP power on delay in msec (default: 4)"); /* Link configuration registers */ #define ZYNQMP_DP_LINK_BW_SET 0x0 @@ -567,34 +567,37 @@ static int zynqmp_dp_mode_configure(struct zynqmp_dp *dp, int pclock, u8 current_bw) { int max_rate = dp->link_config.max_rate; - u8 bws[3] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; + u8 bw_code; u8 max_lanes = dp->link_config.max_lanes; u8 max_link_rate_code = drm_dp_link_rate_to_bw_code(max_rate); u8 bpp = dp->config.bpp; u8 lane_cnt; - s8 i; - if (current_bw == DP_LINK_BW_1_62) { + /* Downshift from current bandwidth */ + switch (current_bw) { + case DP_LINK_BW_5_4: + bw_code = DP_LINK_BW_2_7; + break; + case DP_LINK_BW_2_7: + bw_code = DP_LINK_BW_1_62; + break; + case DP_LINK_BW_1_62: dev_err(dp->dev, "can't downshift. already lowest link rate\n"); return -EINVAL; - } - - for (i = ARRAY_SIZE(bws) - 1; i >= 0; i--) { - if (current_bw && bws[i] >= current_bw) - continue; - - if (bws[i] <= max_link_rate_code) - break; + default: + /* If not given, start with max supported */ + bw_code = max_link_rate_code; + break; } for (lane_cnt = 1; lane_cnt <= max_lanes; lane_cnt <<= 1) { int bw; u32 rate; - bw = drm_dp_bw_code_to_link_rate(bws[i]); + bw = drm_dp_bw_code_to_link_rate(bw_code); rate = zynqmp_dp_max_rate(bw, lane_cnt, bpp); if (pclock <= rate) { - dp->mode.bw_code = bws[i]; + dp->mode.bw_code = bw_code; dp->mode.lane_cnt = lane_cnt; dp->mode.pclock = pclock; return dp->mode.bw_code; @@ -1308,7 +1311,7 @@ zynqmp_dp_connector_detect(struct drm_connector *connector, bool force) ret = drm_dp_dpcd_read(&dp->aux, 0x0, dp->dpcd, sizeof(dp->dpcd)); if (ret < 0) { - dev_dbg(dp->dev, "DPCD read failes"); + dev_dbg(dp->dev, "DPCD read failed"); goto disconnected; } diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c index f2f3ef8af271..5180c5687ee5 100644 --- a/drivers/gpu/vga/vgaarb.c +++ b/drivers/gpu/vga/vgaarb.c @@ -529,7 +529,7 @@ EXPORT_SYMBOL(vga_get); * * 0 on success, negative error code on failure. 
*/ -int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) +static int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { struct vga_device *vgadev; unsigned long flags; @@ -554,7 +554,6 @@ bail: spin_unlock_irqrestore(&vga_lock, flags); return rc; } -EXPORT_SYMBOL(vga_tryget); /** * vga_put - release lock on legacy VGA resources diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 45e87dc59d4e..05315b434276 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -20,7 +20,7 @@ config HID removed from the HID bus by the transport-layer drivers, such as usbhid (USB_HID) and hidp (BT_HIDP). - For docs and specs, see http://www.usb.org/developers/hidpage/ + For docs and specs, see https://www.usb.org/developers/hidpage/ If unsure, say Y. diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index db1b55df0d13..f64517bc33e2 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c @@ -11,7 +11,7 @@ * host communicates with the CP2112 via raw HID reports. * * Data Sheet: - * http://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf + * https://www.silabs.com/Support%20Documents/TechnicalDocs/CP2112.pdf * Programming Interface Specification: * https://www.silabs.com/documents/public/application-notes/an495-cp2112-interface-specification.pdf */ diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 6f370e020feb..6221888aae99 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -724,6 +724,7 @@ #define USB_DEVICE_ID_LENOVO_CUSBKBD 0x6047 #define USB_DEVICE_ID_LENOVO_CBTKBD 0x6048 #define USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL 0x6049 +#define USB_DEVICE_ID_LENOVO_TP10UBKBD 0x6062 #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d @@ -773,6 +774,7 @@ #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a +#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index dea9cc65bf80..b8eabf206e74 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -350,13 +350,13 @@ static int hidinput_query_battery_capacity(struct hid_device *dev) u8 *buf; int ret; - buf = kmalloc(2, GFP_KERNEL); + buf = kmalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 4, dev->battery_report_type, HID_REQ_GET_REPORT); - if (ret != 2) { + if (ret < 2) { kfree(buf); return -ENODATA; } @@ -1560,21 +1560,12 @@ static bool __hidinput_change_resolution_multipliers(struct hid_device *hid, { struct hid_usage *usage; bool update_needed = false; + bool get_report_completed = false; int i, j; if (report->maxfield == 0) return false; - /* - * If we have more than one feature within this report we - * need to fill in the bits from the others before we can - * overwrite the ones for the Resolution Multiplier. - */ - if (report->maxfield > 1) { - hid_hw_request(hid, report, HID_REQ_GET_REPORT); - hid_hw_wait(hid); - } - for (i = 0; i < report->maxfield; i++) { __s32 value = use_logical_max ? 
report->field[i]->logical_maximum : @@ -1593,6 +1584,25 @@ static bool __hidinput_change_resolution_multipliers(struct hid_device *hid, if (usage->hid != HID_GD_RESOLUTION_MULTIPLIER) continue; + /* + * If we have more than one feature within this + * report we need to fill in the bits from the + * others before we can overwrite the ones for the + * Resolution Multiplier. + * + * But if we're not allowed to read from the device, + * we just bail. Such a device should not exist + * anyway. + */ + if (!get_report_completed && report->maxfield > 1) { + if (hid->quirks & HID_QUIRK_NO_INIT_REPORTS) + return update_needed; + + hid_hw_request(hid, report, HID_REQ_GET_REPORT); + hid_hw_wait(hid); + get_report_completed = true; + } + report->field[i]->value[j] = value; update_needed = true; } diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 96fa2a2c2cd3..c6c8e20f3e8d 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -29,29 +29,67 @@ #include <linux/hid.h> #include <linux/input.h> #include <linux/leds.h> +#include <linux/workqueue.h> #include "hid-ids.h" -struct lenovo_drvdata_tpkbd { +struct lenovo_drvdata { + u8 led_report[3]; /* Must be first for proper alignment */ int led_state; + struct mutex led_report_mutex; struct led_classdev led_mute; struct led_classdev led_micmute; + struct work_struct fn_lock_sync_work; + struct hid_device *hdev; int press_to_select; int dragging; int release_to_select; int select_right; int sensitivity; int press_speed; -}; - -struct lenovo_drvdata_cptkbd { u8 middlebutton_state; /* 0:Up, 1:Down (undecided), 2:Scrolling */ bool fn_lock; - int sensitivity; }; #define map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, EV_KEY, (c)) +#define TP10UBKBD_LED_OUTPUT_REPORT 9 + +#define TP10UBKBD_FN_LOCK_LED 0x54 +#define TP10UBKBD_MUTE_LED 0x64 +#define TP10UBKBD_MICMUTE_LED 0x74 + +#define TP10UBKBD_LED_OFF 1 +#define TP10UBKBD_LED_ON 2 + +static void lenovo_led_set_tp10ubkbd(struct hid_device *hdev, u8 led_code, + enum led_brightness value) +{ + struct lenovo_drvdata *data = hid_get_drvdata(hdev); + int ret; + + mutex_lock(&data->led_report_mutex); + + data->led_report[0] = TP10UBKBD_LED_OUTPUT_REPORT; + data->led_report[1] = led_code; + data->led_report[2] = value ? TP10UBKBD_LED_ON : TP10UBKBD_LED_OFF; + ret = hid_hw_raw_request(hdev, data->led_report[0], data->led_report, 3, + HID_OUTPUT_REPORT, HID_REQ_SET_REPORT); + if (ret) + hid_err(hdev, "Set LED output report error: %d\n", ret); + + mutex_unlock(&data->led_report_mutex); +} + +static void lenovo_tp10ubkbd_sync_fn_lock(struct work_struct *work) +{ + struct lenovo_drvdata *data = + container_of(work, struct lenovo_drvdata, fn_lock_sync_work); + + lenovo_led_set_tp10ubkbd(data->hdev, TP10UBKBD_FN_LOCK_LED, + data->fn_lock); +} + static const __u8 lenovo_pro_dock_need_fixup_collection[] = { 0x05, 0x88, /* Usage Page (Vendor Usage Page 0x88) */ 0x09, 0x01, /* Usage (Vendor Usage 0x01) */ @@ -179,6 +217,44 @@ static int lenovo_input_mapping_scrollpoint(struct hid_device *hdev, return 0; } +static int lenovo_input_mapping_tp10_ultrabook_kbd(struct hid_device *hdev, + struct hid_input *hi, struct hid_field *field, + struct hid_usage *usage, unsigned long **bit, int *max) +{ + /* + * The ThinkPad 10 Ultrabook Keyboard uses 0x000c0001 usage for + * a bunch of keys which have no standard consumer page code. 
+ */ + if (usage->hid == 0x000c0001) { + switch (usage->usage_index) { + case 8: /* Fn-Esc: Fn-lock toggle */ + map_key_clear(KEY_FN_ESC); + return 1; + case 9: /* Fn-F4: Mic mute */ + map_key_clear(KEY_MICMUTE); + return 1; + case 10: /* Fn-F7: Control panel */ + map_key_clear(KEY_CONFIG); + return 1; + case 11: /* Fn-F8: Search (magnifier glass) */ + map_key_clear(KEY_SEARCH); + return 1; + case 12: /* Fn-F10: Open My computer (6 boxes) */ + map_key_clear(KEY_FILE); + return 1; + } + } + + /* + * The Ultrabook Keyboard sends a spurious F23 key-press when resuming + * from suspend and it does not actually have a F23 key, ignore it. + */ + if (usage->hid == 0x00070072) + return -1; + + return 0; +} + static int lenovo_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) @@ -199,6 +275,9 @@ static int lenovo_input_mapping(struct hid_device *hdev, case USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL: return lenovo_input_mapping_scrollpoint(hdev, hi, field, usage, bit, max); + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, + usage, bit, max); default: return 0; } @@ -242,7 +321,7 @@ static int lenovo_send_cmd_cptkbd(struct hid_device *hdev, static void lenovo_features_set_cptkbd(struct hid_device *hdev) { int ret; - struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); ret = lenovo_send_cmd_cptkbd(hdev, 0x05, cptkbd_data->fn_lock); if (ret) @@ -253,23 +332,23 @@ static void lenovo_features_set_cptkbd(struct hid_device *hdev) hid_err(hdev, "Sensitivity setting failed: %d\n", ret); } -static ssize_t attr_fn_lock_show_cptkbd(struct device *dev, +static ssize_t attr_fn_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev); + struct lenovo_drvdata *data = hid_get_drvdata(hdev); - return snprintf(buf, PAGE_SIZE, "%u\n", cptkbd_data->fn_lock); + return snprintf(buf, PAGE_SIZE, "%u\n", data->fn_lock); } -static ssize_t attr_fn_lock_store_cptkbd(struct device *dev, +static ssize_t attr_fn_lock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev); + struct lenovo_drvdata *data = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) @@ -277,8 +356,17 @@ static ssize_t attr_fn_lock_store_cptkbd(struct device *dev, if (value < 0 || value > 1) return -EINVAL; - cptkbd_data->fn_lock = !!value; - lenovo_features_set_cptkbd(hdev); + data->fn_lock = !!value; + + switch (hdev->product) { + case USB_DEVICE_ID_LENOVO_CUSBKBD: + case USB_DEVICE_ID_LENOVO_CBTKBD: + lenovo_features_set_cptkbd(hdev); + break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); + break; + } return count; } @@ -288,7 +376,7 @@ static ssize_t attr_sensitivity_show_cptkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", cptkbd_data->sensitivity); @@ -300,7 +388,7 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); 
- struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) @@ -313,10 +401,10 @@ static ssize_t attr_sensitivity_store_cptkbd(struct device *dev, } -static struct device_attribute dev_attr_fn_lock_cptkbd = +static struct device_attribute dev_attr_fn_lock = __ATTR(fn_lock, S_IWUSR | S_IRUGO, - attr_fn_lock_show_cptkbd, - attr_fn_lock_store_cptkbd); + attr_fn_lock_show, + attr_fn_lock_store); static struct device_attribute dev_attr_sensitivity_cptkbd = __ATTR(sensitivity, S_IWUSR | S_IRUGO, @@ -325,7 +413,7 @@ static struct device_attribute dev_attr_sensitivity_cptkbd = static struct attribute *lenovo_attributes_cptkbd[] = { - &dev_attr_fn_lock_cptkbd.attr, + &dev_attr_fn_lock.attr, &dev_attr_sensitivity_cptkbd.attr, NULL }; @@ -354,10 +442,28 @@ static int lenovo_raw_event(struct hid_device *hdev, return 0; } +static int lenovo_event_tp10ubkbd(struct hid_device *hdev, + struct hid_field *field, struct hid_usage *usage, __s32 value) +{ + struct lenovo_drvdata *data = hid_get_drvdata(hdev); + + if (usage->type == EV_KEY && usage->code == KEY_FN_ESC && value == 1) { + /* + * The user has toggled the Fn-lock state. Toggle our own + * cached value of it and sync our value to the keyboard to + * ensure things are in sync (the syncing should be a no-op). + */ + data->fn_lock = !data->fn_lock; + schedule_work(&data->fn_lock_sync_work); + } + + return 0; +} + static int lenovo_event_cptkbd(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { - struct lenovo_drvdata_cptkbd *cptkbd_data = hid_get_drvdata(hdev); + struct lenovo_drvdata *cptkbd_data = hid_get_drvdata(hdev); /* "wheel" scroll events */ if (usage->type == EV_REL && (usage->code == REL_WHEEL || @@ -396,6 +502,8 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, case USB_DEVICE_ID_LENOVO_CUSBKBD: case USB_DEVICE_ID_LENOVO_CBTKBD: return lenovo_event_cptkbd(hdev, field, usage, value); + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + return lenovo_event_tp10ubkbd(hdev, field, usage, value); default: return 0; } @@ -404,7 +512,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, static int lenovo_features_set_tpkbd(struct hid_device *hdev) { struct hid_report *report; - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4]; @@ -425,7 +533,7 @@ static ssize_t attr_press_to_select_show_tpkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select); } @@ -436,7 +544,7 @@ static ssize_t attr_press_to_select_store_tpkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) @@ -455,7 +563,7 @@ static ssize_t attr_dragging_show_tpkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE,
"%u\n", data_pointer->dragging); } @@ -466,7 +574,7 @@ static ssize_t attr_dragging_store_tpkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) @@ -485,7 +593,7 @@ static ssize_t attr_release_to_select_show_tpkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select); } @@ -496,7 +604,7 @@ static ssize_t attr_release_to_select_store_tpkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) @@ -515,7 +623,7 @@ static ssize_t attr_select_right_show_tpkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right); } @@ -526,7 +634,7 @@ static ssize_t attr_select_right_store_tpkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value)) @@ -545,7 +653,7 @@ static ssize_t attr_sensitivity_show_tpkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->sensitivity); @@ -557,7 +665,7 @@ static ssize_t attr_sensitivity_store_tpkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) @@ -574,7 +682,7 @@ static ssize_t attr_press_speed_show_tpkbd(struct device *dev, char *buf) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_speed); @@ -586,7 +694,7 @@ static ssize_t attr_press_speed_store_tpkbd(struct device *dev, size_t count) { struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int value; if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) @@ -642,12 +750,23 @@ static const struct attribute_group lenovo_attr_group_tpkbd = { .attrs = lenovo_attributes_tpkbd, }; -static enum led_brightness lenovo_led_brightness_get_tpkbd( +static void lenovo_led_set_tpkbd(struct hid_device *hdev) +{ + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); + struct hid_report *report; + + report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3]; + report->field[0]->value[0] = (data_pointer->led_state >> 0) 
& 1; + report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1; + hid_hw_request(hdev, report, HID_REQ_SET_REPORT); +} + +static enum led_brightness lenovo_led_brightness_get( struct led_classdev *led_cdev) { struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); int led_nr = 0; if (led_cdev == &data_pointer->led_micmute) @@ -658,13 +777,13 @@ static enum led_brightness lenovo_led_brightness_get_tpkbd( : LED_OFF; } -static void lenovo_led_brightness_set_tpkbd(struct led_classdev *led_cdev, +static void lenovo_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); - struct hid_report *report; + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); + u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; int led_nr = 0; if (led_cdev == &data_pointer->led_micmute) @@ -675,21 +794,58 @@ static void lenovo_led_brightness_set_tpkbd(struct led_classdev *led_cdev, else data_pointer->led_state |= 1 << led_nr; - report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[3]; - report->field[0]->value[0] = (data_pointer->led_state >> 0) & 1; - report->field[0]->value[1] = (data_pointer->led_state >> 1) & 1; - hid_hw_request(hdev, report, HID_REQ_SET_REPORT); + switch (hdev->product) { + case USB_DEVICE_ID_LENOVO_TPKBD: + lenovo_led_set_tpkbd(hdev); + break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); + break; + } } -static int lenovo_probe_tpkbd(struct hid_device *hdev) +static int lenovo_register_leds(struct hid_device *hdev) { - struct device *dev = &hdev->dev; - struct lenovo_drvdata_tpkbd *data_pointer; - size_t name_sz = strlen(dev_name(dev)) + 16; - char *name_mute, *name_micmute; - int i; + struct lenovo_drvdata *data = hid_get_drvdata(hdev); + size_t name_sz = strlen(dev_name(&hdev->dev)) + 16; + char *name_mute, *name_micm; int ret; + name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); + name_micm = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); + if (name_mute == NULL || name_micm == NULL) { + hid_err(hdev, "Could not allocate memory for led data\n"); + return -ENOMEM; + } + snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(&hdev->dev)); + snprintf(name_micm, name_sz, "%s:amber:micmute", dev_name(&hdev->dev)); + + data->led_mute.name = name_mute; + data->led_mute.brightness_get = lenovo_led_brightness_get; + data->led_mute.brightness_set = lenovo_led_brightness_set; + data->led_mute.dev = &hdev->dev; + ret = led_classdev_register(&hdev->dev, &data->led_mute); + if (ret < 0) + return ret; + + data->led_micmute.name = name_micm; + data->led_micmute.brightness_get = lenovo_led_brightness_get; + data->led_micmute.brightness_set = lenovo_led_brightness_set; + data->led_micmute.dev = &hdev->dev; + ret = led_classdev_register(&hdev->dev, &data->led_micmute); + if (ret < 0) { + led_classdev_unregister(&data->led_mute); + return ret; + } + + return 0; +} + +static int lenovo_probe_tpkbd(struct hid_device *hdev) +{ + struct lenovo_drvdata *data_pointer; + int i, ret; + /* * Only register extra settings against subdevice where input_mapping * set drvdata to 1, i.e. the trackpoint. 
@@ -712,7 +868,7 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) hid_warn(hdev, "Could not create sysfs group: %d\n", ret); data_pointer = devm_kzalloc(&hdev->dev, - sizeof(struct lenovo_drvdata_tpkbd), + sizeof(struct lenovo_drvdata), GFP_KERNEL); if (data_pointer == NULL) { hid_err(hdev, "Could not allocate memory for driver data\n"); @@ -724,37 +880,11 @@ static int lenovo_probe_tpkbd(struct hid_device *hdev) data_pointer->sensitivity = 0xa0; data_pointer->press_speed = 0x38; - name_mute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); - name_micmute = devm_kzalloc(&hdev->dev, name_sz, GFP_KERNEL); - if (name_mute == NULL || name_micmute == NULL) { - hid_err(hdev, "Could not allocate memory for led data\n"); - ret = -ENOMEM; - goto err; - } - snprintf(name_mute, name_sz, "%s:amber:mute", dev_name(dev)); - snprintf(name_micmute, name_sz, "%s:amber:micmute", dev_name(dev)); - hid_set_drvdata(hdev, data_pointer); - data_pointer->led_mute.name = name_mute; - data_pointer->led_mute.brightness_get = lenovo_led_brightness_get_tpkbd; - data_pointer->led_mute.brightness_set = lenovo_led_brightness_set_tpkbd; - data_pointer->led_mute.dev = dev; - ret = led_classdev_register(dev, &data_pointer->led_mute); - if (ret < 0) - goto err; - - data_pointer->led_micmute.name = name_micmute; - data_pointer->led_micmute.brightness_get = - lenovo_led_brightness_get_tpkbd; - data_pointer->led_micmute.brightness_set = - lenovo_led_brightness_set_tpkbd; - data_pointer->led_micmute.dev = dev; - ret = led_classdev_register(dev, &data_pointer->led_micmute); - if (ret < 0) { - led_classdev_unregister(&data_pointer->led_mute); + ret = lenovo_register_leds(hdev); + if (ret) goto err; - } lenovo_features_set_tpkbd(hdev); @@ -767,7 +897,7 @@ err: static int lenovo_probe_cptkbd(struct hid_device *hdev) { int ret; - struct lenovo_drvdata_cptkbd *cptkbd_data; + struct lenovo_drvdata *cptkbd_data; /* All the custom action happens on the USBMOUSE device for USB */ if (hdev->product == USB_DEVICE_ID_LENOVO_CUSBKBD @@ -811,6 +941,57 @@ static int lenovo_probe_cptkbd(struct hid_device *hdev) return 0; } +static struct attribute *lenovo_attributes_tp10ubkbd[] = { + &dev_attr_fn_lock.attr, + NULL +}; + +static const struct attribute_group lenovo_attr_group_tp10ubkbd = { + .attrs = lenovo_attributes_tp10ubkbd, +}; + +static int lenovo_probe_tp10ubkbd(struct hid_device *hdev) +{ + struct lenovo_drvdata *data; + int ret; + + /* All the custom action happens on the USBMOUSE device for USB */ + if (hdev->type != HID_TYPE_USBMOUSE) + return 0; + + data = devm_kzalloc(&hdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + mutex_init(&data->led_report_mutex); + INIT_WORK(&data->fn_lock_sync_work, lenovo_tp10ubkbd_sync_fn_lock); + data->hdev = hdev; + + hid_set_drvdata(hdev, data); + + /* + * The Thinkpad 10 ultrabook USB kbd dock's Fn-lock defaults to on. + * We cannot read the state, only set it, so we force it to on here + * (which should be a no-op) to make sure that our state matches the + * keyboard's FN-lock state. This is the same as what Windows does. 
+ */ + data->fn_lock = true; + lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, data->fn_lock); + + ret = sysfs_create_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd); + if (ret) + return ret; + + ret = lenovo_register_leds(hdev); + if (ret) + goto err; + + return 0; +err: + sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd); + return ret; +} + static int lenovo_probe(struct hid_device *hdev, const struct hid_device_id *id) { @@ -836,6 +1017,9 @@ static int lenovo_probe(struct hid_device *hdev, case USB_DEVICE_ID_LENOVO_CBTKBD: ret = lenovo_probe_cptkbd(hdev); break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + ret = lenovo_probe_tp10ubkbd(hdev); + break; default: ret = 0; break; @@ -852,7 +1036,7 @@ err: static void lenovo_remove_tpkbd(struct hid_device *hdev) { - struct lenovo_drvdata_tpkbd *data_pointer = hid_get_drvdata(hdev); + struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); /* * Only the trackpoint half of the keyboard has drvdata and stuff that @@ -874,6 +1058,20 @@ static void lenovo_remove_cptkbd(struct hid_device *hdev) &lenovo_attr_group_cptkbd); } +static void lenovo_remove_tp10ubkbd(struct hid_device *hdev) +{ + struct lenovo_drvdata *data = hid_get_drvdata(hdev); + + if (data == NULL) + return; + + led_classdev_unregister(&data->led_micmute); + led_classdev_unregister(&data->led_mute); + + sysfs_remove_group(&hdev->dev.kobj, &lenovo_attr_group_tp10ubkbd); + cancel_work_sync(&data->fn_lock_sync_work); +} + static void lenovo_remove(struct hid_device *hdev) { switch (hdev->product) { @@ -884,6 +1082,9 @@ static void lenovo_remove(struct hid_device *hdev) case USB_DEVICE_ID_LENOVO_CBTKBD: lenovo_remove_cptkbd(hdev); break; + case USB_DEVICE_ID_LENOVO_TP10UBKBD: + lenovo_remove_tp10ubkbd(hdev); + break; } hid_hw_stop(hdev); @@ -920,6 +1121,7 @@ static const struct hid_device_id lenovo_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL) }, { HID_USB_DEVICE(USB_VENDOR_ID_IBM, USB_DEVICE_ID_IBM_SCROLLPOINT_800DPI_OPTICAL_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_SCROLLPOINT_OPTICAL) }, + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TP10UBKBD) }, { } }; diff --git a/drivers/hid/hid-mcp2221.c b/drivers/hid/hid-mcp2221.c index e1b93ce32e01..0d27ccb55dd9 100644 --- a/drivers/hid/hid-mcp2221.c +++ b/drivers/hid/hid-mcp2221.c @@ -4,7 +4,7 @@ * * Copyright (c) 2020, Rishi Gupta <gupt21@gmail.com> * - * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf + * Datasheet: https://ww1.microchip.com/downloads/en/DeviceDoc/20005565B.pdf */ #include <linux/module.h> diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 934fc0a798d4..c242150d35a3 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -179,6 +179,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT }, + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET }, { 0 } }; diff --git a/drivers/hid/hid-udraw-ps3.c b/drivers/hid/hid-udraw-ps3.c index b0fbd11aa0fc..b2e17ef2ea27 100644 --- a/drivers/hid/hid-udraw-ps3.c +++ b/drivers/hid/hid-udraw-ps3.c @@ -16,7 +16,7 @@ MODULE_LICENSE("GPL"); /* * 
Protocol information from: - * http://brandonw.net/udraw/ + * https://brandonw.net/udraw/ * and the source code of: * https://vvvv.org/contribution/udraw-hid */ diff --git a/drivers/hid/hid-wiimote-core.c b/drivers/hid/hid-wiimote-core.c index 92874dbe4d4a..679e142fc850 100644 --- a/drivers/hid/hid-wiimote-core.c +++ b/drivers/hid/hid-wiimote-core.c @@ -1870,6 +1870,11 @@ static const struct hid_device_id wiimote_hid_devices[] = { USB_DEVICE_ID_NINTENDO_WIIMOTE2) }, { } }; + +bool wiimote_dpad_as_analog = false; +module_param_named(dpad_as_analog, wiimote_dpad_as_analog, bool, 0644); +MODULE_PARM_DESC(dpad_as_analog, "Use D-Pad as main analog input"); + MODULE_DEVICE_TABLE(hid, wiimote_hid_devices); static struct hid_driver wiimote_hid_driver = { diff --git a/drivers/hid/hid-wiimote-modules.c b/drivers/hid/hid-wiimote-modules.c index 2c3925357857..213c58bf2495 100644 --- a/drivers/hid/hid-wiimote-modules.c +++ b/drivers/hid/hid-wiimote-modules.c @@ -1088,12 +1088,28 @@ static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext) * is the same as before. */ + static const s8 digital_to_analog[3] = {0x20, 0, -0x20}; + if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) { - lx = ext[0] & 0x3e; - ly = ext[1] & 0x3e; + if (wiimote_dpad_as_analog) { + lx = digital_to_analog[1 - !(ext[4] & 0x80) + + !(ext[1] & 0x01)]; + ly = digital_to_analog[1 - !(ext[4] & 0x40) + + !(ext[0] & 0x01)]; + } else { + lx = (ext[0] & 0x3e) - 0x20; + ly = (ext[1] & 0x3e) - 0x20; + } } else { - lx = ext[0] & 0x3f; - ly = ext[1] & 0x3f; + if (wiimote_dpad_as_analog) { + lx = digital_to_analog[1 - !(ext[4] & 0x80) + + !(ext[5] & 0x02)]; + ly = digital_to_analog[1 - !(ext[4] & 0x40) + + !(ext[5] & 0x01)]; + } else { + lx = (ext[0] & 0x3f) - 0x20; + ly = (ext[1] & 0x3f) - 0x20; + } } rx = (ext[0] >> 3) & 0x18; @@ -1110,20 +1126,14 @@ static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext) rt <<= 1; lt <<= 1; - input_report_abs(wdata->extension.input, ABS_HAT1X, lx - 0x20); - input_report_abs(wdata->extension.input, ABS_HAT1Y, ly - 0x20); + input_report_abs(wdata->extension.input, ABS_HAT1X, lx); + input_report_abs(wdata->extension.input, ABS_HAT1Y, ly); input_report_abs(wdata->extension.input, ABS_HAT2X, rx - 0x20); input_report_abs(wdata->extension.input, ABS_HAT2Y, ry - 0x20); input_report_abs(wdata->extension.input, ABS_HAT3X, rt); input_report_abs(wdata->extension.input, ABS_HAT3Y, lt); input_report_key(wdata->extension.input, - wiimod_classic_map[WIIMOD_CLASSIC_KEY_RIGHT], - !(ext[4] & 0x80)); - input_report_key(wdata->extension.input, - wiimod_classic_map[WIIMOD_CLASSIC_KEY_DOWN], - !(ext[4] & 0x40)); - input_report_key(wdata->extension.input, wiimod_classic_map[WIIMOD_CLASSIC_KEY_LT], !(ext[4] & 0x20)); input_report_key(wdata->extension.input, @@ -1157,20 +1167,29 @@ static void wiimod_classic_in_ext(struct wiimote_data *wdata, const __u8 *ext) wiimod_classic_map[WIIMOD_CLASSIC_KEY_ZR], !(ext[5] & 0x04)); - if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) { - input_report_key(wdata->extension.input, - wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT], - !(ext[1] & 0x01)); - input_report_key(wdata->extension.input, - wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP], - !(ext[0] & 0x01)); - } else { + if (!wiimote_dpad_as_analog) { input_report_key(wdata->extension.input, - wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT], - !(ext[5] & 0x02)); + wiimod_classic_map[WIIMOD_CLASSIC_KEY_RIGHT], + !(ext[4] & 0x80)); input_report_key(wdata->extension.input, - wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP], - 
!(ext[5] & 0x01)); + wiimod_classic_map[WIIMOD_CLASSIC_KEY_DOWN], + !(ext[4] & 0x40)); + + if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) { + input_report_key(wdata->extension.input, + wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT], + !(ext[1] & 0x01)); + input_report_key(wdata->extension.input, + wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP], + !(ext[0] & 0x01)); + } else { + input_report_key(wdata->extension.input, + wiimod_classic_map[WIIMOD_CLASSIC_KEY_LEFT], + !(ext[5] & 0x02)); + input_report_key(wdata->extension.input, + wiimod_classic_map[WIIMOD_CLASSIC_KEY_UP], + !(ext[5] & 0x01)); + } } input_sync(wdata->extension.input); diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h index b2a26a0a8f12..ad4ff837f43e 100644 --- a/drivers/hid/hid-wiimote.h +++ b/drivers/hid/hid-wiimote.h @@ -162,6 +162,8 @@ struct wiimote_data { struct work_struct init_worker; }; +extern bool wiimote_dpad_as_analog; + /* wiimote modules */ enum wiimod_module { diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c index f491d8b4e24c..c6d48a8648b7 100644 --- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c +++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c @@ -106,6 +106,11 @@ static inline bool ish_should_enter_d0i3(struct pci_dev *pdev) return !pm_suspend_via_firmware() || pdev->device == CHV_DEVICE_ID; } +static inline bool ish_should_leave_d0i3(struct pci_dev *pdev) +{ + return !pm_resume_via_firmware() || pdev->device == CHV_DEVICE_ID; +} + /** * ish_probe() - PCI driver probe callback * @pdev: pci device @@ -215,9 +220,7 @@ static void __maybe_unused ish_resume_handler(struct work_struct *work) struct ishtp_device *dev = pci_get_drvdata(pdev); int ret; - /* Check the NO_D3 flag to distinguish the resume paths */ - if (pdev->dev_flags & PCI_DEV_FLAGS_NO_D3) { - pdev->dev_flags &= ~PCI_DEV_FLAGS_NO_D3; + if (ish_should_leave_d0i3(pdev) && !dev->suspend_flag) { disable_irq_wake(pdev->irq); ishtp_send_resume(dev); @@ -281,8 +284,11 @@ static int __maybe_unused ish_suspend(struct device *device) */ ish_disable_dma(dev); } else { - /* Set the NO_D3 flag, the ISH would enter D0i3 */ - pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + /* + * Save state so PCI core will keep the device at D0, + * the ISH would enter D0i3 + */ + pci_save_state(pdev); enable_irq_wake(pdev->irq); } diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 17a638f15082..492dd641a25d 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c @@ -26,6 +26,7 @@ #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/string.h> +#include <linux/timekeeping.h> #include <linux/usb.h> @@ -95,6 +96,18 @@ static int hid_start_in(struct hid_device *hid) set_bit(HID_NO_BANDWIDTH, &usbhid->iofl); } else { clear_bit(HID_NO_BANDWIDTH, &usbhid->iofl); + + if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { + /* + * In case events are generated while nobody was + * listening, some are released when the device + * is re-opened. Wait 50 msec for the queue to + * empty before allowing events to go through + * hid. 
+ */ + usbhid->input_start_time = + ktime_add_ms(ktime_get_coarse(), 50); + } } } spin_unlock_irqrestore(&usbhid->lock, flags); @@ -280,20 +293,23 @@ static void hid_irq_in(struct urb *urb) if (!test_bit(HID_OPENED, &usbhid->iofl)) break; usbhid_mark_busy(usbhid); - if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { - hid_input_report(urb->context, HID_INPUT_REPORT, - urb->transfer_buffer, - urb->actual_length, 1); - /* - * autosuspend refused while keys are pressed - * because most keyboards don't wake up when - * a key is released - */ - if (hid_check_keys_pressed(hid)) - set_bit(HID_KEYS_PRESSED, &usbhid->iofl); - else - clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); + if (test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) { + if (ktime_before(ktime_get_coarse(), + usbhid->input_start_time)) + break; + clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); } + hid_input_report(urb->context, HID_INPUT_REPORT, + urb->transfer_buffer, urb->actual_length, 1); + /* + * autosuspend refused while keys are pressed + * because most keyboards don't wake up when + * a key is released + */ + if (hid_check_keys_pressed(hid)) + set_bit(HID_KEYS_PRESSED, &usbhid->iofl); + else + clear_bit(HID_KEYS_PRESSED, &usbhid->iofl); break; case -EPIPE: /* stall */ usbhid_mark_busy(usbhid); @@ -720,17 +736,6 @@ static int usbhid_open(struct hid_device *hid) usb_autopm_put_interface(usbhid->intf); - /* - * In case events are generated while nobody was listening, - * some are released when the device is re-opened. - * Wait 50 msec for the queue to empty before allowing events - * to go through hid. - */ - if (res == 0) - msleep(50); - - clear_bit(HID_RESUME_RUNNING, &usbhid->iofl); - Done: mutex_unlock(&usbhid->mutex); return res; @@ -1667,7 +1672,7 @@ struct usb_interface *usbhid_find_interface(int minor) static int __init hid_init(void) { - int retval = -ENOMEM; + int retval; retval = hid_quirks_init(quirks_param, BUS_USB, MAX_USBHID_BOOT_QUIRKS); if (retval) diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h index 75fe85d3d27a..c6ad684d099a 100644 --- a/drivers/hid/usbhid/usbhid.h +++ b/drivers/hid/usbhid/usbhid.h @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/slab.h> +#include <linux/ktime.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/timer.h> @@ -83,6 +84,7 @@ struct usbhid_device { struct mutex mutex; /* start/stop/open/close */ spinlock_t lock; /* fifo spinlock */ unsigned long iofl; /* I/O flags (CTRL_RUNNING, OUT_RUNNING) */ + ktime_t input_start_time; /* When to start handling input */ struct timer_list io_retry; /* Retry timer */ unsigned long stop_retry; /* Time to give up, in jiffies */ unsigned int retry_delay; /* Delay length in ms */ diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index b50081cacf04..910b6e90866c 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -86,6 +86,10 @@ static int hyperv_die_event(struct notifier_block *nb, unsigned long val, struct die_args *die = (struct die_args *)args; struct pt_regs *regs = die->regs; + /* Don't notify Hyper-V if the die event is other than oops */ + if (val != DIE_OOPS) + return NOTIFY_DONE; + /* * Hyper-V should be notified only once about a panic. 
If we will be * doing hyperv_report_panic_msg() later with kmsg data, don't do diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 30b7b3ea8836..17bb64299bfd 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -447,7 +447,7 @@ static int pwm_fan_resume(struct device *dev) return 0; pwm_get_args(ctx->pwm, &pargs); - duty = DIV_ROUND_UP(ctx->pwm_value * (pargs.period - 1), MAX_PWM); + duty = DIV_ROUND_UP_ULL(ctx->pwm_value * (pargs.period - 1), MAX_PWM); ret = pwm_config(ctx->pwm, duty, pargs.period); if (ret) return ret; diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig index 826a1054100d..32cd26352f38 100644 --- a/drivers/hwspinlock/Kconfig +++ b/drivers/hwspinlock/Kconfig @@ -6,9 +6,10 @@ menuconfig HWSPINLOCK bool "Hardware Spinlock drivers" +if HWSPINLOCK + config HWSPINLOCK_OMAP tristate "OMAP Hardware Spinlock device" - depends on HWSPINLOCK depends on ARCH_OMAP4 || SOC_OMAP5 || SOC_DRA7XX || SOC_AM33XX || SOC_AM43XX || ARCH_K3 || COMPILE_TEST help Say y here to support the OMAP Hardware Spinlock device (firstly @@ -18,7 +19,6 @@ config HWSPINLOCK_OMAP config HWSPINLOCK_QCOM tristate "Qualcomm Hardware Spinlock device" - depends on HWSPINLOCK depends on ARCH_QCOM || COMPILE_TEST select MFD_SYSCON help @@ -30,7 +30,6 @@ config HWSPINLOCK_QCOM config HWSPINLOCK_SIRF tristate "SIRF Hardware Spinlock device" - depends on HWSPINLOCK depends on ARCH_SIRF || COMPILE_TEST help Say y here to support the SIRF Hardware Spinlock device, which @@ -43,7 +42,6 @@ config HWSPINLOCK_SIRF config HWSPINLOCK_SPRD tristate "SPRD Hardware Spinlock device" depends on ARCH_SPRD || COMPILE_TEST - depends on HWSPINLOCK help Say y here to support the SPRD Hardware Spinlock device. @@ -52,7 +50,6 @@ config HWSPINLOCK_SPRD config HWSPINLOCK_STM32 tristate "STM32 Hardware Spinlock device" depends on MACH_STM32MP157 || COMPILE_TEST - depends on HWSPINLOCK help Say y here to support the STM32 Hardware Spinlock device. @@ -60,7 +57,6 @@ config HWSPINLOCK_STM32 config HSEM_U8500 tristate "STE Hardware Semaphore functionality" - depends on HWSPINLOCK depends on ARCH_U8500 || COMPILE_TEST help Say y here to support the STE Hardware Semaphore functionality, which @@ -68,3 +64,5 @@ config HSEM_U8500 SoC. If unsure, say N. 
+ +endif # HWSPINLOCK diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c index f0da544b14d2..364710966665 100644 --- a/drivers/hwspinlock/qcom_hwspinlock.c +++ b/drivers/hwspinlock/qcom_hwspinlock.c @@ -70,41 +70,79 @@ static const struct of_device_id qcom_hwspinlock_of_match[] = { }; MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match); -static int qcom_hwspinlock_probe(struct platform_device *pdev) +static struct regmap *qcom_hwspinlock_probe_syscon(struct platform_device *pdev, + u32 *base, u32 *stride) { - struct hwspinlock_device *bank; struct device_node *syscon; - struct reg_field field; struct regmap *regmap; - size_t array_size; - u32 stride; - u32 base; int ret; - int i; syscon = of_parse_phandle(pdev->dev.of_node, "syscon", 0); - if (!syscon) { - dev_err(&pdev->dev, "no syscon property\n"); - return -ENODEV; - } + if (!syscon) + return ERR_PTR(-ENODEV); regmap = syscon_node_to_regmap(syscon); of_node_put(syscon); if (IS_ERR(regmap)) - return PTR_ERR(regmap); + return regmap; - ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, &base); + ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 1, base); if (ret < 0) { dev_err(&pdev->dev, "no offset in syscon\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } - ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, &stride); + ret = of_property_read_u32_index(pdev->dev.of_node, "syscon", 2, stride); if (ret < 0) { dev_err(&pdev->dev, "no stride syscon\n"); - return -EINVAL; + return ERR_PTR(-EINVAL); } + return regmap; +} + +static const struct regmap_config tcsr_mutex_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x40000, + .fast_io = true, +}; + +static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev, + u32 *offset, u32 *stride) +{ + struct device *dev = &pdev->dev; + void __iomem *base; + + /* All modern platform has offset 0 and stride of 4k */ + *offset = 0; + *stride = 0x1000; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return ERR_CAST(base); + + return devm_regmap_init_mmio(dev, base, &tcsr_mutex_config); +} + +static int qcom_hwspinlock_probe(struct platform_device *pdev) +{ + struct hwspinlock_device *bank; + struct reg_field field; + struct regmap *regmap; + size_t array_size; + u32 stride; + u32 base; + int i; + + regmap = qcom_hwspinlock_probe_syscon(pdev, &base, &stride); + if (IS_ERR(regmap) && PTR_ERR(regmap) == -ENODEV) + regmap = qcom_hwspinlock_probe_mmio(pdev, &base, &stride); + + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + array_size = QCOM_MUTEX_NUM_LOCKS * sizeof(struct hwspinlock); bank = devm_kzalloc(&pdev->dev, sizeof(*bank) + array_size, GFP_KERNEL); if (!bank) diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c index 388978775be0..710fbef9a9c2 100644 --- a/drivers/i2c/algos/i2c-algo-pca.c +++ b/drivers/i2c/algos/i2c-algo-pca.c @@ -542,8 +542,8 @@ int i2c_pca_add_numbered_bus(struct i2c_adapter *adap) } EXPORT_SYMBOL(i2c_pca_add_numbered_bus); -MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>, " - "Wolfram Sang <kernel@pengutronix.de>"); +MODULE_AUTHOR("Ian Campbell <icampbell@arcom.com>"); +MODULE_AUTHOR("Wolfram Sang <kernel@pengutronix.de>"); MODULE_DESCRIPTION("I2C-Bus PCA9564/PCA9665 algorithm"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 88639e52c73a..293e7a0760e7 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -146,6 +146,7 @@ 
config I2C_I801 Elkhart Lake (PCH) Tiger Lake (PCH) Jasper Lake (SOC) + Emmitsburg (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c index a43deea390f5..fb93152845f4 100644 --- a/drivers/i2c/busses/i2c-ali1535.c +++ b/drivers/i2c/busses/i2c-ali1535.c @@ -519,9 +519,9 @@ static struct pci_driver ali1535_driver = { module_pci_driver(ali1535_driver); -MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>, " - "Philip Edelbrock <phil@netroedge.com>, " - "Mark D. Studebaker <mdsxyz123@yahoo.com> " - "and Dan Eaton <dan.eaton@rocketlogix.com>"); +MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); +MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>"); +MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>"); +MODULE_AUTHOR("Dan Eaton <dan.eaton@rocketlogix.com>"); MODULE_DESCRIPTION("ALI1535 SMBus driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c index 02185a1cfa77..cc58feacd082 100644 --- a/drivers/i2c/busses/i2c-ali15x3.c +++ b/drivers/i2c/busses/i2c-ali15x3.c @@ -502,8 +502,8 @@ static struct pci_driver ali15x3_driver = { module_pci_driver(ali15x3_driver); -MODULE_AUTHOR ("Frodo Looijaard <frodol@dds.nl>, " - "Philip Edelbrock <phil@netroedge.com>, " - "and Mark D. Studebaker <mdsxyz123@yahoo.com>"); +MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); +MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>"); +MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>"); MODULE_DESCRIPTION("ALI15X3 SMBus driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c index 2b14fef5bf26..34862ad3423e 100644 --- a/drivers/i2c/busses/i2c-amd8111.c +++ b/drivers/i2c/busses/i2c-amd8111.c @@ -381,7 +381,7 @@ static s32 amd8111_access(struct i2c_adapter * adap, u16 addr, if (status) return status; len = min_t(u8, len, I2C_SMBUS_BLOCK_MAX); - /* fall through */ + fallthrough; case I2C_SMBUS_I2C_BLOCK_DATA: for (i = 0; i < len; i++) { status = amd_ec_read(smbus, AMD_SMB_DATA + i, diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c index f51702d86a90..31268074c422 100644 --- a/drivers/i2c/busses/i2c-aspeed.c +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -504,7 +504,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) goto error_and_stop; } irq_handled |= ASPEED_I2CD_INTR_TX_ACK; - /* fall through */ + fallthrough; case ASPEED_I2C_MASTER_TX_FIRST: if (bus->buf_index < msg->len) { bus->master_state = ASPEED_I2C_MASTER_TX; @@ -520,7 +520,7 @@ static u32 aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus, u32 irq_status) /* RX may not have completed yet (only address cycle) */ if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE)) goto out_no_complete; - /* fall through */ + fallthrough; case ASPEED_I2C_MASTER_RX: if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) { dev_err(bus->dev, "master failed to RX\n"); diff --git a/drivers/i2c/busses/i2c-at91-master.c b/drivers/i2c/busses/i2c-at91-master.c index 363d540a8345..66864f9cf7ac 100644 --- a/drivers/i2c/busses/i2c-at91-master.c +++ b/drivers/i2c/busses/i2c-at91-master.c @@ -816,79 +816,16 @@ error: return ret; } -static void at91_prepare_twi_recovery(struct i2c_adapter *adap) -{ - struct at91_twi_dev *dev = i2c_get_adapdata(adap); - - pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio); -} - -static void at91_unprepare_twi_recovery(struct i2c_adapter *adap) -{ - struct at91_twi_dev 
*dev = i2c_get_adapdata(adap); - - pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default); -} - static int at91_init_twi_recovery_gpio(struct platform_device *pdev, struct at91_twi_dev *dev) { struct i2c_bus_recovery_info *rinfo = &dev->rinfo; - dev->pinctrl = devm_pinctrl_get(&pdev->dev); - if (!dev->pinctrl || IS_ERR(dev->pinctrl)) { + rinfo->pinctrl = devm_pinctrl_get(&pdev->dev); + if (!rinfo->pinctrl || IS_ERR(rinfo->pinctrl)) { dev_info(dev->dev, "can't get pinctrl, bus recovery not supported\n"); - return PTR_ERR(dev->pinctrl); + return PTR_ERR(rinfo->pinctrl); } - - dev->pinctrl_pins_default = pinctrl_lookup_state(dev->pinctrl, - PINCTRL_STATE_DEFAULT); - dev->pinctrl_pins_gpio = pinctrl_lookup_state(dev->pinctrl, - "gpio"); - if (IS_ERR(dev->pinctrl_pins_default) || - IS_ERR(dev->pinctrl_pins_gpio)) { - dev_info(&pdev->dev, "pinctrl states incomplete for recovery\n"); - return -EINVAL; - } - - /* - * pins will be taken as GPIO, so we might as well inform pinctrl about - * this and move the state to GPIO - */ - pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_gpio); - - rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN); - if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", - GPIOD_OUT_HIGH_OPEN_DRAIN); - if (PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - if (IS_ERR(rinfo->sda_gpiod) || - IS_ERR(rinfo->scl_gpiod)) { - dev_info(&pdev->dev, "recovery information incomplete\n"); - if (!IS_ERR(rinfo->sda_gpiod)) { - gpiod_put(rinfo->sda_gpiod); - rinfo->sda_gpiod = NULL; - } - if (!IS_ERR(rinfo->scl_gpiod)) { - gpiod_put(rinfo->scl_gpiod); - rinfo->scl_gpiod = NULL; - } - pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default); - return -EINVAL; - } - - /* change the state of the pins back to their default state */ - pinctrl_select_state(dev->pinctrl, dev->pinctrl_pins_default); - - dev_info(&pdev->dev, "using scl, sda for recovery\n"); - - rinfo->prepare_recovery = at91_prepare_twi_recovery; - rinfo->unprepare_recovery = at91_unprepare_twi_recovery; - rinfo->recover_bus = i2c_generic_scl_recovery; dev->adapter.bus_recovery_info = rinfo; return 0; diff --git a/drivers/i2c/busses/i2c-at91.h b/drivers/i2c/busses/i2c-at91.h index 7e7b4955ca7f..eae673ae786c 100644 --- a/drivers/i2c/busses/i2c-at91.h +++ b/drivers/i2c/busses/i2c-at91.h @@ -157,9 +157,6 @@ struct at91_twi_dev { struct at91_twi_dma dma; bool slave_detected; struct i2c_bus_recovery_info rinfo; - struct pinctrl *pinctrl; - struct pinctrl_state *pinctrl_pins_default; - struct pinctrl_state *pinctrl_pins_gpio; #ifdef CONFIG_I2C_AT91_SLAVE_EXPERIMENTAL unsigned smr; struct i2c_client *slave; diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 8a3c98866fb7..688e92818821 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -1078,7 +1078,7 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) if (!iproc_i2c->slave) return -EINVAL; - iproc_i2c->slave = NULL; + disable_irq(iproc_i2c->irq); /* disable all slave interrupts */ tmp = iproc_i2c_rd_reg(iproc_i2c, IE_OFFSET); @@ -1091,6 +1091,17 @@ static int bcm_iproc_i2c_unreg_slave(struct i2c_client *slave) tmp &= ~BIT(S_CFG_EN_NIC_SMB_ADDR3_SHIFT); iproc_i2c_wr_reg(iproc_i2c, S_CFG_SMBUS_ADDR_OFFSET, tmp); + /* flush TX/RX FIFOs */ + tmp = (BIT(S_FIFO_RX_FLUSH_SHIFT) | BIT(S_FIFO_TX_FLUSH_SHIFT)); + iproc_i2c_wr_reg(iproc_i2c, S_FIFO_CTRL_OFFSET, tmp); + + /* clear all 
pending slave interrupts */ + iproc_i2c_wr_reg(iproc_i2c, IS_OFFSET, ISR_MASK_SLAVE); + + iproc_i2c->slave = NULL; + + enable_irq(iproc_i2c->irq); + return 0; } diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index d9b86fcc3825..5dc519516292 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -392,7 +392,7 @@ static const struct i2c_algorithm bcm2835_i2c_algo = { /* * The BCM2835 was reported to have problems with clock stretching: - * http://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html + * https://www.advamation.com/knowhow/raspberrypi/rpi-i2c-bug.html * https://www.raspberrypi.org/forums/viewtopic.php?p=146272 */ static const struct i2c_adapter_quirks bcm2835_i2c_quirks = { diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 8522134f9ea9..55c83a7a24f3 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -90,7 +90,7 @@ static int mfld_setup(struct pci_dev *pdev, struct dw_pci_controller *c) switch (pdev->device) { case 0x0817: dev->timings.bus_freq_hz = I2C_MAX_STANDARD_MODE_FREQ; - /* fall through */ + fallthrough; case 0x0818: case 0x0819: c->bus_num = pdev->device - 0x817 + 3; diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 332f00437479..f67639dc74b7 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c @@ -187,7 +187,7 @@ static irqreturn_t dc_i2c_irq(int irq, void *dev_id) break; } i2c->state = STATE_WRITE; - /* fall through */ + fallthrough; case STATE_WRITE: if (i2c->msgbuf_ptr < i2c->msg->len) dc_i2c_write_buf(i2c); diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 73f139690e4e..843b31a0f752 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c @@ -846,11 +846,10 @@ static void pch_i2c_remove(struct pci_dev *pdev) kfree(adap_info); } -#ifdef CONFIG_PM -static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state) +static int __maybe_unused pch_i2c_suspend(struct device *dev) { - int ret; int i; + struct pci_dev *pdev = to_pci_dev(dev); struct adapter_info *adap_info = pci_get_drvdata(pdev); void __iomem *p = adap_info->pch_data[0].pch_base_address; @@ -872,34 +871,13 @@ static int pch_i2c_suspend(struct pci_dev *pdev, pm_message_t state) ioread32(p + PCH_I2CSR), ioread32(p + PCH_I2CBUFSTA), ioread32(p + PCH_I2CESRSTA)); - ret = pci_save_state(pdev); - - if (ret) { - pch_pci_err(pdev, "pci_save_state\n"); - return ret; - } - - pci_enable_wake(pdev, PCI_D3hot, 0); - pci_disable_device(pdev); - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - return 0; } -static int pch_i2c_resume(struct pci_dev *pdev) +static int __maybe_unused pch_i2c_resume(struct device *dev) { int i; - struct adapter_info *adap_info = pci_get_drvdata(pdev); - - pci_set_power_state(pdev, PCI_D0); - pci_restore_state(pdev); - - if (pci_enable_device(pdev) < 0) { - pch_pci_err(pdev, "pch_i2c_resume:pci_enable_device FAILED\n"); - return -EIO; - } - - pci_enable_wake(pdev, PCI_D3hot, 0); + struct adapter_info *adap_info = dev_get_drvdata(dev); for (i = 0; i < adap_info->ch_num; i++) pch_i2c_init(&adap_info->pch_data[i]); @@ -908,18 +886,15 @@ static int pch_i2c_resume(struct pci_dev *pdev) return 0; } -#else -#define pch_i2c_suspend NULL -#define pch_i2c_resume NULL -#endif + +static SIMPLE_DEV_PM_OPS(pch_i2c_pm_ops, pch_i2c_suspend, pch_i2c_resume); static struct pci_driver 
pch_pcidriver = { .name = KBUILD_MODNAME, .id_table = pch_pcidev_id, .probe = pch_i2c_probe, .remove = pch_i2c_remove, - .suspend = pch_i2c_suspend, - .resume = pch_i2c_resume + .driver.pm = &pch_i2c_pm_ops, }; module_pci_driver(pch_pcidriver); diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 1a319352e51b..a08554c1a570 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -442,6 +442,7 @@ static struct platform_driver em_i2c_driver = { module_platform_driver(em_i2c_driver); MODULE_DESCRIPTION("EMEV2 I2C bus driver"); -MODULE_AUTHOR("Ian Molton and Wolfram Sang <wsa@sang-engineering.com>"); +MODULE_AUTHOR("Ian Molton"); +MODULE_AUTHOR("Wolfram Sang <wsa@sang-engineering.com>"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(of, em_i2c_ids); diff --git a/drivers/i2c/busses/i2c-fsi.c b/drivers/i2c/busses/i2c-fsi.c index 977d6f524649..10332693edf0 100644 --- a/drivers/i2c/busses/i2c-fsi.c +++ b/drivers/i2c/busses/i2c-fsi.c @@ -703,7 +703,7 @@ static int fsi_i2c_probe(struct device *dev) for (port_no = 0; port_no < ports; port_no++) { np = fsi_i2c_find_port_of_node(dev->of_node, port_no); - if (np && !of_device_is_available(np)) + if (!of_device_is_available(np)) continue; port = kzalloc(sizeof(*port), GFP_KERNEL); diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index fea644921a76..e32ef3f01fe8 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -54,6 +54,7 @@ * Sunrise Point-H (PCH) 0xa123 32 hard yes yes yes * Sunrise Point-LP (PCH) 0x9d23 32 hard yes yes yes * DNV (SOC) 0x19df 32 hard yes yes yes + * Emmitsburg (PCH) 0x1bc9 32 hard yes yes yes * Broxton (SOC) 0x5ad4 32 hard yes yes yes * Lewisburg (PCH) 0xa1a3 32 hard yes yes yes * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes @@ -67,6 +68,7 @@ * Comet Lake-H (PCH) 0x06a3 32 hard yes yes yes * Elkhart Lake (PCH) 0x4b23 32 hard yes yes yes * Tiger Lake-LP (PCH) 0xa0a3 32 hard yes yes yes + * Tiger Lake-H (PCH) 0x43a3 32 hard yes yes yes * Jasper Lake (SOC) 0x4da3 32 hard yes yes yes * Comet Lake-V (PCH) 0xa3a3 32 hard yes yes yes * @@ -207,6 +209,7 @@ #define PCI_DEVICE_ID_INTEL_BAYTRAIL_SMBUS 0x0f12 #define PCI_DEVICE_ID_INTEL_CDF_SMBUS 0x18df #define PCI_DEVICE_ID_INTEL_DNV_SMBUS 0x19df +#define PCI_DEVICE_ID_INTEL_EBG_SMBUS 0x1bc9 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS 0x1c22 #define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS 0x1d22 /* Patsburg also has three 'Integrated Device Function' SMBus controllers */ @@ -221,6 +224,7 @@ #define PCI_DEVICE_ID_INTEL_GEMINILAKE_SMBUS 0x31d4 #define PCI_DEVICE_ID_INTEL_ICELAKE_LP_SMBUS 0x34a3 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 +#define PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS 0x43a3 #define PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS 0x4b23 #define PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS 0x4da3 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 @@ -1062,6 +1066,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CDF_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EBG_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, @@ -1074,6 +1079,7 @@ static const struct pci_device_id 
i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COMETLAKE_V_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS) }, { 0, } }; @@ -1748,7 +1754,9 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) case PCI_DEVICE_ID_INTEL_COMETLAKE_H_SMBUS: case PCI_DEVICE_ID_INTEL_ELKHART_LAKE_SMBUS: case PCI_DEVICE_ID_INTEL_TIGERLAKE_LP_SMBUS: + case PCI_DEVICE_ID_INTEL_TIGERLAKE_H_SMBUS: case PCI_DEVICE_ID_INTEL_JASPER_LAKE_SMBUS: + case PCI_DEVICE_ID_INTEL_EBG_SMBUS: priv->features |= FEATURE_BLOCK_PROC; priv->features |= FEATURE_I2C_BLOCK_READ; priv->features |= FEATURE_IRQ; @@ -1765,19 +1773,19 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS1: case PCI_DEVICE_ID_INTEL_WELLSBURG_SMBUS_MS2: priv->features |= FEATURE_IDF; - /* fall through */ + fallthrough; default: priv->features |= FEATURE_BLOCK_PROC; priv->features |= FEATURE_I2C_BLOCK_READ; priv->features |= FEATURE_IRQ; - /* fall through */ + fallthrough; case PCI_DEVICE_ID_INTEL_82801DB_3: priv->features |= FEATURE_SMBUS_PEC; priv->features |= FEATURE_BLOCK_BUFFER; - /* fall through */ + fallthrough; case PCI_DEVICE_ID_INTEL_82801CA_3: priv->features |= FEATURE_HOST_NOTIFY; - /* fall through */ + fallthrough; case PCI_DEVICE_ID_INTEL_82801BA_2: case PCI_DEVICE_ID_INTEL_82801AB_3: case PCI_DEVICE_ID_INTEL_82801AA_3: @@ -1986,7 +1994,8 @@ static void __exit i2c_i801_exit(void) pci_unregister_driver(&i801_driver); } -MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>, Jean Delvare <jdelvare@suse.de>"); +MODULE_AUTHOR("Mark D. 
Studebaker <mdsxyz123@yahoo.com>"); +MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); MODULE_DESCRIPTION("I801 SMBus driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index deef69e56906..efc14041d45b 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -48,11 +48,13 @@ #define I2C_DMA_CON_TX 0x0000 #define I2C_DMA_CON_RX 0x0001 +#define I2C_DMA_ASYNC_MODE 0x0004 +#define I2C_DMA_SKIP_CONFIG 0x0010 +#define I2C_DMA_DIR_CHANGE 0x0200 #define I2C_DMA_START_EN 0x0001 #define I2C_DMA_INT_FLAG_NONE 0x0000 #define I2C_DMA_CLR_FLAG 0x0000 #define I2C_DMA_HARD_RST 0x0002 -#define I2C_DMA_4G_MODE 0x0001 #define MAX_SAMPLE_CNT_DIV 8 #define MAX_STEP_CNT_DIV 64 @@ -201,10 +203,11 @@ struct mtk_i2c_compatible { unsigned char dcm: 1; unsigned char auto_restart: 1; unsigned char aux_len_reg: 1; - unsigned char support_33bits: 1; unsigned char timing_adjust: 1; unsigned char dma_sync: 1; unsigned char ltiming_adjust: 1; + unsigned char apdma_sync: 1; + unsigned char max_dma_support; }; struct mtk_i2c_ac_timing { @@ -250,14 +253,13 @@ struct mtk_i2c { /** * struct i2c_spec_values: - * min_low_ns: min LOW period of the SCL clock - * min_su_sta_ns: min set-up time for a repeated START condition - * max_hd_dat_ns: max data hold time - * min_su_dat_ns: min data set-up time + * @min_low_ns: min LOW period of the SCL clock + * @min_su_sta_ns: min set-up time for a repeated START condition + * @max_hd_dat_ns: max data hold time + * @min_su_dat_ns: min data set-up time */ struct i2c_spec_values { unsigned int min_low_ns; - unsigned int min_high_ns; unsigned int min_su_sta_ns; unsigned int max_hd_dat_ns; unsigned int min_su_dat_ns; @@ -307,10 +309,11 @@ static const struct mtk_i2c_compatible mt2712_compat = { .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, - .support_33bits = 1, .timing_adjust = 1, .dma_sync = 0, .ltiming_adjust = 0, + .apdma_sync = 0, + .max_dma_support = 33, }; static const struct mtk_i2c_compatible mt6577_compat = { @@ -320,10 +323,11 @@ static const struct mtk_i2c_compatible mt6577_compat = { .dcm = 1, .auto_restart = 0, .aux_len_reg = 0, - .support_33bits = 0, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, + .apdma_sync = 0, + .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt6589_compat = { @@ -333,10 +337,11 @@ static const struct mtk_i2c_compatible mt6589_compat = { .dcm = 0, .auto_restart = 0, .aux_len_reg = 0, - .support_33bits = 0, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, + .apdma_sync = 0, + .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt7622_compat = { @@ -346,10 +351,11 @@ static const struct mtk_i2c_compatible mt7622_compat = { .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, - .support_33bits = 0, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, + .apdma_sync = 0, + .max_dma_support = 32, }; static const struct mtk_i2c_compatible mt8173_compat = { @@ -358,10 +364,11 @@ static const struct mtk_i2c_compatible mt8173_compat = { .dcm = 1, .auto_restart = 1, .aux_len_reg = 1, - .support_33bits = 1, .timing_adjust = 0, .dma_sync = 0, .ltiming_adjust = 0, + .apdma_sync = 0, + .max_dma_support = 33, }; static const struct mtk_i2c_compatible mt8183_compat = { @@ -371,10 +378,25 @@ static const struct mtk_i2c_compatible mt8183_compat = { .dcm = 0, .auto_restart = 1, .aux_len_reg = 1, - .support_33bits = 1, .timing_adjust = 1, .dma_sync = 1, .ltiming_adjust = 1, + .apdma_sync = 0, + .max_dma_support = 33, +}; + +static const struct 
mtk_i2c_compatible mt8192_compat = { + .quirks = &mt8183_i2c_quirks, + .regs = mt_i2c_regs_v2, + .pmic_i2c = 0, + .dcm = 0, + .auto_restart = 1, + .aux_len_reg = 1, + .timing_adjust = 1, + .dma_sync = 1, + .ltiming_adjust = 1, + .apdma_sync = 1, + .max_dma_support = 36, }; static const struct of_device_id mtk_i2c_of_match[] = { @@ -384,6 +406,7 @@ static const struct of_device_id mtk_i2c_of_match[] = { { .compatible = "mediatek,mt7622-i2c", .data = &mt7622_compat }, { .compatible = "mediatek,mt8173-i2c", .data = &mt8173_compat }, { .compatible = "mediatek,mt8183-i2c", .data = &mt8183_compat }, + { .compatible = "mediatek,mt8192-i2c", .data = &mt8192_compat }, {} }; MODULE_DEVICE_TABLE(of, mtk_i2c_of_match); @@ -786,11 +809,6 @@ static int mtk_i2c_set_speed(struct mtk_i2c *i2c, unsigned int parent_clk) return 0; } -static inline u32 mtk_i2c_set_4g_mode(dma_addr_t addr) -{ - return (addr & BIT_ULL(32)) ? I2C_DMA_4G_MODE : I2C_DMA_CLR_FLAG; -} - static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, int num, int left_num) { @@ -798,6 +816,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, u16 start_reg; u16 control_reg; u16 restart_flag = 0; + u16 dma_sync = 0; u32 reg_4g_mode; u8 *dma_rd_buf = NULL; u8 *dma_wr_buf = NULL; @@ -851,10 +870,16 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, mtk_i2c_writew(i2c, num, OFFSET_TRANSAC_LEN); } + if (i2c->dev_comp->apdma_sync) { + dma_sync = I2C_DMA_SKIP_CONFIG | I2C_DMA_ASYNC_MODE; + if (i2c->op == I2C_MASTER_WRRD) + dma_sync |= I2C_DMA_DIR_CHANGE; + } + /* Prepare buffer data to start transfer */ if (i2c->op == I2C_MASTER_RD) { writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); - writel(I2C_DMA_CON_RX, i2c->pdmabase + OFFSET_CON); + writel(I2C_DMA_CON_RX | dma_sync, i2c->pdmabase + OFFSET_CON); dma_rd_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_rd_buf) @@ -868,8 +893,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, return -ENOMEM; } - if (i2c->dev_comp->support_33bits) { - reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr); + if (i2c->dev_comp->max_dma_support > 32) { + reg_4g_mode = upper_32_bits(rpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE); } @@ -877,7 +902,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, writel(msgs->len, i2c->pdmabase + OFFSET_RX_LEN); } else if (i2c->op == I2C_MASTER_WR) { writel(I2C_DMA_INT_FLAG_NONE, i2c->pdmabase + OFFSET_INT_FLAG); - writel(I2C_DMA_CON_TX, i2c->pdmabase + OFFSET_CON); + writel(I2C_DMA_CON_TX | dma_sync, i2c->pdmabase + OFFSET_CON); dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_wr_buf) @@ -891,8 +916,8 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, return -ENOMEM; } - if (i2c->dev_comp->support_33bits) { - reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr); + if (i2c->dev_comp->max_dma_support > 32) { + reg_4g_mode = upper_32_bits(wpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE); } @@ -900,7 +925,7 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, writel(msgs->len, i2c->pdmabase + OFFSET_TX_LEN); } else { writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_INT_FLAG); - writel(I2C_DMA_CLR_FLAG, i2c->pdmabase + OFFSET_CON); + writel(I2C_DMA_CLR_FLAG | dma_sync, i2c->pdmabase + OFFSET_CON); dma_wr_buf = i2c_get_dma_safe_msg_buf(msgs, 1); if (!dma_wr_buf) @@ -937,11 +962,11 @@ static int mtk_i2c_do_transfer(struct mtk_i2c *i2c, struct i2c_msg *msgs, return -ENOMEM; } - if 
(i2c->dev_comp->support_33bits) { - reg_4g_mode = mtk_i2c_set_4g_mode(wpaddr); + if (i2c->dev_comp->max_dma_support > 32) { + reg_4g_mode = upper_32_bits(wpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_TX_4G_MODE); - reg_4g_mode = mtk_i2c_set_4g_mode(rpaddr); + reg_4g_mode = upper_32_bits(rpaddr); writel(reg_4g_mode, i2c->pdmabase + OFFSET_RX_4G_MODE); } @@ -1215,8 +1240,9 @@ static int mtk_i2c_probe(struct platform_device *pdev) return -EINVAL; } - if (i2c->dev_comp->support_33bits) { - ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(33)); + if (i2c->dev_comp->max_dma_support > 32) { + ret = dma_set_mask(&pdev->dev, + DMA_BIT_MASK(i2c->dev_comp->max_dma_support)); if (ret) { dev_err(&pdev->dev, "dma_set_mask return error.\n"); return ret; diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 829b8c98ae51..8d9d4ffdcd24 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c @@ -251,7 +251,7 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status) MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK; break; } - /* FALLTHRU */ + fallthrough; case MV64XXX_I2C_STATUS_MAST_WR_ADDR_2_ACK: /* 0xd0 */ case MV64XXX_I2C_STATUS_MAST_WR_ACK: /* 0x28 */ if ((drv_data->bytes_left == 0) @@ -282,14 +282,14 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status) MV64XXX_I2C_STATE_WAITING_FOR_ADDR_2_ACK; break; } - /* FALLTHRU */ + fallthrough; case MV64XXX_I2C_STATUS_MAST_RD_ADDR_2_ACK: /* 0xe0 */ if (drv_data->bytes_left == 0) { drv_data->action = MV64XXX_I2C_ACTION_SEND_STOP; drv_data->state = MV64XXX_I2C_STATE_IDLE; break; } - /* FALLTHRU */ + fallthrough; case MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK: /* 0x50 */ if (status != MV64XXX_I2C_STATUS_MAST_RD_DATA_ACK) drv_data->action = MV64XXX_I2C_ACTION_CONTINUE; @@ -417,8 +417,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) "mv64xxx_i2c_do_action: Invalid action: %d\n", drv_data->action); drv_data->rc = -EIO; - - /* FALLTHRU */ + fallthrough; case MV64XXX_I2C_ACTION_SEND_STOP: drv_data->cntl_bits &= ~MV64XXX_I2C_REG_CONTROL_INTEN; writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index e1e8d4ef9aa7..d4b1b0865f67 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1122,6 +1122,7 @@ static void __exit nmk_i2c_exit(void) subsys_initcall(nmk_i2c_init); module_exit(nmk_i2c_exit); -MODULE_AUTHOR("Sachin Verma, Srinidhi KASAGAR"); +MODULE_AUTHOR("Sachin Verma"); +MODULE_AUTHOR("Srinidhi KASAGAR"); MODULE_DESCRIPTION("Nomadik/Ux500 I2C driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 69740a4ff1db..8c1b31ed0c42 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1032,7 +1032,7 @@ static struct pci_driver piix4_driver = { module_pci_driver(piix4_driver); -MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " - "Philip Edelbrock <phil@netroedge.com>"); +MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); +MODULE_AUTHOR("Philip Edelbrock <phil@netroedge.com>"); MODULE_DESCRIPTION("PIIX4 SMBus driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-pnx.c b/drivers/i2c/busses/i2c-pnx.c index 5d7207c10f1d..8c4ec7f13f5a 100644 --- a/drivers/i2c/busses/i2c-pnx.c +++ b/drivers/i2c/busses/i2c-pnx.c @@ -781,7 +781,8 @@ static void __exit i2c_adap_pnx_exit(void) platform_driver_unregister(&i2c_pnx_driver); } -MODULE_AUTHOR("Vitaly Wool, Dennis Kovalev 
<source@mvista.com>"); +MODULE_AUTHOR("Vitaly Wool"); +MODULE_AUTHOR("Dennis Kovalev <source@mvista.com>"); MODULE_DESCRIPTION("I2C driver for Philips IP3204-based I2C busses"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:pnx-i2c"); diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 2e3e1bb75013..9e883474db8c 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -583,13 +583,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv) rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR); } - rcar_i2c_write(priv, ICSSR, ~SAR & 0xff); + /* Clear SSR, too, because of old STOPs to other clients than us */ + rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff); } /* master sent stop */ if (ssr_filtered & SSR) { i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSSR, ~SSR & 0xff); } @@ -853,7 +854,7 @@ static int rcar_reg_slave(struct i2c_client *slave) priv->slave = slave; rcar_i2c_write(priv, ICSAR, slave->addr); rcar_i2c_write(priv, ICSSR, 0); - rcar_i2c_write(priv, ICSIER, SAR | SSR); + rcar_i2c_write(priv, ICSIER, SAR); rcar_i2c_write(priv, ICSCR, SIE | SDBS); return 0; @@ -865,12 +866,14 @@ static int rcar_unreg_slave(struct i2c_client *slave) WARN_ON(!priv->slave); - /* disable irqs and ensure none is running before clearing ptr */ + /* ensure no irq is running before clearing ptr */ + disable_irq(priv->irq); rcar_i2c_write(priv, ICSIER, 0); - rcar_i2c_write(priv, ICSCR, 0); + rcar_i2c_write(priv, ICSSR, 0); + enable_irq(priv->irq); + rcar_i2c_write(priv, ICSCR, SDBS); rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */ - synchronize_irq(priv->irq); priv->slave = NULL; pm_runtime_put(rcar_i2c_priv_to_dev(priv)); diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 15324bfbc6cb..8e3cc85d1921 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -10,6 +10,7 @@ #include <linux/module.h> #include <linux/i2c.h> #include <linux/interrupt.h> +#include <linux/iopoll.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/platform_device.h> @@ -1040,8 +1041,21 @@ static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num) return ret; } -static int rk3x_i2c_xfer(struct i2c_adapter *adap, - struct i2c_msg *msgs, int num) +static int rk3x_i2c_wait_xfer_poll(struct rk3x_i2c *i2c) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), WAIT_TIMEOUT); + + while (READ_ONCE(i2c->busy) && + ktime_compare(ktime_get(), timeout) < 0) { + udelay(5); + rk3x_i2c_irq(0, i2c); + } + + return !i2c->busy; +} + +static int rk3x_i2c_xfer_common(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num, bool polling) { struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data; unsigned long timeout, flags; @@ -1075,8 +1089,12 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, rk3x_i2c_start(i2c); - timeout = wait_event_timeout(i2c->wait, !i2c->busy, - msecs_to_jiffies(WAIT_TIMEOUT)); + if (!polling) { + timeout = wait_event_timeout(i2c->wait, !i2c->busy, + msecs_to_jiffies(WAIT_TIMEOUT)); + } else { + timeout = rk3x_i2c_wait_xfer_poll(i2c); + } spin_lock_irqsave(&i2c->lock, flags); @@ -1110,6 +1128,18 @@ static int rk3x_i2c_xfer(struct i2c_adapter *adap, return ret < 0 ? 
ret : num; } +static int rk3x_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + return rk3x_i2c_xfer_common(adap, msgs, num, false); +} + +static int rk3x_i2c_xfer_polling(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + return rk3x_i2c_xfer_common(adap, msgs, num, true); +} + static __maybe_unused int rk3x_i2c_resume(struct device *dev) { struct rk3x_i2c *i2c = dev_get_drvdata(dev); @@ -1126,6 +1156,7 @@ static u32 rk3x_i2c_func(struct i2c_adapter *adap) static const struct i2c_algorithm rk3x_i2c_algorithm = { .master_xfer = rk3x_i2c_xfer, + .master_xfer_atomic = rk3x_i2c_xfer_polling, .functionality = rk3x_i2c_func, }; diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 2cca1b21e26e..cab725559999 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -932,6 +932,7 @@ static void __exit sh_mobile_i2c_adap_exit(void) module_exit(sh_mobile_i2c_adap_exit); MODULE_DESCRIPTION("SuperH Mobile I2C Bus Controller driver"); -MODULE_AUTHOR("Magnus Damm and Wolfram Sang"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_AUTHOR("Wolfram Sang"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:i2c-sh_mobile"); diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c index 9dcea2ba7168..8f71f01cb169 100644 --- a/drivers/i2c/busses/i2c-sibyte.c +++ b/drivers/i2c/busses/i2c-sibyte.c @@ -180,6 +180,7 @@ static void __exit i2c_sibyte_exit(void) module_init(i2c_sibyte_init); module_exit(i2c_sibyte_exit); -MODULE_AUTHOR("Kip Walker (Broadcom Corp.), Steven J. Hill <sjhill@realitydiluted.com>"); +MODULE_AUTHOR("Kip Walker (Broadcom Corp.)"); +MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>"); MODULE_DESCRIPTION("SMBus adapter routines for SiByte boards"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c index d7f72ec331e8..30db8fafe078 100644 --- a/drivers/i2c/busses/i2c-sirf.c +++ b/drivers/i2c/busses/i2c-sirf.c @@ -470,6 +470,6 @@ static struct platform_driver i2c_sirfsoc_driver = { module_platform_driver(i2c_sirfsoc_driver); MODULE_DESCRIPTION("SiRF SoC I2C master controller driver"); -MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>, " - "Xiangzhen Ye <Xiangzhen.Ye@csr.com>"); +MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>"); +MODULE_AUTHOR("Xiangzhen Ye <Xiangzhen.Ye@csr.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-synquacer.c b/drivers/i2c/busses/i2c-synquacer.c index c9a3dba6a75d..31be1811d5e6 100644 --- a/drivers/i2c/busses/i2c-synquacer.c +++ b/drivers/i2c/busses/i2c-synquacer.c @@ -398,8 +398,7 @@ static irqreturn_t synquacer_i2c_isr(int irq, void *dev_id) if (i2c->state == STATE_READ) goto prepare_read; - - /* fall through */ + fallthrough; case STATE_WRITE: if (bsr & SYNQUACER_I2C_BSR_LRB) { diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 15772964a05f..00d3e4d7a01e 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -293,6 +293,8 @@ struct tegra_i2c_dev { bool is_curr_atomic_xfer; }; +static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev, bool clk_reinit); + static void dvc_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned long reg) { @@ -419,7 +421,7 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) dma_addr_t dma_phys; int err; - if (!i2c_dev->hw->has_apb_dma) + if (!i2c_dev->hw->has_apb_dma || i2c_dev->is_vi) return 0; if (!IS_ENABLED(CONFIG_TEGRA20_APB_DMA)) { @@ -655,32 +657,47 @@ static int 
__maybe_unused tegra_i2c_runtime_resume(struct device *dev) if (ret) return ret; - if (!i2c_dev->hw->has_single_clk_source) { - ret = clk_enable(i2c_dev->fast_clk); - if (ret < 0) { - dev_err(i2c_dev->dev, - "Enabling fast clk failed, err %d\n", ret); - return ret; - } + ret = clk_enable(i2c_dev->fast_clk); + if (ret < 0) { + dev_err(i2c_dev->dev, + "Enabling fast clk failed, err %d\n", ret); + return ret; } - if (i2c_dev->slow_clk) { - ret = clk_enable(i2c_dev->slow_clk); - if (ret < 0) { - dev_err(dev, "failed to enable slow clock: %d\n", ret); - return ret; - } + ret = clk_enable(i2c_dev->slow_clk); + if (ret < 0) { + dev_err(dev, "failed to enable slow clock: %d\n", ret); + goto disable_fast_clk; } ret = clk_enable(i2c_dev->div_clk); if (ret < 0) { dev_err(i2c_dev->dev, "Enabling div clk failed, err %d\n", ret); - clk_disable(i2c_dev->fast_clk); - return ret; + goto disable_slow_clk; + } + + /* + * VI I2C device is attached to VE power domain which goes through + * power ON/OFF during PM runtime resume/suspend. So, controller + * should go through reset and need to re-initialize after power + * domain ON. + */ + if (i2c_dev->is_vi) { + ret = tegra_i2c_init(i2c_dev, true); + if (ret) + goto disable_div_clk; } return 0; + +disable_div_clk: + clk_disable(i2c_dev->div_clk); +disable_slow_clk: + clk_disable(i2c_dev->slow_clk); +disable_fast_clk: + clk_disable(i2c_dev->fast_clk); + return ret; } static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev) @@ -688,12 +705,8 @@ static int __maybe_unused tegra_i2c_runtime_suspend(struct device *dev) struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); clk_disable(i2c_dev->div_clk); - - if (i2c_dev->slow_clk) - clk_disable(i2c_dev->slow_clk); - - if (!i2c_dev->hw->has_single_clk_source) - clk_disable(i2c_dev->fast_clk); + clk_disable(i2c_dev->slow_clk); + clk_disable(i2c_dev->fast_clk); return pinctrl_pm_select_idle_state(i2c_dev->dev); } @@ -1716,20 +1729,16 @@ static int tegra_i2c_probe(struct platform_device *pdev) platform_set_drvdata(pdev, i2c_dev); - if (!i2c_dev->hw->has_single_clk_source) { - ret = clk_prepare(i2c_dev->fast_clk); - if (ret < 0) { - dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); - return ret; - } + ret = clk_prepare(i2c_dev->fast_clk); + if (ret < 0) { + dev_err(i2c_dev->dev, "Clock prepare failed %d\n", ret); + return ret; } - if (i2c_dev->slow_clk) { - ret = clk_prepare(i2c_dev->slow_clk); - if (ret < 0) { - dev_err(dev, "failed to prepare slow clock: %d\n", ret); - goto unprepare_fast_clk; - } + ret = clk_prepare(i2c_dev->slow_clk); + if (ret < 0) { + dev_err(dev, "failed to prepare slow clock: %d\n", ret); + goto unprepare_fast_clk; } if (i2c_dev->bus_clk_rate > I2C_MAX_FAST_MODE_FREQ && @@ -1750,7 +1759,15 @@ static int tegra_i2c_probe(struct platform_device *pdev) goto unprepare_slow_clk; } - pm_runtime_irq_safe(&pdev->dev); + /* + * VI I2C is in VE power domain which is not always on and not + * an IRQ safe. So, IRQ safe device can't be attached to a non-IRQ + * safe domain as it prevents powering off the PM domain. + * Also, VI I2C device don't need to use runtime IRQ safe as it will + * not be used for atomic transfers. 
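The tegra_i2c_runtime_resume() rework above turns the early returns into a classic goto ladder: each successfully enabled clock gets a matching disable label, and a failure jumps to the label that undoes exactly what has been enabled so far (plus the controller re-init when it sits in the VI power domain). A stripped-down, self-contained version of that unwind pattern, with placeholder steps instead of the Tegra clock APIs:

#include <stdio.h>

static int enable_step(const char *name, int fail)
{
        if (fail) {
                fprintf(stderr, "enabling %s failed\n", name);
                return -1;
        }
        printf("%s enabled\n", name);
        return 0;
}

static void disable_step(const char *name)
{
        printf("%s disabled\n", name);
}

/* Enable A, then B, then C; on failure undo only what already succeeded. */
static int resume(int fail_last)
{
        int ret;

        ret = enable_step("fast_clk", 0);
        if (ret)
                return ret;

        ret = enable_step("slow_clk", 0);
        if (ret)
                goto disable_fast;

        ret = enable_step("div_clk", fail_last);
        if (ret)
                goto disable_slow;

        return 0;

disable_slow:
        disable_step("slow_clk");
disable_fast:
        disable_step("fast_clk");
        return ret;
}

int main(void)
{
        return resume(1) ? 1 : 0;
}

The labels mirror the enable order in reverse, so adding another resource to the sequence only needs one more label rather than touching every error branch.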
+ */ + if (!i2c_dev->is_vi) + pm_runtime_irq_safe(&pdev->dev); pm_runtime_enable(&pdev->dev); if (!pm_runtime_enabled(&pdev->dev)) { ret = tegra_i2c_runtime_resume(&pdev->dev); @@ -1835,12 +1852,10 @@ unprepare_div_clk: clk_unprepare(i2c_dev->div_clk); unprepare_slow_clk: - if (i2c_dev->is_vi) - clk_unprepare(i2c_dev->slow_clk); + clk_unprepare(i2c_dev->slow_clk); unprepare_fast_clk: - if (!i2c_dev->hw->has_single_clk_source) - clk_unprepare(i2c_dev->fast_clk); + clk_unprepare(i2c_dev->fast_clk); return ret; } @@ -1859,12 +1874,8 @@ static int tegra_i2c_remove(struct platform_device *pdev) tegra_i2c_runtime_suspend(&pdev->dev); clk_unprepare(i2c_dev->div_clk); - - if (i2c_dev->slow_clk) - clk_unprepare(i2c_dev->slow_clk); - - if (!i2c_dev->hw->has_single_clk_source) - clk_unprepare(i2c_dev->fast_clk); + clk_unprepare(i2c_dev->slow_clk); + clk_unprepare(i2c_dev->fast_clk); tegra_i2c_release_dma(i2c_dev); return 0; diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c index 4abc7771af06..970ccdcbb889 100644 --- a/drivers/i2c/busses/i2c-viapro.c +++ b/drivers/i2c/busses/i2c-viapro.c @@ -228,7 +228,7 @@ static s32 vt596_access(struct i2c_adapter *adap, u16 addr, goto exit_unsupported; if (read_write == I2C_SMBUS_READ) outb_p(data->block[0], SMBHSTDAT0); - /* Fall through */ + fallthrough; case I2C_SMBUS_BLOCK_DATA: outb_p(command, SMBHSTCMD); if (read_write == I2C_SMBUS_WRITE) { @@ -489,9 +489,9 @@ static void __exit i2c_vt596_exit(void) } } -MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, " - "Mark D. Studebaker <mdsxyz123@yahoo.com> and " - "Jean Delvare <jdelvare@suse.de>"); +MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>"); +MODULE_AUTHOR("Mark D. Studebaker <mdsxyz123@yahoo.com>"); +MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>"); MODULE_DESCRIPTION("vt82c596 SMBus driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c index bd9afa383d12..7b42a18bd05c 100644 --- a/drivers/i2c/busses/scx200_acb.c +++ b/drivers/i2c/busses/scx200_acb.c @@ -151,7 +151,7 @@ static void scx200_acb_machine(struct scx200_acb_iface *iface, u8 status) case state_repeat_start: outb(inb(ACBCTL1) | ACBCTL1_START, ACBCTL1); - /* fallthrough */ + fallthrough; case state_quick: if (iface->address_byte & 1) { diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 26f03a14a478..34a9609f256d 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c @@ -32,6 +32,7 @@ #include <linux/of_device.h> #include <linux/of.h> #include <linux/of_irq.h> +#include <linux/pinctrl/consumer.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> #include <linux/pm_wakeirq.h> @@ -181,6 +182,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) if (bri->prepare_recovery) bri->prepare_recovery(adap); + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_gpio); /* * If we can set SDA, we will always create a STOP to ensure additional @@ -236,6 +239,8 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap) if (bri->unprepare_recovery) bri->unprepare_recovery(adap); + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_default); return ret; } @@ -251,13 +256,135 @@ int i2c_recover_bus(struct i2c_adapter *adap) } EXPORT_SYMBOL_GPL(i2c_recover_bus); -static void i2c_init_recovery(struct i2c_adapter *adap) +static void i2c_gpio_init_pinctrl_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + struct device *dev = &adap->dev; + struct 
pinctrl *p = bri->pinctrl; + + /* + * we can't change states without pinctrl, so remove the states if + * populated + */ + if (!p) { + bri->pins_default = NULL; + bri->pins_gpio = NULL; + return; + } + + if (!bri->pins_default) { + bri->pins_default = pinctrl_lookup_state(p, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(bri->pins_default)) { + dev_dbg(dev, PINCTRL_STATE_DEFAULT " state not found for GPIO recovery\n"); + bri->pins_default = NULL; + } + } + if (!bri->pins_gpio) { + bri->pins_gpio = pinctrl_lookup_state(p, "gpio"); + if (IS_ERR(bri->pins_gpio)) + bri->pins_gpio = pinctrl_lookup_state(p, "recovery"); + + if (IS_ERR(bri->pins_gpio)) { + dev_dbg(dev, "no gpio or recovery state found for GPIO recovery\n"); + bri->pins_gpio = NULL; + } + } + + /* for pinctrl state changes, we need all the information */ + if (bri->pins_default && bri->pins_gpio) { + dev_info(dev, "using pinctrl states for GPIO recovery"); + } else { + bri->pinctrl = NULL; + bri->pins_default = NULL; + bri->pins_gpio = NULL; + } +} + +static int i2c_gpio_init_generic_recovery(struct i2c_adapter *adap) +{ + struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; + struct device *dev = &adap->dev; + struct gpio_desc *gpiod; + int ret = 0; + + /* + * don't touch the recovery information if the driver is not using + * generic SCL recovery + */ + if (bri->recover_bus && bri->recover_bus != i2c_generic_scl_recovery) + return 0; + + /* + * pins might be taken as GPIO, so we should inform pinctrl about + * this and move the state to GPIO + */ + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_gpio); + + /* + * if there is incomplete or no recovery information, see if generic + * GPIO recovery is available + */ + if (!bri->scl_gpiod) { + gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN); + if (PTR_ERR(gpiod) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto cleanup_pinctrl_state; + } + if (!IS_ERR(gpiod)) { + bri->scl_gpiod = gpiod; + bri->recover_bus = i2c_generic_scl_recovery; + dev_info(dev, "using generic GPIOs for recovery\n"); + } + } + + /* SDA GPIOD line is optional, so we care about DEFER only */ + if (!bri->sda_gpiod) { + /* + * We have SCL. Pull SCL low and wait a bit so that SDA glitches + * have no effect. + */ + gpiod_direction_output(bri->scl_gpiod, 0); + udelay(10); + gpiod = devm_gpiod_get(dev, "sda", GPIOD_IN); + + /* Wait a bit in case of a SDA glitch, and then release SCL. 
*/ + udelay(10); + gpiod_direction_output(bri->scl_gpiod, 1); + + if (PTR_ERR(gpiod) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto cleanup_pinctrl_state; + } + if (!IS_ERR(gpiod)) + bri->sda_gpiod = gpiod; + } + +cleanup_pinctrl_state: + /* change the state of the pins back to their default state */ + if (bri->pinctrl) + pinctrl_select_state(bri->pinctrl, bri->pins_default); + + return ret; +} + +static int i2c_gpio_init_recovery(struct i2c_adapter *adap) +{ + i2c_gpio_init_pinctrl_recovery(adap); + return i2c_gpio_init_generic_recovery(adap); +} + +static int i2c_init_recovery(struct i2c_adapter *adap) { struct i2c_bus_recovery_info *bri = adap->bus_recovery_info; char *err_str; if (!bri) - return; + return 0; + + if (i2c_gpio_init_recovery(adap) == -EPROBE_DEFER) + return -EPROBE_DEFER; if (!bri->recover_bus) { err_str = "no recover_bus() found"; @@ -273,10 +400,7 @@ static void i2c_init_recovery(struct i2c_adapter *adap) if (gpiod_get_direction(bri->sda_gpiod) == 0) bri->set_sda = set_sda_gpio_value; } - return; - } - - if (bri->recover_bus == i2c_generic_scl_recovery) { + } else if (bri->recover_bus == i2c_generic_scl_recovery) { /* Generic SCL recovery */ if (!bri->set_scl || !bri->get_scl) { err_str = "no {get|set}_scl() found"; @@ -288,10 +412,12 @@ static void i2c_init_recovery(struct i2c_adapter *adap) } } - return; + return 0; err: dev_err(&adap->dev, "Not using recovery: %s\n", err_str); adap->bus_recovery_info = NULL; + + return -EINVAL; } static int i2c_smbus_host_notify_to_irq(const struct i2c_client *client) @@ -319,11 +445,9 @@ static int i2c_device_probe(struct device *dev) if (!client) return 0; - driver = to_i2c_driver(dev->driver); - client->irq = client->init_irq; - if (!client->irq && !driver->disable_i2c_core_irq_mapping) { + if (!client->irq) { int irq = -ENOENT; if (client->flags & I2C_CLIENT_HOST_NOTIFY) { @@ -349,6 +473,8 @@ static int i2c_device_probe(struct device *dev) client->irq = irq; } + driver = to_i2c_driver(dev->driver); + /* * An I2C ID table is not mandatory, if and only if, a suitable OF * or ACPI ID table is supplied for the probing device. 
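The generic GPIO recovery setup above relies on the kernel's error-pointer convention: devm_gpiod_get() returns either a usable descriptor or an errno encoded in the pointer, and only -EPROBE_DEFER aborts the init, while other errors just leave the optional line unset. A compact user-space model of that convention; ERR_PTR/IS_ERR/PTR_ERR are re-implemented locally to mirror the kernel macros, and fake_gpiod_get() plus the EPROBE_DEFER value are stand-ins for illustration:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO       4095
#define EPROBE_DEFER    517     /* kernel-internal errno, not in <errno.h> */

/* Local stand-ins for the kernel's error-pointer helpers. */
static void *ERR_PTR(long error)     { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Pretend GPIO lookup: success, line missing, or "try again later". */
static void *fake_gpiod_get(int outcome)
{
        static int dummy_gpio;

        switch (outcome) {
        case 0: return &dummy_gpio;
        case 1: return ERR_PTR(-ENOENT);
        default: return ERR_PTR(-EPROBE_DEFER);
        }
}

int main(void)
{
        for (int outcome = 0; outcome < 3; outcome++) {
                void *gpiod = fake_gpiod_get(outcome);

                if (PTR_ERR(gpiod) == -EPROBE_DEFER)
                        printf("outcome %d: defer probing\n", outcome);
                else if (IS_ERR(gpiod))
                        printf("outcome %d: optional line missing (%ld), carry on\n",
                               outcome, PTR_ERR(gpiod));
                else
                        printf("outcome %d: got descriptor %p\n", outcome, gpiod);
        }
        return 0;
}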
@@ -1227,7 +1353,7 @@ static int i2c_setup_host_notify_irq_domain(struct i2c_adapter *adap) if (!i2c_check_functionality(adap, I2C_FUNC_SMBUS_HOST_NOTIFY)) return 0; - domain = irq_domain_create_linear(adap->dev.fwnode, + domain = irq_domain_create_linear(adap->dev.parent->fwnode, I2C_ADDR_7BITS_COUNT, &i2c_host_notify_irq_ops, adap); if (!domain) @@ -1318,12 +1444,16 @@ static int i2c_register_adapter(struct i2c_adapter *adap) if (res) goto out_reg; - dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); - pm_runtime_no_callbacks(&adap->dev); pm_suspend_ignore_children(&adap->dev, true); pm_runtime_enable(&adap->dev); + res = i2c_init_recovery(adap); + if (res == -EPROBE_DEFER) + goto out_reg; + + dev_dbg(&adap->dev, "adapter [%s] registered\n", adap->name); + #ifdef CONFIG_I2C_COMPAT res = class_compat_create_link(i2c_adapter_compat_class, &adap->dev, adap->dev.parent); @@ -1332,8 +1462,6 @@ static int i2c_register_adapter(struct i2c_adapter *adap) "Failed to create compatibility class link\n"); #endif - i2c_init_recovery(adap); - /* create pre-declared device nodes */ of_i2c_register_devices(adap); i2c_acpi_register_devices(adap); diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c index da020acc9bbd..6ceb11cc4be1 100644 --- a/drivers/i2c/i2c-dev.c +++ b/drivers/i2c/i2c-dev.c @@ -761,8 +761,8 @@ static void __exit i2c_dev_exit(void) unregister_chrdev_region(MKDEV(I2C_MAJOR, 0), I2C_MINORS); } -MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and " - "Simon G. Vogl <simon@tk.uni-linz.ac.at>"); +MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); +MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>"); MODULE_DESCRIPTION("I2C /dev entries driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c index 593f2fd39d17..5c7ae421cacf 100644 --- a/drivers/i2c/i2c-slave-eeprom.c +++ b/drivers/i2c/i2c-slave-eeprom.c @@ -66,7 +66,7 @@ static int i2c_slave_eeprom_slave_cb(struct i2c_client *client, case I2C_SLAVE_READ_PROCESSED: /* The previous byte made it to the bus, get next one */ eeprom->buffer_idx++; - /* fallthrough */ + fallthrough; case I2C_SLAVE_READ_REQUESTED: spin_lock(&eeprom->buffer_lock); *val = eeprom->buffer[eeprom->buffer_idx & eeprom->address_mask]; diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 5e32f61a2fe4..cc6b4befde7c 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -439,7 +439,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt, * complex (and doesn't gain us much performance in most use * cases). 
*/ - npages = get_user_pages_remote(owning_process, owning_mm, + npages = get_user_pages_remote(owning_mm, user_virt, gup_num_pages, flags, local_page_list, NULL, NULL); mmap_read_unlock(owning_mm); diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index a81e14148407..f699538bdac4 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c @@ -16,7 +16,7 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) if (dev->absinfo && test_bit(src, dev->absbit)) { dev->absinfo[dst] = dev->absinfo[src]; dev->absinfo[dst].fuzz = 0; - dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst); + __set_bit(dst, dev->absbit); } } diff --git a/drivers/input/joystick/db9.c b/drivers/input/joystick/db9.c index a7bc576eb342..434d265fa2e8 100644 --- a/drivers/input/joystick/db9.c +++ b/drivers/input/joystick/db9.c @@ -247,7 +247,7 @@ static unsigned char db9_saturn_read_packet(struct parport *port, unsigned char db9_saturn_write_sub(port, type, 3, powered, 0); return data[0] = 0xe3; } - /* fall through */ + fallthrough; default: return data[0]; } @@ -267,14 +267,14 @@ static int db9_saturn_report(unsigned char id, unsigned char data[60], struct in switch (data[j]) { case 0x16: /* multi controller (analog 4 axis) */ input_report_abs(dev, db9_abs[5], data[j + 6]); - /* fall through */ + fallthrough; case 0x15: /* mission stick (analog 3 axis) */ input_report_abs(dev, db9_abs[3], data[j + 4]); input_report_abs(dev, db9_abs[4], data[j + 5]); - /* fall through */ + fallthrough; case 0x13: /* racing controller (analog 1 axis) */ input_report_abs(dev, db9_abs[2], data[j + 3]); - /* fall through */ + fallthrough; case 0x34: /* saturn keyboard (udlr ZXC ASD QE Esc) */ case 0x02: /* digital pad (digital 2 axis + buttons) */ input_report_abs(dev, db9_abs[0], !(data[j + 1] & 128) - !(data[j + 1] & 64)); @@ -368,7 +368,7 @@ static void db9_timer(struct timer_list *t) input_report_abs(dev2, ABS_X, (data & DB9_RIGHT ? 0 : 1) - (data & DB9_LEFT ? 0 : 1)); input_report_abs(dev2, ABS_Y, (data & DB9_DOWN ? 0 : 1) - (data & DB9_UP ? 
0 : 1)); input_report_key(dev2, BTN_TRIGGER, ~data & DB9_FIRE1); - /* fall through */ + fallthrough; case DB9_MULTI_0802: diff --git a/drivers/input/joystick/gamecon.c b/drivers/input/joystick/gamecon.c index e0a362be5812..88df68cc4ac6 100644 --- a/drivers/input/joystick/gamecon.c +++ b/drivers/input/joystick/gamecon.c @@ -485,7 +485,7 @@ static void gc_multi_process_packet(struct gc *gc) switch (pad->type) { case GC_MULTI2: input_report_key(dev, BTN_THUMB, s & data[5]); - /* fall through */ + fallthrough; case GC_MULTI: input_report_abs(dev, ABS_X, @@ -638,7 +638,7 @@ static void gc_psx_report_one(struct gc_pad *pad, unsigned char psx_type, input_report_key(dev, BTN_THUMBL, ~data[0] & 0x04); input_report_key(dev, BTN_THUMBR, ~data[0] & 0x02); - /* fall through */ + fallthrough; case GC_PSX_NEGCON: case GC_PSX_ANALOG: @@ -872,7 +872,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type) case GC_SNES: for (i = 4; i < 8; i++) input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]); - /* fall through */ + fallthrough; + case GC_NES: for (i = 0; i < 4; i++) input_set_capability(input_dev, EV_KEY, gc_snes_btn[i]); @@ -880,7 +881,8 @@ static int gc_setup_pad(struct gc *gc, int idx, int pad_type) case GC_MULTI2: input_set_capability(input_dev, EV_KEY, BTN_THUMB); - /* fall through */ + fallthrough; + case GC_MULTI: input_set_capability(input_dev, EV_KEY, BTN_TRIGGER); /* fall through */ diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c index 1777e68c9f02..fac91ea14f17 100644 --- a/drivers/input/joystick/sidewinder.c +++ b/drivers/input/joystick/sidewinder.c @@ -656,16 +656,19 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv) switch (i * m) { case 60: - sw->number++; /* fall through */ + sw->number++; + fallthrough; case 45: /* Ambiguous packet length */ if (j <= 40) { /* ID length less or eq 40 -> FSP */ case 43: sw->type = SW_ID_FSP; break; } - sw->number++; /* fall through */ + sw->number++; + fallthrough; case 30: - sw->number++; /* fall through */ + sw->number++; + fallthrough; case 15: sw->type = SW_ID_GP; break; @@ -681,9 +684,11 @@ static int sw_connect(struct gameport *gameport, struct gameport_driver *drv) sw->type = SW_ID_PP; break; case 66: - sw->bits = 3; /* fall through */ + sw->bits = 3; + fallthrough; case 198: - sw->length = 22; /* fall through */ + sw->length = 22; + fallthrough; case 64: sw->type = SW_ID_3DP; if (j == 160) diff --git a/drivers/input/joystick/spaceball.c b/drivers/input/joystick/spaceball.c index cf7cbcd0c29d..429411c6c0a8 100644 --- a/drivers/input/joystick/spaceball.c +++ b/drivers/input/joystick/spaceball.c @@ -146,7 +146,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio, break; } spaceball->escape = 0; - /* fall through */ + fallthrough; case 'M': case 'Q': case 'S': @@ -154,7 +154,7 @@ static irqreturn_t spaceball_interrupt(struct serio *serio, spaceball->escape = 0; data &= 0x1f; } - /* fall through */ + fallthrough; default: if (spaceball->escape) spaceball->escape = 0; @@ -220,13 +220,13 @@ static int spaceball_connect(struct serio *serio, struct serio_driver *drv) input_dev->keybit[BIT_WORD(BTN_A)] |= BIT_MASK(BTN_A) | BIT_MASK(BTN_B) | BIT_MASK(BTN_C) | BIT_MASK(BTN_MODE); - /* fall through */ + fallthrough; default: input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_2) | BIT_MASK(BTN_3) | BIT_MASK(BTN_4) | BIT_MASK(BTN_5) | BIT_MASK(BTN_6) | BIT_MASK(BTN_7) | BIT_MASK(BTN_8); - /* fall through */ + fallthrough; case SPACEBALL_3003C: 
input_dev->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_1) | BIT_MASK(BTN_8); diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c index e7d58e7f0257..eb0e9cd66bcb 100644 --- a/drivers/input/keyboard/adp5589-keys.c +++ b/drivers/input/keyboard/adp5589-keys.c @@ -1016,7 +1016,7 @@ static int adp5589_probe(struct i2c_client *client, switch (id->driver_data) { case ADP5585_02: kpad->support_row5 = true; - /* fall through */ + fallthrough; case ADP5585_01: kpad->is_adp5585 = true; kpad->var = &const_adp5585; diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c index 6ec28265771d..edc613efc158 100644 --- a/drivers/input/keyboard/atkbd.c +++ b/drivers/input/keyboard/atkbd.c @@ -1241,7 +1241,7 @@ static int atkbd_connect(struct serio *serio, struct serio_driver *drv) case SERIO_8042_XL: atkbd->translated = true; - /* Fall through */ + fallthrough; case SERIO_8042: if (serio->write) diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 53c9ff338dea..f2d4e4daa818 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -574,7 +574,6 @@ static int gpio_keys_setup_key(struct platform_device *pdev, IRQ_TYPE_EDGE_RISING : IRQ_TYPE_EDGE_FALLING; break; case EV_ACT_ANY: - /* fall through */ default: /* * For other cases, we are OK letting suspend/resume diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c index 305f0160506a..8a36d78fed63 100644 --- a/drivers/input/misc/ati_remote2.c +++ b/drivers/input/misc/ati_remote2.c @@ -68,7 +68,7 @@ static int ati_remote2_get_channel_mask(char *buffer, { pr_debug("%s()\n", __func__); - return sprintf(buffer, "0x%04x", *(unsigned int *)kp->arg); + return sprintf(buffer, "0x%04x\n", *(unsigned int *)kp->arg); } static int ati_remote2_set_mode_mask(const char *val, @@ -84,7 +84,7 @@ static int ati_remote2_get_mode_mask(char *buffer, { pr_debug("%s()\n", __func__); - return sprintf(buffer, "0x%02x", *(unsigned int *)kp->arg); + return sprintf(buffer, "0x%02x\n", *(unsigned int *)kp->arg); } static unsigned int channel_mask = ATI_REMOTE2_MAX_CHANNEL_MASK; diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c index c09b9628ad34..e413801f0491 100644 --- a/drivers/input/misc/cm109.c +++ b/drivers/input/misc/cm109.c @@ -663,12 +663,8 @@ static const struct usb_device_id cm109_usb_table[] = { static void cm109_usb_cleanup(struct cm109_dev *dev) { kfree(dev->ctl_req); - if (dev->ctl_data) - usb_free_coherent(dev->udev, USB_PKT_LEN, - dev->ctl_data, dev->ctl_dma); - if (dev->irq_data) - usb_free_coherent(dev->udev, USB_PKT_LEN, - dev->irq_data, dev->irq_dma); + usb_free_coherent(dev->udev, USB_PKT_LEN, dev->ctl_data, dev->ctl_dma); + usb_free_coherent(dev->udev, USB_PKT_LEN, dev->irq_data, dev->irq_dma); usb_free_urb(dev->urb_irq); /* parameter validation in core/urb */ usb_free_urb(dev->urb_ctl); /* parameter validation in core/urb */ diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index d8dbfc030d0f..08b9b5cdb943 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c @@ -335,7 +335,7 @@ static int ims_pcu_setup_gamepad(struct ims_pcu *pcu) err_free_mem: input_free_device(input); kfree(gamepad); - return -ENOMEM; + return error; } static void ims_pcu_destroy_gamepad(struct ims_pcu *pcu) diff --git a/drivers/input/misc/iqs269a.c b/drivers/input/misc/iqs269a.c index 6699eb160a0f..a348247d3d38 100644 --- a/drivers/input/misc/iqs269a.c +++ 
b/drivers/input/misc/iqs269a.c @@ -575,8 +575,7 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, case IQS269_LOCAL_CAP_SIZE_GLOBAL_0pF5: engine_a |= IQS269_CHx_ENG_A_LOCAL_CAP_SIZE; - - /* fall through */ + fallthrough; case IQS269_LOCAL_CAP_SIZE_GLOBAL_ONLY: engine_b |= IQS269_CHx_ENG_B_LOCAL_CAP_ENABLE; @@ -731,14 +730,12 @@ static int iqs269_parse_chan(struct iqs269_private *iqs269, iqs269->switches[i].code = val; iqs269->switches[i].enabled = true; } - - /* fall through */ + fallthrough; case IQS269_CHx_HALL_INACTIVE: if (iqs269->hall_enable) break; - - /* fall through */ + fallthrough; default: iqs269->keycode[i * IQS269_NUM_CH + reg] = val; @@ -1143,14 +1140,12 @@ static int iqs269_input_init(struct iqs269_private *iqs269) sw_code, state & BIT(j)); } - - /* fall through */ + fallthrough; case IQS269_CHx_HALL_INACTIVE: if (iqs269->hall_enable) continue; - - /* fall through */ + fallthrough; default: if (keycode != KEY_RESERVED) @@ -1273,14 +1268,12 @@ static int iqs269_report(struct iqs269_private *iqs269) input_report_switch(iqs269->keypad, sw_code, state & BIT(j)); - - /* fall through */ + fallthrough; case IQS269_CHx_HALL_INACTIVE: if (iqs269->hall_enable) continue; - - /* fall through */ + fallthrough; default: input_report_key(iqs269->keypad, keycode, diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 8ceaf7db2882..81e777a04b88 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c @@ -190,7 +190,7 @@ static int pwm_vibrator_probe(struct platform_device *pdev) default: dev_err(&pdev->dev, "Failed to request direction pwm: %d", err); - /* Fall through */ + fallthrough; case -EPROBE_DEFER: return err; diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index a1bba722b234..4ff5cd2a6d8d 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -124,7 +124,7 @@ static void xenkbd_handle_mt_event(struct xenkbd_info *info, switch (mtouch->event_type) { case XENKBD_MT_EV_DOWN: input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true); - /* fall through */ + fallthrough; case XENKBD_MT_EV_MOTION: input_report_abs(info->mtouch, ABS_MT_POSITION_X, @@ -524,7 +524,7 @@ static void xenkbd_backend_changed(struct xenbus_device *dev, case XenbusStateClosed: if (dev->state == XenbusStateClosed) break; - /* fall through - Missed the backend's CLOSING state */ + fallthrough; /* Missed the backend's CLOSING state */ case XenbusStateClosing: xenbus_frontend_closed(dev); break; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 34700eda0429..b067bfd2699c 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1929,7 +1929,7 @@ static int alps_monitor_mode(struct psmouse *psmouse, bool enable) static int alps_absolute_mode_v6(struct psmouse *psmouse) { u16 reg_val = 0x181; - int ret = -1; + int ret; /* enter monitor mode, to write the register */ if (alps_monitor_mode(psmouse, true)) diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c index 3f06e8a495d8..bfa26651c0be 100644 --- a/drivers/input/mouse/appletouch.c +++ b/drivers/input/mouse/appletouch.c @@ -458,7 +458,7 @@ static int atp_status_check(struct urb *urb) dev->info->datalen, dev->urb->actual_length); dev->overflow_warned = true; } - /* fall through */ + fallthrough; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 00e395dfc3d5..a0361f9325f8 100644 
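The input-driver hunks above and below are one mechanical conversion: the free-form /* fall through */ comments become the fallthrough; pseudo-keyword, which the compiler can verify. Below is a minimal sketch of how the keyword reads in a switch; the guard macros are an assumption for compiling this snippet outside the kernel, where <linux/compiler_attributes.h> normally supplies the definition, and the pad-type function is purely illustrative.

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)	/* no attribute support: plain no-op */
#endif

/* Hypothetical pad setup: the extended pad adds its extra buttons and then
 * deliberately falls through to pick up the basic pad's buttons as well. */
static int demo_pad_buttons(int pad_type)
{
	int buttons = 0;

	switch (pad_type) {
	case 2:			/* extended pad */
		buttons += 4;
		fallthrough;	/* also wants the basic buttons below */
	case 1:			/* basic pad */
		buttons += 4;
		break;
	default:
		break;
	}

	return buttons;
}

Unlike a comment, the attribute is recognized by the compilers' -Wimplicit-fallthrough checks (Clang in particular does not honor comment markers), which is the motivation for converting these drivers.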
--- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c @@ -1067,7 +1067,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa) return error; } - /* Fall through */ + fallthrough; case CYAPA_STATE_BL_IDLE: /* Try to get firmware version in bootloader mode. */ cyapa_gen3_bl_query_data(cyapa); @@ -1078,7 +1078,7 @@ static int cyapa_gen3_do_operational_check(struct cyapa *cyapa) return error; } - /* Fall through */ + fallthrough; case CYAPA_STATE_OP: /* * Reading query data before going back to the full mode diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index 7f012bfa2658..bb3a63d1268d 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c @@ -2554,7 +2554,7 @@ static int cyapa_gen5_do_operational_check(struct cyapa *cyapa) } cyapa->state = CYAPA_STATE_GEN5_APP; - /* fall through */ + fallthrough; case CYAPA_STATE_GEN5_APP: /* diff --git a/drivers/input/mouse/cyapa_gen6.c b/drivers/input/mouse/cyapa_gen6.c index c1b524ab4623..7eba66fbef58 100644 --- a/drivers/input/mouse/cyapa_gen6.c +++ b/drivers/input/mouse/cyapa_gen6.c @@ -680,7 +680,7 @@ static int cyapa_gen6_operational_check(struct cyapa *cyapa) } cyapa->state = CYAPA_STATE_GEN6_APP; - /* fall through */ + fallthrough; case CYAPA_STATE_GEN6_APP: /* diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h index a9074ac9364f..c75b00c45d75 100644 --- a/drivers/input/mouse/elan_i2c.h +++ b/drivers/input/mouse/elan_i2c.h @@ -26,6 +26,8 @@ #define ETP_CALIBRATE_MAX_LEN 3 +#define ETP_FEATURE_REPORT_MK BIT(0) + /* IAP Firmware handling */ #define ETP_PRODUCT_ID_FORMAT_STRING "%d.0" #define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin" @@ -33,6 +35,8 @@ #define ETP_FW_IAP_PAGE_ERR (1 << 5) #define ETP_FW_IAP_INTF_ERR (1 << 4) #define ETP_FW_PAGE_SIZE 64 +#define ETP_FW_PAGE_SIZE_128 128 +#define ETP_FW_PAGE_SIZE_512 512 #define ETP_FW_SIGNATURE_SIZE 6 struct i2c_client; @@ -55,8 +59,9 @@ struct elan_transport_ops { int (*get_baseline_data)(struct i2c_client *client, bool max_baseliune, u8 *value); - int (*get_version)(struct i2c_client *client, bool iap, u8 *version); - int (*get_sm_version)(struct i2c_client *client, + int (*get_version)(struct i2c_client *client, u8 pattern, bool iap, + u8 *version); + int (*get_sm_version)(struct i2c_client *client, u8 pattern, u16 *ic_type, u8 *version, u8 *clickpad); int (*get_checksum)(struct i2c_client *client, bool iap, u16 *csum); int (*get_product_id)(struct i2c_client *client, u16 *id); @@ -72,13 +77,18 @@ struct elan_transport_ops { int (*iap_get_mode)(struct i2c_client *client, enum tp_mode *mode); int (*iap_reset)(struct i2c_client *client); - int (*prepare_fw_update)(struct i2c_client *client); - int (*write_fw_block)(struct i2c_client *client, + int (*prepare_fw_update)(struct i2c_client *client, u16 ic_type, + u8 iap_version); + int (*write_fw_block)(struct i2c_client *client, u16 fw_page_size, const u8 *page, u16 checksum, int idx); int (*finish_fw_update)(struct i2c_client *client, struct completion *reset_done); - int (*get_report)(struct i2c_client *client, u8 *report); + int (*get_report_features)(struct i2c_client *client, u8 pattern, + unsigned int *features, + unsigned int *report_len); + int (*get_report)(struct i2c_client *client, u8 *report, + unsigned int report_len); int (*get_pressure_adjustment)(struct i2c_client *client, int *adjustment); int (*get_pattern)(struct i2c_client *client, u8 *pattern); diff --git a/drivers/input/mouse/elan_i2c_core.c 
b/drivers/input/mouse/elan_i2c_core.c index 6291fb5fa015..c599e21a8478 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c @@ -50,12 +50,14 @@ #define ETP_MAX_FINGERS 5 #define ETP_FINGER_DATA_LEN 5 #define ETP_REPORT_ID 0x5D +#define ETP_REPORT_ID2 0x60 /* High precision report */ #define ETP_TP_REPORT_ID 0x5E #define ETP_REPORT_ID_OFFSET 2 #define ETP_TOUCH_INFO_OFFSET 3 #define ETP_FINGER_DATA_OFFSET 4 #define ETP_HOVER_INFO_OFFSET 30 -#define ETP_MAX_REPORT_LEN 34 +#define ETP_MK_DATA_OFFSET 33 /* For high precision reports */ +#define ETP_MAX_REPORT_LEN 39 /* The main device structure */ struct elan_tp_data { @@ -85,11 +87,14 @@ struct elan_tp_data { u8 sm_version; u8 iap_version; u16 fw_checksum; + unsigned int report_features; + unsigned int report_len; int pressure_adjustment; u8 mode; u16 ic_type; u16 fw_validpage_count; - u16 fw_signature_address; + u16 fw_page_size; + u32 fw_signature_address; bool irq_wake; @@ -100,8 +105,8 @@ struct elan_tp_data { bool middle_button; }; -static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, - u16 *signature_address) +static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count, + u32 *signature_address, u16 *page_size) { switch (ic_type) { case 0x00: @@ -126,16 +131,37 @@ static int elan_get_fwinfo(u16 ic_type, u16 *validpage_count, case 0x10: *validpage_count = 1024; break; + case 0x11: + *validpage_count = 1280; + break; + case 0x13: + *validpage_count = 2048; + break; + case 0x14: + case 0x15: + *validpage_count = 1024; + break; default: /* unknown ic type clear value */ *validpage_count = 0; *signature_address = 0; + *page_size = 0; return -ENXIO; } *signature_address = (*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE; + if ((ic_type == 0x14 || ic_type == 0x15) && iap_version >= 2) { + *validpage_count /= 8; + *page_size = ETP_FW_PAGE_SIZE_512; + } else if (ic_type >= 0x0D && iap_version >= 1) { + *validpage_count /= 2; + *page_size = ETP_FW_PAGE_SIZE_128; + } else { + *page_size = ETP_FW_PAGE_SIZE; + } + return 0; } @@ -215,8 +241,13 @@ static int elan_query_product(struct elan_tp_data *data) if (error) return error; - error = data->ops->get_sm_version(data->client, &data->ic_type, - &data->sm_version, &data->clickpad); + error = data->ops->get_pattern(data->client, &data->pattern); + if (error) + return error; + + error = data->ops->get_sm_version(data->client, data->pattern, + &data->ic_type, &data->sm_version, + &data->clickpad); if (error) return error; @@ -312,9 +343,9 @@ static int elan_initialize(struct elan_tp_data *data) static int elan_query_device_info(struct elan_tp_data *data) { int error; - u16 ic_type; - error = data->ops->get_version(data->client, false, &data->fw_version); + error = data->ops->get_version(data->client, data->pattern, false, + &data->fw_version); if (error) return error; @@ -323,7 +354,8 @@ static int elan_query_device_info(struct elan_tp_data *data) if (error) return error; - error = data->ops->get_version(data->client, true, &data->iap_version); + error = data->ops->get_version(data->client, data->pattern, + true, &data->iap_version); if (error) return error; @@ -332,17 +364,16 @@ static int elan_query_device_info(struct elan_tp_data *data) if (error) return error; - error = data->ops->get_pattern(data->client, &data->pattern); + error = data->ops->get_report_features(data->client, data->pattern, + &data->report_features, + &data->report_len); if (error) return error; - if (data->pattern == 0x01) - ic_type = data->ic_type; - else 
- ic_type = data->iap_version; - - error = elan_get_fwinfo(ic_type, &data->fw_validpage_count, - &data->fw_signature_address); + error = elan_get_fwinfo(data->ic_type, data->iap_version, + &data->fw_validpage_count, + &data->fw_signature_address, + &data->fw_page_size); if (error) dev_warn(&data->client->dev, "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n", @@ -351,16 +382,21 @@ static int elan_query_device_info(struct elan_tp_data *data) return 0; } -static unsigned int elan_convert_resolution(u8 val) +static unsigned int elan_convert_resolution(u8 val, u8 pattern) { /* - * (value from firmware) * 10 + 790 = dpi - * + * pattern <= 0x01: + * (value from firmware) * 10 + 790 = dpi + * else + * ((value from firmware) + 3) * 100 = dpi + */ + int res = pattern <= 0x01 ? + (int)(char)val * 10 + 790 : ((int)(char)val + 3) * 100; + /* * We also have to convert dpi to dots/mm (*10/254 to avoid floating * point). */ - - return ((int)(char)val * 10 + 790) * 10 / 254; + return res * 10 / 254; } static int elan_query_device_parameters(struct elan_tp_data *data) @@ -409,8 +445,8 @@ static int elan_query_device_parameters(struct elan_tp_data *data) if (error) return error; - data->x_res = elan_convert_resolution(hw_x_res); - data->y_res = elan_convert_resolution(hw_y_res); + data->x_res = elan_convert_resolution(hw_x_res, data->pattern); + data->y_res = elan_convert_resolution(hw_y_res, data->pattern); } else { data->x_res = (data->max_x + 1) / x_mm; data->y_res = (data->max_y + 1) / y_mm; @@ -430,14 +466,14 @@ static int elan_query_device_parameters(struct elan_tp_data *data) * IAP firmware updater related routines ********************************************************** */ -static int elan_write_fw_block(struct elan_tp_data *data, +static int elan_write_fw_block(struct elan_tp_data *data, u16 page_size, const u8 *page, u16 checksum, int idx) { int retry = ETP_RETRY_COUNT; int error; do { - error = data->ops->write_fw_block(data->client, + error = data->ops->write_fw_block(data->client, page_size, page, checksum, idx); if (!error) return 0; @@ -460,21 +496,23 @@ static int __elan_update_firmware(struct elan_tp_data *data, u16 boot_page_count; u16 sw_checksum = 0, fw_checksum = 0; - error = data->ops->prepare_fw_update(client); + error = data->ops->prepare_fw_update(client, data->ic_type, + data->iap_version); if (error) return error; iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]); - boot_page_count = (iap_start_addr * 2) / ETP_FW_PAGE_SIZE; + boot_page_count = (iap_start_addr * 2) / data->fw_page_size; for (i = boot_page_count; i < data->fw_validpage_count; i++) { u16 checksum = 0; - const u8 *page = &fw->data[i * ETP_FW_PAGE_SIZE]; + const u8 *page = &fw->data[i * data->fw_page_size]; - for (j = 0; j < ETP_FW_PAGE_SIZE; j += 2) + for (j = 0; j < data->fw_page_size; j += 2) checksum += ((page[j + 1] << 8) | page[j]); - error = elan_write_fw_block(data, page, checksum, i); + error = elan_write_fw_block(data, data->fw_page_size, + page, checksum, i); if (error) { dev_err(dev, "write page %d fail: %d\n", i, error); return error; @@ -886,24 +924,22 @@ static const struct attribute_group *elan_sysfs_groups[] = { * Elan isr functions ****************************************************************** */ -static void elan_report_contact(struct elan_tp_data *data, - int contact_num, bool contact_valid, - u8 *finger_data) +static void elan_report_contact(struct elan_tp_data *data, int contact_num, + bool contact_valid, bool high_precision, + u8 *packet, 
u8 *finger_data) { struct input_dev *input = data->input; unsigned int pos_x, pos_y; - unsigned int pressure, mk_x, mk_y; - unsigned int area_x, area_y, major, minor; - unsigned int scaled_pressure; + unsigned int pressure, scaled_pressure; if (contact_valid) { - pos_x = ((finger_data[0] & 0xf0) << 4) | - finger_data[1]; - pos_y = ((finger_data[0] & 0x0f) << 8) | - finger_data[2]; - mk_x = (finger_data[3] & 0x0f); - mk_y = (finger_data[3] >> 4); - pressure = finger_data[4]; + if (high_precision) { + pos_x = get_unaligned_be16(&finger_data[0]); + pos_y = get_unaligned_be16(&finger_data[2]); + } else { + pos_x = ((finger_data[0] & 0xf0) << 4) | finger_data[1]; + pos_y = ((finger_data[0] & 0x0f) << 8) | finger_data[2]; + } if (pos_x > data->max_x || pos_y > data->max_y) { dev_dbg(input->dev.parent, @@ -913,18 +949,8 @@ static void elan_report_contact(struct elan_tp_data *data, return; } - /* - * To avoid treating large finger as palm, let's reduce the - * width x and y per trace. - */ - area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE); - area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE); - - major = max(area_x, area_y); - minor = min(area_x, area_y); - + pressure = finger_data[4]; scaled_pressure = pressure + data->pressure_adjustment; - if (scaled_pressure > ETP_MAX_PRESSURE) scaled_pressure = ETP_MAX_PRESSURE; @@ -933,16 +959,37 @@ static void elan_report_contact(struct elan_tp_data *data, input_report_abs(input, ABS_MT_POSITION_X, pos_x); input_report_abs(input, ABS_MT_POSITION_Y, data->max_y - pos_y); input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure); - input_report_abs(input, ABS_TOOL_WIDTH, mk_x); - input_report_abs(input, ABS_MT_TOUCH_MAJOR, major); - input_report_abs(input, ABS_MT_TOUCH_MINOR, minor); + + if (data->report_features & ETP_FEATURE_REPORT_MK) { + unsigned int mk_x, mk_y, area_x, area_y; + u8 mk_data = high_precision ? + packet[ETP_MK_DATA_OFFSET + contact_num] : + finger_data[3]; + + mk_x = mk_data & 0x0f; + mk_y = mk_data >> 4; + + /* + * To avoid treating large finger as palm, let's reduce + * the width x and y per trace. 
+ */ + area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE); + area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE); + + input_report_abs(input, ABS_TOOL_WIDTH, mk_x); + input_report_abs(input, ABS_MT_TOUCH_MAJOR, + max(area_x, area_y)); + input_report_abs(input, ABS_MT_TOUCH_MINOR, + min(area_x, area_y)); + } } else { input_mt_slot(input, contact_num); input_mt_report_slot_inactive(input); } } -static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) +static void elan_report_absolute(struct elan_tp_data *data, u8 *packet, + bool high_precision) { struct input_dev *input = data->input; u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET]; @@ -953,11 +1000,12 @@ static void elan_report_absolute(struct elan_tp_data *data, u8 *packet) pm_wakeup_event(&data->client->dev, 0); - hover_event = hover_info & 0x40; - for (i = 0; i < ETP_MAX_FINGERS; i++) { - contact_valid = tp_info & (1U << (3 + i)); - elan_report_contact(data, i, contact_valid, finger_data); + hover_event = hover_info & BIT(6); + for (i = 0; i < ETP_MAX_FINGERS; i++) { + contact_valid = tp_info & BIT(3 + i); + elan_report_contact(data, i, contact_valid, high_precision, + packet, finger_data); if (contact_valid) finger_data += ETP_FINGER_DATA_LEN; } @@ -1015,13 +1063,16 @@ static irqreturn_t elan_isr(int irq, void *dev_id) goto out; } - error = data->ops->get_report(data->client, report); + error = data->ops->get_report(data->client, report, data->report_len); if (error) goto out; switch (report[ETP_REPORT_ID_OFFSET]) { case ETP_REPORT_ID: - elan_report_absolute(data, report); + elan_report_absolute(data, report, false); + break; + case ETP_REPORT_ID2: + elan_report_absolute(data, report, true); break; case ETP_TP_REPORT_ID: elan_report_trackpoint(data, report); @@ -1112,7 +1163,9 @@ static int elan_setup_input_device(struct elan_tp_data *data) input_abs_set_res(input, ABS_X, data->x_res); input_abs_set_res(input, ABS_Y, data->y_res); input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); - input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0); + if (data->report_features & ETP_FEATURE_REPORT_MK) + input_set_abs_params(input, ABS_TOOL_WIDTH, + 0, ETP_FINGER_WIDTH, 0, 0); input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0); /* And MT parameters */ @@ -1122,10 +1175,12 @@ static int elan_setup_input_device(struct elan_tp_data *data) input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res); input_set_abs_params(input, ABS_MT_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); - input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, - ETP_FINGER_WIDTH * max_width, 0, 0); - input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, - ETP_FINGER_WIDTH * min_width, 0, 0); + if (data->report_features & ETP_FEATURE_REPORT_MK) { + input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, + 0, ETP_FINGER_WIDTH * max_width, 0, 0); + input_set_abs_params(input, ABS_MT_TOUCH_MINOR, + 0, ETP_FINGER_WIDTH * min_width, 0, 0); + } data->input = input; diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index 058b35b1f9a9..5a496d4ffa49 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c @@ -19,6 +19,7 @@ #include <linux/interrupt.h> #include <linux/jiffies.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/sched.h> #include <asm/unaligned.h> @@ -43,6 +44,8 @@ #define ETP_I2C_RESOLUTION_CMD 0x0108 #define ETP_I2C_PRESSURE_CMD 0x010A #define ETP_I2C_IAP_VERSION_CMD 0x0110 +#define ETP_I2C_IC_TYPE_P0_CMD 0x0110 +#define ETP_I2C_IAP_VERSION_P0_CMD 0x0111 
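Back in the elan_i2c_core.c hunk above, the reworked elan_convert_resolution() carries its own formula comment: old-pattern parts report dpi as value * 10 + 790, newer patterns as (value + 3) * 100, and either result is converted to dots/mm with * 10 / 254. A standalone, hedged restatement of that integer math with two worked values follows; the function name and sample inputs are illustrative only, and the cast through signed char stands in for the driver's (char) cast on platforms where plain char is signed.

#include <stdio.h>

/* Raw firmware value -> dpi -> dots/mm, same integer arithmetic as the hunk. */
static unsigned int demo_convert_resolution(unsigned char val, unsigned char pattern)
{
	int dpi = pattern <= 0x01 ?
		  (int)(signed char)val * 10 + 790 :
		  ((int)(signed char)val + 3) * 100;

	return dpi * 10 / 254;	/* avoid floating point, as the driver does */
}

int main(void)
{
	/* raw 0x01 on an old pattern:  1 * 10 + 790 = 800 dpi -> 31 dots/mm */
	printf("%u\n", demo_convert_resolution(0x01, 0x00));
	/* raw 0x01 on a newer pattern: (1 + 3) * 100 = 400 dpi -> 15 dots/mm */
	printf("%u\n", demo_convert_resolution(0x01, 0x02));
	return 0;
}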
#define ETP_I2C_SET_CMD 0x0300 #define ETP_I2C_POWER_CMD 0x0307 #define ETP_I2C_FW_CHECKSUM_CMD 0x030F @@ -53,8 +56,12 @@ #define ETP_I2C_CALIBRATE_CMD 0x0316 #define ETP_I2C_MAX_BASELINE_CMD 0x0317 #define ETP_I2C_MIN_BASELINE_CMD 0x0318 +#define ETP_I2C_IAP_TYPE_REG 0x0040 +#define ETP_I2C_IAP_TYPE_CMD 0x0304 #define ETP_I2C_REPORT_LEN 34 +#define ETP_I2C_REPORT_LEN_ID2 39 +#define ETP_I2C_REPORT_MAX_LEN 39 #define ETP_I2C_DESC_LENGTH 30 #define ETP_I2C_REPORT_DESC_LENGTH 158 #define ETP_I2C_INF_LENGTH 2 @@ -249,56 +256,52 @@ static int elan_i2c_get_pattern(struct i2c_client *client, u8 *pattern) dev_err(&client->dev, "failed to get pattern: %d\n", error); return error; } - *pattern = val[1]; + + /* + * Not all versions of firmware implement "get pattern" command. + * When this command is not implemented the device will respond + * with 0xFF 0xFF, which we will treat as "old" pattern 0. + */ + *pattern = val[0] == 0xFF && val[1] == 0xFF ? 0 : val[1]; return 0; } static int elan_i2c_get_version(struct i2c_client *client, - bool iap, u8 *version) + u8 pattern, bool iap, u8 *version) { int error; - u8 pattern_ver; + u16 cmd; u8 val[3]; - error = elan_i2c_get_pattern(client, &pattern_ver); - if (error) { - dev_err(&client->dev, "failed to get pattern version\n"); - return error; - } + if (!iap) + cmd = ETP_I2C_FW_VERSION_CMD; + else if (pattern == 0) + cmd = ETP_I2C_IAP_VERSION_P0_CMD; + else + cmd = ETP_I2C_IAP_VERSION_CMD; - error = elan_i2c_read_cmd(client, - iap ? ETP_I2C_IAP_VERSION_CMD : - ETP_I2C_FW_VERSION_CMD, - val); + error = elan_i2c_read_cmd(client, cmd, val); if (error) { dev_err(&client->dev, "failed to get %s version: %d\n", iap ? "IAP" : "FW", error); return error; } - if (pattern_ver == 0x01) + if (pattern >= 0x01) *version = iap ? val[1] : val[0]; else *version = val[0]; return 0; } -static int elan_i2c_get_sm_version(struct i2c_client *client, - u16 *ic_type, u8 *version, - u8 *clickpad) +static int elan_i2c_get_sm_version(struct i2c_client *client, u8 pattern, + u16 *ic_type, u8 *version, u8 *clickpad) { int error; - u8 pattern_ver; u8 val[3]; - error = elan_i2c_get_pattern(client, &pattern_ver); - if (error) { - dev_err(&client->dev, "failed to get pattern version\n"); - return error; - } - - if (pattern_ver == 0x01) { + if (pattern >= 0x01) { error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_CMD, val); if (error) { dev_err(&client->dev, "failed to get ic type: %d\n", @@ -324,7 +327,14 @@ static int elan_i2c_get_sm_version(struct i2c_client *client, return error; } *version = val[0]; - *ic_type = val[1]; + + error = elan_i2c_read_cmd(client, ETP_I2C_IC_TYPE_P0_CMD, val); + if (error) { + dev_err(&client->dev, "failed to get ic type: %d\n", + error); + return error; + } + *ic_type = val[0]; error = elan_i2c_read_cmd(client, ETP_I2C_NSM_VERSION_CMD, val); @@ -386,7 +396,7 @@ static int elan_i2c_get_max(struct i2c_client *client, return error; } - *max_x = le16_to_cpup((__le16 *)val) & 0x0fff; + *max_x = le16_to_cpup((__le16 *)val); error = elan_i2c_read_cmd(client, ETP_I2C_MAX_Y_AXIS_CMD, val); if (error) { @@ -394,7 +404,7 @@ static int elan_i2c_get_max(struct i2c_client *client, return error; } - *max_y = le16_to_cpup((__le16 *)val) & 0x0fff; + *max_y = le16_to_cpup((__le16 *)val); return 0; } @@ -507,7 +517,43 @@ static int elan_i2c_set_flash_key(struct i2c_client *client) return 0; } -static int elan_i2c_prepare_fw_update(struct i2c_client *client) +static int elan_read_write_iap_type(struct i2c_client *client) +{ + int error; + u16 constant; + u8 val[3]; + int retry 
= 3; + + do { + error = elan_i2c_write_cmd(client, ETP_I2C_IAP_TYPE_CMD, + ETP_I2C_IAP_TYPE_REG); + if (error) { + dev_err(&client->dev, + "cannot write iap type: %d\n", error); + return error; + } + + error = elan_i2c_read_cmd(client, ETP_I2C_IAP_TYPE_CMD, val); + if (error) { + dev_err(&client->dev, + "failed to read iap type register: %d\n", + error); + return error; + } + constant = le16_to_cpup((__le16 *)val); + dev_dbg(&client->dev, "iap type reg: 0x%04x\n", constant); + + if (constant == ETP_I2C_IAP_TYPE_REG) + return 0; + + } while (--retry > 0); + + dev_err(&client->dev, "cannot set iap type\n"); + return -EIO; +} + +static int elan_i2c_prepare_fw_update(struct i2c_client *client, u16 ic_type, + u8 iap_version) { struct device *dev = &client->dev; int error; @@ -547,6 +593,12 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client) return -EIO; } + if (ic_type >= 0x0D && iap_version >= 1) { + error = elan_read_write_iap_type(client); + if (error) + return error; + } + /* Set flash key again */ error = elan_i2c_set_flash_key(client); if (error) @@ -572,57 +624,64 @@ static int elan_i2c_prepare_fw_update(struct i2c_client *client) return 0; } -static int elan_i2c_write_fw_block(struct i2c_client *client, +static int elan_i2c_write_fw_block(struct i2c_client *client, u16 fw_page_size, const u8 *page, u16 checksum, int idx) { struct device *dev = &client->dev; - u8 page_store[ETP_FW_PAGE_SIZE + 4]; + u8 *page_store; u8 val[3]; u16 result; int ret, error; + page_store = kmalloc(fw_page_size + 4, GFP_KERNEL); + if (!page_store) + return -ENOMEM; + page_store[0] = ETP_I2C_IAP_REG_L; page_store[1] = ETP_I2C_IAP_REG_H; - memcpy(&page_store[2], page, ETP_FW_PAGE_SIZE); + memcpy(&page_store[2], page, fw_page_size); /* recode checksum at last two bytes */ - put_unaligned_le16(checksum, &page_store[ETP_FW_PAGE_SIZE + 2]); + put_unaligned_le16(checksum, &page_store[fw_page_size + 2]); - ret = i2c_master_send(client, page_store, sizeof(page_store)); - if (ret != sizeof(page_store)) { + ret = i2c_master_send(client, page_store, fw_page_size + 4); + if (ret != fw_page_size + 4) { error = ret < 0 ? ret : -EIO; dev_err(dev, "Failed to write page %d: %d\n", idx, error); - return error; + goto exit; } /* Wait for F/W to update one page ROM data. */ - msleep(35); + msleep(fw_page_size == ETP_FW_PAGE_SIZE_512 ? 50 : 35); error = elan_i2c_read_cmd(client, ETP_I2C_IAP_CTRL_CMD, val); if (error) { dev_err(dev, "Failed to read IAP write result: %d\n", error); - return error; + goto exit; } result = le16_to_cpup((__le16 *)val); if (result & (ETP_FW_IAP_PAGE_ERR | ETP_FW_IAP_INTF_ERR)) { dev_err(dev, "IAP reports failed write: %04hx\n", result); - return -EIO; + error = -EIO; + goto exit; } - return 0; +exit: + kfree(page_store); + return error; } static int elan_i2c_finish_fw_update(struct i2c_client *client, struct completion *completion) { struct device *dev = &client->dev; - int error; + int error = 0; int len; - u8 buffer[ETP_I2C_REPORT_LEN]; + u8 buffer[ETP_I2C_REPORT_MAX_LEN]; - len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); - if (len != ETP_I2C_REPORT_LEN) { + len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_MAX_LEN); + if (len <= 0) { error = len < 0 ? 
len : -EIO; dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", error, len); @@ -656,20 +715,31 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, return 0; } -static int elan_i2c_get_report(struct i2c_client *client, u8 *report) +static int elan_i2c_get_report_features(struct i2c_client *client, u8 pattern, + unsigned int *features, + unsigned int *report_len) +{ + *features = ETP_FEATURE_REPORT_MK; + *report_len = pattern <= 0x01 ? + ETP_I2C_REPORT_LEN : ETP_I2C_REPORT_LEN_ID2; + return 0; +} + +static int elan_i2c_get_report(struct i2c_client *client, + u8 *report, unsigned int report_len) { int len; - len = i2c_master_recv(client, report, ETP_I2C_REPORT_LEN); + len = i2c_master_recv(client, report, report_len); if (len < 0) { dev_err(&client->dev, "failed to read report data: %d\n", len); return len; } - if (len != ETP_I2C_REPORT_LEN) { + if (len != report_len) { dev_err(&client->dev, "wrong report length (%d vs %d expected)\n", - len, ETP_I2C_REPORT_LEN); + len, report_len); return -EIO; } @@ -706,5 +776,6 @@ const struct elan_transport_ops elan_i2c_ops = { .get_pattern = elan_i2c_get_pattern, + .get_report_features = elan_i2c_get_report_features, .get_report = elan_i2c_get_report, }; diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c index 8c3185d54c73..8ff823751f3b 100644 --- a/drivers/input/mouse/elan_i2c_smbus.c +++ b/drivers/input/mouse/elan_i2c_smbus.c @@ -147,7 +147,7 @@ static int elan_smbus_get_baseline_data(struct i2c_client *client, } static int elan_smbus_get_version(struct i2c_client *client, - bool iap, u8 *version) + u8 pattern, bool iap, u8 *version) { int error; u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; @@ -166,9 +166,8 @@ static int elan_smbus_get_version(struct i2c_client *client, return 0; } -static int elan_smbus_get_sm_version(struct i2c_client *client, - u16 *ic_type, u8 *version, - u8 *clickpad) +static int elan_smbus_get_sm_version(struct i2c_client *client, u8 pattern, + u16 *ic_type, u8 *version, u8 *clickpad) { int error; u8 val[I2C_SMBUS_BLOCK_MAX] = {0}; @@ -340,7 +339,8 @@ static int elan_smbus_set_flash_key(struct i2c_client *client) return 0; } -static int elan_smbus_prepare_fw_update(struct i2c_client *client) +static int elan_smbus_prepare_fw_update(struct i2c_client *client, u16 ic_type, + u8 iap_version) { struct device *dev = &client->dev; int len; @@ -414,7 +414,7 @@ static int elan_smbus_prepare_fw_update(struct i2c_client *client) } -static int elan_smbus_write_fw_block(struct i2c_client *client, +static int elan_smbus_write_fw_block(struct i2c_client *client, u16 fw_page_size, const u8 *page, u16 checksum, int idx) { struct device *dev = &client->dev; @@ -429,7 +429,7 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, */ error = i2c_smbus_write_block_data(client, ETP_SMBUS_WRITE_FW_BLOCK, - ETP_FW_PAGE_SIZE / 2, + fw_page_size / 2, page); if (error) { dev_err(dev, "Failed to write page %d (part %d): %d\n", @@ -439,8 +439,8 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, error = i2c_smbus_write_block_data(client, ETP_SMBUS_WRITE_FW_BLOCK, - ETP_FW_PAGE_SIZE / 2, - page + ETP_FW_PAGE_SIZE / 2); + fw_page_size / 2, + page + fw_page_size / 2); if (error) { dev_err(dev, "Failed to write page %d (part %d): %d\n", idx, 2, error); @@ -469,7 +469,21 @@ static int elan_smbus_write_fw_block(struct i2c_client *client, return 0; } -static int elan_smbus_get_report(struct i2c_client *client, u8 *report) +static int elan_smbus_get_report_features(struct 
i2c_client *client, u8 pattern, + unsigned int *features, + unsigned int *report_len) +{ + /* + * SMBus controllers with pattern 2 lack area info, as newer + * high-precision packets use that space for coordinates. + */ + *features = pattern <= 0x01 ? ETP_FEATURE_REPORT_MK : 0; + *report_len = ETP_SMBUS_REPORT_LEN; + return 0; +} + +static int elan_smbus_get_report(struct i2c_client *client, + u8 *report, unsigned int report_len) { int len; @@ -534,6 +548,7 @@ const struct elan_transport_ops elan_smbus_ops = { .write_fw_block = elan_smbus_write_fw_block, .finish_fw_update = elan_smbus_finish_fw_update, + .get_report_features = elan_smbus_get_report_features, .get_report = elan_smbus_get_report, .get_pattern = elan_smbus_get_pattern, }; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 2d8434b7b623..90f8765f9efc 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -383,7 +383,7 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse) */ if (packet[3] & 0x80) fingers = 4; - /* fall through */ + fallthrough; case 1: /* * byte 1: . . . . x11 x10 x9 x8 @@ -1146,7 +1146,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) case 2: __set_bit(BTN_TOOL_QUADTAP, dev->keybit); __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); - /* fall through */ + fallthrough; case 3: if (info->hw_version == 3) elantech_set_buttonpad_prop(psmouse); @@ -1877,12 +1877,10 @@ static bool elantech_use_host_notify(struct psmouse *psmouse, /* expected case */ break; case ETP_BUS_SMB_ALERT_ONLY: - /* fall-through */ case ETP_BUS_PS2_SMB_ALERT: psmouse_dbg(psmouse, "Ignoring SMBus provider through alert protocol.\n"); break; case ETP_BUS_SMB_HST_NTFY_ONLY: - /* fall-through */ case ETP_BUS_PS2_SMB_HST_NTFY: return true; default: @@ -1897,7 +1895,7 @@ static bool elantech_use_host_notify(struct psmouse *psmouse, int elantech_init_smbus(struct psmouse *psmouse) { struct elantech_device_info info; - int error = -EINVAL; + int error; psmouse_reset(psmouse); @@ -2015,7 +2013,7 @@ static int elantech_setup_ps2(struct psmouse *psmouse, int elantech_init_ps2(struct psmouse *psmouse) { struct elantech_device_info info; - int error = -EINVAL; + int error; psmouse_reset(psmouse); @@ -2036,7 +2034,7 @@ int elantech_init_ps2(struct psmouse *psmouse) int elantech_init(struct psmouse *psmouse) { struct elantech_device_info info; - int error = -EINVAL; + int error; psmouse_reset(psmouse); diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c index 72a083f3fc4a..4dc441309aac 100644 --- a/drivers/input/mouse/hgpk.c +++ b/drivers/input/mouse/hgpk.c @@ -238,7 +238,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse, /* we're not spewing, but this packet might be the start */ priv->spew_flag = MAYBE_SPEWING; - /* fall-through */ + fallthrough; case MAYBE_SPEWING: priv->spew_count++; @@ -249,7 +249,7 @@ static void hgpk_spewing_hack(struct psmouse *psmouse, /* excessive spew detected, request recalibration */ priv->spew_flag = SPEW_DETECTED; - /* fall-through */ + fallthrough; case SPEW_DETECTED: /* only recalibrate when the overall delta to the cursor diff --git a/drivers/input/mouse/navpoint.c b/drivers/input/mouse/navpoint.c index 0b75248c8380..c112980c2341 100644 --- a/drivers/input/mouse/navpoint.c +++ b/drivers/input/mouse/navpoint.c @@ -105,7 +105,7 @@ static void navpoint_packet(struct navpoint *navpoint) case 0x19: /* Module 0, Hello packet */ if ((navpoint->data[1] & 0xf0) == 0x10) break; - /* FALLTHROUGH */ + fallthrough; default: 
dev_warn(navpoint->dev, "spurious packet: data=0x%02x,0x%02x,...\n", diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 527ae0b9a191..0b4a3039f312 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp) { int type = *((unsigned int *)kp->arg); - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name); + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name); } static int __init psmouse_init(void) diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c index e99d9bf1a267..2716d2ba386a 100644 --- a/drivers/input/mouse/sentelic.c +++ b/drivers/input/mouse/sentelic.c @@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data, fsp_reg_write_enable(psmouse, false); - return count; + return retval; } PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg); @@ -794,7 +794,7 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse) /* on-pad click, filter it if necessary */ if ((ad->flags & FSPDRV_FLAG_EN_OPC) != FSPDRV_FLAG_EN_OPC) packet[0] &= ~FSP_PB0_LBTN; - /* fall through */ + fallthrough; case FSP_PKT_TYPE_NORMAL: /* normal packet */ diff --git a/drivers/input/mouse/sermouse.c b/drivers/input/mouse/sermouse.c index ea9242d53899..caa79c177c55 100644 --- a/drivers/input/mouse/sermouse.c +++ b/drivers/input/mouse/sermouse.c @@ -128,7 +128,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data) case SERIO_MS: sermouse->type = SERIO_MP; - /* fall through */ + fallthrough; case SERIO_MP: if ((data >> 2) & 3) break; /* M++ Wireless Extension packet. */ @@ -139,7 +139,7 @@ static void sermouse_process_ms(struct sermouse *sermouse, signed char data) case SERIO_MZP: case SERIO_MZPP: input_report_key(dev, BTN_SIDE, (data >> 5) & 1); - /* fall through */ + fallthrough; case SERIO_MZ: input_report_key(dev, BTN_MIDDLE, (data >> 4) & 1); diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 0dddf273afd9..d3eda48032e3 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -562,7 +562,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) str = last_str; break; } - /* fall through - report timeout */ + fallthrough; /* report timeout */ case 0xfc: case 0xfd: case 0xfe: dfl = SERIO_TIMEOUT; data = 0xfe; break; diff --git a/drivers/input/serio/libps2.c b/drivers/input/serio/libps2.c index a8c94a940a79..8a16e41f7b7f 100644 --- a/drivers/input/serio/libps2.c +++ b/drivers/input/serio/libps2.c @@ -418,7 +418,7 @@ bool ps2_handle_ack(struct ps2dev *ps2dev, u8 data) ps2dev->nak = 0; break; } - /* Fall through */ + fallthrough; default: /* * Do not signal errors if we get unexpected reply while diff --git a/drivers/input/sparse-keymap.c b/drivers/input/sparse-keymap.c index 530fd15eaeca..25bf8be6e711 100644 --- a/drivers/input/sparse-keymap.c +++ b/drivers/input/sparse-keymap.c @@ -247,7 +247,7 @@ void sparse_keymap_report_entry(struct input_dev *dev, const struct key_entry *k case KE_SW: value = ke->sw.value; - /* fall through */ + fallthrough; case KE_VSW: input_report_switch(dev, ke->sw.code, value); diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index 96d65575f75a..44bb1f69b4b2 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c @@ -676,8 +676,8 @@ static void gtco_urb_callback(struct urb *urbinfo) /* Mask out the Y tilt 
value used for pressure */ device->buffer[7] = (u8)((device->buffer[7]) & 0x7F); + fallthrough; - /* Fall thru */ case 4: /* Tilt */ input_report_abs(inputdev, ABS_TILT_X, @@ -685,8 +685,8 @@ static void gtco_urb_callback(struct urb *urbinfo) input_report_abs(inputdev, ABS_TILT_Y, sign_extend32(device->buffer[7], 6)); + fallthrough; - /* Fall thru */ case 2: case 3: /* Convert buttons, only 5 bits possible */ @@ -695,8 +695,8 @@ static void gtco_urb_callback(struct urb *urbinfo) /* We don't apply any meaning to the bitmask, just report */ input_event(inputdev, EV_MSC, MSC_SERIAL, val); + fallthrough; - /* Fall thru */ case 1: /* All reports have X and Y coords in the same place */ val = get_unaligned_le16(&device->buffer[1]); diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index 38f087404f7a..749edbdb7ffa 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -146,7 +146,7 @@ static void pegasus_parse_packet(struct pegasus *pegasus) /* xy data */ case BATTERY_LOW: dev_warn_once(&dev->dev, "Pen battery low\n"); - /* fall through */ + fallthrough; case BATTERY_NO_REPORT: case BATTERY_GOOD: diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index a2189739e30f..6b71b0aff115 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c @@ -20,6 +20,7 @@ #include <linux/i2c.h> #include <linux/input/mt.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/of.h> #include <linux/property.h> #include <linux/slab.h> @@ -129,6 +130,7 @@ struct t9_range { /* MXT_SPT_COMMSCONFIG_T18 */ #define MXT_COMMS_CTRL 0 #define MXT_COMMS_CMD 1 +#define MXT_COMMS_RETRIGEN BIT(6) /* MXT_DEBUG_DIAGNOSTIC_T37 */ #define MXT_DIAGNOSTIC_PAGEUP 0x01 @@ -308,6 +310,7 @@ struct mxt_data { struct t7_config t7_cfg; struct mxt_dbg dbg; struct gpio_desc *reset_gpio; + bool use_retrigen_workaround; /* Cached parameters from object table */ u16 T5_address; @@ -318,6 +321,7 @@ struct mxt_data { u16 T71_address; u8 T9_reportid_min; u8 T9_reportid_max; + u16 T18_address; u8 T19_reportid; u16 T44_address; u8 T100_reportid_min; @@ -1190,9 +1194,11 @@ static int mxt_acquire_irq(struct mxt_data *data) enable_irq(data->irq); - error = mxt_process_messages_until_invalid(data); - if (error) - return error; + if (data->use_retrigen_workaround) { + error = mxt_process_messages_until_invalid(data); + if (error) + return error; + } return 0; } @@ -1282,6 +1288,38 @@ static u32 mxt_calculate_crc(u8 *base, off_t start_off, off_t end_off) return crc; } +static int mxt_check_retrigen(struct mxt_data *data) +{ + struct i2c_client *client = data->client; + int error; + int val; + struct irq_data *irqd; + + data->use_retrigen_workaround = false; + + irqd = irq_get_irq_data(data->irq); + if (!irqd) + return -EINVAL; + + if (irqd_is_level_type(irqd)) + return 0; + + if (data->T18_address) { + error = __mxt_read_reg(client, + data->T18_address + MXT_COMMS_CTRL, + 1, &val); + if (error) + return error; + + if (val & MXT_COMMS_RETRIGEN) + return 0; + } + + dev_warn(&client->dev, "Enabling RETRIGEN workaround\n"); + data->use_retrigen_workaround = true; + return 0; +} + static int mxt_prepare_cfg_mem(struct mxt_data *data, struct mxt_cfg *cfg) { struct device *dev = &data->client->dev; @@ -1561,6 +1599,10 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *fw) mxt_update_crc(data, MXT_COMMAND_BACKUPNV, MXT_BACKUP_VALUE); + ret = 
mxt_check_retrigen(data); + if (ret) + goto release_mem; + ret = mxt_soft_reset(data); if (ret) goto release_mem; @@ -1604,6 +1646,7 @@ static void mxt_free_object_table(struct mxt_data *data) data->T71_address = 0; data->T9_reportid_min = 0; data->T9_reportid_max = 0; + data->T18_address = 0; data->T19_reportid = 0; data->T44_address = 0; data->T100_reportid_min = 0; @@ -1678,6 +1721,9 @@ static int mxt_parse_object_table(struct mxt_data *data, object->num_report_ids - 1; data->num_touchids = object->num_report_ids; break; + case MXT_SPT_COMMSCONFIG_T18: + data->T18_address = object->start_address; + break; case MXT_SPT_MESSAGECOUNT_T44: data->T44_address = object->start_address; break; @@ -2141,6 +2187,10 @@ static int mxt_initialize(struct mxt_data *data) if (error) return error; + error = mxt_check_retrigen(data); + if (error) + return error; + error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME, &client->dev, GFP_KERNEL, data, mxt_config_cb); diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 3a4f18d3450d..6ff81d48da86 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c @@ -288,7 +288,7 @@ static int edt_ft5x06_register_write(struct edt_ft5x06_ts_data *tsdata, wrbuf[3] = wrbuf[0] ^ wrbuf[1] ^ wrbuf[2]; return edt_ft5x06_ts_readwrite(tsdata->client, 4, wrbuf, 0, NULL); - /* fallthrough */ + case EDT_M09: case EDT_M12: case EV_FT: @@ -330,7 +330,6 @@ static int edt_ft5x06_register_read(struct edt_ft5x06_ts_data *tsdata, } break; - /* fallthrough */ case EDT_M09: case EDT_M12: case EV_FT: diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 5477a5718202..b0bd5bb079be 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -955,7 +955,7 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev) break; ts->state = ELAN_STATE_NORMAL; - /* fall through */ + fallthrough; case ELAN_STATE_NORMAL: diff --git a/drivers/input/touchscreen/elo.c b/drivers/input/touchscreen/elo.c index d6772a2c2d09..e0bacd34866a 100644 --- a/drivers/input/touchscreen/elo.c +++ b/drivers/input/touchscreen/elo.c @@ -348,7 +348,7 @@ static int elo_connect(struct serio *serio, struct serio_driver *drv) case 1: /* 6-byte protocol */ input_set_abs_params(input_dev, ABS_PRESSURE, 0, 15, 0, 0); - /* fall through */ + fallthrough; case 2: /* 4-byte protocol */ input_set_abs_params(input_dev, ABS_X, 96, 4000, 0, 0); diff --git a/drivers/input/touchscreen/exc3000.c b/drivers/input/touchscreen/exc3000.c index e007e2e8f626..a6597f026980 100644 --- a/drivers/input/touchscreen/exc3000.c +++ b/drivers/input/touchscreen/exc3000.c @@ -8,7 +8,9 @@ */ #include <linux/bitops.h> +#include <linux/delay.h> #include <linux/device.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/input/mt.h> @@ -16,6 +18,7 @@ #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/sizes.h> #include <linux/timer.h> #include <asm/unaligned.h> @@ -23,15 +26,59 @@ #define EXC3000_SLOTS_PER_FRAME 5 #define EXC3000_LEN_FRAME 66 #define EXC3000_LEN_POINT 10 -#define EXC3000_MT_EVENT 6 + +#define EXC3000_LEN_MODEL_NAME 16 +#define EXC3000_LEN_FW_VERSION 16 + +#define EXC3000_MT1_EVENT 0x06 +#define EXC3000_MT2_EVENT 0x18 + #define EXC3000_TIMEOUT_MS 100 +#define EXC3000_RESET_MS 10 +#define EXC3000_READY_MS 100 + +static const struct i2c_device_id exc3000_id[]; + +struct eeti_dev_info { + 
const char *name; + int max_xy; +}; + +enum eeti_dev_id { + EETI_EXC3000, + EETI_EXC80H60, + EETI_EXC80H84, +}; + +static struct eeti_dev_info exc3000_info[] = { + [EETI_EXC3000] = { + .name = "EETI EXC3000 Touch Screen", + .max_xy = SZ_4K - 1, + }, + [EETI_EXC80H60] = { + .name = "EETI EXC80H60 Touch Screen", + .max_xy = SZ_16K - 1, + }, + [EETI_EXC80H84] = { + .name = "EETI EXC80H84 Touch Screen", + .max_xy = SZ_16K - 1, + }, +}; + struct exc3000_data { struct i2c_client *client; + const struct eeti_dev_info *info; struct input_dev *input; struct touchscreen_properties prop; + struct gpio_desc *reset; struct timer_list timer; u8 buf[2 * EXC3000_LEN_FRAME]; + struct completion wait_event; + struct mutex query_lock; + int query_result; + char model[EXC3000_LEN_MODEL_NAME]; + char fw_version[EXC3000_LEN_FW_VERSION]; }; static void exc3000_report_slots(struct input_dev *input, @@ -58,10 +105,15 @@ static void exc3000_timer(struct timer_list *t) input_sync(data->input); } -static int exc3000_read_frame(struct i2c_client *client, u8 *buf) +static int exc3000_read_frame(struct exc3000_data *data, u8 *buf) { + struct i2c_client *client = data->client; + u8 expected_event = EXC3000_MT1_EVENT; int ret; + if (data->info->max_xy == SZ_16K - 1) + expected_event = EXC3000_MT2_EVENT; + ret = i2c_master_send(client, "'", 2); if (ret < 0) return ret; @@ -76,19 +128,21 @@ static int exc3000_read_frame(struct i2c_client *client, u8 *buf) if (ret != EXC3000_LEN_FRAME) return -EIO; - if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME || - buf[2] != EXC3000_MT_EVENT) + if (get_unaligned_le16(buf) != EXC3000_LEN_FRAME) + return -EINVAL; + + if (buf[2] != expected_event) return -EINVAL; return 0; } -static int exc3000_read_data(struct i2c_client *client, +static int exc3000_read_data(struct exc3000_data *data, u8 *buf, int *n_slots) { int error; - error = exc3000_read_frame(client, buf); + error = exc3000_read_frame(data, buf); if (error) return error; @@ -98,7 +152,7 @@ static int exc3000_read_data(struct i2c_client *client, if (*n_slots > EXC3000_SLOTS_PER_FRAME) { /* Read 2nd frame to get the rest of the contacts. 
*/ - error = exc3000_read_frame(client, buf + EXC3000_LEN_FRAME); + error = exc3000_read_frame(data, buf + EXC3000_LEN_FRAME); if (error) return error; @@ -110,6 +164,28 @@ static int exc3000_read_data(struct i2c_client *client, return 0; } +static int exc3000_query_interrupt(struct exc3000_data *data) +{ + u8 *buf = data->buf; + int error; + + error = i2c_master_recv(data->client, buf, EXC3000_LEN_FRAME); + if (error < 0) + return error; + + if (buf[0] != 'B') + return -EPROTO; + + if (buf[4] == 'E') + strlcpy(data->model, buf + 5, sizeof(data->model)); + else if (buf[4] == 'D') + strlcpy(data->fw_version, buf + 5, sizeof(data->fw_version)); + else + return -EPROTO; + + return 0; +} + static irqreturn_t exc3000_interrupt(int irq, void *dev_id) { struct exc3000_data *data = dev_id; @@ -118,7 +194,13 @@ static irqreturn_t exc3000_interrupt(int irq, void *dev_id) int slots, total_slots; int error; - error = exc3000_read_data(data->client, buf, &total_slots); + if (mutex_is_locked(&data->query_lock)) { + data->query_result = exc3000_query_interrupt(data); + complete(&data->wait_event); + goto out; + } + + error = exc3000_read_data(data, buf, &total_slots); if (error) { /* Schedule a timer to release "stuck" contacts */ mod_timer(&data->timer, @@ -145,31 +227,132 @@ out: return IRQ_HANDLED; } -static int exc3000_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static ssize_t fw_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct exc3000_data *data = i2c_get_clientdata(client); + static const u8 request[68] = { + 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'D', 0x00 + }; + int error; + + mutex_lock(&data->query_lock); + + data->query_result = -ETIMEDOUT; + reinit_completion(&data->wait_event); + + error = i2c_master_send(client, request, sizeof(request)); + if (error < 0) { + mutex_unlock(&data->query_lock); + return error; + } + + wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ); + mutex_unlock(&data->query_lock); + + if (data->query_result < 0) + return data->query_result; + + return sprintf(buf, "%s\n", data->fw_version); +} +static DEVICE_ATTR_RO(fw_version); + +static ssize_t exc3000_get_model(struct exc3000_data *data) +{ + static const u8 request[68] = { + 0x67, 0x00, 0x42, 0x00, 0x03, 0x01, 'E', 0x00 + }; + struct i2c_client *client = data->client; + int error; + + mutex_lock(&data->query_lock); + data->query_result = -ETIMEDOUT; + reinit_completion(&data->wait_event); + + error = i2c_master_send(client, request, sizeof(request)); + if (error < 0) { + mutex_unlock(&data->query_lock); + return error; + } + + wait_for_completion_interruptible_timeout(&data->wait_event, 1 * HZ); + mutex_unlock(&data->query_lock); + + return data->query_result; +} + +static ssize_t model_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct exc3000_data *data = i2c_get_clientdata(client); + int error; + + error = exc3000_get_model(data); + if (error < 0) + return error; + + return sprintf(buf, "%s\n", data->model); +} +static DEVICE_ATTR_RO(model); + +static struct attribute *sysfs_attrs[] = { + &dev_attr_fw_version.attr, + &dev_attr_model.attr, + NULL +}; + +static struct attribute_group exc3000_attribute_group = { + .attrs = sysfs_attrs +}; + +static int exc3000_probe(struct i2c_client *client) { struct exc3000_data *data; struct input_dev *input; - int error; + int error, max_xy, retry; data = 
devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->client = client; + data->info = device_get_match_data(&client->dev); + if (!data->info) { + enum eeti_dev_id eeti_dev_id = + i2c_match_id(exc3000_id, client)->driver_data; + data->info = &exc3000_info[eeti_dev_id]; + } timer_setup(&data->timer, exc3000_timer, 0); + init_completion(&data->wait_event); + mutex_init(&data->query_lock); + + data->reset = devm_gpiod_get_optional(&client->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(data->reset)) + return PTR_ERR(data->reset); + + if (data->reset) { + msleep(EXC3000_RESET_MS); + gpiod_set_value_cansleep(data->reset, 0); + msleep(EXC3000_READY_MS); + } input = devm_input_allocate_device(&client->dev); if (!input) return -ENOMEM; data->input = input; + input_set_drvdata(input, data); - input->name = "EETI EXC3000 Touch Screen"; + input->name = data->info->name; input->id.bustype = BUS_I2C; - input_set_abs_params(input, ABS_MT_POSITION_X, 0, 4095, 0, 0); - input_set_abs_params(input, ABS_MT_POSITION_Y, 0, 4095, 0, 0); + max_xy = data->info->max_xy; + input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0); + input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0); + touchscreen_parse_properties(input, true, &data->prop); error = input_mt_init_slots(input, EXC3000_NUM_SLOTS, @@ -187,18 +370,49 @@ static int exc3000_probe(struct i2c_client *client, if (error) return error; + /* + * I²C does not have built-in recovery, so retry on failure. This + * ensures that the device probe will not fail for temporary issues + * on the bus. This is not needed for the sysfs calls (userspace + * will receive the error code and can start another query) and + * cannot be done for touch events (but that only means losing one + * or two touch events anyway). 
+ */ + for (retry = 0; retry < 3; retry++) { + error = exc3000_get_model(data); + if (!error) + break; + dev_warn(&client->dev, "Retry %d get EETI EXC3000 model: %d\n", + retry + 1, error); + } + + if (error) + return error; + + dev_dbg(&client->dev, "TS Model: %s", data->model); + + i2c_set_clientdata(client, data); + + error = devm_device_add_group(&client->dev, &exc3000_attribute_group); + if (error) + return error; + return 0; } static const struct i2c_device_id exc3000_id[] = { - { "exc3000", 0 }, + { "exc3000", EETI_EXC3000 }, + { "exc80h60", EETI_EXC80H60 }, + { "exc80h84", EETI_EXC80H84 }, { } }; MODULE_DEVICE_TABLE(i2c, exc3000_id); #ifdef CONFIG_OF static const struct of_device_id exc3000_of_match[] = { - { .compatible = "eeti,exc3000" }, + { .compatible = "eeti,exc3000", .data = &exc3000_info[EETI_EXC3000] }, + { .compatible = "eeti,exc80h60", .data = &exc3000_info[EETI_EXC80H60] }, + { .compatible = "eeti,exc80h84", .data = &exc3000_info[EETI_EXC80H84] }, { } }; MODULE_DEVICE_TABLE(of, exc3000_of_match); @@ -210,7 +424,7 @@ static struct i2c_driver exc3000_driver = { .of_match_table = of_match_ptr(exc3000_of_match), }, .id_table = exc3000_id, - .probe = exc3000_probe, + .probe_new = exc3000_probe, }; module_i2c_driver(exc3000_driver); diff --git a/drivers/input/touchscreen/iqs5xx.c b/drivers/input/touchscreen/iqs5xx.c index 5875bb1099a8..3162b68f7374 100644 --- a/drivers/input/touchscreen/iqs5xx.c +++ b/drivers/input/touchscreen/iqs5xx.c @@ -289,7 +289,7 @@ static int iqs5xx_bl_cmd(struct i2c_client *client, u8 bl_cmd, u16 bl_addr) break; case IQS5XX_BL_CMD_EXEC: usleep_range(10000, 10100); - /* fall through */ + fallthrough; default: return 0; } diff --git a/drivers/input/touchscreen/max11801_ts.c b/drivers/input/touchscreen/max11801_ts.c index 1af08d3dfaf7..f15713aaebc2 100644 --- a/drivers/input/touchscreen/max11801_ts.c +++ b/drivers/input/touchscreen/max11801_ts.c @@ -130,7 +130,6 @@ static irqreturn_t max11801_ts_interrupt(int irq, void *dev_id) switch (buf[1] & EVENT_TAG_MASK) { case EVENT_INIT: - /* fall through */ case EVENT_MIDDLE: input_report_abs(data->input_dev, ABS_X, x); input_report_abs(data->input_dev, ABS_Y, y); diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index b54cc64e4ea6..df946869d4cd 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c @@ -255,7 +255,7 @@ static void stmfts_parse_events(struct stmfts_data *sdata) case STMFTS_EV_SLEEP_OUT_CONTROLLER_READY: case STMFTS_EV_STATUS: complete(&sdata->cmd_done); - /* fall through */ + fallthrough; case STMFTS_EV_NO_EVENT: case STMFTS_EV_DEBUG: diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index b622af72448f..bef5d75e306b 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -129,140 +129,8 @@ config MSM_IOMMU If unsure, say N here. -config IOMMU_PGTABLES_L2 - def_bool y - depends on MSM_IOMMU && MMU && SMP && CPU_DCACHE_DISABLE=n - -# AMD IOMMU support -config AMD_IOMMU - bool "AMD IOMMU support" - select SWIOTLB - select PCI_MSI - select PCI_ATS - select PCI_PRI - select PCI_PASID - select IOMMU_API - select IOMMU_IOVA - select IOMMU_DMA - depends on X86_64 && PCI && ACPI - help - With this option you can enable support for AMD IOMMU hardware in - your system. An IOMMU is a hardware component which provides - remapping of DMA memory accesses from devices. With an AMD IOMMU you - can isolate the DMA memory of different devices and protect the - system from misbehaving device drivers or hardware. 
- - You can find out if your system has an AMD IOMMU if you look into - your BIOS for an option to enable it or if you have an IVRS ACPI - table. - -config AMD_IOMMU_V2 - tristate "AMD IOMMU Version 2 driver" - depends on AMD_IOMMU - select MMU_NOTIFIER - help - This option enables support for the AMD IOMMUv2 features of the IOMMU - hardware. Select this option if you want to use devices that support - the PCI PRI and PASID interface. - -config AMD_IOMMU_DEBUGFS - bool "Enable AMD IOMMU internals in DebugFS" - depends on AMD_IOMMU && IOMMU_DEBUGFS - help - !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! - - DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!! - Exposes AMD IOMMU device internals in DebugFS. - - This option is -NOT- intended for production environments, and should - not generally be enabled. - -# Intel IOMMU support -config DMAR_TABLE - bool - -config INTEL_IOMMU - bool "Support for Intel IOMMU using DMA Remapping Devices" - depends on PCI_MSI && ACPI && (X86 || IA64) - select DMA_OPS - select IOMMU_API - select IOMMU_IOVA - select NEED_DMA_MAP_STATE - select DMAR_TABLE - select SWIOTLB - select IOASID - help - DMA remapping (DMAR) devices support enables independent address - translations for Direct Memory Access (DMA) from devices. - These DMA remapping devices are reported via ACPI tables - and include PCI device scope covered by these DMA - remapping devices. - -config INTEL_IOMMU_DEBUGFS - bool "Export Intel IOMMU internals in Debugfs" - depends on INTEL_IOMMU && IOMMU_DEBUGFS - help - !!!WARNING!!! - - DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! - - Expose Intel IOMMU internals in Debugfs. - - This option is -NOT- intended for production environments, and should - only be enabled for debugging Intel IOMMU. - -config INTEL_IOMMU_SVM - bool "Support for Shared Virtual Memory with Intel IOMMU" - depends on INTEL_IOMMU && X86_64 - select PCI_PASID - select PCI_PRI - select MMU_NOTIFIER - select IOASID - help - Shared Virtual Memory (SVM) provides a facility for devices - to access DMA resources through process address space by - means of a Process Address Space ID (PASID). - -config INTEL_IOMMU_DEFAULT_ON - def_bool y - prompt "Enable Intel DMA Remapping Devices by default" - depends on INTEL_IOMMU - help - Selecting this option will enable a DMAR device at boot time if - one is found. If this option is not selected, DMAR support can - be enabled by passing intel_iommu=on to the kernel. - -config INTEL_IOMMU_BROKEN_GFX_WA - bool "Workaround broken graphics drivers (going away soon)" - depends on INTEL_IOMMU && BROKEN && X86 - help - Current Graphics drivers tend to use physical address - for DMA and avoid using DMA APIs. Setting this config - option permits the IOMMU driver to set a unity map for - all the OS-visible memory. Hence the driver can continue - to use physical addresses for DMA, at least until this - option is removed in the 2.6.32 kernel. - -config INTEL_IOMMU_FLOPPY_WA - def_bool y - depends on INTEL_IOMMU && X86 - help - Floppy disk drivers are known to bypass DMA API calls - thereby failing to work when IOMMU is enabled. This - workaround will setup a 1:1 mapping for the first - 16MiB to make floppy (an ISA device) work. - -config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON - bool "Enable Intel IOMMU scalable mode by default" - depends on INTEL_IOMMU - help - Selecting this option will enable by default the scalable mode if - hardware presents the capability. 
The scalable mode is defined in - VT-d 3.0. The scalable mode capability could be checked by reading - /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option - is not selected, scalable mode support could also be enabled by - passing intel_iommu=sm_on to the kernel. If not sure, please use - the default value. +source "drivers/iommu/amd/Kconfig" +source "drivers/iommu/intel/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" @@ -276,7 +144,6 @@ config IRQ_REMAP # OMAP IOMMU support config OMAP_IOMMU bool "OMAP IOMMU Support" - depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC)) depends on ARCH_OMAP2PLUS || COMPILE_TEST select IOMMU_API help @@ -294,7 +161,6 @@ config OMAP_IOMMU_DEBUG config ROCKCHIP_IOMMU bool "Rockchip IOMMU Support" - depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC)) depends on ARCH_ROCKCHIP || COMPILE_TEST select IOMMU_API select ARM_DMA_USE_IOMMU @@ -311,7 +177,6 @@ config SUN50I_IOMMU depends on ARCH_SUNXI || COMPILE_TEST select ARM_DMA_USE_IOMMU select IOMMU_API - select IOMMU_DMA help Support for the IOMMU introduced in the Allwinner H6 SoCs. @@ -338,7 +203,7 @@ config TEGRA_IOMMU_SMMU config EXYNOS_IOMMU bool "Exynos IOMMU Support" - depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC)) + depends on ARCH_EXYNOS || COMPILE_TEST depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes select IOMMU_API select ARM_DMA_USE_IOMMU @@ -361,7 +226,6 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" - depends on ARM || IOMMU_DMA depends on ARCH_RENESAS || (COMPILE_TEST && !GENERIC_ATOMIC64) select IOMMU_API select IOMMU_IO_PGTABLE_LPAE @@ -383,7 +247,7 @@ config SPAPR_TCE_IOMMU # ARM IOMMU support config ARM_SMMU tristate "ARM Ltd. 
System MMU (SMMU) Support" - depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU + depends on ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64) select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU if ARM @@ -469,11 +333,9 @@ config S390_AP_IOMMU config MTK_IOMMU bool "MTK IOMMU Support" - depends on HAS_DMA depends on ARCH_MEDIATEK || COMPILE_TEST select ARM_DMA_USE_IOMMU select IOMMU_API - select IOMMU_DMA select IOMMU_IO_PGTABLE_ARMV7S select MEMORY select MTK_SMI diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 342190196dfb..11f1771104f3 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 +obj-y += amd/ intel/ arm/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o @@ -11,19 +12,8 @@ obj-$(CONFIG_IOASID) += ioasid.o obj-$(CONFIG_IOMMU_IOVA) += iova.o obj-$(CONFIG_OF_IOMMU) += of_iommu.o obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o -obj-$(CONFIG_AMD_IOMMU) += amd/iommu.o amd/init.o amd/quirks.o -obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd/debugfs.o -obj-$(CONFIG_AMD_IOMMU_V2) += amd/iommu_v2.o -obj-$(CONFIG_ARM_SMMU) += arm_smmu.o -arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o -obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o -obj-$(CONFIG_DMAR_TABLE) += intel/dmar.o -obj-$(CONFIG_INTEL_IOMMU) += intel/iommu.o intel/pasid.o -obj-$(CONFIG_INTEL_IOMMU) += intel/trace.o -obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += intel/debugfs.o -obj-$(CONFIG_INTEL_IOMMU_SVM) += intel/svm.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o -obj-$(CONFIG_IRQ_REMAP) += intel/irq_remapping.o irq_remapping.o +obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o obj-$(CONFIG_MTK_IOMMU) += mtk_iommu.o obj-$(CONFIG_MTK_IOMMU_V1) += mtk_iommu_v1.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o @@ -35,6 +25,5 @@ obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o obj-$(CONFIG_FSL_PAMU) += fsl_pamu.o fsl_pamu_domain.o obj-$(CONFIG_S390_IOMMU) += s390-iommu.o -obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o obj-$(CONFIG_HYPERV_IOMMU) += hyperv-iommu.o obj-$(CONFIG_VIRTIO_IOMMU) += virtio-iommu.o diff --git a/drivers/iommu/amd/Kconfig b/drivers/iommu/amd/Kconfig new file mode 100644 index 000000000000..1f061d91e0b8 --- /dev/null +++ b/drivers/iommu/amd/Kconfig @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: GPL-2.0-only +# AMD IOMMU support +config AMD_IOMMU + bool "AMD IOMMU support" + select SWIOTLB + select PCI_MSI + select PCI_ATS + select PCI_PRI + select PCI_PASID + select IOMMU_API + select IOMMU_IOVA + select IOMMU_DMA + depends on X86_64 && PCI && ACPI + help + With this option you can enable support for AMD IOMMU hardware in + your system. An IOMMU is a hardware component which provides + remapping of DMA memory accesses from devices. With an AMD IOMMU you + can isolate the DMA memory of different devices and protect the + system from misbehaving device drivers or hardware. + + You can find out if your system has an AMD IOMMU if you look into + your BIOS for an option to enable it or if you have an IVRS ACPI + table. + +config AMD_IOMMU_V2 + tristate "AMD IOMMU Version 2 driver" + depends on AMD_IOMMU + select MMU_NOTIFIER + help + This option enables support for the AMD IOMMUv2 features of the IOMMU + hardware. Select this option if you want to use devices that support + the PCI PRI and PASID interface. 
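A bit further down, the amd/iommu.c hunks replace the open-coded encode-pgtable-then-atomic64_set pairs with small set/clear helpers that pack a page-aligned table pointer and a 3-bit mode into one 64-bit word and publish it with a single atomic store. A rough standalone illustration of that packing, with entirely invented names and no claim to match the kernel helpers beyond the bit layout:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustration only: page alignment keeps the low 3 bits free for the mode. */
struct demo_domain {
	_Atomic uint64_t pt_root;
};

static void demo_set_pgtable(struct demo_domain *d, void *root, int mode)
{
	uint64_t v = (uint64_t)(uintptr_t)root | (uint64_t)(mode & 7);

	atomic_store(&d->pt_root, v); /* pointer and mode become visible together */
}

static void demo_clr_pgtable(struct demo_domain *d)
{
	atomic_store(&d->pt_root, 0);
}

int main(void)
{
	struct demo_domain dom = { 0 };
	void *table = aligned_alloc(4096, 4096); /* stand-in for a page-table page */

	if (!table)
		return 1;

	demo_set_pgtable(&dom, table, 3);

	uint64_t v = atomic_load(&dom.pt_root);
	printf("root=%p mode=%d\n", (void *)(uintptr_t)(v & ~7ULL), (int)(v & 7));

	demo_clr_pgtable(&dom);
	free(table);
	return 0;
}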
+ +config AMD_IOMMU_DEBUGFS + bool "Enable AMD IOMMU internals in DebugFS" + depends on AMD_IOMMU && IOMMU_DEBUGFS + help + !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! !!!WARNING!!! + + DO NOT ENABLE THIS OPTION UNLESS YOU REALLY, -REALLY- KNOW WHAT YOU ARE DOING!!! + Exposes AMD IOMMU device internals in DebugFS. + + This option is -NOT- intended for production environments, and should + not generally be enabled. diff --git a/drivers/iommu/amd/Makefile b/drivers/iommu/amd/Makefile new file mode 100644 index 000000000000..dc5a2fa4fd37 --- /dev/null +++ b/drivers/iommu/amd/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_AMD_IOMMU) += iommu.o init.o quirks.o +obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += debugfs.o +obj-$(CONFIG_AMD_IOMMU_V2) += iommu_v2.o diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 6ebd4825e320..958050c213f9 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -720,21 +720,14 @@ static void iommu_enable_ppr_log(struct amd_iommu *iommu) static void __init free_ppr_log(struct amd_iommu *iommu) { - if (iommu->ppr_log == NULL) - return; - free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE)); } static void free_ga_log(struct amd_iommu *iommu) { #ifdef CONFIG_IRQ_REMAP - if (iommu->ga_log) - free_pages((unsigned long)iommu->ga_log, - get_order(GA_LOG_SIZE)); - if (iommu->ga_log_tail) - free_pages((unsigned long)iommu->ga_log_tail, - get_order(8)); + free_pages((unsigned long)iommu->ga_log, get_order(GA_LOG_SIZE)); + free_pages((unsigned long)iommu->ga_log_tail, get_order(8)); #endif } @@ -1842,7 +1835,7 @@ static void print_iommu_info(void) pci_info(pdev, "Found IOMMU cap 0x%hx\n", iommu->cap_ptr); if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - pci_info(pdev, "Extended features (%#llx):\n", + pci_info(pdev, "Extended features (%#llx):", iommu->features); for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { if (iommu_feature(iommu, (1ULL << i))) diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 2f22326ee4df..ba9f3dbc5b94 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -162,7 +162,18 @@ static void amd_iommu_domain_get_pgtable(struct protection_domain *domain, pgtable->mode = pt_root & 7; /* lowest 3 bits encode pgtable mode */ } -static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode) +static void amd_iommu_domain_set_pt_root(struct protection_domain *domain, u64 root) +{ + atomic64_set(&domain->pt_root, root); +} + +static void amd_iommu_domain_clr_pt_root(struct protection_domain *domain) +{ + amd_iommu_domain_set_pt_root(domain, 0); +} + +static void amd_iommu_domain_set_pgtable(struct protection_domain *domain, + u64 *root, int mode) { u64 pt_root; @@ -170,7 +181,7 @@ static u64 amd_iommu_domain_encode_pgtable(u64 *root, int mode) pt_root = mode & 7; pt_root |= (u64)root; - return pt_root; + amd_iommu_domain_set_pt_root(domain, pt_root); } static struct iommu_dev_data *alloc_dev_data(u16 devid) @@ -1410,7 +1421,7 @@ static bool increase_address_space(struct protection_domain *domain, struct domain_pgtable pgtable; unsigned long flags; bool ret = true; - u64 *pte, root; + u64 *pte; spin_lock_irqsave(&domain->lock, flags); @@ -1438,8 +1449,7 @@ static bool increase_address_space(struct protection_domain *domain, * Device Table needs to be updated and flushed before the new root can * be published. 
*/ - root = amd_iommu_domain_encode_pgtable(pte, pgtable.mode); - atomic64_set(&domain->pt_root, root); + amd_iommu_domain_set_pgtable(domain, pte, pgtable.mode); ret = true; @@ -2319,7 +2329,7 @@ static void protection_domain_free(struct protection_domain *domain) domain_id_free(domain->id); amd_iommu_domain_get_pgtable(domain, &pgtable); - atomic64_set(&domain->pt_root, 0); + amd_iommu_domain_clr_pt_root(domain); free_pagetable(&pgtable); kfree(domain); @@ -2327,7 +2337,7 @@ static void protection_domain_free(struct protection_domain *domain) static int protection_domain_init(struct protection_domain *domain, int mode) { - u64 *pt_root = NULL, root; + u64 *pt_root = NULL; BUG_ON(mode < PAGE_MODE_NONE || mode > PAGE_MODE_6_LEVEL); @@ -2343,8 +2353,7 @@ static int protection_domain_init(struct protection_domain *domain, int mode) return -ENOMEM; } - root = amd_iommu_domain_encode_pgtable(pt_root, mode); - atomic64_set(&domain->pt_root, root); + amd_iommu_domain_set_pgtable(domain, pt_root, mode); return 0; } @@ -2713,8 +2722,8 @@ void amd_iommu_domain_direct_map(struct iommu_domain *dom) /* First save pgtable configuration*/ amd_iommu_domain_get_pgtable(domain, &pgtable); - /* Update data structure */ - atomic64_set(&domain->pt_root, 0); + /* Remove page-table from domain */ + amd_iommu_domain_clr_pt_root(domain); /* Make changes visible to IOMMUs */ update_domain(domain); diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c index e4b025c5637c..c259108ab6dd 100644 --- a/drivers/iommu/amd/iommu_v2.c +++ b/drivers/iommu/amd/iommu_v2.c @@ -495,7 +495,7 @@ static void do_fault(struct work_struct *work) if (access_error(vma, fault)) goto out; - ret = handle_mm_fault(vma, address, flags); + ret = handle_mm_fault(vma, address, flags, NULL); out: mmap_read_unlock(mm); diff --git a/drivers/iommu/arm/Makefile b/drivers/iommu/arm/Makefile new file mode 100644 index 000000000000..0f9efeab709f --- /dev/null +++ b/drivers/iommu/arm/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y += arm-smmu/ arm-smmu-v3/ diff --git a/drivers/iommu/arm/arm-smmu-v3/Makefile b/drivers/iommu/arm/arm-smmu-v3/Makefile new file mode 100644 index 000000000000..569e24e9f162 --- /dev/null +++ b/drivers/iommu/arm/arm-smmu-v3/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index f578677a5c41..7196207be7ea 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -1479,7 +1479,7 @@ static int arm_smmu_cmdq_issue_cmdlist(struct arm_smmu_device *smmu, } /* - * Try to unlock the cmq lock. This will fail if we're the last + * Try to unlock the cmdq lock. 
This will fail if we're the last * reader, in which case we can safely update cmdq->q.llq.cons */ if (!arm_smmu_cmdq_shared_tryunlock(cmdq)) { @@ -2850,7 +2850,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, if (!ops) return -ENODEV; - return ops->map(ops, iova, paddr, size, prot); + return ops->map(ops, iova, paddr, size, prot, gfp); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, diff --git a/drivers/iommu/arm/arm-smmu/Makefile b/drivers/iommu/arm/arm-smmu/Makefile new file mode 100644 index 000000000000..e240a7bcf310 --- /dev/null +++ b/drivers/iommu/arm/arm-smmu/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_QCOM_IOMMU) += qcom_iommu.o +obj-$(CONFIG_ARM_SMMU) += arm_smmu.o +arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-nvidia.o arm-smmu-qcom.o diff --git a/drivers/iommu/arm-smmu-impl.c b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c index c75b9d957b70..f4ff124a1967 100644 --- a/drivers/iommu/arm-smmu-impl.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-impl.c @@ -147,16 +147,57 @@ static const struct arm_smmu_impl arm_mmu500_impl = { .reset = arm_mmu500_reset, }; +static u64 mrvl_mmu500_readq(struct arm_smmu_device *smmu, int page, int off) +{ + /* + * Marvell Armada-AP806 erratum #582743. + * Split all the readq to double readl + */ + return hi_lo_readq_relaxed(arm_smmu_page(smmu, page) + off); +} + +static void mrvl_mmu500_writeq(struct arm_smmu_device *smmu, int page, int off, + u64 val) +{ + /* + * Marvell Armada-AP806 erratum #582743. + * Split all the writeq to double writel + */ + hi_lo_writeq_relaxed(val, arm_smmu_page(smmu, page) + off); +} + +static int mrvl_mmu500_cfg_probe(struct arm_smmu_device *smmu) +{ + + /* + * Armada-AP806 erratum #582743. + * Hide the SMMU_IDR2.PTFSv8 fields to sidestep the AArch64 + * formats altogether and allow using 32 bits access on the + * interconnect. + */ + smmu->features &= ~(ARM_SMMU_FEAT_FMT_AARCH64_4K | + ARM_SMMU_FEAT_FMT_AARCH64_16K | + ARM_SMMU_FEAT_FMT_AARCH64_64K); + + return 0; +} + +static const struct arm_smmu_impl mrvl_mmu500_impl = { + .read_reg64 = mrvl_mmu500_readq, + .write_reg64 = mrvl_mmu500_writeq, + .cfg_probe = mrvl_mmu500_cfg_probe, + .reset = arm_mmu500_reset, +}; + struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) { const struct device_node *np = smmu->dev->of_node; /* - * We will inevitably have to combine model-specific implementation - * quirks with platform-specific integration quirks, but everything - * we currently support happens to work out as straightforward - * mutually-exclusive assignments. + * Set the impl for model-specific implementation quirks first, + * such that platform integration quirks can pick it up and + * inherit from it if necessary. 
*/ switch (smmu->model) { case ARM_MMU500: @@ -168,12 +209,21 @@ struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu) break; } + /* This is implicitly MMU-400 */ if (of_property_read_bool(np, "calxeda,smmu-secure-config-access")) smmu->impl = &calxeda_impl; + if (of_device_is_compatible(np, "nvidia,tegra194-smmu")) + return nvidia_smmu_impl_init(smmu); + if (of_device_is_compatible(np, "qcom,sdm845-smmu-500") || - of_device_is_compatible(np, "qcom,sc7180-smmu-500")) + of_device_is_compatible(np, "qcom,sc7180-smmu-500") || + of_device_is_compatible(np, "qcom,sm8150-smmu-500") || + of_device_is_compatible(np, "qcom,sm8250-smmu-500")) return qcom_smmu_impl_init(smmu); + if (of_device_is_compatible(np, "marvell,ap806-smmu-500")) + smmu->impl = &mrvl_mmu500_impl; + return smmu; } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c new file mode 100644 index 000000000000..31368057e9be --- /dev/null +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-nvidia.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved. + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include "arm-smmu.h" + +/* + * Tegra194 has three ARM MMU-500 Instances. + * Two of them are used together and must be programmed identically for + * interleaved IOVA accesses across them and translates accesses from + * non-isochronous HW devices. + * Third one is used for translating accesses from isochronous HW devices. + * This implementation supports programming of the two instances that must + * be programmed identically. + * The third instance usage is through standard arm-smmu driver itself and + * is out of scope of this implementation. 
+ */ +#define NUM_SMMU_INSTANCES 2 + +struct nvidia_smmu { + struct arm_smmu_device smmu; + void __iomem *bases[NUM_SMMU_INSTANCES]; +}; + +static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu, + unsigned int inst, int page) +{ + struct nvidia_smmu *nvidia_smmu; + + nvidia_smmu = container_of(smmu, struct nvidia_smmu, smmu); + return nvidia_smmu->bases[inst] + (page << smmu->pgshift); +} + +static u32 nvidia_smmu_read_reg(struct arm_smmu_device *smmu, + int page, int offset) +{ + void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset; + + return readl_relaxed(reg); +} + +static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu, + int page, int offset, u32 val) +{ + unsigned int i; + + for (i = 0; i < NUM_SMMU_INSTANCES; i++) { + void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset; + + writel_relaxed(val, reg); + } +} + +static u64 nvidia_smmu_read_reg64(struct arm_smmu_device *smmu, + int page, int offset) +{ + void __iomem *reg = nvidia_smmu_page(smmu, 0, page) + offset; + + return readq_relaxed(reg); +} + +static void nvidia_smmu_write_reg64(struct arm_smmu_device *smmu, + int page, int offset, u64 val) +{ + unsigned int i; + + for (i = 0; i < NUM_SMMU_INSTANCES; i++) { + void __iomem *reg = nvidia_smmu_page(smmu, i, page) + offset; + + writeq_relaxed(val, reg); + } +} + +static void nvidia_smmu_tlb_sync(struct arm_smmu_device *smmu, int page, + int sync, int status) +{ + unsigned int delay; + + arm_smmu_writel(smmu, page, sync, 0); + + for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) { + unsigned int spin_cnt; + + for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { + u32 val = 0; + unsigned int i; + + for (i = 0; i < NUM_SMMU_INSTANCES; i++) { + void __iomem *reg; + + reg = nvidia_smmu_page(smmu, i, page) + status; + val |= readl_relaxed(reg); + } + + if (!(val & ARM_SMMU_sTLBGSTATUS_GSACTIVE)) + return; + + cpu_relax(); + } + + udelay(delay); + } + + dev_err_ratelimited(smmu->dev, + "TLB sync timed out -- SMMU may be deadlocked\n"); +} + +static int nvidia_smmu_reset(struct arm_smmu_device *smmu) +{ + unsigned int i; + + for (i = 0; i < NUM_SMMU_INSTANCES; i++) { + u32 val; + void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) + + ARM_SMMU_GR0_sGFSR; + + /* clear global FSR */ + val = readl_relaxed(reg); + writel_relaxed(val, reg); + } + + return 0; +} + +static irqreturn_t nvidia_smmu_global_fault_inst(int irq, + struct arm_smmu_device *smmu, + int inst) +{ + u32 gfsr, gfsynr0, gfsynr1, gfsynr2; + void __iomem *gr0_base = nvidia_smmu_page(smmu, inst, 0); + + gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR); + if (!gfsr) + return IRQ_NONE; + + gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0); + gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1); + gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2); + + dev_err_ratelimited(smmu->dev, + "Unexpected global fault, this could be serious\n"); + dev_err_ratelimited(smmu->dev, + "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n", + gfsr, gfsynr0, gfsynr1, gfsynr2); + + writel_relaxed(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR); + return IRQ_HANDLED; +} + +static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev) +{ + unsigned int inst; + irqreturn_t ret = IRQ_NONE; + struct arm_smmu_device *smmu = dev; + + for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) { + irqreturn_t irq_ret; + + irq_ret = nvidia_smmu_global_fault_inst(irq, smmu, inst); + if (irq_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + + return ret; +} + +static irqreturn_t 
nvidia_smmu_context_fault_bank(int irq, + struct arm_smmu_device *smmu, + int idx, int inst) +{ + u32 fsr, fsynr, cbfrsynra; + unsigned long iova; + void __iomem *gr1_base = nvidia_smmu_page(smmu, inst, 1); + void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx); + + fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); + if (!(fsr & ARM_SMMU_FSR_FAULT)) + return IRQ_NONE; + + fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); + iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR); + cbfrsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(idx)); + + dev_err_ratelimited(smmu->dev, + "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n", + fsr, iova, fsynr, cbfrsynra, idx); + + writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR); + return IRQ_HANDLED; +} + +static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev) +{ + int idx; + unsigned int inst; + irqreturn_t ret = IRQ_NONE; + struct arm_smmu_device *smmu; + struct iommu_domain *domain = dev; + struct arm_smmu_domain *smmu_domain; + + smmu_domain = container_of(domain, struct arm_smmu_domain, domain); + smmu = smmu_domain->smmu; + + for (inst = 0; inst < NUM_SMMU_INSTANCES; inst++) { + irqreturn_t irq_ret; + + /* + * Interrupt line is shared between all contexts. + * Check for faults across all contexts. + */ + for (idx = 0; idx < smmu->num_context_banks; idx++) { + irq_ret = nvidia_smmu_context_fault_bank(irq, smmu, + idx, inst); + if (irq_ret == IRQ_HANDLED) + ret = IRQ_HANDLED; + } + } + + return ret; +} + +static const struct arm_smmu_impl nvidia_smmu_impl = { + .read_reg = nvidia_smmu_read_reg, + .write_reg = nvidia_smmu_write_reg, + .read_reg64 = nvidia_smmu_read_reg64, + .write_reg64 = nvidia_smmu_write_reg64, + .reset = nvidia_smmu_reset, + .tlb_sync = nvidia_smmu_tlb_sync, + .global_fault = nvidia_smmu_global_fault, + .context_fault = nvidia_smmu_context_fault, +}; + +struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu) +{ + struct resource *res; + struct device *dev = smmu->dev; + struct nvidia_smmu *nvidia_smmu; + struct platform_device *pdev = to_platform_device(dev); + + nvidia_smmu = devm_kzalloc(dev, sizeof(*nvidia_smmu), GFP_KERNEL); + if (!nvidia_smmu) + return ERR_PTR(-ENOMEM); + + /* + * Copy the data from struct arm_smmu_device *smmu allocated in + * arm-smmu.c. The smmu from struct nvidia_smmu replaces the smmu + * pointer used in arm-smmu.c once this function returns. + * This is necessary to derive nvidia_smmu from smmu pointer passed + * through arm_smmu_impl function calls subsequently. + */ + nvidia_smmu->smmu = *smmu; + /* Instance 0 is ioremapped by arm-smmu.c. */ + nvidia_smmu->bases[0] = smmu->base; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) + return ERR_PTR(-ENODEV); + + nvidia_smmu->bases[1] = devm_ioremap_resource(dev, res); + if (IS_ERR(nvidia_smmu->bases[1])) + return ERR_CAST(nvidia_smmu->bases[1]); + + nvidia_smmu->smmu.impl = &nvidia_smmu_impl; + + /* + * Free the struct arm_smmu_device *smmu allocated in arm-smmu.c. + * Once this function returns, arm-smmu.c would use arm_smmu_device + * allocated as part of struct nvidia_smmu. 
+ */ + devm_kfree(dev, smmu); + + return &nvidia_smmu->smmu; +} diff --git a/drivers/iommu/arm-smmu-qcom.c b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c index be4318044f96..be4318044f96 100644 --- a/drivers/iommu/arm-smmu-qcom.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu-qcom.c diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 243bc4cb2705..09c42af9f31e 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -52,9 +52,6 @@ */ #define QCOM_DUMMY_VAL -1 -#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */ -#define TLB_SPIN_COUNT 10 - #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 @@ -673,6 +670,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, enum io_pgtable_fmt fmt; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_cfg *cfg = &smmu_domain->cfg; + irqreturn_t (*context_fault)(int irq, void *dev); mutex_lock(&smmu_domain->init_mutex); if (smmu_domain->smmu) @@ -835,7 +833,13 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, * handler seeing a half-initialised domain state. */ irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault, + + if (smmu->impl && smmu->impl->context_fault) + context_fault = smmu->impl->context_fault; + else + context_fault = arm_smmu_context_fault; + + ret = devm_request_irq(smmu->dev, irq, context_fault, IRQF_SHARED, "arm-smmu-context-fault", domain); if (ret < 0) { dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", @@ -1227,7 +1231,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; arm_smmu_rpm_get(smmu); - ret = ops->map(ops, iova, paddr, size, prot); + ret = ops->map(ops, iova, paddr, size, prot, gfp); arm_smmu_rpm_put(smmu); return ret; @@ -1728,7 +1732,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) unsigned int size; u32 id; bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; - int i; + int i, ret; dev_notice(smmu->dev, "probing hardware configuration...\n"); dev_notice(smmu->dev, "SMMUv%d with:\n", @@ -1891,6 +1895,12 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; } + if (smmu->impl && smmu->impl->cfg_probe) { + ret = smmu->impl->cfg_probe(smmu); + if (ret) + return ret; + } + /* Now we've corralled the various formats, what'll it do? 
*/ if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; @@ -1918,9 +1928,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", smmu->ipa_size, smmu->pa_size); - if (smmu->impl && smmu->impl->cfg_probe) - return smmu->impl->cfg_probe(smmu); - return 0; } @@ -1946,6 +1953,7 @@ static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,mmu-401", .data = &arm_mmu401 }, { .compatible = "arm,mmu-500", .data = &arm_mmu500 }, { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 }, + { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 }, { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 }, { }, }; @@ -2107,6 +2115,7 @@ static int arm_smmu_device_probe(struct platform_device *pdev) struct arm_smmu_device *smmu; struct device *dev = &pdev->dev; int num_irqs, i, err; + irqreturn_t (*global_fault)(int irq, void *dev); smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL); if (!smmu) { @@ -2123,10 +2132,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev) if (err) return err; - smmu = arm_smmu_impl_init(smmu); - if (IS_ERR(smmu)) - return PTR_ERR(smmu); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ioaddr = res->start; smmu->base = devm_ioremap_resource(dev, res); @@ -2138,6 +2143,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) */ smmu->numpage = resource_size(res); + smmu = arm_smmu_impl_init(smmu); + if (IS_ERR(smmu)) + return PTR_ERR(smmu); + num_irqs = 0; while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) { num_irqs++; @@ -2193,9 +2202,14 @@ static int arm_smmu_device_probe(struct platform_device *pdev) smmu->num_context_irqs = smmu->num_context_banks; } + if (smmu->impl && smmu->impl->global_fault) + global_fault = smmu->impl->global_fault; + else + global_fault = arm_smmu_global_fault; + for (i = 0; i < smmu->num_global_irqs; ++i) { err = devm_request_irq(smmu->dev, smmu->irqs[i], - arm_smmu_global_fault, + global_fault, IRQF_SHARED, "arm-smmu global fault", smmu); diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm/arm-smmu/arm-smmu.h index d172c024be61..d890a4a968e8 100644 --- a/drivers/iommu/arm-smmu.h +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h @@ -18,6 +18,7 @@ #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/io-pgtable.h> #include <linux/iommu.h> +#include <linux/irqreturn.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/types.h> @@ -236,6 +237,8 @@ enum arm_smmu_cbar_type { /* Maximum number of context banks per SMMU */ #define ARM_SMMU_MAX_CBS 128 +#define TLB_LOOP_TIMEOUT 1000000 /* 1s! 
*/ +#define TLB_SPIN_COUNT 10 /* Shared driver definitions */ enum arm_smmu_arch_version { @@ -387,6 +390,8 @@ struct arm_smmu_impl { void (*tlb_sync)(struct arm_smmu_device *smmu, int page, int sync, int status); int (*def_domain_type)(struct device *dev); + irqreturn_t (*global_fault)(int irq, void *dev); + irqreturn_t (*context_fault)(int irq, void *dev); }; static inline void __iomem *arm_smmu_page(struct arm_smmu_device *smmu, int n) @@ -450,6 +455,7 @@ static inline void arm_smmu_writeq(struct arm_smmu_device *smmu, int page, arm_smmu_writeq((s), ARM_SMMU_CB((s), (n)), (o), (v)) struct arm_smmu_device *arm_smmu_impl_init(struct arm_smmu_device *smmu); +struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu); struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu); int arm_mmu500_reset(struct arm_smmu_device *smmu); diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index d176df569af8..af6bec3ace00 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -37,14 +37,20 @@ #define SMMU_INTR_SEL_NS 0x2000 +enum qcom_iommu_clk { + CLK_IFACE, + CLK_BUS, + CLK_TBU, + CLK_NUM, +}; + struct qcom_iommu_ctx; struct qcom_iommu_dev { /* IOMMU core code handle */ struct iommu_device iommu; struct device *dev; - struct clk *iface_clk; - struct clk *bus_clk; + struct clk_bulk_data clks[CLK_NUM]; void __iomem *local_base; u32 sec_id; u8 num_ctxs; @@ -301,7 +307,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, ARM_SMMU_SCTLR_M | ARM_SMMU_SCTLR_S1_ASIDPNE | ARM_SMMU_SCTLR_CFCFG; - if (IS_ENABLED(CONFIG_BIG_ENDIAN)) + if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) reg |= ARM_SMMU_SCTLR_E; iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg); @@ -438,7 +444,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova, return -ENODEV; spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); + ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC); spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags); return ret; } @@ -613,32 +619,6 @@ static const struct iommu_ops qcom_iommu_ops = { .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, }; -static int qcom_iommu_enable_clocks(struct qcom_iommu_dev *qcom_iommu) -{ - int ret; - - ret = clk_prepare_enable(qcom_iommu->iface_clk); - if (ret) { - dev_err(qcom_iommu->dev, "Couldn't enable iface_clk\n"); - return ret; - } - - ret = clk_prepare_enable(qcom_iommu->bus_clk); - if (ret) { - dev_err(qcom_iommu->dev, "Couldn't enable bus_clk\n"); - clk_disable_unprepare(qcom_iommu->iface_clk); - return ret; - } - - return 0; -} - -static void qcom_iommu_disable_clocks(struct qcom_iommu_dev *qcom_iommu) -{ - clk_disable_unprepare(qcom_iommu->bus_clk); - clk_disable_unprepare(qcom_iommu->iface_clk); -} - static int qcom_iommu_sec_ptbl_init(struct device *dev) { size_t psize = 0; @@ -795,6 +775,7 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) struct qcom_iommu_dev *qcom_iommu; struct device *dev = &pdev->dev; struct resource *res; + struct clk *clk; int ret, max_asid = 0; /* find the max asid (which is 1:1 to ctx bank idx), so we know how @@ -817,17 +798,26 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) return PTR_ERR(qcom_iommu->local_base); } - qcom_iommu->iface_clk = devm_clk_get(dev, "iface"); - if (IS_ERR(qcom_iommu->iface_clk)) { + clk = devm_clk_get(dev, "iface"); + if (IS_ERR(clk)) { dev_err(dev, "failed to get iface clock\n"); - return 
PTR_ERR(qcom_iommu->iface_clk); + return PTR_ERR(clk); } + qcom_iommu->clks[CLK_IFACE].clk = clk; - qcom_iommu->bus_clk = devm_clk_get(dev, "bus"); - if (IS_ERR(qcom_iommu->bus_clk)) { + clk = devm_clk_get(dev, "bus"); + if (IS_ERR(clk)) { dev_err(dev, "failed to get bus clock\n"); - return PTR_ERR(qcom_iommu->bus_clk); + return PTR_ERR(clk); + } + qcom_iommu->clks[CLK_BUS].clk = clk; + + clk = devm_clk_get_optional(dev, "tbu"); + if (IS_ERR(clk)) { + dev_err(dev, "failed to get tbu clock\n"); + return PTR_ERR(clk); } + qcom_iommu->clks[CLK_TBU].clk = clk; if (of_property_read_u32(dev->of_node, "qcom,iommu-secure-id", &qcom_iommu->sec_id)) { @@ -899,14 +889,14 @@ static int __maybe_unused qcom_iommu_resume(struct device *dev) { struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev); - return qcom_iommu_enable_clocks(qcom_iommu); + return clk_bulk_prepare_enable(CLK_NUM, qcom_iommu->clks); } static int __maybe_unused qcom_iommu_suspend(struct device *dev) { struct qcom_iommu_dev *qcom_iommu = dev_get_drvdata(dev); - qcom_iommu_disable_clocks(qcom_iommu); + clk_bulk_disable_unprepare(CLK_NUM, qcom_iommu->clks); return 0; } diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 60c8a56e4a3f..bad3c0ce10cb 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -173,7 +173,7 @@ static u32 lv2ent_offset(sysmmu_iova_t iova) #define REG_V5_FAULT_AR_VA 0x070 #define REG_V5_FAULT_AW_VA 0x080 -#define has_sysmmu(dev) (dev->archdata.iommu != NULL) +#define has_sysmmu(dev) (dev_iommu_priv_get(dev) != NULL) static struct device *dma_dev; static struct kmem_cache *lv2table_kmem_cache; @@ -226,7 +226,7 @@ static const struct sysmmu_fault_info sysmmu_v5_faults[] = { }; /* - * This structure is attached to dev.archdata.iommu of the master device + * This structure is attached to dev->iommu->priv of the master device * on device add, contains a list of SYSMMU controllers defined by device tree, * which are bound to given master device. It is usually referenced by 'owner' * pointer. 
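The qcom_iommu rework above folds the separate iface/bus (and optional tbu) clock handles into a clk_bulk_data array so that resume and suspend become single clk_bulk_prepare_enable()/clk_bulk_disable_unprepare() calls. A condensed sketch of that pattern, with hypothetical "demo" names and the optional-clock and error-reporting details trimmed:

#include <linux/clk.h>
#include <linux/device.h>

enum { DEMO_CLK_IFACE, DEMO_CLK_BUS, DEMO_CLK_NUM };

struct demo_iommu {
	struct device *dev;
	struct clk_bulk_data clks[DEMO_CLK_NUM];
};

static int demo_clks_init(struct demo_iommu *di)
{
	di->clks[DEMO_CLK_IFACE].id = "iface";
	di->clks[DEMO_CLK_BUS].id = "bus";

	/* looks up both clocks by name and fills in the .clk members */
	return devm_clk_bulk_get(di->dev, DEMO_CLK_NUM, di->clks);
}

static int demo_resume(struct demo_iommu *di)
{
	return clk_bulk_prepare_enable(DEMO_CLK_NUM, di->clks);
}

static void demo_suspend(struct demo_iommu *di)
{
	clk_bulk_disable_unprepare(DEMO_CLK_NUM, di->clks);
}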
@@ -670,7 +670,7 @@ static int __maybe_unused exynos_sysmmu_suspend(struct device *dev) struct device *master = data->master; if (master) { - struct exynos_iommu_owner *owner = master->archdata.iommu; + struct exynos_iommu_owner *owner = dev_iommu_priv_get(master); mutex_lock(&owner->rpm_lock); if (data->domain) { @@ -688,7 +688,7 @@ static int __maybe_unused exynos_sysmmu_resume(struct device *dev) struct device *master = data->master; if (master) { - struct exynos_iommu_owner *owner = master->archdata.iommu; + struct exynos_iommu_owner *owner = dev_iommu_priv_get(master); mutex_lock(&owner->rpm_lock); if (data->domain) { @@ -721,7 +721,7 @@ static struct platform_driver exynos_sysmmu_driver __refdata = { } }; -static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) +static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) { dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent), DMA_TO_DEVICE); @@ -837,8 +837,8 @@ static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain) static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, struct device *dev) { - struct exynos_iommu_owner *owner = dev->archdata.iommu; struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); + struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); phys_addr_t pagetable = virt_to_phys(domain->pgtable); struct sysmmu_drvdata *data, *next; unsigned long flags; @@ -875,8 +875,8 @@ static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain, struct device *dev) { - struct exynos_iommu_owner *owner = dev->archdata.iommu; struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); + struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); struct sysmmu_drvdata *data; phys_addr_t pagetable = virt_to_phys(domain->pgtable); unsigned long flags; @@ -933,7 +933,7 @@ static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, if (!pent) return ERR_PTR(-ENOMEM); - update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); + exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); kmemleak_ignore(pent); *pgcounter = NUM_LV2ENTRIES; handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE, @@ -994,7 +994,7 @@ static int lv1set_section(struct exynos_iommu_domain *domain, *pgcnt = 0; } - update_pte(sent, mk_lv1ent_sect(paddr, prot)); + exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot)); spin_lock(&domain->lock); if (lv1ent_page_zero(sent)) { @@ -1018,7 +1018,7 @@ static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, if (WARN_ON(!lv2ent_fault(pent))) return -EADDRINUSE; - update_pte(pent, mk_lv2ent_spage(paddr, prot)); + exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot)); *pgcnt -= 1; } else { /* size == LPAGE_SIZE */ int i; @@ -1150,7 +1150,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, } /* workaround for h/w bug in System MMU v3.3 */ - update_pte(ent, ZERO_LV2LINK); + exynos_iommu_set_pte(ent, ZERO_LV2LINK); size = SECT_SIZE; goto done; } @@ -1171,7 +1171,7 @@ static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, } if (lv2ent_small(ent)) { - update_pte(ent, 0); + exynos_iommu_set_pte(ent, 0); size = SPAGE_SIZE; domain->lv2entcnt[lv1ent_offset(iova)] += 1; goto done; @@ -1237,7 +1237,7 @@ static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain, static struct iommu_device *exynos_iommu_probe_device(struct device *dev) { - struct exynos_iommu_owner 
*owner = dev->archdata.iommu; + struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); struct sysmmu_drvdata *data; if (!has_sysmmu(dev)) @@ -1263,7 +1263,7 @@ static struct iommu_device *exynos_iommu_probe_device(struct device *dev) static void exynos_iommu_release_device(struct device *dev) { - struct exynos_iommu_owner *owner = dev->archdata.iommu; + struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); struct sysmmu_drvdata *data; if (!has_sysmmu(dev)) @@ -1287,8 +1287,8 @@ static void exynos_iommu_release_device(struct device *dev) static int exynos_iommu_of_xlate(struct device *dev, struct of_phandle_args *spec) { - struct exynos_iommu_owner *owner = dev->archdata.iommu; struct platform_device *sysmmu = of_find_device_by_node(spec->np); + struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev); struct sysmmu_drvdata *data, *entry; if (!sysmmu) @@ -1305,7 +1305,7 @@ static int exynos_iommu_of_xlate(struct device *dev, INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); - dev->archdata.iommu = owner; + dev_iommu_priv_set(dev, owner); } list_for_each_entry(entry, &owner->controllers, owner_node) diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c index cde281b97afa..099a11a35fb9 100644 --- a/drivers/iommu/fsl_pamu.c +++ b/drivers/iommu/fsl_pamu.c @@ -1174,10 +1174,7 @@ error: if (irq != NO_IRQ) free_irq(irq, data); - if (data) { - memset(data, 0, sizeof(struct pamu_isr_data)); - kfree(data); - } + kzfree(data); if (pamu_regs) iounmap(pamu_regs); diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 928d37771ece..b2110767caf4 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c @@ -323,7 +323,7 @@ static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) pamu_disable_liodn(info->liodn); spin_unlock_irqrestore(&iommu_lock, flags); spin_lock_irqsave(&device_domain_lock, flags); - info->dev->archdata.iommu_domain = NULL; + dev_iommu_priv_set(info->dev, NULL); kmem_cache_free(iommu_devinfo_cache, info); spin_unlock_irqrestore(&device_domain_lock, flags); } @@ -352,7 +352,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d * Check here if the device is already attached to domain or not. * If the device is already attached to a domain detach it. 
*/ - old_domain_info = dev->archdata.iommu_domain; + old_domain_info = dev_iommu_priv_get(dev); if (old_domain_info && old_domain_info->domain != dma_domain) { spin_unlock_irqrestore(&device_domain_lock, flags); detach_device(dev, old_domain_info->domain); @@ -371,8 +371,8 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d * the info for the first LIODN as all * LIODNs share the same domain */ - if (!dev->archdata.iommu_domain) - dev->archdata.iommu_domain = info; + if (!dev_iommu_priv_get(dev)) + dev_iommu_priv_set(dev, info); spin_unlock_irqrestore(&device_domain_lock, flags); } diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig new file mode 100644 index 000000000000..5337ee1584b0 --- /dev/null +++ b/drivers/iommu/intel/Kconfig @@ -0,0 +1,87 @@ +# SPDX-License-Identifier: GPL-2.0-only +# Intel IOMMU support +config DMAR_TABLE + bool + +config INTEL_IOMMU + bool "Support for Intel IOMMU using DMA Remapping Devices" + depends on PCI_MSI && ACPI && (X86 || IA64) + select DMA_OPS + select IOMMU_API + select IOMMU_IOVA + select NEED_DMA_MAP_STATE + select DMAR_TABLE + select SWIOTLB + select IOASID + help + DMA remapping (DMAR) devices support enables independent address + translations for Direct Memory Access (DMA) from devices. + These DMA remapping devices are reported via ACPI tables + and include PCI device scope covered by these DMA + remapping devices. + +config INTEL_IOMMU_DEBUGFS + bool "Export Intel IOMMU internals in Debugfs" + depends on INTEL_IOMMU && IOMMU_DEBUGFS + help + !!!WARNING!!! + + DO NOT ENABLE THIS OPTION UNLESS YOU REALLY KNOW WHAT YOU ARE DOING!!! + + Expose Intel IOMMU internals in Debugfs. + + This option is -NOT- intended for production environments, and should + only be enabled for debugging Intel IOMMU. + +config INTEL_IOMMU_SVM + bool "Support for Shared Virtual Memory with Intel IOMMU" + depends on INTEL_IOMMU && X86_64 + select PCI_PASID + select PCI_PRI + select MMU_NOTIFIER + select IOASID + help + Shared Virtual Memory (SVM) provides a facility for devices + to access DMA resources through process address space by + means of a Process Address Space ID (PASID). + +config INTEL_IOMMU_DEFAULT_ON + def_bool y + prompt "Enable Intel DMA Remapping Devices by default" + depends on INTEL_IOMMU + help + Selecting this option will enable a DMAR device at boot time if + one is found. If this option is not selected, DMAR support can + be enabled by passing intel_iommu=on to the kernel. + +config INTEL_IOMMU_BROKEN_GFX_WA + bool "Workaround broken graphics drivers (going away soon)" + depends on INTEL_IOMMU && BROKEN && X86 + help + Current Graphics drivers tend to use physical address + for DMA and avoid using DMA APIs. Setting this config + option permits the IOMMU driver to set a unity map for + all the OS-visible memory. Hence the driver can continue + to use physical addresses for DMA, at least until this + option is removed in the 2.6.32 kernel. + +config INTEL_IOMMU_FLOPPY_WA + def_bool y + depends on INTEL_IOMMU && X86 + help + Floppy disk drivers are known to bypass DMA API calls + thereby failing to work when IOMMU is enabled. This + workaround will setup a 1:1 mapping for the first + 16MiB to make floppy (an ISA device) work. + +config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON + bool "Enable Intel IOMMU scalable mode by default" + depends on INTEL_IOMMU + help + Selecting this option will enable by default the scalable mode if + hardware presents the capability. The scalable mode is defined in + VT-d 3.0. 
The scalable mode capability could be checked by reading + /sys/devices/virtual/iommu/dmar*/intel-iommu/ecap. If this option + is not selected, scalable mode support could also be enabled by + passing intel_iommu=sm_on to the kernel. If not sure, please use + the default value. diff --git a/drivers/iommu/intel/Makefile b/drivers/iommu/intel/Makefile new file mode 100644 index 000000000000..fb8e1e8c8029 --- /dev/null +++ b/drivers/iommu/intel/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_DMAR_TABLE) += dmar.o +obj-$(CONFIG_INTEL_IOMMU) += iommu.o pasid.o +obj-$(CONFIG_INTEL_IOMMU) += trace.o +obj-$(CONFIG_INTEL_IOMMU_DEBUGFS) += debugfs.o +obj-$(CONFIG_INTEL_IOMMU_SVM) += svm.o +obj-$(CONFIG_IRQ_REMAP) += irq_remapping.o diff --git a/drivers/iommu/intel/debugfs.c b/drivers/iommu/intel/debugfs.c index cf1ebb98e418..efea7f02abd9 100644 --- a/drivers/iommu/intel/debugfs.c +++ b/drivers/iommu/intel/debugfs.c @@ -15,7 +15,7 @@ #include <asm/irq_remapping.h> -#include "intel-pasid.h" +#include "pasid.h" struct tbl_walk { u16 bus; diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 683b812c5c47..93e6345f3414 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1102,6 +1102,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd) } drhd->iommu = iommu; + iommu->drhd = drhd; return 0; @@ -1438,8 +1439,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, /* PASID-based device IOTLB Invalidate */ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, - u32 pasid, u16 qdep, u64 addr, - unsigned int size_order, u64 granu) + u32 pasid, u16 qdep, u64 addr, unsigned int size_order) { unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1); struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; @@ -1447,7 +1447,6 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, desc.qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) | QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid); - desc.qw1 = QI_DEV_EIOTLB_GLOB(granu); /* * If S bit is 0, we only flush a single page. If S bit is set, @@ -1458,9 +1457,26 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, * Max Invs Pending (MIP) is set to 0 for now until we have DIT in * ECAP. */ - desc.qw1 |= addr & ~mask; - if (size_order) + if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0)) + pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n", + addr, size_order); + + /* Take page address */ + desc.qw1 = QI_DEV_EIOTLB_ADDR(addr); + + if (size_order) { + /* + * Existing 0s in address below size_order may be the least + * significant bit, we must set them to 1s to avoid having + * smaller size than desired. 
+ */ + desc.qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1, + VTD_PAGE_SHIFT); + /* Clear size_order bit to indicate size */ + desc.qw1 &= ~mask; + /* Set the S bit to indicate flushing more than 1 page */ desc.qw1 |= QI_DEV_EIOTLB_SIZE; + } qi_submit_sync(iommu, &desc, 1, 0); } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index c29fd0991857..e9864e52b0e9 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -48,7 +48,7 @@ #include <trace/events/intel_iommu.h> #include "../irq_remapping.h" -#include "intel-pasid.h" +#include "pasid.h" #define ROOT_SIZE VTD_PAGE_SIZE #define CONTEXT_SIZE VTD_PAGE_SIZE @@ -356,6 +356,7 @@ static int intel_iommu_strict; static int intel_iommu_superpage = 1; static int iommu_identity_mapping; static int intel_no_bounce; +static int iommu_skip_te_disable; #define IDENTMAP_GFX 2 #define IDENTMAP_AZALIA 4 @@ -372,7 +373,7 @@ struct device_domain_info *get_domain_info(struct device *dev) if (!dev) return NULL; - info = dev->archdata.iommu; + info = dev_iommu_priv_get(dev); if (unlikely(info == DUMMY_DEVICE_DOMAIN_INFO || info == DEFER_DEVICE_DOMAIN_INFO)) return NULL; @@ -743,12 +744,12 @@ struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus, static int iommu_dummy(struct device *dev) { - return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO; + return dev_iommu_priv_get(dev) == DUMMY_DEVICE_DOMAIN_INFO; } static bool attach_deferred(struct device *dev) { - return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO; + return dev_iommu_priv_get(dev) == DEFER_DEVICE_DOMAIN_INFO; } /** @@ -778,16 +779,16 @@ is_downstream_to_pci_bridge(struct device *dev, struct device *bridge) return false; } -static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) +struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn) { struct dmar_drhd_unit *drhd = NULL; + struct pci_dev *pdev = NULL; struct intel_iommu *iommu; struct device *tmp; - struct pci_dev *pdev = NULL; u16 segment = 0; int i; - if (iommu_dummy(dev)) + if (!dev || iommu_dummy(dev)) return NULL; if (dev_is_pci(dev)) { @@ -818,8 +819,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (pdev && pdev->is_virtfn) goto got_pdev; - *bus = drhd->devices[i].bus; - *devfn = drhd->devices[i].devfn; + if (bus && devfn) { + *bus = drhd->devices[i].bus; + *devfn = drhd->devices[i].devfn; + } goto out; } @@ -829,8 +832,10 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf if (pdev && drhd->include_all) { got_pdev: - *bus = pdev->bus->number; - *devfn = pdev->devfn; + if (bus && devfn) { + *bus = pdev->bus->number; + *devfn = pdev->devfn; + } goto out; } } @@ -1629,6 +1634,10 @@ static void iommu_disable_translation(struct intel_iommu *iommu) u32 sts; unsigned long flag; + if (iommu_skip_te_disable && iommu->drhd->gfx_dedicated && + (cap_read_drain(iommu->cap) || cap_write_drain(iommu->cap))) + return; + raw_spin_lock_irqsave(&iommu->register_lock, flag); iommu->gcmd &= ~DMA_GCMD_TE; writel(iommu->gcmd, iommu->reg + DMAR_GCMD_REG); @@ -2420,7 +2429,7 @@ static inline void unlink_domain_info(struct device_domain_info *info) list_del(&info->link); list_del(&info->global); if (info->dev) - info->dev->archdata.iommu = NULL; + dev_iommu_priv_set(info->dev, NULL); } static void domain_remove_dev_info(struct dmar_domain *domain) @@ -2453,7 +2462,7 @@ static void do_deferred_attach(struct device *dev) { struct iommu_domain *domain; - dev->archdata.iommu = 
NULL; + dev_iommu_priv_set(dev, NULL); domain = iommu_get_domain_for_dev(dev); if (domain) intel_iommu_attach_device(domain, dev); @@ -2560,7 +2569,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, } if (info->ats_supported && ecap_prs(iommu->ecap) && - pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI)) + pci_pri_supported(pdev)) info->pri_supported = 1; } } @@ -2599,7 +2608,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, list_add(&info->link, &domain->devices); list_add(&info->global, &device_domain_list); if (dev) - dev->archdata.iommu = info; + dev_iommu_priv_set(dev, info); spin_unlock_irqrestore(&device_domain_lock, flags); /* PASID table is mandatory for a PCI device in scalable mode. */ @@ -4004,7 +4013,7 @@ static void quirk_ioat_snb_local_iommu(struct pci_dev *pdev) if (!drhd || drhd->reg_base_addr - vtbar != 0xa000) { pr_warn_once(FW_BUG "BIOS assigned incorrect VT-d unit for Intel(R) QuickData Technology device\n"); add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK); - pdev->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + dev_iommu_priv_set(&pdev->dev, DUMMY_DEVICE_DOMAIN_INFO); } } DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB, quirk_ioat_snb_local_iommu); @@ -4039,11 +4048,12 @@ static void __init init_no_remapping_devices(void) /* This IOMMU has *only* gfx devices. Either bypass it or set the gfx_mapped flag, as appropriate */ + drhd->gfx_dedicated = 1; if (!dmar_map_gfx) { drhd->ignored = 1; for_each_active_dev_scope(drhd->devices, drhd->devices_cnt, i, dev) - dev->archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; + dev_iommu_priv_set(dev, DUMMY_DEVICE_DOMAIN_INFO); } } } @@ -4738,12 +4748,12 @@ const struct attribute_group *intel_iommu_groups[] = { NULL, }; -static inline bool has_untrusted_dev(void) +static inline bool has_external_pci(void) { struct pci_dev *pdev = NULL; for_each_pci_dev(pdev) - if (pdev->untrusted) + if (pdev->external_facing) return true; return false; @@ -4751,7 +4761,7 @@ static inline bool has_untrusted_dev(void) static int __init platform_optin_force_iommu(void) { - if (!dmar_platform_optin() || no_platform_optin || !has_untrusted_dev()) + if (!dmar_platform_optin() || no_platform_optin || !has_external_pci()) return 0; if (no_iommu || dmar_disabled) @@ -5146,11 +5156,10 @@ static int aux_domain_add_dev(struct dmar_domain *domain, struct device *dev) { int ret; - u8 bus, devfn; unsigned long flags; struct intel_iommu *iommu; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return -ENODEV; @@ -5236,9 +5245,8 @@ static int prepare_domain_attach_device(struct iommu_domain *domain, struct dmar_domain *dmar_domain = to_dmar_domain(domain); struct intel_iommu *iommu; int addr_width; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return -ENODEV; @@ -5416,7 +5424,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, sid = PCI_DEVID(bus, devfn); /* Size is only valid in address selective invalidation */ - if (inv_info->granularity != IOMMU_INV_GRANU_PASID) + if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) size = to_vtd_size(inv_info->addr_info.granule_size, inv_info->addr_info.nb_granules); @@ -5425,6 +5433,7 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, IOMMU_CACHE_INV_TYPE_NR) { int granu = 0; u64 pasid = 0; + u64 addr = 0; granu = to_vtd_granularity(cache_type, 
inv_info->granularity); if (granu == -EINVAL) { @@ -5446,13 +5455,12 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, switch (BIT(cache_type)) { case IOMMU_CACHE_INV_TYPE_IOTLB: + /* HW will ignore LSB bits based on address mask */ if (inv_info->granularity == IOMMU_INV_GRANU_ADDR && size && (inv_info->addr_info.addr & ((BIT(VTD_PAGE_SHIFT + size)) - 1))) { - pr_err_ratelimited("Address out of range, 0x%llx, size order %llu\n", + pr_err_ratelimited("User address not aligned, 0x%llx, size order %llu\n", inv_info->addr_info.addr, size); - ret = -ERANGE; - goto out_unlock; } /* @@ -5464,25 +5472,35 @@ intel_iommu_sva_invalidate(struct iommu_domain *domain, struct device *dev, (granu == QI_GRAN_NONG_PASID) ? -1 : 1 << size, inv_info->addr_info.flags & IOMMU_INV_ADDR_FLAGS_LEAF); + if (!info->ats_enabled) + break; /* * Always flush device IOTLB if ATS is enabled. vIOMMU * in the guest may assume IOTLB flush is inclusive, * which is more efficient. */ - if (info->ats_enabled) - qi_flush_dev_iotlb_pasid(iommu, sid, - info->pfsid, pasid, - info->ats_qdep, - inv_info->addr_info.addr, - size, granu); - break; + fallthrough; case IOMMU_CACHE_INV_TYPE_DEV_IOTLB: + /* + * PASID based device TLB invalidation does not support + * IOMMU_INV_GRANU_PASID granularity but only supports + * IOMMU_INV_GRANU_ADDR. + * The equivalent of that is we set the size to be the + * entire range of 64 bit. User only provides PASID info + * without address info. So we set addr to 0. + */ + if (inv_info->granularity == IOMMU_INV_GRANU_PASID) { + size = 64 - VTD_PAGE_SHIFT; + addr = 0; + } else if (inv_info->granularity == IOMMU_INV_GRANU_ADDR) { + addr = inv_info->addr_info.addr; + } + if (info->ats_enabled) qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid, pasid, - info->ats_qdep, - inv_info->addr_info.addr, - size, granu); + info->ats_qdep, addr, + size); else pr_warn_ratelimited("Passdown device IOTLB flush w/o ATS!\n"); break; @@ -5658,14 +5676,13 @@ static bool intel_iommu_capable(enum iommu_cap cap) static struct iommu_device *intel_iommu_probe_device(struct device *dev) { struct intel_iommu *iommu; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return ERR_PTR(-ENODEV); if (translation_pre_enabled(iommu)) - dev->archdata.iommu = DEFER_DEVICE_DOMAIN_INFO; + dev_iommu_priv_set(dev, DEFER_DEVICE_DOMAIN_INFO); return &iommu->iommu; } @@ -5673,9 +5690,8 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev) static void intel_iommu_release_device(struct device *dev) { struct intel_iommu *iommu; - u8 bus, devfn; - iommu = device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) return; @@ -5825,37 +5841,14 @@ static struct iommu_group *intel_iommu_device_group(struct device *dev) return generic_device_group(dev); } -#ifdef CONFIG_INTEL_IOMMU_SVM -struct intel_iommu *intel_svm_device_to_iommu(struct device *dev) -{ - struct intel_iommu *iommu; - u8 bus, devfn; - - if (iommu_dummy(dev)) { - dev_warn(dev, - "No IOMMU translation for device; cannot enable SVM\n"); - return NULL; - } - - iommu = device_to_iommu(dev, &bus, &devfn); - if ((!iommu)) { - dev_err(dev, "No IOMMU for device; cannot enable SVM\n"); - return NULL; - } - - return iommu; -} -#endif /* CONFIG_INTEL_IOMMU_SVM */ - static int intel_iommu_enable_auxd(struct device *dev) { struct device_domain_info *info; struct intel_iommu *iommu; unsigned long flags; - u8 bus, devfn; int ret; - iommu = 
device_to_iommu(dev, &bus, &devfn); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu || dmar_disabled) return -EINVAL; @@ -6080,6 +6073,7 @@ const struct iommu_ops intel_iommu_ops = { .sva_bind = intel_svm_bind, .sva_unbind = intel_svm_unbind, .sva_get_pasid = intel_svm_get_pasid, + .page_response = intel_svm_page_response, #endif }; @@ -6182,6 +6176,27 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_g DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0062, quirk_calpella_no_shadow_gtt); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x006a, quirk_calpella_no_shadow_gtt); +static void quirk_igfx_skip_te_disable(struct pci_dev *dev) +{ + unsigned short ver; + + if (!IS_GFX_DEVICE(dev)) + return; + + ver = (dev->device >> 8) & 0xff; + if (ver != 0x45 && ver != 0x46 && ver != 0x4c && + ver != 0x4e && ver != 0x8a && ver != 0x98 && + ver != 0x9a) + return; + + if (risky_device(dev)) + return; + + pci_info(dev, "Skip IOMMU disabling for graphics\n"); + iommu_skip_te_disable = 1; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, quirk_igfx_skip_te_disable); + /* On Tylersburg chipsets, some BIOSes have been known to enable the ISOCH DMAR unit for the Azalia sound device, but not give it any TLB entries, which causes it to deadlock. Check for that. We do diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index aa096b333a99..23583b0e66a5 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -15,6 +15,7 @@ #include <linux/irqdomain.h> #include <linux/crash_dump.h> #include <asm/io_apic.h> +#include <asm/apic.h> #include <asm/smp.h> #include <asm/cpu.h> #include <asm/irq_remapping.h> diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index c81f0f17c6ba..e6faedf42fd4 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -19,7 +19,7 @@ #include <linux/pci-ats.h> #include <linux/spinlock.h> -#include "intel-pasid.h" +#include "pasid.h" /* * Intel IOMMU system wide PASID name space: @@ -486,7 +486,16 @@ devtlb_invalidation_with_pasid(struct intel_iommu *iommu, qdep = info->ats_qdep; pfsid = info->pfsid; - qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT); + /* + * When PASID 0 is used, it indicates RID2PASID(DMA request w/o PASID), + * devTLB flush w/o PASID should be used. For non-zero PASID under + * SVA usage, device could do DMA with multiple PASIDs. It is more + * efficient to flush devTLB specific to the PASID. 
+ */ + if (pasid == PASID_RID2PASID) + qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT); + else + qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT); } void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev, diff --git a/drivers/iommu/intel/intel-pasid.h b/drivers/iommu/intel/pasid.h index c5318d40e0fa..c9850766c3a9 100644 --- a/drivers/iommu/intel/intel-pasid.h +++ b/drivers/iommu/intel/pasid.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * intel-pasid.h - PASID idr, table and entry header + * pasid.h - PASID idr, table and entry header * * Copyright (C) 2018 Intel Corporation * diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c index 6c87c807a0ab..95c3164a2302 100644 --- a/drivers/iommu/intel/svm.c +++ b/drivers/iommu/intel/svm.c @@ -20,7 +20,7 @@ #include <linux/ioasid.h> #include <asm/page.h> -#include "intel-pasid.h" +#include "pasid.h" static irqreturn_t prq_event_thread(int irq, void *d); static void intel_svm_drain_prq(struct device *dev, int pasid); @@ -228,13 +228,57 @@ static LIST_HEAD(global_svm_list); list_for_each_entry((sdev), &(svm)->devs, list) \ if ((d) != (sdev)->dev) {} else +static int pasid_to_svm_sdev(struct device *dev, unsigned int pasid, + struct intel_svm **rsvm, + struct intel_svm_dev **rsdev) +{ + struct intel_svm_dev *d, *sdev = NULL; + struct intel_svm *svm; + + /* The caller should hold the pasid_mutex lock */ + if (WARN_ON(!mutex_is_locked(&pasid_mutex))) + return -EINVAL; + + if (pasid == INVALID_IOASID || pasid >= PASID_MAX) + return -EINVAL; + + svm = ioasid_find(NULL, pasid, NULL); + if (IS_ERR(svm)) + return PTR_ERR(svm); + + if (!svm) + goto out; + + /* + * If we found svm for the PASID, there must be at least one device + * bond. + */ + if (WARN_ON(list_empty(&svm->devs))) + return -EINVAL; + + rcu_read_lock(); + list_for_each_entry_rcu(d, &svm->devs, list) { + if (d->dev == dev) { + sdev = d; + break; + } + } + rcu_read_unlock(); + +out: + *rsvm = svm; + *rsdev = sdev; + + return 0; +} + int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, struct iommu_gpasid_bind_data *data) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); + struct intel_svm_dev *sdev = NULL; struct dmar_domain *dmar_domain; - struct intel_svm_dev *sdev; - struct intel_svm *svm; + struct intel_svm *svm = NULL; int ret = 0; if (WARN_ON(!iommu) || !data) @@ -261,39 +305,23 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, dmar_domain = to_dmar_domain(domain); mutex_lock(&pasid_mutex); - svm = ioasid_find(NULL, data->hpasid, NULL); - if (IS_ERR(svm)) { - ret = PTR_ERR(svm); + ret = pasid_to_svm_sdev(dev, data->hpasid, &svm, &sdev); + if (ret) goto out; - } - if (svm) { + if (sdev) { /* - * If we found svm for the PASID, there must be at - * least one device bond, otherwise svm should be freed. + * Do not allow multiple bindings of the same device-PASID since + * there is only one SL page tables per PASID. We may revisit + * once sharing PGD across domains are supported. */ - if (WARN_ON(list_empty(&svm->devs))) { - ret = -EINVAL; - goto out; - } + dev_warn_ratelimited(dev, "Already bound with PASID %u\n", + svm->pasid); + ret = -EBUSY; + goto out; + } - for_each_svm_dev(sdev, svm, dev) { - /* - * For devices with aux domains, we should allow - * multiple bind calls with the same PASID and pdev. 
- */ - if (iommu_dev_feature_enabled(dev, - IOMMU_DEV_FEAT_AUX)) { - sdev->users++; - } else { - dev_warn_ratelimited(dev, - "Already bound with PASID %u\n", - svm->pasid); - ret = -EBUSY; - } - goto out; - } - } else { + if (!svm) { /* We come here when PASID has never been bond to a device. */ svm = kzalloc(sizeof(*svm), GFP_KERNEL); if (!svm) { @@ -373,28 +401,20 @@ int intel_svm_bind_gpasid(struct iommu_domain *domain, struct device *dev, int intel_svm_unbind_gpasid(struct device *dev, int pasid) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct intel_svm_dev *sdev; struct intel_svm *svm; - int ret = -EINVAL; + int ret; if (WARN_ON(!iommu)) return -EINVAL; mutex_lock(&pasid_mutex); - svm = ioasid_find(NULL, pasid, NULL); - if (!svm) { - ret = -EINVAL; - goto out; - } - - if (IS_ERR(svm)) { - ret = PTR_ERR(svm); + ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); + if (ret) goto out; - } - for_each_svm_dev(sdev, svm, dev) { - ret = 0; + if (sdev) { if (iommu_dev_feature_enabled(dev, IOMMU_DEV_FEAT_AUX)) sdev->users--; if (!sdev->users) { @@ -418,7 +438,6 @@ int intel_svm_unbind_gpasid(struct device *dev, int pasid) kfree(svm); } } - break; } out: mutex_unlock(&pasid_mutex); @@ -430,7 +449,7 @@ static int intel_svm_bind_mm(struct device *dev, int flags, struct svm_dev_ops *ops, struct mm_struct *mm, struct intel_svm_dev **sd) { - struct intel_iommu *iommu = intel_svm_device_to_iommu(dev); + struct intel_iommu *iommu = device_to_iommu(dev, NULL, NULL); struct device_domain_info *info; struct intel_svm_dev *sdev; struct intel_svm *svm = NULL; @@ -596,7 +615,7 @@ success: if (sd) *sd = sdev; ret = 0; - out: +out: return ret; } @@ -608,21 +627,15 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid) struct intel_svm *svm; int ret = -EINVAL; - iommu = intel_svm_device_to_iommu(dev); + iommu = device_to_iommu(dev, NULL, NULL); if (!iommu) goto out; - svm = ioasid_find(NULL, pasid, NULL); - if (!svm) - goto out; - - if (IS_ERR(svm)) { - ret = PTR_ERR(svm); + ret = pasid_to_svm_sdev(dev, pasid, &svm, &sdev); + if (ret) goto out; - } - for_each_svm_dev(sdev, svm, dev) { - ret = 0; + if (sdev) { sdev->users--; if (!sdev->users) { list_del_rcu(&sdev->list); @@ -651,10 +664,8 @@ static int intel_svm_unbind_mm(struct device *dev, int pasid) kfree(svm); } } - break; } - out: - +out: return ret; } @@ -800,8 +811,63 @@ qi_retry: } } +static int prq_to_iommu_prot(struct page_req_dsc *req) +{ + int prot = 0; + + if (req->rd_req) + prot |= IOMMU_FAULT_PERM_READ; + if (req->wr_req) + prot |= IOMMU_FAULT_PERM_WRITE; + if (req->exe_req) + prot |= IOMMU_FAULT_PERM_EXEC; + if (req->pm_req) + prot |= IOMMU_FAULT_PERM_PRIV; + + return prot; +} + +static int +intel_svm_prq_report(struct device *dev, struct page_req_dsc *desc) +{ + struct iommu_fault_event event; + + if (!dev || !dev_is_pci(dev)) + return -ENODEV; + + /* Fill in event data for device specific processing */ + memset(&event, 0, sizeof(struct iommu_fault_event)); + event.fault.type = IOMMU_FAULT_PAGE_REQ; + event.fault.prm.addr = desc->addr; + event.fault.prm.pasid = desc->pasid; + event.fault.prm.grpid = desc->prg_index; + event.fault.prm.perm = prq_to_iommu_prot(desc); + + if (desc->lpig) + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + if (desc->pasid_present) { + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + event.fault.prm.flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; + } + if (desc->priv_data_present) { + /* + * 
Set last page in group bit if private data is present, + * page response is required as it does for LPIG. + * iommu_report_device_fault() doesn't understand this vendor + * specific requirement thus we set last_page as a workaround. + */ + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; + memcpy(event.fault.prm.private_data, desc->priv_data, + sizeof(desc->priv_data)); + } + + return iommu_report_device_fault(dev, &event); +} + static irqreturn_t prq_event_thread(int irq, void *d) { + struct intel_svm_dev *sdev = NULL; struct intel_iommu *iommu = d; struct intel_svm *svm = NULL; int head, tail, handled = 0; @@ -813,7 +879,6 @@ static irqreturn_t prq_event_thread(int irq, void *d) tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK; head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK; while (head != tail) { - struct intel_svm_dev *sdev; struct vm_area_struct *vma; struct page_req_dsc *req; struct qi_desc resp; @@ -849,6 +914,20 @@ static irqreturn_t prq_event_thread(int irq, void *d) } } + if (!sdev || sdev->sid != req->rid) { + struct intel_svm_dev *t; + + sdev = NULL; + rcu_read_lock(); + list_for_each_entry_rcu(t, &svm->devs, list) { + if (t->sid == req->rid) { + sdev = t; + break; + } + } + rcu_read_unlock(); + } + result = QI_RESP_INVALID; /* Since we're using init_mm.pgd directly, we should never take * any faults on kernel addresses. */ @@ -859,6 +938,17 @@ static irqreturn_t prq_event_thread(int irq, void *d) if (!is_canonical_address(address)) goto bad_req; + /* + * If prq is to be handled outside iommu driver via receiver of + * the fault notifiers, we skip the page response here. + */ + if (svm->flags & SVM_FLAG_GUEST_MODE) { + if (sdev && !intel_svm_prq_report(sdev->dev, req)) + goto prq_advance; + else + goto bad_req; + } + /* If the mm is already defunct, don't handle faults. */ if (!mmget_not_zero(svm->mm)) goto bad_req; @@ -872,29 +962,17 @@ static irqreturn_t prq_event_thread(int irq, void *d) goto invalid; ret = handle_mm_fault(vma, address, - req->wr_req ? FAULT_FLAG_WRITE : 0); + req->wr_req ? FAULT_FLAG_WRITE : 0, + NULL); if (ret & VM_FAULT_ERROR) goto invalid; result = QI_RESP_SUCCESS; - invalid: +invalid: mmap_read_unlock(svm->mm); mmput(svm->mm); - bad_req: - /* Accounting for major/minor faults? */ - rcu_read_lock(); - list_for_each_entry_rcu(sdev, &svm->devs, list) { - if (sdev->sid == req->rid) - break; - } - /* Other devices can go away, but the drivers are not permitted - * to unbind while any page faults might be in flight. So it's - * OK to drop the 'lock' here now we have it. */ - rcu_read_unlock(); - - if (WARN_ON(&sdev->list == &svm->devs)) - sdev = NULL; - +bad_req: + WARN_ON(!sdev); if (sdev && sdev->ops && sdev->ops->fault_cb) { int rwxp = (req->rd_req << 3) | (req->wr_req << 2) | (req->exe_req << 1) | (req->pm_req); @@ -905,7 +983,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) and these can be NULL. Do not use them below this point! */ sdev = NULL; svm = NULL; - no_pasid: +no_pasid: if (req->lpig || req->priv_data_present) { /* * Per VT-d spec. 
v3.0 ch7.7, system software must @@ -930,6 +1008,7 @@ static irqreturn_t prq_event_thread(int irq, void *d) resp.qw3 = 0; qi_submit_sync(iommu, &resp, 1, 0); } +prq_advance: head = (head + sizeof(*req)) & PRQ_RING_MASK; } @@ -1000,3 +1079,102 @@ int intel_svm_get_pasid(struct iommu_sva *sva) return pasid; } + +int intel_svm_page_response(struct device *dev, + struct iommu_fault_event *evt, + struct iommu_page_response *msg) +{ + struct iommu_fault_page_request *prm; + struct intel_svm_dev *sdev = NULL; + struct intel_svm *svm = NULL; + struct intel_iommu *iommu; + bool private_present; + bool pasid_present; + bool last_page; + u8 bus, devfn; + int ret = 0; + u16 sid; + + if (!dev || !dev_is_pci(dev)) + return -ENODEV; + + iommu = device_to_iommu(dev, &bus, &devfn); + if (!iommu) + return -ENODEV; + + if (!msg || !evt) + return -EINVAL; + + mutex_lock(&pasid_mutex); + + prm = &evt->fault.prm; + sid = PCI_DEVID(bus, devfn); + pasid_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + private_present = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PRIV_DATA; + last_page = prm->flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE; + + if (!pasid_present) { + ret = -EINVAL; + goto out; + } + + if (prm->pasid == 0 || prm->pasid >= PASID_MAX) { + ret = -EINVAL; + goto out; + } + + ret = pasid_to_svm_sdev(dev, prm->pasid, &svm, &sdev); + if (ret || !sdev) { + ret = -ENODEV; + goto out; + } + + /* + * For responses from userspace, need to make sure that the + * pasid has been bound to its mm. + */ + if (svm->flags & SVM_FLAG_GUEST_MODE) { + struct mm_struct *mm; + + mm = get_task_mm(current); + if (!mm) { + ret = -EINVAL; + goto out; + } + + if (mm != svm->mm) { + ret = -ENODEV; + mmput(mm); + goto out; + } + + mmput(mm); + } + + /* + * Per VT-d spec. v3.0 ch7.7, system software must respond + * with page group response if private data is present (PDP) + * or last page in group (LPIG) bit is set. This is an + * additional VT-d requirement beyond PCI ATS spec. 
+ */ + if (last_page || private_present) { + struct qi_desc desc; + + desc.qw0 = QI_PGRP_PASID(prm->pasid) | QI_PGRP_DID(sid) | + QI_PGRP_PASID_P(pasid_present) | + QI_PGRP_PDP(private_present) | + QI_PGRP_RESP_CODE(msg->code) | + QI_PGRP_RESP_TYPE; + desc.qw1 = QI_PGRP_IDX(prm->grpid) | QI_PGRP_LPIG(last_page); + desc.qw2 = 0; + desc.qw3 = 0; + if (private_present) + memcpy(&desc.qw2, prm->private_data, + sizeof(prm->private_data)); + + qi_submit_sync(iommu, &desc, 1, 0); + } +out: + mutex_unlock(&pasid_mutex); + return ret; +} diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 4272fe4e17f4..a688f22cbe3b 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, int prot, - int lvl, arm_v7s_iopte *ptep) + int lvl, arm_v7s_iopte *ptep, gfp_t gfp) { struct io_pgtable_cfg *cfg = &data->iop.cfg; arm_v7s_iopte pte, *cptep; @@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data); + cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data); if (!cptep) return -ENOMEM; @@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int prot) + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable *iop = &data->iop; @@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova, paddr >= (1ULL << data->iop.cfg.oas))) return -ERANGE; - ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd); + ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. 
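The hunks above thread a gfp_t argument through io_pgtable_ops->map() so page-table memory no longer has to be allocated with GFP_ATOMIC unconditionally. A minimal caller-side sketch of the new signature, assuming an illustrative sketch_domain_to_ops() accessor that is not part of the patch:

	static int sketch_iommu_map(struct iommu_domain *domain, unsigned long iova,
				    phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
	{
		struct io_pgtable_ops *ops = sketch_domain_to_ops(domain); /* hypothetical accessor */

		/*
		 * The core passes GFP_KERNEL down from sleepable callers and
		 * GFP_ATOMIC from atomic ones; the driver simply forwards it.
		 */
		return ops->map(ops, iova, paddr, size, prot, gfp);
	}

Drivers that map under a spinlock (msm_iommu, further below) keep passing GFP_ATOMIC explicitly.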
@@ -922,12 +922,12 @@ static int __init arm_v7s_do_selftests(void) if (ops->map(ops, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | - IOMMU_CACHE)) + IOMMU_CACHE, GFP_KERNEL)) return __FAIL(ops); /* Overlapping mappings */ if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -946,7 +946,7 @@ static int __init arm_v7s_do_selftests(void) return __FAIL(ops); /* Remap of partial unmap */ - if (ops->map(ops, iova_start + size, size, size, IOMMU_READ)) + if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova_start + size + 42) @@ -967,7 +967,7 @@ static int __init arm_v7s_do_selftests(void) return __FAIL(ops); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) return __FAIL(ops); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 04fbd4bf0ff9..dc7bcf858b6d 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, arm_lpae_iopte prot, - int lvl, arm_lpae_iopte *ptep) + int lvl, arm_lpae_iopte *ptep, gfp_t gfp) { arm_lpae_iopte *cptep, pte; size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); @@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, /* Grab a pointer to the next level */ pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); + cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg); if (!cptep) return -ENOMEM; @@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, } /* Rinse, repeat */ - return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); + return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp); } static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, @@ -438,9 +438,6 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, else if (prot & IOMMU_CACHE) pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT); - else if (prot & IOMMU_SYS_CACHE_ONLY) - pte |= (ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE - << ARM_LPAE_PTE_ATTRINDX_SHIFT); } if (prot & IOMMU_CACHE) @@ -461,7 +458,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, } static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, - phys_addr_t paddr, size_t size, int iommu_prot) + phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp) { struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); struct io_pgtable_cfg *cfg = &data->iop.cfg; @@ -483,7 +480,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, return -ERANGE; prot = arm_lpae_prot_to_pte(data, iommu_prot); - ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); + ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp); /* * Synchronise all PTE updates for the new mapping before there's * a chance for anything to kick off a table walk for the new iova. 
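The arm-lpae hunk above makes the same change as the short-descriptor one: a next-level table is allocated only when the current descriptor is still empty, and that allocation now honours the caller-supplied gfp. A simplified stand-in for that step (not the real installer; the compare-and-swap install and race handling are omitted):

	static void *sketch_alloc_next_level(u64 *ptep, gfp_t gfp)
	{
		void *table;

		if (READ_ONCE(*ptep))
			return NULL;			/* already populated */

		table = (void *)get_zeroed_page(gfp);	/* was hard-coded GFP_ATOMIC before */
		if (!table)
			return NULL;

		/*
		 * The real helpers install the new table with a compare-and-swap
		 * and free it again if another CPU won the race.
		 */
		return table;
	}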
@@ -1178,12 +1175,12 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) if (ops->map(ops, iova, iova, size, IOMMU_READ | IOMMU_WRITE | IOMMU_NOEXEC | - IOMMU_CACHE)) + IOMMU_CACHE, GFP_KERNEL)) return __FAIL(ops, i); /* Overlapping mappings */ if (!ops->map(ops, iova, iova + size, size, - IOMMU_READ | IOMMU_NOEXEC)) + IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) @@ -1198,7 +1195,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) return __FAIL(ops, i); /* Remap of partial unmap */ - if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) + if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) @@ -1216,7 +1213,7 @@ static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) return __FAIL(ops, i); /* Remap full block */ - if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) + if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL)) return __FAIL(ops, i); if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index b6858adc4f17..609bd25bf154 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -383,8 +383,8 @@ static ssize_t iommu_group_show_name(struct iommu_group *group, char *buf) * Elements are sorted by start address and overlapping segments * of the same type are merged. */ -int iommu_insert_resv_region(struct iommu_resv_region *new, - struct list_head *regions) +static int iommu_insert_resv_region(struct iommu_resv_region *new, + struct list_head *regions) { struct iommu_resv_region *iter, *tmp, *nr, *top; LIST_HEAD(stack); @@ -1185,11 +1185,12 @@ EXPORT_SYMBOL_GPL(iommu_report_device_fault); int iommu_page_response(struct device *dev, struct iommu_page_response *msg) { - bool pasid_valid; + bool needs_pasid; int ret = -EINVAL; struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; struct dev_iommu *param = dev->iommu; + bool has_pasid = msg->flags & IOMMU_PAGE_RESP_PASID_VALID; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); if (!domain || !domain->ops->page_response) @@ -1214,14 +1215,24 @@ int iommu_page_response(struct device *dev, */ list_for_each_entry(evt, ¶m->fault_param->faults, list) { prm = &evt->fault.prm; - pasid_valid = prm->flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID; + if (prm->grpid != msg->grpid) + continue; - if ((pasid_valid && prm->pasid != msg->pasid) || - prm->grpid != msg->grpid) + /* + * If the PASID is required, the corresponding request is + * matched using the group ID, the PASID valid bit and the PASID + * value. Otherwise only the group ID matches request and + * response. + */ + needs_pasid = prm->flags & IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID; + if (needs_pasid && (!has_pasid || msg->pasid != prm->pasid)) continue; - /* Sanitize the reply */ - msg->flags = pasid_valid ? IOMMU_PAGE_RESP_PASID_VALID : 0; + if (!needs_pasid && has_pasid) { + /* No big deal, just clear it. 
*/ + msg->flags &= ~IOMMU_PAGE_RESP_PASID_VALID; + msg->pasid = 0; + } ret = domain->ops->page_response(dev, evt, msg); list_del(&evt->list); @@ -2168,8 +2179,8 @@ static size_t iommu_pgsize(struct iommu_domain *domain, return pgsize; } -int __iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot, gfp_t gfp) +static int __iommu_map(struct iommu_domain *domain, unsigned long iova, + phys_addr_t paddr, size_t size, int prot, gfp_t gfp) { const struct iommu_ops *ops = domain->ops; unsigned long orig_iova = iova; @@ -2319,9 +2330,9 @@ size_t iommu_unmap_fast(struct iommu_domain *domain, } EXPORT_SYMBOL_GPL(iommu_unmap_fast); -size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, - struct scatterlist *sg, unsigned int nents, int prot, - gfp_t gfp) +static size_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova, + struct scatterlist *sg, unsigned int nents, int prot, + gfp_t gfp) { size_t len = 0, mapped = 0; phys_addr_t start; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 49fc01f2a28d..45a251da5453 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad) for (i = 0 ; i < mag->size; ++i) { struct iova *iova = private_find_iova(iovad, mag->pfns[i]); - BUG_ON(!iova); + if (WARN_ON(!iova)) + continue; + private_free_iova(iovad, iova); } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 6de86e73dfc3..0f18abda0e20 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -3,7 +3,7 @@ * IOMMU API for Renesas VMSA-compatible IPMMU * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com> * - * Copyright (C) 2014 Renesas Electronics Corporation + * Copyright (C) 2014-2020 Renesas Electronics Corporation */ #include <linux/bitmap.h> @@ -686,7 +686,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, if (!domain) return -ENODEV; - return domain->iop->map(domain->iop, iova, paddr, size, prot); + return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp); } static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, @@ -739,7 +739,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = { { .soc_id = "r8a774a1", }, { .soc_id = "r8a774b1", }, { .soc_id = "r8a774c0", }, + { .soc_id = "r8a774e1", }, { .soc_id = "r8a7795", }, + { .soc_id = "r8a77961", }, { .soc_id = "r8a7796", }, { .soc_id = "r8a77965", }, { .soc_id = "r8a77970", }, @@ -751,7 +753,9 @@ static const struct soc_device_attribute soc_rcar_gen3[] = { static const struct soc_device_attribute soc_rcar_gen3_whitelist[] = { { .soc_id = "r8a774b1", }, { .soc_id = "r8a774c0", }, + { .soc_id = "r8a774e1", }, { .soc_id = "r8a7795", .revision = "ES3.*" }, + { .soc_id = "r8a77961", }, { .soc_id = "r8a77965", }, { .soc_id = "r8a77990", }, { .soc_id = "r8a77995", }, @@ -963,12 +967,18 @@ static const struct of_device_id ipmmu_of_ids[] = { .compatible = "renesas,ipmmu-r8a774c0", .data = &ipmmu_features_rcar_gen3, }, { + .compatible = "renesas,ipmmu-r8a774e1", + .data = &ipmmu_features_rcar_gen3, + }, { .compatible = "renesas,ipmmu-r8a7795", .data = &ipmmu_features_rcar_gen3, }, { .compatible = "renesas,ipmmu-r8a7796", .data = &ipmmu_features_rcar_gen3, }, { + .compatible = "renesas,ipmmu-r8a77961", + .data = &ipmmu_features_rcar_gen3, + }, { .compatible = "renesas,ipmmu-r8a77965", .data = &ipmmu_features_rcar_gen3, }, { diff --git a/drivers/iommu/msm_iommu.c 
b/drivers/iommu/msm_iommu.c index 3d8a63555c25..3615cd6241c4 100644 --- a/drivers/iommu/msm_iommu.c +++ b/drivers/iommu/msm_iommu.c @@ -491,7 +491,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, int ret; spin_lock_irqsave(&priv->pgtlock, flags); - ret = priv->iop->map(priv->iop, iova, pa, len, prot); + ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC); spin_unlock_irqrestore(&priv->pgtlock, flags); return ret; @@ -593,14 +593,14 @@ static void insert_iommu_master(struct device *dev, struct msm_iommu_dev **iommu, struct of_phandle_args *spec) { - struct msm_iommu_ctx_dev *master = dev->archdata.iommu; + struct msm_iommu_ctx_dev *master = dev_iommu_priv_get(dev); int sid; if (list_empty(&(*iommu)->ctx_list)) { master = kzalloc(sizeof(*master), GFP_ATOMIC); master->of_node = dev->of_node; list_add(&master->list, &(*iommu)->ctx_list); - dev->archdata.iommu = master; + dev_iommu_priv_set(dev, master); } for (sid = 0; sid < master->num_mids; sid++) diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 2be96f1cdbd2..785b228d39a6 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -37,12 +37,18 @@ #define REG_MMU_INVLD_START_A 0x024 #define REG_MMU_INVLD_END_A 0x028 -#define REG_MMU_INV_SEL 0x038 +#define REG_MMU_INV_SEL_GEN2 0x02c +#define REG_MMU_INV_SEL_GEN1 0x038 #define F_INVLD_EN0 BIT(0) #define F_INVLD_EN1 BIT(1) -#define REG_MMU_STANDARD_AXI_MODE 0x048 +#define REG_MMU_MISC_CTRL 0x048 +#define F_MMU_IN_ORDER_WR_EN_MASK (BIT(1) | BIT(17)) +#define F_MMU_STANDARD_AXI_MODE_MASK (BIT(3) | BIT(19)) + #define REG_MMU_DCM_DIS 0x050 +#define REG_MMU_WR_LEN_CTRL 0x054 +#define F_MMU_WR_THROT_DIS_MASK (BIT(5) | BIT(21)) #define REG_MMU_CTRL_REG 0x110 #define F_MMU_TF_PROT_TO_PROGRAM_ADDR (2 << 4) @@ -88,10 +94,12 @@ #define REG_MMU1_INVLD_PA 0x148 #define REG_MMU0_INT_ID 0x150 #define REG_MMU1_INT_ID 0x154 +#define F_MMU_INT_ID_COMM_ID(a) (((a) >> 9) & 0x7) +#define F_MMU_INT_ID_SUB_COMM_ID(a) (((a) >> 7) & 0x3) #define F_MMU_INT_ID_LARB_ID(a) (((a) >> 7) & 0x7) #define F_MMU_INT_ID_PORT_ID(a) (((a) >> 2) & 0x1f) -#define MTK_PROTECT_PA_ALIGN 128 +#define MTK_PROTECT_PA_ALIGN 256 /* * Get the local arbiter ID and the portid within the larb arbiter @@ -100,6 +108,18 @@ #define MTK_M4U_TO_LARB(id) (((id) >> 5) & 0xf) #define MTK_M4U_TO_PORT(id) ((id) & 0x1f) +#define HAS_4GB_MODE BIT(0) +/* HW will use the EMI clock if there isn't the "bclk". 
*/ +#define HAS_BCLK BIT(1) +#define HAS_VLD_PA_RNG BIT(2) +#define RESET_AXI BIT(3) +#define OUT_ORDER_WR_EN BIT(4) +#define HAS_SUB_COMM BIT(5) +#define WR_THROT_EN BIT(6) + +#define MTK_IOMMU_HAS_FLAG(pdata, _x) \ + ((((pdata)->flags) & (_x)) == (_x)) + struct mtk_iommu_domain { struct io_pgtable_cfg cfg; struct io_pgtable_ops *iop; @@ -165,7 +185,7 @@ static void mtk_iommu_tlb_flush_all(void *cookie) for_each_m4u(data) { writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, - data->base + REG_MMU_INV_SEL); + data->base + data->plat_data->inv_sel_reg); writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE); wmb(); /* Make sure the tlb flush all done */ } @@ -182,7 +202,7 @@ static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size, for_each_m4u(data) { spin_lock_irqsave(&data->tlb_lock, flags); writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, - data->base + REG_MMU_INV_SEL); + data->base + data->plat_data->inv_sel_reg); writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A); writel_relaxed(iova + size - 1, @@ -226,7 +246,7 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) struct mtk_iommu_data *data = dev_id; struct mtk_iommu_domain *dom = data->m4u_dom; u32 int_state, regval, fault_iova, fault_pa; - unsigned int fault_larb, fault_port; + unsigned int fault_larb, fault_port, sub_comm = 0; bool layer, write; /* Read error info from registers */ @@ -242,10 +262,14 @@ static irqreturn_t mtk_iommu_isr(int irq, void *dev_id) } layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT; write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT; - fault_larb = F_MMU_INT_ID_LARB_ID(regval); fault_port = F_MMU_INT_ID_PORT_ID(regval); - - fault_larb = data->plat_data->larbid_remap[fault_larb]; + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) { + fault_larb = F_MMU_INT_ID_COMM_ID(regval); + sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval); + } else { + fault_larb = F_MMU_INT_ID_LARB_ID(regval); + } + fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm]; if (report_iommu_fault(&dom->domain, data->dev, fault_iova, write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) { @@ -397,7 +421,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova, paddr |= BIT_ULL(32); /* Synchronize with the tlb_lock */ - return dom->iop->map(dom->iop, iova, paddr, size, prot); + return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp); } static size_t mtk_iommu_unmap(struct iommu_domain *domain, @@ -532,11 +556,13 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) return ret; } - if (data->plat_data->m4u_plat == M4U_MT8173) + if (data->plat_data->m4u_plat == M4U_MT8173) { regval = F_MMU_PREFETCH_RT_REPLACE_MOD | F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173; - else - regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR; + } else { + regval = readl_relaxed(data->base + REG_MMU_CTRL_REG); + regval |= F_MMU_TF_PROT_TO_PROGRAM_ADDR; + } writel_relaxed(regval, data->base + REG_MMU_CTRL_REG); regval = F_L2_MULIT_HIT_EN | @@ -563,7 +589,8 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) upper_32_bits(data->protect_base); writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR); - if (data->enable_4GB && data->plat_data->has_vld_pa_rng) { + if (data->enable_4GB && + MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) { /* * If 4GB mode is enabled, the validate PA range is from * 0x1_0000_0000 to 0x1_ffff_ffff. here record bit[32:30]. 
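The MediaTek rework above folds the old has_4gb_mode/has_bclk/has_vld_pa_rng/reset_axi booleans into a single flags word tested with MTK_IOMMU_HAS_FLAG(), which is true only when every requested bit is set. A self-contained illustration of that check (the struct and bit values here are made up for the example):

	struct sketch_plat_data {
		u32 flags;
	};

	#define SKETCH_HAS_FLAG(pdata, _x)	((((pdata)->flags) & (_x)) == (_x))

	static bool sketch_has_both_features(const struct sketch_plat_data *p)
	{
		/* All requested bits must be present, unlike a plain bitwise AND. */
		return SKETCH_HAS_FLAG(p, BIT(4) | BIT(6));
	}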
@@ -572,9 +599,23 @@ static int mtk_iommu_hw_init(const struct mtk_iommu_data *data) writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG); } writel_relaxed(0, data->base + REG_MMU_DCM_DIS); + if (MTK_IOMMU_HAS_FLAG(data->plat_data, WR_THROT_EN)) { + /* write command throttling mode */ + regval = readl_relaxed(data->base + REG_MMU_WR_LEN_CTRL); + regval &= ~F_MMU_WR_THROT_DIS_MASK; + writel_relaxed(regval, data->base + REG_MMU_WR_LEN_CTRL); + } - if (data->plat_data->reset_axi) - writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE); + if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) { + /* The register is called STANDARD_AXI_MODE in this case */ + regval = 0; + } else { + regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL); + regval &= ~F_MMU_STANDARD_AXI_MODE_MASK; + if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN)) + regval &= ~F_MMU_IN_ORDER_WR_EN_MASK; + } + writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL); if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0, dev_name(data->dev), (void *)data)) { @@ -616,7 +657,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) /* Whether the current dram is over 4GB */ data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT)); - if (!data->plat_data->has_4gb_mode) + if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE)) data->enable_4GB = false; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -629,7 +670,7 @@ static int mtk_iommu_probe(struct platform_device *pdev) if (data->irq < 0) return data->irq; - if (data->plat_data->has_bclk) { + if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) { data->bclk = devm_clk_get(dev, "bclk"); if (IS_ERR(data->bclk)) return PTR_ERR(data->bclk); @@ -718,8 +759,8 @@ static int __maybe_unused mtk_iommu_suspend(struct device *dev) struct mtk_iommu_suspend_reg *reg = &data->reg; void __iomem *base = data->base; - reg->standard_axi_mode = readl_relaxed(base + - REG_MMU_STANDARD_AXI_MODE); + reg->wr_len_ctrl = readl_relaxed(base + REG_MMU_WR_LEN_CTRL); + reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL); reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS); reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG); reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0); @@ -743,8 +784,8 @@ static int __maybe_unused mtk_iommu_resume(struct device *dev) dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret); return ret; } - writel_relaxed(reg->standard_axi_mode, - base + REG_MMU_STANDARD_AXI_MODE); + writel_relaxed(reg->wr_len_ctrl, base + REG_MMU_WR_LEN_CTRL); + writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL); writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS); writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG); writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0); @@ -763,28 +804,35 @@ static const struct dev_pm_ops mtk_iommu_pm_ops = { static const struct mtk_iommu_plat_data mt2712_data = { .m4u_plat = M4U_MT2712, - .has_4gb_mode = true, - .has_bclk = true, - .has_vld_pa_rng = true, - .larbid_remap = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + .flags = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG, + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}}, +}; + +static const struct mtk_iommu_plat_data mt6779_data = { + .m4u_plat = M4U_MT6779, + .flags = HAS_SUB_COMM | OUT_ORDER_WR_EN | WR_THROT_EN, + .inv_sel_reg = REG_MMU_INV_SEL_GEN2, + .larbid_remap = {{0}, {1}, {2}, {3}, {5}, {7, 8}, {10}, {9}}, }; static const struct mtk_iommu_plat_data mt8173_data = { .m4u_plat = M4U_MT8173, - 
.has_4gb_mode = true, - .has_bclk = true, - .reset_axi = true, - .larbid_remap = {0, 1, 2, 3, 4, 5}, /* Linear mapping. */ + .flags = HAS_4GB_MODE | HAS_BCLK | RESET_AXI, + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */ }; static const struct mtk_iommu_plat_data mt8183_data = { .m4u_plat = M4U_MT8183, - .reset_axi = true, - .larbid_remap = {0, 4, 5, 6, 7, 2, 3, 1}, + .flags = RESET_AXI, + .inv_sel_reg = REG_MMU_INV_SEL_GEN1, + .larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}}, }; static const struct of_device_id mtk_iommu_of_ids[] = { { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, + { .compatible = "mediatek,mt6779-m4u", .data = &mt6779_data}, { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, {} diff --git a/drivers/iommu/mtk_iommu.h b/drivers/iommu/mtk_iommu.h index ea949a324e33..122925dbe547 100644 --- a/drivers/iommu/mtk_iommu.h +++ b/drivers/iommu/mtk_iommu.h @@ -15,34 +15,39 @@ #include <linux/iommu.h> #include <linux/list.h> #include <linux/spinlock.h> +#include <linux/dma-mapping.h> #include <soc/mediatek/smi.h> +#define MTK_LARB_COM_MAX 8 +#define MTK_LARB_SUBCOM_MAX 4 + struct mtk_iommu_suspend_reg { - u32 standard_axi_mode; + union { + u32 standard_axi_mode;/* v1 */ + u32 misc_ctrl;/* v2 */ + }; u32 dcm_dis; u32 ctrl_reg; u32 int_control0; u32 int_main_control; u32 ivrp_paddr; u32 vld_pa_rng; + u32 wr_len_ctrl; }; enum mtk_iommu_plat { M4U_MT2701, M4U_MT2712, + M4U_MT6779, M4U_MT8173, M4U_MT8183, }; struct mtk_iommu_plat_data { enum mtk_iommu_plat m4u_plat; - bool has_4gb_mode; - - /* HW will use the EMI clock if there isn't the "bclk". */ - bool has_bclk; - bool has_vld_pa_rng; - bool reset_axi; - unsigned char larbid_remap[MTK_LARB_NR_MAX]; + u32 flags; + u32 inv_sel_reg; + unsigned char larbid_remap[MTK_LARB_COM_MAX][MTK_LARB_SUBCOM_MAX]; }; struct mtk_iommu_domain; @@ -62,6 +67,8 @@ struct mtk_iommu_data { struct iommu_device iommu; const struct mtk_iommu_plat_data *plat_data; + struct dma_iommu_mapping *mapping; /* For mtk_iommu_v1.c */ + struct list_head list; struct mtk_smi_larb_iommu larb_imu[MTK_LARB_NR_MAX]; }; diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index c9d79cff4d17..82ddfe9170d4 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -269,7 +269,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, int ret; /* Only allow the domain created internally. */ - mtk_mapping = data->dev->archdata.iommu; + mtk_mapping = data->mapping; if (mtk_mapping->domain != domain) return 0; @@ -369,7 +369,6 @@ static int mtk_iommu_create_mapping(struct device *dev, struct mtk_iommu_data *data; struct platform_device *m4updev; struct dma_iommu_mapping *mtk_mapping; - struct device *m4udev; int ret; if (args->args_count != 1) { @@ -401,8 +400,7 @@ static int mtk_iommu_create_mapping(struct device *dev, return ret; data = dev_iommu_priv_get(dev); - m4udev = data->dev; - mtk_mapping = m4udev->archdata.iommu; + mtk_mapping = data->mapping; if (!mtk_mapping) { /* MTK iommu support 4GB iova address space. 
*/ mtk_mapping = arm_iommu_create_mapping(&platform_bus_type, @@ -410,7 +408,7 @@ static int mtk_iommu_create_mapping(struct device *dev, if (IS_ERR(mtk_mapping)) return PTR_ERR(mtk_mapping); - m4udev->archdata.iommu = mtk_mapping; + data->mapping = mtk_mapping; } return 0; @@ -459,7 +457,7 @@ static void mtk_iommu_probe_finalize(struct device *dev) int err; data = dev_iommu_priv_get(dev); - mtk_mapping = data->dev->archdata.iommu; + mtk_mapping = data->mapping; err = arm_iommu_attach_device(dev, mtk_mapping); if (err) diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c index 8e19bfa94121..a99afb5d9011 100644 --- a/drivers/iommu/omap-iommu-debug.c +++ b/drivers/iommu/omap-iommu-debug.c @@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, mutex_lock(&iommu_debug_lock); bytes = omap_iommu_dump_ctx(obj, p, count); + if (bytes < 0) + goto err; bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes); +err: mutex_unlock(&iommu_debug_lock); kfree(buf); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index c8282cc212cb..71f29c0927fc 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -3,7 +3,7 @@ * omap iommu: tlb and pagetable primitives * * Copyright (C) 2008-2010 Nokia Corporation - * Copyright (C) 2013-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/ * * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>, * Paul Mundt and Toshihiro Kobayashi @@ -71,7 +71,7 @@ static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom) **/ void omap_iommu_save_ctx(struct device *dev) { - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); struct omap_iommu *obj; u32 *p; int i; @@ -101,7 +101,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); **/ void omap_iommu_restore_ctx(struct device *dev) { - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); struct omap_iommu *obj; u32 *p; int i; @@ -1398,7 +1398,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, static int omap_iommu_count(struct device *dev) { - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); int count = 0; while (arch_data->iommu_dev) { @@ -1459,8 +1459,8 @@ static void omap_iommu_detach_fini(struct omap_iommu_domain *odomain) static int omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); struct omap_iommu_domain *omap_domain = to_omap_domain(domain); - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; struct omap_iommu_device *iommu; struct omap_iommu *oiommu; int ret = 0; @@ -1524,7 +1524,7 @@ out: static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain, struct device *dev) { - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); struct omap_iommu_device *iommu = omap_domain->iommus; struct omap_iommu *oiommu; int i; @@ -1650,7 +1650,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) int num_iommus, i; /* - * Allocate the archdata iommu structure for DT-based devices. + * Allocate the per-device iommu structure for DT-based devices. 
* * TODO: Simplify this when removing non-DT support completely from the * IOMMU users. @@ -1698,7 +1698,7 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) of_node_put(np); } - dev->archdata.iommu = arch_data; + dev_iommu_priv_set(dev, arch_data); /* * use the first IOMMU alone for the sysfs device linking. @@ -1712,19 +1712,19 @@ static struct iommu_device *omap_iommu_probe_device(struct device *dev) static void omap_iommu_release_device(struct device *dev) { - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); if (!dev->of_node || !arch_data) return; - dev->archdata.iommu = NULL; + dev_iommu_priv_set(dev, NULL); kfree(arch_data); } static struct iommu_group *omap_iommu_device_group(struct device *dev) { - struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; + struct omap_iommu_arch_data *arch_data = dev_iommu_priv_get(dev); struct iommu_group *group = ERR_PTR(-EINVAL); if (!arch_data) diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index d25c2486ca07..e5d86b7177de 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -836,7 +836,7 @@ static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova, static struct rk_iommu *rk_iommu_from_dev(struct device *dev) { - struct rk_iommudata *data = dev->archdata.iommu; + struct rk_iommudata *data = dev_iommu_priv_get(dev); return data ? data->iommu : NULL; } @@ -1059,7 +1059,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev) struct rk_iommudata *data; struct rk_iommu *iommu; - data = dev->archdata.iommu; + data = dev_iommu_priv_get(dev); if (!data) return ERR_PTR(-ENODEV); @@ -1073,7 +1073,7 @@ static struct iommu_device *rk_iommu_probe_device(struct device *dev) static void rk_iommu_release_device(struct device *dev) { - struct rk_iommudata *data = dev->archdata.iommu; + struct rk_iommudata *data = dev_iommu_priv_get(dev); device_link_del(data->link); } @@ -1100,7 +1100,7 @@ static int rk_iommu_of_xlate(struct device *dev, iommu_dev = of_find_device_by_node(args->np); data->iommu = platform_get_drvdata(iommu_dev); - dev->archdata.iommu = data; + dev_iommu_priv_set(dev, data); platform_device_put(iommu_dev); diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 5fbdff6ff41a..fac720273889 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -113,8 +113,8 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain, if (gart->active_domain && gart->active_domain != domain) { ret = -EBUSY; - } else if (dev->archdata.iommu != domain) { - dev->archdata.iommu = domain; + } else if (dev_iommu_priv_get(dev) != domain) { + dev_iommu_priv_set(dev, domain); gart->active_domain = domain; gart->active_devices++; } @@ -131,8 +131,8 @@ static void gart_iommu_detach_dev(struct iommu_domain *domain, spin_lock(&gart->dom_lock); - if (dev->archdata.iommu == domain) { - dev->archdata.iommu = NULL; + if (dev_iommu_priv_get(dev) == domain) { + dev_iommu_priv_set(dev, NULL); if (--gart->active_devices == 0) gart->active_domain = NULL; diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c index 7426b7666e2b..124c8848ab7e 100644 --- a/drivers/iommu/tegra-smmu.c +++ b/drivers/iommu/tegra-smmu.c @@ -465,7 +465,7 @@ static void tegra_smmu_as_unprepare(struct tegra_smmu *smmu, static int tegra_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { - struct tegra_smmu *smmu = dev->archdata.iommu; 
+ struct tegra_smmu *smmu = dev_iommu_priv_get(dev); struct tegra_smmu_as *as = to_smmu_as(domain); struct device_node *np = dev->of_node; struct of_phandle_args args; @@ -780,7 +780,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev) * supported by the Linux kernel, so abort after the * first match. */ - dev->archdata.iommu = smmu; + dev_iommu_priv_set(dev, smmu); break; } @@ -797,7 +797,7 @@ static struct iommu_device *tegra_smmu_probe_device(struct device *dev) static void tegra_smmu_release_device(struct device *dev) { - dev->archdata.iommu = NULL; + dev_iommu_priv_set(dev, NULL); } static const struct tegra_smmu_group_soc * @@ -856,7 +856,7 @@ static struct iommu_group *tegra_smmu_group_get(struct tegra_smmu *smmu, static struct iommu_group *tegra_smmu_device_group(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct tegra_smmu *smmu = dev->archdata.iommu; + struct tegra_smmu *smmu = dev_iommu_priv_get(dev); struct iommu_group *group; group = tegra_smmu_group_get(smmu, fwspec->ids[0]); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index f6f07489a9aa..b4da396cce60 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -1010,8 +1010,8 @@ static int viommu_probe(struct virtio_device *vdev) if (ret) return ret; - virtio_cread(vdev, struct virtio_iommu_config, page_size_mask, - &viommu->pgsize_bitmap); + virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask, + &viommu->pgsize_bitmap); if (!viommu->pgsize_bitmap) { ret = -EINVAL; @@ -1022,25 +1022,25 @@ static int viommu_probe(struct virtio_device *vdev) viommu->last_domain = ~0U; /* Optional features */ - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, - struct virtio_iommu_config, input_range.start, - &input_start); + virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, + struct virtio_iommu_config, input_range.start, + &input_start); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, - struct virtio_iommu_config, input_range.end, - &input_end); + virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE, + struct virtio_iommu_config, input_range.end, + &input_end); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, - struct virtio_iommu_config, domain_range.start, - &viommu->first_domain); + virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.start, + &viommu->first_domain); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, - struct virtio_iommu_config, domain_range.end, - &viommu->last_domain); + virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE, + struct virtio_iommu_config, domain_range.end, + &viommu->last_domain); - virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, - struct virtio_iommu_config, probe_size, - &viommu->probe_size); + virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE, + struct virtio_iommu_config, probe_size, + &viommu->probe_size); viommu->geometry = (struct iommu_domain_geometry) { .aperture_start = input_start, diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c index c10a9318a4b7..53945ca5d785 100644 --- a/drivers/mailbox/bcm-pdc-mailbox.c +++ b/drivers/mailbox/bcm-pdc-mailbox.c @@ -679,7 +679,7 @@ pdc_receive(struct pdc_state *pdcs) /* read last_rx_curr from register once */ pdcs->last_rx_curr = - (ioread32(&pdcs->rxregs_64->status0) & + (ioread32((const void __iomem *)&pdcs->rxregs_64->status0) & CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE; do { diff --git 
a/drivers/md/raid5.c b/drivers/md/raid5.c index fb8d1fb14088..ef0fd4830803 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7019,7 +7019,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) } else goto abort; spin_lock_init(&conf->device_lock); - seqcount_init(&conf->gen_lock); + seqcount_spinlock_init(&conf->gen_lock, &conf->device_lock); mutex_init(&conf->cache_size_mutex); init_waitqueue_head(&conf->wait_for_quiescent); init_waitqueue_head(&conf->wait_for_stripe); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 7fb3b26a181a..16fc29472f5c 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -582,7 +582,7 @@ struct r5conf { int prev_chunk_sectors; int prev_algo; short generation; /* increments with every reshape */ - seqcount_t gen_lock; /* lock against generation changes */ + seqcount_spinlock_t gen_lock; /* lock against generation changes */ unsigned long reshape_checkpoint; /* Time we last updated * metadata */ long long min_offset_diff; /* minimum difference between diff --git a/drivers/media/pci/ngene/ngene-cards.c b/drivers/media/pci/ngene/ngene-cards.c index 6185806a00e0..8bfb3d8ea610 100644 --- a/drivers/media/pci/ngene/ngene-cards.c +++ b/drivers/media/pci/ngene/ngene-cards.c @@ -1186,7 +1186,7 @@ MODULE_DEVICE_TABLE(pci, ngene_id_tbl); /****************************************************************************/ static pci_ers_result_t ngene_error_detected(struct pci_dev *dev, - enum pci_channel_state state) + pci_channel_state_t state) { dev_err(&dev->dev, "PCI error\n"); if (state == pci_channel_io_perm_failure) diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h index 152a713fff78..1a32266b7ddc 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_iommu.h @@ -9,9 +9,11 @@ #if defined(CONFIG_EXYNOS_IOMMU) +#include <linux/iommu.h> + static inline bool exynos_is_iommu_available(struct device *dev) { - return dev->archdata.iommu != NULL; + return dev_iommu_priv_get(dev) != NULL; } #else diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index 97440af499b0..2c79e95dd486 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -188,6 +188,16 @@ config RENESAS_RPCIF host or HyperFlash. You'll have to select individual components under the corresponding menu. +config STM32_FMC2_EBI + tristate "Support for FMC2 External Bus Interface on STM32MP SoCs" + depends on MACH_STM32MP157 || COMPILE_TEST + select MFD_SYSCON + help + Select this option to enable the STM32 FMC2 External Bus Interface + controller. This driver configures the transactions with external + devices (like SRAM, ethernet adapters, FPGAs, LCD displays, ...) on + SOCs containing the FMC2 External Bus Interface. 
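The driver introduced by this Kconfig entry (drivers/memory/stm32-fmc2-ebi.c, added below) programs the FMC2 chip-select registers through a regmap, and its boolean device-tree properties boil down to a masked register update, as stm32_fmc2_ebi_set_bit_field() shows further down. A minimal sketch of that idiom, with an illustrative function name:

	static int sketch_set_bool_property(struct regmap *map, unsigned int reg,
					    u32 mask, bool enable)
	{
		/* Set the whole field when the property is present, clear it otherwise. */
		return regmap_update_bits(map, reg, mask, enable ? mask : 0);
	}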
+ source "drivers/memory/samsung/Kconfig" source "drivers/memory/tegra/Kconfig" diff --git a/drivers/memory/Makefile b/drivers/memory/Makefile index d105f8ebe8b8..b4533ffff2bc 100644 --- a/drivers/memory/Makefile +++ b/drivers/memory/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MTK_SMI) += mtk-smi.o obj-$(CONFIG_DA8XX_DDRCTL) += da8xx-ddrctl.o obj-$(CONFIG_PL353_SMC) += pl353-smc.o obj-$(CONFIG_RENESAS_RPCIF) += renesas-rpc-if.o +obj-$(CONFIG_STM32_FMC2_EBI) += stm32-fmc2-ebi.o obj-$(CONFIG_SAMSUNG_MC) += samsung/ obj-$(CONFIG_TEGRA_MC) += tegra/ diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index e154bea3cf14..c21262502581 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -239,6 +239,13 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt2712 = { .larb_direct_to_common_mask = BIT(8) | BIT(9), /* bdpsys */ }; +static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = { + .config_port = mtk_smi_larb_config_port_gen2_general, + .larb_direct_to_common_mask = + BIT(4) | BIT(6) | BIT(11) | BIT(12) | BIT(13), + /* DUMMY | IPU0 | IPU1 | CCU | MDLA */ +}; + static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = { .has_gals = true, .config_port = mtk_smi_larb_config_port_gen2_general, @@ -260,6 +267,10 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { .data = &mtk_smi_larb_mt2712 }, { + .compatible = "mediatek,mt6779-smi-larb", + .data = &mtk_smi_larb_mt6779 + }, + { .compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183 }, @@ -388,6 +399,13 @@ static const struct mtk_smi_common_plat mtk_smi_common_gen2 = { .gen = MTK_SMI_GEN2, }; +static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = { + .gen = MTK_SMI_GEN2, + .has_gals = true, + .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) | + F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7), +}; + static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { .gen = MTK_SMI_GEN2, .has_gals = true, @@ -409,6 +427,10 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { .data = &mtk_smi_common_gen2, }, { + .compatible = "mediatek,mt6779-smi-common", + .data = &mtk_smi_common_mt6779, + }, + { .compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183, }, diff --git a/drivers/memory/stm32-fmc2-ebi.c b/drivers/memory/stm32-fmc2-ebi.c new file mode 100644 index 000000000000..4d5758c419c5 --- /dev/null +++ b/drivers/memory/stm32-fmc2-ebi.c @@ -0,0 +1,1206 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) STMicroelectronics 2020 + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/pinctrl/consumer.h> +#include <linux/regmap.h> +#include <linux/reset.h> + +/* FMC2 Controller Registers */ +#define FMC2_BCR1 0x0 +#define FMC2_BTR1 0x4 +#define FMC2_BCR(x) ((x) * 0x8 + FMC2_BCR1) +#define FMC2_BTR(x) ((x) * 0x8 + FMC2_BTR1) +#define FMC2_PCSCNTR 0x20 +#define FMC2_BWTR1 0x104 +#define FMC2_BWTR(x) ((x) * 0x8 + FMC2_BWTR1) + +/* Register: FMC2_BCR1 */ +#define FMC2_BCR1_CCLKEN BIT(20) +#define FMC2_BCR1_FMC2EN BIT(31) + +/* Register: FMC2_BCRx */ +#define FMC2_BCR_MBKEN BIT(0) +#define FMC2_BCR_MUXEN BIT(1) +#define FMC2_BCR_MTYP GENMASK(3, 2) +#define FMC2_BCR_MWID GENMASK(5, 4) +#define FMC2_BCR_FACCEN BIT(6) +#define FMC2_BCR_BURSTEN BIT(8) +#define FMC2_BCR_WAITPOL BIT(9) +#define FMC2_BCR_WAITCFG BIT(11) +#define FMC2_BCR_WREN BIT(12) +#define FMC2_BCR_WAITEN BIT(13) +#define FMC2_BCR_EXTMOD BIT(14) +#define 
FMC2_BCR_ASYNCWAIT BIT(15) +#define FMC2_BCR_CPSIZE GENMASK(18, 16) +#define FMC2_BCR_CBURSTRW BIT(19) +#define FMC2_BCR_NBLSET GENMASK(23, 22) + +/* Register: FMC2_BTRx/FMC2_BWTRx */ +#define FMC2_BXTR_ADDSET GENMASK(3, 0) +#define FMC2_BXTR_ADDHLD GENMASK(7, 4) +#define FMC2_BXTR_DATAST GENMASK(15, 8) +#define FMC2_BXTR_BUSTURN GENMASK(19, 16) +#define FMC2_BTR_CLKDIV GENMASK(23, 20) +#define FMC2_BTR_DATLAT GENMASK(27, 24) +#define FMC2_BXTR_ACCMOD GENMASK(29, 28) +#define FMC2_BXTR_DATAHLD GENMASK(31, 30) + +/* Register: FMC2_PCSCNTR */ +#define FMC2_PCSCNTR_CSCOUNT GENMASK(15, 0) +#define FMC2_PCSCNTR_CNTBEN(x) BIT((x) + 16) + +#define FMC2_MAX_EBI_CE 4 +#define FMC2_MAX_BANKS 5 + +#define FMC2_BCR_CPSIZE_0 0x0 +#define FMC2_BCR_CPSIZE_128 0x1 +#define FMC2_BCR_CPSIZE_256 0x2 +#define FMC2_BCR_CPSIZE_512 0x3 +#define FMC2_BCR_CPSIZE_1024 0x4 + +#define FMC2_BCR_MWID_8 0x0 +#define FMC2_BCR_MWID_16 0x1 + +#define FMC2_BCR_MTYP_SRAM 0x0 +#define FMC2_BCR_MTYP_PSRAM 0x1 +#define FMC2_BCR_MTYP_NOR 0x2 + +#define FMC2_BXTR_EXTMOD_A 0x0 +#define FMC2_BXTR_EXTMOD_B 0x1 +#define FMC2_BXTR_EXTMOD_C 0x2 +#define FMC2_BXTR_EXTMOD_D 0x3 + +#define FMC2_BCR_NBLSET_MAX 0x3 +#define FMC2_BXTR_ADDSET_MAX 0xf +#define FMC2_BXTR_ADDHLD_MAX 0xf +#define FMC2_BXTR_DATAST_MAX 0xff +#define FMC2_BXTR_BUSTURN_MAX 0xf +#define FMC2_BXTR_DATAHLD_MAX 0x3 +#define FMC2_BTR_CLKDIV_MAX 0xf +#define FMC2_BTR_DATLAT_MAX 0xf +#define FMC2_PCSCNTR_CSCOUNT_MAX 0xff + +enum stm32_fmc2_ebi_bank { + FMC2_EBI1 = 0, + FMC2_EBI2, + FMC2_EBI3, + FMC2_EBI4, + FMC2_NAND +}; + +enum stm32_fmc2_ebi_register_type { + FMC2_REG_BCR = 1, + FMC2_REG_BTR, + FMC2_REG_BWTR, + FMC2_REG_PCSCNTR +}; + +enum stm32_fmc2_ebi_transaction_type { + FMC2_ASYNC_MODE_1_SRAM = 0, + FMC2_ASYNC_MODE_1_PSRAM, + FMC2_ASYNC_MODE_A_SRAM, + FMC2_ASYNC_MODE_A_PSRAM, + FMC2_ASYNC_MODE_2_NOR, + FMC2_ASYNC_MODE_B_NOR, + FMC2_ASYNC_MODE_C_NOR, + FMC2_ASYNC_MODE_D_NOR, + FMC2_SYNC_READ_SYNC_WRITE_PSRAM, + FMC2_SYNC_READ_ASYNC_WRITE_PSRAM, + FMC2_SYNC_READ_SYNC_WRITE_NOR, + FMC2_SYNC_READ_ASYNC_WRITE_NOR +}; + +enum stm32_fmc2_ebi_buswidth { + FMC2_BUSWIDTH_8 = 8, + FMC2_BUSWIDTH_16 = 16 +}; + +enum stm32_fmc2_ebi_cpsize { + FMC2_CPSIZE_0 = 0, + FMC2_CPSIZE_128 = 128, + FMC2_CPSIZE_256 = 256, + FMC2_CPSIZE_512 = 512, + FMC2_CPSIZE_1024 = 1024 +}; + +struct stm32_fmc2_ebi { + struct device *dev; + struct clk *clk; + struct regmap *regmap; + u8 bank_assigned; + + u32 bcr[FMC2_MAX_EBI_CE]; + u32 btr[FMC2_MAX_EBI_CE]; + u32 bwtr[FMC2_MAX_EBI_CE]; + u32 pcscntr; +}; + +/* + * struct stm32_fmc2_prop - STM32 FMC2 EBI property + * @name: the device tree binding name of the property + * @bprop: indicate that it is a boolean property + * @mprop: indicate that it is a mandatory property + * @reg_type: the register that have to be modified + * @reg_mask: the bit that have to be modified in the selected register + * in case of it is a boolean property + * @reset_val: the default value that have to be set in case the property + * has not been defined in the device tree + * @check: this callback ckecks that the property is compliant with the + * transaction type selected + * @calculate: this callback is called to calculate for exemple a timing + * set in nanoseconds in the device tree in clock cycles or in + * clock period + * @set: this callback applies the values in the registers + */ +struct stm32_fmc2_prop { + const char *name; + bool bprop; + bool mprop; + int reg_type; + u32 reg_mask; + u32 reset_val; + int (*check)(struct stm32_fmc2_ebi *ebi, + const struct 
stm32_fmc2_prop *prop, int cs); + u32 (*calculate)(struct stm32_fmc2_ebi *ebi, int cs, u32 setup); + int (*set)(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup); +}; + +static int stm32_fmc2_ebi_check_mux(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr; + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + + if (bcr & FMC2_BCR_MTYP) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_waitcfg(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + + if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_sync_trans(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr; + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + + if (bcr & FMC2_BCR_BURSTEN) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_async_trans(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr; + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + + if (!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_cpsize(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr, val = FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + + if ((bcr & FMC2_BCR_MTYP) == val && bcr & FMC2_BCR_BURSTEN) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_address_hold(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr, bxtr, val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D); + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (prop->reg_type == FMC2_REG_BWTR) + regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); + else + regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); + + if ((!(bcr & FMC2_BCR_BURSTEN) || !(bcr & FMC2_BCR_CBURSTRW)) && + ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN)) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_clk_period(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + u32 bcr, bcr1; + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (cs) + regmap_read(ebi->regmap, FMC2_BCR1, &bcr1); + else + bcr1 = bcr; + + if (bcr & FMC2_BCR_BURSTEN && (!cs || !(bcr1 & FMC2_BCR1_CCLKEN))) + return 0; + + return -EINVAL; +} + +static int stm32_fmc2_ebi_check_cclk(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs) +{ + if (cs) + return -EINVAL; + + return stm32_fmc2_ebi_check_sync_trans(ebi, prop, cs); +} + +static u32 stm32_fmc2_ebi_ns_to_clock_cycles(struct stm32_fmc2_ebi *ebi, + int cs, u32 setup) +{ + unsigned long hclk = clk_get_rate(ebi->clk); + unsigned long hclkp = NSEC_PER_SEC / (hclk / 1000); + + return DIV_ROUND_UP(setup * 1000, hclkp); +} + +static u32 stm32_fmc2_ebi_ns_to_clk_period(struct stm32_fmc2_ebi *ebi, + int cs, u32 setup) +{ + u32 nb_clk_cycles = stm32_fmc2_ebi_ns_to_clock_cycles(ebi, cs, setup); + u32 bcr, btr, clk_period; + + regmap_read(ebi->regmap, FMC2_BCR1, &bcr); + if (bcr & FMC2_BCR1_CCLKEN || !cs) + regmap_read(ebi->regmap, FMC2_BTR1, &btr); + else + regmap_read(ebi->regmap, FMC2_BTR(cs), &btr); + + clk_period = FIELD_GET(FMC2_BTR_CLKDIV, btr) + 1; + + return DIV_ROUND_UP(nb_clk_cycles, clk_period); +} + +static 
int stm32_fmc2_ebi_get_reg(int reg_type, int cs, u32 *reg) +{ + switch (reg_type) { + case FMC2_REG_BCR: + *reg = FMC2_BCR(cs); + break; + case FMC2_REG_BTR: + *reg = FMC2_BTR(cs); + break; + case FMC2_REG_BWTR: + *reg = FMC2_BWTR(cs); + break; + case FMC2_REG_PCSCNTR: + *reg = FMC2_PCSCNTR; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int stm32_fmc2_ebi_set_bit_field(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 reg; + int ret; + + ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, ®); + if (ret) + return ret; + + regmap_update_bits(ebi->regmap, reg, prop->reg_mask, + setup ? prop->reg_mask : 0); + + return 0; +} + +static int stm32_fmc2_ebi_set_trans_type(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 bcr_mask, bcr = FMC2_BCR_WREN; + u32 btr_mask, btr = 0; + u32 bwtr_mask, bwtr = 0; + + bwtr_mask = FMC2_BXTR_ACCMOD; + btr_mask = FMC2_BXTR_ACCMOD; + bcr_mask = FMC2_BCR_MUXEN | FMC2_BCR_MTYP | FMC2_BCR_FACCEN | + FMC2_BCR_WREN | FMC2_BCR_WAITEN | FMC2_BCR_BURSTEN | + FMC2_BCR_EXTMOD | FMC2_BCR_CBURSTRW; + + switch (setup) { + case FMC2_ASYNC_MODE_1_SRAM: + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_SRAM); + /* + * MUXEN = 0, MTYP = 0, FACCEN = 0, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0 + */ + break; + case FMC2_ASYNC_MODE_1_PSRAM: + /* + * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); + break; + case FMC2_ASYNC_MODE_A_SRAM: + /* + * MUXEN = 0, MTYP = 0, FACCEN = 0, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_SRAM); + bcr |= FMC2_BCR_EXTMOD; + btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A); + bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A); + break; + case FMC2_ASYNC_MODE_A_PSRAM: + /* + * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); + bcr |= FMC2_BCR_EXTMOD; + btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A); + bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_A); + break; + case FMC2_ASYNC_MODE_2_NOR: + /* + * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + bcr |= FMC2_BCR_FACCEN; + break; + case FMC2_ASYNC_MODE_B_NOR: + /* + * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 1 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD; + btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_B); + bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_B); + break; + case FMC2_ASYNC_MODE_C_NOR: + /* + * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 2 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + bcr |= FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD; + btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_C); + bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_C); + break; + case FMC2_ASYNC_MODE_D_NOR: + /* + * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 0, WAITEN = 0, + * WREN = 1, EXTMOD = 1, CBURSTRW = 0, ACCMOD = 3 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + bcr |= 
FMC2_BCR_FACCEN | FMC2_BCR_EXTMOD; + btr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D); + bwtr |= FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D); + break; + case FMC2_SYNC_READ_SYNC_WRITE_PSRAM: + /* + * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 1, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 1, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); + bcr |= FMC2_BCR_BURSTEN | FMC2_BCR_CBURSTRW; + break; + case FMC2_SYNC_READ_ASYNC_WRITE_PSRAM: + /* + * MUXEN = 0, MTYP = 1, FACCEN = 0, BURSTEN = 1, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_PSRAM); + bcr |= FMC2_BCR_BURSTEN; + break; + case FMC2_SYNC_READ_SYNC_WRITE_NOR: + /* + * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 1, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 1, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + bcr |= FMC2_BCR_FACCEN | FMC2_BCR_BURSTEN | FMC2_BCR_CBURSTRW; + break; + case FMC2_SYNC_READ_ASYNC_WRITE_NOR: + /* + * MUXEN = 0, MTYP = 2, FACCEN = 1, BURSTEN = 1, WAITEN = 0, + * WREN = 1, EXTMOD = 0, CBURSTRW = 0, ACCMOD = 0 + */ + bcr |= FIELD_PREP(FMC2_BCR_MTYP, FMC2_BCR_MTYP_NOR); + bcr |= FMC2_BCR_FACCEN | FMC2_BCR_BURSTEN; + break; + default: + /* Type of transaction not supported */ + return -EINVAL; + } + + if (bcr & FMC2_BCR_EXTMOD) + regmap_update_bits(ebi->regmap, FMC2_BWTR(cs), + bwtr_mask, bwtr); + regmap_update_bits(ebi->regmap, FMC2_BTR(cs), btr_mask, btr); + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), bcr_mask, bcr); + + return 0; +} + +static int stm32_fmc2_ebi_set_buswidth(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val; + + switch (setup) { + case FMC2_BUSWIDTH_8: + val = FIELD_PREP(FMC2_BCR_MWID, FMC2_BCR_MWID_8); + break; + case FMC2_BUSWIDTH_16: + val = FIELD_PREP(FMC2_BCR_MWID, FMC2_BCR_MWID_16); + break; + default: + /* Buswidth not supported */ + return -EINVAL; + } + + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MWID, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_cpsize(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val; + + switch (setup) { + case FMC2_CPSIZE_0: + val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_0); + break; + case FMC2_CPSIZE_128: + val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_128); + break; + case FMC2_CPSIZE_256: + val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_256); + break; + case FMC2_CPSIZE_512: + val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_512); + break; + case FMC2_CPSIZE_1024: + val = FIELD_PREP(FMC2_BCR_CPSIZE, FMC2_BCR_CPSIZE_1024); + break; + default: + /* Cpsize not supported */ + return -EINVAL; + } + + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_CPSIZE, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_bl_setup(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val; + + val = min_t(u32, setup, FMC2_BCR_NBLSET_MAX); + val = FIELD_PREP(FMC2_BCR_NBLSET, val); + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_NBLSET, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_address_setup(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 bcr, bxtr, reg; + u32 val = FIELD_PREP(FMC2_BXTR_ACCMOD, FMC2_BXTR_EXTMOD_D); + int ret; + + ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, ®); + if (ret) + return ret; + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if (prop->reg_type == 
FMC2_REG_BWTR) + regmap_read(ebi->regmap, FMC2_BWTR(cs), &bxtr); + else + regmap_read(ebi->regmap, FMC2_BTR(cs), &bxtr); + + if ((bxtr & FMC2_BXTR_ACCMOD) == val || bcr & FMC2_BCR_MUXEN) + val = clamp_val(setup, 1, FMC2_BXTR_ADDSET_MAX); + else + val = min_t(u32, setup, FMC2_BXTR_ADDSET_MAX); + val = FIELD_PREP(FMC2_BXTR_ADDSET, val); + regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_ADDSET, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_address_hold(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val, reg; + int ret; + + ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, ®); + if (ret) + return ret; + + val = clamp_val(setup, 1, FMC2_BXTR_ADDHLD_MAX); + val = FIELD_PREP(FMC2_BXTR_ADDHLD, val); + regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_ADDHLD, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_data_setup(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val, reg; + int ret; + + ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, ®); + if (ret) + return ret; + + val = clamp_val(setup, 1, FMC2_BXTR_DATAST_MAX); + val = FIELD_PREP(FMC2_BXTR_DATAST, val); + regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_DATAST, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_bus_turnaround(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val, reg; + int ret; + + ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, ®); + if (ret) + return ret; + + val = setup ? min_t(u32, setup - 1, FMC2_BXTR_BUSTURN_MAX) : 0; + val = FIELD_PREP(FMC2_BXTR_BUSTURN, val); + regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_BUSTURN, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_data_hold(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val, reg; + int ret; + + ret = stm32_fmc2_ebi_get_reg(prop->reg_type, cs, ®); + if (ret) + return ret; + + if (prop->reg_type == FMC2_REG_BWTR) + val = setup ? min_t(u32, setup - 1, FMC2_BXTR_DATAHLD_MAX) : 0; + else + val = min_t(u32, setup, FMC2_BXTR_DATAHLD_MAX); + val = FIELD_PREP(FMC2_BXTR_DATAHLD, val); + regmap_update_bits(ebi->regmap, reg, FMC2_BXTR_DATAHLD, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_clk_period(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val; + + val = setup ? clamp_val(setup - 1, 1, FMC2_BTR_CLKDIV_MAX) : 1; + val = FIELD_PREP(FMC2_BTR_CLKDIV, val); + regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_CLKDIV, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_data_latency(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 val; + + val = setup > 1 ? 
min_t(u32, setup - 2, FMC2_BTR_DATLAT_MAX) : 0; + val = FIELD_PREP(FMC2_BTR_DATLAT, val); + regmap_update_bits(ebi->regmap, FMC2_BTR(cs), FMC2_BTR_DATLAT, val); + + return 0; +} + +static int stm32_fmc2_ebi_set_max_low_pulse(struct stm32_fmc2_ebi *ebi, + const struct stm32_fmc2_prop *prop, + int cs, u32 setup) +{ + u32 old_val, new_val, pcscntr; + + if (setup < 1) + return 0; + + regmap_read(ebi->regmap, FMC2_PCSCNTR, &pcscntr); + + /* Enable counter for the bank */ + regmap_update_bits(ebi->regmap, FMC2_PCSCNTR, + FMC2_PCSCNTR_CNTBEN(cs), + FMC2_PCSCNTR_CNTBEN(cs)); + + new_val = min_t(u32, setup - 1, FMC2_PCSCNTR_CSCOUNT_MAX); + old_val = FIELD_GET(FMC2_PCSCNTR_CSCOUNT, pcscntr); + if (old_val && new_val > old_val) + /* Keep current counter value */ + return 0; + + new_val = FIELD_PREP(FMC2_PCSCNTR_CSCOUNT, new_val); + regmap_update_bits(ebi->regmap, FMC2_PCSCNTR, + FMC2_PCSCNTR_CSCOUNT, new_val); + + return 0; +} + +static const struct stm32_fmc2_prop stm32_fmc2_child_props[] = { + /* st,fmc2-ebi-cs-trans-type must be the first property */ + { + .name = "st,fmc2-ebi-cs-transaction-type", + .mprop = true, + .set = stm32_fmc2_ebi_set_trans_type, + }, + { + .name = "st,fmc2-ebi-cs-cclk-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR1_CCLKEN, + .check = stm32_fmc2_ebi_check_cclk, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-mux-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_MUXEN, + .check = stm32_fmc2_ebi_check_mux, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-buswidth", + .reset_val = FMC2_BUSWIDTH_16, + .set = stm32_fmc2_ebi_set_buswidth, + }, + { + .name = "st,fmc2-ebi-cs-waitpol-high", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_WAITPOL, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-waitcfg-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_WAITCFG, + .check = stm32_fmc2_ebi_check_waitcfg, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-wait-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_WAITEN, + .check = stm32_fmc2_ebi_check_sync_trans, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-asyncwait-enable", + .bprop = true, + .reg_type = FMC2_REG_BCR, + .reg_mask = FMC2_BCR_ASYNCWAIT, + .check = stm32_fmc2_ebi_check_async_trans, + .set = stm32_fmc2_ebi_set_bit_field, + }, + { + .name = "st,fmc2-ebi-cs-cpsize", + .check = stm32_fmc2_ebi_check_cpsize, + .set = stm32_fmc2_ebi_set_cpsize, + }, + { + .name = "st,fmc2-ebi-cs-byte-lane-setup-ns", + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_bl_setup, + }, + { + .name = "st,fmc2-ebi-cs-address-setup-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_ADDSET_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_setup, + }, + { + .name = "st,fmc2-ebi-cs-address-hold-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_ADDHLD_MAX, + .check = stm32_fmc2_ebi_check_address_hold, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_hold, + }, + { + .name = "st,fmc2-ebi-cs-data-setup-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_DATAST_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_setup, + }, + { + .name = 
"st,fmc2-ebi-cs-bus-turnaround-ns", + .reg_type = FMC2_REG_BTR, + .reset_val = FMC2_BXTR_BUSTURN_MAX + 1, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_bus_turnaround, + }, + { + .name = "st,fmc2-ebi-cs-data-hold-ns", + .reg_type = FMC2_REG_BTR, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_hold, + }, + { + .name = "st,fmc2-ebi-cs-clk-period-ns", + .reset_val = FMC2_BTR_CLKDIV_MAX + 1, + .check = stm32_fmc2_ebi_check_clk_period, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_clk_period, + }, + { + .name = "st,fmc2-ebi-cs-data-latency-ns", + .check = stm32_fmc2_ebi_check_sync_trans, + .calculate = stm32_fmc2_ebi_ns_to_clk_period, + .set = stm32_fmc2_ebi_set_data_latency, + }, + { + .name = "st,fmc2-ebi-cs-write-address-setup-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_ADDSET_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_setup, + }, + { + .name = "st,fmc2-ebi-cs-write-address-hold-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_ADDHLD_MAX, + .check = stm32_fmc2_ebi_check_address_hold, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_address_hold, + }, + { + .name = "st,fmc2-ebi-cs-write-data-setup-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_DATAST_MAX, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_setup, + }, + { + .name = "st,fmc2-ebi-cs-write-bus-turnaround-ns", + .reg_type = FMC2_REG_BWTR, + .reset_val = FMC2_BXTR_BUSTURN_MAX + 1, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_bus_turnaround, + }, + { + .name = "st,fmc2-ebi-cs-write-data-hold-ns", + .reg_type = FMC2_REG_BWTR, + .check = stm32_fmc2_ebi_check_async_trans, + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_data_hold, + }, + { + .name = "st,fmc2-ebi-cs-max-low-pulse-ns", + .calculate = stm32_fmc2_ebi_ns_to_clock_cycles, + .set = stm32_fmc2_ebi_set_max_low_pulse, + }, +}; + +static int stm32_fmc2_ebi_parse_prop(struct stm32_fmc2_ebi *ebi, + struct device_node *dev_node, + const struct stm32_fmc2_prop *prop, + int cs) +{ + struct device *dev = ebi->dev; + u32 setup = 0; + + if (!prop->set) { + dev_err(dev, "property %s is not well defined\n", prop->name); + return -EINVAL; + } + + if (prop->check && prop->check(ebi, prop, cs)) + /* Skeep this property */ + return 0; + + if (prop->bprop) { + bool bprop; + + bprop = of_property_read_bool(dev_node, prop->name); + if (prop->mprop && !bprop) { + dev_err(dev, "mandatory property %s not defined in the device tree\n", + prop->name); + return -EINVAL; + } + + if (bprop) + setup = 1; + } else { + u32 val; + int ret; + + ret = of_property_read_u32(dev_node, prop->name, &val); + if (prop->mprop && ret) { + dev_err(dev, "mandatory property %s not defined in the device tree\n", + prop->name); + return ret; + } + + if (ret) + setup = prop->reset_val; + else if (prop->calculate) + setup = prop->calculate(ebi, cs, val); + else + setup = val; + } + + return prop->set(ebi, prop, cs, setup); +} + +static void stm32_fmc2_ebi_enable_bank(struct stm32_fmc2_ebi *ebi, int cs) +{ + regmap_update_bits(ebi->regmap, FMC2_BCR(cs), + FMC2_BCR_MBKEN, FMC2_BCR_MBKEN); +} + +static void stm32_fmc2_ebi_disable_bank(struct stm32_fmc2_ebi *ebi, int cs) +{ + 
regmap_update_bits(ebi->regmap, FMC2_BCR(cs), FMC2_BCR_MBKEN, 0); +} + +static void stm32_fmc2_ebi_save_setup(struct stm32_fmc2_ebi *ebi) +{ + unsigned int cs; + + for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { + regmap_read(ebi->regmap, FMC2_BCR(cs), &ebi->bcr[cs]); + regmap_read(ebi->regmap, FMC2_BTR(cs), &ebi->btr[cs]); + regmap_read(ebi->regmap, FMC2_BWTR(cs), &ebi->bwtr[cs]); + } + + regmap_read(ebi->regmap, FMC2_PCSCNTR, &ebi->pcscntr); +} + +static void stm32_fmc2_ebi_set_setup(struct stm32_fmc2_ebi *ebi) +{ + unsigned int cs; + + for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { + regmap_write(ebi->regmap, FMC2_BCR(cs), ebi->bcr[cs]); + regmap_write(ebi->regmap, FMC2_BTR(cs), ebi->btr[cs]); + regmap_write(ebi->regmap, FMC2_BWTR(cs), ebi->bwtr[cs]); + } + + regmap_write(ebi->regmap, FMC2_PCSCNTR, ebi->pcscntr); +} + +static void stm32_fmc2_ebi_disable_banks(struct stm32_fmc2_ebi *ebi) +{ + unsigned int cs; + + for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { + if (!(ebi->bank_assigned & BIT(cs))) + continue; + + stm32_fmc2_ebi_disable_bank(ebi, cs); + } +} + +/* The NWAIT signal cannot be used by both the EBI and NAND controllers */ +static bool stm32_fmc2_ebi_nwait_used_by_ctrls(struct stm32_fmc2_ebi *ebi) +{ + unsigned int cs; + u32 bcr; + + for (cs = 0; cs < FMC2_MAX_EBI_CE; cs++) { + if (!(ebi->bank_assigned & BIT(cs))) + continue; + + regmap_read(ebi->regmap, FMC2_BCR(cs), &bcr); + if ((bcr & FMC2_BCR_WAITEN || bcr & FMC2_BCR_ASYNCWAIT) && + ebi->bank_assigned & BIT(FMC2_NAND)) + return true; + } + + return false; +} + +static void stm32_fmc2_ebi_enable(struct stm32_fmc2_ebi *ebi) +{ + regmap_update_bits(ebi->regmap, FMC2_BCR1, + FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN); +} + +static void stm32_fmc2_ebi_disable(struct stm32_fmc2_ebi *ebi) +{ + regmap_update_bits(ebi->regmap, FMC2_BCR1, FMC2_BCR1_FMC2EN, 0); +} + +static int stm32_fmc2_ebi_setup_cs(struct stm32_fmc2_ebi *ebi, + struct device_node *dev_node, + u32 cs) +{ + unsigned int i; + int ret; + + stm32_fmc2_ebi_disable_bank(ebi, cs); + + for (i = 0; i < ARRAY_SIZE(stm32_fmc2_child_props); i++) { + const struct stm32_fmc2_prop *p = &stm32_fmc2_child_props[i]; + + ret = stm32_fmc2_ebi_parse_prop(ebi, dev_node, p, cs); + if (ret) { + dev_err(ebi->dev, "property %s could not be set: %d\n", + p->name, ret); + return ret; + } + } + + stm32_fmc2_ebi_enable_bank(ebi, cs); + + return 0; +} + +static int stm32_fmc2_ebi_parse_dt(struct stm32_fmc2_ebi *ebi) +{ + struct device *dev = ebi->dev; + struct device_node *child; + bool child_found = false; + u32 bank; + int ret; + + for_each_available_child_of_node(dev->of_node, child) { + ret = of_property_read_u32(child, "reg", &bank); + if (ret) { + dev_err(dev, "could not retrieve reg property: %d\n", + ret); + return ret; + } + + if (bank >= FMC2_MAX_BANKS) { + dev_err(dev, "invalid reg value: %d\n", bank); + return -EINVAL; + } + + if (ebi->bank_assigned & BIT(bank)) { + dev_err(dev, "bank already assigned: %d\n", bank); + return -EINVAL; + } + + if (bank < FMC2_MAX_EBI_CE) { + ret = stm32_fmc2_ebi_setup_cs(ebi, child, bank); + if (ret) { + dev_err(dev, "setup chip select %d failed: %d\n", + bank, ret); + return ret; + } + } + + ebi->bank_assigned |= BIT(bank); + child_found = true; + } + + if (!child_found) { + dev_warn(dev, "no subnodes found, disabling the driver\n"); + return -ENODEV; + } + + if (stm32_fmc2_ebi_nwait_used_by_ctrls(ebi)) { + dev_err(dev, "NWAIT signal connected to EBI and NAND controllers\n"); + return -EINVAL; + } + + stm32_fmc2_ebi_enable(ebi); + + return 
of_platform_populate(dev->of_node, NULL, NULL, dev); +} + +static int stm32_fmc2_ebi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct stm32_fmc2_ebi *ebi; + struct reset_control *rstc; + int ret; + + ebi = devm_kzalloc(&pdev->dev, sizeof(*ebi), GFP_KERNEL); + if (!ebi) + return -ENOMEM; + + ebi->dev = dev; + + ebi->regmap = device_node_to_regmap(dev->of_node); + if (IS_ERR(ebi->regmap)) + return PTR_ERR(ebi->regmap); + + ebi->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ebi->clk)) + return PTR_ERR(ebi->clk); + + rstc = devm_reset_control_get(dev, NULL); + if (PTR_ERR(rstc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + ret = clk_prepare_enable(ebi->clk); + if (ret) + return ret; + + if (!IS_ERR(rstc)) { + reset_control_assert(rstc); + reset_control_deassert(rstc); + } + + ret = stm32_fmc2_ebi_parse_dt(ebi); + if (ret) + goto err_release; + + stm32_fmc2_ebi_save_setup(ebi); + platform_set_drvdata(pdev, ebi); + + return 0; + +err_release: + stm32_fmc2_ebi_disable_banks(ebi); + stm32_fmc2_ebi_disable(ebi); + clk_disable_unprepare(ebi->clk); + + return ret; +} + +static int stm32_fmc2_ebi_remove(struct platform_device *pdev) +{ + struct stm32_fmc2_ebi *ebi = platform_get_drvdata(pdev); + + of_platform_depopulate(&pdev->dev); + stm32_fmc2_ebi_disable_banks(ebi); + stm32_fmc2_ebi_disable(ebi); + clk_disable_unprepare(ebi->clk); + + return 0; +} + +static int __maybe_unused stm32_fmc2_ebi_suspend(struct device *dev) +{ + struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev); + + stm32_fmc2_ebi_disable(ebi); + clk_disable_unprepare(ebi->clk); + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int __maybe_unused stm32_fmc2_ebi_resume(struct device *dev) +{ + struct stm32_fmc2_ebi *ebi = dev_get_drvdata(dev); + int ret; + + pinctrl_pm_select_default_state(dev); + + ret = clk_prepare_enable(ebi->clk); + if (ret) + return ret; + + stm32_fmc2_ebi_set_setup(ebi); + stm32_fmc2_ebi_enable(ebi); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(stm32_fmc2_ebi_pm_ops, stm32_fmc2_ebi_suspend, + stm32_fmc2_ebi_resume); + +static const struct of_device_id stm32_fmc2_ebi_match[] = { + {.compatible = "st,stm32mp1-fmc2-ebi"}, + {} +}; +MODULE_DEVICE_TABLE(of, stm32_fmc2_ebi_match); + +static struct platform_driver stm32_fmc2_ebi_driver = { + .probe = stm32_fmc2_ebi_probe, + .remove = stm32_fmc2_ebi_remove, + .driver = { + .name = "stm32_fmc2_ebi", + .of_match_table = stm32_fmc2_ebi_match, + .pm = &stm32_fmc2_ebi_pm_ops, + }, +}; +module_platform_driver(stm32_fmc2_ebi_driver); + +MODULE_ALIAS("platform:stm32_fmc2_ebi"); +MODULE_AUTHOR("Christophe Kerello <christophe.kerello@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STM32 FMC2 ebi driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index a37d7d171382..33df0837ab41 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -1193,18 +1193,6 @@ config MFD_SKY81452 This driver can also be built as a module. If so, the module will be called sky81452. -config MFD_SMSC - bool "SMSC ECE1099 series chips" - depends on I2C=y - select MFD_CORE - select REGMAP_I2C - help - If you say yes here you get support for the - ece1099 chips from SMSC. - - To compile this driver as a module, choose M here: the - module will be called smsc. 
- config MFD_SC27XX_PMIC tristate "Spreadtrum SC27xx PMICs" depends on ARCH_SPRD || COMPILE_TEST @@ -2053,6 +2041,27 @@ config MFD_WCD934X This driver provides common support WCD934x audio codec and its associated Pin Controller, Soundwire Controller and Audio codec. +config MFD_KHADAS_MCU + tristate "Support for Khadas System control Microcontroller" + depends on I2C + depends on ARCH_MESON || ARCH_ROCKCHIP || COMPILE_TEST + select MFD_CORE + select REGMAP_I2C + help + Support for the Khadas System control Microcontroller interface + present on their VIM and Edge boards. + + This Microcontroller is present on the Khadas VIM1, VIM2, VIM3 and + Edge boards. + + It provides multiple boot control features like password check, + power-on options, power-off control and system FAN control on recent + boards. + + This driver provides common support for accessing the device, + additional drivers must be enabled in order to use the functionality + of the device. + menu "Multimedia Capabilities Port drivers" depends on ARCH_SA1100 diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 9367a92f795a..a60e5f835283 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -127,7 +127,6 @@ obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o obj-$(CONFIG_MCP) += mcp-core.o obj-$(CONFIG_MCP_SA11X0) += mcp-sa11x0.o obj-$(CONFIG_MCP_UCB1200) += ucb1x00-core.o -obj-$(CONFIG_MFD_SMSC) += smsc-ece1099.o obj-$(CONFIG_MCP_UCB1200_TS) += ucb1x00-ts.o ifeq ($(CONFIG_SA1100_ASSABET),y) @@ -262,5 +261,6 @@ obj-$(CONFIG_MFD_ROHM_BD70528) += rohm-bd70528.o obj-$(CONFIG_MFD_ROHM_BD71828) += rohm-bd71828.o obj-$(CONFIG_MFD_ROHM_BD718XX) += rohm-bd718x7.o obj-$(CONFIG_MFD_STMFX) += stmfx.o +obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index 57723f116bb5..ee71ae04b5e6 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c @@ -498,7 +498,7 @@ static ssize_t ab3100_get_set_reg(struct file *file, int i = 0; /* Get userspace string and assure termination */ - buf_size = min(count, (sizeof(buf)-1)); + buf_size = min((ssize_t)count, (ssize_t)(sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = 0; diff --git a/drivers/mfd/ab3100-otp.c b/drivers/mfd/ab3100-otp.c index c4751fb9bc22..c393102e3a39 100644 --- a/drivers/mfd/ab3100-otp.c +++ b/drivers/mfd/ab3100-otp.c @@ -29,22 +29,22 @@ /** * struct ab3100_otp - * @dev containing device - * @locked whether the OTP is locked, after locking, no more bits + * @dev: containing device + * @locked: whether the OTP is locked, after locking, no more bits * can be changed but before locking it is still possible * to change bits from 1->0. 
- * @freq clocking frequency for the OTP, this frequency is either + * @freq: clocking frequency for the OTP, this frequency is either * 32768Hz or 1MHz/30 - * @paf product activation flag, indicates whether this is a real + * @paf: product activation flag, indicates whether this is a real * product (paf true) or a lab board etc (paf false) - * @imeich if this is set it is possible to override the + * @imeich: if this is set it is possible to override the * IMEI number found in the tac, fac and svn fields with * (secured) software - * @cid customer ID - * @tac type allocation code of the IMEI - * @fac final assembly code of the IMEI - * @svn software version number of the IMEI - * @debugfs a debugfs file used when dumping to file + * @cid: customer ID + * @tac: type allocation code of the IMEI + * @fac: final assembly code of the IMEI + * @svn: software version number of the IMEI + * @debugfs: a debugfs file used when dumping to file */ struct ab3100_otp { struct device *dev; diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c index 1a9a3414d4fa..6d1bf7c3ca3b 100644 --- a/drivers/mfd/ab8500-debugfs.c +++ b/drivers/mfd/ab8500-debugfs.c @@ -1801,7 +1801,7 @@ static ssize_t ab8500_hwreg_write(struct file *file, int buf_size, ret; /* Get userspace string and assure termination */ - buf_size = min(count, (sizeof(buf)-1)); + buf_size = min((int)count, (int)(sizeof(buf)-1)); if (copy_from_user(buf, user_buf, buf_size)) return -EFAULT; buf[buf_size] = 0; diff --git a/drivers/mfd/altera-sysmgr.c b/drivers/mfd/altera-sysmgr.c index d2a13a547a3c..41076d121dd5 100644 --- a/drivers/mfd/altera-sysmgr.c +++ b/drivers/mfd/altera-sysmgr.c @@ -22,11 +22,9 @@ /** * struct altr_sysmgr - Altera SOCFPGA System Manager * @regmap: the regmap used for System Manager accesses. - * @base : the base address for the System Manager */ struct altr_sysmgr { struct regmap *regmap; - resource_size_t *base; }; static struct platform_driver altr_sysmgr_driver; @@ -91,6 +89,9 @@ static struct regmap_config altr_sysmgr_regmap_cfg = { * altr_sysmgr_regmap_lookup_by_phandle * Find the sysmgr previous configured in probe() and return regmap property. * Return: regmap if found or error if not found. 
+ * + * @np: Pointer to device's Device Tree node + * @property: Device Tree property name which references the sysmgr */ struct regmap *altr_sysmgr_regmap_lookup_by_phandle(struct device_node *np, const char *property) @@ -127,6 +128,7 @@ static int sysmgr_probe(struct platform_device *pdev) struct regmap_config sysmgr_config = altr_sysmgr_regmap_cfg; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + void __iomem *base; sysmgr = devm_kzalloc(dev, sizeof(*sysmgr), GFP_KERNEL); if (!sysmgr) @@ -139,22 +141,19 @@ static int sysmgr_probe(struct platform_device *pdev) sysmgr_config.max_register = resource_size(res) - sysmgr_config.reg_stride; if (of_device_is_compatible(np, "altr,sys-mgr-s10")) { - /* Need physical address for SMCC call */ - sysmgr->base = (resource_size_t *)res->start; sysmgr_config.reg_read = s10_protected_reg_read; sysmgr_config.reg_write = s10_protected_reg_write; - regmap = devm_regmap_init(dev, NULL, sysmgr->base, + /* Need physical address for SMCC call */ + regmap = devm_regmap_init(dev, NULL, (void *)res->start, &sysmgr_config); } else { - sysmgr->base = devm_ioremap(dev, res->start, - resource_size(res)); - if (!sysmgr->base) + base = devm_ioremap(dev, res->start, resource_size(res)); + if (!base) return -ENOMEM; sysmgr_config.max_register = res->end - res->start - 3; - regmap = devm_regmap_init_mmio(dev, sysmgr->base, - &sysmgr_config); + regmap = devm_regmap_init_mmio(dev, base, &sysmgr_config); } if (IS_ERR(regmap)) { diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c index f73cf76d1373..000cb82023e3 100644 --- a/drivers/mfd/arizona-core.c +++ b/drivers/mfd/arizona-core.c @@ -80,7 +80,7 @@ int arizona_clk32k_disable(struct arizona *arizona) { mutex_lock(&arizona->clk_lock); - BUG_ON(arizona->clk32k_ref <= 0); + WARN_ON(arizona->clk32k_ref <= 0); arizona->clk32k_ref--; @@ -1426,6 +1426,15 @@ err_irq: arizona_irq_exit(arizona); err_pm: pm_runtime_disable(arizona->dev); + + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } err_reset: arizona_enable_reset(arizona); regulator_disable(arizona->dcvdd); @@ -1448,6 +1457,15 @@ int arizona_dev_exit(struct arizona *arizona) regulator_disable(arizona->dcvdd); regulator_put(arizona->dcvdd); + switch (arizona->pdata.clk32k_src) { + case ARIZONA_32KZ_MCLK1: + case ARIZONA_32KZ_MCLK2: + arizona_clk32k_disable(arizona); + break; + default: + break; + } + mfd_remove_devices(arizona->dev); arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona); arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona); diff --git a/drivers/mfd/atmel-smc.c b/drivers/mfd/atmel-smc.c index 1fa2ec950e7d..d96f1d689e7f 100644 --- a/drivers/mfd/atmel-smc.c +++ b/drivers/mfd/atmel-smc.c @@ -237,7 +237,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_set_cycle); * atmel_smc_cs_conf_apply - apply an SMC CS conf * @regmap: the SMC regmap * @cs: the CS id - * @conf the SMC CS conf to apply + * @conf: the SMC CS conf to apply * * Applies an SMC CS configuration. * Only valid on at91sam9/avr32 SoCs. @@ -257,7 +257,7 @@ EXPORT_SYMBOL_GPL(atmel_smc_cs_conf_apply); * @regmap: the HSMC regmap * @cs: the CS id * @layout: the layout of registers - * @conf the SMC CS conf to apply + * @conf: the SMC CS conf to apply * * Applies an SMC CS configuration. * Only valid on post-sama5 SoCs. 
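The ab3100-core.c and ab8500-debugfs.c hunks earlier in this group make both arguments of min() the same type before clamping the length of the user-space buffer. The kernel's min() macro type-checks its operands and warns at build time when they differ, so mismatches are normally resolved either by casting both sides, as those hunks do, or by using min_t() with the comparison type named once. A minimal sketch of the min_t() form is shown below; the handler and buffer are hypothetical and are not part of this patch.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

/* Illustrative debugfs-style write handler, not taken from the diff. */
static ssize_t example_debugfs_write(struct file *file,
				     const char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	char buf[32];
	/* min_t() names the comparison type once instead of casting
	 * each operand at the call site. */
	size_t buf_size = min_t(size_t, count, sizeof(buf) - 1);

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	buf[buf_size] = '\0';

	return count;
}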
diff --git a/drivers/mfd/axp20x-i2c.c b/drivers/mfd/axp20x-i2c.c index 14f9df74f855..068e9c083f13 100644 --- a/drivers/mfd/axp20x-i2c.c +++ b/drivers/mfd/axp20x-i2c.c @@ -63,6 +63,7 @@ static const struct of_device_id axp20x_i2c_of_match[] = { { .compatible = "x-powers,axp209", .data = (void *)AXP209_ID }, { .compatible = "x-powers,axp221", .data = (void *)AXP221_ID }, { .compatible = "x-powers,axp223", .data = (void *)AXP223_ID }, + { .compatible = "x-powers,axp803", .data = (void *)AXP803_ID }, { .compatible = "x-powers,axp806", .data = (void *)AXP806_ID }, { }, }; @@ -74,11 +75,13 @@ static const struct i2c_device_id axp20x_i2c_id[] = { { "axp209", 0 }, { "axp221", 0 }, { "axp223", 0 }, + { "axp803", 0 }, { "axp806", 0 }, { }, }; MODULE_DEVICE_TABLE(i2c, axp20x_i2c_id); +#ifdef CONFIG_ACPI static const struct acpi_device_id axp20x_i2c_acpi_match[] = { { .id = "INT33F4", @@ -87,6 +90,7 @@ static const struct acpi_device_id axp20x_i2c_acpi_match[] = { { }, }; MODULE_DEVICE_TABLE(acpi, axp20x_i2c_acpi_match); +#endif static struct i2c_driver axp20x_i2c_driver = { .driver = { diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 32c2b912b58b..d07b43d7c761 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -24,7 +24,7 @@ static struct class cros_class = { }; /** - * cros_feature_to_name - CrOS feature id to name/short description. + * struct cros_feature_to_name - CrOS feature id to name/short description. * @id: The feature identifier. * @name: Device name associated with the feature id. * @desc: Short name that will be displayed. @@ -36,7 +36,7 @@ struct cros_feature_to_name { }; /** - * cros_feature_to_cells - CrOS feature id to mfd cells association. + * struct cros_feature_to_cells - CrOS feature id to mfd cells association. * @id: The feature identifier. * @mfd_cells: Pointer to the array of mfd cells that needs to be added. * @num_cells: Number of mfd cells into the array. 
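Several of the hunks above (ab3100-otp.c, altera-sysmgr.c, atmel-smc.c and cros_ec_dev.c) only normalize kernel-doc annotations: adding the struct keyword to structure descriptions, the colon after @parameter names, or @parameter lines that were missing entirely. For reference, a kernel-doc comment in the canonical form these fixes converge on looks roughly like the sketch below; the function, structure and parameters are hypothetical.

#include <linux/regmap.h>

struct example_conf;	/* hypothetical configuration type */

/**
 * example_cs_conf_apply() - apply an example chip-select configuration
 * @regmap: the controller regmap
 * @cs: the chip-select id
 * @conf: the configuration to apply
 *
 * The longer description follows the parameter block, separated from it
 * by a blank kernel-doc line.
 *
 * Return: 0 on success or a negative error code.
 */
static int example_cs_conf_apply(struct regmap *regmap, int cs,
				 const struct example_conf *conf)
{
	return 0;
}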
diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c index b125f90dd375..a353d52210a9 100644 --- a/drivers/mfd/da9063-core.c +++ b/drivers/mfd/da9063-core.c @@ -160,7 +160,6 @@ static int da9063_clear_fault_log(struct da9063 *da9063) int da9063_device_init(struct da9063 *da9063, unsigned int irq) { - int model, variant_id, variant_code; int ret; ret = da9063_clear_fault_log(da9063); @@ -171,36 +170,6 @@ int da9063_device_init(struct da9063 *da9063, unsigned int irq) da9063->irq_base = -1; da9063->chip_irq = irq; - ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_ID, &model); - if (ret < 0) { - dev_err(da9063->dev, "Cannot read chip model id.\n"); - return -EIO; - } - if (model != PMIC_CHIP_ID_DA9063) { - dev_err(da9063->dev, "Invalid chip model id: 0x%02x\n", model); - return -ENODEV; - } - - ret = regmap_read(da9063->regmap, DA9063_REG_CHIP_VARIANT, &variant_id); - if (ret < 0) { - dev_err(da9063->dev, "Cannot read chip variant id.\n"); - return -EIO; - } - - variant_code = variant_id >> DA9063_CHIP_VARIANT_SHIFT; - - dev_info(da9063->dev, - "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", - model, variant_id); - - if (variant_code < PMIC_DA9063_BB && variant_code != PMIC_DA9063_AD) { - dev_err(da9063->dev, - "Cannot support variant code: 0x%02X\n", variant_code); - return -ENODEV; - } - - da9063->variant_code = variant_code; - ret = da9063_irq_init(da9063); if (ret) { dev_err(da9063->dev, "Cannot initialize interrupts.\n"); diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 455de74c0dd2..b8217ad303ce 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -22,12 +22,124 @@ #include <linux/of.h> #include <linux/regulator/of_regulator.h> +/* + * Raw I2C access required for just accessing chip and variant info before we + * know which device is present. The info read from the device using this + * approach is then used to select the correct regmap tables. 
+ */ + +#define DA9063_REG_PAGE_SIZE 0x100 +#define DA9063_REG_PAGED_ADDR_MASK 0xFF + +enum da9063_page_sel_buf_fmt { + DA9063_PAGE_SEL_BUF_PAGE_REG = 0, + DA9063_PAGE_SEL_BUF_PAGE_VAL, + DA9063_PAGE_SEL_BUF_SIZE, +}; + +enum da9063_paged_read_msgs { + DA9063_PAGED_READ_MSG_PAGE_SEL = 0, + DA9063_PAGED_READ_MSG_REG_SEL, + DA9063_PAGED_READ_MSG_DATA, + DA9063_PAGED_READ_MSG_CNT, +}; + +static int da9063_i2c_blockreg_read(struct i2c_client *client, u16 addr, + u8 *buf, int count) +{ + struct i2c_msg xfer[DA9063_PAGED_READ_MSG_CNT]; + u8 page_sel_buf[DA9063_PAGE_SEL_BUF_SIZE]; + u8 page_num, paged_addr; + int ret; + + /* Determine page info based on register address */ + page_num = (addr / DA9063_REG_PAGE_SIZE); + if (page_num > 1) { + dev_err(&client->dev, "Invalid register address provided\n"); + return -EINVAL; + } + + paged_addr = (addr % DA9063_REG_PAGE_SIZE) & DA9063_REG_PAGED_ADDR_MASK; + page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_REG] = DA9063_REG_PAGE_CON; + page_sel_buf[DA9063_PAGE_SEL_BUF_PAGE_VAL] = + (page_num << DA9063_I2C_PAGE_SEL_SHIFT) & DA9063_REG_PAGE_MASK; + + /* Write reg address, page selection */ + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].flags = 0; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].len = DA9063_PAGE_SEL_BUF_SIZE; + xfer[DA9063_PAGED_READ_MSG_PAGE_SEL].buf = page_sel_buf; + + /* Select register address */ + xfer[DA9063_PAGED_READ_MSG_REG_SEL].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_REG_SEL].flags = 0; + xfer[DA9063_PAGED_READ_MSG_REG_SEL].len = sizeof(paged_addr); + xfer[DA9063_PAGED_READ_MSG_REG_SEL].buf = &paged_addr; + + /* Read data */ + xfer[DA9063_PAGED_READ_MSG_DATA].addr = client->addr; + xfer[DA9063_PAGED_READ_MSG_DATA].flags = I2C_M_RD; + xfer[DA9063_PAGED_READ_MSG_DATA].len = count; + xfer[DA9063_PAGED_READ_MSG_DATA].buf = buf; + + ret = i2c_transfer(client->adapter, xfer, DA9063_PAGED_READ_MSG_CNT); + if (ret < 0) { + dev_err(&client->dev, "Paged block read failed: %d\n", ret); + return ret; + } + + if (ret != DA9063_PAGED_READ_MSG_CNT) { + dev_err(&client->dev, "Paged block read failed to complete\n"); + return -EIO; + } + + return 0; +} + +enum { + DA9063_DEV_ID_REG = 0, + DA9063_VAR_ID_REG, + DA9063_CHIP_ID_REGS, +}; + +static int da9063_get_device_type(struct i2c_client *i2c, struct da9063 *da9063) +{ + u8 buf[DA9063_CHIP_ID_REGS]; + int ret; + + ret = da9063_i2c_blockreg_read(i2c, DA9063_REG_DEVICE_ID, buf, + DA9063_CHIP_ID_REGS); + if (ret) + return ret; + + if (buf[DA9063_DEV_ID_REG] != PMIC_CHIP_ID_DA9063) { + dev_err(da9063->dev, + "Invalid chip device ID: 0x%02x\n", + buf[DA9063_DEV_ID_REG]); + return -ENODEV; + } + + dev_info(da9063->dev, + "Device detected (chip-ID: 0x%02X, var-ID: 0x%02X)\n", + buf[DA9063_DEV_ID_REG], buf[DA9063_VAR_ID_REG]); + + da9063->variant_code = + (buf[DA9063_VAR_ID_REG] & DA9063_VARIANT_ID_MRC_MASK) + >> DA9063_VARIANT_ID_MRC_SHIFT; + + return 0; +} + +/* + * Variant specific regmap configs + */ + static const struct regmap_range da9063_ad_readable_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_AD_REG_SECOND_D), regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_AD_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063_ad_writeable_ranges[] = { @@ -72,7 +184,7 @@ static const struct regmap_range 
da9063_bb_readable_ranges[] = { regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063_bb_writeable_ranges[] = { @@ -85,7 +197,7 @@ static const struct regmap_range da9063_bb_writeable_ranges[] = { regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19), }; -static const struct regmap_range da9063_bb_volatile_ranges[] = { +static const struct regmap_range da9063_bb_da_volatile_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D), regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B), regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F), @@ -107,9 +219,9 @@ static const struct regmap_access_table da9063_bb_writeable_table = { .n_yes_ranges = ARRAY_SIZE(da9063_bb_writeable_ranges), }; -static const struct regmap_access_table da9063_bb_volatile_table = { - .yes_ranges = da9063_bb_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063_bb_volatile_ranges), +static const struct regmap_access_table da9063_bb_da_volatile_table = { + .yes_ranges = da9063_bb_da_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_bb_da_volatile_ranges), }; static const struct regmap_range da9063l_bb_readable_ranges[] = { @@ -117,7 +229,7 @@ static const struct regmap_range da9063l_bb_readable_ranges[] = { regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_19), - regmap_reg_range(DA9063_REG_CHIP_ID, DA9063_REG_CHIP_VARIANT), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), }; static const struct regmap_range da9063l_bb_writeable_ranges[] = { @@ -129,7 +241,7 @@ static const struct regmap_range da9063l_bb_writeable_ranges[] = { regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_19), }; -static const struct regmap_range da9063l_bb_volatile_ranges[] = { +static const struct regmap_range da9063l_bb_da_volatile_ranges[] = { regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_EVENT_D), regmap_reg_range(DA9063_REG_CONTROL_A, DA9063_REG_CONTROL_B), regmap_reg_range(DA9063_REG_CONTROL_E, DA9063_REG_CONTROL_F), @@ -151,15 +263,70 @@ static const struct regmap_access_table da9063l_bb_writeable_table = { .n_yes_ranges = ARRAY_SIZE(da9063l_bb_writeable_ranges), }; -static const struct regmap_access_table da9063l_bb_volatile_table = { - .yes_ranges = da9063l_bb_volatile_ranges, - .n_yes_ranges = ARRAY_SIZE(da9063l_bb_volatile_ranges), +static const struct regmap_access_table da9063l_bb_da_volatile_table = { + .yes_ranges = da9063l_bb_da_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_bb_da_volatile_ranges), +}; + +static const struct regmap_range da9063_da_readable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_BB_REG_SECOND_D), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), +}; + +static const struct regmap_range da9063_da_writeable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON), + regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON), + regmap_reg_range(DA9063_REG_COUNT_S, DA9063_BB_REG_ALARM_Y), + 
regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4), + regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11), +}; + +static const struct regmap_access_table da9063_da_readable_table = { + .yes_ranges = da9063_da_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_da_readable_ranges), +}; + +static const struct regmap_access_table da9063_da_writeable_table = { + .yes_ranges = da9063_da_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063_da_writeable_ranges), +}; + +static const struct regmap_range da9063l_da_readable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_MON_A10_RES), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_T_OFFSET, DA9063_BB_REG_GP_ID_11), + regmap_reg_range(DA9063_REG_DEVICE_ID, DA9063_REG_VARIANT_ID), +}; + +static const struct regmap_range da9063l_da_writeable_ranges[] = { + regmap_reg_range(DA9063_REG_PAGE_CON, DA9063_REG_PAGE_CON), + regmap_reg_range(DA9063_REG_FAULT_LOG, DA9063_REG_VSYS_MON), + regmap_reg_range(DA9063_REG_SEQ, DA9063_REG_ID_32_31), + regmap_reg_range(DA9063_REG_SEQ_A, DA9063_REG_AUTO3_LOW), + regmap_reg_range(DA9063_REG_CONFIG_I, DA9063_BB_REG_MON_REG_4), + regmap_reg_range(DA9063_BB_REG_GP_ID_0, DA9063_BB_REG_GP_ID_11), +}; + +static const struct regmap_access_table da9063l_da_readable_table = { + .yes_ranges = da9063l_da_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_da_readable_ranges), +}; + +static const struct regmap_access_table da9063l_da_writeable_table = { + .yes_ranges = da9063l_da_writeable_ranges, + .n_yes_ranges = ARRAY_SIZE(da9063l_da_writeable_ranges), }; static const struct regmap_range_cfg da9063_range_cfg[] = { { .range_min = DA9063_REG_PAGE_CON, - .range_max = DA9063_REG_CHIP_VARIANT, + .range_max = DA9063_REG_CONFIG_ID, .selector_reg = DA9063_REG_PAGE_CON, .selector_mask = 1 << DA9063_I2C_PAGE_SEL_SHIFT, .selector_shift = DA9063_I2C_PAGE_SEL_SHIFT, @@ -173,7 +340,7 @@ static struct regmap_config da9063_regmap_config = { .val_bits = 8, .ranges = da9063_range_cfg, .num_ranges = ARRAY_SIZE(da9063_range_cfg), - .max_register = DA9063_REG_CHIP_VARIANT, + .max_register = DA9063_REG_CONFIG_ID, .cache_type = REGCACHE_RBTREE, }; @@ -199,18 +366,72 @@ static int da9063_i2c_probe(struct i2c_client *i2c, da9063->chip_irq = i2c->irq; da9063->type = id->driver_data; - if (da9063->variant_code == PMIC_DA9063_AD) { - da9063_regmap_config.rd_table = &da9063_ad_readable_table; - da9063_regmap_config.wr_table = &da9063_ad_writeable_table; - da9063_regmap_config.volatile_table = &da9063_ad_volatile_table; - } else if (da9063->type == PMIC_TYPE_DA9063L) { - da9063_regmap_config.rd_table = &da9063l_bb_readable_table; - da9063_regmap_config.wr_table = &da9063l_bb_writeable_table; - da9063_regmap_config.volatile_table = &da9063l_bb_volatile_table; - } else { - da9063_regmap_config.rd_table = &da9063_bb_readable_table; - da9063_regmap_config.wr_table = &da9063_bb_writeable_table; - da9063_regmap_config.volatile_table = &da9063_bb_volatile_table; + ret = da9063_get_device_type(i2c, da9063); + if (ret) + return ret; + + switch (da9063->type) { + case PMIC_TYPE_DA9063: + switch (da9063->variant_code) { + case PMIC_DA9063_AD: + da9063_regmap_config.rd_table = + &da9063_ad_readable_table; + da9063_regmap_config.wr_table = + &da9063_ad_writeable_table; + da9063_regmap_config.volatile_table = + 
&da9063_ad_volatile_table; + break; + case PMIC_DA9063_BB: + case PMIC_DA9063_CA: + da9063_regmap_config.rd_table = + &da9063_bb_readable_table; + da9063_regmap_config.wr_table = + &da9063_bb_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_bb_da_volatile_table; + break; + case PMIC_DA9063_DA: + da9063_regmap_config.rd_table = + &da9063_da_readable_table; + da9063_regmap_config.wr_table = + &da9063_da_writeable_table; + da9063_regmap_config.volatile_table = + &da9063_bb_da_volatile_table; + break; + default: + dev_err(da9063->dev, + "Chip variant not supported for DA9063\n"); + return -ENODEV; + } + break; + case PMIC_TYPE_DA9063L: + switch (da9063->variant_code) { + case PMIC_DA9063_BB: + case PMIC_DA9063_CA: + da9063_regmap_config.rd_table = + &da9063l_bb_readable_table; + da9063_regmap_config.wr_table = + &da9063l_bb_writeable_table; + da9063_regmap_config.volatile_table = + &da9063l_bb_da_volatile_table; + break; + case PMIC_DA9063_DA: + da9063_regmap_config.rd_table = + &da9063l_da_readable_table; + da9063_regmap_config.wr_table = + &da9063l_da_writeable_table; + da9063_regmap_config.volatile_table = + &da9063l_bb_da_volatile_table; + break; + default: + dev_err(da9063->dev, + "Chip variant not supported for DA9063L\n"); + return -ENODEV; + } + break; + default: + dev_err(da9063->dev, "Chip type not supported\n"); + return -ENODEV; } da9063->regmap = devm_regmap_init_i2c(i2c, &da9063_regmap_config); diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 0452b43b0423..a9d9c1cdf546 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -2276,6 +2276,8 @@ bool db8500_prcmu_is_ac_wake_requested(void) * * Saves the reset reason code and then sets the APE_SOFTRST register which * fires interrupt to fw + * + * @reset_code: The reason for system reset */ void db8500_prcmu_system_reset(u16 reset_code) { @@ -3004,10 +3006,6 @@ static int db8500_prcmu_register_ab8500(struct device *parent) return mfd_add_devices(parent, 0, ab850x_cell, 1, NULL, 0, NULL); } -/** - * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic - * - */ static int db8500_prcmu_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 39276fa626d2..83e676a096dc 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb) len = urb->actual_length - sizeof(struct dln2_header); if (handle == DLN2_HANDLE_EVENT) { + unsigned long flags; + + spin_lock_irqsave(&dln2->event_cb_lock, flags); dln2_run_event_callbacks(dln2, id, echo, data, len); + spin_unlock_irqrestore(&dln2->event_cb_lock, flags); } else { /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */ if (dln2_transfer_complete(dln2, urb, handle, echo)) diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c index edfc172b8607..eba88b80d969 100644 --- a/drivers/mfd/hi6421-pmic-core.c +++ b/drivers/mfd/hi6421-pmic-core.c @@ -5,7 +5,7 @@ * Copyright (c) <2011-2014> HiSilicon Technologies Co., Ltd. * http://www.hisilicon.com * Copyright (c) <2013-2017> Linaro Ltd. 
- * http://www.linaro.org + * https://www.linaro.org * * Author: Guodong Xu <guodong.xu@linaro.org> */ diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 046222684b8b..9a58032f818a 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -201,6 +201,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info }, + /* EBG */ + { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info }, /* GLK */ { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info }, { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info }, @@ -230,6 +233,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info }, { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info }, + /* TGL-H */ + { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info }, + { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info }, /* EHL */ { PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info }, { PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info }, diff --git a/drivers/mfd/intel_soc_pmic_mrfld.c b/drivers/mfd/intel_soc_pmic_mrfld.c index bd94c989d232..71da861e8c27 100644 --- a/drivers/mfd/intel_soc_pmic_mrfld.c +++ b/drivers/mfd/intel_soc_pmic_mrfld.c @@ -91,13 +91,8 @@ static int bcove_ipc_byte_reg_write(void *context, unsigned int reg, { struct intel_soc_pmic *pmic = context; u8 ipc_in = val; - int ret; - ret = intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in); - if (ret) - return ret; - - return 0; + return intel_scu_ipc_dev_iowrite8(pmic->scu, reg, ipc_in); } static const struct regmap_config bcove_regmap_config = { diff --git a/drivers/mfd/kempld-core.c b/drivers/mfd/kempld-core.c index f48e21d8b97c..52bec01149e5 100644 --- a/drivers/mfd/kempld-core.c +++ b/drivers/mfd/kempld-core.c @@ -79,39 +79,31 @@ enum kempld_cells { KEMPLD_UART, }; -static const struct mfd_cell kempld_devs[] = { - [KEMPLD_I2C] = { - .name = "kempld-i2c", - }, - [KEMPLD_WDT] = { - .name = "kempld-wdt", - }, - [KEMPLD_GPIO] = { - .name = "kempld-gpio", - }, - [KEMPLD_UART] = { - .name = "kempld-uart", - }, +static const char *kempld_dev_names[] = { + [KEMPLD_I2C] = "kempld-i2c", + [KEMPLD_WDT] = "kempld-wdt", + [KEMPLD_GPIO] = "kempld-gpio", + [KEMPLD_UART] = "kempld-uart", }; -#define KEMPLD_MAX_DEVS ARRAY_SIZE(kempld_devs) +#define KEMPLD_MAX_DEVS ARRAY_SIZE(kempld_dev_names) static int 
kempld_register_cells_generic(struct kempld_device_data *pld) { - struct mfd_cell devs[KEMPLD_MAX_DEVS]; + struct mfd_cell devs[KEMPLD_MAX_DEVS] = {}; int i = 0; if (pld->feature_mask & KEMPLD_FEATURE_BIT_I2C) - devs[i++] = kempld_devs[KEMPLD_I2C]; + devs[i++].name = kempld_dev_names[KEMPLD_I2C]; if (pld->feature_mask & KEMPLD_FEATURE_BIT_WATCHDOG) - devs[i++] = kempld_devs[KEMPLD_WDT]; + devs[i++].name = kempld_dev_names[KEMPLD_WDT]; if (pld->feature_mask & KEMPLD_FEATURE_BIT_GPIO) - devs[i++] = kempld_devs[KEMPLD_GPIO]; + devs[i++].name = kempld_dev_names[KEMPLD_GPIO]; if (pld->feature_mask & KEMPLD_FEATURE_MASK_UART) - devs[i++] = kempld_devs[KEMPLD_UART]; + devs[i++].name = kempld_dev_names[KEMPLD_UART]; return mfd_add_devices(pld->dev, -1, devs, i, NULL, 0, NULL); } diff --git a/drivers/mfd/khadas-mcu.c b/drivers/mfd/khadas-mcu.c new file mode 100644 index 000000000000..44d5bb462dab --- /dev/null +++ b/drivers/mfd/khadas-mcu.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Driver for Khadas System control Microcontroller + * + * Copyright (C) 2020 BayLibre SAS + * + * Author(s): Neil Armstrong <narmstrong@baylibre.com> + */ +#include <linux/bitfield.h> +#include <linux/i2c.h> +#include <linux/mfd/core.h> +#include <linux/mfd/khadas-mcu.h> +#include <linux/module.h> +#include <linux/regmap.h> + +static bool khadas_mcu_reg_volatile(struct device *dev, unsigned int reg) +{ + if (reg >= KHADAS_MCU_USER_DATA_0_REG && + reg < KHADAS_MCU_PWR_OFF_CMD_REG) + return true; + + switch (reg) { + case KHADAS_MCU_PWR_OFF_CMD_REG: + case KHADAS_MCU_PASSWD_START_REG: + case KHADAS_MCU_CHECK_VEN_PASSWD_REG: + case KHADAS_MCU_CHECK_USER_PASSWD_REG: + case KHADAS_MCU_WOL_INIT_START_REG: + case KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG: + return true; + default: + return false; + } +} + +static bool khadas_mcu_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case KHADAS_MCU_PASSWD_VEN_0_REG: + case KHADAS_MCU_PASSWD_VEN_1_REG: + case KHADAS_MCU_PASSWD_VEN_2_REG: + case KHADAS_MCU_PASSWD_VEN_3_REG: + case KHADAS_MCU_PASSWD_VEN_4_REG: + case KHADAS_MCU_PASSWD_VEN_5_REG: + case KHADAS_MCU_MAC_0_REG: + case KHADAS_MCU_MAC_1_REG: + case KHADAS_MCU_MAC_2_REG: + case KHADAS_MCU_MAC_3_REG: + case KHADAS_MCU_MAC_4_REG: + case KHADAS_MCU_MAC_5_REG: + case KHADAS_MCU_USID_0_REG: + case KHADAS_MCU_USID_1_REG: + case KHADAS_MCU_USID_2_REG: + case KHADAS_MCU_USID_3_REG: + case KHADAS_MCU_USID_4_REG: + case KHADAS_MCU_USID_5_REG: + case KHADAS_MCU_VERSION_0_REG: + case KHADAS_MCU_VERSION_1_REG: + case KHADAS_MCU_DEVICE_NO_0_REG: + case KHADAS_MCU_DEVICE_NO_1_REG: + case KHADAS_MCU_FACTORY_TEST_REG: + case KHADAS_MCU_SHUTDOWN_NORMAL_STATUS_REG: + return false; + default: + return true; + } +} + +static const struct regmap_config khadas_mcu_regmap_config = { + .reg_bits = 8, + .reg_stride = 1, + .val_bits = 8, + .max_register = KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG, + .volatile_reg = khadas_mcu_reg_volatile, + .writeable_reg = khadas_mcu_reg_writeable, + .cache_type = REGCACHE_RBTREE, +}; + +static struct mfd_cell khadas_mcu_fan_cells[] = { + /* VIM1/2 Rev13+ and VIM3 only */ + { .name = "khadas-mcu-fan-ctrl", }, +}; + +static struct mfd_cell khadas_mcu_cells[] = { + { .name = "khadas-mcu-user-mem", }, +}; + +static int khadas_mcu_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct khadas_mcu *ddata; + int ret; + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + 
i2c_set_clientdata(client, ddata); + + ddata->dev = dev; + + ddata->regmap = devm_regmap_init_i2c(client, &khadas_mcu_regmap_config); + if (IS_ERR(ddata->regmap)) { + ret = PTR_ERR(ddata->regmap); + dev_err(dev, "Failed to allocate register map: %d\n", ret); + return ret; + } + + ret = devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_cells, + ARRAY_SIZE(khadas_mcu_cells), + NULL, 0, NULL); + if (ret) + return ret; + + if (of_find_property(dev->of_node, "#cooling-cells", NULL)) + return devm_mfd_add_devices(dev, PLATFORM_DEVID_NONE, + khadas_mcu_fan_cells, + ARRAY_SIZE(khadas_mcu_fan_cells), + NULL, 0, NULL); + + return 0; +} + +static const struct of_device_id khadas_mcu_of_match[] = { + { .compatible = "khadas,mcu", }, + {}, +}; +MODULE_DEVICE_TABLE(of, khadas_mcu_of_match); + +static struct i2c_driver khadas_mcu_driver = { + .driver = { + .name = "khadas-mcu-core", + .of_match_table = of_match_ptr(khadas_mcu_of_match), + }, + .probe = khadas_mcu_probe, +}; +module_i2c_driver(khadas_mcu_driver); + +MODULE_DESCRIPTION("Khadas MCU core driver"); +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c index 34fba06ec705..2537dfade51c 100644 --- a/drivers/mfd/lm3533-ctrlbank.c +++ b/drivers/mfd/lm3533-ctrlbank.c @@ -17,7 +17,6 @@ #define LM3533_MAX_CURRENT_MAX 29800 #define LM3533_MAX_CURRENT_STEP 800 -#define LM3533_BRIGHTNESS_MAX 255 #define LM3533_PWM_MAX 0x3f #define LM3533_REG_PWM_BASE 0x14 @@ -89,41 +88,33 @@ int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax) } EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current); -#define lm3533_ctrlbank_set(_name, _NAME) \ -int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \ -{ \ - u8 reg; \ - int ret; \ - \ - if (val > LM3533_##_NAME##_MAX) \ - return -EINVAL; \ - \ - reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \ - ret = lm3533_write(cb->lm3533, reg, val); \ - if (ret) \ - dev_err(cb->dev, "failed to set " #_name "\n"); \ - \ - return ret; \ -} \ -EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name); - -#define lm3533_ctrlbank_get(_name, _NAME) \ -int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \ -{ \ - u8 reg; \ - int ret; \ - \ - reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \ - ret = lm3533_read(cb->lm3533, reg, val); \ - if (ret) \ - dev_err(cb->dev, "failed to get " #_name "\n"); \ - \ - return ret; \ -} \ -EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name); - -lm3533_ctrlbank_set(brightness, BRIGHTNESS); -lm3533_ctrlbank_get(brightness, BRIGHTNESS); +int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val) +{ + u8 reg; + int ret; + + reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE); + ret = lm3533_write(cb->lm3533, reg, val); + if (ret) + dev_err(cb->dev, "failed to set brightness\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_brightness); + +int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val) +{ + u8 reg; + int ret; + + reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_BRIGHTNESS_BASE); + ret = lm3533_read(cb->lm3533, reg, val); + if (ret) + dev_err(cb->dev, "failed to get brightness\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_brightness); /* * PWM-input control mask: @@ -135,9 +126,36 @@ lm3533_ctrlbank_get(brightness, BRIGHTNESS); * bit 1 - PWM-input enabled in Zone 0 * bit 0 - PWM-input enabled */ -lm3533_ctrlbank_set(pwm, PWM); -lm3533_ctrlbank_get(pwm, PWM); +int 
lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val) +{ + u8 reg; + int ret; + + if (val > LM3533_PWM_MAX) + return -EINVAL; + + reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE); + ret = lm3533_write(cb->lm3533, reg, val); + if (ret) + dev_err(cb->dev, "failed to set PWM mask\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_pwm); + +int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val) +{ + u8 reg; + int ret; + reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_PWM_BASE); + ret = lm3533_read(cb->lm3533, reg, val); + if (ret) + dev_err(cb->dev, "failed to get PWM mask\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_pwm); MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>"); MODULE_DESCRIPTION("LM3533 Control Bank interface"); diff --git a/drivers/mfd/lp873x.c b/drivers/mfd/lp873x.c index 873c608e6a5d..858c9e0a49a4 100644 --- a/drivers/mfd/lp873x.c +++ b/drivers/mfd/lp873x.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2016 Texas Instruments Incorporated - https://www.ti.com/ * * Author: Keerthy <j-keerthy@ti.com> * diff --git a/drivers/mfd/lp87565.c b/drivers/mfd/lp87565.c index 4a5c8ade4ae0..2268be9113f1 100644 --- a/drivers/mfd/lp87565.c +++ b/drivers/mfd/lp87565.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (C) 2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2017 Texas Instruments Incorporated - https://www.ti.com/ * * Author: Keerthy <j-keerthy@ti.com> */ diff --git a/drivers/mfd/madera-core.c b/drivers/mfd/madera-core.c index 7e0835cb062b..8a8d733fdce5 100644 --- a/drivers/mfd/madera-core.c +++ b/drivers/mfd/madera-core.c @@ -44,7 +44,10 @@ static const char * const madera_core_supplies[] = { }; static const struct mfd_cell madera_ldo1_devs[] = { - { .name = "madera-ldo1" }, + { + .name = "madera-ldo1", + .level = MFD_DEP_LEVEL_HIGH, + }, }; static const char * const cs47l15_supplies[] = { @@ -55,8 +58,8 @@ static const char * const cs47l15_supplies[] = { static const struct mfd_cell cs47l15_devs[] = { { .name = "madera-pinctrl", }, - { .name = "madera-irq" }, - { .name = "madera-gpio" }, + { .name = "madera-irq", }, + { .name = "madera-gpio", }, { .name = "madera-extcon", .parent_supplies = cs47l15_supplies, @@ -108,7 +111,7 @@ static const char * const cs47l85_supplies[] = { static const struct mfd_cell cs47l85_devs[] = { { .name = "madera-pinctrl", }, { .name = "madera-irq", }, - { .name = "madera-micsupp" }, + { .name = "madera-micsupp", }, { .name = "madera-gpio", }, { .name = "madera-extcon", @@ -155,10 +158,10 @@ static const char * const cs47l92_supplies[] = { }; static const struct mfd_cell cs47l92_devs[] = { - { .name = "madera-pinctrl" }, + { .name = "madera-pinctrl", }, { .name = "madera-irq", }, { .name = "madera-micsupp", }, - { .name = "madera-gpio" }, + { .name = "madera-gpio", }, { .name = "madera-extcon", .parent_supplies = cs47l92_supplies, @@ -743,18 +746,22 @@ int madera_dev_exit(struct madera *madera) /* Prevent any IRQs being serviced while we clean up */ disable_irq(madera->irq); - /* - * DCVDD could be supplied by a child node, we must disable it before - * removing the children, and prevent PM runtime from turning it back on - */ - pm_runtime_disable(madera->dev); + pm_runtime_get_sync(madera->dev); - clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk); + mfd_remove_devices(madera->dev); + + pm_runtime_disable(madera->dev); regulator_disable(madera->dcvdd); regulator_put(madera->dcvdd); - 
mfd_remove_devices(madera->dev); + mfd_remove_devices_late(madera->dev); + + pm_runtime_set_suspended(madera->dev); + pm_runtime_put_noidle(madera->dev); + + clk_disable_unprepare(madera->mclk[MADERA_MCLK2].clk); + madera_enable_hard_reset(madera); regulator_bulk_disable(madera->num_core_supplies, diff --git a/drivers/mfd/madera-i2c.c b/drivers/mfd/madera-i2c.c index 6b965eb034b6..7df5b9ba5855 100644 --- a/drivers/mfd/madera-i2c.c +++ b/drivers/mfd/madera-i2c.c @@ -88,7 +88,6 @@ static int madera_i2c_probe(struct i2c_client *i2c, if (!madera) return -ENOMEM; - madera->regmap = devm_regmap_init_i2c(i2c, regmap_16bit_config); if (IS_ERR(madera->regmap)) { ret = PTR_ERR(madera->regmap); diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c index fd8864cafd25..be185e9d5f16 100644 --- a/drivers/mfd/max14577.c +++ b/drivers/mfd/max14577.c @@ -61,7 +61,7 @@ EXPORT_SYMBOL_GPL(maxim_charger_currents); int maxim_charger_calc_reg_current(const struct maxim_charger_current *limits, unsigned int min_ua, unsigned int max_ua, u8 *dst) { - unsigned int current_bits = 0xf; + unsigned int current_bits; if (min_ua > max_ua) return -EINVAL; diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index f5a73af60dd4..c3651f06684f 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c @@ -10,6 +10,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/acpi.h> +#include <linux/list.h> #include <linux/property.h> #include <linux/mfd/core.h> #include <linux/pm_runtime.h> @@ -17,8 +18,17 @@ #include <linux/module.h> #include <linux/irqdomain.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/regulator/consumer.h> +static LIST_HEAD(mfd_of_node_list); + +struct mfd_of_node_entry { + struct list_head list; + struct device *dev; + struct device_node *np; +}; + static struct device_type mfd_dev_type = { .name = "mfd_device", }; @@ -107,6 +117,55 @@ static inline void mfd_acpi_add_device(const struct mfd_cell *cell, } #endif +static int mfd_match_of_node_to_dev(struct platform_device *pdev, + struct device_node *np, + const struct mfd_cell *cell) +{ +#if IS_ENABLED(CONFIG_OF) + struct mfd_of_node_entry *of_entry; + const __be32 *reg; + u64 of_node_addr; + + /* Skip devices 'disabled' by Device Tree */ + if (!of_device_is_available(np)) + return -ENODEV; + + /* Skip if OF node has previously been allocated to a device */ + list_for_each_entry(of_entry, &mfd_of_node_list, list) + if (of_entry->np == np) + return -EAGAIN; + + if (!cell->use_of_reg) + /* No of_reg defined - allocate first free compatible match */ + goto allocate_of_node; + + /* We only care about each node's first defined address */ + reg = of_get_address(np, 0, NULL, NULL); + if (!reg) + /* OF node does not contatin a 'reg' property to match to */ + return -EAGAIN; + + of_node_addr = of_read_number(reg, of_n_addr_cells(np)); + + if (cell->of_reg != of_node_addr) + /* No match */ + return -EAGAIN; + +allocate_of_node: + of_entry = kzalloc(sizeof(*of_entry), GFP_KERNEL); + if (!of_entry) + return -ENOMEM; + + of_entry->dev = &pdev->dev; + of_entry->np = np; + list_add_tail(&of_entry->list, &mfd_of_node_list); + + pdev->dev.of_node = np; + pdev->dev.fwnode = &np->fwnode; +#endif + return 0; +} + static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, @@ -115,6 +174,7 @@ static int mfd_add_device(struct device *parent, int id, struct resource *res; struct platform_device *pdev; struct device_node *np = NULL; + struct mfd_of_node_entry 
*of_entry, *tmp; int ret = -ENOMEM; int platform_id; int r; @@ -149,19 +209,22 @@ static int mfd_add_device(struct device *parent, int id, if (ret < 0) goto fail_res; - if (parent->of_node && cell->of_compatible) { + if (IS_ENABLED(CONFIG_OF) && parent->of_node && cell->of_compatible) { for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { - if (!of_device_is_available(np)) { - /* Ignore disabled devices error free */ - ret = 0; + ret = mfd_match_of_node_to_dev(pdev, np, cell); + if (ret == -EAGAIN) + continue; + if (ret) goto fail_alias; - } - pdev->dev.of_node = np; - pdev->dev.fwnode = &np->fwnode; + break; } } + + if (!pdev->dev.of_node) + pr_warn("%s: Failed to locate of_node [id: %d]\n", + cell->name, platform_id); } mfd_acpi_add_device(cell, pdev); @@ -170,13 +233,13 @@ static int mfd_add_device(struct device *parent, int id, ret = platform_device_add_data(pdev, cell->platform_data, cell->pdata_size); if (ret) - goto fail_alias; + goto fail_of_entry; } if (cell->properties) { ret = platform_device_add_properties(pdev, cell->properties); if (ret) - goto fail_alias; + goto fail_of_entry; } for (r = 0; r < cell->num_resources; r++) { @@ -213,18 +276,18 @@ static int mfd_add_device(struct device *parent, int id, if (has_acpi_companion(&pdev->dev)) { ret = acpi_check_resource_conflict(&res[r]); if (ret) - goto fail_alias; + goto fail_of_entry; } } } ret = platform_device_add_resources(pdev, res, cell->num_resources); if (ret) - goto fail_alias; + goto fail_of_entry; ret = platform_device_add(pdev); if (ret) - goto fail_alias; + goto fail_of_entry; if (cell->pm_runtime_no_callbacks) pm_runtime_no_callbacks(&pdev->dev); @@ -233,6 +296,12 @@ static int mfd_add_device(struct device *parent, int id, return 0; +fail_of_entry: + list_for_each_entry_safe(of_entry, tmp, &mfd_of_node_list, list) + if (of_entry->dev == &pdev->dev) { + list_del(&of_entry->list); + kfree(of_entry); + } fail_alias: regulator_bulk_unregister_supply_alias(&pdev->dev, cell->parent_supplies, @@ -287,6 +356,7 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) { struct platform_device *pdev; const struct mfd_cell *cell; + int *level = data; if (dev->type != &mfd_dev_type) return 0; @@ -294,16 +364,31 @@ static int mfd_remove_devices_fn(struct device *dev, void *data) pdev = to_platform_device(dev); cell = mfd_get_cell(pdev); + if (level && cell->level > *level) + return 0; + regulator_bulk_unregister_supply_alias(dev, cell->parent_supplies, cell->num_parent_supplies); + kfree(cell); + platform_device_unregister(pdev); return 0; } +void mfd_remove_devices_late(struct device *parent) +{ + int level = MFD_DEP_LEVEL_HIGH; + + device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); +} +EXPORT_SYMBOL(mfd_remove_devices_late); + void mfd_remove_devices(struct device *parent) { - device_for_each_child_reverse(parent, NULL, mfd_remove_devices_fn); + int level = MFD_DEP_LEVEL_NORMAL; + + device_for_each_child_reverse(parent, &level, mfd_remove_devices_fn); } EXPORT_SYMBOL(mfd_remove_devices); @@ -318,6 +403,16 @@ static void devm_mfd_dev_release(struct device *dev, void *res) * Returns 0 on success or an appropriate negative error number on failure. * All child-devices of the MFD will automatically be removed when it gets * unbinded. + * + * @dev: Pointer to parent device. + * @id: Can be PLATFORM_DEVID_AUTO to let the Platform API take care + * of device numbering, or will be added to a device's cell_id. 
+ * @cells: Array of (struct mfd_cell)s describing child devices. + * @n_devs: Number of child devices to register. + * @mem_base: Parent register range resource for child devices. + * @irq_base: Base of the range of virtual interrupt numbers allocated for + * this MFD device. Unused if @domain is specified. + * @domain: Interrupt domain to create mappings for hardware interrupts. */ int devm_mfd_add_devices(struct device *dev, int id, const struct mfd_cell *cells, int n_devs, diff --git a/drivers/mfd/motorola-cpcap.c b/drivers/mfd/motorola-cpcap.c index 52f38e57cdc1..2283d88adcc2 100644 --- a/drivers/mfd/motorola-cpcap.c +++ b/drivers/mfd/motorola-cpcap.c @@ -214,6 +214,28 @@ static const struct regmap_config cpcap_regmap_config = { .val_format_endian = REGMAP_ENDIAN_LITTLE, }; +#ifdef CONFIG_PM_SLEEP +static int cpcap_suspend(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + disable_irq(spi->irq); + + return 0; +} + +static int cpcap_resume(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + + enable_irq(spi->irq); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cpcap_pm, cpcap_suspend, cpcap_resume); + static const struct mfd_cell cpcap_mfd_devices[] = { { .name = "cpcap_adc", @@ -313,6 +335,7 @@ static struct spi_driver cpcap_driver = { .driver = { .name = "cpcap-core", .of_match_table = cpcap_of_match, + .pm = &cpcap_pm, }, .probe = cpcap_probe, }; diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 1f4f01b02d98..1e6431cb8536 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -2,7 +2,7 @@ /** * omap-usb-host.c - The USBHS core driver for OMAP EHCI & OHCI * - * Copyright (C) 2011-2013 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2011-2013 Texas Instruments Incorporated - https://www.ti.com * Author: Keshava Munegowda <keshava_mgowda@ti.com> * Author: Roger Quadros <rogerq@ti.com> */ @@ -120,7 +120,7 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg) /*-------------------------------------------------------------------------*/ -/** +/* * Map 'enum usbhs_omap_port_mode' found in <linux/platform_data/usb-omap.h> * to the device tree binding portN-mode found in * 'Documentation/devicetree/bindings/mfd/omap-usb-host.txt' @@ -526,6 +526,8 @@ static const struct of_device_id usbhs_child_match_table[] = { * usbhs_omap_probe - initialize TI-based HCDs * * Allocates basic resources for this USB host controller. + * + * @pdev: Pointer to this device's platform device structure */ static int usbhs_omap_probe(struct platform_device *pdev) { diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 4b7f73c317e8..16fad79c73f1 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -2,7 +2,7 @@ /** * omap-usb-tll.c - The USB TLL driver for OMAP EHCI & OHCI * - * Copyright (C) 2012-2013 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2012-2013 Texas Instruments Incorporated - https://www.ti.com * Author: Keshava Munegowda <keshava_mgowda@ti.com> * Author: Roger Quadros <rogerq@ti.com> */ @@ -199,6 +199,8 @@ static unsigned ohci_omap3_fslsmode(enum usbhs_omap_port_mode mode) * usbtll_omap_probe - initialize TI-based HCDs * * Allocates basic resources for this USB host controller. 
+ * + * @pdev: Pointer to this device's platform device structure */ static int usbtll_omap_probe(struct platform_device *pdev) { diff --git a/drivers/mfd/rave-sp.c b/drivers/mfd/rave-sp.c index 26c7b63e008a..abaab541df19 100644 --- a/drivers/mfd/rave-sp.c +++ b/drivers/mfd/rave-sp.c @@ -96,7 +96,7 @@ struct rave_sp_deframer { * @data: Buffer to store reply payload in * @code: Expected reply code * @ackid: Expected reply ACK ID - * @completion: Successful reply reception completion + * @received: Successful reply reception completion */ struct rave_sp_reply { size_t length; diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c index 232de50562f9..e25407ed3ad4 100644 --- a/drivers/mfd/rn5t618.c +++ b/drivers/mfd/rn5t618.c @@ -44,6 +44,9 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg) case RN5T618_INTMON: case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2: case RN5T618_RTC_SECONDS ... RN5T618_RTC_YEAR: + case RN5T618_CHGSTATE: + case RN5T618_CHGCTRL_IRR ... RN5T618_CHGERR_MONI: + case RN5T618_CONTROL ... RN5T618_CC_AVEREG0: return true; default: return false; @@ -77,7 +80,7 @@ static const struct regmap_irq_chip rc5t619_irq_chip = { .mask_invert = true, }; -static struct rn5t618 *rn5t618_pm_power_off; +static struct i2c_client *rn5t618_pm_power_off; static struct notifier_block rn5t618_restart_handler; static int rn5t618_irq_init(struct rn5t618 *rn5t618) @@ -110,13 +113,38 @@ static int rn5t618_irq_init(struct rn5t618 *rn5t618) static void rn5t618_trigger_poweroff_sequence(bool repower) { + int ret; + /* disable automatic repower-on */ - regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_REPCNT, - RN5T618_REPCNT_REPWRON, - repower ? RN5T618_REPCNT_REPWRON : 0); + ret = i2c_smbus_read_byte_data(rn5t618_pm_power_off, RN5T618_REPCNT); + if (ret < 0) + goto err; + + ret &= ~RN5T618_REPCNT_REPWRON; + if (repower) + ret |= RN5T618_REPCNT_REPWRON; + + ret = i2c_smbus_write_byte_data(rn5t618_pm_power_off, + RN5T618_REPCNT, (u8)ret); + if (ret < 0) + goto err; + /* start power-off sequence */ - regmap_update_bits(rn5t618_pm_power_off->regmap, RN5T618_SLPCNT, - RN5T618_SLPCNT_SWPWROFF, RN5T618_SLPCNT_SWPWROFF); + ret = i2c_smbus_read_byte_data(rn5t618_pm_power_off, RN5T618_SLPCNT); + if (ret < 0) + goto err; + + ret |= RN5T618_SLPCNT_SWPWROFF; + + ret = i2c_smbus_write_byte_data(rn5t618_pm_power_off, + RN5T618_SLPCNT, (u8)ret); + if (ret < 0) + goto err; + + return; + +err: + dev_alert(&rn5t618_pm_power_off->dev, "Failed to shutdown (err = %d)\n", ret); } static void rn5t618_power_off(void) @@ -189,7 +217,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c) return ret; } - rn5t618_pm_power_off = priv; + rn5t618_pm_power_off = i2c; if (of_device_is_system_power_controller(i2c->dev.of_node)) { if (!pm_power_off) pm_power_off = rn5t618_power_off; @@ -211,9 +239,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c) static int rn5t618_i2c_remove(struct i2c_client *i2c) { - struct rn5t618 *priv = i2c_get_clientdata(i2c); - - if (priv == rn5t618_pm_power_off) { + if (i2c == rn5t618_pm_power_off) { rn5t618_pm_power_off = NULL; pm_power_off = NULL; } diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c index 4a09ce9609c9..d15b3e783369 100644 --- a/drivers/mfd/si476x-cmd.c +++ b/drivers/mfd/si476x-cmd.c @@ -241,13 +241,13 @@ static int si476x_core_parse_and_nag_about_error(struct si476x_core *core) /** * si476x_core_send_command() - sends a command to si476x and waits its * response - * @core: si476x_device structure for the device we are + * @core: 
si476x_device structure for the device we are * communicating with * @command: command id * @args: command arguments we are sending * @argn: actual size of @args - * @response: buffer to place the expected response from the device - * @respn: actual size of @response + * @resp: buffer to place the expected response from the device + * @respn: actual size of @resp * @usecs: amount of time to wait before reading the response (in * usecs) * @@ -496,7 +496,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); * enable 1MOhm pulldown * SI476X_DFS_DAUDIO - set the pin to be a part of digital * audio interface - * @dout - DOUT pin function configuration: + * @dout: - DOUT pin function configuration: * SI476X_DOUT_NOOP - do not modify the behaviour * SI476X_DOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown @@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); * port 1 * SI476X_DOUT_I2S_INPUT - set this pin to be digital in on I2S * port 1 - * @xout - XOUT pin function configuration: + * @xout: - XOUT pin function configuration: * SI476X_XOUT_NOOP - do not modify the behaviour * SI476X_XOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown @@ -540,25 +540,25 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_dig_audio_pin_cfg); /** * si476x_cmd_zif_pin_cfg - send 'ZIF_PIN_CFG_COMMAND' - * @core - device to send the command to - * @iqclk - IQCL pin function configuration: + * @core: - device to send the command to + * @iqclk: - IQCL pin function configuration: * SI476X_IQCLK_NOOP - do not modify the behaviour * SI476X_IQCLK_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_IQCLK_IQ - set pin to be a part of I/Q interace * in master mode - * @iqfs - IQFS pin function configuration: + * @iqfs: - IQFS pin function configuration: * SI476X_IQFS_NOOP - do not modify the behaviour * SI476X_IQFS_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_IQFS_IQ - set pin to be a part of I/Q interace * in master mode - * @iout - IOUT pin function configuration: + * @iout: - IOUT pin function configuration: * SI476X_IOUT_NOOP - do not modify the behaviour * SI476X_IOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_IOUT_OUTPUT - set pin to be I out - * @qout - QOUT pin function configuration: + * @qout: - QOUT pin function configuration: * SI476X_QOUT_NOOP - do not modify the behaviour * SI476X_QOUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown @@ -590,29 +590,29 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_zif_pin_cfg); /** * si476x_cmd_ic_link_gpo_ctl_pin_cfg - send * 'IC_LINK_GPIO_CTL_PIN_CFG' comand to the device - * @core - device to send the command to - * @icin - ICIN pin function configuration: + * @core: - device to send the command to + * @icin: - ICIN pin function configuration: * SI476X_ICIN_NOOP - do not modify the behaviour * SI476X_ICIN_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICIN_GPO1_HIGH - set pin to be an output, drive it high * SI476X_ICIN_GPO1_LOW - set pin to be an output, drive it low * SI476X_ICIN_IC_LINK - set the pin to be a part of Inter-Chip link - * @icip - ICIP pin function configuration: + * @icip: - ICIP pin function configuration: * SI476X_ICIP_NOOP - do not modify the behaviour * SI476X_ICIP_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICIP_GPO1_HIGH - set pin to be an output, drive it high * SI476X_ICIP_GPO1_LOW - set pin to be an output, drive it low * 
SI476X_ICIP_IC_LINK - set the pin to be a part of Inter-Chip link - * @icon - ICON pin function configuration: + * @icon: - ICON pin function configuration: * SI476X_ICON_NOOP - do not modify the behaviour * SI476X_ICON_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_ICON_I2S - set the pin to be a part of audio * interface in slave mode (DCLK) * SI476X_ICON_IC_LINK - set the pin to be a part of Inter-Chip link - * @icop - ICOP pin function configuration: + * @icop: - ICOP pin function configuration: * SI476X_ICOP_NOOP - do not modify the behaviour * SI476X_ICOP_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown @@ -647,8 +647,8 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ic_link_gpo_ctl_pin_cfg); /** * si476x_cmd_ana_audio_pin_cfg - send 'ANA_AUDIO_PIN_CFG' to the * device - * @core - device to send the command to - * @lrout - LROUT pin function configuration: + * @core: - device to send the command to + * @lrout: - LROUT pin function configuration: * SI476X_LROUT_NOOP - do not modify the behaviour * SI476X_LROUT_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown @@ -675,15 +675,15 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_ana_audio_pin_cfg); /** * si476x_cmd_intb_pin_cfg - send 'INTB_PIN_CFG' command to the device - * @core - device to send the command to - * @intb - INTB pin function configuration: + * @core: - device to send the command to + * @intb: - INTB pin function configuration: * SI476X_INTB_NOOP - do not modify the behaviour * SI476X_INTB_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown * SI476X_INTB_DAUDIO - set pin to be a part of digital * audio interface in slave mode * SI476X_INTB_IRQ - set pin to be an interrupt request line - * @a1 - A1 pin function configuration: + * @a1: - A1 pin function configuration: * SI476X_A1_NOOP - do not modify the behaviour * SI476X_A1_TRISTATE - put the pin in tristate condition, * enable 1MOhm pulldown @@ -728,14 +728,10 @@ static int si476x_core_cmd_intb_pin_cfg_a20(struct si476x_core *core, /** * si476x_cmd_am_rsq_status - send 'AM_RSQ_STATUS' command to the * device - * @core - device to send the command to - * @rsqack - if set command clears RSQINT, SNRINT, SNRLINT, RSSIHINT, - * RSSSILINT, BLENDINT, MULTHINT and MULTLINT - * @attune - when set the values in the status report are the values - * that were calculated at tune - * @cancel - abort ongoing seek/tune opertation - * @stcack - clear the STCINT bin in status register - * @report - all signal quality information retured by the command + * @core: - device to send the command to + * @rsqargs: - pointer to a structure containing a group of sub-args + * relevant to sending the RSQ status command + * @report: - all signal quality information retured by the command * (if NULL then the output of the command is ignored) * * Function returns 0 on success and negative error code on failure @@ -862,9 +858,9 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_am_acf_status); /** * si476x_cmd_fm_seek_start - send 'FM_SEEK_START' command to the * device - * @core - device to send the command to - * @seekup - if set the direction of the search is 'up' - * @wrap - if set seek wraps when hitting band limit + * @core: - device to send the command to + * @seekup: - if set the direction of the search is 'up' + * @wrap: - if set seek wraps when hitting band limit * * This function begins search for a valid station. 
The station is * considered valid when 'FM_VALID_SNR_THRESHOLD' and @@ -890,12 +886,14 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_seek_start); /** * si476x_cmd_fm_rds_status - send 'FM_RDS_STATUS' command to the * device - * @core - device to send the command to - * @status_only - if set the data is not removed from RDSFIFO, + * @core: - device to send the command to + * @status_only: - if set the data is not removed from RDSFIFO, * RDSFIFOUSED is not decremented and data in all the * rest RDS data contains the last valid info received - * @mtfifo if set the command clears RDS receive FIFO - * @intack if set the command clards the RDSINT bit. + * @mtfifo: if set the command clears RDS receive FIFO + * @intack: if set the command clards the RDSINT bit. + * @report: - all signal quality information retured by the command + * (if NULL then the output of the command is ignored) * * Function returns 0 on success and negative error code on failure */ @@ -1036,9 +1034,9 @@ EXPORT_SYMBOL_GPL(si476x_core_cmd_fm_phase_div_status); /** * si476x_cmd_am_seek_start - send 'FM_SEEK_START' command to the * device - * @core - device to send the command to - * @seekup - if set the direction of the search is 'up' - * @wrap - if set seek wraps when hitting band limit + * @core: - device to send the command to + * @seekup: - if set the direction of the search is 'up' + * @wrap: - if set seek wraps when hitting band limit * * This function begins search for a valid station. The station is * considered valid when 'FM_VALID_SNR_THRESHOLD' and diff --git a/drivers/mfd/si476x-i2c.c b/drivers/mfd/si476x-i2c.c index c8d28b844def..c1d7b845244e 100644 --- a/drivers/mfd/si476x-i2c.c +++ b/drivers/mfd/si476x-i2c.c @@ -534,6 +534,11 @@ static irqreturn_t si476x_core_interrupt(int irq, void *dev) /** * si476x_firmware_version_to_revision() * @core: Core device structure + * @func: Selects the boot function of the device: + * *_BOOTLOADER - Boot loader + * *_FM_RECEIVER - FM receiver + * *_AM_RECEIVER - AM receiver + * *_WB_RECEIVER - Weatherband receiver * @major: Firmware major number * @minor1: Firmware first minor number * @minor2: Firmware second minor number @@ -583,7 +588,7 @@ static int si476x_core_fwver_to_revision(struct si476x_core *core, goto unknown_revision; } case SI476X_FUNC_BOOTLOADER: - default: /* FALLTHROUG */ + default: /* FALLTHROUGH */ BUG(); return -1; } diff --git a/drivers/mfd/sky81452.c b/drivers/mfd/sky81452.c index 76eedfae8553..3ad35bf0c015 100644 --- a/drivers/mfd/sky81452.c +++ b/drivers/mfd/sky81452.c @@ -47,8 +47,6 @@ static int sky81452_probe(struct i2c_client *client, memset(cells, 0, sizeof(cells)); cells[0].name = "sky81452-backlight"; cells[0].of_compatible = "skyworks,sky81452-backlight"; - cells[0].platform_data = pdata->bl_pdata; - cells[0].pdata_size = sizeof(*pdata->bl_pdata); cells[1].name = "sky81452-regulator"; cells[1].platform_data = pdata->regulator_init_data; cells[1].pdata_size = sizeof(*pdata->regulator_init_data); diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c deleted file mode 100644 index 57b792eb58fd..000000000000 --- a/drivers/mfd/smsc-ece1099.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * TI SMSC MFD Driver - * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com - * - * Author: Sourav Poddar <sourav.poddar@ti.com> - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; GPL v2. 
- * - */ - -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/i2c.h> -#include <linux/gpio.h> -#include <linux/workqueue.h> -#include <linux/irq.h> -#include <linux/regmap.h> -#include <linux/err.h> -#include <linux/mfd/core.h> -#include <linux/mfd/smsc.h> -#include <linux/of_platform.h> - -static const struct regmap_config smsc_regmap_config = { - .reg_bits = 8, - .val_bits = 8, - .max_register = SMSC_VEN_ID_H, - .cache_type = REGCACHE_RBTREE, -}; - -static int smsc_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) -{ - struct smsc *smsc; - int devid, rev, venid_l, venid_h; - int ret; - - smsc = devm_kzalloc(&i2c->dev, sizeof(*smsc), GFP_KERNEL); - if (!smsc) - return -ENOMEM; - - smsc->regmap = devm_regmap_init_i2c(i2c, &smsc_regmap_config); - if (IS_ERR(smsc->regmap)) - return PTR_ERR(smsc->regmap); - - i2c_set_clientdata(i2c, smsc); - smsc->dev = &i2c->dev; - -#ifdef CONFIG_OF - of_property_read_u32(i2c->dev.of_node, "clock", &smsc->clk); -#endif - - regmap_read(smsc->regmap, SMSC_DEV_ID, &devid); - regmap_read(smsc->regmap, SMSC_DEV_REV, &rev); - regmap_read(smsc->regmap, SMSC_VEN_ID_L, &venid_l); - regmap_read(smsc->regmap, SMSC_VEN_ID_H, &venid_h); - - dev_info(&i2c->dev, "SMSCxxx devid: %02x rev: %02x venid: %02x\n", - devid, rev, (venid_h << 8) | venid_l); - - ret = regmap_write(smsc->regmap, SMSC_CLK_CTRL, smsc->clk); - if (ret) - return ret; - -#ifdef CONFIG_OF - if (i2c->dev.of_node) - ret = devm_of_platform_populate(&i2c->dev); -#endif - - return ret; -} - -static const struct i2c_device_id smsc_i2c_id[] = { - { "smscece1099", 0}, - {}, -}; - -static struct i2c_driver smsc_i2c_driver = { - .driver = { - .name = "smsc", - }, - .probe = smsc_i2c_probe, - .id_table = smsc_i2c_id, -}; -builtin_i2c_driver(smsc_i2c_driver); diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index 33336cde4724..f8a8b918c60d 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -7,7 +7,9 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mfd/core.h> +#include <linux/mfd/sc27xx-pmic.h> #include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/regmap.h> #include <linux/spi/spi.h> #include <uapi/linux/usb/charger.h> @@ -93,73 +95,6 @@ enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev) } EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type); -static const struct mfd_cell sprd_pmic_devs[] = { - { - .name = "sc27xx-wdt", - .of_compatible = "sprd,sc2731-wdt", - }, { - .name = "sc27xx-rtc", - .of_compatible = "sprd,sc2731-rtc", - }, { - .name = "sc27xx-charger", - .of_compatible = "sprd,sc2731-charger", - }, { - .name = "sc27xx-chg-timer", - .of_compatible = "sprd,sc2731-chg-timer", - }, { - .name = "sc27xx-fast-chg", - .of_compatible = "sprd,sc2731-fast-chg", - }, { - .name = "sc27xx-chg-wdt", - .of_compatible = "sprd,sc2731-chg-wdt", - }, { - .name = "sc27xx-typec", - .of_compatible = "sprd,sc2731-typec", - }, { - .name = "sc27xx-flash", - .of_compatible = "sprd,sc2731-flash", - }, { - .name = "sc27xx-eic", - .of_compatible = "sprd,sc2731-eic", - }, { - .name = "sc27xx-efuse", - .of_compatible = "sprd,sc2731-efuse", - }, { - .name = "sc27xx-thermal", - .of_compatible = "sprd,sc2731-thermal", - }, { - .name = "sc27xx-adc", - .of_compatible = "sprd,sc2731-adc", - }, { - .name = "sc27xx-audio-codec", - .of_compatible = "sprd,sc2731-audio-codec", - }, { - .name = "sc27xx-regulator", - .of_compatible = "sprd,sc2731-regulator", - }, { - .name = "sc27xx-vibrator", - 
.of_compatible = "sprd,sc2731-vibrator", - }, { - .name = "sc27xx-keypad-led", - .of_compatible = "sprd,sc2731-keypad-led", - }, { - .name = "sc27xx-bltc", - .of_compatible = "sprd,sc2731-bltc", - }, { - .name = "sc27xx-fgu", - .of_compatible = "sprd,sc2731-fgu", - }, { - .name = "sc27xx-7sreset", - .of_compatible = "sprd,sc2731-7sreset", - }, { - .name = "sc27xx-poweroff", - .of_compatible = "sprd,sc2731-poweroff", - }, { - .name = "sc27xx-syscon", - .of_compatible = "sprd,sc2731-syscon", - }, -}; - static int sprd_pmic_spi_write(void *context, const void *data, size_t count) { struct device *dev = context; @@ -250,10 +185,8 @@ static int sprd_pmic_probe(struct spi_device *spi) return -ENOMEM; ddata->irq_chip.irqs = ddata->irqs; - for (i = 0; i < pdata->num_irqs; i++) { - ddata->irqs[i].reg_offset = i / pdata->num_irqs; - ddata->irqs[i].mask = BIT(i % pdata->num_irqs); - } + for (i = 0; i < pdata->num_irqs; i++) + ddata->irqs[i].mask = BIT(i); ret = devm_regmap_add_irq_chip(&spi->dev, ddata->regmap, ddata->irq, IRQF_ONESHOT | IRQF_NO_SUSPEND, 0, @@ -263,12 +196,9 @@ static int sprd_pmic_probe(struct spi_device *spi) return ret; } - ret = devm_mfd_add_devices(&spi->dev, PLATFORM_DEVID_AUTO, - sprd_pmic_devs, ARRAY_SIZE(sprd_pmic_devs), - NULL, 0, - regmap_irq_get_domain(ddata->irq_data)); + ret = devm_of_platform_populate(&spi->dev); if (ret) { - dev_err(&spi->dev, "Failed to register device %d\n", ret); + dev_err(&spi->dev, "Failed to populate sub-devices %d\n", ret); return ret; } diff --git a/drivers/mfd/stm32-lptimer.c b/drivers/mfd/stm32-lptimer.c index a00f99f36559..746e51a17cc8 100644 --- a/drivers/mfd/stm32-lptimer.c +++ b/drivers/mfd/stm32-lptimer.c @@ -17,6 +17,7 @@ static const struct regmap_config stm32_lptimer_regmap_cfg = { .val_bits = 32, .reg_stride = sizeof(u32), .max_register = STM32_LPTIM_MAX_REGISTER, + .fast_io = true, }; static int stm32_lptimer_detect_encoder(struct stm32_lptimer *ddata) diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 3a97816d0cba..75859e492984 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -101,12 +101,14 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) } } - syscon_config.name = of_node_full_name(np); + syscon_config.name = kasprintf(GFP_KERNEL, "%pOFn@%llx", np, + (u64)res.start); syscon_config.reg_stride = reg_io_width; syscon_config.val_bits = reg_io_width * 8; syscon_config.max_register = resource_size(&res) - reg_io_width; regmap = regmap_init_mmio(NULL, base, &syscon_config); + kfree(syscon_config.name); if (IS_ERR(regmap)) { pr_err("regmap init failed\n"); ret = PTR_ERR(regmap); diff --git a/drivers/mfd/tc3589x.c b/drivers/mfd/tc3589x.c index 67c9995bb1aa..7882a37ffc35 100644 --- a/drivers/mfd/tc3589x.c +++ b/drivers/mfd/tc3589x.c @@ -18,7 +18,7 @@ #include <linux/mfd/tc3589x.h> #include <linux/err.h> -/** +/* * enum tc3589x_version - indicates the TC3589x version */ enum tc3589x_version { diff --git a/drivers/mfd/ti_am335x_tscadc.c b/drivers/mfd/ti_am335x_tscadc.c index 926c289cb040..0e6e25308190 100644 --- a/drivers/mfd/ti_am335x_tscadc.c +++ b/drivers/mfd/ti_am335x_tscadc.c @@ -1,7 +1,7 @@ /* * TI Touch Screen / ADC MFD driver * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as diff --git a/drivers/mfd/tps65010.c b/drivers/mfd/tps65010.c index 
65fcc58c02da..7e7dbee58ca9 100644 --- a/drivers/mfd/tps65010.c +++ b/drivers/mfd/tps65010.c @@ -404,7 +404,6 @@ static void tps65010_work(struct work_struct *work) tps65010_interrupt(tps); if (test_and_clear_bit(FLAG_VBUS_CHANGED, &tps->flags)) { - int status; u8 chgconfig, tmp; chgconfig = i2c_smbus_read_byte_data(tps->client, @@ -415,8 +414,8 @@ static void tps65010_work(struct work_struct *work) else if (tps->vbus >= 100) chgconfig |= TPS_VBUS_CHARGING; - status = i2c_smbus_write_byte_data(tps->client, - TPS_CHGCONFIG, chgconfig); + i2c_smbus_write_byte_data(tps->client, + TPS_CHGCONFIG, chgconfig); /* vbus update fails unless VBUS is connected! */ tmp = i2c_smbus_read_byte_data(tps->client, TPS_CHGCONFIG); diff --git a/drivers/mfd/tps65086.c b/drivers/mfd/tps65086.c index 43119a6867fe..341466ef20cc 100644 --- a/drivers/mfd/tps65086.c +++ b/drivers/mfd/tps65086.c @@ -1,5 +1,5 @@ /* - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index 7566ce4457a0..2d9c282ec917 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c @@ -3,7 +3,7 @@ * * TPS65217 chip family multi-function driver * - * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -205,7 +205,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_read); /** * tps65217_reg_write: Write a single tps65217 register. * - * @tps65217: Device to write to. + * @tps: Device to write to. * @reg: Register to write to. * @val: Value to write. * @level: Password protected level @@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write); /** * tps65217_update_bits: Modify bits w.r.t mask, val and level. * - * @tps65217: Device to write to. + * @tps: Device to write to. * @reg: Register to read-write to. * @mask: Mask. * @val: Value to write. diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index a62ea4cb8be7..167e9fc308ef 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -1,7 +1,7 @@ /* * Driver for TPS65218 Integrated power management chipsets * - * Copyright (C) 2014 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 as @@ -48,7 +48,7 @@ static const struct mfd_cell tps65218_cells[] = { /** * tps65218_reg_write: Write a single tps65218 register. * - * @tps65218: Device to write to. + * @tps: Device to write to. * @reg: Register to write to. * @val: Value to write. * @level: Password protected level @@ -79,7 +79,7 @@ EXPORT_SYMBOL_GPL(tps65218_reg_write); /** * tps65218_update_bits: Modify bits w.r.t mask, val and level. * - * @tps65218: Device to write to. + * @tps: Device to write to. * @reg: Register to read-write to. * @mask: Mask. * @val: Value to write. 
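The tps65217.c and tps65218.c hunks above only rename the kernel-doc parameter from @tps65217/@tps65218 to @tps, but the @level argument those comments describe is the part worth illustrating: writes to password-protected registers must pass one of the TPS65218_PROTECT_* levels so the helper can unlock the register before the actual write. A minimal sketch of the calling pattern, assuming the tps65218_reg_write() prototype and the TPS65218_PROTECT_L1 constant from include/linux/mfd/tps65218.h; the wrapper name below is hypothetical:

#include <linux/mfd/tps65218.h>

/*
 * Hypothetical wrapper around tps65218_reg_write(): write one level-1
 * password-protected register.  With TPS65218_PROTECT_L1 the core
 * helper first writes the XOR-masked register address to the PASSWORD
 * register and only then writes the value; TPS65218_PROTECT_NONE
 * skips the password step entirely.
 */
static int tps65218_write_protected(struct tps65218 *tps, unsigned int reg,
                                    unsigned int val)
{
        return tps65218_reg_write(tps, reg, val, TPS65218_PROTECT_L1);
}

The same calling pattern applies to tps65217_reg_write(), whose @level argument takes the analogous TPS65217_PROTECT_* constants.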
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index c8aadd39324e..c36597797ddd 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c @@ -309,18 +309,19 @@ static const struct irq_domain_ops tps6586x_domain_ops = { static irqreturn_t tps6586x_irq(int irq, void *data) { struct tps6586x *tps6586x = data; - u32 acks; + uint32_t acks; + __le32 val; int ret = 0; ret = tps6586x_reads(tps6586x->dev, TPS6586X_INT_ACK1, - sizeof(acks), (uint8_t *)&acks); + sizeof(acks), (uint8_t *)&val); if (ret < 0) { dev_err(tps6586x->dev, "failed to read interrupt status\n"); return IRQ_NONE; } - acks = le32_to_cpu(acks); + acks = le32_to_cpu(val); while (acks) { int i = __ffs(acks); diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c index f33567bc428d..b55b1d5d6955 100644 --- a/drivers/mfd/tps65912-core.c +++ b/drivers/mfd/tps65912-core.c @@ -1,7 +1,7 @@ /* * Core functions for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65912-i2c.c b/drivers/mfd/tps65912-i2c.c index 785d19f6f7c9..f7c22ea7b36c 100644 --- a/drivers/mfd/tps65912-i2c.c +++ b/drivers/mfd/tps65912-i2c.c @@ -1,7 +1,7 @@ /* * I2C access driver for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c index f78be039e463..21a8d6ac5c4a 100644 --- a/drivers/mfd/tps65912-spi.c +++ b/drivers/mfd/tps65912-spi.c @@ -1,7 +1,7 @@ /* * SPI access driver for TI TPS65912x PMICs * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c index 910a304b397c..ab417438d1fa 100644 --- a/drivers/mfd/twl4030-irq.c +++ b/drivers/mfd/twl4030-irq.c @@ -477,7 +477,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data) if (agent->imr_change_pending) { union { - u32 word; + __le32 word; u8 bytes[4]; } imr; @@ -561,7 +561,7 @@ static inline int sih_read_isr(const struct sih *sih) int status; union { u8 bytes[4]; - u32 word; + __le32 word; } isr; /* FIXME need retry-on-error ... */ diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 02f879b23d9f..b0344e5353e4 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c @@ -114,6 +114,8 @@ static int wm831x_reg_locked(struct wm831x *wm831x, unsigned short reg) * The WM831x has a user key preventing writes to particularly * critical registers. This function locks those registers, * allowing writes to them. + * + * @wm831x: pointer to local driver data structure */ void wm831x_reg_lock(struct wm831x *wm831x) { @@ -140,6 +142,8 @@ EXPORT_SYMBOL_GPL(wm831x_reg_lock); * The WM831x has a user key preventing writes to particularly * critical registers. This function locks those registers, * preventing spurious writes. 
+ * + * @wm831x: pointer to local driver data structure */ int wm831x_reg_unlock(struct wm831x *wm831x) { diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c index 42b16503e6cd..fbc77b218215 100644 --- a/drivers/mfd/wm8350-core.c +++ b/drivers/mfd/wm8350-core.c @@ -131,6 +131,8 @@ EXPORT_SYMBOL_GPL(wm8350_block_write); * The WM8350 has a hardware lock which can be used to prevent writes to * some registers (generally those which can cause particularly serious * problems if misused). This function enables that lock. + * + * @wm8350: pointer to local driver data structure */ int wm8350_reg_lock(struct wm8350 *wm8350) { @@ -160,6 +162,8 @@ EXPORT_SYMBOL_GPL(wm8350_reg_lock); * problems if misused). This function disables that lock so updates * can be performed. For maximum safety this should be done only when * required. + * + * @wm8350: pointer to local driver data structure */ int wm8350_reg_unlock(struct wm8350 *wm8350) { diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c index 3055d6f47afc..0fe32a05421b 100644 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c @@ -108,6 +108,8 @@ static const struct regmap_config wm8400_regmap_config = { /** * wm8400_reset_codec_reg_cache - Reset cached codec registers to * their default values. + * + * @wm8400: pointer to local driver data structure */ void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400) { diff --git a/drivers/misc/genwqe/card_base.c b/drivers/misc/genwqe/card_base.c index acc459fc8105..c9b886618071 100644 --- a/drivers/misc/genwqe/card_base.c +++ b/drivers/misc/genwqe/card_base.c @@ -1256,7 +1256,7 @@ static void genwqe_remove(struct pci_dev *pci_dev) * error is detected. */ static pci_ers_result_t genwqe_err_error_detected(struct pci_dev *pci_dev, - enum pci_channel_state state) + pci_channel_state_t state) { struct genwqe_dev *cd; diff --git a/drivers/misc/pci_endpoint_test.c b/drivers/misc/pci_endpoint_test.c index 41c40971979e..e060796f9caa 100644 --- a/drivers/misc/pci_endpoint_test.c +++ b/drivers/misc/pci_endpoint_test.c @@ -68,6 +68,7 @@ #define PCI_ENDPOINT_TEST_FLAGS 0x2c #define FLAG_USE_DMA BIT(0) +#define PCI_DEVICE_ID_TI_J721E 0xb00d #define PCI_DEVICE_ID_TI_AM654 0xb00c #define is_am654_pci_dev(pdev) \ @@ -932,6 +933,11 @@ static const struct pci_endpoint_test_data am654_data = { .irq_type = IRQ_TYPE_MSI, }; +static const struct pci_endpoint_test_data j721e_data = { + .alignment = 256, + .irq_type = IRQ_TYPE_MSI, +}; + static const struct pci_device_id pci_endpoint_test_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x), .driver_data = (kernel_ulong_t)&default_data, @@ -946,6 +952,9 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = { }, { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0), }, + { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E), + .driver_data = (kernel_ulong_t)&j721e_data, + }, { } }; MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl); diff --git a/drivers/misc/uacce/uacce.c b/drivers/misc/uacce/uacce.c index aa91f69a5fa9..a5b8dab80c76 100644 --- a/drivers/misc/uacce/uacce.c +++ b/drivers/misc/uacce/uacce.c @@ -4,6 +4,7 @@ #include <linux/iommu.h> #include <linux/module.h> #include <linux/poll.h> +#include <linux/slab.h> #include <linux/uacce.h> static struct class *uacce_class; diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig index a7e47e068ad9..aef14990e5f7 100644 --- a/drivers/mtd/chips/Kconfig +++ b/drivers/mtd/chips/Kconfig @@ -11,7 +11,7 @@ config MTD_CFI AMD and other flash manufactures 
that provides a universal method for probing the capabilities of flash devices. If you wish to support any device that is CFI-compliant, you need to enable this - option. Visit <http://www.amd.com/products/nvd/overview/cfi.html> + option. Visit <https://www.amd.com/products/nvd/overview/cfi.html> for more information on CFI. config MTD_JEDECPROBE diff --git a/drivers/mtd/hyperbus/hbmc-am654.c b/drivers/mtd/hyperbus/hbmc-am654.c index f350a0809f88..e0e33f6bf513 100644 --- a/drivers/mtd/hyperbus/hbmc-am654.c +++ b/drivers/mtd/hyperbus/hbmc-am654.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ // Author: Vignesh Raghavendra <vigneshr@ti.com> #include <linux/err.h> diff --git a/drivers/mtd/hyperbus/hyperbus-core.c b/drivers/mtd/hyperbus/hyperbus-core.c index 32685e8dd278..2f9fc4e17d53 100644 --- a/drivers/mtd/hyperbus/hyperbus-core.c +++ b/drivers/mtd/hyperbus/hyperbus-core.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 // -// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// Copyright (C) 2019 Texas Instruments Incorporated - https://www.ti.com/ // Author: Vignesh Raghavendra <vigneshr@ti.com> #include <linux/err.h> diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index b28225a7c4f3..fd37553f1b07 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -310,7 +310,7 @@ config MTD_DC21285 help This provides a driver for the flash accessed using Intel's 21285 bridge used with Intel's StrongARM processors. More info at - <http://www.intel.com/design/bridge/docs/21285_documentation.htm>. + <https://www.intel.com/design/bridge/docs/21285_documentation.htm>. config MTD_IXP4XX tristate "CFI Flash device mapped on Intel IXP4xx based systems" diff --git a/drivers/mtd/maps/sc520cdp.c b/drivers/mtd/maps/sc520cdp.c index 9902b37e18b4..8ef7aec634c7 100644 --- a/drivers/mtd/maps/sc520cdp.c +++ b/drivers/mtd/maps/sc520cdp.c @@ -6,7 +6,7 @@ * The SC520CDP is an evaluation board for the Elan SC520 processor available * from AMD. It has two banks of 32-bit Flash ROM, each 8 Megabytes in size, * and up to 512 KiB of 8-bit DIL Flash ROM. 
- * For details see http://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html + * For details see https://www.amd.com/products/epd/desiging/evalboards/18.elansc520/520_cdp_brief/index.html */ #include <linux/module.h> diff --git a/drivers/mtd/mtdpstore.c b/drivers/mtd/mtdpstore.c index a4fe6060b960..a3ae8778f6a9 100644 --- a/drivers/mtd/mtdpstore.c +++ b/drivers/mtd/mtdpstore.c @@ -7,6 +7,7 @@ #include <linux/pstore_blk.h> #include <linux/mtd/mtd.h> #include <linux/bitops.h> +#include <linux/slab.h> static struct mtdpstore_context { int index; diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index a5d8a211cb8a..c1a45b071165 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -1,7 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only + +menu "NAND" + config MTD_NAND_CORE tristate source "drivers/mtd/nand/onenand/Kconfig" source "drivers/mtd/nand/raw/Kconfig" source "drivers/mtd/nand/spi/Kconfig" + +endmenu diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig index 572b8fe69abb..1a0e65bc246e 100644 --- a/drivers/mtd/nand/onenand/Kconfig +++ b/drivers/mtd/nand/onenand/Kconfig @@ -1,7 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig MTD_ONENAND tristate "OneNAND Device Support" - depends on MTD depends on HAS_IOMEM help This enables support for accessing all type of OneNAND flash diff --git a/drivers/mtd/nand/raw/Kconfig b/drivers/mtd/nand/raw/Kconfig index 113f61052269..1203775023ad 100644 --- a/drivers/mtd/nand/raw/Kconfig +++ b/drivers/mtd/nand/raw/Kconfig @@ -12,7 +12,6 @@ config MTD_NAND_ECC_SW_HAMMING_SMC menuconfig MTD_RAW_NAND tristate "Raw/Parallel NAND Device Support" - depends on MTD select MTD_NAND_CORE select MTD_NAND_ECC_SW_HAMMING help @@ -415,6 +414,7 @@ config MTD_NAND_TEGRA config MTD_NAND_STM32_FMC2 tristate "Support for NAND controller on STM32MP SoCs" depends on MACH_STM32MP157 || COMPILE_TEST + select MFD_SYSCON help Enables support for NAND Flash chips on SoCs containing the FMC2 NAND controller. This controller is found on STM32MP SoCs. 
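The "select MFD_SYSCON" added to MTD_NAND_STM32_FMC2 just above pulls in the syscon helper library, which suggests the FMC2 NAND driver now resolves a register block through the syscon/regmap lookup API (syscon_regmap_lookup_by_phandle(), device_node_to_regmap()). A rough sketch of that consumer pattern follows; the property name, register offset and bit mask are made up for illustration and are not taken from the STM32 driver:

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>

/*
 * Illustrative syscon consumer: resolve a regmap referenced by a
 * phandle property of this device's DT node and set a single bit in
 * it.  The "st,syscfg" property name, the 0x40 offset and the BIT(0)
 * mask are placeholders.
 */
static int example_syscon_enable(struct device *dev)
{
        struct regmap *syscfg;

        syscfg = syscon_regmap_lookup_by_phandle(dev->of_node, "st,syscfg");
        if (IS_ERR(syscfg))
                return PTR_ERR(syscfg);

        return regmap_update_bits(syscfg, 0x40, BIT(0), BIT(0));
}

syscon_regmap_lookup_by_phandle() returns an ERR_PTR (typically -ENODEV) when the property is absent, so drivers that treat the syscon as optional usually special-case that error instead of failing probe.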
diff --git a/drivers/mtd/nand/raw/ams-delta.c b/drivers/mtd/nand/raw/ams-delta.c index 3711e7a0436c..fdba155416d2 100644 --- a/drivers/mtd/nand/raw/ams-delta.c +++ b/drivers/mtd/nand/raw/ams-delta.c @@ -191,8 +191,8 @@ static int gpio_nand_exec_op(struct nand_chip *this, return ret; } -static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, - const struct nand_data_interface *cf) +static int gpio_nand_setup_interface(struct nand_chip *this, int csline, + const struct nand_interface_config *cf) { struct gpio_nand *priv = nand_get_controller_data(this); const struct nand_sdr_timings *sdr = nand_get_sdr_timings(cf); @@ -217,7 +217,7 @@ static int gpio_nand_setup_data_interface(struct nand_chip *this, int csline, static const struct nand_controller_ops gpio_nand_ops = { .exec_op = gpio_nand_exec_op, - .setup_data_interface = gpio_nand_setup_data_interface, + .setup_interface = gpio_nand_setup_interface, }; /* diff --git a/drivers/mtd/nand/raw/arasan-nand-controller.c b/drivers/mtd/nand/raw/arasan-nand-controller.c index 7141dcccba3c..12c643e97c85 100644 --- a/drivers/mtd/nand/raw/arasan-nand-controller.c +++ b/drivers/mtd/nand/raw/arasan-nand-controller.c @@ -854,8 +854,8 @@ static int anfc_exec_op(struct nand_chip *chip, return nand_op_parser_exec_op(chip, &anfc_op_parser, op, check_only); } -static int anfc_setup_data_interface(struct nand_chip *chip, int target, - const struct nand_data_interface *conf) +static int anfc_setup_interface(struct nand_chip *chip, int target, + const struct nand_interface_config *conf) { struct anand *anand = to_anand(chip); struct arasan_nfc *nfc = to_anfc(chip->controller); @@ -1083,7 +1083,7 @@ static void anfc_detach_chip(struct nand_chip *chip) static const struct nand_controller_ops anfc_ops = { .exec_op = anfc_exec_op, - .setup_data_interface = anfc_setup_data_interface, + .setup_interface = anfc_setup_interface, .attach_chip = anfc_attach_chip, .detach_chip = anfc_detach_chip, }; diff --git a/drivers/mtd/nand/raw/atmel/nand-controller.c b/drivers/mtd/nand/raw/atmel/nand-controller.c index 46a3724a788e..c9818f548d07 100644 --- a/drivers/mtd/nand/raw/atmel/nand-controller.c +++ b/drivers/mtd/nand/raw/atmel/nand-controller.c @@ -200,8 +200,8 @@ struct atmel_nand_controller_ops { void (*nand_init)(struct atmel_nand_controller *nc, struct atmel_nand *nand); int (*ecc_init)(struct nand_chip *chip); - int (*setup_data_interface)(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct atmel_nand *nand, int csline, + const struct nand_interface_config *conf); }; struct atmel_nand_controller_caps { @@ -1168,7 +1168,7 @@ static int atmel_hsmc_nand_ecc_init(struct nand_chip *chip) } static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, - const struct nand_data_interface *conf, + const struct nand_interface_config *conf, struct atmel_smc_cs_conf *smcconf) { u32 ncycles, totalcycles, timeps, mckperiodps; @@ -1397,9 +1397,9 @@ static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, return 0; } -static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, +static int atmel_smc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1422,9 +1422,9 @@ static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand 
*nand, +static int atmel_hsmc_nand_setup_interface(struct atmel_nand *nand, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { struct atmel_hsmc_nand_controller *nc; struct atmel_smc_cs_conf smcconf; @@ -1452,8 +1452,8 @@ static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, return 0; } -static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int atmel_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct atmel_nand *nand = to_atmel_nand(chip); struct atmel_nand_controller *nc; @@ -1464,7 +1464,7 @@ static int atmel_nand_setup_data_interface(struct nand_chip *chip, int csline, (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY)) return -EINVAL; - return nc->caps->ops->setup_data_interface(nand, csline, conf); + return nc->caps->ops->setup_interface(nand, csline, conf); } static void atmel_nand_init(struct atmel_nand_controller *nc, @@ -1483,7 +1483,7 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, chip->legacy.write_buf = atmel_nand_write_buf; chip->legacy.select_chip = atmel_nand_select_chip; - if (!nc->mck || !nc->caps->ops->setup_data_interface) + if (!nc->mck || !nc->caps->ops->setup_interface) chip->options |= NAND_KEEP_TIMINGS; /* Some NANDs require a longer delay than the default one (20us). */ @@ -1956,7 +1956,7 @@ static int atmel_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops atmel_nand_controller_ops = { .attach_chip = atmel_nand_attach_chip, - .setup_data_interface = atmel_nand_setup_data_interface, + .setup_interface = atmel_nand_setup_interface, }; static int atmel_nand_controller_init(struct atmel_nand_controller *nc, @@ -2318,7 +2318,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = { .remove = atmel_hsmc_nand_controller_remove, .ecc_init = atmel_hsmc_nand_ecc_init, .nand_init = atmel_hsmc_nand_init, - .setup_data_interface = atmel_hsmc_nand_setup_data_interface, + .setup_interface = atmel_hsmc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = { @@ -2375,10 +2375,10 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc) /* * The SMC reg layout of at91rm9200 is completely different which prevents us - * from re-using atmel_smc_nand_setup_data_interface() for the - * ->setup_data_interface() hook. + * from re-using atmel_smc_nand_setup_interface() for the + * ->setup_interface() hook. * At this point, there's no support for the at91rm9200 SMC IP, so we leave - * ->setup_data_interface() unassigned. + * ->setup_interface() unassigned. 
*/ static const struct atmel_nand_controller_ops at91rm9200_nc_ops = { .probe = atmel_smc_nand_controller_probe, @@ -2399,7 +2399,7 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { .remove = atmel_smc_nand_controller_remove, .ecc_init = atmel_nand_ecc_init, .nand_init = atmel_smc_nand_init, - .setup_data_interface = atmel_smc_nand_setup_data_interface, + .setup_interface = atmel_smc_nand_setup_interface, }; static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = { diff --git a/drivers/mtd/nand/raw/brcmnand/brcmnand.c b/drivers/mtd/nand/raw/brcmnand/brcmnand.c index 44068e9eea03..a4033d32a710 100644 --- a/drivers/mtd/nand/raw/brcmnand/brcmnand.c +++ b/drivers/mtd/nand/raw/brcmnand/brcmnand.c @@ -1918,6 +1918,22 @@ static int brcmnand_edu_trans(struct brcmnand_host *host, u64 addr, u32 *buf, edu_writel(ctrl, EDU_STOP, 0); /* force stop */ edu_readl(ctrl, EDU_STOP); + if (!ret && edu_cmd == EDU_CMD_READ) { + u64 err_addr = 0; + + /* + * check for ECC errors here, subpage ECC errors are + * retained in ECC error address register + */ + err_addr = brcmnand_get_uncorrecc_addr(ctrl); + if (!err_addr) { + err_addr = brcmnand_get_correcc_addr(ctrl); + if (err_addr) + ret = -EUCLEAN; + } else + ret = -EBADMSG; + } + return ret; } @@ -2124,6 +2140,7 @@ static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, u64 err_addr = 0; int err; bool retry = true; + bool edu_err = false; dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); @@ -2141,6 +2158,10 @@ try_dmaread: else return -EIO; } + + if (has_edu(ctrl) && err_addr) + edu_err = true; + } else { if (oob) memset(oob, 0x99, mtd->oobsize); @@ -2188,6 +2209,11 @@ try_dmaread: if (mtd_is_bitflip(err)) { unsigned int corrected = brcmnand_count_corrected(ctrl); + /* in case of EDU correctable error we read again using PIO */ + if (edu_err) + err = brcmnand_read_by_pio(mtd, chip, addr, trans, buf, + oob, &err_addr); + dev_dbg(ctrl->dev, "corrected error at 0x%llx\n", (unsigned long long)err_addr); mtd->ecc_stats.corrected += corrected; @@ -3023,8 +3049,9 @@ int brcmnand_probe(struct platform_device *pdev, struct brcmnand_soc *soc) if (ret < 0) goto err; - /* set edu transfer function to call */ - ctrl->dma_trans = brcmnand_edu_trans; + if (has_edu(ctrl)) + /* set edu transfer function to call */ + ctrl->dma_trans = brcmnand_edu_trans; } /* Disable automatic device ID config, direct addressing */ diff --git a/drivers/mtd/nand/raw/cadence-nand-controller.c b/drivers/mtd/nand/raw/cadence-nand-controller.c index c405722adfe1..71516af85f23 100644 --- a/drivers/mtd/nand/raw/cadence-nand-controller.c +++ b/drivers/mtd/nand/raw/cadence-nand-controller.c @@ -17,6 +17,7 @@ #include <linux/mtd/rawnand.h> #include <linux/of_device.h> #include <linux/iopoll.h> +#include <linux/slab.h> /* * HPNFC can work in 3 modes: @@ -2303,8 +2304,8 @@ static inline u32 calc_tdvw(u32 trp_cnt, u32 clk_period, u32 trhoh_min, } static int -cadence_nand_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +cadence_nand_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr; struct cdns_nand_ctrl *cdns_ctrl = to_cdns_nand_ctrl(chip->controller); @@ -2690,7 +2691,7 @@ static int cadence_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops cadence_nand_controller_ops = { .attach_chip = cadence_nand_attach_chip, .exec_op = cadence_nand_exec_op, - .setup_data_interface = 
cadence_nand_setup_data_interface, + .setup_interface = cadence_nand_setup_interface, }; static int cadence_nand_chip_init(struct cdns_nand_ctrl *cdns_ctrl, diff --git a/drivers/mtd/nand/raw/denali.c b/drivers/mtd/nand/raw/denali.c index 4e6e1578aa2d..9d99dade95ce 100644 --- a/drivers/mtd/nand/raw/denali.c +++ b/drivers/mtd/nand/raw/denali.c @@ -761,8 +761,8 @@ static int denali_write_page(struct nand_chip *chip, const u8 *buf, return denali_page_xfer(chip, (void *)buf, mtd->writesize, page, true); } -static int denali_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int denali_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { static const unsigned int data_setup_on_host = 10000; struct denali_controller *denali = to_denali_controller(chip); @@ -1173,7 +1173,7 @@ static int denali_exec_op(struct nand_chip *chip, static const struct nand_controller_ops denali_controller_ops = { .attach_chip = denali_attach_chip, .exec_op = denali_exec_op, - .setup_data_interface = denali_setup_data_interface, + .setup_interface = denali_setup_interface, }; int denali_chip_init(struct denali_controller *denali, @@ -1230,7 +1230,7 @@ int denali_chip_init(struct denali_controller *denali, chip->buf_align = 16; } - /* clk rate info is needed for setup_data_interface */ + /* clk rate info is needed for setup_interface */ if (!denali->clk_rate || !denali->clk_x_rate) chip->options |= NAND_KEEP_TIMINGS; diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index 627deb26db51..197850aeb261 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -14,32 +14,23 @@ #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <linux/mtd/mtd.h> -#include <linux/of_address.h> #include <linux/of_platform.h> -#include <linux/of_gpio.h> #include <linux/io.h> #include <linux/slab.h> #include <asm/fsl_lbc.h> -#define FSL_UPM_WAIT_RUN_PATTERN 0x1 -#define FSL_UPM_WAIT_WRITE_BYTE 0x2 -#define FSL_UPM_WAIT_WRITE_BUFFER 0x4 - struct fsl_upm_nand { + struct nand_controller base; struct device *dev; struct nand_chip chip; - int last_ctrl; - struct mtd_partition *parts; struct fsl_upm upm; uint8_t upm_addr_offset; uint8_t upm_cmd_offset; void __iomem *io_base; - int rnb_gpio[NAND_MAX_CHIPS]; + struct gpio_desc *rnb_gpio[NAND_MAX_CHIPS]; uint32_t mchip_offsets[NAND_MAX_CHIPS]; uint32_t mchip_count; uint32_t mchip_number; - int chip_delay; - uint32_t wait_flags; }; static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo) @@ -48,106 +39,6 @@ static inline struct fsl_upm_nand *to_fsl_upm_nand(struct mtd_info *mtdinfo) chip); } -static int fun_chip_ready(struct nand_chip *chip) -{ - struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); - - if (gpio_get_value(fun->rnb_gpio[fun->mchip_number])) - return 1; - - dev_vdbg(fun->dev, "busy\n"); - return 0; -} - -static void fun_wait_rnb(struct fsl_upm_nand *fun) -{ - if (fun->rnb_gpio[fun->mchip_number] >= 0) { - struct mtd_info *mtd = nand_to_mtd(&fun->chip); - int cnt = 1000000; - - while (--cnt && !fun_chip_ready(&fun->chip)) - cpu_relax(); - if (!cnt) - dev_err(fun->dev, "tired waiting for RNB\n"); - } else { - ndelay(100); - } -} - -static void fun_cmd_ctrl(struct nand_chip *chip, int cmd, unsigned int ctrl) -{ - struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); - u32 mar; - - if (!(ctrl & fun->last_ctrl)) { - fsl_upm_end_pattern(&fun->upm); - - if (cmd == NAND_CMD_NONE) - return; 
- - fun->last_ctrl = ctrl & (NAND_ALE | NAND_CLE); - } - - if (ctrl & NAND_CTRL_CHANGE) { - if (ctrl & NAND_ALE) - fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset); - else if (ctrl & NAND_CLE) - fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset); - } - - mar = (cmd << (32 - fun->upm.width)) | - fun->mchip_offsets[fun->mchip_number]; - fsl_upm_run_pattern(&fun->upm, chip->legacy.IO_ADDR_R, mar); - - if (fun->wait_flags & FSL_UPM_WAIT_RUN_PATTERN) - fun_wait_rnb(fun); -} - -static void fun_select_chip(struct nand_chip *chip, int mchip_nr) -{ - struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); - - if (mchip_nr == -1) { - chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE); - } else if (mchip_nr >= 0 && mchip_nr < NAND_MAX_CHIPS) { - fun->mchip_number = mchip_nr; - chip->legacy.IO_ADDR_R = fun->io_base + fun->mchip_offsets[mchip_nr]; - chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R; - } else { - BUG(); - } -} - -static uint8_t fun_read_byte(struct nand_chip *chip) -{ - struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); - - return in_8(fun->chip.legacy.IO_ADDR_R); -} - -static void fun_read_buf(struct nand_chip *chip, uint8_t *buf, int len) -{ - struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); - int i; - - for (i = 0; i < len; i++) - buf[i] = in_8(fun->chip.legacy.IO_ADDR_R); -} - -static void fun_write_buf(struct nand_chip *chip, const uint8_t *buf, int len) -{ - struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); - int i; - - for (i = 0; i < len; i++) { - out_8(fun->chip.legacy.IO_ADDR_W, buf[i]); - if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BYTE) - fun_wait_rnb(fun); - } - if (fun->wait_flags & FSL_UPM_WAIT_WRITE_BUFFER) - fun_wait_rnb(fun); -} - static int fun_chip_init(struct fsl_upm_nand *fun, const struct device_node *upm_np, const struct resource *io_res) @@ -156,21 +47,9 @@ static int fun_chip_init(struct fsl_upm_nand *fun, int ret; struct device_node *flash_np; - fun->chip.legacy.IO_ADDR_R = fun->io_base; - fun->chip.legacy.IO_ADDR_W = fun->io_base; - fun->chip.legacy.cmd_ctrl = fun_cmd_ctrl; - fun->chip.legacy.chip_delay = fun->chip_delay; - fun->chip.legacy.read_byte = fun_read_byte; - fun->chip.legacy.read_buf = fun_read_buf; - fun->chip.legacy.write_buf = fun_write_buf; fun->chip.ecc.mode = NAND_ECC_SOFT; fun->chip.ecc.algo = NAND_ECC_HAMMING; - if (fun->mchip_count > 1) - fun->chip.legacy.select_chip = fun_select_chip; - - if (fun->rnb_gpio[0] >= 0) - fun->chip.legacy.dev_ready = fun_chip_ready; - + fun->chip.controller = &fun->base; mtd->dev.parent = fun->dev; flash_np = of_get_next_child(upm_np, NULL); @@ -178,8 +57,9 @@ static int fun_chip_init(struct fsl_upm_nand *fun, return -ENODEV; nand_set_flash_node(&fun->chip, flash_np); - mtd->name = kasprintf(GFP_KERNEL, "0x%llx.%pOFn", (u64)io_res->start, - flash_np); + mtd->name = devm_kasprintf(fun->dev, GFP_KERNEL, "0x%llx.%pOFn", + (u64)io_res->start, + flash_np); if (!mtd->name) { ret = -ENOMEM; goto err; @@ -192,51 +72,130 @@ static int fun_chip_init(struct fsl_upm_nand *fun, ret = mtd_device_register(mtd, NULL, 0); err: of_node_put(flash_np); - if (ret) - kfree(mtd->name); return ret; } +static int func_exec_instr(struct nand_chip *chip, + const struct nand_op_instr *instr) +{ + struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); + u32 mar, reg_offs = fun->mchip_offsets[fun->mchip_number]; + unsigned int i; + const u8 *out; + u8 *in; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + fsl_upm_start_pattern(&fun->upm, fun->upm_cmd_offset); + 
mar = (instr->ctx.cmd.opcode << (32 - fun->upm.width)) | + reg_offs; + fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar); + fsl_upm_end_pattern(&fun->upm); + return 0; + + case NAND_OP_ADDR_INSTR: + fsl_upm_start_pattern(&fun->upm, fun->upm_addr_offset); + for (i = 0; i < instr->ctx.addr.naddrs; i++) { + mar = (instr->ctx.addr.addrs[i] << (32 - fun->upm.width)) | + reg_offs; + fsl_upm_run_pattern(&fun->upm, fun->io_base + reg_offs, mar); + } + fsl_upm_end_pattern(&fun->upm); + return 0; + + case NAND_OP_DATA_IN_INSTR: + in = instr->ctx.data.buf.in; + for (i = 0; i < instr->ctx.data.len; i++) + in[i] = in_8(fun->io_base + reg_offs); + return 0; + + case NAND_OP_DATA_OUT_INSTR: + out = instr->ctx.data.buf.out; + for (i = 0; i < instr->ctx.data.len; i++) + out_8(fun->io_base + reg_offs, out[i]); + return 0; + + case NAND_OP_WAITRDY_INSTR: + if (!fun->rnb_gpio[fun->mchip_number]) + return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms); + + return nand_gpio_waitrdy(chip, fun->rnb_gpio[fun->mchip_number], + instr->ctx.waitrdy.timeout_ms); + + default: + return -EINVAL; + } + + return 0; +} + +static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op, + bool check_only) +{ + struct fsl_upm_nand *fun = to_fsl_upm_nand(nand_to_mtd(chip)); + unsigned int i; + int ret; + + if (op->cs > NAND_MAX_CHIPS) + return -EINVAL; + + if (check_only) + return 0; + + fun->mchip_number = op->cs; + + for (i = 0; i < op->ninstrs; i++) { + ret = func_exec_instr(chip, &op->instrs[i]); + if (ret) + return ret; + + if (op->instrs[i].delay_ns) + ndelay(op->instrs[i].delay_ns); + } + + return 0; +} + +static const struct nand_controller_ops fun_ops = { + .exec_op = fun_exec_op, +}; + static int fun_probe(struct platform_device *ofdev) { struct fsl_upm_nand *fun; - struct resource io_res; + struct resource *io_res; const __be32 *prop; - int rnb_gpio; int ret; int size; int i; - fun = kzalloc(sizeof(*fun), GFP_KERNEL); + fun = devm_kzalloc(&ofdev->dev, sizeof(*fun), GFP_KERNEL); if (!fun) return -ENOMEM; - ret = of_address_to_resource(ofdev->dev.of_node, 0, &io_res); - if (ret) { - dev_err(&ofdev->dev, "can't get IO base\n"); - goto err1; - } + io_res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); + fun->io_base = devm_ioremap_resource(&ofdev->dev, io_res); + if (IS_ERR(fun->io_base)) + return PTR_ERR(fun->io_base); - ret = fsl_upm_find(io_res.start, &fun->upm); + ret = fsl_upm_find(io_res->start, &fun->upm); if (ret) { dev_err(&ofdev->dev, "can't find UPM\n"); - goto err1; + return ret; } prop = of_get_property(ofdev->dev.of_node, "fsl,upm-addr-offset", &size); if (!prop || size != sizeof(uint32_t)) { dev_err(&ofdev->dev, "can't get UPM address offset\n"); - ret = -EINVAL; - goto err1; + return -EINVAL; } fun->upm_addr_offset = *prop; prop = of_get_property(ofdev->dev.of_node, "fsl,upm-cmd-offset", &size); if (!prop || size != sizeof(uint32_t)) { dev_err(&ofdev->dev, "can't get UPM command offset\n"); - ret = -EINVAL; - goto err1; + return -EINVAL; } fun->upm_cmd_offset = *prop; @@ -246,7 +205,7 @@ static int fun_probe(struct platform_device *ofdev) fun->mchip_count = size / sizeof(uint32_t); if (fun->mchip_count >= NAND_MAX_CHIPS) { dev_err(&ofdev->dev, "too much multiple chips\n"); - goto err1; + return -EINVAL; } for (i = 0; i < fun->mchip_count; i++) fun->mchip_offsets[i] = be32_to_cpu(prop[i]); @@ -255,63 +214,26 @@ static int fun_probe(struct platform_device *ofdev) } for (i = 0; i < fun->mchip_count; i++) { - fun->rnb_gpio[i] = -1; - rnb_gpio = 
of_get_gpio(ofdev->dev.of_node, i); - if (rnb_gpio >= 0) { - ret = gpio_request(rnb_gpio, dev_name(&ofdev->dev)); - if (ret) { - dev_err(&ofdev->dev, - "can't request RNB gpio #%d\n", i); - goto err2; - } - gpio_direction_input(rnb_gpio); - fun->rnb_gpio[i] = rnb_gpio; - } else if (rnb_gpio == -EINVAL) { + fun->rnb_gpio[i] = devm_gpiod_get_index_optional(&ofdev->dev, + NULL, i, + GPIOD_IN); + if (IS_ERR(fun->rnb_gpio[i])) { dev_err(&ofdev->dev, "RNB gpio #%d is invalid\n", i); - goto err2; + return PTR_ERR(fun->rnb_gpio[i]); } } - prop = of_get_property(ofdev->dev.of_node, "chip-delay", NULL); - if (prop) - fun->chip_delay = be32_to_cpup(prop); - else - fun->chip_delay = 50; - - prop = of_get_property(ofdev->dev.of_node, "fsl,upm-wait-flags", &size); - if (prop && size == sizeof(uint32_t)) - fun->wait_flags = be32_to_cpup(prop); - else - fun->wait_flags = FSL_UPM_WAIT_RUN_PATTERN | - FSL_UPM_WAIT_WRITE_BYTE; - - fun->io_base = devm_ioremap(&ofdev->dev, io_res.start, - resource_size(&io_res)); - if (!fun->io_base) { - ret = -ENOMEM; - goto err2; - } - + nand_controller_init(&fun->base); + fun->base.ops = &fun_ops; fun->dev = &ofdev->dev; - fun->last_ctrl = NAND_CLE; - ret = fun_chip_init(fun, ofdev->dev.of_node, &io_res); + ret = fun_chip_init(fun, ofdev->dev.of_node, io_res); if (ret) - goto err2; + return ret; dev_set_drvdata(&ofdev->dev, fun); return 0; -err2: - for (i = 0; i < fun->mchip_count; i++) { - if (fun->rnb_gpio[i] < 0) - break; - gpio_free(fun->rnb_gpio[i]); - } -err1: - kfree(fun); - - return ret; } static int fun_remove(struct platform_device *ofdev) @@ -319,20 +241,11 @@ static int fun_remove(struct platform_device *ofdev) struct fsl_upm_nand *fun = dev_get_drvdata(&ofdev->dev); struct nand_chip *chip = &fun->chip; struct mtd_info *mtd = nand_to_mtd(chip); - int ret, i; + int ret; ret = mtd_device_unregister(mtd); WARN_ON(ret); nand_cleanup(chip); - kfree(mtd->name); - - for (i = 0; i < fun->mchip_count; i++) { - if (fun->rnb_gpio[i] < 0) - break; - gpio_free(fun->rnb_gpio[i]); - } - - kfree(fun); return 0; } diff --git a/drivers/mtd/nand/raw/fsmc_nand.c b/drivers/mtd/nand/raw/fsmc_nand.c index 3909752b14c5..92ddc41d0ff0 100644 --- a/drivers/mtd/nand/raw/fsmc_nand.c +++ b/drivers/mtd/nand/raw/fsmc_nand.c @@ -327,8 +327,8 @@ static int fsmc_calc_timings(struct fsmc_nand_data *host, return 0; } -static int fsmc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int fsmc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct fsmc_nand_data *host = nand_to_fsmc(nand); struct fsmc_nand_timings tims; @@ -951,7 +951,7 @@ static int fsmc_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops fsmc_nand_controller_ops = { .attach_chip = fsmc_nand_attach_chip, .exec_op = fsmc_exec_op, - .setup_data_interface = fsmc_setup_data_interface, + .setup_interface = fsmc_setup_interface, }; /** diff --git a/drivers/mtd/nand/raw/gpio.c b/drivers/mtd/nand/raw/gpio.c index 938077e5c6a9..3bd847ccc3f3 100644 --- a/drivers/mtd/nand/raw/gpio.c +++ b/drivers/mtd/nand/raw/gpio.c @@ -25,8 +25,11 @@ #include <linux/mtd/nand-gpio.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/delay.h> struct gpiomtd { + struct nand_controller base; + void __iomem *io; void __iomem *io_sync; struct nand_chip nand_chip; struct gpio_nand_platdata plat; @@ -69,34 +72,99 @@ static void gpio_nand_dosync(struct gpiomtd *gpiomtd) static inline void gpio_nand_dosync(struct 
gpiomtd *gpiomtd) {} #endif -static void gpio_nand_cmd_ctrl(struct nand_chip *chip, int cmd, - unsigned int ctrl) +static int gpio_nand_exec_instr(struct nand_chip *chip, + const struct nand_op_instr *instr) { struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip)); + unsigned int i; - gpio_nand_dosync(gpiomtd); + switch (instr->type) { + case NAND_OP_CMD_INSTR: + gpio_nand_dosync(gpiomtd); + gpiod_set_value(gpiomtd->cle, 1); + gpio_nand_dosync(gpiomtd); + writeb(instr->ctx.cmd.opcode, gpiomtd->io); + gpio_nand_dosync(gpiomtd); + gpiod_set_value(gpiomtd->cle, 0); + return 0; + + case NAND_OP_ADDR_INSTR: + gpio_nand_dosync(gpiomtd); + gpiod_set_value(gpiomtd->ale, 1); + gpio_nand_dosync(gpiomtd); + for (i = 0; i < instr->ctx.addr.naddrs; i++) + writeb(instr->ctx.addr.addrs[i], gpiomtd->io); + gpio_nand_dosync(gpiomtd); + gpiod_set_value(gpiomtd->ale, 0); + return 0; + + case NAND_OP_DATA_IN_INSTR: + gpio_nand_dosync(gpiomtd); + if ((chip->options & NAND_BUSWIDTH_16) && + !instr->ctx.data.force_8bit) + ioread16_rep(gpiomtd->io, instr->ctx.data.buf.in, + instr->ctx.data.len / 2); + else + ioread8_rep(gpiomtd->io, instr->ctx.data.buf.in, + instr->ctx.data.len); + return 0; - if (ctrl & NAND_CTRL_CHANGE) { - if (gpiomtd->nce) - gpiod_set_value(gpiomtd->nce, !(ctrl & NAND_NCE)); - gpiod_set_value(gpiomtd->cle, !!(ctrl & NAND_CLE)); - gpiod_set_value(gpiomtd->ale, !!(ctrl & NAND_ALE)); + case NAND_OP_DATA_OUT_INSTR: gpio_nand_dosync(gpiomtd); + if ((chip->options & NAND_BUSWIDTH_16) && + !instr->ctx.data.force_8bit) + iowrite16_rep(gpiomtd->io, instr->ctx.data.buf.out, + instr->ctx.data.len / 2); + else + iowrite8_rep(gpiomtd->io, instr->ctx.data.buf.out, + instr->ctx.data.len); + return 0; + + case NAND_OP_WAITRDY_INSTR: + if (!gpiomtd->rdy) + return nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms); + + return nand_gpio_waitrdy(chip, gpiomtd->rdy, + instr->ctx.waitrdy.timeout_ms); + + default: + return -EINVAL; } - if (cmd == NAND_CMD_NONE) - return; - writeb(cmd, gpiomtd->nand_chip.legacy.IO_ADDR_W); - gpio_nand_dosync(gpiomtd); + return 0; } -static int gpio_nand_devready(struct nand_chip *chip) +static int gpio_nand_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) { struct gpiomtd *gpiomtd = gpio_nand_getpriv(nand_to_mtd(chip)); + unsigned int i; + int ret = 0; + + if (check_only) + return 0; - return gpiod_get_value(gpiomtd->rdy); + gpio_nand_dosync(gpiomtd); + gpiod_set_value(gpiomtd->nce, 0); + for (i = 0; i < op->ninstrs; i++) { + ret = gpio_nand_exec_instr(chip, &op->instrs[i]); + if (ret) + break; + + if (op->instrs[i].delay_ns) + ndelay(op->instrs[i].delay_ns); + } + gpio_nand_dosync(gpiomtd); + gpiod_set_value(gpiomtd->nce, 1); + + return ret; } +static const struct nand_controller_ops gpio_nand_ops = { + .exec_op = gpio_nand_exec_op, +}; + #ifdef CONFIG_OF static const struct of_device_id gpio_nand_id_table[] = { { .compatible = "gpio-control-nand" }, @@ -225,9 +293,9 @@ static int gpio_nand_probe(struct platform_device *pdev) chip = &gpiomtd->nand_chip; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - chip->legacy.IO_ADDR_R = devm_ioremap_resource(dev, res); - if (IS_ERR(chip->legacy.IO_ADDR_R)) - return PTR_ERR(chip->legacy.IO_ADDR_R); + gpiomtd->io = devm_ioremap_resource(dev, res); + if (IS_ERR(gpiomtd->io)) + return PTR_ERR(gpiomtd->io); res = gpio_nand_get_io_sync(pdev); if (res) { @@ -269,17 +337,15 @@ static int gpio_nand_probe(struct platform_device *pdev) ret = PTR_ERR(gpiomtd->rdy); goto out_ce; } - /* Using RDY pin 
*/ - if (gpiomtd->rdy) - chip->legacy.dev_ready = gpio_nand_devready; + + nand_controller_init(&gpiomtd->base); + gpiomtd->base.ops = &gpio_nand_ops; nand_set_flash_node(chip, pdev->dev.of_node); - chip->legacy.IO_ADDR_W = chip->legacy.IO_ADDR_R; chip->ecc.mode = NAND_ECC_SOFT; chip->ecc.algo = NAND_ECC_HAMMING; chip->options = gpiomtd->plat.options; - chip->legacy.chip_delay = gpiomtd->plat.chip_delay; - chip->legacy.cmd_ctrl = gpio_nand_cmd_ctrl; + chip->controller = &gpiomtd->base; mtd = nand_to_mtd(chip); mtd->dev.parent = dev; diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 061a8ddda275..5d4aee46cc55 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -736,8 +736,8 @@ static void gpmi_nfc_apply_timings(struct gpmi_nand_data *this) udelay(dll_wait_time_us); } -static int gpmi_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int gpmi_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct gpmi_nand_data *this = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -2400,7 +2400,7 @@ unmap: static const struct nand_controller_ops gpmi_nand_controller_ops = { .attach_chip = gpmi_nand_attach_chip, - .setup_data_interface = gpmi_setup_data_interface, + .setup_interface = gpmi_setup_interface, .exec_op = gpmi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c index 13fea645c7f0..54e377754a6c 100644 --- a/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c +++ b/drivers/mtd/nand/raw/ingenic/jz4740_ecc.c @@ -90,8 +90,8 @@ static int jz4740_ecc_calculate(struct ingenic_ecc *ecc, * If the written data is completely 0xff, we also want to write 0xff as * ECC, otherwise we will get in trouble when doing subpage writes. */ - if (memcmp(ecc_code, empty_block_ecc, ARRAY_SIZE(empty_block_ecc)) == 0) - memset(ecc_code, 0xff, ARRAY_SIZE(empty_block_ecc)); + if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0) + memset(ecc_code, 0xff, sizeof(empty_block_ecc)); return 0; } diff --git a/drivers/mtd/nand/raw/internals.h b/drivers/mtd/nand/raw/internals.h index 03866b0aadea..012876e14317 100644 --- a/drivers/mtd/nand/raw/internals.h +++ b/drivers/mtd/nand/raw/internals.h @@ -53,12 +53,12 @@ struct nand_manufacturer_ops { }; /** - * struct nand_manufacturer - NAND Flash Manufacturer structure + * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor * @name: Manufacturer name * @id: manufacturer ID code of device. 
* @ops: manufacturer operations */ -struct nand_manufacturer { +struct nand_manufacturer_desc { int id; char *name; const struct nand_manufacturer_ops *ops; @@ -79,14 +79,21 @@ extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops; extern const struct mtd_pairing_scheme dist3_pairing_scheme; /* Core functions */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id); +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id); int nand_bbm_get_next_page(struct nand_chip *chip, int page); int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs); int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr, int allowbbt); -int onfi_fill_data_interface(struct nand_chip *chip, - enum nand_data_interface_type type, - int timing_mode); +void onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode); +unsigned int +onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings); +int nand_choose_best_sdr_timings(struct nand_chip *chip, + struct nand_interface_config *iface, + struct nand_sdr_timings *spec_timings); +const struct nand_interface_config *nand_get_reset_interface_config(void); int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param); int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf, @@ -130,10 +137,10 @@ static inline int nand_exec_op(struct nand_chip *chip, return chip->controller->ops->exec_op(chip, op, false); } -static inline bool nand_has_setup_data_iface(struct nand_chip *chip) +static inline bool nand_controller_can_setup_interface(struct nand_chip *chip) { if (!chip->controller || !chip->controller->ops || - !chip->controller->ops->setup_data_interface) + !chip->controller->ops->setup_interface) return false; if (chip->options & NAND_KEEP_TIMINGS) diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index 260a0430313e..8482d3bd8b1f 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -1096,6 +1096,8 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, const u8 *oob_buf, bool raw, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; @@ -1141,7 +1143,7 @@ static int marvell_nfc_hw_ecc_hmg_do_write_page(struct nand_chip *chip, return ret; ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + PSEC_TO_MSEC(sdr->tPROG_max)); return ret; } @@ -1562,6 +1564,8 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); struct mtd_info *mtd = nand_to_mtd(chip); const struct marvell_hw_ecc_layout *lt = to_marvell_nand(chip)->layout; const u8 *data = buf; @@ -1598,8 +1602,7 @@ static int marvell_nfc_hw_ecc_bch_write_page(struct nand_chip *chip, marvell_nfc_wait_ndrun(chip); } - ret = marvell_nfc_wait_op(chip, - PSEC_TO_MSEC(chip->data_interface.timings.sdr.tPROG_max)); + ret = marvell_nfc_wait_op(chip, PSEC_TO_MSEC(sdr->tPROG_max)); marvell_nfc_disable_hw_ecc(chip); @@ -2305,9 +2308,8 @@ static struct 
nand_bbt_descr bbt_mirror_descr = { .pattern = bbt_mirror_pattern }; -static int marvell_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface - *conf) +static int marvell_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct marvell_nand_chip *marvell_nand = to_marvell_nand(chip); struct marvell_nfc *nfc = to_marvell_nfc(chip->controller); @@ -2508,7 +2510,7 @@ static int marvell_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops marvell_nand_controller_ops = { .attach_chip = marvell_nand_attach_chip, .exec_op = marvell_nfc_exec_op, - .setup_data_interface = marvell_nfc_setup_data_interface, + .setup_interface = marvell_nfc_setup_interface, }; static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, @@ -2644,7 +2646,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, /* * Save a reference value for timing registers before - * ->setup_data_interface() is called. + * ->setup_interface() is called. */ marvell_nand->ndtr0 = readl_relaxed(nfc->regs + NDTR0); marvell_nand->ndtr1 = readl_relaxed(nfc->regs + NDTR1); diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index 3f376471f3f7..0e5829a2b54f 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -573,10 +573,10 @@ static int meson_nfc_write_buf(struct nand_chip *nand, u8 *buf, int len) static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, int page, bool in) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(nand)); struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); - const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); u32 *addrs = nfc->cmdfifo.rw.addrs; u32 cs = nfc->param.chip_select; u32 cmd0, cmd_num, row_start; @@ -626,9 +626,9 @@ static int meson_nfc_rw_cmd_prepare_and_execute(struct nand_chip *nand, static int meson_nfc_write_page_sub(struct nand_chip *nand, int page, int raw) { - struct mtd_info *mtd = nand_to_mtd(nand); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&nand->data_interface); + nand_get_sdr_timings(nand_get_interface_config(nand)); + struct mtd_info *mtd = nand_to_mtd(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct meson_nfc *nfc = nand_get_controller_data(nand); int data_len, info_len; @@ -1097,8 +1097,8 @@ static int meson_chip_buffer_init(struct nand_chip *nand) } static -int meson_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +int meson_nfc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); const struct nand_sdr_timings *timings; @@ -1222,7 +1222,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) static const struct nand_controller_ops meson_nand_controller_ops = { .attach_chip = meson_nand_attach_chip, .detach_chip = meson_nand_detach_chip, - .setup_data_interface = meson_nfc_setup_data_interface, + .setup_interface = meson_nfc_setup_interface, .exec_op = meson_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/mtk_nand.c b/drivers/mtd/nand/raw/mtk_nand.c index c1a6e31aabb8..ad1b55dab211 100644 --- a/drivers/mtd/nand/raw/mtk_nand.c +++ b/drivers/mtd/nand/raw/mtk_nand.c @@ -387,44 +387,6 @@ static int 
mtk_nfc_hw_runtime_config(struct mtd_info *mtd) return 0; } -static void mtk_nfc_select_chip(struct nand_chip *nand, int chip) -{ - struct mtk_nfc *nfc = nand_get_controller_data(nand); - struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand); - - if (chip < 0) - return; - - mtk_nfc_hw_runtime_config(nand_to_mtd(nand)); - - nfi_writel(nfc, mtk_nand->sels[chip], NFI_CSEL); -} - -static int mtk_nfc_dev_ready(struct nand_chip *nand) -{ - struct mtk_nfc *nfc = nand_get_controller_data(nand); - - if (nfi_readl(nfc, NFI_STA) & STA_BUSY) - return 0; - - return 1; -} - -static void mtk_nfc_cmd_ctrl(struct nand_chip *chip, int dat, - unsigned int ctrl) -{ - struct mtk_nfc *nfc = nand_get_controller_data(chip); - - if (ctrl & NAND_ALE) { - mtk_nfc_send_address(nfc, dat); - } else if (ctrl & NAND_CLE) { - mtk_nfc_hw_reset(nfc); - - nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG); - mtk_nfc_send_command(nfc, dat); - } -} - static inline void mtk_nfc_wait_ioready(struct mtk_nfc *nfc) { int rc; @@ -501,8 +463,76 @@ static void mtk_nfc_write_buf(struct nand_chip *chip, const u8 *buf, int len) mtk_nfc_write_byte(chip, buf[i]); } -static int mtk_nfc_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mtk_nfc_exec_instr(struct nand_chip *chip, + const struct nand_op_instr *instr) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + unsigned int i; + u32 status; + + switch (instr->type) { + case NAND_OP_CMD_INSTR: + mtk_nfc_send_command(nfc, instr->ctx.cmd.opcode); + return 0; + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + mtk_nfc_send_address(nfc, instr->ctx.addr.addrs[i]); + return 0; + case NAND_OP_DATA_IN_INSTR: + mtk_nfc_read_buf(chip, instr->ctx.data.buf.in, + instr->ctx.data.len); + return 0; + case NAND_OP_DATA_OUT_INSTR: + mtk_nfc_write_buf(chip, instr->ctx.data.buf.out, + instr->ctx.data.len); + return 0; + case NAND_OP_WAITRDY_INSTR: + return readl_poll_timeout(nfc->regs + NFI_STA, status, + status & STA_BUSY, 20, + instr->ctx.waitrdy.timeout_ms); + default: + break; + } + + return -EINVAL; +} + +static void mtk_nfc_select_target(struct nand_chip *nand, unsigned int cs) +{ + struct mtk_nfc *nfc = nand_get_controller_data(nand); + struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(nand); + + mtk_nfc_hw_runtime_config(nand_to_mtd(nand)); + + nfi_writel(nfc, mtk_nand->sels[cs], NFI_CSEL); +} + +static int mtk_nfc_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) +{ + struct mtk_nfc *nfc = nand_get_controller_data(chip); + unsigned int i; + int ret = 0; + + if (check_only) + return 0; + + mtk_nfc_hw_reset(nfc); + nfi_writew(nfc, CNFG_OP_CUST, NFI_CNFG); + mtk_nfc_select_target(chip, op->cs); + + for (i = 0; i < op->ninstrs; i++) { + ret = mtk_nfc_exec_instr(chip, &op->instrs[i]); + if (ret) + break; + } + + return ret; +} + +static int mtk_nfc_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtk_nfc *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *timings; @@ -803,6 +833,7 @@ static int mtk_nfc_write_page(struct mtd_info *mtd, struct nand_chip *chip, u32 reg; int ret; + mtk_nfc_select_target(chip, chip->cur_cs); nand_prog_page_begin_op(chip, page, 0, NULL, 0); if (!raw) { @@ -920,6 +951,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip, u8 *buf; int rc; + mtk_nfc_select_target(chip, chip->cur_cs); start = data_offs / chip->ecc.size; end = DIV_ROUND_UP(data_offs + readlen, 
chip->ecc.size); @@ -1325,7 +1357,8 @@ static int mtk_nfc_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops mtk_nfc_controller_ops = { .attach_chip = mtk_nfc_attach_chip, - .setup_data_interface = mtk_nfc_setup_data_interface, + .setup_interface = mtk_nfc_setup_interface, + .exec_op = mtk_nfc_exec_op, }; static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, @@ -1381,13 +1414,6 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, nand_set_controller_data(nand, nfc); nand->options |= NAND_USES_DMA | NAND_SUBPAGE_READ; - nand->legacy.dev_ready = mtk_nfc_dev_ready; - nand->legacy.select_chip = mtk_nfc_select_chip; - nand->legacy.write_byte = mtk_nfc_write_byte; - nand->legacy.write_buf = mtk_nfc_write_buf; - nand->legacy.read_byte = mtk_nfc_read_byte; - nand->legacy.read_buf = mtk_nfc_read_buf; - nand->legacy.cmd_ctrl = mtk_nfc_cmd_ctrl; /* set default mode in case dt entry is missing */ nand->ecc.mode = NAND_ECC_HW; diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c index 09dacb83cb5a..a043d76b48cb 100644 --- a/drivers/mtd/nand/raw/mxc_nand.c +++ b/drivers/mtd/nand/raw/mxc_nand.c @@ -137,8 +137,8 @@ struct mxc_nand_devtype_data { u32 (*get_ecc_status)(struct mxc_nand_host *); const struct mtd_ooblayout_ops *ooblayout; void (*select_chip)(struct nand_chip *chip, int cs); - int (*setup_data_interface)(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf); + int (*setup_interface)(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf); void (*enable_hwecc)(struct nand_chip *chip, bool enable); /* @@ -1139,8 +1139,8 @@ static void preset_v1(struct mtd_info *mtd) writew(0x4, NFC_V1_V2_WRPROT); } -static int mxc_nand_v2_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int mxc_nand_v2_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); int tRC_min_ns, tRC_ps, ret; @@ -1432,7 +1432,7 @@ static int mxc_nand_get_features(struct nand_chip *chip, int addr, } /* - * The generic flash bbt decriptors overlap with our ecc + * The generic flash bbt descriptors overlap with our ecc * hardware, so define some i.MX specific ones. 
*/ static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' }; @@ -1521,7 +1521,7 @@ static const struct mxc_nand_devtype_data imx25_nand_devtype_data = { .get_ecc_status = get_ecc_status_v2, .ooblayout = &mxc_v2_ooblayout_ops, .select_chip = mxc_nand_select_chip_v2, - .setup_data_interface = mxc_nand_v2_setup_data_interface, + .setup_interface = mxc_nand_v2_setup_interface, .enable_hwecc = mxc_nand_enable_hwecc_v1_v2, .irqpending_quirk = 0, .needs_ip = 0, @@ -1738,17 +1738,17 @@ static int mxcnd_attach_chip(struct nand_chip *chip) return 0; } -static int mxcnd_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxcnd_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxc_nand_host *host = nand_get_controller_data(chip); - return host->devtype_data->setup_data_interface(chip, chipnr, conf); + return host->devtype_data->setup_interface(chip, chipnr, conf); } static const struct nand_controller_ops mxcnd_controller_ops = { .attach_chip = mxcnd_attach_chip, - .setup_data_interface = mxcnd_setup_data_interface, + .setup_interface = mxcnd_setup_interface, }; static int mxcnd_probe(struct platform_device *pdev) @@ -1809,7 +1809,7 @@ static int mxcnd_probe(struct platform_device *pdev) if (err < 0) return err; - if (!host->devtype_data->setup_data_interface) + if (!host->devtype_data->setup_interface) this->options |= NAND_KEEP_TIMINGS; if (host->devtype_data->needs_ip) { diff --git a/drivers/mtd/nand/raw/mxic_nand.c b/drivers/mtd/nand/raw/mxic_nand.c index 57f36721f4c6..d66b5b0971fa 100644 --- a/drivers/mtd/nand/raw/mxic_nand.c +++ b/drivers/mtd/nand/raw/mxic_nand.c @@ -451,8 +451,8 @@ static int mxic_nfc_exec_op(struct nand_chip *chip, return ret; } -static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) +static int mxic_nfc_setup_interface(struct nand_chip *chip, int chipnr, + const struct nand_interface_config *conf) { struct mxic_nand_ctlr *nfc = nand_get_controller_data(chip); const struct nand_sdr_timings *sdr; @@ -480,7 +480,7 @@ static int mxic_nfc_setup_data_interface(struct nand_chip *chip, int chipnr, static const struct nand_controller_ops mxic_nand_controller_ops = { .exec_op = mxic_nfc_exec_op, - .setup_data_interface = mxic_nfc_setup_data_interface, + .setup_interface = mxic_nfc_setup_interface, }; static int mxic_nfc_probe(struct platform_device *pdev) diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 45124dbb1835..0c768cb88f96 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -773,7 +773,7 @@ int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms) return -ENOTSUPP; /* Wait tWB before polling the STATUS reg. */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); ret = nand_status_op(chip, NULL); @@ -898,7 +898,7 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) } /** - * nand_reset_data_interface - Reset data interface and timings + * nand_reset_interface - Reset data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * @@ -906,11 +906,12 @@ static bool nand_supports_set_features(struct nand_chip *chip, int addr) * * Returns 0 for success or negative error code otherwise. 
*/ -static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) +static int nand_reset_interface(struct nand_chip *chip, int chipnr) { + const struct nand_controller_ops *ops = chip->controller->ops; int ret; - if (!nand_has_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; /* @@ -927,9 +928,9 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) * timings to timing mode 0. */ - onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0); - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + chip->current_interface_config = nand_get_reset_interface_config(); + ret = ops->setup_interface(chip, chipnr, + chip->current_interface_config); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -937,28 +938,36 @@ static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) } /** - * nand_setup_data_interface - Setup the best data interface and timings + * nand_setup_interface - Setup the best data interface and timings * @chip: The NAND chip * @chipnr: Internal die id * - * Find and configure the best data interface and NAND timings supported by - * the chip and the driver. - * First tries to retrieve supported timing modes from ONFI information, - * and if the NAND chip does not support ONFI, relies on the - * ->onfi_timing_mode_default specified in the nand_ids table. + * Configure what has been reported to be the best data interface and NAND + * timings supported by the chip and the driver. * * Returns 0 for success or negative error code otherwise. */ -static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) +static int nand_setup_interface(struct nand_chip *chip, int chipnr) { - u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { - chip->onfi_timing_mode_default, - }; + const struct nand_controller_ops *ops = chip->controller->ops; + u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { }; int ret; - if (!nand_has_setup_data_iface(chip)) + if (!nand_controller_can_setup_interface(chip)) return 0; + /* + * A nand_reset_interface() put both the NAND chip and the NAND + * controller in timings mode 0. If the default mode for this chip is + * also 0, no need to proceed to the change again. Plus, at probe time, + * nand_setup_interface() uses ->set/get_features() which would + * fail anyway as the parameter page is not available yet. 
+ */ + if (!chip->best_interface_config) + return 0; + + tmode_param[0] = chip->best_interface_config->timings.mode; + /* Change the mode on the chip side (if supported by the NAND chip) */ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) { nand_select_target(chip, chipnr); @@ -970,14 +979,13 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) } /* Change the mode on the controller side */ - ret = chip->controller->ops->setup_data_interface(chip, chipnr, - &chip->data_interface); + ret = ops->setup_interface(chip, chipnr, chip->best_interface_config); if (ret) return ret; /* Check the mode has been accepted by the chip, if supported */ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) - return 0; + goto update_interface_config; memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN); nand_select_target(chip, chipnr); @@ -987,12 +995,15 @@ static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) if (ret) goto err_reset_chip; - if (tmode_param[0] != chip->onfi_timing_mode_default) { + if (tmode_param[0] != chip->best_interface_config->timings.mode) { pr_warn("timing mode %d not acknowledged by the NAND chip\n", - chip->onfi_timing_mode_default); + chip->best_interface_config->timings.mode); goto err_reset_chip; } +update_interface_config: + chip->current_interface_config = chip->best_interface_config; + return 0; err_reset_chip: @@ -1000,7 +1011,7 @@ err_reset_chip: * Fallback to mode 0 if the chip explicitly did not ack the chosen * timing mode. */ - nand_reset_data_interface(chip, chipnr); + nand_reset_interface(chip, chipnr); nand_select_target(chip, chipnr); nand_reset_op(chip); nand_deselect_target(chip); @@ -1009,62 +1020,93 @@ err_reset_chip: } /** - * nand_init_data_interface - find the best data interface and timings - * @chip: The NAND chip - * - * Find the best data interface and NAND timings supported by the chip - * and the driver. - * First tries to retrieve supported timing modes from ONFI information, - * and if the NAND chip does not support ONFI, relies on the - * ->onfi_timing_mode_default specified in the nand_ids table. After this - * function nand_chip->data_interface is initialized with the best timing mode - * available. + * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the + * NAND controller and the NAND chip support + * @chip: the NAND chip + * @iface: the interface configuration (can eventually be updated) + * @spec_timings: specific timings, when not fitting the ONFI specification * - * Returns 0 for success or negative error code otherwise. + * If specific timings are provided, use them. Otherwise, retrieve supported + * timing modes from ONFI information. */ -static int nand_init_data_interface(struct nand_chip *chip) +int nand_choose_best_sdr_timings(struct nand_chip *chip, + struct nand_interface_config *iface, + struct nand_sdr_timings *spec_timings) { - int modes, mode, ret; + const struct nand_controller_ops *ops = chip->controller->ops; + int best_mode = 0, mode, ret; - if (!nand_has_setup_data_iface(chip)) - return 0; + iface->type = NAND_SDR_IFACE; - /* - * First try to identify the best timings from ONFI parameters and - * if the NAND does not support ONFI, fallback to the default ONFI - * timing mode. 
- */ - if (chip->parameters.onfi) { - modes = chip->parameters.onfi->async_timing_mode; - } else { - if (!chip->onfi_timing_mode_default) - return 0; + if (spec_timings) { + iface->timings.sdr = *spec_timings; + iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings); - modes = GENMASK(chip->onfi_timing_mode_default, 0); + /* Verify the controller supports the requested interface */ + ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, + iface); + if (!ret) { + chip->best_interface_config = iface; + return ret; + } + + /* Fallback to slower modes */ + best_mode = iface->timings.mode; + } else if (chip->parameters.onfi) { + best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1; } - for (mode = fls(modes) - 1; mode >= 0; mode--) { - ret = onfi_fill_data_interface(chip, NAND_SDR_IFACE, mode); - if (ret) - continue; + for (mode = best_mode; mode >= 0; mode--) { + onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode); - /* - * Pass NAND_DATA_IFACE_CHECK_ONLY to only check if the - * controller supports the requested timings. - */ - ret = chip->controller->ops->setup_data_interface(chip, - NAND_DATA_IFACE_CHECK_ONLY, - &chip->data_interface); - if (!ret) { - chip->onfi_timing_mode_default = mode; + ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY, + iface); + if (!ret) break; - } } + chip->best_interface_config = iface; + return 0; } /** + * nand_choose_interface_config - find the best data interface and timings + * @chip: The NAND chip + * + * Find the best data interface and NAND timings supported by the chip + * and the driver. Eventually let the NAND manufacturer driver propose his own + * set of timings. + * + * After this function nand_chip->interface_config is initialized with the best + * timing mode available. + * + * Returns 0 for success or negative error code otherwise. 
+ */ +static int nand_choose_interface_config(struct nand_chip *chip) +{ + struct nand_interface_config *iface; + int ret; + + if (!nand_controller_can_setup_interface(chip)) + return 0; + + iface = kzalloc(sizeof(*iface), GFP_KERNEL); + if (!iface) + return -ENOMEM; + + if (chip->ops.choose_interface_config) + ret = chip->ops.choose_interface_config(chip, iface); + else + ret = nand_choose_best_sdr_timings(chip, iface, NULL); + + if (ret) + kfree(iface); + + return ret; +} + +/** * nand_fill_column_cycles - fill the column cycles of an address * @chip: The NAND chip * @addrs: Array of address cycles to fill @@ -1122,9 +1164,9 @@ static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, void *buf, unsigned int len) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[4]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1166,7 +1208,7 @@ static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page, unsigned int len) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[5]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_READ0, 0), @@ -1263,7 +1305,7 @@ int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PARAM, 0), NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1318,7 +1360,7 @@ int nand_change_read_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2] = {}; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDOUT, 0), @@ -1392,9 +1434,9 @@ static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page, unsigned int offset_in_page, const void *buf, unsigned int len, bool prog) { - struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); + struct mtd_info *mtd = nand_to_mtd(chip); u8 addrs[5] = {}; struct nand_op_instr instrs[] = { /* @@ -1517,7 +1559,7 @@ int nand_prog_page_end_op(struct nand_chip *chip) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1624,7 +1666,7 @@ int nand_change_write_column_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[2]; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RNDIN, 0), @@ -1679,7 +1721,7 @@ int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] 
= { NAND_OP_CMD(NAND_CMD_READID, 0), NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1718,7 +1760,7 @@ int nand_status_op(struct nand_chip *chip, u8 *status) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_STATUS, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1787,7 +1829,7 @@ int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock) if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); u8 addrs[3] = { page, page >> 8, page >> 16 }; struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_ERASE1, 0), @@ -1846,7 +1888,7 @@ static int nand_set_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)), @@ -1893,7 +1935,7 @@ static int nand_get_features_op(struct nand_chip *chip, u8 feature, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0), NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)), @@ -1950,7 +1992,7 @@ int nand_reset_op(struct nand_chip *chip) { if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)), NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0), @@ -2480,17 +2522,16 @@ EXPORT_SYMBOL_GPL(nand_subop_get_data_len); * @chipnr: Internal die id * * Save the timings data structure, then apply SDR timings mode 0 (see - * nand_reset_data_interface for details), do the reset operation, and - * apply back the previous timings. + * nand_reset_interface for details), do the reset operation, and apply + * back the previous timings. * * Returns 0 on success, a negative error code otherwise. */ int nand_reset(struct nand_chip *chip, int chipnr) { - struct nand_data_interface saved_data_intf = chip->data_interface; int ret; - ret = nand_reset_data_interface(chip, chipnr); + ret = nand_reset_interface(chip, chipnr); if (ret) return ret; @@ -2505,18 +2546,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) if (ret) return ret; - /* - * A nand_reset_data_interface() put both the NAND chip and the NAND - * controller in timings mode 0. If the default mode for this chip is - * also 0, no need to proceed to the change again. Plus, at probe time, - * nand_setup_data_interface() uses ->set/get_features() which would - * fail anyway as the parameter page is not available yet. 
- */ - if (!chip->onfi_timing_mode_default) - return 0; - - chip->data_interface = saved_data_intf; - ret = nand_setup_data_interface(chip, chipnr); + ret = nand_setup_interface(chip, chipnr); if (ret) return ret; @@ -3215,10 +3245,10 @@ static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode) if (retry_mode >= chip->read_retries) return -EINVAL; - if (!chip->setup_read_retry) + if (!chip->ops.setup_read_retry) return -EOPNOTSUPP; - return chip->setup_read_retry(chip, retry_mode); + return chip->ops.setup_read_retry(chip, retry_mode); } static void nand_wait_readrdy(struct nand_chip *chip) @@ -3228,7 +3258,7 @@ static void nand_wait_readrdy(struct nand_chip *chip) if (!(chip->options & NAND_NEED_READRDY)) return; - sdr = nand_get_sdr_timings(&chip->data_interface); + sdr = nand_get_sdr_timings(nand_get_interface_config(chip)); WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0)); } @@ -4462,8 +4492,8 @@ static int nand_suspend(struct mtd_info *mtd) int ret = 0; mutex_lock(&chip->lock); - if (chip->suspend) - ret = chip->suspend(chip); + if (chip->ops.suspend) + ret = chip->ops.suspend(chip); if (!ret) chip->suspended = 1; mutex_unlock(&chip->lock); @@ -4481,8 +4511,8 @@ static void nand_resume(struct mtd_info *mtd) mutex_lock(&chip->lock); if (chip->suspended) { - if (chip->resume) - chip->resume(chip); + if (chip->ops.resume) + chip->ops.resume(chip); chip->suspended = 0; } else { pr_err("%s called for a chip which is not in suspended state\n", @@ -4511,10 +4541,10 @@ static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->lock_area) + if (!chip->ops.lock_area) return -ENOTSUPP; - return chip->lock_area(chip, ofs, len); + return chip->ops.lock_area(chip, ofs, len); } /** @@ -4527,10 +4557,10 @@ static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct nand_chip *chip = mtd_to_nand(mtd); - if (!chip->unlock_area) + if (!chip->ops.unlock_area) return -ENOTSUPP; - return chip->unlock_area(chip, ofs, len); + return chip->ops.unlock_area(chip, ofs, len); } /* Set default functions */ @@ -4743,8 +4773,6 @@ static bool find_full_id_nand(struct nand_chip *chip, chip->options |= type->options; chip->base.eccreq.strength = NAND_ECC_STRENGTH(type); chip->base.eccreq.step_size = NAND_ECC_STEP(type); - chip->onfi_timing_mode_default = - type->onfi_timing_mode_default; chip->parameters.model = kstrdup(type->name, GFP_KERNEL); if (!chip->parameters.model) @@ -4810,9 +4838,9 @@ static void nand_manufacturer_cleanup(struct nand_chip *chip) } static const char * -nand_manufacturer_name(const struct nand_manufacturer *manufacturer) +nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc) { - return manufacturer ? manufacturer->name : "Unknown"; + return manufacturer_desc ? 
manufacturer_desc->name : "Unknown"; } /* @@ -4820,7 +4848,7 @@ nand_manufacturer_name(const struct nand_manufacturer *manufacturer) */ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) { - const struct nand_manufacturer *manufacturer; + const struct nand_manufacturer_desc *manufacturer_desc; struct mtd_info *mtd = nand_to_mtd(chip); struct nand_memory_organization *memorg; int busw, ret; @@ -4877,8 +4905,8 @@ static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type) chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data)); /* Try to identify manufacturer */ - manufacturer = nand_get_manufacturer(maf_id); - chip->manufacturer.desc = manufacturer; + manufacturer_desc = nand_get_manufacturer_desc(maf_id); + chip->manufacturer.desc = manufacturer_desc; if (!type) type = nand_flash_ids; @@ -4957,7 +4985,7 @@ ident_done: */ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), mtd->name); pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8, (chip->options & NAND_BUSWIDTH_16) ? 16 : 8); @@ -4992,7 +5020,7 @@ ident_done: pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n", maf_id, dev_id); - pr_info("%s %s\n", nand_manufacturer_name(manufacturer), + pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc), chip->parameters.model); pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n", (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC", @@ -5185,7 +5213,7 @@ static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips, mutex_init(&chip->lock); /* Enforce the right timings for reset/detection */ - onfi_fill_data_interface(chip, NAND_SDR_IFACE, 0); + chip->current_interface_config = nand_get_reset_interface_config(); ret = nand_dt_init(chip); if (ret) @@ -5972,16 +6000,16 @@ static int nand_scan_tail(struct nand_chip *chip) if (!mtd->bitflip_threshold) mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); - /* Initialize the ->data_interface field. */ - ret = nand_init_data_interface(chip); + /* Find the fastest data interface for this chip */ + ret = nand_choose_interface_config(chip); if (ret) goto err_nanddev_cleanup; /* Enter fastest possible mode on all dies. */ for (i = 0; i < nanddev_ntargets(&chip->base); i++) { - ret = nand_setup_data_interface(chip, i); + ret = nand_setup_interface(chip, i); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; } /* Check, if we should skip the bad block table scan */ @@ -5991,10 +6019,12 @@ static int nand_scan_tail(struct nand_chip *chip) /* Build bad block table */ ret = nand_create_bbt(chip); if (ret) - goto err_nanddev_cleanup; + goto err_free_interface_config; return 0; +err_free_interface_config: + kfree(chip->best_interface_config); err_nanddev_cleanup: nanddev_cleanup(&chip->base); @@ -6088,6 +6118,9 @@ void nand_cleanup(struct nand_chip *chip) & NAND_BBT_DYNAMICSTRUCT) kfree(chip->badblock_pattern); + /* Free the data interface */ + kfree(chip->best_interface_config); + /* Free manufacturer priv data. 
*/ nand_manufacturer_cleanup(chip); diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 96045d60471e..344a24fd2ca8 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c @@ -1226,7 +1226,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd) return -ENOMEM; /* - * If no primary table decriptor is given, scan the device to build a + * If no primary table descriptor is given, scan the device to build a * memory based bad block table. */ if (!td) { diff --git a/drivers/mtd/nand/raw/nand_hynix.c b/drivers/mtd/nand/raw/nand_hynix.c index 7caedaa5b9e5..6d08eb834456 100644 --- a/drivers/mtd/nand/raw/nand_hynix.c +++ b/drivers/mtd/nand/raw/nand_hynix.c @@ -337,7 +337,7 @@ static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip, rr->nregs = nregs; rr->regs = hynix_1xnm_mlc_read_retry_regs; hynix->read_retry = rr; - chip->setup_read_retry = hynix_nand_setup_read_retry; + chip->ops.setup_read_retry = hynix_nand_setup_read_retry; chip->read_retries = nmodes; out: @@ -673,6 +673,15 @@ static void hynix_nand_cleanup(struct nand_chip *chip) nand_set_manufacturer_data(chip, NULL); } +static int +h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface) +{ + onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4); + + return nand_choose_best_sdr_timings(chip, iface, NULL); +} + static int hynix_nand_init(struct nand_chip *chip) { struct hynix_nand *hynix; @@ -689,6 +698,11 @@ static int hynix_nand_init(struct nand_chip *chip) nand_set_manufacturer_data(chip, hynix); + if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model, + sizeof("H27UCG8T2ATR-BC") - 1)) + chip->ops.choose_interface_config = + h27ucg8t2atrbc_choose_interface_config; + ret = hynix_nand_rr_init(chip); if (ret) hynix_nand_cleanup(chip); diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index ba27902fc54b..b9945791a9d7 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -28,8 +28,7 @@ struct nand_flash_dev nand_flash_ids[] = { */ {"TC58NVG0S3E 1G 3.3V 8-bit", { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} }, - SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), - 2 }, + SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), }, {"TC58NVG2S0F 4G 3.3V 8-bit", { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} }, SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) }, @@ -51,7 +50,10 @@ struct nand_flash_dev nand_flash_ids[] = { {"H27UCG8T2ATR-BC 64G 3.3V 8-bit", { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} }, SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640, - NAND_ECC_INFO(40, SZ_1K), 4 }, + NAND_ECC_INFO(40, SZ_1K) }, + {"TH58NVG2S3HBAI4 4G 3.3V 8-bit", + { .id = {0x98, 0xdc, 0x91, 0x15, 0x76} }, + SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) }, LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), @@ -166,7 +168,7 @@ struct nand_flash_dev nand_flash_ids[] = { }; /* Manufacturer IDs */ -static const struct nand_manufacturer nand_manufacturers[] = { +static const struct nand_manufacturer_desc nand_manufacturer_descs[] = { {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops}, {NAND_MFR_ATO, "ATO"}, {NAND_MFR_EON, "Eon"}, @@ -186,20 +188,20 @@ static const struct nand_manufacturer nand_manufacturers[] = { }; /** - * nand_get_manufacturer - Get manufacturer information from the manufacturer - * ID + * 
nand_get_manufacturer_desc - Get manufacturer information from the + * manufacturer ID * @id: manufacturer ID * - * Returns a pointer a nand_manufacturer object if the manufacturer is defined + * Returns a nand_manufacturer_desc object if the manufacturer is defined * in the NAND manufacturers database, NULL otherwise. */ -const struct nand_manufacturer *nand_get_manufacturer(u8 id) +const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id) { int i; - for (i = 0; i < ARRAY_SIZE(nand_manufacturers); i++) - if (nand_manufacturers[i].id == id) - return &nand_manufacturers[i]; + for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++) + if (nand_manufacturer_descs[i].id == id) + return &nand_manufacturer_descs[i]; return NULL; } diff --git a/drivers/mtd/nand/raw/nand_legacy.c b/drivers/mtd/nand/raw/nand_legacy.c index d64791c06a97..2bcc03714432 100644 --- a/drivers/mtd/nand/raw/nand_legacy.c +++ b/drivers/mtd/nand/raw/nand_legacy.c @@ -354,6 +354,9 @@ static void nand_command(struct nand_chip *chip, unsigned int command, static void nand_ccs_delay(struct nand_chip *chip) { + const struct nand_sdr_timings *sdr = + nand_get_sdr_timings(nand_get_interface_config(chip)); + /* * The controller already takes care of waiting for tCCS when the RNDIN * or RNDOUT command is sent, return directly. @@ -365,8 +368,8 @@ static void nand_ccs_delay(struct nand_chip *chip) * Wait tCCS_min if it is correctly defined, otherwise wait 500ns * (which should be safe for all NANDs). */ - if (nand_has_setup_data_iface(chip)) - ndelay(chip->data_interface.timings.sdr.tCCS_min / 1000); + if (nand_controller_can_setup_interface(chip)) + ndelay(sdr->tCCS_min / 1000); else ndelay(500); } diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c index 09c254c97b5c..1472f925f386 100644 --- a/drivers/mtd/nand/raw/nand_macronix.c +++ b/drivers/mtd/nand/raw/nand_macronix.c @@ -130,7 +130,7 @@ static void macronix_nand_onfi_init(struct nand_chip *chip) return; chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES; - chip->setup_read_retry = macronix_nand_setup_read_retry; + chip->ops.setup_read_retry = macronix_nand_setup_read_retry; if (p->supports_set_get_features) { bitmap_set(p->set_feature_list, @@ -242,8 +242,8 @@ static void macronix_nand_block_protection_support(struct nand_chip *chip) bitmap_set(chip->parameters.set_feature_list, ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1); - chip->lock_area = mxic_nand_lock; - chip->unlock_area = mxic_nand_unlock; + chip->ops.lock_area = mxic_nand_lock; + chip->ops.unlock_area = mxic_nand_unlock; } static int nand_power_down_op(struct nand_chip *chip) @@ -312,8 +312,8 @@ static void macronix_nand_deep_power_down_support(struct nand_chip *chip) if (i < 0) return; - chip->suspend = mxic_nand_suspend; - chip->resume = mxic_nand_resume; + chip->ops.suspend = mxic_nand_suspend; + chip->ops.resume = mxic_nand_resume; } static int macronix_nand_init(struct nand_chip *chip) diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c index 3589b4fce0d4..4385092a9325 100644 --- a/drivers/mtd/nand/raw/nand_micron.c +++ b/drivers/mtd/nand/raw/nand_micron.c @@ -84,7 +84,7 @@ static int micron_nand_onfi_init(struct nand_chip *chip) struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor; chip->read_retries = micron->read_retry_options; - chip->setup_read_retry = micron_nand_setup_read_retry; + chip->ops.setup_read_retry = micron_nand_setup_read_retry; } if (p->supports_set_get_features) { diff --git 
a/drivers/mtd/nand/raw/nand_timings.c b/drivers/mtd/nand/raw/nand_timings.c index 36d21be3dfe5..94d832646487 100644 --- a/drivers/mtd/nand/raw/nand_timings.c +++ b/drivers/mtd/nand/raw/nand_timings.c @@ -12,7 +12,14 @@ #define ONFI_DYN_TIMING_MAX U16_MAX -static const struct nand_data_interface onfi_sdr_timings[] = { +/* + * For non-ONFI chips we use the highest possible value for tPROG and tBERS. + * tR and tCCS take the default values given in the ONFI specification for + * timing mode 0: 200us and 500ns, respectively. + * + * These four values are tweaked to be more accurate in the case of ONFI chips. + */ +static const struct nand_interface_config onfi_sdr_timings[] = { /* Mode 0 */ { .type = NAND_SDR_IFACE, @@ -20,6 +27,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = { .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, .tADL_min = 400000, .tALH_min = 20000, .tALS_min = 50000, @@ -63,6 +72,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = { .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, .tADL_min = 400000, .tALH_min = 10000, .tALS_min = 25000, @@ -106,6 +117,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = { .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, .tADL_min = 400000, .tALH_min = 10000, .tALS_min = 15000, @@ -149,6 +162,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = { .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, .tADL_min = 400000, .tALH_min = 5000, .tALS_min = 10000, @@ -192,6 +207,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = { .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, .tADL_min = 400000, .tALH_min = 5000, .tALS_min = 10000, @@ -235,6 +252,8 @@ static const struct nand_data_interface onfi_sdr_timings[] = { .timings.sdr = { .tCCS_min = 500000, .tR_max = 200000000, + .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX, + .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX, .tADL_min = 400000, .tALH_min = 5000, .tALS_min = 10000, @@ -273,23 +292,79 @@ static const struct nand_data_interface onfi_sdr_timings[] = { }, }; +/* All NAND chips share the same reset data interface: SDR mode 0 */ +const struct nand_interface_config *nand_get_reset_interface_config(void) +{ + return &onfi_sdr_timings[0]; +} + +/** + * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a + * set of timings + * @spec_timings: the timings to challenge + */ +unsigned int +onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings) +{ + const struct nand_sdr_timings *onfi_timings; + int mode; + + for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) { + onfi_timings = &onfi_sdr_timings[mode].timings.sdr; + + if (spec_timings->tCCS_min <= onfi_timings->tCCS_min && + spec_timings->tADL_min <= onfi_timings->tADL_min && + spec_timings->tALH_min <= onfi_timings->tALH_min && + spec_timings->tALS_min <= onfi_timings->tALS_min && + spec_timings->tAR_min <= onfi_timings->tAR_min && + spec_timings->tCEH_min <= onfi_timings->tCEH_min && +
spec_timings->tCH_min <= onfi_timings->tCH_min && + spec_timings->tCLH_min <= onfi_timings->tCLH_min && + spec_timings->tCLR_min <= onfi_timings->tCLR_min && + spec_timings->tCLS_min <= onfi_timings->tCLS_min && + spec_timings->tCOH_min <= onfi_timings->tCOH_min && + spec_timings->tCS_min <= onfi_timings->tCS_min && + spec_timings->tDH_min <= onfi_timings->tDH_min && + spec_timings->tDS_min <= onfi_timings->tDS_min && + spec_timings->tIR_min <= onfi_timings->tIR_min && + spec_timings->tRC_min <= onfi_timings->tRC_min && + spec_timings->tREH_min <= onfi_timings->tREH_min && + spec_timings->tRHOH_min <= onfi_timings->tRHOH_min && + spec_timings->tRHW_min <= onfi_timings->tRHW_min && + spec_timings->tRLOH_min <= onfi_timings->tRLOH_min && + spec_timings->tRP_min <= onfi_timings->tRP_min && + spec_timings->tRR_min <= onfi_timings->tRR_min && + spec_timings->tWC_min <= onfi_timings->tWC_min && + spec_timings->tWH_min <= onfi_timings->tWH_min && + spec_timings->tWHR_min <= onfi_timings->tWHR_min && + spec_timings->tWP_min <= onfi_timings->tWP_min && + spec_timings->tWW_min <= onfi_timings->tWW_min) + return mode; + } + + return 0; +} + /** - * onfi_fill_data_interface - [NAND Interface] Initialize a data interface from - * given ONFI mode - * @mode: The ONFI timing mode + * onfi_fill_interface_config - Initialize an interface config from a given + * ONFI mode + * @chip: The NAND chip + * @iface: The interface configuration to fill + * @type: The interface type + * @timing_mode: The ONFI timing mode */ -int onfi_fill_data_interface(struct nand_chip *chip, - enum nand_data_interface_type type, - int timing_mode) +void onfi_fill_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface, + enum nand_interface_type type, + unsigned int timing_mode) { - struct nand_data_interface *iface = &chip->data_interface; struct onfi_params *onfi = chip->parameters.onfi; - if (type != NAND_SDR_IFACE) - return -EINVAL; + if (WARN_ON(type != NAND_SDR_IFACE)) + return; - if (timing_mode < 0 || timing_mode >= ARRAY_SIZE(onfi_sdr_timings)) - return -EINVAL; + if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings))) + return; *iface = onfi_sdr_timings[timing_mode]; @@ -308,22 +383,5 @@ int onfi_fill_data_interface(struct nand_chip *chip, /* nanoseconds -> picoseconds */ timings->tCCS_min = 1000UL * onfi->tCCS; - } else { - struct nand_sdr_timings *timings = &iface->timings.sdr; - /* - * For non-ONFI chips we use the highest possible value for - * tPROG and tBERS. tR and tCCS will take the default values - * precised in the ONFI specification for timing mode 0, - * respectively 200us and 500ns. 
- */ - - /* microseconds -> picoseconds */ - timings->tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX; - timings->tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX; - - timings->tR_max = 200000000; - timings->tCCS_min = 500000; } - - return 0; } diff --git a/drivers/mtd/nand/raw/nand_toshiba.c b/drivers/mtd/nand/raw/nand_toshiba.c index ae069905d7e4..f746c19f3b2c 100644 --- a/drivers/mtd/nand/raw/nand_toshiba.c +++ b/drivers/mtd/nand/raw/nand_toshiba.c @@ -33,7 +33,7 @@ static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip, if (nand_has_exec_op(chip)) { const struct nand_sdr_timings *sdr = - nand_get_sdr_timings(&chip->data_interface); + nand_get_sdr_timings(nand_get_interface_config(chip)); struct nand_op_instr instrs[] = { NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ, PSEC_TO_NSEC(sdr->tADL_min)), @@ -194,17 +194,79 @@ static void toshiba_nand_decode_id(struct nand_chip *chip) } } +static int +tc58teg5dclta00_choose_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface) +{ + onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 5); + + return nand_choose_best_sdr_timings(chip, iface, NULL); +} + +static int +tc58nvg0s3e_choose_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface) +{ + onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2); + + return nand_choose_best_sdr_timings(chip, iface, NULL); +} + +static int +th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip, + struct nand_interface_config *iface) +{ + struct nand_sdr_timings *sdr = &iface->timings.sdr; + + /* Start with timings from the closest timing mode, mode 4. */ + onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4); + + /* Patch timings that differ from mode 4. */ + sdr->tALS_min = 12000; + sdr->tCHZ_max = 20000; + sdr->tCLS_min = 12000; + sdr->tCOH_min = 0; + sdr->tDS_min = 12000; + sdr->tRHOH_min = 25000; + sdr->tRHW_min = 30000; + sdr->tRHZ_max = 60000; + sdr->tWHR_min = 60000; + + /* Patch timings not part of onfi timing mode. 
*/ + sdr->tPROG_max = 700000000; + sdr->tBERS_max = 5000000000; + + return nand_choose_best_sdr_timings(chip, iface, sdr); +} + static int tc58teg5dclta00_init(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); - chip->onfi_timing_mode_default = 5; + chip->ops.choose_interface_config = + &tc58teg5dclta00_choose_interface_config; chip->options |= NAND_NEED_SCRAMBLING; mtd_set_pairing_scheme(mtd, &dist3_pairing_scheme); return 0; } +static int tc58nvg0s3e_init(struct nand_chip *chip) +{ + chip->ops.choose_interface_config = + &tc58nvg0s3e_choose_interface_config; + + return 0; +} + +static int th58nvg2s3hbai4_init(struct nand_chip *chip) +{ + chip->ops.choose_interface_config = + &th58nvg2s3hbai4_choose_interface_config; + + return 0; +} + static int toshiba_nand_init(struct nand_chip *chip) { if (nand_is_slc(chip)) @@ -217,6 +279,12 @@ static int toshiba_nand_init(struct nand_chip *chip) if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model)) tc58teg5dclta00_init(chip); + if (!strncmp("TC58NVG0S3E", chip->parameters.model, + sizeof("TC58NVG0S3E") - 1)) + tc58nvg0s3e_init(chip); + if (!strncmp("TH58NVG2S3HBAI4", chip->parameters.model, + sizeof("TH58NVG2S3HBAI4") - 1)) + th58nvg2s3hbai4_init(chip); return 0; } diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 078b1022ac2a..4b799521a427 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -2,7 +2,7 @@ /* * Error Location Module * - * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/ */ #define DRIVER_NAME "omap-elm" diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index f1daf330951b..bd7a7251429b 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -459,11 +459,13 @@ struct qcom_nand_host { * among different NAND controllers. * @ecc_modes - ecc mode for NAND * @is_bam - whether NAND controller is using BAM + * @is_qpic - whether the NAND controller is part of the QPIC IP * @dev_cmd_reg_start - NAND_DEV_CMD_* registers starting offset */ struct qcom_nandc_props { u32 ecc_modes; bool is_bam; + bool is_qpic; u32 dev_cmd_reg_start; }; @@ -2774,14 +2776,24 @@ static int qcom_nandc_setup(struct qcom_nand_controller *nandc) u32 nand_ctrl; /* kill onenand */ - nandc_write(nandc, SFLASHC_BURST_CFG, 0); + if (!nandc->props->is_qpic) + nandc_write(nandc, SFLASHC_BURST_CFG, 0); nandc_write(nandc, dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD), NAND_DEV_CMD_VLD_VAL); /* enable ADM or BAM DMA */ if (nandc->props->is_bam) { nand_ctrl = nandc_read(nandc, NAND_CTRL); - nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN); + + /* + * NAND_CTRL is an operational register, and CPU + * accesses to operational registers are read-only + * in BAM mode. So update the NAND_CTRL register + * only if it is not in BAM mode.
In most cases BAM + * mode will be enabled in bootloader + */ + if (!(nand_ctrl & BAM_MODE_EN)) + nandc_write(nandc, NAND_CTRL, nand_ctrl | BAM_MODE_EN); } else { nandc_write(nandc, NAND_FLASH_CHIP_SELECT, DM_EN); } @@ -3035,12 +3047,14 @@ static const struct qcom_nandc_props ipq806x_nandc_props = { static const struct qcom_nandc_props ipq4019_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x0, }; static const struct qcom_nandc_props ipq8074_nandc_props = { .ecc_modes = (ECC_BCH_4BIT | ECC_BCH_8BIT), .is_bam = true, + .is_qpic = true, .dev_cmd_reg_start = 0x7000, }; diff --git a/drivers/mtd/nand/raw/s3c2410.c b/drivers/mtd/nand/raw/s3c2410.c index d0dd0c446e4d..105522205979 100644 --- a/drivers/mtd/nand/raw/s3c2410.c +++ b/drivers/mtd/nand/raw/s3c2410.c @@ -808,8 +808,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, return -ENODEV; } -static int s3c2410_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int s3c2410_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct mtd_info *mtd = nand_to_mtd(chip); struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); @@ -999,7 +999,7 @@ static int s3c2410_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops s3c24xx_nand_controller_ops = { .attach_chip = s3c2410_nand_attach_chip, - .setup_data_interface = s3c2410_nand_setup_data_interface, + .setup_interface = s3c2410_nand_setup_interface, }; static const struct of_device_id s3c24xx_nand_dt_ids[] = { diff --git a/drivers/mtd/nand/raw/stm32_fmc2_nand.c b/drivers/mtd/nand/raw/stm32_fmc2_nand.c index 65c9d17b25a3..7f4546ae9130 100644 --- a/drivers/mtd/nand/raw/stm32_fmc2_nand.c +++ b/drivers/mtd/nand/raw/stm32_fmc2_nand.c @@ -11,10 +11,13 @@ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/iopoll.h> +#include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/mtd/rawnand.h> +#include <linux/of_address.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> +#include <linux/regmap.h> #include <linux/reset.h> /* Bad block marker length */ @@ -242,7 +245,8 @@ struct stm32_fmc2_nfc { struct nand_controller base; struct stm32_fmc2_nand nand; struct device *dev; - void __iomem *io_base; + struct device *cdev; + struct regmap *regmap; void __iomem *data_base[FMC2_MAX_CE]; void __iomem *cmd_base[FMC2_MAX_CE]; void __iomem *addr_base[FMC2_MAX_CE]; @@ -277,40 +281,37 @@ static void stm32_fmc2_nfc_timings_init(struct nand_chip *chip) struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct stm32_fmc2_nand *nand = to_fmc2_nand(chip); struct stm32_fmc2_timings *timings = &nand->timings; - u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); u32 pmem, patt; /* Set tclr/tar timings */ - pcr &= ~FMC2_PCR_TCLR; - pcr |= FIELD_PREP(FMC2_PCR_TCLR, timings->tclr); - pcr &= ~FMC2_PCR_TAR; - pcr |= FIELD_PREP(FMC2_PCR_TAR, timings->tar); + regmap_update_bits(nfc->regmap, FMC2_PCR, + FMC2_PCR_TCLR | FMC2_PCR_TAR, + FIELD_PREP(FMC2_PCR_TCLR, timings->tclr) | + FIELD_PREP(FMC2_PCR_TAR, timings->tar)); /* Set tset/twait/thold/thiz timings in common bank */ pmem = FIELD_PREP(FMC2_PMEM_MEMSET, timings->tset_mem); pmem |= FIELD_PREP(FMC2_PMEM_MEMWAIT, timings->twait); pmem |= FIELD_PREP(FMC2_PMEM_MEMHOLD, timings->thold_mem); pmem |= FIELD_PREP(FMC2_PMEM_MEMHIZ, timings->thiz); + regmap_write(nfc->regmap, FMC2_PMEM, pmem); /* Set 
tset/twait/thold/thiz timings in attribut bank */ patt = FIELD_PREP(FMC2_PATT_ATTSET, timings->tset_att); patt |= FIELD_PREP(FMC2_PATT_ATTWAIT, timings->twait); patt |= FIELD_PREP(FMC2_PATT_ATTHOLD, timings->thold_att); patt |= FIELD_PREP(FMC2_PATT_ATTHIZ, timings->thiz); - - writel_relaxed(pcr, nfc->io_base + FMC2_PCR); - writel_relaxed(pmem, nfc->io_base + FMC2_PMEM); - writel_relaxed(patt, nfc->io_base + FMC2_PATT); + regmap_write(nfc->regmap, FMC2_PATT, patt); } static void stm32_fmc2_nfc_setup(struct nand_chip *chip) { struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); - u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); + u32 pcr = 0, pcr_mask; /* Configure ECC algorithm (default configuration is Hamming) */ - pcr &= ~FMC2_PCR_ECCALG; - pcr &= ~FMC2_PCR_BCHECC; + pcr_mask = FMC2_PCR_ECCALG; + pcr_mask |= FMC2_PCR_BCHECC; if (chip->ecc.strength == FMC2_ECC_BCH8) { pcr |= FMC2_PCR_ECCALG; pcr |= FMC2_PCR_BCHECC; @@ -319,15 +320,15 @@ static void stm32_fmc2_nfc_setup(struct nand_chip *chip) } /* Set buswidth */ - pcr &= ~FMC2_PCR_PWID; + pcr_mask |= FMC2_PCR_PWID; if (chip->options & NAND_BUSWIDTH_16) pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16); /* Set ECC sector size */ - pcr &= ~FMC2_PCR_ECCSS; + pcr_mask |= FMC2_PCR_ECCSS; pcr |= FIELD_PREP(FMC2_PCR_ECCSS, FMC2_PCR_ECCSS_512); - writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + regmap_update_bits(nfc->regmap, FMC2_PCR, pcr_mask, pcr); } static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr) @@ -393,81 +394,63 @@ static int stm32_fmc2_nfc_select_chip(struct nand_chip *chip, int chipnr) static void stm32_fmc2_nfc_set_buswidth_16(struct stm32_fmc2_nfc *nfc, bool set) { - u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); + u32 pcr; - pcr &= ~FMC2_PCR_PWID; - if (set) - pcr |= FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16); - writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + pcr = set ? FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_16) : + FIELD_PREP(FMC2_PCR_PWID, FMC2_PCR_PWID_BUSWIDTH_8); + + regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_PWID, pcr); } static void stm32_fmc2_nfc_set_ecc(struct stm32_fmc2_nfc *nfc, bool enable) { - u32 pcr = readl(nfc->io_base + FMC2_PCR); - - pcr &= ~FMC2_PCR_ECCEN; - if (enable) - pcr |= FMC2_PCR_ECCEN; - writel(pcr, nfc->io_base + FMC2_PCR); + regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_ECCEN, + enable ? 
FMC2_PCR_ECCEN : 0); } -static inline void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc) +static void stm32_fmc2_nfc_enable_seq_irq(struct stm32_fmc2_nfc *nfc) { - u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER); - - csqier |= FMC2_CSQIER_TCIE; - nfc->irq_state = FMC2_IRQ_SEQ; - writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER); + regmap_update_bits(nfc->regmap, FMC2_CSQIER, + FMC2_CSQIER_TCIE, FMC2_CSQIER_TCIE); } -static inline void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc) +static void stm32_fmc2_nfc_disable_seq_irq(struct stm32_fmc2_nfc *nfc) { - u32 csqier = readl_relaxed(nfc->io_base + FMC2_CSQIER); - - csqier &= ~FMC2_CSQIER_TCIE; - - writel_relaxed(csqier, nfc->io_base + FMC2_CSQIER); + regmap_update_bits(nfc->regmap, FMC2_CSQIER, FMC2_CSQIER_TCIE, 0); nfc->irq_state = FMC2_IRQ_UNKNOWN; } -static inline void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc) +static void stm32_fmc2_nfc_clear_seq_irq(struct stm32_fmc2_nfc *nfc) { - writel_relaxed(FMC2_CSQICR_CLEAR_IRQ, nfc->io_base + FMC2_CSQICR); + regmap_write(nfc->regmap, FMC2_CSQICR, FMC2_CSQICR_CLEAR_IRQ); } -static inline void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, - int mode) +static void stm32_fmc2_nfc_enable_bch_irq(struct stm32_fmc2_nfc *nfc, int mode) { - u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER); + nfc->irq_state = FMC2_IRQ_BCH; if (mode == NAND_ECC_WRITE) - bchier |= FMC2_BCHIER_EPBRIE; + regmap_update_bits(nfc->regmap, FMC2_BCHIER, + FMC2_BCHIER_EPBRIE, FMC2_BCHIER_EPBRIE); else - bchier |= FMC2_BCHIER_DERIE; - - nfc->irq_state = FMC2_IRQ_BCH; - - writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER); + regmap_update_bits(nfc->regmap, FMC2_BCHIER, + FMC2_BCHIER_DERIE, FMC2_BCHIER_DERIE); } -static inline void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc) +static void stm32_fmc2_nfc_disable_bch_irq(struct stm32_fmc2_nfc *nfc) { - u32 bchier = readl_relaxed(nfc->io_base + FMC2_BCHIER); - - bchier &= ~FMC2_BCHIER_DERIE; - bchier &= ~FMC2_BCHIER_EPBRIE; - - writel_relaxed(bchier, nfc->io_base + FMC2_BCHIER); + regmap_update_bits(nfc->regmap, FMC2_BCHIER, + FMC2_BCHIER_DERIE | FMC2_BCHIER_EPBRIE, 0); nfc->irq_state = FMC2_IRQ_UNKNOWN; } -static inline void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc) +static void stm32_fmc2_nfc_clear_bch_irq(struct stm32_fmc2_nfc *nfc) { - writel_relaxed(FMC2_BCHICR_CLEAR_IRQ, nfc->io_base + FMC2_BCHICR); + regmap_write(nfc->regmap, FMC2_BCHICR, FMC2_BCHICR_CLEAR_IRQ); } /* @@ -481,13 +464,8 @@ static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode) stm32_fmc2_nfc_set_ecc(nfc, false); if (chip->ecc.strength != FMC2_ECC_HAM) { - u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); - - if (mode == NAND_ECC_WRITE) - pcr |= FMC2_PCR_WEN; - else - pcr &= ~FMC2_PCR_WEN; - writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN, + mode == NAND_ECC_WRITE ? 
FMC2_PCR_WEN : 0); reinit_completion(&nfc->complete); stm32_fmc2_nfc_clear_bch_irq(nfc); @@ -502,7 +480,7 @@ static void stm32_fmc2_nfc_hwctl(struct nand_chip *chip, int mode) * ECC is 3 bytes for 512 bytes of data (supports error correction up to * max of 1-bit) */ -static inline void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc) +static void stm32_fmc2_nfc_ham_set_ecc(const u32 ecc_sta, u8 *ecc) { ecc[0] = ecc_sta; ecc[1] = ecc_sta >> 8; @@ -516,15 +494,15 @@ static int stm32_fmc2_nfc_ham_calculate(struct nand_chip *chip, const u8 *data, u32 sr, heccr; int ret; - ret = readl_relaxed_poll_timeout(nfc->io_base + FMC2_SR, - sr, sr & FMC2_SR_NWRF, 1, - 1000 * FMC2_TIMEOUT_MS); + ret = regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + 1000 * FMC2_TIMEOUT_MS); if (ret) { dev_err(nfc->dev, "ham timeout\n"); return ret; } - heccr = readl_relaxed(nfc->io_base + FMC2_HECCR); + regmap_read(nfc->regmap, FMC2_HECCR, &heccr); stm32_fmc2_nfc_ham_set_ecc(heccr, ecc); stm32_fmc2_nfc_set_ecc(nfc, false); @@ -603,13 +581,13 @@ static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data, } /* Read parity bits */ - bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR1); + regmap_read(nfc->regmap, FMC2_BCHPBR1, &bchpbr); ecc[0] = bchpbr; ecc[1] = bchpbr >> 8; ecc[2] = bchpbr >> 16; ecc[3] = bchpbr >> 24; - bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR2); + regmap_read(nfc->regmap, FMC2_BCHPBR2, &bchpbr); ecc[4] = bchpbr; ecc[5] = bchpbr >> 8; ecc[6] = bchpbr >> 16; @@ -617,13 +595,13 @@ static int stm32_fmc2_nfc_bch_calculate(struct nand_chip *chip, const u8 *data, if (chip->ecc.strength == FMC2_ECC_BCH8) { ecc[7] = bchpbr >> 24; - bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR3); + regmap_read(nfc->regmap, FMC2_BCHPBR3, &bchpbr); ecc[8] = bchpbr; ecc[9] = bchpbr >> 8; ecc[10] = bchpbr >> 16; ecc[11] = bchpbr >> 24; - bchpbr = readl_relaxed(nfc->io_base + FMC2_BCHPBR4); + regmap_read(nfc->regmap, FMC2_BCHPBR4, &bchpbr); ecc[12] = bchpbr; } @@ -685,11 +663,7 @@ static int stm32_fmc2_nfc_bch_correct(struct nand_chip *chip, u8 *dat, return -ETIMEDOUT; } - ecc_sta[0] = readl_relaxed(nfc->io_base + FMC2_BCHDSR0); - ecc_sta[1] = readl_relaxed(nfc->io_base + FMC2_BCHDSR1); - ecc_sta[2] = readl_relaxed(nfc->io_base + FMC2_BCHDSR2); - ecc_sta[3] = readl_relaxed(nfc->io_base + FMC2_BCHDSR3); - ecc_sta[4] = readl_relaxed(nfc->io_base + FMC2_BCHDSR4); + regmap_bulk_read(nfc->regmap, FMC2_BCHDSR0, ecc_sta, 5); stm32_fmc2_nfc_set_ecc(nfc, false); @@ -764,30 +738,29 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, { struct stm32_fmc2_nfc *nfc = to_stm32_nfc(chip->controller); struct mtd_info *mtd = nand_to_mtd(chip); - u32 csqcfgr1, csqcfgr2, csqcfgr3; - u32 csqar1, csqar2; u32 ecc_offset = mtd->writesize + FMC2_BBM_LEN; - u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); + /* + * cfg[0] => csqcfgr1, cfg[1] => csqcfgr2, cfg[2] => csqcfgr3 + * cfg[3] => csqar1, cfg[4] => csqar2 + */ + u32 cfg[5]; - if (write_data) - pcr |= FMC2_PCR_WEN; - else - pcr &= ~FMC2_PCR_WEN; - writel_relaxed(pcr, nfc->io_base + FMC2_PCR); + regmap_update_bits(nfc->regmap, FMC2_PCR, FMC2_PCR_WEN, + write_data ? 
FMC2_PCR_WEN : 0); /* * - Set Program Page/Page Read command * - Enable DMA request data * - Set timings */ - csqcfgr1 = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T; + cfg[0] = FMC2_CSQCFGR1_DMADEN | FMC2_CSQCFGR1_CMD1T; if (write_data) - csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN); + cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_SEQIN); else - csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) | - FMC2_CSQCFGR1_CMD2EN | - FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) | - FMC2_CSQCFGR1_CMD2T; + cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_CMD1, NAND_CMD_READ0) | + FMC2_CSQCFGR1_CMD2EN | + FIELD_PREP(FMC2_CSQCFGR1_CMD2, NAND_CMD_READSTART) | + FMC2_CSQCFGR1_CMD2T; /* * - Set Random Data Input/Random Data Read command @@ -796,30 +769,29 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * - Set timings */ if (write_data) - csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN); + cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDIN); else - csqcfgr2 = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) | - FMC2_CSQCFGR2_RCMD2EN | - FIELD_PREP(FMC2_CSQCFGR2_RCMD2, - NAND_CMD_RNDOUTSTART) | - FMC2_CSQCFGR2_RCMD1T | - FMC2_CSQCFGR2_RCMD2T; + cfg[1] = FIELD_PREP(FMC2_CSQCFGR2_RCMD1, NAND_CMD_RNDOUT) | + FMC2_CSQCFGR2_RCMD2EN | + FIELD_PREP(FMC2_CSQCFGR2_RCMD2, NAND_CMD_RNDOUTSTART) | + FMC2_CSQCFGR2_RCMD1T | + FMC2_CSQCFGR2_RCMD2T; if (!raw) { - csqcfgr2 |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN; - csqcfgr2 |= FMC2_CSQCFGR2_SQSDTEN; + cfg[1] |= write_data ? 0 : FMC2_CSQCFGR2_DMASEN; + cfg[1] |= FMC2_CSQCFGR2_SQSDTEN; } /* * - Set the number of sectors to be written * - Set timings */ - csqcfgr3 = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1); + cfg[2] = FIELD_PREP(FMC2_CSQCFGR3_SNBR, chip->ecc.steps - 1); if (write_data) { - csqcfgr3 |= FMC2_CSQCFGR3_RAC2T; + cfg[2] |= FMC2_CSQCFGR3_RAC2T; if (chip->options & NAND_ROW_ADDR_3) - csqcfgr3 |= FMC2_CSQCFGR3_AC5T; + cfg[2] |= FMC2_CSQCFGR3_AC5T; else - csqcfgr3 |= FMC2_CSQCFGR3_AC4T; + cfg[2] |= FMC2_CSQCFGR3_AC4T; } /* @@ -827,8 +799,8 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * Byte 1 and byte 2 => column, we start at 0x0 * Byte 3 and byte 4 => page */ - csqar1 = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page); - csqar1 |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8); + cfg[3] = FIELD_PREP(FMC2_CSQCAR1_ADDC3, page); + cfg[3] |= FIELD_PREP(FMC2_CSQCAR1_ADDC4, page >> 8); /* * - Set chip enable number @@ -836,23 +808,19 @@ static void stm32_fmc2_nfc_rw_page_init(struct nand_chip *chip, int page, * - Calculate the number of address cycles to be issued * - Set byte 5 of address cycle if needed */ - csqar2 = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel); + cfg[4] = FIELD_PREP(FMC2_CSQCAR2_NANDCEN, nfc->cs_sel); if (chip->options & NAND_BUSWIDTH_16) - csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1); + cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset >> 1); else - csqar2 |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset); + cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_SAO, ecc_offset); if (chip->options & NAND_ROW_ADDR_3) { - csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5); - csqar2 |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16); + cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 5); + cfg[4] |= FIELD_PREP(FMC2_CSQCAR2_ADDC5, page >> 16); } else { - csqcfgr1 |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4); + cfg[0] |= FIELD_PREP(FMC2_CSQCFGR1_ACYNBR, 4); } - writel_relaxed(csqcfgr1, nfc->io_base + FMC2_CSQCFGR1); - writel_relaxed(csqcfgr2, nfc->io_base + FMC2_CSQCFGR2); - writel_relaxed(csqcfgr3, 
nfc->io_base + FMC2_CSQCFGR3); - writel_relaxed(csqar1, nfc->io_base + FMC2_CSQAR1); - writel_relaxed(csqar2, nfc->io_base + FMC2_CSQAR2); + regmap_bulk_write(nfc->regmap, FMC2_CSQCFGR1, cfg, 5); } static void stm32_fmc2_nfc_dma_callback(void *arg) @@ -870,7 +838,6 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, struct dma_chan *dma_ch = nfc->dma_rx_ch; enum dma_data_direction dma_data_dir = DMA_FROM_DEVICE; enum dma_transfer_direction dma_transfer_dir = DMA_DEV_TO_MEM; - u32 csqcr = readl_relaxed(nfc->io_base + FMC2_CSQCR); int eccsteps = chip->ecc.steps; int eccsize = chip->ecc.size; unsigned long timeout = msecs_to_jiffies(FMC2_TIMEOUT_MS); @@ -948,8 +915,8 @@ static int stm32_fmc2_nfc_xfer(struct nand_chip *chip, const u8 *buf, stm32_fmc2_nfc_enable_seq_irq(nfc); /* Start the transfer */ - csqcr |= FMC2_CSQCR_CSQSTART; - writel_relaxed(csqcr, nfc->io_base + FMC2_CSQCR); + regmap_update_bits(nfc->regmap, FMC2_CSQCR, + FMC2_CSQCR_CSQSTART, FMC2_CSQCR_CSQSTART); /* Wait end of sequencer transfer */ if (!wait_for_completion_timeout(&nfc->complete, timeout)) { @@ -1042,11 +1009,13 @@ static int stm32_fmc2_nfc_seq_write_page_raw(struct nand_chip *chip, } /* Get a status indicating which sectors have errors */ -static inline u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc) +static u16 stm32_fmc2_nfc_get_mapping_status(struct stm32_fmc2_nfc *nfc) { - u32 csqemsr = readl_relaxed(nfc->io_base + FMC2_CSQEMSR); + u32 csqemsr; - return csqemsr & FMC2_CSQEMSR_SEM; + regmap_read(nfc->regmap, FMC2_CSQEMSR, &csqemsr); + + return FIELD_GET(FMC2_CSQEMSR_SEM, csqemsr); } static int stm32_fmc2_nfc_seq_correct(struct nand_chip *chip, u8 *dat, @@ -1302,22 +1271,22 @@ static int stm32_fmc2_nfc_waitrdy(struct nand_chip *chip, u32 isr, sr; /* Check if there is no pending requests to the NAND flash */ - if (readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_SR, sr, - sr & FMC2_SR_NWRF, 1, - 1000 * FMC2_TIMEOUT_MS)) + if (regmap_read_poll_timeout(nfc->regmap, FMC2_SR, sr, + sr & FMC2_SR_NWRF, 1, + 1000 * FMC2_TIMEOUT_MS)) dev_warn(nfc->dev, "Waitrdy timeout\n"); /* Wait tWB before R/B# signal is low */ - timings = nand_get_sdr_timings(&chip->data_interface); + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); ndelay(PSEC_TO_NSEC(timings->tWB_max)); /* R/B# signal is low, clear high level flag */ - writel_relaxed(FMC2_ICR_CIHLF, nfc->io_base + FMC2_ICR); + regmap_write(nfc->regmap, FMC2_ICR, FMC2_ICR_CIHLF); /* Wait R/B# signal is high */ - return readl_relaxed_poll_timeout_atomic(nfc->io_base + FMC2_ISR, - isr, isr & FMC2_ISR_IHLF, - 5, 1000 * timeout_ms); + return regmap_read_poll_timeout(nfc->regmap, FMC2_ISR, isr, + isr & FMC2_ISR_IHLF, 5, + 1000 * FMC2_TIMEOUT_MS); } static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip, @@ -1375,8 +1344,9 @@ static int stm32_fmc2_nfc_exec_op(struct nand_chip *chip, static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc) { - u32 pcr = readl_relaxed(nfc->io_base + FMC2_PCR); - u32 bcr1 = readl_relaxed(nfc->io_base + FMC2_BCR1); + u32 pcr; + + regmap_read(nfc->regmap, FMC2_PCR, &pcr); /* Set CS used to undefined */ nfc->cs_sel = -1; @@ -1407,12 +1377,13 @@ static void stm32_fmc2_nfc_init(struct stm32_fmc2_nfc *nfc) pcr |= FIELD_PREP(FMC2_PCR_TAR, FMC2_PCR_TAR_DEFAULT); /* Enable FMC2 controller */ - bcr1 |= FMC2_BCR1_FMC2EN; + if (nfc->dev == nfc->cdev) + regmap_update_bits(nfc->regmap, FMC2_BCR1, + FMC2_BCR1_FMC2EN, FMC2_BCR1_FMC2EN); - writel_relaxed(bcr1, nfc->io_base + FMC2_BCR1); - writel_relaxed(pcr, 
nfc->io_base + FMC2_PCR); - writel_relaxed(FMC2_PMEM_DEFAULT, nfc->io_base + FMC2_PMEM); - writel_relaxed(FMC2_PATT_DEFAULT, nfc->io_base + FMC2_PATT); + regmap_write(nfc->regmap, FMC2_PCR, pcr); + regmap_write(nfc->regmap, FMC2_PMEM, FMC2_PMEM_DEFAULT); + regmap_write(nfc->regmap, FMC2_PATT, FMC2_PATT_DEFAULT); } static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, @@ -1546,7 +1517,7 @@ static void stm32_fmc2_nfc_calc_timings(struct nand_chip *chip, } static int stm32_fmc2_nfc_setup_interface(struct nand_chip *chip, int chipnr, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdrt; @@ -1570,7 +1541,7 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc) nfc->dma_tx_ch = dma_request_chan(nfc->dev, "tx"); if (IS_ERR(nfc->dma_tx_ch)) { ret = PTR_ERR(nfc->dma_tx_ch); - if (ret != -ENODEV) + if (ret != -ENODEV && ret != -EPROBE_DEFER) dev_err(nfc->dev, "failed to request tx DMA channel: %d\n", ret); nfc->dma_tx_ch = NULL; @@ -1580,7 +1551,7 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc) nfc->dma_rx_ch = dma_request_chan(nfc->dev, "rx"); if (IS_ERR(nfc->dma_rx_ch)) { ret = PTR_ERR(nfc->dma_rx_ch); - if (ret != -ENODEV) + if (ret != -ENODEV && ret != -EPROBE_DEFER) dev_err(nfc->dev, "failed to request rx DMA channel: %d\n", ret); nfc->dma_rx_ch = NULL; @@ -1590,7 +1561,7 @@ static int stm32_fmc2_nfc_dma_setup(struct stm32_fmc2_nfc *nfc) nfc->dma_ecc_ch = dma_request_chan(nfc->dev, "ecc"); if (IS_ERR(nfc->dma_ecc_ch)) { ret = PTR_ERR(nfc->dma_ecc_ch); - if (ret != -ENODEV) + if (ret != -ENODEV && ret != -EPROBE_DEFER) dev_err(nfc->dev, "failed to request ecc DMA channel: %d\n", ret); nfc->dma_ecc_ch = NULL; @@ -1764,7 +1735,7 @@ static int stm32_fmc2_nfc_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops stm32_fmc2_nfc_controller_ops = { .attach_chip = stm32_fmc2_nfc_attach_chip, .exec_op = stm32_fmc2_nfc_exec_op, - .setup_data_interface = stm32_fmc2_nfc_setup_interface, + .setup_interface = stm32_fmc2_nfc_setup_interface, }; static int stm32_fmc2_nfc_parse_child(struct stm32_fmc2_nfc *nfc, @@ -1838,6 +1809,33 @@ static int stm32_fmc2_nfc_parse_dt(struct stm32_fmc2_nfc *nfc) return ret; } +static int stm32_fmc2_nfc_set_cdev(struct stm32_fmc2_nfc *nfc) +{ + struct device *dev = nfc->dev; + bool ebi_found = false; + + if (dev->parent && of_device_is_compatible(dev->parent->of_node, + "st,stm32mp1-fmc2-ebi")) + ebi_found = true; + + if (of_device_is_compatible(dev->of_node, "st,stm32mp1-fmc2-nfc")) { + if (ebi_found) { + nfc->cdev = dev->parent; + + return 0; + } + + return -EINVAL; + } + + if (ebi_found) + return -EINVAL; + + nfc->cdev = dev; + + return 0; +} + static int stm32_fmc2_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1847,7 +1845,9 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev) struct resource *res; struct mtd_info *mtd; struct nand_chip *chip; + struct resource cres; int chip_cs, mem_region, ret, irq; + int start_region = 0; nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); if (!nfc) @@ -1857,18 +1857,28 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev) nand_controller_init(&nfc->base); nfc->base.ops = &stm32_fmc2_nfc_controller_ops; + ret = stm32_fmc2_nfc_set_cdev(nfc); + if (ret) + return ret; + ret = stm32_fmc2_nfc_parse_dt(nfc); if (ret) return ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - nfc->io_base = devm_ioremap_resource(dev, res); - if (IS_ERR(nfc->io_base)) 
- return PTR_ERR(nfc->io_base); + ret = of_address_to_resource(nfc->cdev->of_node, 0, &cres); + if (ret) + return ret; + + nfc->io_phys_addr = cres.start; + + nfc->regmap = device_node_to_regmap(nfc->cdev->of_node); + if (IS_ERR(nfc->regmap)) + return PTR_ERR(nfc->regmap); - nfc->io_phys_addr = res->start; + if (nfc->dev == nfc->cdev) + start_region = 1; - for (chip_cs = 0, mem_region = 1; chip_cs < FMC2_MAX_CE; + for (chip_cs = 0, mem_region = start_region; chip_cs < FMC2_MAX_CE; chip_cs++, mem_region += 3) { if (!(nfc->cs_assigned & BIT(chip_cs))) continue; @@ -1906,7 +1916,7 @@ static int stm32_fmc2_nfc_probe(struct platform_device *pdev) init_completion(&nfc->complete); - nfc->clk = devm_clk_get(dev, NULL); + nfc->clk = devm_clk_get(nfc->cdev, NULL); if (IS_ERR(nfc->clk)) return PTR_ERR(nfc->clk); @@ -2047,6 +2057,7 @@ static SIMPLE_DEV_PM_OPS(stm32_fmc2_nfc_pm_ops, stm32_fmc2_nfc_suspend, static const struct of_device_id stm32_fmc2_nfc_match[] = { {.compatible = "st,stm32mp15-fmc2"}, + {.compatible = "st,stm32mp1-fmc2-nfc"}, {} }; MODULE_DEVICE_TABLE(of, stm32_fmc2_nfc_match); diff --git a/drivers/mtd/nand/raw/sunxi_nand.c b/drivers/mtd/nand/raw/sunxi_nand.c index ffbc1651fadc..9c50c2b965e1 100644 --- a/drivers/mtd/nand/raw/sunxi_nand.c +++ b/drivers/mtd/nand/raw/sunxi_nand.c @@ -1376,8 +1376,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, #define sunxi_nand_lookup_timing(l, p, c) \ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) -static int sunxi_nfc_setup_data_interface(struct nand_chip *nand, int csline, - const struct nand_data_interface *conf) +static int sunxi_nfc_setup_interface(struct nand_chip *nand, int csline, + const struct nand_interface_config *conf) { struct sunxi_nand_chip *sunxi_nand = to_sunxi_nand(nand); struct sunxi_nfc *nfc = to_sunxi_nfc(sunxi_nand->nand.controller); @@ -1920,7 +1920,7 @@ static int sunxi_nfc_exec_op(struct nand_chip *nand, static const struct nand_controller_ops sunxi_nand_controller_ops = { .attach_chip = sunxi_nand_attach_chip, - .setup_data_interface = sunxi_nfc_setup_data_interface, + .setup_interface = sunxi_nfc_setup_interface, .exec_op = sunxi_nfc_exec_op, }; diff --git a/drivers/mtd/nand/raw/tango_nand.c b/drivers/mtd/nand/raw/tango_nand.c index 246871e01027..bdb965ae7a4a 100644 --- a/drivers/mtd/nand/raw/tango_nand.c +++ b/drivers/mtd/nand/raw/tango_nand.c @@ -113,59 +113,80 @@ struct tango_chip { #define TIMING(t0, t1, t2, t3) ((t0) << 24 | (t1) << 16 | (t2) << 8 | (t3)) -static void tango_cmd_ctrl(struct nand_chip *chip, int dat, unsigned int ctrl) +static void tango_select_target(struct nand_chip *chip, unsigned int cs) { + struct tango_nfc *nfc = to_tango_nfc(chip->controller); struct tango_chip *tchip = to_tango_chip(chip); - if (ctrl & NAND_CLE) - writeb_relaxed(dat, tchip->base + PBUS_CMD); - - if (ctrl & NAND_ALE) - writeb_relaxed(dat, tchip->base + PBUS_ADDR); + writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1); + writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2); + writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG); + writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG); + writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG); + writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG); } -static int tango_dev_ready(struct nand_chip *chip) +static int tango_waitrdy(struct nand_chip *chip, unsigned int timeout_ms) { struct tango_nfc *nfc = to_tango_nfc(chip->controller); + u32 status; - return readl_relaxed(nfc->pbus_base + PBUS_CS_CTRL) 
& PBUS_IORDY; + return readl_relaxed_poll_timeout(nfc->pbus_base + PBUS_CS_CTRL, + status, status & PBUS_IORDY, 20, + timeout_ms); } -static u8 tango_read_byte(struct nand_chip *chip) +static int tango_exec_instr(struct nand_chip *chip, + const struct nand_op_instr *instr) { struct tango_chip *tchip = to_tango_chip(chip); + unsigned int i; - return readb_relaxed(tchip->base + PBUS_DATA); -} - -static void tango_read_buf(struct nand_chip *chip, u8 *buf, int len) -{ - struct tango_chip *tchip = to_tango_chip(chip); + switch (instr->type) { + case NAND_OP_CMD_INSTR: + writeb_relaxed(instr->ctx.cmd.opcode, tchip->base + PBUS_CMD); + return 0; + case NAND_OP_ADDR_INSTR: + for (i = 0; i < instr->ctx.addr.naddrs; i++) + writeb_relaxed(instr->ctx.addr.addrs[i], + tchip->base + PBUS_ADDR); + return 0; + case NAND_OP_DATA_IN_INSTR: + ioread8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.in, + instr->ctx.data.len); + return 0; + case NAND_OP_DATA_OUT_INSTR: + iowrite8_rep(tchip->base + PBUS_DATA, instr->ctx.data.buf.out, + instr->ctx.data.len); + return 0; + case NAND_OP_WAITRDY_INSTR: + return tango_waitrdy(chip, + instr->ctx.waitrdy.timeout_ms); + default: + break; + } - ioread8_rep(tchip->base + PBUS_DATA, buf, len); + return -EINVAL; } -static void tango_write_buf(struct nand_chip *chip, const u8 *buf, int len) +static int tango_exec_op(struct nand_chip *chip, + const struct nand_operation *op, + bool check_only) { - struct tango_chip *tchip = to_tango_chip(chip); - - iowrite8_rep(tchip->base + PBUS_DATA, buf, len); -} + unsigned int i; + int ret = 0; -static void tango_select_chip(struct nand_chip *chip, int idx) -{ - struct tango_nfc *nfc = to_tango_nfc(chip->controller); - struct tango_chip *tchip = to_tango_chip(chip); + if (check_only) + return 0; - if (idx < 0) - return; /* No "chip unselect" function */ + tango_select_target(chip, op->cs); + for (i = 0; i < op->ninstrs; i++) { + ret = tango_exec_instr(chip, &op->instrs[i]); + if (ret) + break; + } - writel_relaxed(tchip->timing1, nfc->reg_base + NFC_TIMING1); - writel_relaxed(tchip->timing2, nfc->reg_base + NFC_TIMING2); - writel_relaxed(tchip->xfer_cfg, nfc->reg_base + NFC_XFER_CFG); - writel_relaxed(tchip->pkt_0_cfg, nfc->reg_base + NFC_PKT_0_CFG); - writel_relaxed(tchip->pkt_n_cfg, nfc->reg_base + NFC_PKT_N_CFG); - writel_relaxed(tchip->bb_cfg, nfc->reg_base + NFC_BB_CFG); + return ret; } /* @@ -279,6 +300,7 @@ static int tango_read_page(struct nand_chip *chip, u8 *buf, struct tango_nfc *nfc = to_tango_nfc(chip->controller); int err, res, len = mtd->writesize; + tango_select_target(chip, chip->cur_cs); if (oob_required) chip->ecc.read_oob(chip, page); @@ -300,22 +322,30 @@ static int tango_write_page(struct nand_chip *chip, const u8 *buf, { struct mtd_info *mtd = nand_to_mtd(chip); struct tango_nfc *nfc = to_tango_nfc(chip->controller); - int err, status, len = mtd->writesize; + const struct nand_sdr_timings *timings; + int err, len = mtd->writesize; + u8 status; /* Calling tango_write_oob() would send PAGEPROG twice */ if (oob_required) return -ENOTSUPP; + tango_select_target(chip, chip->cur_cs); writel_relaxed(0xffffffff, nfc->mem_base + METADATA); err = do_dma(nfc, DMA_TO_DEVICE, NFC_WRITE, buf, len, page); if (err) return err; - status = chip->legacy.waitfunc(chip); - if (status & NAND_STATUS_FAIL) - return -EIO; + timings = nand_get_sdr_timings(nand_get_interface_config(chip)); + err = tango_waitrdy(chip, PSEC_TO_MSEC(timings->tR_max)); + if (err) + return err; - return 0; + err = nand_status_op(chip, &status); + if (err) + 
return err; + + return (status & NAND_STATUS_FAIL) ? -EIO : 0; } static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos) @@ -326,7 +356,9 @@ static void aux_read(struct nand_chip *chip, u8 **buf, int len, int *pos) /* skip over "len" bytes */ nand_change_read_column_op(chip, *pos, NULL, 0, false); } else { - tango_read_buf(chip, *buf, len); + struct tango_chip *tchip = to_tango_chip(chip); + + ioread8_rep(tchip->base + PBUS_DATA, *buf, len); *buf += len; } } @@ -339,7 +371,9 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos) /* skip over "len" bytes */ nand_change_write_column_op(chip, *pos, NULL, 0, false); } else { - tango_write_buf(chip, *buf, len); + struct tango_chip *tchip = to_tango_chip(chip); + + iowrite8_rep(tchip->base + PBUS_DATA, *buf, len); *buf += len; } } @@ -420,6 +454,7 @@ static void raw_write(struct nand_chip *chip, const u8 *buf, const u8 *oob) static int tango_read_page_raw(struct nand_chip *chip, u8 *buf, int oob_required, int page) { + tango_select_target(chip, chip->cur_cs); nand_read_page_op(chip, page, 0, NULL, 0); raw_read(chip, buf, chip->oob_poi); return 0; @@ -428,6 +463,7 @@ static int tango_read_page_raw(struct nand_chip *chip, u8 *buf, static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf, int oob_required, int page) { + tango_select_target(chip, chip->cur_cs); nand_prog_page_begin_op(chip, page, 0, NULL, 0); raw_write(chip, buf, chip->oob_poi); return nand_prog_page_end_op(chip); @@ -435,6 +471,7 @@ static int tango_write_page_raw(struct nand_chip *chip, const u8 *buf, static int tango_read_oob(struct nand_chip *chip, int page) { + tango_select_target(chip, chip->cur_cs); nand_read_page_op(chip, page, 0, NULL, 0); raw_read(chip, NULL, chip->oob_poi); return 0; @@ -442,6 +479,7 @@ static int tango_read_oob(struct nand_chip *chip, int page) static int tango_write_oob(struct nand_chip *chip, int page) { + tango_select_target(chip, chip->cur_cs); nand_prog_page_begin_op(chip, page, 0, NULL, 0); raw_write(chip, NULL, chip->oob_poi); return nand_prog_page_end_op(chip); @@ -477,7 +515,7 @@ static u32 to_ticks(int kHz, int ps) } static int tango_set_timings(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) + const struct nand_interface_config *conf) { const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf); struct tango_nfc *nfc = to_tango_nfc(chip->controller); @@ -527,7 +565,8 @@ static int tango_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tango_controller_ops = { .attach_chip = tango_attach_chip, - .setup_data_interface = tango_set_timings, + .setup_interface = tango_set_timings, + .exec_op = tango_exec_op, }; static int chip_init(struct device *dev, struct device_node *np) @@ -562,12 +601,6 @@ static int chip_init(struct device *dev, struct device_node *np) ecc = &chip->ecc; mtd = nand_to_mtd(chip); - chip->legacy.read_byte = tango_read_byte; - chip->legacy.write_buf = tango_write_buf; - chip->legacy.read_buf = tango_read_buf; - chip->legacy.select_chip = tango_select_chip; - chip->legacy.cmd_ctrl = tango_cmd_ctrl; - chip->legacy.dev_ready = tango_dev_ready; chip->options = NAND_USES_DMA | NAND_NO_SUBPAGE_WRITE | NAND_WAIT_TCCS; diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index f9d046b2cd3b..6b6212ffa01c 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -813,8 +813,8 @@ static void tegra_nand_setup_timing(struct tegra_nand_controller *ctrl, 
writel_relaxed(reg, ctrl->regs + TIMING_2); } -static int tegra_nand_setup_data_interface(struct nand_chip *chip, int csline, - const struct nand_data_interface *conf) +static int tegra_nand_setup_interface(struct nand_chip *chip, int csline, + const struct nand_interface_config *conf) { struct tegra_nand_controller *ctrl = to_tegra_ctrl(chip->controller); const struct nand_sdr_timings *timings; @@ -1053,7 +1053,7 @@ static int tegra_nand_attach_chip(struct nand_chip *chip) static const struct nand_controller_ops tegra_nand_controller_ops = { .attach_chip = &tegra_nand_attach_chip, .exec_op = tegra_nand_exec_op, - .setup_data_interface = tegra_nand_setup_data_interface, + .setup_interface = tegra_nand_setup_interface, }; static int tegra_nand_chips_init(struct device *dev, diff --git a/drivers/mtd/parsers/bcm63xxpart.c b/drivers/mtd/parsers/bcm63xxpart.c index 78f90c6c18fd..b15bdadaedb5 100644 --- a/drivers/mtd/parsers/bcm63xxpart.c +++ b/drivers/mtd/parsers/bcm63xxpart.c @@ -22,6 +22,11 @@ #include <linux/mtd/partitions.h> #include <linux/of.h> +#ifdef CONFIG_MIPS +#include <asm/bootinfo.h> +#include <asm/fw/cfe/cfe_api.h> +#endif /* CONFIG_MIPS */ + #define BCM963XX_CFE_BLOCK_SIZE SZ_64K /* always at least 64KiB */ #define BCM963XX_CFE_MAGIC_OFFSET 0x4e0 @@ -32,28 +37,15 @@ #define STR_NULL_TERMINATE(x) \ do { char *_str = (x); _str[sizeof(x) - 1] = 0; } while (0) -static int bcm63xx_detect_cfe(struct mtd_info *master) +static inline int bcm63xx_detect_cfe(void) { - char buf[9]; - int ret; - size_t retlen; + int ret = 0; - ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen, - (void *)buf); - buf[retlen] = 0; +#ifdef CONFIG_MIPS + ret = (fw_arg3 == CFE_EPTSEAL); +#endif /* CONFIG_MIPS */ - if (ret) - return ret; - - if (strncmp("cfe-v", buf, 5) == 0) - return 0; - - /* very old CFE's do not have the cfe-v string, so check for magic */ - ret = mtd_read(master, BCM963XX_CFE_MAGIC_OFFSET, 8, &retlen, - (void *)buf); - buf[retlen] = 0; - - return strncmp("CFE1CFE1", buf, 8); + return ret; } static int bcm63xx_read_nvram(struct mtd_info *master, @@ -138,7 +130,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master, struct bcm963xx_nvram *nvram = NULL; int ret; - if (bcm63xx_detect_cfe(master)) + if (!bcm63xx_detect_cfe()) return -EINVAL; nvram = vzalloc(sizeof(*nvram)); diff --git a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c index 81329f680bec..c72aa1ab71ad 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi-pci.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi-pci.c @@ -68,7 +68,9 @@ static const struct pci_device_id intel_spi_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x06a4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x18e0), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x19e0), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x1bca), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x34a4), (unsigned long)&bxt_info }, + { PCI_VDEVICE(INTEL, 0x43a4), (unsigned long)&cnl_info }, { PCI_VDEVICE(INTEL, 0x4b24), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0x4da4), (unsigned long)&bxt_info }, { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&bxt_info }, diff --git a/drivers/mtd/spi-nor/controllers/intel-spi.c b/drivers/mtd/spi-nor/controllers/intel-spi.c index 61d2a0ad2131..b54a56a68100 100644 --- a/drivers/mtd/spi-nor/controllers/intel-spi.c +++ b/drivers/mtd/spi-nor/controllers/intel-spi.c @@ -292,7 +292,7 @@ static int intel_spi_wait_hw_busy(struct intel_spi *ispi) u32 val; return 
readl_poll_timeout(ispi->base + HSFSTS_CTL, val, - !(val & HSFSTS_CTL_SCIP), 40, + !(val & HSFSTS_CTL_SCIP), 0, INTEL_SPI_TIMEOUT * 1000); } @@ -301,7 +301,7 @@ static int intel_spi_wait_sw_busy(struct intel_spi *ispi) u32 val; return readl_poll_timeout(ispi->sregs + SSFSTS_CTL, val, - !(val & SSFSTS_CTL_SCIP), 40, + !(val & SSFSTS_CTL_SCIP), 0, INTEL_SPI_TIMEOUT * 1000); } @@ -612,6 +612,15 @@ static int intel_spi_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf, return 0; } + /* + * We hope that HW sequencer will do the right thing automatically and + * with the SW sequencer we cannot use preopcode anyway, so just ignore + * the Write Disable operation and pretend it was completed + * successfully. + */ + if (opcode == SPINOR_OP_WRDI) + return 0; + writel(0, ispi->base + FADDR); /* Write the value beforehand */ diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index 0369d98b2d12..65eff4ce6ab1 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -1907,15 +1907,16 @@ static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) } /** - * spi_nor_sr1_bit6_quad_enable() - Set the Quad Enable BIT(6) in the Status - * Register 1. + * spi_nor_sr1_bit6_quad_enable() - Set/Unset the Quad Enable BIT(6) in the + * Status Register 1. * @nor: pointer to a 'struct spi_nor' + * @enable: true to enable Quad mode, false to disable Quad mode. * * Bit 6 of the Status Register 1 is the QE bit for Macronix like QSPI memories. * * Return: 0 on success, -errno otherwise. */ -int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) +int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable) { int ret; @@ -1923,45 +1924,56 @@ int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor) if (ret) return ret; - if (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6) + if ((enable && (nor->bouncebuf[0] & SR1_QUAD_EN_BIT6)) || + (!enable && !(nor->bouncebuf[0] & SR1_QUAD_EN_BIT6))) return 0; - nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; + if (enable) + nor->bouncebuf[0] |= SR1_QUAD_EN_BIT6; + else + nor->bouncebuf[0] &= ~SR1_QUAD_EN_BIT6; return spi_nor_write_sr1_and_check(nor, nor->bouncebuf[0]); } /** - * spi_nor_sr2_bit1_quad_enable() - set the Quad Enable BIT(1) in the Status - * Register 2. + * spi_nor_sr2_bit1_quad_enable() - set/unset the Quad Enable BIT(1) in the + * Status Register 2. * @nor: pointer to a 'struct spi_nor'. + * @enable: true to enable Quad mode, false to disable Quad mode. * * Bit 1 of the Status Register 2 is the QE bit for Spansion like QSPI memories. * * Return: 0 on success, -errno otherwise. */ -int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) +int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable) { int ret; if (nor->flags & SNOR_F_NO_READ_CR) - return spi_nor_write_16bit_cr_and_check(nor, SR2_QUAD_EN_BIT1); + return spi_nor_write_16bit_cr_and_check(nor, + enable ? SR2_QUAD_EN_BIT1 : 0); ret = spi_nor_read_cr(nor, nor->bouncebuf); if (ret) return ret; - if (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1) + if ((enable && (nor->bouncebuf[0] & SR2_QUAD_EN_BIT1)) || + (!enable && !(nor->bouncebuf[0] & SR2_QUAD_EN_BIT1))) return 0; - nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + if (enable) + nor->bouncebuf[0] |= SR2_QUAD_EN_BIT1; + else + nor->bouncebuf[0] &= ~SR2_QUAD_EN_BIT1; return spi_nor_write_16bit_cr_and_check(nor, nor->bouncebuf[0]); } /** - * spi_nor_sr2_bit7_quad_enable() - set QE bit in Status Register 2. + * spi_nor_sr2_bit7_quad_enable() - set/unset QE bit in Status Register 2. 
* @nor: pointer to a 'struct spi_nor' + * @enable: true to enable Quad mode, false to disable Quad mode. * * Set the Quad Enable (QE) bit in the Status Register 2. * @@ -1971,7 +1983,7 @@ int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor) * * Return: 0 on success, -errno otherwise. */ -int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) +int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable) { u8 *sr2 = nor->bouncebuf; int ret; @@ -1981,11 +1993,15 @@ int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor) ret = spi_nor_read_sr2(nor, sr2); if (ret) return ret; - if (*sr2 & SR2_QUAD_EN_BIT7) + if ((enable && (*sr2 & SR2_QUAD_EN_BIT7)) || + (!enable && !(*sr2 & SR2_QUAD_EN_BIT7))) return 0; /* Update the Quad Enable bit. */ - *sr2 |= SR2_QUAD_EN_BIT7; + if (enable) + *sr2 |= SR2_QUAD_EN_BIT7; + else + *sr2 &= ~SR2_QUAD_EN_BIT7; ret = spi_nor_write_sr2(nor, sr2); if (ret) @@ -2898,12 +2914,13 @@ static int spi_nor_init_params(struct spi_nor *nor) } /** - * spi_nor_quad_enable() - enable Quad I/O if needed. + * spi_nor_quad_enable() - enable/disable Quad I/O if needed. * @nor: pointer to a 'struct spi_nor' + * @enable: true to enable Quad mode. false to disable Quad mode. * * Return: 0 on success, -errno otherwise. */ -static int spi_nor_quad_enable(struct spi_nor *nor) +static int spi_nor_quad_enable(struct spi_nor *nor, bool enable) { if (!nor->params->quad_enable) return 0; @@ -2912,7 +2929,7 @@ static int spi_nor_quad_enable(struct spi_nor *nor) spi_nor_get_protocol_width(nor->write_proto) == 4)) return 0; - return nor->params->quad_enable(nor); + return nor->params->quad_enable(nor, enable); } /** @@ -2936,7 +2953,7 @@ static int spi_nor_init(struct spi_nor *nor) { int err; - err = spi_nor_quad_enable(nor); + err = spi_nor_quad_enable(nor, true); if (err) { dev_dbg(nor->dev, "quad mode not supported\n"); return err; @@ -2983,6 +3000,8 @@ void spi_nor_restore(struct spi_nor *nor) if (nor->addr_width == 4 && !(nor->flags & SNOR_F_4B_OPCODES) && nor->flags & SNOR_F_BROKEN_RESET) nor->params->set_4byte_addr_mode(nor, false); + + spi_nor_quad_enable(nor, false); } EXPORT_SYMBOL_GPL(spi_nor_restore); diff --git a/drivers/mtd/spi-nor/core.h b/drivers/mtd/spi-nor/core.h index 6f2f6b27173f..95aa32f3ceb1 100644 --- a/drivers/mtd/spi-nor/core.h +++ b/drivers/mtd/spi-nor/core.h @@ -198,7 +198,7 @@ struct spi_nor_locking_ops { * higher index in the array, the higher priority. * @erase_map: the erase map parsed from the SFDP Sector Map Parameter * Table. - * @quad_enable: enables SPI NOR quad mode. + * @quad_enable: enables/disables SPI NOR Quad mode. * @set_4byte_addr_mode: puts the SPI NOR in 4 byte addressing mode. * @convert_addr: converts an absolute address into something the flash * will understand. 
Particularly useful when pagesize is @@ -219,7 +219,7 @@ struct spi_nor_flash_parameter { struct spi_nor_erase_map erase_map; - int (*quad_enable)(struct spi_nor *nor); + int (*quad_enable)(struct spi_nor *nor, bool enable); int (*set_4byte_addr_mode)(struct spi_nor *nor, bool enable); u32 (*convert_addr)(struct spi_nor *nor, u32 addr); int (*setup)(struct spi_nor *nor, const struct spi_nor_hwcaps *hwcaps); @@ -406,9 +406,9 @@ int spi_nor_write_ear(struct spi_nor *nor, u8 ear); int spi_nor_wait_till_ready(struct spi_nor *nor); int spi_nor_lock_and_prep(struct spi_nor *nor); void spi_nor_unlock_and_unprep(struct spi_nor *nor); -int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor); -int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor); -int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor); +int spi_nor_sr1_bit6_quad_enable(struct spi_nor *nor, bool enable); +int spi_nor_sr2_bit1_quad_enable(struct spi_nor *nor, bool enable); +int spi_nor_sr2_bit7_quad_enable(struct spi_nor *nor, bool enable); int spi_nor_xread_sr(struct spi_nor *nor, u8 *sr); ssize_t spi_nor_read_data(struct spi_nor *nor, loff_t from, size_t len, diff --git a/drivers/mtd/spi-nor/macronix.c b/drivers/mtd/spi-nor/macronix.c index 96735d83c77c..f97f3d127575 100644 --- a/drivers/mtd/spi-nor/macronix.c +++ b/drivers/mtd/spi-nor/macronix.c @@ -52,6 +52,9 @@ static const struct flash_info macronix_parts[] = { { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, + { "mx25r1635f", INFO(0xc22815, 0, 64 * 1024, 32, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ) }, { "mx25r3235f", INFO(0xc22816, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, @@ -84,6 +87,9 @@ static const struct flash_info macronix_parts[] = { SPI_NOR_QUAD_READ) }, { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, + { "mx66u2g45g", INFO(0xc2253c, 0, 64 * 1024, 4096, + SECT_4K | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, }; static void macronix_default_init(struct spi_nor *nor) diff --git a/drivers/mtd/spi-nor/micron-st.c b/drivers/mtd/spi-nor/micron-st.c index 3dca5b9af3b6..ef3695080710 100644 --- a/drivers/mtd/spi-nor/micron-st.c +++ b/drivers/mtd/spi-nor/micron-st.c @@ -71,8 +71,8 @@ static const struct flash_info st_parts[] = { SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { "mt25qu02g", INFO(0x20bb22, 0, 64 * 1024, 4096, - SECT_4K | USE_FSR | SPI_NOR_QUAD_READ | - NO_CHIP_ERASE) }, + SECT_4K | USE_FSR | SPI_NOR_DUAL_READ | + SPI_NOR_QUAD_READ | NO_CHIP_ERASE) }, { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) }, diff --git a/drivers/mtd/spi-nor/sfdp.c b/drivers/mtd/spi-nor/sfdp.c index 55c0c508464b..e2a43d39eb5f 100644 --- a/drivers/mtd/spi-nor/sfdp.c +++ b/drivers/mtd/spi-nor/sfdp.c @@ -598,7 +598,8 @@ static int spi_nor_parse_bfpt(struct spi_nor *nor, break; default: - return -EINVAL; + dev_dbg(nor->dev, "BFPT QER reserved value used\n"); + break; } /* Stop here if not JESD216 rev C or later. 
*/ diff --git a/drivers/mtd/spi-nor/spansion.c b/drivers/mtd/spi-nor/spansion.c index e550cd5c9d3a..8429b4af999a 100644 --- a/drivers/mtd/spi-nor/spansion.c +++ b/drivers/mtd/spi-nor/spansion.c @@ -64,7 +64,6 @@ static const struct flash_info spansion_parts[] = { { "s25fs512s", INFO6(0x010220, 0x4d0081, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | USE_CLSR) .fixups = &s25fs_s_fixups, }, - { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, @@ -84,7 +83,8 @@ static const struct flash_info spansion_parts[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, - { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, + { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25fl116k", INFO(0x014015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) }, diff --git a/drivers/mtd/spi-nor/winbond.c b/drivers/mtd/spi-nor/winbond.c index 5062af10f138..6dcde15fb1aa 100644 --- a/drivers/mtd/spi-nor/winbond.c +++ b/drivers/mtd/spi-nor/winbond.c @@ -64,10 +64,12 @@ static const struct flash_info winbond_parts[] = { SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, - { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, + { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, + SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, + { "w25q64jvm", INFO(0xef7017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_HAS_LOCK | SPI_NOR_HAS_TB) }, diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 83afc00e365a..28f55f9cf715 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -381,6 +381,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi) ubi->fm_anchor = NULL; } + if (ubi->fm_next_anchor) { + return_unused_peb(ubi, ubi->fm_next_anchor); + ubi->fm_next_anchor = NULL; + } + if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 27636063ed1b..42cac572f82d 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1086,7 +1086,8 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk) if (!err) { spin_lock(&ubi->wl_lock); - if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) { + if (!ubi->fm_disabled && !ubi->fm_next_anchor && + e->pnum < UBI_FM_MAX_START) { /* Abort anchor production, if needed it will be * enabled again in the wear leveling started below. 
*/ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 9162856de1b1..e972138a14ad 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -1728,7 +1728,7 @@ static int hclgevf_reset_wait(struct hclgevf_dev *hdev) /* hardware completion status should be available by this time */ if (ret) { dev_err(&hdev->pdev->dev, - "could'nt get reset done status from h/w, timeout!\n"); + "couldn't get reset done status from h/w, timeout!\n"); return ret; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index c6adc776f3c8..16bda7381ba0 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -334,19 +334,14 @@ void hinic_devlink_unregister(struct hinic_devlink_priv *priv) static int chip_fault_show(struct devlink_fmsg *fmsg, struct hinic_fault_event *event) { - char fault_level[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { - "fatal", "reset", "flr", "general", "suggestion"}; - char level_str[FAULT_SHOW_STR_LEN + 1] = {0}; - u8 level; + const char * const level_str[FAULT_LEVEL_MAX + 1] = { + "fatal", "reset", "flr", "general", "suggestion", "Unknown"}; + u8 fault_level; int err; - level = event->event.chip.err_level; - if (level < FAULT_LEVEL_MAX) - strncpy(level_str, fault_level[level], strlen(fault_level[level])); - else - strncpy(level_str, "Unknown", strlen("Unknown")); - - if (level == FAULT_LEVEL_SERIOUS_FLR) { + fault_level = (event->event.chip.err_level < FAULT_LEVEL_MAX) ? + event->event.chip.err_level : FAULT_LEVEL_MAX; + if (fault_level == FAULT_LEVEL_SERIOUS_FLR) { err = devlink_fmsg_u32_pair_put(fmsg, "Function level err func_id", (u32)event->event.chip.func_id); if (err) @@ -361,7 +356,7 @@ static int chip_fault_show(struct devlink_fmsg *fmsg, if (err) return err; - err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str); + err = devlink_fmsg_string_pair_put(fmsg, "err_level", level_str[fault_level]); if (err) return err; @@ -381,18 +376,15 @@ static int chip_fault_show(struct devlink_fmsg *fmsg, static int fault_report_show(struct devlink_fmsg *fmsg, struct hinic_fault_event *event) { - char fault_type[FAULT_TYPE_MAX][FAULT_SHOW_STR_LEN + 1] = { + const char * const type_str[FAULT_TYPE_MAX + 1] = { "chip", "ucode", "mem rd timeout", "mem wr timeout", - "reg rd timeout", "reg wr timeout", "phy fault"}; - char type_str[FAULT_SHOW_STR_LEN + 1] = {0}; + "reg rd timeout", "reg wr timeout", "phy fault", "Unknown"}; + u8 fault_type; int err; - if (event->type < FAULT_TYPE_MAX) - strncpy(type_str, fault_type[event->type], strlen(fault_type[event->type])); - else - strncpy(type_str, "Unknown", strlen("Unknown")); + fault_type = (event->type < FAULT_TYPE_MAX) ? 
event->type : FAULT_TYPE_MAX; - err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str); + err = devlink_fmsg_string_pair_put(fmsg, "Fault type", type_str[fault_type]); if (err) return err; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index dc6e645f2689..701eb81e09a7 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -504,8 +504,6 @@ enum hinic_fault_type { FAULT_TYPE_MAX, }; -#define FAULT_SHOW_STR_LEN 16 - enum hinic_fault_err_level { FAULT_LEVEL_FATAL, FAULT_LEVEL_SERIOUS_RESET, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index d8315811cbdf..b5399357a667 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -15585,7 +15585,7 @@ unmap: * remediation. **/ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, - enum pci_channel_state error) + pci_channel_state_t error) { struct i40e_pf *pf = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 8437d72795b0..4634b48949bb 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -4593,7 +4593,7 @@ static int __maybe_unused ice_resume(struct device *dev) * is in progress. Allows the driver to gracefully prepare/handle PCI errors. */ static pci_ers_result_t -ice_pci_err_detected(struct pci_dev *pdev, enum pci_channel_state err) +ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err) { struct ice_pf *pf = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c index 46829cfd54df..048351cf0e4a 100644 --- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c +++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c @@ -79,7 +79,7 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, static void ixgb_restore_vlan(struct ixgb_adapter *adapter); static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev, - enum pci_channel_state state); + pci_channel_state_t state); static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev); static void ixgb_io_resume (struct pci_dev *pdev); @@ -2190,7 +2190,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter) * a PCI bus error is detected. 
*/ static pci_ers_result_t ixgb_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { struct net_device *netdev = pci_get_drvdata(pdev); struct ixgb_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 1944bf5264db..26988ad7ec97 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -412,7 +412,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, new->flags = flags; - new->q.info = devm_kzalloc(dev, sizeof(*new->q.info) * num_descs, + new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info), GFP_KERNEL); if (!new->q.info) { netdev_err(lif->netdev, "Cannot allocate queue info\n"); @@ -462,7 +462,7 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED; } - new->cq.info = devm_kzalloc(dev, sizeof(*new->cq.info) * num_descs, + new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info), GFP_KERNEL); if (!new->cq.info) { netdev_err(lif->netdev, "Cannot allocate completion queue info\n"); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 20b1b43a0e39..1166b98d8bb2 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -474,13 +474,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev, ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); if (ret) - return ret; + goto disable_clk_axi; ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); if (ret) - return ret; + goto disable_clk_cfg_ahb; + + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + if (ret) + goto disable_clk_cfg_ahb; - return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); + return 0; + +disable_clk_cfg_ahb: + clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]); +disable_clk_axi: + clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]); + + return ret; } /* Enable clocks; needs emac_clks_phase1_init to be called before */ diff --git a/drivers/net/ethernet/rocker/rocker_hw.h b/drivers/net/ethernet/rocker/rocker_hw.h index 59f1f8b690d2..62fd84cf3435 100644 --- a/drivers/net/ethernet/rocker/rocker_hw.h +++ b/drivers/net/ethernet/rocker/rocker_hw.h @@ -25,7 +25,6 @@ enum { #define ROCKER_FP_PORTS_MAX 62 -#define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_ROCKER 0x0006 #define ROCKER_PCI_BAR0_SIZE 0x2000 diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c index 36598d0542ed..206d70f9d95b 100644 --- a/drivers/net/ethernet/sfc/ef100_nic.c +++ b/drivers/net/ethernet/sfc/ef100_nic.c @@ -979,7 +979,8 @@ static int ef100_process_design_param(struct efx_nic *efx, * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY. * This is very unlikely to fail. */ - if (EFX_MIN_DMAQ_SIZE % reader->value) { + if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE || + EFX_MIN_DMAQ_SIZE % (u32)reader->value) { netif_err(efx, probe, efx->net_dev, "%s size granularity is %llu, can't guarantee safety\n", reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? 
"RXQ" : "TXQ", diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 26b05ec4e712..dfc6032e75f4 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -1229,7 +1229,7 @@ void efx_fini_mcdi_logging(struct efx_nic *efx) * Stop the software path and request a slot reset. */ static pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; struct efx_nic *efx = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 42bcd34fc508..f8979991970e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -3118,7 +3118,7 @@ static const struct dev_pm_ops ef4_pm_ops = { * Stop the software path and request a slot reset. */ static pci_ers_result_t ef4_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED; struct ef4_nic *efx = pci_get_drvdata(pdev); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 02102c781a8c..bf3250e0e59c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -351,6 +351,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) plat_dat->has_gmac = true; plat_dat->bsp_priv = gmac; plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed; + plat_dat->multicast_filter_bins = 0; err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (err) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index efc6ec1b8027..fc8759f146c7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -164,6 +164,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF; } else if (dev->flags & IFF_ALLMULTI) { value = GMAC_FRAME_FILTER_PM; /* pass all multi */ + } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) { + /* Fall back to all multicast if we've no filter */ + value = GMAC_FRAME_FILTER_PM; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h index b10a85392952..55115cfb2972 100644 --- a/drivers/net/ipa/ipa.h +++ b/drivers/net/ipa/ipa.h @@ -10,6 +10,7 @@ #include <linux/device.h> #include <linux/notifier.h> #include <linux/pm_wakeup.h> +#include <linux/notifier.h> #include "ipa_version.h" #include "gsi.h" @@ -73,6 +74,8 @@ struct ipa { enum ipa_version version; struct platform_device *pdev; struct rproc *modem_rproc; + struct notifier_block nb; + void *notifier; struct ipa_smp2p *smp2p; struct ipa_clock *clock; atomic_t suspend_ref; diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c index ed10818dd99f..e34fe2d77324 100644 --- a/drivers/net/ipa/ipa_modem.c +++ b/drivers/net/ipa/ipa_modem.c @@ -9,7 +9,7 @@ #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/if_rmnet.h> -#include <linux/remoteproc/qcom_q6v5_ipa_notify.h> +#include <linux/remoteproc/qcom_rproc.h> #include "ipa.h" #include "ipa_data.h" @@ -311,43 +311,40 @@ static void ipa_modem_crashed(struct ipa *ipa) dev_err(dev, "error %d zeroing modem memory regions\n", ret); } -static void 
ipa_modem_notify(void *data, enum qcom_rproc_event event) +static int ipa_modem_notify(struct notifier_block *nb, unsigned long action, + void *data) { - struct ipa *ipa = data; - struct device *dev; + struct ipa *ipa = container_of(nb, struct ipa, nb); + struct qcom_ssr_notify_data *notify_data = data; + struct device *dev = &ipa->pdev->dev; - dev = &ipa->pdev->dev; - switch (event) { - case MODEM_STARTING: + switch (action) { + case QCOM_SSR_BEFORE_POWERUP: dev_info(dev, "received modem starting event\n"); ipa_smp2p_notify_reset(ipa); break; - case MODEM_RUNNING: + case QCOM_SSR_AFTER_POWERUP: dev_info(dev, "received modem running event\n"); break; - case MODEM_STOPPING: - case MODEM_CRASHED: + case QCOM_SSR_BEFORE_SHUTDOWN: dev_info(dev, "received modem %s event\n", - event == MODEM_STOPPING ? "stopping" - : "crashed"); + notify_data->crashed ? "crashed" : "stopping"); if (ipa->setup_complete) ipa_modem_crashed(ipa); break; - case MODEM_OFFLINE: + case QCOM_SSR_AFTER_SHUTDOWN: dev_info(dev, "received modem offline event\n"); break; - case MODEM_REMOVING: - dev_info(dev, "received modem stopping event\n"); - break; - default: - dev_err(&ipa->pdev->dev, "unrecognized event %u\n", event); + dev_err(dev, "received unrecognized event %lu\n", action); break; } + + return NOTIFY_OK; } int ipa_modem_init(struct ipa *ipa, bool modem_init) @@ -362,13 +359,30 @@ void ipa_modem_exit(struct ipa *ipa) int ipa_modem_config(struct ipa *ipa) { - return qcom_register_ipa_notify(ipa->modem_rproc, ipa_modem_notify, - ipa); + void *notifier; + + ipa->nb.notifier_call = ipa_modem_notify; + + notifier = qcom_register_ssr_notifier("mpss", &ipa->nb); + if (IS_ERR(notifier)) + return PTR_ERR(notifier); + + ipa->notifier = notifier; + + return 0; } void ipa_modem_deconfig(struct ipa *ipa) { - qcom_deregister_ipa_notify(ipa->modem_rproc); + struct device *dev = &ipa->pdev->dev; + int ret; + + ret = qcom_unregister_ssr_notifier(ipa->notifier, &ipa->nb); + if (ret) + dev_err(dev, "error %d unregistering notifier", ret); + + ipa->notifier = NULL; + memset(&ipa->nb, 0, sizeof(ipa->nb)); } int ipa_modem_setup(struct ipa *ipa) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index a7610eb55f30..1901ba277413 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -208,13 +208,6 @@ static int mv3310_hwmon_config(struct phy_device *phydev, bool enable) MV_V2_TEMP_CTRL_MASK, val); } -static void mv3310_hwmon_disable(void *data) -{ - struct phy_device *phydev = data; - - mv3310_hwmon_config(phydev, false); -} - static int mv3310_hwmon_probe(struct phy_device *phydev) { struct device *dev = &phydev->mdio.dev; @@ -238,10 +231,6 @@ static int mv3310_hwmon_probe(struct phy_device *phydev) if (ret) return ret; - ret = devm_add_action_or_reset(dev, mv3310_hwmon_disable, phydev); - if (ret) - return ret; - priv->hwmon_dev = devm_hwmon_device_register_with_info(dev, priv->hwmon_name, phydev, &mv3310_hwmon_chip_info, NULL); @@ -426,6 +415,11 @@ static int mv3310_probe(struct phy_device *phydev) return phy_sfp_probe(phydev, &mv3310_sfp_ops); } +static void mv3310_remove(struct phy_device *phydev) +{ + mv3310_hwmon_config(phydev, false); +} + static int mv3310_suspend(struct phy_device *phydev) { return mv3310_power_down(phydev); @@ -784,6 +778,7 @@ static struct phy_driver mv3310_drivers[] = { .read_status = mv3310_read_status, .get_tunable = mv3310_get_tunable, .set_tunable = mv3310_set_tunable, + .remove = mv3310_remove, }, { .phy_id = MARVELL_PHY_ID_88E2110, @@ -798,6 +793,7 @@ static 
struct phy_driver mv3310_drivers[] = { .read_status = mv3310_read_status, .get_tunable = mv3310_get_tunable, .set_tunable = mv3310_set_tunable, + .remove = mv3310_remove, }, }; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 1b9523595839..57d44648c8dd 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -615,7 +615,9 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, if (c45_ids) dev->c45_ids = *c45_ids; dev->irq = bus->irq[addr]; + dev_set_name(&mdiodev->dev, PHY_ID_FMT, bus->id, addr); + device_initialize(&mdiodev->dev); dev->state = PHY_DOWN; @@ -649,10 +651,8 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id, ret = phy_request_driver_module(dev, phy_id); } - if (!ret) { - device_initialize(&mdiodev->dev); - } else { - kfree(dev); + if (ret) { + put_device(&mdiodev->dev); dev = ERR_PTR(ret); } diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 7d39f998535d..2b02fefd094d 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1504,7 +1504,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa) sa->sa_family = dev->type; - ret = eth_platform_get_mac_address(&dev->dev, sa->sa_data); + ret = eth_platform_get_mac_address(&tp->udev->dev, sa->sa_data); if (ret < 0) { if (tp->version == RTL_VER_01) { ret = pla_ocp_read(tp, PLA_IDR, 8, sa->sa_data); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6fa8fe5ef160..0ada48edf749 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2264,12 +2264,13 @@ static void virtnet_update_settings(struct virtnet_info *vi) if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX)) return; - speed = virtio_cread32(vi->vdev, offsetof(struct virtio_net_config, - speed)); + virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed); + if (ethtool_validate_speed(speed)) vi->speed = speed; - duplex = virtio_cread8(vi->vdev, offsetof(struct virtio_net_config, - duplex)); + + virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex); + if (ethtool_validate_duplex(duplex)) vi->duplex = duplex; } diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index ca395f9679d0..2818015324b8 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -886,7 +886,8 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, switch (protocol) { case IPPROTO_TCP: - ctx->l4_hdr_size = tcp_hdrlen(skb); + ctx->l4_hdr_size = skb->encapsulation ? inner_tcp_hdrlen(skb) : + tcp_hdrlen(skb); break; case IPPROTO_UDP: ctx->l4_hdr_size = sizeof(struct udphdr); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index b2868433718f..1ea15f2123ed 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -157,6 +157,12 @@ static netdev_tx_t lapbeth_xmit(struct sk_buff *skb, if (!netif_running(dev)) goto drop; + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. 
+ */ + if (skb->len < 1) + goto drop; + switch (skb->data[0]) { case X25_IFACE_DATA: break; @@ -305,6 +311,7 @@ static void lapbeth_setup(struct net_device *dev) dev->netdev_ops = &lapbeth_netdev_ops; dev->needs_free_netdev = true; dev->type = ARPHRD_X25; + dev->hard_header_len = 0; dev->mtu = 1000; dev->addr_len = 0; } @@ -331,7 +338,8 @@ static int lapbeth_new_device(struct net_device *dev) * then this driver prepends a length field of 2 bytes, * then the underlying Ethernet device prepends its own header. */ - ndev->hard_header_len = -1 + 3 + 2 + dev->hard_header_len; + ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len + + dev->needed_headroom; lapbeth = netdev_priv(ndev); lapbeth->axdev = ndev; diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 84640a0c13f3..de7984463595 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c @@ -307,6 +307,14 @@ static netdev_tx_t x25_asy_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + /* There should be a pseudo header of 1 byte added by upper layers. + * Check to make sure it is there before reading it. + */ + if (skb->len < 1) { + kfree_skb(skb); + return NETDEV_TX_OK; + } + switch (skb->data[0]) { case X25_IFACE_DATA: break; @@ -752,6 +760,12 @@ static void x25_asy_setup(struct net_device *dev) dev->type = ARPHRD_X25; dev->tx_queue_len = 10; + /* When transmitting data: + * first this driver removes a pseudo header of 1 byte, + * then the lapb module prepends an LAPB header of at most 3 bytes. + */ + dev->needed_headroom = 3 - 1; + /* New-style flags. */ dev->flags = IFF_NOARP; } diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h index 7948a2da195a..2ff00800d45b 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/rtl8180.h @@ -150,17 +150,17 @@ void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data); void rtl8180_set_anaparam(struct rtl8180_priv *priv, u32 anaparam); void rtl8180_set_anaparam2(struct rtl8180_priv *priv, u32 anaparam2); -static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, u8 __iomem *addr) +static inline u8 rtl818x_ioread8(struct rtl8180_priv *priv, const u8 __iomem *addr) { return ioread8(addr); } -static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, __le16 __iomem *addr) +static inline u16 rtl818x_ioread16(struct rtl8180_priv *priv, const __le16 __iomem *addr) { return ioread16(addr); } -static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, __le32 __iomem *addr) +static inline u32 rtl818x_ioread32(struct rtl8180_priv *priv, const __le32 __iomem *addr) { return ioread32(addr); } diff --git a/drivers/ntb/hw/intel/ntb_hw_gen1.c b/drivers/ntb/hw/intel/ntb_hw_gen1.c index 423f9b8fbbcf..3185efeab487 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen1.c +++ b/drivers/ntb/hw/intel/ntb_hw_gen1.c @@ -1205,7 +1205,7 @@ int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, ndev->peer_reg->spad); } -static u64 xeon_db_ioread(void __iomem *mmio) +static u64 xeon_db_ioread(const void __iomem *mmio) { return (u64)ioread16(mmio); } diff --git a/drivers/ntb/hw/intel/ntb_hw_gen3.h b/drivers/ntb/hw/intel/ntb_hw_gen3.h index 2bc5d8356045..dea93989942d 100644 --- a/drivers/ntb/hw/intel/ntb_hw_gen3.h +++ b/drivers/ntb/hw/intel/ntb_hw_gen3.h @@ -91,7 +91,7 @@ #define GEN3_DB_TOTAL_SHIFT 33 #define GEN3_SPAD_COUNT 16 -static inline u64 gen3_db_ioread(void __iomem *mmio) +static inline u64 gen3_db_ioread(const void __iomem 
*mmio) { return ioread64(mmio); } diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h index d61fcd91714b..05e2335c9596 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.h +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h @@ -103,7 +103,7 @@ struct intel_ntb_dev; struct intel_ntb_reg { int (*poll_link)(struct intel_ntb_dev *ndev); int (*link_is_up)(struct intel_ntb_dev *ndev); - u64 (*db_ioread)(void __iomem *mmio); + u64 (*db_ioread)(const void __iomem *mmio); void (*db_iowrite)(u64 db_bits, void __iomem *mmio); unsigned long ntb_ctl; resource_size_t db_size; diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 412d21d8f643..0ff610e728ff 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1490,10 +1490,8 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector, { struct btt *btt = bdev->bd_disk->private_data; int rc; - unsigned int len; - len = hpage_nr_pages(page) * PAGE_SIZE; - rc = btt_do_bvec(btt, NULL, page, len, 0, op, sector); + rc = btt_do_bvec(btt, NULL, page, thp_size(page), 0, op, sector); if (rc == 0) page_endio(page, op_is_write(op), 0); diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index 09087c38fabd..955265656b96 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -1037,9 +1037,25 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, dimm_name = "bus"; } + /* Validate command family support against bus declared support */ if (cmd == ND_CMD_CALL) { + unsigned long *mask; + if (copy_from_user(&pkg, p, sizeof(pkg))) return -EFAULT; + + if (nvdimm) { + if (pkg.nd_family > NVDIMM_FAMILY_MAX) + return -EINVAL; + mask = &nd_desc->dimm_family_mask; + } else { + if (pkg.nd_family > NVDIMM_BUS_FAMILY_MAX) + return -EINVAL; + mask = &nd_desc->bus_family_mask; + } + + if (!test_bit(pkg.nd_family, mask)) + return -EINVAL; } if (!desc || diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index fe9bd6febdd2..c21ba0602029 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -4,6 +4,7 @@ */ #include <linux/libnvdimm.h> #include <linux/badblocks.h> +#include <linux/suspend.h> #include <linux/export.h> #include <linux/module.h> #include <linux/blkdev.h> @@ -389,8 +390,156 @@ static const struct attribute_group nvdimm_bus_attribute_group = { .attrs = nvdimm_bus_attributes, }; +static ssize_t capability_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + switch (cap) { + case NVDIMM_FWA_CAP_QUIESCE: + return sprintf(buf, "quiesce\n"); + case NVDIMM_FWA_CAP_LIVE: + return sprintf(buf, "live\n"); + default: + return -EOPNOTSUPP; + } +} + +static DEVICE_ATTR_RO(capability); + +static ssize_t activate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + enum nvdimm_fwa_state state; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + state = nd_desc->fw_ops->activate_state(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return -EOPNOTSUPP; + + switch (state) { + case NVDIMM_FWA_IDLE: + return 
sprintf(buf, "idle\n"); + case NVDIMM_FWA_BUSY: + return sprintf(buf, "busy\n"); + case NVDIMM_FWA_ARMED: + return sprintf(buf, "armed\n"); + case NVDIMM_FWA_ARM_OVERFLOW: + return sprintf(buf, "overflow\n"); + default: + return -ENXIO; + } +} + +static int exec_firmware_activate(void *data) +{ + struct nvdimm_bus_descriptor *nd_desc = data; + + return nd_desc->fw_ops->activate(nd_desc); +} + +static ssize_t activate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_state state; + bool quiesce; + ssize_t rc; + + if (!nd_desc->fw_ops) + return -EOPNOTSUPP; + + if (sysfs_streq(buf, "live")) + quiesce = false; + else if (sysfs_streq(buf, "quiesce")) + quiesce = true; + else + return -EINVAL; + + nvdimm_bus_lock(dev); + state = nd_desc->fw_ops->activate_state(nd_desc); + + switch (state) { + case NVDIMM_FWA_BUSY: + rc = -EBUSY; + break; + case NVDIMM_FWA_ARMED: + case NVDIMM_FWA_ARM_OVERFLOW: + if (quiesce) + rc = hibernate_quiet_exec(exec_firmware_activate, nd_desc); + else + rc = nd_desc->fw_ops->activate(nd_desc); + break; + case NVDIMM_FWA_IDLE: + default: + rc = -ENXIO; + } + nvdimm_bus_unlock(dev); + + if (rc == 0) + rc = len; + return rc; +} + +static DEVICE_ATTR_ADMIN_RW(activate); + +static umode_t nvdimm_bus_firmware_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + enum nvdimm_fwa_capability cap; + + /* + * Both 'activate' and 'capability' disappear when no ops + * detected, or a negative capability is indicated. 
+ */ + if (!nd_desc->fw_ops) + return 0; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return 0; + + return a->mode; +} +static struct attribute *nvdimm_bus_firmware_attributes[] = { + &dev_attr_activate.attr, + &dev_attr_capability.attr, + NULL, +}; + +static const struct attribute_group nvdimm_bus_firmware_attribute_group = { + .name = "firmware", + .attrs = nvdimm_bus_firmware_attributes, + .is_visible = nvdimm_bus_firmware_visible, +}; + const struct attribute_group *nvdimm_bus_attribute_groups[] = { &nvdimm_bus_attribute_group, + &nvdimm_bus_firmware_attribute_group, NULL, }; diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index b7b77e8d9027..61374def5155 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -363,14 +363,14 @@ __weak ssize_t security_show(struct device *dev, { struct nvdimm *nvdimm = to_nvdimm(dev); + if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags)) + return sprintf(buf, "overwrite\n"); if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags)) return sprintf(buf, "disabled\n"); if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags)) return sprintf(buf, "unlocked\n"); if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags)) return sprintf(buf, "locked\n"); - if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags)) - return sprintf(buf, "overwrite\n"); return -ENOTTY; } @@ -446,9 +446,124 @@ static const struct attribute_group nvdimm_attribute_group = { .is_visible = nvdimm_visible, }; +static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_result result; + + if (!nvdimm->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + result = nvdimm->fw_ops->activate_result(nvdimm); + nvdimm_bus_unlock(dev); + + switch (result) { + case NVDIMM_FWA_RESULT_NONE: + return sprintf(buf, "none\n"); + case NVDIMM_FWA_RESULT_SUCCESS: + return sprintf(buf, "success\n"); + case NVDIMM_FWA_RESULT_FAIL: + return sprintf(buf, "fail\n"); + case NVDIMM_FWA_RESULT_NOTSTAGED: + return sprintf(buf, "not_staged\n"); + case NVDIMM_FWA_RESULT_NEEDRESET: + return sprintf(buf, "need_reset\n"); + default: + return -ENXIO; + } +} +static DEVICE_ATTR_ADMIN_RO(result); + +static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_state state; + + if (!nvdimm->fw_ops) + return -EOPNOTSUPP; + + nvdimm_bus_lock(dev); + state = nvdimm->fw_ops->activate_state(nvdimm); + nvdimm_bus_unlock(dev); + + switch (state) { + case NVDIMM_FWA_IDLE: + return sprintf(buf, "idle\n"); + case NVDIMM_FWA_BUSY: + return sprintf(buf, "busy\n"); + case NVDIMM_FWA_ARMED: + return sprintf(buf, "armed\n"); + default: + return -ENXIO; + } +} + +static ssize_t activate_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t len) +{ + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_trigger arg; + int rc; + + if (!nvdimm->fw_ops) + return -EOPNOTSUPP; + + if (sysfs_streq(buf, "arm")) + arg = NVDIMM_FWA_ARM; + else if (sysfs_streq(buf, "disarm")) + arg = NVDIMM_FWA_DISARM; + else + return -EINVAL; + + nvdimm_bus_lock(dev); + rc = nvdimm->fw_ops->arm(nvdimm, arg); + nvdimm_bus_unlock(dev); + + if (rc < 0) + return rc; + return len; +} +static DEVICE_ATTR_ADMIN_RW(activate); + +static struct attribute *nvdimm_firmware_attributes[] = { + 
&dev_attr_activate.attr, + &dev_attr_result.attr, +}; + +static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n) +{ + struct device *dev = container_of(kobj, typeof(*dev), kobj); + struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev); + struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; + struct nvdimm *nvdimm = to_nvdimm(dev); + enum nvdimm_fwa_capability cap; + + if (!nd_desc->fw_ops) + return 0; + if (!nvdimm->fw_ops) + return 0; + + nvdimm_bus_lock(dev); + cap = nd_desc->fw_ops->capability(nd_desc); + nvdimm_bus_unlock(dev); + + if (cap < NVDIMM_FWA_CAP_QUIESCE) + return 0; + + return a->mode; +} + +static const struct attribute_group nvdimm_firmware_attribute_group = { + .name = "firmware", + .attrs = nvdimm_firmware_attributes, + .is_visible = nvdimm_firmware_visible, +}; + static const struct attribute_group *nvdimm_attribute_groups[] = { &nd_device_attribute_group, &nvdimm_attribute_group, + &nvdimm_firmware_attribute_group, NULL, }; @@ -467,7 +582,8 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, void *provider_data, const struct attribute_group **groups, unsigned long flags, unsigned long cmd_mask, int num_flush, struct resource *flush_wpq, const char *dimm_id, - const struct nvdimm_security_ops *sec_ops) + const struct nvdimm_security_ops *sec_ops, + const struct nvdimm_fw_ops *fw_ops) { struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL); struct device *dev; @@ -497,6 +613,7 @@ struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus, dev->devt = MKDEV(nvdimm_major, nvdimm->id); dev->groups = groups; nvdimm->sec.ops = sec_ops; + nvdimm->fw_ops = fw_ops; nvdimm->sec.overwrite_tmo = 0; INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query); /* diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index ae155e860fdc..6da67f4d641a 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1309,7 +1309,7 @@ static ssize_t resource_show(struct device *dev, return -ENXIO; return sprintf(buf, "%#llx\n", (unsigned long long) res->start); } -static DEVICE_ATTR(resource, 0400, resource_show, NULL); +static DEVICE_ATTR_ADMIN_RO(resource); static const unsigned long blk_lbasize_supported[] = { 512, 520, 528, 4096, 4104, 4160, 4224, 0 }; diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h index ddb9d97d9129..564faa36a3ca 100644 --- a/drivers/nvdimm/nd-core.h +++ b/drivers/nvdimm/nd-core.h @@ -45,6 +45,7 @@ struct nvdimm { struct kernfs_node *overwrite_state; } sec; struct delayed_work dwork; + const struct nvdimm_fw_ops *fw_ops; }; static inline unsigned long nvdimm_security_flags( diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index 34db557dbad1..3e11ef8d3f5b 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -218,7 +218,7 @@ static ssize_t resource_show(struct device *dev, return rc; } -static DEVICE_ATTR(resource, 0400, resource_show, NULL); +static DEVICE_ATTR_ADMIN_RO(resource); static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 94790e6e0e4c..fab29b514372 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -238,11 +238,9 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, blk_status_t rc; if (op_is_write(op)) - rc = pmem_do_write(pmem, page, 0, sector, - hpage_nr_pages(page) * PAGE_SIZE); + rc = pmem_do_write(pmem, page, 0, sector, thp_size(page)); else - rc = 
pmem_do_read(pmem, page, 0, sector, - hpage_nr_pages(page) * PAGE_SIZE); + rc = pmem_do_read(pmem, page, 0, sector, thp_size(page)); /* * The ->rw_page interface is subtle and tricky. The core * retries on any error, so we can only invoke page_endio() in diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index c3237c2b03a6..ef23119db574 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -605,7 +605,7 @@ static ssize_t resource_show(struct device *dev, return sprintf(buf, "%#llx\n", nd_region->ndr_start); } -static DEVICE_ATTR(resource, 0400, resource_show, NULL); +static DEVICE_ATTR_ADMIN_RO(resource); static ssize_t persistence_domain_show(struct device *dev, struct device_attribute *attr, char *buf) diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c index 4cef69bd3c1b..4b80150e4afa 100644 --- a/drivers/nvdimm/security.c +++ b/drivers/nvdimm/security.c @@ -450,14 +450,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm) else dev_dbg(&nvdimm->dev, "overwrite completed\n"); - if (nvdimm->sec.overwrite_state) - sysfs_notify_dirent(nvdimm->sec.overwrite_state); + /* + * Mark the overwrite work done and update dimm security flags, + * then send a sysfs event notification to wake up userspace + * poll threads to pick up the changed state. + */ nvdimm->sec.overwrite_tmo = 0; clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags); clear_bit(NDD_WORK_PENDING, &nvdimm->flags); - put_device(&nvdimm->dev); nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER); - nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER); + if (nvdimm->sec.overwrite_state) + sysfs_notify_dirent(nvdimm->sec.overwrite_state); + put_device(&nvdimm->dev); } void nvdimm_security_overwrite_query(struct work_struct *work) diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c index 5e3d07b47e0c..726c7354d465 100644 --- a/drivers/nvdimm/virtio_pmem.c +++ b/drivers/nvdimm/virtio_pmem.c @@ -58,9 +58,9 @@ static int virtio_pmem_probe(struct virtio_device *vdev) goto out_err; } - virtio_cread(vpmem->vdev, struct virtio_pmem_config, + virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, start, &vpmem->start); - virtio_cread(vpmem->vdev, struct virtio_pmem_config, + virtio_cread_le(vpmem->vdev, struct virtio_pmem_config, size, &vpmem->size); res.start = vpmem->start; diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 5368452eb5a6..d4314fba0269 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c @@ -1270,7 +1270,7 @@ sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num) ** (one that doesn't overlap memory or LMMIO space) in the ** IBASE and IMASK registers. 
*/ - ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE); + ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL; iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1; if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) { diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 79c4a2ef269a..46935695cfb9 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -204,17 +204,13 @@ EXPORT_SYMBOL(pci_bus_set_ops); static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait); static noinline void pci_wait_cfg(struct pci_dev *dev) + __must_hold(&pci_lock) { - DECLARE_WAITQUEUE(wait, current); - - __add_wait_queue(&pci_cfg_wait, &wait); do { - set_current_state(TASK_UNINTERRUPTIBLE); raw_spin_unlock_irq(&pci_lock); - schedule(); + wait_event(pci_cfg_wait, !dev->block_cfg_access); raw_spin_lock_irq(&pci_lock); } while (dev->block_cfg_access); - __remove_wait_queue(&pci_cfg_wait, &wait); } /* Returns 0 on success, negative values indicate error. */ @@ -409,7 +405,7 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) *val = 0; if (pos & 1) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); @@ -444,7 +440,7 @@ int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val) *val = 0; if (pos & 3) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (pcie_capability_reg_implemented(dev, pos)) { ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val); @@ -469,7 +465,7 @@ EXPORT_SYMBOL(pcie_capability_read_dword); int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) { if (pos & 1) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (!pcie_capability_reg_implemented(dev, pos)) return 0; @@ -481,7 +477,7 @@ EXPORT_SYMBOL(pcie_capability_write_word); int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val) { if (pos & 3) - return -EINVAL; + return PCIBIOS_BAD_REGISTER_NUMBER; if (!pcie_capability_reg_implemented(dev, pos)) return 0; diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index b761c1f72f67..0d3719407b8b 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -188,7 +188,8 @@ void pci_pri_init(struct pci_dev *pdev) /** * pci_enable_pri - Enable PRI capability - * @ pdev: PCI device structure + * @pdev: PCI device structure + * @reqs: outstanding requests * * Returns 0 on success, negative value on error */ @@ -325,6 +326,21 @@ int pci_prg_resp_pasid_required(struct pci_dev *pdev) return pdev->pasid_required; } + +/** + * pci_pri_supported - Check if PRI is supported. + * @pdev: PCI device structure + * + * Returns true if PRI capability is present, false otherwise. 
+ */ +bool pci_pri_supported(struct pci_dev *pdev) +{ + /* VFs share the PF PRI */ + if (pci_physfn(pdev)->pri_cap) + return true; + return false; +} +EXPORT_SYMBOL_GPL(pci_pri_supported); #endif /* CONFIG_PCI_PRI */ #ifdef CONFIG_PCI_PASID diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 8e40b3e6da77..3cef835b375f 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev) dev->match_driver = true; retval = device_attach(&dev->dev); - if (retval < 0 && retval != -EPROBE_DEFER) { + if (retval < 0 && retval != -EPROBE_DEFER) pci_warn(dev, "device attach failed (%d)\n", retval); - pci_proc_detach_device(dev); - pci_remove_sysfs_dev_files(dev); - return; - } pci_dev_assign_added(dev, true); } diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index adddf21fa381..f18c3725ef80 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -99,6 +99,14 @@ config PCIE_XILINX Say 'Y' here if you want kernel to support the Xilinx AXI PCIe Host Bridge driver. +config PCIE_XILINX_CPM + bool "Xilinx Versal CPM host bridge support" + depends on ARCH_ZYNQMP || COMPILE_TEST + select PCI_HOST_COMMON + help + Say 'Y' here if you want kernel support for the + Xilinx Versal CPM host bridge. + config PCI_XGENE bool "X-Gene PCIe controller" depends on ARM64 || COMPILE_TEST diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index efd9733ead26..bcdbf49ab1e4 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_PCI_HOST_COMMON) += pci-host-common.o obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o obj-$(CONFIG_PCIE_XILINX_NWL) += pcie-xilinx-nwl.o +obj-$(CONFIG_PCIE_XILINX_CPM) += pcie-xilinx-cpm.o obj-$(CONFIG_PCI_V3_SEMI) += pci-v3-semi.o obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o diff --git a/drivers/pci/controller/cadence/Kconfig b/drivers/pci/controller/cadence/Kconfig index b76b3cf55ce5..5d30564190e1 100644 --- a/drivers/pci/controller/cadence/Kconfig +++ b/drivers/pci/controller/cadence/Kconfig @@ -42,4 +42,27 @@ config PCIE_CADENCE_PLAT_EP endpoint mode. This PCIe controller may be embedded into many different vendors SoCs. +config PCI_J721E + bool + +config PCI_J721E_HOST + bool "TI J721E PCIe platform host controller" + depends on OF + select PCIE_CADENCE_HOST + select PCI_J721E + help + Say Y here if you want to support the TI J721E PCIe platform + controller in host mode. TI J721E PCIe controller uses Cadence PCIe + core. + +config PCI_J721E_EP + bool "TI J721E PCIe platform endpoint controller" + depends on OF + depends on PCI_ENDPOINT + select PCIE_CADENCE_EP + select PCI_J721E + help + Say Y here if you want to support the TI J721E PCIe platform + controller in endpoint mode. TI J721E PCIe controller uses Cadence PCIe + core. 
endmenu diff --git a/drivers/pci/controller/cadence/Makefile b/drivers/pci/controller/cadence/Makefile index 232a3f20876a..9bac5fb2f13d 100644 --- a/drivers/pci/controller/cadence/Makefile +++ b/drivers/pci/controller/cadence/Makefile @@ -3,3 +3,4 @@ obj-$(CONFIG_PCIE_CADENCE) += pcie-cadence.o obj-$(CONFIG_PCIE_CADENCE_HOST) += pcie-cadence-host.o obj-$(CONFIG_PCIE_CADENCE_EP) += pcie-cadence-ep.o obj-$(CONFIG_PCIE_CADENCE_PLAT) += pcie-cadence-plat.o +obj-$(CONFIG_PCI_J721E) += pci-j721e.o diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c new file mode 100644 index 000000000000..586b9d69fa5e --- /dev/null +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * pci-j721e - PCIe controller driver for TI's J721E SoCs + * + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com + * Author: Kishon Vijay Abraham I <kishon@ti.com> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/io.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/mfd/syscon.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include "../../pci.h" +#include "pcie-cadence.h" + +#define ENABLE_REG_SYS_2 0x108 +#define STATUS_REG_SYS_2 0x508 +#define STATUS_CLR_REG_SYS_2 0x708 +#define LINK_DOWN BIT(1) + +#define J721E_PCIE_USER_CMD_STATUS 0x4 +#define LINK_TRAINING_ENABLE BIT(0) + +#define J721E_PCIE_USER_LINKSTATUS 0x14 +#define LINK_STATUS GENMASK(1, 0) + +enum link_status { + NO_RECEIVERS_DETECTED, + LINK_TRAINING_IN_PROGRESS, + LINK_UP_DL_IN_PROGRESS, + LINK_UP_DL_COMPLETED, +}; + +#define J721E_MODE_RC BIT(7) +#define LANE_COUNT_MASK BIT(8) +#define LANE_COUNT(n) ((n) << 8) + +#define GENERATION_SEL_MASK GENMASK(1, 0) + +#define MAX_LANES 2 + +struct j721e_pcie { + struct device *dev; + u32 mode; + u32 num_lanes; + struct cdns_pcie *cdns_pcie; + void __iomem *user_cfg_base; + void __iomem *intd_cfg_base; +}; + +enum j721e_pcie_mode { + PCI_MODE_RC, + PCI_MODE_EP, +}; + +struct j721e_pcie_data { + enum j721e_pcie_mode mode; +}; + +static inline u32 j721e_pcie_user_readl(struct j721e_pcie *pcie, u32 offset) +{ + return readl(pcie->user_cfg_base + offset); +} + +static inline void j721e_pcie_user_writel(struct j721e_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->user_cfg_base + offset); +} + +static inline u32 j721e_pcie_intd_readl(struct j721e_pcie *pcie, u32 offset) +{ + return readl(pcie->intd_cfg_base + offset); +} + +static inline void j721e_pcie_intd_writel(struct j721e_pcie *pcie, u32 offset, + u32 value) +{ + writel(value, pcie->intd_cfg_base + offset); +} + +static irqreturn_t j721e_pcie_link_irq_handler(int irq, void *priv) +{ + struct j721e_pcie *pcie = priv; + struct device *dev = pcie->dev; + u32 reg; + + reg = j721e_pcie_intd_readl(pcie, STATUS_REG_SYS_2); + if (!(reg & LINK_DOWN)) + return IRQ_NONE; + + dev_err(dev, "LINK DOWN!\n"); + + j721e_pcie_intd_writel(pcie, STATUS_CLR_REG_SYS_2, LINK_DOWN); + return IRQ_HANDLED; +} + +static void j721e_pcie_config_link_irq(struct j721e_pcie *pcie) +{ + u32 reg; + + reg = j721e_pcie_intd_readl(pcie, ENABLE_REG_SYS_2); + reg |= LINK_DOWN; + j721e_pcie_intd_writel(pcie, ENABLE_REG_SYS_2, reg); +} + +static int j721e_pcie_start_link(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, 
J721E_PCIE_USER_CMD_STATUS); + reg |= LINK_TRAINING_ENABLE; + j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); + + return 0; +} + +static void j721e_pcie_stop_link(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_CMD_STATUS); + reg &= ~LINK_TRAINING_ENABLE; + j721e_pcie_user_writel(pcie, J721E_PCIE_USER_CMD_STATUS, reg); +} + +static bool j721e_pcie_link_up(struct cdns_pcie *cdns_pcie) +{ + struct j721e_pcie *pcie = dev_get_drvdata(cdns_pcie->dev); + u32 reg; + + reg = j721e_pcie_user_readl(pcie, J721E_PCIE_USER_LINKSTATUS); + reg &= LINK_STATUS; + if (reg == LINK_UP_DL_COMPLETED) + return true; + + return false; +} + +static const struct cdns_pcie_ops j721e_pcie_ops = { + .start_link = j721e_pcie_start_link, + .stop_link = j721e_pcie_stop_link, + .link_up = j721e_pcie_link_up, +}; + +static int j721e_pcie_set_mode(struct j721e_pcie *pcie, struct regmap *syscon) +{ + struct device *dev = pcie->dev; + u32 mask = J721E_MODE_RC; + u32 mode = pcie->mode; + u32 val = 0; + int ret = 0; + + if (mode == PCI_MODE_RC) + val = J721E_MODE_RC; + + ret = regmap_update_bits(syscon, 0, mask, val); + if (ret) + dev_err(dev, "failed to set pcie mode\n"); + + return ret; +} + +static int j721e_pcie_set_link_speed(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->dev; + struct device_node *np = dev->of_node; + int link_speed; + u32 val = 0; + int ret; + + link_speed = of_pci_get_max_link_speed(np); + if (link_speed < 2) + link_speed = 2; + + val = link_speed - 1; + ret = regmap_update_bits(syscon, 0, GENERATION_SEL_MASK, val); + if (ret) + dev_err(dev, "failed to set link speed\n"); + + return ret; +} + +static int j721e_pcie_set_lane_count(struct j721e_pcie *pcie, + struct regmap *syscon) +{ + struct device *dev = pcie->dev; + u32 lanes = pcie->num_lanes; + u32 val = 0; + int ret; + + val = LANE_COUNT(lanes - 1); + ret = regmap_update_bits(syscon, 0, LANE_COUNT_MASK, val); + if (ret) + dev_err(dev, "failed to set link count\n"); + + return ret; +} + +static int j721e_pcie_ctrl_init(struct j721e_pcie *pcie) +{ + struct device *dev = pcie->dev; + struct device_node *node = dev->of_node; + struct regmap *syscon; + int ret; + + syscon = syscon_regmap_lookup_by_phandle(node, "ti,syscon-pcie-ctrl"); + if (IS_ERR(syscon)) { + dev_err(dev, "Unable to get ti,syscon-pcie-ctrl regmap\n"); + return PTR_ERR(syscon); + } + + ret = j721e_pcie_set_mode(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set pci mode\n"); + return ret; + } + + ret = j721e_pcie_set_link_speed(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set link speed\n"); + return ret; + } + + ret = j721e_pcie_set_lane_count(pcie, syscon); + if (ret < 0) { + dev_err(dev, "Failed to set num-lanes\n"); + return ret; + } + + return 0; +} + +static int cdns_ti_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *value) +{ + if (pci_is_root_bus(bus)) + return pci_generic_config_read32(bus, devfn, where, size, + value); + + return pci_generic_config_read(bus, devfn, where, size, value); +} + +static int cdns_ti_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 value) +{ + if (pci_is_root_bus(bus)) + return pci_generic_config_write32(bus, devfn, where, size, + value); + + return pci_generic_config_write(bus, devfn, where, size, value); +} + +static struct pci_ops cdns_ti_pcie_host_ops = { + .map_bus = cdns_pci_map_bus, + .read 
= cdns_ti_pcie_config_read, + .write = cdns_ti_pcie_config_write, +}; + +static const struct j721e_pcie_data j721e_pcie_rc_data = { + .mode = PCI_MODE_RC, +}; + +static const struct j721e_pcie_data j721e_pcie_ep_data = { + .mode = PCI_MODE_EP, +}; + +static const struct of_device_id of_j721e_pcie_match[] = { + { + .compatible = "ti,j721e-pcie-host", + .data = &j721e_pcie_rc_data, + }, + { + .compatible = "ti,j721e-pcie-ep", + .data = &j721e_pcie_ep_data, + }, + {}, +}; + +static int j721e_pcie_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct pci_host_bridge *bridge; + struct j721e_pcie_data *data; + struct cdns_pcie *cdns_pcie; + struct j721e_pcie *pcie; + struct cdns_pcie_rc *rc; + struct cdns_pcie_ep *ep; + struct gpio_desc *gpiod; + void __iomem *base; + u32 num_lanes; + u32 mode; + int ret; + int irq; + + data = (struct j721e_pcie_data *)of_device_get_match_data(dev); + if (!data) + return -EINVAL; + + mode = (u32)data->mode; + + pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL); + if (!pcie) + return -ENOMEM; + + pcie->dev = dev; + pcie->mode = mode; + + base = devm_platform_ioremap_resource_byname(pdev, "intd_cfg"); + if (IS_ERR(base)) + return PTR_ERR(base); + pcie->intd_cfg_base = base; + + base = devm_platform_ioremap_resource_byname(pdev, "user_cfg"); + if (IS_ERR(base)) + return PTR_ERR(base); + pcie->user_cfg_base = base; + + ret = of_property_read_u32(node, "num-lanes", &num_lanes); + if (ret || num_lanes > MAX_LANES) + num_lanes = 1; + pcie->num_lanes = num_lanes; + + if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48))) + return -EINVAL; + + irq = platform_get_irq_byname(pdev, "link_state"); + if (irq < 0) + return irq; + + dev_set_drvdata(dev, pcie); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + ret = j721e_pcie_ctrl_init(pcie); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + goto err_get_sync; + } + + ret = devm_request_irq(dev, irq, j721e_pcie_link_irq_handler, 0, + "j721e-pcie-link-down-irq", pcie); + if (ret < 0) { + dev_err(dev, "failed to request link state IRQ %d\n", irq); + goto err_get_sync; + } + + j721e_pcie_config_link_irq(pcie); + + switch (mode) { + case PCI_MODE_RC: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) { + ret = -ENODEV; + goto err_get_sync; + } + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) { + ret = -ENOMEM; + goto err_get_sync; + } + + bridge->ops = &cdns_ti_pcie_host_ops; + rc = pci_host_bridge_priv(bridge); + + cdns_pcie = &rc->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + + gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(gpiod)) { + ret = PTR_ERR(gpiod); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Failed to get reset GPIO\n"); + goto err_get_sync; + } + + ret = cdns_pcie_init_phy(dev, cdns_pcie); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_get_sync; + } + + /* + * "Power Sequencing and Reset Signal Timings" table in + * PCI EXPRESS CARD ELECTROMECHANICAL SPECIFICATION, REV. 3.0 + * indicates PERST# should be deasserted after minimum of 100us + * once REFCLK is stable. The REFCLK to the connector in RC + * mode is selected while enabling the PHY. So deassert PERST# + * after 100 us. 
+ */ + if (gpiod) { + usleep_range(100, 200); + gpiod_set_value_cansleep(gpiod, 1); + } + + ret = cdns_pcie_host_setup(rc); + if (ret < 0) + goto err_pcie_setup; + + break; + case PCI_MODE_EP: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) { + ret = -ENODEV; + goto err_get_sync; + } + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) { + ret = -ENOMEM; + goto err_get_sync; + } + + cdns_pcie = &ep->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + + ret = cdns_pcie_init_phy(dev, cdns_pcie); + if (ret) { + dev_err(dev, "Failed to init phy\n"); + goto err_get_sync; + } + + ret = cdns_pcie_ep_setup(ep); + if (ret < 0) + goto err_pcie_setup; + + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + } + + return 0; + +err_pcie_setup: + cdns_pcie_disable_phy(cdns_pcie); + +err_get_sync: + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return ret; +} + +static int j721e_pcie_remove(struct platform_device *pdev) +{ + struct j721e_pcie *pcie = platform_get_drvdata(pdev); + struct cdns_pcie *cdns_pcie = pcie->cdns_pcie; + struct device *dev = &pdev->dev; + + cdns_pcie_disable_phy(cdns_pcie); + pm_runtime_put(dev); + pm_runtime_disable(dev); + + return 0; +} + +static struct platform_driver j721e_pcie_driver = { + .probe = j721e_pcie_probe, + .remove = j721e_pcie_remove, + .driver = { + .name = "j721e-pcie", + .of_match_table = of_j721e_pcie_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(j721e_pcie_driver); diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c index 1c15c8352125..254a3e1eff50 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-ep.c +++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c @@ -8,7 +8,6 @@ #include <linux/of.h> #include <linux/pci-epc.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include <linux/sizes.h> #include "pcie-cadence.h" @@ -52,6 +51,7 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; dma_addr_t bar_phys = epf_bar->phys_addr; enum pci_barno bar = epf_bar->barno; @@ -112,6 +112,8 @@ static int cdns_pcie_ep_set_bar(struct pci_epc *epc, u8 fn, CDNS_PCIE_LM_EP_FUNC_BAR_CFG_BAR_CTRL(b, ctrl)); cdns_pcie_writel(pcie, reg, cfg); + epf->epf_bar[bar] = epf_bar; + return 0; } @@ -119,6 +121,7 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, struct pci_epf_bar *epf_bar) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie_epf *epf = &ep->epf[fn]; struct cdns_pcie *pcie = &ep->pcie; enum pci_barno bar = epf_bar->barno; u32 reg, cfg, b, ctrl; @@ -140,6 +143,8 @@ static void cdns_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar), 0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar), 0); + + epf->epf_bar[bar] = NULL; } static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, @@ -156,7 +161,7 @@ static int cdns_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, phys_addr_t addr, return -EINVAL; } - cdns_pcie_set_outbound_region(pcie, fn, r, false, addr, pci_addr, size); + cdns_pcie_set_outbound_region(pcie, 0, fn, r, false, addr, pci_addr, size); set_bit(r, &ep->ob_region_map); ep->ob_addr[r] = addr; @@ -225,10 +230,55 @@ static int cdns_pcie_ep_get_msi(struct pci_epc *epc, u8 fn) return mme; } 
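Illustration (not part of the patch): a minimal sketch, assuming the declarations from pcie-cadence.h and <linux/pci-epf.h> are in scope, of what the epf_bar bookkeeping added to cdns_pcie_ep_set_bar() and cdns_pcie_ep_clear_bar() provides: the cached struct pci_epf_bar carries the endpoint-side virtual address of the BAR, which is how data the host programs into that BAR (for example the MSI-X table used by cdns_pcie_ep_send_msix_irq() below) can later be read back. The helper name example_ep_bar_base() is hypothetical.

/* Hypothetical helper built only on fields visible in this patch. */
static void *example_ep_bar_base(struct cdns_pcie_ep *ep, u8 fn,
				 enum pci_barno bar)
{
	struct pci_epf_bar *epf_bar = ep->epf[fn].epf_bar[bar];

	/* NULL until the endpoint function driver has called set_bar(). */
	return epf_bar ? epf_bar->addr : NULL;
}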
+static int cdns_pcie_ep_get_msix(struct pci_epc *epc, u8 func_no) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 val, reg; + + reg = cap + PCI_MSIX_FLAGS; + val = cdns_pcie_ep_fn_readw(pcie, func_no, reg); + if (!(val & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + val &= PCI_MSIX_FLAGS_QSIZE; + + return val; +} + +static int cdns_pcie_ep_set_msix(struct pci_epc *epc, u8 fn, u16 interrupts, + enum pci_barno bir, u32 offset) +{ + struct cdns_pcie_ep *ep = epc_get_drvdata(epc); + struct cdns_pcie *pcie = &ep->pcie; + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 val, reg; + + reg = cap + PCI_MSIX_FLAGS; + val = cdns_pcie_ep_fn_readw(pcie, fn, reg); + val &= ~PCI_MSIX_FLAGS_QSIZE; + val |= interrupts; + cdns_pcie_ep_fn_writew(pcie, fn, reg, val); + + /* Set MSIX BAR and offset */ + reg = cap + PCI_MSIX_TABLE; + val = offset | bir; + cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + + /* Set PBA BAR and offset. BAR must match MSIX BAR */ + reg = cap + PCI_MSIX_PBA; + val = (offset + (interrupts * PCI_MSIX_ENTRY_SIZE)) | bir; + cdns_pcie_ep_fn_writel(pcie, fn, reg, val); + + return 0; +} + static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, u8 intx, bool is_asserted) { struct cdns_pcie *pcie = &ep->pcie; + unsigned long flags; u32 offset; u16 status; u8 msg_code; @@ -239,7 +289,7 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, if (unlikely(ep->irq_pci_addr != CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY || ep->irq_pci_fn != fn)) { /* First region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region_for_normal_msg(pcie, fn, 0, + cdns_pcie_set_outbound_region_for_normal_msg(pcie, 0, fn, 0, ep->irq_phys_addr); ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_LEGACY; ep->irq_pci_fn = fn; @@ -253,11 +303,13 @@ static void cdns_pcie_ep_assert_intx(struct cdns_pcie_ep *ep, u8 fn, msg_code = MSG_CODE_DEASSERT_INTA + intx; } + spin_lock_irqsave(&ep->lock, flags); status = cdns_pcie_ep_fn_readw(pcie, fn, PCI_STATUS); if (((status & PCI_STATUS_INTERRUPT) != 0) ^ (ep->irq_pending != 0)) { status ^= PCI_STATUS_INTERRUPT; cdns_pcie_ep_fn_writew(pcie, fn, PCI_STATUS, status); } + spin_unlock_irqrestore(&ep->lock, flags); offset = CDNS_PCIE_NORMAL_MSG_ROUTING(MSG_ROUTING_LOCAL) | CDNS_PCIE_NORMAL_MSG_CODE(msg_code) | @@ -318,7 +370,7 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || ep->irq_pci_fn != fn)) { /* First region was reserved for IRQ writes. */ - cdns_pcie_set_outbound_region(pcie, fn, 0, + cdns_pcie_set_outbound_region(pcie, 0, fn, 0, false, ep->irq_phys_addr, pci_addr & ~pci_addr_mask, @@ -331,6 +383,51 @@ static int cdns_pcie_ep_send_msi_irq(struct cdns_pcie_ep *ep, u8 fn, return 0; } +static int cdns_pcie_ep_send_msix_irq(struct cdns_pcie_ep *ep, u8 fn, + u16 interrupt_num) +{ + u32 cap = CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET; + u32 tbl_offset, msg_data, reg; + struct cdns_pcie *pcie = &ep->pcie; + struct pci_epf_msix_tbl *msix_tbl; + struct cdns_pcie_epf *epf; + u64 pci_addr_mask = 0xff; + u64 msg_addr; + u16 flags; + u8 bir; + + /* Check whether the MSI-X feature has been enabled by the PCI host. 
*/ + flags = cdns_pcie_ep_fn_readw(pcie, fn, cap + PCI_MSIX_FLAGS); + if (!(flags & PCI_MSIX_FLAGS_ENABLE)) + return -EINVAL; + + reg = cap + PCI_MSIX_TABLE; + tbl_offset = cdns_pcie_ep_fn_readl(pcie, fn, reg); + bir = tbl_offset & PCI_MSIX_TABLE_BIR; + tbl_offset &= PCI_MSIX_TABLE_OFFSET; + + epf = &ep->epf[fn]; + msix_tbl = epf->epf_bar[bir]->addr + tbl_offset; + msg_addr = msix_tbl[(interrupt_num - 1)].msg_addr; + msg_data = msix_tbl[(interrupt_num - 1)].msg_data; + + /* Set the outbound region if needed. */ + if (ep->irq_pci_addr != (msg_addr & ~pci_addr_mask) || + ep->irq_pci_fn != fn) { + /* First region was reserved for IRQ writes. */ + cdns_pcie_set_outbound_region(pcie, 0, fn, 0, + false, + ep->irq_phys_addr, + msg_addr & ~pci_addr_mask, + pci_addr_mask + 1); + ep->irq_pci_addr = (msg_addr & ~pci_addr_mask); + ep->irq_pci_fn = fn; + } + writel(msg_data, ep->irq_cpu_addr + (msg_addr & pci_addr_mask)); + + return 0; +} + static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, enum pci_epc_irq_type type, u16 interrupt_num) @@ -344,6 +441,9 @@ static int cdns_pcie_ep_raise_irq(struct pci_epc *epc, u8 fn, case PCI_EPC_IRQ_MSI: return cdns_pcie_ep_send_msi_irq(ep, fn, interrupt_num); + case PCI_EPC_IRQ_MSIX: + return cdns_pcie_ep_send_msix_irq(ep, fn, interrupt_num); + default: break; } @@ -355,8 +455,10 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) { struct cdns_pcie_ep *ep = epc_get_drvdata(epc); struct cdns_pcie *pcie = &ep->pcie; + struct device *dev = pcie->dev; struct pci_epf *epf; u32 cfg; + int ret; /* * BIT(0) is hardwired to 1, hence function 0 is always enabled @@ -367,13 +469,19 @@ static int cdns_pcie_ep_start(struct pci_epc *epc) cfg |= BIT(epf->func_no); cdns_pcie_writel(pcie, CDNS_PCIE_LM_EP_FUNC_CFG, cfg); + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; + } + return 0; } static const struct pci_epc_features cdns_pcie_epc_features = { .linkup_notifier = false, .msi_capable = true, - .msix_capable = false, + .msix_capable = true, }; static const struct pci_epc_features* @@ -390,6 +498,8 @@ static const struct pci_epc_ops cdns_pcie_epc_ops = { .unmap_addr = cdns_pcie_ep_unmap_addr, .set_msi = cdns_pcie_ep_set_msi, .get_msi = cdns_pcie_ep_get_msi, + .set_msix = cdns_pcie_ep_set_msix, + .get_msix = cdns_pcie_ep_get_msix, .raise_irq = cdns_pcie_ep_raise_irq, .start = cdns_pcie_ep_start, .get_features = cdns_pcie_ep_get_features, @@ -408,8 +518,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) pcie->is_rc = false; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); - pcie->reg_base = devm_ioremap_resource(dev, res); + pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); if (IS_ERR(pcie->reg_base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(pcie->reg_base); @@ -440,8 +549,7 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) epc = devm_pci_epc_create(dev, &cdns_pcie_epc_ops); if (IS_ERR(epc)) { dev_err(dev, "failed to create epc device\n"); - ret = PTR_ERR(epc); - goto err_init; + return PTR_ERR(epc); } epc_set_drvdata(epc, ep); @@ -449,11 +557,16 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) if (of_property_read_u8(np, "max-functions", &epc->max_functions) < 0) epc->max_functions = 1; + ep->epf = devm_kcalloc(dev, epc->max_functions, sizeof(*ep->epf), + GFP_KERNEL); + if (!ep->epf) + return -ENOMEM; + ret = pci_epc_mem_init(epc, pcie->mem_res->start, resource_size(pcie->mem_res), PAGE_SIZE); if (ret < 0) { dev_err(dev, "failed to initialize the memory 
space\n"); - goto err_init; + return ret; } ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, @@ -466,14 +579,12 @@ int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) ep->irq_pci_addr = CDNS_PCIE_EP_IRQ_PCI_ADDR_NONE; /* Reserve region 0 for IRQs */ set_bit(0, &ep->ob_region_map); + spin_lock_init(&ep->lock); return 0; free_epc_mem: pci_epc_mem_exit(epc); - err_init: - pm_runtime_put_sync(dev); - return ret; } diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c index 8c2543f28ba0..4550e0d469ca 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-host.c +++ b/drivers/pci/controller/cadence/pcie-cadence-host.c @@ -3,16 +3,28 @@ // Cadence PCIe host controller driver. // Author: Cyrille Pitchen <cyrille.pitchen@free-electrons.com> +#include <linux/delay.h> #include <linux/kernel.h> +#include <linux/list_sort.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include "pcie-cadence.h" -static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, - int where) +static u64 bar_max_size[] = { + [RP_BAR0] = _ULL(128 * SZ_2G), + [RP_BAR1] = SZ_2G, + [RP_NO_BAR] = _BITULL(63), +}; + +static u8 bar_aperture_mask[] = { + [RP_BAR0] = 0x1F, + [RP_BAR1] = 0xF, +}; + +void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) { struct pci_host_bridge *bridge = pci_find_host_bridge(bus); struct cdns_pcie_rc *rc = pci_host_bridge_priv(bridge); @@ -20,7 +32,7 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, unsigned int busn = bus->number; u32 addr0, desc0; - if (busn == rc->bus_range->start) { + if (pci_is_root_bus(bus)) { /* * Only the root port (devfn == 0) is connected to this bus. * All other PCI devices are behind some bridge hence on another @@ -50,7 +62,7 @@ static void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, * The bus number was already set once for all in desc1 by * cdns_pcie_host_init_address_translation(). 
*/ - if (busn == rc->bus_range->start + 1) + if (busn == bridge->busnr + 1) desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE0; else desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_TYPE_CONF_TYPE1; @@ -70,6 +82,7 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; u32 value, ctrl; + u32 id; /* * Set the root complex BAR configuration register: @@ -89,8 +102,12 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); /* Set root port configuration space */ - if (rc->vendor_id != 0xffff) - cdns_pcie_rp_writew(pcie, PCI_VENDOR_ID, rc->vendor_id); + if (rc->vendor_id != 0xffff) { + id = CDNS_PCIE_LM_ID_VENDOR(rc->vendor_id) | + CDNS_PCIE_LM_ID_SUBSYS(rc->vendor_id); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_ID, id); + } + if (rc->device_id != 0xffff) cdns_pcie_rp_writew(pcie, PCI_DEVICE_ID, rc->device_id); @@ -101,19 +118,230 @@ static int cdns_pcie_host_init_root_port(struct cdns_pcie_rc *rc) return 0; } -static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) +static int cdns_pcie_host_bar_ib_config(struct cdns_pcie_rc *rc, + enum cdns_pcie_rp_bar bar, + u64 cpu_addr, u64 size, + unsigned long flags) +{ + struct cdns_pcie *pcie = &rc->pcie; + u32 addr0, addr1, aperture, value; + + if (!rc->avail_ib_bar[bar]) + return -EBUSY; + + rc->avail_ib_bar[bar] = false; + + aperture = ilog2(size); + addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(aperture) | + (lower_32_bits(cpu_addr) & GENMASK(31, 8)); + addr1 = upper_32_bits(cpu_addr); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(bar), addr0); + cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(bar), addr1); + + if (bar == RP_NO_BAR) + return 0; + + value = cdns_pcie_readl(pcie, CDNS_PCIE_LM_RC_BAR_CFG); + value &= ~(LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) | + LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) | + LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) | + LM_RC_BAR_CFG_APERTURE(bar, bar_aperture_mask[bar] + 2)); + if (size + cpu_addr >= SZ_4G) { + if (!(flags & IORESOURCE_PREFETCH)) + value |= LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar); + value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar); + } else { + if (!(flags & IORESOURCE_PREFETCH)) + value |= LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar); + value |= LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar); + } + + value |= LM_RC_BAR_CFG_APERTURE(bar, aperture); + cdns_pcie_writel(pcie, CDNS_PCIE_LM_RC_BAR_CFG, value); + + return 0; +} + +static enum cdns_pcie_rp_bar +cdns_pcie_host_find_min_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size <= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] < bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} + +static enum cdns_pcie_rp_bar +cdns_pcie_host_find_max_bar(struct cdns_pcie_rc *rc, u64 size) +{ + enum cdns_pcie_rp_bar bar, sel_bar; + + sel_bar = RP_BAR_UNDEFINED; + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) { + if (!rc->avail_ib_bar[bar]) + continue; + + if (size >= bar_max_size[bar]) { + if (sel_bar == RP_BAR_UNDEFINED) { + sel_bar = bar; + continue; + } + + if (bar_max_size[bar] > bar_max_size[sel_bar]) + sel_bar = bar; + } + } + + return sel_bar; +} + +static int cdns_pcie_host_bar_config(struct cdns_pcie_rc *rc, + struct resource_entry *entry) +{ + u64 cpu_addr, pci_addr, 
size, winsize; + struct cdns_pcie *pcie = &rc->pcie; + struct device *dev = pcie->dev; + enum cdns_pcie_rp_bar bar; + unsigned long flags; + int ret; + + cpu_addr = entry->res->start; + pci_addr = entry->res->start - entry->offset; + flags = entry->res->flags; + size = resource_size(entry->res); + + if (entry->offset) { + dev_err(dev, "PCI addr: %llx must be equal to CPU addr: %llx\n", + pci_addr, cpu_addr); + return -EINVAL; + } + + while (size > 0) { + /* + * Try to find a minimum BAR whose size is greater than + * or equal to the remaining resource_entry size. This will + * fail if the size of each of the available BARs is less than + * the remaining resource_entry size. + * If a minimum BAR is found, IB ATU will be configured and + * exited. + */ + bar = cdns_pcie_host_find_min_bar(rc, size); + if (bar != RP_BAR_UNDEFINED) { + ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, + size, flags); + if (ret) + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + /* + * If the control reaches here, it would mean the remaining + * resource_entry size cannot be fitted in a single BAR. So we + * find a maximum BAR whose size is less than or equal to the + * remaining resource_entry size and split the resource entry + * so that part of resource entry is fitted inside the maximum + * BAR. The remaining size would be fitted during the next + * iteration of the loop. + * If a maximum BAR is not found, there is no way we can fit + * this resource_entry, so we error out. + */ + bar = cdns_pcie_host_find_max_bar(rc, size); + if (bar == RP_BAR_UNDEFINED) { + dev_err(dev, "No free BAR to map cpu_addr %llx\n", + cpu_addr); + return -EINVAL; + } + + winsize = bar_max_size[bar]; + ret = cdns_pcie_host_bar_ib_config(rc, bar, cpu_addr, winsize, + flags); + if (ret) { + dev_err(dev, "IB BAR: %d config failed\n", bar); + return ret; + } + + size -= winsize; + cpu_addr += winsize; + } + + return 0; +} + +static int cdns_pcie_host_dma_ranges_cmp(void *priv, struct list_head *a, struct list_head *b) +{ + struct resource_entry *entry1, *entry2; + + entry1 = container_of(a, struct resource_entry, node); + entry2 = container_of(b, struct resource_entry, node); + + return resource_size(entry2->res) - resource_size(entry1->res); +} + +static int cdns_pcie_host_map_dma_ranges(struct cdns_pcie_rc *rc) { struct cdns_pcie *pcie = &rc->pcie; - struct resource *mem_res = pcie->mem_res; - struct resource *bus_range = rc->bus_range; - struct resource *cfg_res = rc->cfg_res; struct device *dev = pcie->dev; struct device_node *np = dev->of_node; - struct of_pci_range_parser parser; - struct of_pci_range range; + struct pci_host_bridge *bridge; + struct resource_entry *entry; + u32 no_bar_nbits = 32; + int err; + + bridge = pci_host_bridge_from_priv(rc); + if (!bridge) + return -ENOMEM; + + if (list_empty(&bridge->dma_ranges)) { + of_property_read_u32(np, "cdns,no-bar-match-nbits", + &no_bar_nbits); + err = cdns_pcie_host_bar_ib_config(rc, RP_NO_BAR, 0x0, + (u64)1 << no_bar_nbits, 0); + if (err) + dev_err(dev, "IB BAR: %d config failed\n", RP_NO_BAR); + return err; + } + + list_sort(NULL, &bridge->dma_ranges, cdns_pcie_host_dma_ranges_cmp); + + resource_list_for_each_entry(entry, &bridge->dma_ranges) { + err = cdns_pcie_host_bar_config(rc, entry); + if (err) + dev_err(dev, "Fail to configure IB using dma-ranges\n"); + return err; + } + + return 0; +} + +static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) +{ + struct cdns_pcie *pcie = &rc->pcie; + struct pci_host_bridge *bridge = 
pci_host_bridge_from_priv(rc); + struct resource *cfg_res = rc->cfg_res; + struct resource_entry *entry; + u64 cpu_addr = cfg_res->start; u32 addr0, addr1, desc1; - u64 cpu_addr; - int r, err; + int r, err, busnr = 0; + + entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (entry) + busnr = entry->res->start; /* * Reserve region 0 for PCI configure space accesses: @@ -121,81 +349,74 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc) * cdns_pci_map_bus(), other region registers are set here once for all. */ addr1 = 0; /* Should be programmed to zero. */ - desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(bus_range->start); + desc1 = CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_PCI_ADDR1(0), addr1); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(0), desc1); - cpu_addr = cfg_res->start - mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(12) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR0(0), addr0); cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(0), addr1); - err = of_pci_range_parser_init(&parser, np); - if (err) - return err; - r = 1; - for_each_of_pci_range(&parser, &range) { - bool is_io; - - if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM) - is_io = false; - else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO) - is_io = true; + resource_list_for_each_entry(entry, &bridge->windows) { + struct resource *res = entry->res; + u64 pci_addr = res->start - entry->offset; + + if (resource_type(res) == IORESOURCE_IO) + cdns_pcie_set_outbound_region(pcie, busnr, 0, r, + true, + pci_pio_to_address(res->start), + pci_addr, + resource_size(res)); else - continue; + cdns_pcie_set_outbound_region(pcie, busnr, 0, r, + false, + res->start, + pci_addr, + resource_size(res)); - cdns_pcie_set_outbound_region(pcie, 0, r, is_io, - range.cpu_addr, - range.pci_addr, - range.size); r++; } - /* - * Set Root Port no BAR match Inbound Translation registers: - * needed for MSI and DMA. - * Root Port BAR0 and BAR1 are disabled, hence no need to set their - * inbound translation registers. 
- */ - addr0 = CDNS_PCIE_AT_IB_RP_BAR_ADDR0_NBITS(rc->no_bar_nbits); - addr1 = 0; - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR0(RP_NO_BAR), addr0); - cdns_pcie_writel(pcie, CDNS_PCIE_AT_IB_RP_BAR_ADDR1(RP_NO_BAR), addr1); + err = cdns_pcie_host_map_dma_ranges(rc); + if (err) + return err; return 0; } static int cdns_pcie_host_init(struct device *dev, - struct list_head *resources, struct cdns_pcie_rc *rc) { - struct resource *bus_range = NULL; int err; - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range); - if (err) - return err; - - rc->bus_range = bus_range; - rc->pcie.bus = bus_range->start; - err = cdns_pcie_host_init_root_port(rc); if (err) - goto err_out; + return err; - err = cdns_pcie_host_init_address_translation(rc); - if (err) - goto err_out; + return cdns_pcie_host_init_address_translation(rc); +} - return 0; +static int cdns_pcie_host_wait_for_link(struct cdns_pcie *pcie) +{ + struct device *dev = pcie->dev; + int retries; + + /* Check if the link is up or not */ + for (retries = 0; retries < LINK_WAIT_MAX_RETRIES; retries++) { + if (cdns_pcie_link_up(pcie)) { + dev_info(dev, "Link up\n"); + return 0; + } + usleep_range(LINK_WAIT_USLEEP_MIN, LINK_WAIT_USLEEP_MAX); + } - err_out: - pci_free_resource_list(resources); - return err; + return -ETIMEDOUT; } int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) @@ -204,7 +425,7 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) struct platform_device *pdev = to_platform_device(dev); struct device_node *np = dev->of_node; struct pci_host_bridge *bridge; - struct list_head resources; + enum cdns_pcie_rp_bar bar; struct cdns_pcie *pcie; struct resource *res; int ret; @@ -216,17 +437,13 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) pcie = &rc->pcie; pcie->is_rc = true; - rc->no_bar_nbits = 32; - of_property_read_u32(np, "cdns,no-bar-match-nbits", &rc->no_bar_nbits); - rc->vendor_id = 0xffff; of_property_read_u32(np, "vendor-id", &rc->vendor_id); rc->device_id = 0xffff; of_property_read_u32(np, "device-id", &rc->device_id); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "reg"); - pcie->reg_base = devm_ioremap_resource(dev, res); + pcie->reg_base = devm_platform_ioremap_resource_byname(pdev, "reg"); if (IS_ERR(pcie->reg_base)) { dev_err(dev, "missing \"reg\"\n"); return PTR_ERR(pcie->reg_base); @@ -234,40 +451,36 @@ int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); rc->cfg_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(rc->cfg_base)) { - dev_err(dev, "missing \"cfg\"\n"); + if (IS_ERR(rc->cfg_base)) return PTR_ERR(rc->cfg_base); - } rc->cfg_res = res; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); - if (!res) { - dev_err(dev, "missing \"mem\"\n"); - return -EINVAL; + ret = cdns_pcie_start_link(pcie); + if (ret) { + dev_err(dev, "Failed to start link\n"); + return ret; } - pcie->mem_res = res; + ret = cdns_pcie_host_wait_for_link(pcie); + if (ret) + dev_dbg(dev, "PCIe link never came up\n"); + + for (bar = RP_BAR0; bar <= RP_NO_BAR; bar++) + rc->avail_ib_bar[bar] = true; - ret = cdns_pcie_host_init(dev, &resources, rc); + ret = cdns_pcie_host_init(dev, rc); if (ret) - goto err_init; + return ret; - list_splice_init(&resources, &bridge->windows); - bridge->dev.parent = dev; - bridge->busnr = pcie->bus; - bridge->ops = &cdns_pcie_host_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; + if 
(!bridge->ops) + bridge->ops = &cdns_pcie_host_ops; ret = pci_host_probe(bridge); if (ret < 0) - goto err_host_probe; + goto err_init; return 0; - err_host_probe: - pci_free_resource_list(&resources); - err_init: pm_runtime_put_sync(dev); diff --git a/drivers/pci/controller/cadence/pcie-cadence-plat.c b/drivers/pci/controller/cadence/pcie-cadence-plat.c index f5c6bf6dfcb8..5fee0f89ab59 100644 --- a/drivers/pci/controller/cadence/pcie-cadence-plat.c +++ b/drivers/pci/controller/cadence/pcie-cadence-plat.c @@ -13,6 +13,8 @@ #include <linux/of_device.h> #include "pcie-cadence.h" +#define CDNS_PLAT_CPU_TO_BUS_ADDR 0x0FFFFFFF + /** * struct cdns_plat_pcie - private data for this PCIe platform driver * @pcie: Cadence PCIe controller @@ -30,6 +32,15 @@ struct cdns_plat_pcie_of_data { static const struct of_device_id cdns_plat_pcie_of_match[]; +static u64 cdns_plat_cpu_addr_fixup(struct cdns_pcie *pcie, u64 cpu_addr) +{ + return cpu_addr & CDNS_PLAT_CPU_TO_BUS_ADDR; +} + +static const struct cdns_pcie_ops cdns_plat_ops = { + .cpu_addr_fixup = cdns_plat_cpu_addr_fixup, +}; + static int cdns_plat_pcie_probe(struct platform_device *pdev) { const struct cdns_plat_pcie_of_data *data; @@ -66,6 +77,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) rc = pci_host_bridge_priv(bridge); rc->pcie.dev = dev; + rc->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &rc->pcie; cdns_plat_pcie->is_rc = is_rc; @@ -93,6 +105,7 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) return -ENOMEM; ep->pcie.dev = dev; + ep->pcie.ops = &cdns_plat_ops; cdns_plat_pcie->pcie = &ep->pcie; cdns_plat_pcie->is_rc = is_rc; @@ -115,9 +128,8 @@ static int cdns_plat_pcie_probe(struct platform_device *pdev) } err_init: - pm_runtime_put_sync(dev); - err_get_sync: + pm_runtime_put_sync(dev); pm_runtime_disable(dev); cdns_pcie_disable_phy(cdns_plat_pcie->pcie); phy_count = cdns_plat_pcie->pcie->phy_count; diff --git a/drivers/pci/controller/cadence/pcie-cadence.c b/drivers/pci/controller/cadence/pcie-cadence.c index cd795f6fc1e2..3c3646502d05 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.c +++ b/drivers/pci/controller/cadence/pcie-cadence.c @@ -7,7 +7,7 @@ #include "pcie-cadence.h" -void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, bool is_io, u64 cpu_addr, u64 pci_addr, size_t size) { @@ -60,7 +60,7 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, /* The device and function numbers are always 0. 
*/ desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); + desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); } else { /* * Use captured values for bus and device numbers but still @@ -73,7 +73,9 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_DESC1(r), desc1); /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(nbits) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); @@ -82,7 +84,8 @@ void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, cdns_pcie_writel(pcie, CDNS_PCIE_AT_OB_REGION_CPU_ADDR1(r), addr1); } -void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + u8 busnr, u8 fn, u32 r, u64 cpu_addr) { u32 addr0, addr1, desc0, desc1; @@ -94,13 +97,15 @@ void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, if (pcie->is_rc) { desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID | CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(0); - desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(pcie->bus); + desc1 |= CDNS_PCIE_AT_OB_REGION_DESC1_BUS(busnr); } else { desc0 |= CDNS_PCIE_AT_OB_REGION_DESC0_DEVFN(fn); } /* Set the CPU address */ - cpu_addr -= pcie->mem_res->start; + if (pcie->ops->cpu_addr_fixup) + cpu_addr = pcie->ops->cpu_addr_fixup(pcie, cpu_addr); + addr0 = CDNS_PCIE_AT_OB_REGION_CPU_ADDR0_NBITS(17) | (lower_32_bits(cpu_addr) & GENMASK(31, 8)); addr1 = upper_32_bits(cpu_addr); diff --git a/drivers/pci/controller/cadence/pcie-cadence.h b/drivers/pci/controller/cadence/pcie-cadence.h index df14ad002fe9..feed1e3038f4 100644 --- a/drivers/pci/controller/cadence/pcie-cadence.h +++ b/drivers/pci/controller/cadence/pcie-cadence.h @@ -10,6 +10,11 @@ #include <linux/pci.h> #include <linux/phy/phy.h> +/* Parameters for the waiting for link up routine */ +#define LINK_WAIT_MAX_RETRIES 10 +#define LINK_WAIT_USLEEP_MIN 90000 +#define LINK_WAIT_USLEEP_MAX 100000 + /* * Local Management Registers */ @@ -87,6 +92,20 @@ #define CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS 0x6 #define CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS 0x7 +#define LM_RC_BAR_CFG_CTRL_DISABLED(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_DISABLED << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_IO_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_IO_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_32BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_32BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_CTRL_PREF_MEM_64BITS(bar) \ + (CDNS_PCIE_LM_BAR_CFG_CTRL_PREFETCH_MEM_64BITS << (((bar) * 8) + 6)) +#define LM_RC_BAR_CFG_APERTURE(bar, aperture) \ + (((aperture) - 2) << ((bar) * 8)) /* * Endpoint Function Registers (PCI configuration space for endpoint functions) @@ -94,6 +113,7 @@ #define CDNS_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) #define CDNS_PCIE_EP_FUNC_MSI_CAP_OFFSET 0x90 +#define CDNS_PCIE_EP_FUNC_MSIX_CAP_OFFSET 0xb0 /* * Root Port Registers (PCI configuration space for the root port function) @@ -170,11 +190,19 @@ #define CDNS_PCIE_AT_LINKDOWN 
(CDNS_PCIE_AT_BASE + 0x0824) enum cdns_pcie_rp_bar { + RP_BAR_UNDEFINED = -1, RP_BAR0, RP_BAR1, RP_NO_BAR }; +#define CDNS_PCIE_RP_MAX_IB 0x3 + +struct cdns_pcie_rp_ib_bar { + u64 size; + bool free; +}; + /* Endpoint Function BAR Inbound PCIe to AXI Address Translation Register */ #define CDNS_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ (CDNS_PCIE_AT_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) @@ -223,23 +251,31 @@ enum cdns_pcie_msg_routing { MSG_ROUTING_GATHER, }; +struct cdns_pcie_ops { + int (*start_link)(struct cdns_pcie *pcie); + void (*stop_link)(struct cdns_pcie *pcie); + bool (*link_up)(struct cdns_pcie *pcie); + u64 (*cpu_addr_fixup)(struct cdns_pcie *pcie, u64 cpu_addr); +}; + /** * struct cdns_pcie - private data for Cadence PCIe controller drivers * @reg_base: IO mapped register base * @mem_res: start/end offsets in the physical system memory to map PCI accesses * @is_rc: tell whether the PCIe controller mode is Root Complex or Endpoint. * @bus: In Root Complex mode, the bus number + * @ops: Platform specific ops to control various inputs from Cadence PCIe + * wrapper */ struct cdns_pcie { void __iomem *reg_base; struct resource *mem_res; struct device *dev; bool is_rc; - u8 bus; int phy_count; struct phy **phy; struct device_link **link; - const struct cdns_pcie_common_ops *ops; + const struct cdns_pcie_ops *ops; }; /** @@ -248,22 +284,28 @@ struct cdns_pcie { * @dev: pointer to PCIe device * @cfg_res: start/end offsets in the physical system memory to map PCI * configuration space accesses - * @bus_range: first/last buses behind the PCIe host controller * @cfg_base: IO mapped window to access the PCI configuration space of a * single function at a time - * @no_bar_nbits: Number of bits to keep for inbound (PCIe -> CPU) address - * translation (nbits sets into the "no BAR match" register) * @vendor_id: PCI vendor ID * @device_id: PCI device ID + * @avail_ib_bar: Satus of RP_BAR0, RP_BAR1 and RP_NO_BAR if it's free or + * available */ struct cdns_pcie_rc { struct cdns_pcie pcie; struct resource *cfg_res; - struct resource *bus_range; void __iomem *cfg_base; - u32 no_bar_nbits; u32 vendor_id; u32 device_id; + bool avail_ib_bar[CDNS_PCIE_RP_MAX_IB]; +}; + +/** + * struct cdns_pcie_epf - Structure to hold info about endpoint function + * @epf_bar: reference to the pci_epf_bar for the six Base Address Registers + */ +struct cdns_pcie_epf { + struct pci_epf_bar *epf_bar[PCI_STD_NUM_BARS]; }; /** @@ -282,6 +324,10 @@ struct cdns_pcie_rc { * @irq_pci_fn: the latest PCI function that has updated the mapping of * the MSI/legacy IRQ dedicated outbound region. * @irq_pending: bitmask of asserted legacy IRQs. 
+ * @lock: spin lock to disable interrupts while modifying PCIe controller + * registers fields (RMW) accessible by both remote RC and EP to + * minimize time between read and write + * @epf: Structure to hold info about endpoint function */ struct cdns_pcie_ep { struct cdns_pcie pcie; @@ -293,54 +339,95 @@ struct cdns_pcie_ep { u64 irq_pci_addr; u8 irq_pci_fn; u8 irq_pending; + /* protect writing to PCI_STATUS while raising legacy interrupts */ + spinlock_t lock; + struct cdns_pcie_epf *epf; }; /* Register access */ -static inline void cdns_pcie_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) +static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) { - writeb(value, pcie->reg_base + reg); + writel(value, pcie->reg_base + reg); } -static inline void cdns_pcie_writew(struct cdns_pcie *pcie, u32 reg, u16 value) +static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) { - writew(value, pcie->reg_base + reg); + return readl(pcie->reg_base + reg); } -static inline void cdns_pcie_writel(struct cdns_pcie *pcie, u32 reg, u32 value) +static inline u32 cdns_pcie_read_sz(void __iomem *addr, int size) { - writel(value, pcie->reg_base + reg); + void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4); + unsigned int offset = (unsigned long)addr & 0x3; + u32 val = readl(aligned_addr); + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + pr_warn("Address %p and size %d are not aligned\n", addr, size); + return 0; + } + + if (size > 2) + return val; + + return (val >> (8 * offset)) & ((1 << (size * 8)) - 1); } -static inline u32 cdns_pcie_readl(struct cdns_pcie *pcie, u32 reg) +static inline void cdns_pcie_write_sz(void __iomem *addr, int size, u32 value) { - return readl(pcie->reg_base + reg); + void __iomem *aligned_addr = PTR_ALIGN_DOWN(addr, 0x4); + unsigned int offset = (unsigned long)addr & 0x3; + u32 mask; + u32 val; + + if (!IS_ALIGNED((uintptr_t)addr, size)) { + pr_warn("Address %p and size %d are not aligned\n", addr, size); + return; + } + + if (size > 2) { + writel(value, addr); + return; + } + + mask = ~(((1 << (size * 8)) - 1) << (offset * 8)); + val = readl(aligned_addr) & mask; + val |= value << (offset * 8); + writel(val, aligned_addr); } /* Root Port register access */ static inline void cdns_pcie_rp_writeb(struct cdns_pcie *pcie, u32 reg, u8 value) { - writeb(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x1, value); } static inline void cdns_pcie_rp_writew(struct cdns_pcie *pcie, u32 reg, u16 value) { - writew(value, pcie->reg_base + CDNS_PCIE_RP_BASE + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_RP_BASE + reg; + + cdns_pcie_write_sz(addr, 0x2, value); } /* Endpoint Function register access */ static inline void cdns_pcie_ep_fn_writeb(struct cdns_pcie *pcie, u8 fn, u32 reg, u8 value) { - writeb(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + cdns_pcie_write_sz(addr, 0x1, value); } static inline void cdns_pcie_ep_fn_writew(struct cdns_pcie *pcie, u8 fn, u32 reg, u16 value) { - writew(value, pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + cdns_pcie_write_sz(addr, 0x2, value); } static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, @@ -349,14 +436,11 @@ static inline void cdns_pcie_ep_fn_writel(struct cdns_pcie *pcie, u8 fn, writel(value, pcie->reg_base + 
CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } -static inline u8 cdns_pcie_ep_fn_readb(struct cdns_pcie *pcie, u8 fn, u32 reg) -{ - return readb(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); -} - static inline u16 cdns_pcie_ep_fn_readw(struct cdns_pcie *pcie, u8 fn, u32 reg) { - return readw(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); + void __iomem *addr = pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg; + + return cdns_pcie_read_sz(addr, 0x2); } static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) @@ -364,13 +448,43 @@ static inline u32 cdns_pcie_ep_fn_readl(struct cdns_pcie *pcie, u8 fn, u32 reg) return readl(pcie->reg_base + CDNS_PCIE_EP_FUNC_BASE(fn) + reg); } +static inline int cdns_pcie_start_link(struct cdns_pcie *pcie) +{ + if (pcie->ops->start_link) + return pcie->ops->start_link(pcie); + + return 0; +} + +static inline void cdns_pcie_stop_link(struct cdns_pcie *pcie) +{ + if (pcie->ops->stop_link) + pcie->ops->stop_link(pcie); +} + +static inline bool cdns_pcie_link_up(struct cdns_pcie *pcie) +{ + if (pcie->ops->link_up) + return pcie->ops->link_up(pcie); + + return true; +} + #ifdef CONFIG_PCIE_CADENCE_HOST int cdns_pcie_host_setup(struct cdns_pcie_rc *rc); +void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where); #else static inline int cdns_pcie_host_setup(struct cdns_pcie_rc *rc) { return 0; } + +static inline void __iomem *cdns_pci_map_bus(struct pci_bus *bus, unsigned int devfn, + int where) +{ + return NULL; +} #endif #ifdef CONFIG_PCIE_CADENCE_EP @@ -381,11 +495,12 @@ static inline int cdns_pcie_ep_setup(struct cdns_pcie_ep *ep) return 0; } #endif -void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region(struct cdns_pcie *pcie, u8 busnr, u8 fn, u32 r, bool is_io, u64 cpu_addr, u64 pci_addr, size_t size); -void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, u8 fn, +void cdns_pcie_set_outbound_region_for_normal_msg(struct cdns_pcie *pcie, + u8 busnr, u8 fn, u32 r, u64 cpu_addr); void cdns_pcie_reset_outbound_region(struct cdns_pcie *pcie, u32 r); diff --git a/drivers/pci/controller/dwc/pci-dra7xx.c b/drivers/pci/controller/dwc/pci-dra7xx.c index 6184ebc9392d..dc387724cf08 100644 --- a/drivers/pci/controller/dwc/pci-dra7xx.c +++ b/drivers/pci/controller/dwc/pci-dra7xx.c @@ -2,7 +2,7 @@ /* * pcie-dra7xx - PCIe controller driver for TI DRA7xx SoCs * - * Copyright (C) 2013-2014 Texas Instruments Incorporated - http://www.ti.com + * Copyright (C) 2013-2014 Texas Instruments Incorporated - https://www.ti.com * * Authors: Kishon Vijay Abraham I <kishon@ti.com> */ @@ -593,13 +593,12 @@ static int __init dra7xx_add_pcie_ep(struct dra7xx_pcie *dra7xx, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "ep_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ep_dbics2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = + devm_platform_ioremap_resource_byname(pdev, "ep_dbics2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); @@ -626,20 +625,16 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx, struct dw_pcie *pci = dra7xx->pci; struct pcie_port *pp = &pci->pp; struct device *dev = pci->dev; - struct resource *res; pp->irq = platform_get_irq(pdev, 1); - if 
(pp->irq < 0) { - dev_err(dev, "missing IRQ resource\n"); + if (pp->irq < 0) return pp->irq; - } ret = dra7xx_pcie_init_irq_domain(pp); if (ret < 0) return ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbics"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc_dbics"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); @@ -871,10 +866,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) pci->ops = &dw_pcie_ops; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); + if (irq < 0) return irq; - } base = devm_platform_ioremap_resource_byname(pdev, "ti_conf"); if (IS_ERR(base)) @@ -998,9 +991,8 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev) return 0; err_gpio: - pm_runtime_put(dev); - err_get_sync: + pm_runtime_put(dev); pm_runtime_disable(dev); dra7xx_pcie_disable_phy(dra7xx); diff --git a/drivers/pci/controller/dwc/pci-exynos.c b/drivers/pci/controller/dwc/pci-exynos.c index c5043d951e80..8d82c43ae299 100644 --- a/drivers/pci/controller/dwc/pci-exynos.c +++ b/drivers/pci/controller/dwc/pci-exynos.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Samsung Exynos SoCs * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -84,14 +84,12 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev, { struct dw_pcie *pci = ep->pci; struct device *dev = pci->dev; - struct resource *res; ep->mem_res = devm_kzalloc(dev, sizeof(*ep->mem_res), GFP_KERNEL); if (!ep->mem_res) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ep->mem_res->elbi_base = devm_ioremap_resource(dev, res); + ep->mem_res->elbi_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ep->mem_res->elbi_base)) return PTR_ERR(ep->mem_res->elbi_base); @@ -402,10 +400,9 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, int ret; pp->irq = platform_get_irq(pdev, 1); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (pp->irq < 0) return pp->irq; - } + ret = devm_request_irq(dev, pp->irq, exynos_pcie_irq_handler, IRQF_SHARED, "exynos-pcie", ep); if (ret) { @@ -415,10 +412,8 @@ static int __init exynos_add_pcie_port(struct exynos_pcie *ep, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get msi irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &exynos_pcie_host_ops; diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 8f08ae53f53e..90df28c7cb0c 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Freescale i.MX6 SoCs * * Copyright (C) 2013 Kosagi - * http://www.kosagi.com + * https://www.kosagi.com * * Author: Sean Cross <xobs@kosagi.com> */ @@ -868,10 +868,8 @@ static int imx6_add_pcie_port(struct imx6_pcie *imx6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &imx6_pcie_host_ops; @@ -1269,7 +1267,7 @@ static void imx6_pcie_quirk(struct pci_dev *dev) if (bus->dev.parent->parent->driver != &imx6_pcie_driver.driver) return; - if (bus->number == pp->root_bus_nr) { + if (pci_is_root_bus(bus)) 
{ struct dw_pcie *pci = to_dw_pcie_from_pp(pp); struct imx6_pcie *imx6_pcie = to_imx6_pcie(pci); diff --git a/drivers/pci/controller/dwc/pci-keystone.c b/drivers/pci/controller/dwc/pci-keystone.c index 790679fdfa48..c8c9d6a75f17 100644 --- a/drivers/pci/controller/dwc/pci-keystone.c +++ b/drivers/pci/controller/dwc/pci-keystone.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Texas Instruments Keystone SoCs * * Copyright (C) 2013-2014 Texas Instruments., Ltd. - * http://www.ti.com + * https://www.ti.com * * Author: Murali Karicheri <m-karicheri2@ti.com> * Implementation based on pci-exynos.c and pcie-designware.c @@ -440,7 +440,7 @@ static int ks_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number != pp->root_bus_nr) + if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); @@ -457,7 +457,7 @@ static int ks_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, reg = CFG_BUS(bus->number) | CFG_DEVICE(PCI_SLOT(devfn)) | CFG_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number != pp->root_bus_nr) + if (!pci_is_root_bus(bus->parent)) reg |= CFG_TYPE1; ks_pcie_app_writel(ks_pcie, CFG_SETUP, reg); @@ -1250,10 +1250,8 @@ static int __init ks_pcie_probe(struct platform_device *pdev) pci->version = version; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "missing IRQ resource: %d\n", irq); + if (irq < 0) return irq; - } ret = request_irq(irq, ks_pcie_err_irq_handler, IRQF_SHARED, "ks-pcie-error-irq", ks_pcie); @@ -1323,8 +1321,7 @@ static int __init ks_pcie_probe(struct platform_device *pdev) } if (pci->version >= 0x480A) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "atu"); - atu_base = devm_ioremap_resource(dev, res); + atu_base = devm_platform_ioremap_resource_byname(pdev, "atu"); if (IS_ERR(atu_base)) { ret = PTR_ERR(atu_base); goto err_get_sync; diff --git a/drivers/pci/controller/dwc/pci-meson.c b/drivers/pci/controller/dwc/pci-meson.c index ca59ba9e0ecd..4f183b96afbb 100644 --- a/drivers/pci/controller/dwc/pci-meson.c +++ b/drivers/pci/controller/dwc/pci-meson.c @@ -488,10 +488,8 @@ static int meson_add_pcie_port(struct meson_pcie *mp, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq(pdev, 0); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI IRQ\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &meson_pcie_host_ops; diff --git a/drivers/pci/controller/dwc/pcie-al.c b/drivers/pci/controller/dwc/pcie-al.c index 270868f3859a..d57d4ee15848 100644 --- a/drivers/pci/controller/dwc/pcie-al.c +++ b/drivers/pci/controller/dwc/pcie-al.c @@ -67,13 +67,8 @@ static int al_pcie_init(struct pci_config_window *cfg) dev_dbg(dev, "Root port dbi res: %pR\n", res); al_pcie->dbi_base = devm_pci_remap_cfg_resource(dev, res); - if (IS_ERR(al_pcie->dbi_base)) { - long err = PTR_ERR(al_pcie->dbi_base); - - dev_err(dev, "couldn't remap dbi base %pR (err:%ld)\n", - res, err); - return err; - } + if (IS_ERR(al_pcie->dbi_base)) + return PTR_ERR(al_pcie->dbi_base); cfg->priv = al_pcie; @@ -408,10 +403,8 @@ static int al_pcie_probe(struct platform_device *pdev) dbi_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_res); - if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %pR\n", dbi_res); + if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - } ecam_res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, "config"); if (!ecam_res) { diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index 49596547e8c2..13901f359a41 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c @@ -248,10 +248,8 @@ static int armada8k_add_pcie_port(struct armada8k_pcie *pcie, pp->ops = &armada8k_pcie_host_ops; pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq for port\n"); + if (pp->irq < 0) return pp->irq; - } ret = devm_request_irq(dev, pp->irq, armada8k_pcie_irq_handler, IRQF_SHARED, "armada8k-pcie", pcie); @@ -317,7 +315,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, base); if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap regs base %p\n", base); ret = PTR_ERR(pci->dbi_base); goto fail_clkreg; } diff --git a/drivers/pci/controller/dwc/pcie-artpec6.c b/drivers/pci/controller/dwc/pcie-artpec6.c index 28d5a1095200..97d50bb50f06 100644 --- a/drivers/pci/controller/dwc/pcie-artpec6.c +++ b/drivers/pci/controller/dwc/pcie-artpec6.c @@ -387,10 +387,8 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie, if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "failed to get MSI irq\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } pp->ops = &artpec6_pcie_host_ops; @@ -455,8 +453,7 @@ static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); @@ -481,8 +478,6 @@ static int artpec6_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct dw_pcie *pci; struct artpec6_pcie *artpec6_pcie; - struct resource *dbi_base; - struct resource *phy_base; int ret; const struct of_device_id *match; const struct artpec_pcie_of_data *data; @@ -512,13 +507,12 @@ static int artpec6_pcie_probe(struct platform_device *pdev) artpec6_pcie->variant = variant; artpec6_pcie->mode = mode; - dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, dbi_base); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); - phy_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - artpec6_pcie->phy_base = devm_ioremap_resource(dev, phy_base); + artpec6_pcie->phy_base = + devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(artpec6_pcie->phy_base)) return PTR_ERR(artpec6_pcie->phy_base); diff --git a/drivers/pci/controller/dwc/pcie-designware-ep.c b/drivers/pci/controller/dwc/pcie-designware-ep.c index 5e5b8821bed8..305bfec2424d 100644 --- a/drivers/pci/controller/dwc/pcie-designware-ep.c +++ b/drivers/pci/controller/dwc/pcie-designware-ep.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * Synopsys DesignWare PCIe Endpoint controller driver * * Copyright (C) 2017 Texas Instruments diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c index 0a4a5aa6fe46..9dafecba347f 100644 --- a/drivers/pci/controller/dwc/pcie-designware-host.c +++ 
b/drivers/pci/controller/dwc/pcie-designware-host.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -346,11 +346,6 @@ int dw_pcie_host_init(struct pcie_port *pp) if (!bridge) return -ENOMEM; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - /* Get the I/O and memory ranges from DT */ resource_list_for_each_entry(win, &bridge->windows) { switch (resource_type(win->res)) { @@ -473,14 +468,8 @@ int dw_pcie_host_init(struct pcie_port *pp) goto err_free_msi; } - pp->root_bus_nr = pp->busn->start; - - bridge->dev.parent = dev; bridge->sysdata = pp; - bridge->busnr = pp->root_bus_nr; bridge->ops = &dw_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = pci_scan_root_bus_bridge(bridge); if (ret) @@ -529,7 +518,7 @@ static int dw_pcie_access_other_conf(struct pcie_port *pp, struct pci_bus *bus, busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | PCIE_ATU_FUNC(PCI_FUNC(devfn)); - if (bus->parent->number == pp->root_bus_nr) { + if (pci_is_root_bus(bus->parent)) { type = PCIE_ATU_TYPE_CFG0; cpu_addr = pp->cfg0_base; cfg_size = pp->cfg0_size; @@ -585,13 +574,11 @@ static int dw_pcie_valid_device(struct pcie_port *pp, struct pci_bus *bus, struct dw_pcie *pci = to_dw_pcie_from_pp(pp); /* If there is no link, then there is no device */ - if (bus->number != pp->root_bus_nr) { + if (!pci_is_root_bus(bus)) { if (!dw_pcie_link_up(pci)) return 0; - } - - /* Access only one slot on each root port */ - if (bus->number == pp->root_bus_nr && dev > 0) + } else if (dev > 0) + /* Access only one slot on each root port */ return 0; return 1; @@ -607,7 +594,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == pp->root_bus_nr) + if (pci_is_root_bus(bus)) return dw_pcie_rd_own_conf(pp, where, size, val); return dw_pcie_rd_other_conf(pp, bus, devfn, where, size, val); @@ -621,7 +608,7 @@ static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!dw_pcie_valid_device(pp, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == pp->root_bus_nr) + if (pci_is_root_bus(bus)) return dw_pcie_wr_own_conf(pp, where, size, val); return dw_pcie_wr_other_conf(pp, bus, devfn, where, size, val); diff --git a/drivers/pci/controller/dwc/pcie-designware-plat.c b/drivers/pci/controller/dwc/pcie-designware-plat.c index 73646b677aff..712456f6ce36 100644 --- a/drivers/pci/controller/dwc/pcie-designware-plat.c +++ b/drivers/pci/controller/dwc/pcie-designware-plat.c @@ -153,8 +153,7 @@ static int dw_plat_add_pcie_ep(struct dw_plat_pcie *dw_plat_pcie, ep = &pci->ep; ep->ops = &pcie_ep_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2"); - pci->dbi_base2 = devm_ioremap_resource(dev, res); + pci->dbi_base2 = devm_platform_ioremap_resource_byname(pdev, "dbi2"); if (IS_ERR(pci->dbi_base2)) return PTR_ERR(pci->dbi_base2); diff --git a/drivers/pci/controller/dwc/pcie-designware.c b/drivers/pci/controller/dwc/pcie-designware.c index c92496e36fd5..b723e0cc41fb 100644 --- a/drivers/pci/controller/dwc/pcie-designware.c +++ b/drivers/pci/controller/dwc/pcie-designware.c @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. 
- * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h index 656e00f8fbeb..f911760dcc69 100644 --- a/drivers/pci/controller/dwc/pcie-designware.h +++ b/drivers/pci/controller/dwc/pcie-designware.h @@ -3,7 +3,7 @@ * Synopsys DesignWare PCIe host controller driver * * Copyright (C) 2013 Samsung Electronics Co., Ltd. - * http://www.samsung.com + * https://www.samsung.com * * Author: Jingoo Han <jg1.han@samsung.com> */ @@ -173,7 +173,6 @@ struct dw_pcie_host_ops { }; struct pcie_port { - u8 root_bus_nr; u64 cfg0_base; void __iomem *va_cfg0_base; u32 cfg0_size; diff --git a/drivers/pci/controller/dwc/pcie-hisi.c b/drivers/pci/controller/dwc/pcie-hisi.c index 0ad4e07dd4c2..5ca86796d43a 100644 --- a/drivers/pci/controller/dwc/pcie-hisi.c +++ b/drivers/pci/controller/dwc/pcie-hisi.c @@ -10,15 +10,10 @@ */ #include <linux/interrupt.h> #include <linux/init.h> -#include <linux/mfd/syscon.h> -#include <linux/of_address.h> -#include <linux/of_pci.h> #include <linux/platform_device.h> -#include <linux/of_device.h> #include <linux/pci.h> #include <linux/pci-acpi.h> #include <linux/pci-ecam.h> -#include <linux/regmap.h> #include "../../pci.h" #if defined(CONFIG_PCI_HISI) || (defined(CONFIG_ACPI) && defined(CONFIG_PCI_QUIRKS)) @@ -118,220 +113,6 @@ const struct pci_ecam_ops hisi_pcie_ops = { #ifdef CONFIG_PCI_HISI -#include "pcie-designware.h" - -#define PCIE_SUBCTRL_SYS_STATE4_REG 0x6818 -#define PCIE_HIP06_CTRL_OFF 0x1000 -#define PCIE_SYS_STATE4 (PCIE_HIP06_CTRL_OFF + 0x31c) -#define PCIE_LTSSM_LINKUP_STATE 0x11 -#define PCIE_LTSSM_STATE_MASK 0x3F - -#define to_hisi_pcie(x) dev_get_drvdata((x)->dev) - -struct hisi_pcie; - -struct pcie_soc_ops { - int (*hisi_pcie_link_up)(struct hisi_pcie *hisi_pcie); -}; - -struct hisi_pcie { - struct dw_pcie *pci; - struct regmap *subctrl; - u32 port_id; - const struct pcie_soc_ops *soc_ops; -}; - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_read(struct pcie_port *pp, int where, int size, - u32 *val) -{ - u32 reg; - u32 reg_val; - void *walker = ®_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - reg_val = dw_pcie_readl_dbi(pci, reg); - - if (size == 1) - *val = *(u8 __force *) walker; - else if (size == 2) - *val = *(u16 __force *) walker; - else if (size == 4) - *val = reg_val; - else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -/* HipXX PCIe host only supports 32-bit config access */ -static int hisi_pcie_cfg_write(struct pcie_port *pp, int where, int size, - u32 val) -{ - u32 reg_val; - u32 reg; - void *walker = ®_val; - struct dw_pcie *pci = to_dw_pcie_from_pp(pp); - - walker += (where & 0x3); - reg = where & ~0x3; - if (size == 4) - dw_pcie_writel_dbi(pci, reg, val); - else if (size == 2) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u16 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else if (size == 1) { - reg_val = dw_pcie_readl_dbi(pci, reg); - *(u8 __force *) walker = val; - dw_pcie_writel_dbi(pci, reg, reg_val); - } else - return PCIBIOS_BAD_REGISTER_NUMBER; - - return PCIBIOS_SUCCESSFUL; -} - -static int hisi_pcie_link_up_hip05(struct hisi_pcie *hisi_pcie) -{ - u32 val; - - regmap_read(hisi_pcie->subctrl, PCIE_SUBCTRL_SYS_STATE4_REG + - 0x100 * hisi_pcie->port_id, &val); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int 
hisi_pcie_link_up_hip06(struct hisi_pcie *hisi_pcie) -{ - struct dw_pcie *pci = hisi_pcie->pci; - u32 val; - - val = dw_pcie_readl_dbi(pci, PCIE_SYS_STATE4); - - return ((val & PCIE_LTSSM_STATE_MASK) == PCIE_LTSSM_LINKUP_STATE); -} - -static int hisi_pcie_link_up(struct dw_pcie *pci) -{ - struct hisi_pcie *hisi_pcie = to_hisi_pcie(pci); - - return hisi_pcie->soc_ops->hisi_pcie_link_up(hisi_pcie); -} - -static const struct dw_pcie_host_ops hisi_pcie_host_ops = { - .rd_own_conf = hisi_pcie_cfg_read, - .wr_own_conf = hisi_pcie_cfg_write, -}; - -static int hisi_add_pcie_port(struct hisi_pcie *hisi_pcie, - struct platform_device *pdev) -{ - struct dw_pcie *pci = hisi_pcie->pci; - struct pcie_port *pp = &pci->pp; - struct device *dev = &pdev->dev; - int ret; - u32 port_id; - - if (of_property_read_u32(dev->of_node, "port-id", &port_id)) { - dev_err(dev, "failed to read port-id\n"); - return -EINVAL; - } - if (port_id > 3) { - dev_err(dev, "Invalid port-id: %d\n", port_id); - return -EINVAL; - } - hisi_pcie->port_id = port_id; - - pp->ops = &hisi_pcie_host_ops; - - ret = dw_pcie_host_init(pp); - if (ret) { - dev_err(dev, "failed to initialize host\n"); - return ret; - } - - return 0; -} - -static const struct dw_pcie_ops dw_pcie_ops = { - .link_up = hisi_pcie_link_up, -}; - -static int hisi_pcie_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct dw_pcie *pci; - struct hisi_pcie *hisi_pcie; - struct resource *reg; - int ret; - - hisi_pcie = devm_kzalloc(dev, sizeof(*hisi_pcie), GFP_KERNEL); - if (!hisi_pcie) - return -ENOMEM; - - pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); - if (!pci) - return -ENOMEM; - - pci->dev = dev; - pci->ops = &dw_pcie_ops; - - hisi_pcie->pci = pci; - - hisi_pcie->soc_ops = of_device_get_match_data(dev); - - hisi_pcie->subctrl = - syscon_regmap_lookup_by_compatible("hisilicon,pcie-sas-subctrl"); - if (IS_ERR(hisi_pcie->subctrl)) { - dev_err(dev, "cannot get subctrl base\n"); - return PTR_ERR(hisi_pcie->subctrl); - } - - reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc_dbi"); - pci->dbi_base = devm_pci_remap_cfg_resource(dev, reg); - if (IS_ERR(pci->dbi_base)) - return PTR_ERR(pci->dbi_base); - platform_set_drvdata(pdev, hisi_pcie); - - ret = hisi_add_pcie_port(hisi_pcie, pdev); - if (ret) - return ret; - - return 0; -} - -static struct pcie_soc_ops hip05_ops = { - &hisi_pcie_link_up_hip05 -}; - -static struct pcie_soc_ops hip06_ops = { - &hisi_pcie_link_up_hip06 -}; - -static const struct of_device_id hisi_pcie_of_match[] = { - { - .compatible = "hisilicon,hip05-pcie", - .data = (void *) &hip05_ops, - }, - { - .compatible = "hisilicon,hip06-pcie", - .data = (void *) &hip06_ops, - }, - {}, -}; - -static struct platform_driver hisi_pcie_driver = { - .probe = hisi_pcie_probe, - .driver = { - .name = "hisi-pcie", - .of_match_table = hisi_pcie_of_match, - .suppress_bind_attrs = true, - }, -}; -builtin_platform_driver(hisi_pcie_driver); - static int hisi_pcie_platform_init(struct pci_config_window *cfg) { struct device *dev = cfg->parent; diff --git a/drivers/pci/controller/dwc/pcie-histb.c b/drivers/pci/controller/dwc/pcie-histb.c index 811b5c6d62ea..2a2835746077 100644 --- a/drivers/pci/controller/dwc/pcie-histb.c +++ b/drivers/pci/controller/dwc/pcie-histb.c @@ -304,7 +304,6 @@ static int histb_pcie_probe(struct platform_device *pdev) struct histb_pcie *hipcie; struct dw_pcie *pci; struct pcie_port *pp; - struct resource *res; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; enum of_gpio_flags 
of_flags; @@ -324,15 +323,13 @@ static int histb_pcie_probe(struct platform_device *pdev) pci->dev = dev; pci->ops = &dw_pcie_ops; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control"); - hipcie->ctrl = devm_ioremap_resource(dev, res); + hipcie->ctrl = devm_platform_ioremap_resource_byname(pdev, "control"); if (IS_ERR(hipcie->ctrl)) { dev_err(dev, "cannot get control reg base\n"); return PTR_ERR(hipcie->ctrl); } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rc-dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "rc-dbi"); if (IS_ERR(pci->dbi_base)) { dev_err(dev, "cannot get rc-dbi base\n"); return PTR_ERR(pci->dbi_base); @@ -402,10 +399,8 @@ static int histb_pcie_probe(struct platform_device *pdev) if (IS_ENABLED(CONFIG_PCI_MSI)) { pp->msi_irq = platform_get_irq_byname(pdev, "msi"); - if (pp->msi_irq < 0) { - dev_err(dev, "Failed to get MSI IRQ\n"); + if (pp->msi_irq < 0) return pp->msi_irq; - } } hipcie->phy = devm_phy_get(dev, "phy"); diff --git a/drivers/pci/controller/dwc/pcie-intel-gw.c b/drivers/pci/controller/dwc/pcie-intel-gw.c index 2d8dbb318087..c3b3a1d162b5 100644 --- a/drivers/pci/controller/dwc/pcie-intel-gw.c +++ b/drivers/pci/controller/dwc/pcie-intel-gw.c @@ -253,11 +253,9 @@ static int intel_pcie_get_resources(struct platform_device *pdev) struct intel_pcie_port *lpp = platform_get_drvdata(pdev); struct dw_pcie *pci = &lpp->pci; struct device *dev = pci->dev; - struct resource *res; int ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - pci->dbi_base = devm_ioremap_resource(dev, res); + pci->dbi_base = devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(pci->dbi_base)) return PTR_ERR(pci->dbi_base); @@ -291,8 +289,7 @@ static int intel_pcie_get_resources(struct platform_device *pdev) ret = of_pci_get_max_link_speed(dev->of_node); lpp->link_gen = ret < 0 ? 0 : ret; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "app"); - lpp->app_base = devm_ioremap_resource(dev, res); + lpp->app_base = devm_platform_ioremap_resource_byname(pdev, "app"); if (IS_ERR(lpp->app_base)) return PTR_ERR(lpp->app_base); diff --git a/drivers/pci/controller/dwc/pcie-kirin.c b/drivers/pci/controller/dwc/pcie-kirin.c index c19617a912bd..e496f51e0152 100644 --- a/drivers/pci/controller/dwc/pcie-kirin.c +++ b/drivers/pci/controller/dwc/pcie-kirin.c @@ -3,7 +3,7 @@ * PCIe host controller driver for Kirin Phone SoCs * * Copyright (C) 2017 HiSilicon Electronics Co., Ltd. 
- * http://www.huawei.com + * https://www.huawei.com * * Author: Xiaowei Song <songxiaowei@huawei.com> */ @@ -147,23 +147,18 @@ static long kirin_pcie_get_clk(struct kirin_pcie *kirin_pcie, static long kirin_pcie_get_resource(struct kirin_pcie *kirin_pcie, struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct resource *apb; - struct resource *phy; - struct resource *dbi; - - apb = platform_get_resource_byname(pdev, IORESOURCE_MEM, "apb"); - kirin_pcie->apb_base = devm_ioremap_resource(dev, apb); + kirin_pcie->apb_base = + devm_platform_ioremap_resource_byname(pdev, "apb"); if (IS_ERR(kirin_pcie->apb_base)) return PTR_ERR(kirin_pcie->apb_base); - phy = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy"); - kirin_pcie->phy_base = devm_ioremap_resource(dev, phy); + kirin_pcie->phy_base = + devm_platform_ioremap_resource_byname(pdev, "phy"); if (IS_ERR(kirin_pcie->phy_base)) return PTR_ERR(kirin_pcie->phy_base); - dbi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); - kirin_pcie->pci->dbi_base = devm_ioremap_resource(dev, dbi); + kirin_pcie->pci->dbi_base = + devm_platform_ioremap_resource_byname(pdev, "dbi"); if (IS_ERR(kirin_pcie->pci->dbi_base)) return PTR_ERR(kirin_pcie->pci->dbi_base); @@ -455,11 +450,8 @@ static int kirin_pcie_add_msi(struct dw_pcie *pci, if (IS_ENABLED(CONFIG_PCI_MSI)) { irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, - "failed to get MSI IRQ (%d)\n", irq); + if (irq < 0) return irq; - } pci->pp.msi_irq = irq; } diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c index 138e1a2d21cc..3aac77a295ba 100644 --- a/drivers/pci/controller/dwc/pcie-qcom.c +++ b/drivers/pci/controller/dwc/pcie-qcom.c @@ -27,6 +27,7 @@ #include <linux/slab.h> #include <linux/types.h> +#include "../../pci.h" #include "pcie-designware.h" #define PCIE20_PARF_SYS_CTRL 0x00 @@ -39,13 +40,14 @@ #define L23_CLK_RMV_DIS BIT(2) #define L1_CLK_RMV_DIS BIT(1) -#define PCIE20_COMMAND_STATUS 0x04 -#define CMD_BME_VAL 0x4 -#define PCIE20_DEVICE_CONTROL2_STATUS2 0x98 -#define PCIE_CAP_CPL_TIMEOUT_DISABLE 0x10 - #define PCIE20_PARF_PHY_CTRL 0x40 +#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK GENMASK(20, 16) +#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x) ((x) << 16) + #define PCIE20_PARF_PHY_REFCLK 0x4C +#define PHY_REFCLK_SSP_EN BIT(16) +#define PHY_REFCLK_USE_PAD BIT(12) + #define PCIE20_PARF_DBI_BASE_ADDR 0x168 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE 0x16C #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL 0x174 @@ -66,8 +68,8 @@ #define CFG_BRIDGE_SB_INIT BIT(0) #define PCIE20_CAP 0x70 -#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + 0xC) -#define PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT (BIT(10) | BIT(11)) +#define PCIE20_DEVICE_CONTROL2_STATUS2 (PCIE20_CAP + PCI_EXP_DEVCTL2) +#define PCIE20_CAP_LINK_CAPABILITIES (PCIE20_CAP + PCI_EXP_LNKCAP) #define PCIE20_CAP_LINK_1 (PCIE20_CAP + 0x14) #define PCIE_CAP_LINK1_VAL 0x2FD7F @@ -77,22 +79,36 @@ #define DBI_RO_WR_EN 1 #define PERST_DELAY_US 1000 +/* PARF registers */ +#define PCIE20_PARF_PCS_DEEMPH 0x34 +#define PCS_DEEMPH_TX_DEEMPH_GEN1(x) ((x) << 16) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x) ((x) << 8) +#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x) ((x) << 0) + +#define PCIE20_PARF_PCS_SWING 0x38 +#define PCS_SWING_TX_SWING_FULL(x) ((x) << 8) +#define PCS_SWING_TX_SWING_LOW(x) ((x) << 0) + +#define PCIE20_PARF_CONFIG_BITS 0x50 +#define PHY_RX0_EQ(x) ((x) << 24) #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE 0x358 #define SLV_ADDR_SPACE_SZ 0x10000000 +#define 
PCIE20_LNK_CONTROL2_LINK_STATUS2 0xa0 + #define DEVICE_TYPE_RC 0x4 #define QCOM_PCIE_2_1_0_MAX_SUPPLY 3 +#define QCOM_PCIE_2_1_0_MAX_CLOCKS 5 struct qcom_pcie_resources_2_1_0 { - struct clk *iface_clk; - struct clk *core_clk; - struct clk *phy_clk; + struct clk_bulk_data clks[QCOM_PCIE_2_1_0_MAX_CLOCKS]; struct reset_control *pci_reset; struct reset_control *axi_reset; struct reset_control *ahb_reset; struct reset_control *por_reset; struct reset_control *phy_reset; + struct reset_control *ext_reset; struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY]; }; @@ -177,6 +193,7 @@ struct qcom_pcie { struct phy *phy; struct gpio_desc *reset; const struct qcom_pcie_ops *ops; + int gen; }; #define to_qcom_pcie(x) dev_get_drvdata((x)->dev) @@ -234,17 +251,21 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (ret) return ret; - res->iface_clk = devm_clk_get(dev, "iface"); - if (IS_ERR(res->iface_clk)) - return PTR_ERR(res->iface_clk); + res->clks[0].id = "iface"; + res->clks[1].id = "core"; + res->clks[2].id = "phy"; + res->clks[3].id = "aux"; + res->clks[4].id = "ref"; - res->core_clk = devm_clk_get(dev, "core"); - if (IS_ERR(res->core_clk)) - return PTR_ERR(res->core_clk); + /* iface, core, phy are required */ + ret = devm_clk_bulk_get(dev, 3, res->clks); + if (ret < 0) + return ret; - res->phy_clk = devm_clk_get(dev, "phy"); - if (IS_ERR(res->phy_clk)) - return PTR_ERR(res->phy_clk); + /* aux, ref are optional */ + ret = devm_clk_bulk_get_optional(dev, 2, res->clks + 3); + if (ret < 0) + return ret; res->pci_reset = devm_reset_control_get_exclusive(dev, "pci"); if (IS_ERR(res->pci_reset)) @@ -262,6 +283,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie) if (IS_ERR(res->por_reset)) return PTR_ERR(res->por_reset); + res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext"); + if (IS_ERR(res->ext_reset)) + return PTR_ERR(res->ext_reset); + res->phy_reset = devm_reset_control_get_exclusive(dev, "phy"); return PTR_ERR_OR_ZERO(res->phy_reset); } @@ -270,14 +295,13 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie) { struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; + clk_bulk_disable_unprepare(ARRAY_SIZE(res->clks), res->clks); reset_control_assert(res->pci_reset); reset_control_assert(res->axi_reset); reset_control_assert(res->ahb_reset); reset_control_assert(res->por_reset); - reset_control_assert(res->pci_reset); - clk_disable_unprepare(res->iface_clk); - clk_disable_unprepare(res->core_clk); - clk_disable_unprepare(res->phy_clk); + reset_control_assert(res->ext_reset); + reset_control_assert(res->phy_reset); regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); } @@ -286,6 +310,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0; struct dw_pcie *pci = pcie->pci; struct device *dev = pci->dev; + struct device_node *node = dev->of_node; u32 val; int ret; @@ -295,73 +320,85 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return ret; } - ret = reset_control_assert(res->ahb_reset); - if (ret) { - dev_err(dev, "cannot assert ahb reset\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->iface_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable iface clock\n"); - goto err_assert_ahb; - } - - ret = clk_prepare_enable(res->phy_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable phy clock\n"); - goto err_clk_phy; - } - - ret = clk_prepare_enable(res->core_clk); - if (ret) { - dev_err(dev, "cannot prepare/enable 
core clock\n"); - goto err_clk_core; - } - ret = reset_control_deassert(res->ahb_reset); if (ret) { dev_err(dev, "cannot deassert ahb reset\n"); goto err_deassert_ahb; } - /* enable PCIe clocks and resets */ - val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); - val &= ~BIT(0); - writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); - - /* enable external reference clock */ - val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); - val |= BIT(16); - writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + ret = reset_control_deassert(res->ext_reset); + if (ret) { + dev_err(dev, "cannot deassert ext reset\n"); + goto err_deassert_ext; + } ret = reset_control_deassert(res->phy_reset); if (ret) { dev_err(dev, "cannot deassert phy reset\n"); - return ret; + goto err_deassert_phy; } ret = reset_control_deassert(res->pci_reset); if (ret) { dev_err(dev, "cannot deassert pci reset\n"); - return ret; + goto err_deassert_pci; } ret = reset_control_deassert(res->por_reset); if (ret) { dev_err(dev, "cannot deassert por reset\n"); - return ret; + goto err_deassert_por; } ret = reset_control_deassert(res->axi_reset); if (ret) { dev_err(dev, "cannot deassert axi reset\n"); - return ret; + goto err_deassert_axi; } + ret = clk_bulk_prepare_enable(ARRAY_SIZE(res->clks), res->clks); + if (ret) + goto err_clks; + + /* enable PCIe clocks and resets */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~BIT(0); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064") || + of_device_is_compatible(node, "qcom,pcie-ipq8064-v2")) { + writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) | + PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34), + pcie->parf + PCIE20_PARF_PCS_DEEMPH); + writel(PCS_SWING_TX_SWING_FULL(120) | + PCS_SWING_TX_SWING_LOW(120), + pcie->parf + PCIE20_PARF_PCS_SWING); + writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS); + } + + if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) { + /* set TX termination offset */ + val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL); + val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK; + val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7); + writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL); + } + + /* enable external reference clock */ + val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK); + val &= ~PHY_REFCLK_USE_PAD; + val |= PHY_REFCLK_SSP_EN; + writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK); + /* wait for clock acquisition */ usleep_range(1000, 1500); + if (pcie->gen == 1) { + val = readl(pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); + val |= PCI_EXP_LNKSTA_CLS_2_5GB; + writel(val, pci->dbi_base + PCIE20_LNK_CONTROL2_LINK_STATUS2); + } /* Set the Max TLP size to 2K, instead of using default of 4K */ writel(CFG_REMOTE_RD_REQ_BRIDGE_SIZE_2K, @@ -371,13 +408,19 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie) return 0; +err_clks: + reset_control_assert(res->axi_reset); +err_deassert_axi: + reset_control_assert(res->por_reset); +err_deassert_por: + reset_control_assert(res->pci_reset); +err_deassert_pci: + reset_control_assert(res->phy_reset); +err_deassert_phy: + reset_control_assert(res->ext_reset); +err_deassert_ext: + reset_control_assert(res->ahb_reset); err_deassert_ahb: - clk_disable_unprepare(res->core_clk); -err_clk_core: - clk_disable_unprepare(res->phy_clk); -err_clk_phy: - clk_disable_unprepare(res->iface_clk); -err_assert_ahb: regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies); return ret; @@ -1047,15 +1090,15 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie) pcie->parf + 
PCIE20_PARF_SYS_CTRL); writel(0, pcie->parf + PCIE20_PARF_Q2A_FLUSH); - writel(CMD_BME_VAL, pci->dbi_base + PCIE20_COMMAND_STATUS); + writel(PCI_COMMAND_MASTER, pci->dbi_base + PCI_COMMAND); writel(DBI_RO_WR_EN, pci->dbi_base + PCIE20_MISC_CONTROL_1_REG); writel(PCIE_CAP_LINK1_VAL, pci->dbi_base + PCIE20_CAP_LINK_1); val = readl(pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - val &= ~PCIE20_CAP_ACTIVE_STATE_LINK_PM_SUPPORT; + val &= ~PCI_EXP_LNKCAP_ASPMS; writel(val, pci->dbi_base + PCIE20_CAP_LINK_CAPABILITIES); - writel(PCIE_CAP_CPL_TIMEOUT_DISABLE, pci->dbi_base + + writel(PCI_EXP_DEVCTL2_COMP_TMOUT_DIS, pci->dbi_base + PCIE20_DEVICE_CONTROL2_STATUS2); return 0; @@ -1339,10 +1382,8 @@ static int qcom_pcie_probe(struct platform_device *pdev) pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_disable(dev); - return ret; - } + if (ret < 0) + goto err_pm_runtime_put; pci->dev = dev; pci->ops = &dw_pcie_ops; @@ -1358,8 +1399,11 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "parf"); - pcie->parf = devm_ioremap_resource(dev, res); + pcie->gen = of_pci_get_max_link_speed(pdev->dev.of_node); + if (pcie->gen < 0) + pcie->gen = 2; + + pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf"); if (IS_ERR(pcie->parf)) { ret = PTR_ERR(pcie->parf); goto err_pm_runtime_put; @@ -1372,8 +1416,7 @@ static int qcom_pcie_probe(struct platform_device *pdev) goto err_pm_runtime_put; } - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "elbi"); - pcie->elbi = devm_ioremap_resource(dev, res); + pcie->elbi = devm_platform_ioremap_resource_byname(pdev, "elbi"); if (IS_ERR(pcie->elbi)) { ret = PTR_ERR(pcie->elbi); goto err_pm_runtime_put; @@ -1426,6 +1469,7 @@ err_pm_runtime_put: static const struct of_device_id qcom_pcie_match[] = { { .compatible = "qcom,pcie-apq8084", .data = &ops_1_0_0 }, { .compatible = "qcom,pcie-ipq8064", .data = &ops_2_1_0 }, + { .compatible = "qcom,pcie-ipq8064-v2", .data = &ops_2_1_0 }, { .compatible = "qcom,pcie-apq8064", .data = &ops_2_1_0 }, { .compatible = "qcom,pcie-msm8996", .data = &ops_2_3_2 }, { .compatible = "qcom,pcie-ipq8074", .data = &ops_2_3_3 }, diff --git a/drivers/pci/controller/dwc/pcie-spear13xx.c b/drivers/pci/controller/dwc/pcie-spear13xx.c index 7d0cdfd8138b..62846562da0b 100644 --- a/drivers/pci/controller/dwc/pcie-spear13xx.c +++ b/drivers/pci/controller/dwc/pcie-spear13xx.c @@ -198,10 +198,9 @@ static int spear13xx_add_pcie_port(struct spear13xx_pcie *spear13xx_pcie, int ret; pp->irq = platform_get_irq(pdev, 0); - if (pp->irq < 0) { - dev_err(dev, "failed to get irq\n"); + if (pp->irq < 0) return pp->irq; - } + ret = devm_request_irq(dev, pp->irq, spear13xx_pcie_irq_handler, IRQF_SHARED | IRQF_NO_THREAD, "spear1340-pcie", spear13xx_pcie); @@ -273,7 +272,6 @@ static int spear13xx_pcie_probe(struct platform_device *pdev) dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi"); pci->dbi_base = devm_pci_remap_cfg_resource(dev, dbi_base); if (IS_ERR(pci->dbi_base)) { - dev_err(dev, "couldn't remap dbi base %p\n", dbi_base); ret = PTR_ERR(pci->dbi_base); goto fail_clk; } diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index 92b77f7d8354..70498689d0c0 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -2189,10 +2189,8 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev) } pp->irq = 
platform_get_irq_byname(pdev, "intr"); - if (pp->irq < 0) { - dev_err(dev, "Failed to get \"intr\" interrupt\n"); + if (pp->irq < 0) return pp->irq; - } pcie->bpmp = tegra_bpmp_get(dev); if (IS_ERR(pcie->bpmp)) diff --git a/drivers/pci/controller/dwc/pcie-uniphier.c b/drivers/pci/controller/dwc/pcie-uniphier.c index a5401a0b1e58..3a7f403b57b8 100644 --- a/drivers/pci/controller/dwc/pcie-uniphier.c +++ b/drivers/pci/controller/dwc/pcie-uniphier.c @@ -416,8 +416,7 @@ static int uniphier_pcie_probe(struct platform_device *pdev) if (IS_ERR(priv->pci.dbi_base)) return PTR_ERR(priv->pci.dbi_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "link"); - priv->base = devm_ioremap_resource(dev, res); + priv->base = devm_platform_ioremap_resource_byname(pdev, "link"); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); diff --git a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c index a6d2190a6753..ee0156921ebc 100644 --- a/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c +++ b/drivers/pci/controller/mobiveil/pcie-layerscape-gen4.c @@ -170,10 +170,9 @@ static int ls_pcie_g4_interrupt_init(struct mobiveil_pcie *mv_pci) int ret; pcie->irq = platform_get_irq_byname(pdev, "intr"); - if (pcie->irq < 0) { - dev_err(dev, "Can't get 'intr' IRQ, errno = %d\n", pcie->irq); + if (pcie->irq < 0) return pcie->irq; - } + ret = devm_request_irq(dev, pcie->irq, ls_pcie_g4_isr, IRQF_SHARED, pdev->name, pcie); if (ret) { diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c index 5907baa9b1f2..3adec419a45b 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil-host.c @@ -29,18 +29,15 @@ static bool mobiveil_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) { - struct mobiveil_pcie *pcie = bus->sysdata; - struct mobiveil_root_port *rp = &pcie->rp; - /* Only one device down on each root port */ - if ((bus->number == rp->root_bus_nr) && (devfn > 0)) + if (pci_is_root_bus(bus) && (devfn > 0)) return false; /* * Do not read more than one device on the bus directly * attached to RC */ - if ((bus->primary == rp->root_bus_nr) && (PCI_SLOT(devfn) > 0)) + if ((bus->primary == to_pci_host_bridge(bus->bridge)->busnr) && (PCI_SLOT(devfn) > 0)) return false; return true; @@ -61,7 +58,7 @@ static void __iomem *mobiveil_pcie_map_bus(struct pci_bus *bus, return NULL; /* RC config access */ - if (bus->number == rp->root_bus_nr) + if (pci_is_root_bus(bus)) return pcie->csr_axi_slave_base + where; /* @@ -522,10 +519,8 @@ static int mobiveil_pcie_integrated_interrupt_init(struct mobiveil_pcie *pcie) mobiveil_pcie_enable_msi(pcie); rp->irq = platform_get_irq(pdev, 0); - if (rp->irq < 0) { - dev_err(dev, "failed to map IRQ: %d\n", rp->irq); + if (rp->irq < 0) return rp->irq; - } /* initialize the IRQ domains */ ret = mobiveil_pcie_init_irq_domain(pcie); @@ -569,8 +564,6 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) struct mobiveil_root_port *rp = &pcie->rp; struct pci_host_bridge *bridge = rp->bridge; struct device *dev = &pcie->pdev->dev; - struct pci_bus *bus; - struct pci_bus *child; int ret; ret = mobiveil_pcie_parse_dt(pcie); @@ -582,14 +575,6 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) if (!mobiveil_pcie_is_bridge(pcie)) return -ENODEV; - /* parse the host bridge base addresses from the device tree file */ - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - 
&bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Getting bridge resources failed\n"); - return ret; - } - /* * configure all inbound and outbound windows and prepare the RC for * config access @@ -607,12 +592,8 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) } /* Initialize bridge */ - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = rp->root_bus_nr; bridge->ops = &mobiveil_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = mobiveil_bringup_link(pcie); if (ret) { @@ -620,17 +601,5 @@ int mobiveil_pcie_host_probe(struct mobiveil_pcie *pcie) return ret; } - /* setup the kernel resources for the newly added PCIe root bus */ - ret = pci_scan_root_bus_bridge(bridge); - if (ret) - return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - - return 0; + return pci_host_probe(bridge); } diff --git a/drivers/pci/controller/mobiveil/pcie-mobiveil.h b/drivers/pci/controller/mobiveil/pcie-mobiveil.h index 767e36a8522d..6082b8afbc31 100644 --- a/drivers/pci/controller/mobiveil/pcie-mobiveil.h +++ b/drivers/pci/controller/mobiveil/pcie-mobiveil.h @@ -149,7 +149,6 @@ struct mobiveil_rp_ops { }; struct mobiveil_root_port { - char root_bus_nr; void __iomem *config_axi_slave_base; /* endpoint config base */ struct resource *ob_io_res; struct mobiveil_rp_ops *ops; diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c index 90ff291c24f0..1559f79e63b6 100644 --- a/drivers/pci/controller/pci-aardvark.c +++ b/drivers/pci/controller/pci-aardvark.c @@ -195,7 +195,6 @@ struct advk_pcie { DECLARE_BITMAP(msi_used, MSI_IRQ_NUM); struct mutex msi_used_lock; u16 msi_msg; - int root_bus_nr; int link_gen; struct pci_bridge_emul bridge; struct gpio_desc *reset_gpio; @@ -641,7 +640,14 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie) static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus, int devfn) { - if ((bus->number == pcie->root_bus_nr) && PCI_SLOT(devfn) != 0) + if (pci_is_root_bus(bus) && PCI_SLOT(devfn) != 0) + return false; + + /* + * If the link goes down after we check for link-up, nothing bad + * happens but the config access times out. 
+ */ + if (!pci_is_root_bus(bus) && !advk_pcie_link_up(pcie)) return false; return true; @@ -659,7 +665,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == pcie->root_bus_nr) + if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_read(&pcie->bridge, where, size, val); @@ -670,7 +676,7 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->primary == pcie->root_bus_nr) + if (pci_is_root_bus(bus->parent)) reg |= PCIE_CONFIG_RD_TYPE0; else reg |= PCIE_CONFIG_RD_TYPE1; @@ -688,8 +694,10 @@ static int advk_pcie_rd_conf(struct pci_bus *bus, u32 devfn, advk_writel(pcie, 1, PIO_START); ret = advk_pcie_wait_pio(pcie); - if (ret < 0) + if (ret < 0) { + *val = 0xffffffff; return PCIBIOS_SET_FAILED; + } advk_pcie_check_pio_status(pcie); @@ -715,7 +723,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!advk_pcie_valid_device(pcie, bus, devfn)) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == pcie->root_bus_nr) + if (pci_is_root_bus(bus)) return pci_bridge_emul_conf_write(&pcie->bridge, where, size, val); @@ -729,7 +737,7 @@ static int advk_pcie_wr_conf(struct pci_bus *bus, u32 devfn, /* Program the control register */ reg = advk_readl(pcie, PIO_CTRL); reg &= ~PIO_CTRL_TYPE_MASK; - if (bus->primary == pcie->root_bus_nr) + if (pci_is_root_bus(bus->parent)) reg |= PCIE_CONFIG_WR_TYPE0; else reg |= PCIE_CONFIG_WR_TYPE1; @@ -1105,7 +1113,6 @@ static int advk_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct advk_pcie *pcie; - struct resource *res, *bus; struct pci_host_bridge *bridge; int ret, irq; @@ -1116,8 +1123,7 @@ static int advk_pcie_probe(struct platform_device *pdev) pcie = pci_host_bridge_priv(bridge); pcie->pdev = pdev; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(dev, res); + pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); @@ -1133,14 +1139,6 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, &bus); - if (ret) { - dev_err(dev, "Failed to parse resources\n"); - return ret; - } - pcie->root_bus_nr = bus->start; - pcie->reset_gpio = devm_gpiod_get_from_of_node(dev, dev->of_node, "reset-gpios", 0, GPIOD_OUT_LOW, @@ -1184,12 +1182,8 @@ static int advk_pcie_probe(struct platform_device *pdev) return ret; } - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = 0; bridge->ops = &advk_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; ret = pci_host_probe(bridge); if (ret < 0) { diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c index 1b67564de7af..da3cd216da00 100644 --- a/drivers/pci/controller/pci-ftpci100.c +++ b/drivers/pci/controller/pci-ftpci100.c @@ -422,7 +422,6 @@ static int faraday_pci_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct faraday_pci_variant *variant = of_device_get_match_data(dev); - struct resource *regs; struct resource_entry *win; struct faraday_pci *p; struct resource *io; @@ -437,12 +436,7 @@ static int faraday_pci_probe(struct platform_device *pdev) if (!host) return -ENOMEM; - host->dev.parent = dev; host->ops = &faraday_pci_ops; - host->busnr = 0; - host->msi = NULL; - host->map_irq = 
of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; p = pci_host_bridge_priv(host); host->sysdata = p; p->dev = dev; @@ -465,16 +459,10 @@ static int faraday_pci_probe(struct platform_device *pdev) return ret; } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - p->base = devm_ioremap_resource(dev, regs); + p->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(p->base)) return PTR_ERR(p->base); - ret = pci_parse_request_of_pci_ranges(dev, &host->windows, - &host->dma_ranges, NULL); - if (ret) - return ret; - win = resource_list_first_type(&host->windows, IORESOURCE_IO); if (win) { io = win->res; diff --git a/drivers/pci/controller/pci-host-common.c b/drivers/pci/controller/pci-host-common.c index 953de57f6c57..6ce34a1deecb 100644 --- a/drivers/pci/controller/pci-host-common.c +++ b/drivers/pci/controller/pci-host-common.c @@ -21,39 +21,32 @@ static void gen_pci_unmap_cfg(void *ptr) } static struct pci_config_window *gen_pci_init(struct device *dev, - struct list_head *resources, const struct pci_ecam_ops *ops) + struct pci_host_bridge *bridge, const struct pci_ecam_ops *ops) { int err; struct resource cfgres; - struct resource *bus_range = NULL; + struct resource_entry *bus; struct pci_config_window *cfg; - /* Parse our PCI ranges and request their resources */ - err = pci_parse_request_of_pci_ranges(dev, resources, NULL, &bus_range); - if (err) - return ERR_PTR(err); - err = of_address_to_resource(dev->of_node, 0, &cfgres); if (err) { dev_err(dev, "missing \"reg\" property\n"); - goto err_out; + return ERR_PTR(err); } - cfg = pci_ecam_create(dev, &cfgres, bus_range, ops); - if (IS_ERR(cfg)) { - err = PTR_ERR(cfg); - goto err_out; - } + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) + return ERR_PTR(-ENODEV); + + cfg = pci_ecam_create(dev, &cfgres, bus->res, ops); + if (IS_ERR(cfg)) + return cfg; err = devm_add_action_or_reset(dev, gen_pci_unmap_cfg, cfg); - if (err) { - goto err_out; - } - return cfg; + if (err) + return ERR_PTR(err); -err_out: - pci_free_resource_list(resources); - return ERR_PTR(err); + return cfg; } int pci_host_common_probe(struct platform_device *pdev) @@ -61,9 +54,7 @@ int pci_host_common_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct pci_host_bridge *bridge; struct pci_config_window *cfg; - struct list_head resources; const struct pci_ecam_ops *ops; - int ret; ops = of_device_get_match_data(&pdev->dev); if (!ops) @@ -76,7 +67,7 @@ int pci_host_common_probe(struct platform_device *pdev) of_pci_check_probe_only(); /* Parse and map our Configuration Space windows */ - cfg = gen_pci_init(dev, &resources, ops); + cfg = gen_pci_init(dev, bridge, ops); if (IS_ERR(cfg)) return PTR_ERR(cfg); @@ -84,32 +75,22 @@ int pci_host_common_probe(struct platform_device *pdev) if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_BUS); - list_splice_init(&resources, &bridge->windows); - bridge->dev.parent = dev; bridge->sysdata = cfg; - bridge->busnr = cfg->busr.start; bridge->ops = (struct pci_ops *)&ops->pci_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_host_probe(bridge); - if (ret < 0) { - pci_free_resource_list(&resources); - return ret; - } + platform_set_drvdata(pdev, bridge); - platform_set_drvdata(pdev, bridge->bus); - return 0; + return pci_host_probe(bridge); } EXPORT_SYMBOL_GPL(pci_host_common_probe); int pci_host_common_remove(struct platform_device *pdev) { - struct pci_bus *bus = platform_get_drvdata(pdev); 
+ struct pci_host_bridge *bridge = platform_get_drvdata(pdev); pci_lock_rescan_remove(); - pci_stop_root_bus(bus); - pci_remove_root_bus(bus); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); pci_unlock_rescan_remove(); return 0; diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index bf40ff09c99d..fc4c3a15e570 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -938,8 +938,9 @@ out: * * Return: 0 on success, -errno on failure */ -int hv_read_config_block(struct pci_dev *pdev, void *buf, unsigned int len, - unsigned int block_id, unsigned int *bytes_returned) +static int hv_read_config_block(struct pci_dev *pdev, void *buf, + unsigned int len, unsigned int block_id, + unsigned int *bytes_returned) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, @@ -1018,8 +1019,8 @@ static void hv_pci_write_config_compl(void *context, struct pci_response *resp, * * Return: 0 on success, -errno on failure */ -int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, - unsigned int block_id) +static int hv_write_config_block(struct pci_dev *pdev, void *buf, + unsigned int len, unsigned int block_id) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, @@ -1087,9 +1088,9 @@ int hv_write_config_block(struct pci_dev *pdev, void *buf, unsigned int len, * * Return: 0 on success, -errno on failure */ -int hv_register_block_invalidate(struct pci_dev *pdev, void *context, - void (*block_invalidate)(void *context, - u64 block_mask)) +static int hv_register_block_invalidate(struct pci_dev *pdev, void *context, + void (*block_invalidate)(void *context, + u64 block_mask)) { struct hv_pcibus_device *hbus = container_of(pdev->bus->sysdata, struct hv_pcibus_device, @@ -2759,10 +2760,8 @@ static int hv_pci_enter_d0(struct hv_device *hdev) struct pci_bus_d0_entry *d0_entry; struct hv_pci_compl comp_pkt; struct pci_packet *pkt; - bool retry = true; int ret; -enter_d0_retry: /* * Tell the host that the bus is ready to use, and moved into the * powered-on state. This includes telling the host which region @@ -2789,38 +2788,6 @@ enter_d0_retry: if (ret) goto exit; - /* - * In certain case (Kdump) the pci device of interest was - * not cleanly shut down and resource is still held on host - * side, the host could return invalid device status. - * We need to explicitly request host to release the resource - * and try to enter D0 again. - */ - if (comp_pkt.completion_status < 0 && retry) { - retry = false; - - dev_err(&hdev->device, "Retrying D0 Entry\n"); - - /* - * Hv_pci_bus_exit() calls hv_send_resource_released() - * to free up resources of its child devices. - * In the kdump kernel we need to set the - * wslot_res_allocated to 255 so it scans all child - * devices to release resources allocated in the - * normal kernel before panic happened. 
- */ - hbus->wslot_res_allocated = 255; - - ret = hv_pci_bus_exit(hdev, true); - - if (ret == 0) { - kfree(pkt); - goto enter_d0_retry; - } - dev_err(&hdev->device, - "Retrying D0 failed with ret %d\n", ret); - } - if (comp_pkt.completion_status < 0) { dev_err(&hdev->device, "PCI Pass-through VSP failed D0 Entry with status %x\n", @@ -3058,6 +3025,7 @@ static int hv_pci_probe(struct hv_device *hdev, struct hv_pcibus_device *hbus; u16 dom_req, dom; char *name; + bool enter_d0_retry = true; int ret; /* @@ -3178,11 +3146,47 @@ static int hv_pci_probe(struct hv_device *hdev, if (ret) goto free_fwnode; +retry: ret = hv_pci_query_relations(hdev); if (ret) goto free_irq_domain; ret = hv_pci_enter_d0(hdev); + /* + * In certain case (Kdump) the pci device of interest was + * not cleanly shut down and resource is still held on host + * side, the host could return invalid device status. + * We need to explicitly request host to release the resource + * and try to enter D0 again. + * Since the hv_pci_bus_exit() call releases structures + * of all its child devices, we need to start the retry from + * hv_pci_query_relations() call, requesting host to send + * the synchronous child device relations message before this + * information is needed in hv_send_resources_allocated() + * call later. + */ + if (ret == -EPROTO && enter_d0_retry) { + enter_d0_retry = false; + + dev_err(&hdev->device, "Retrying D0 Entry\n"); + + /* + * Hv_pci_bus_exit() calls hv_send_resources_released() + * to free up resources of its child devices. + * In the kdump kernel we need to set the + * wslot_res_allocated to 255 so it scans all child + * devices to release resources allocated in the + * normal kernel before panic happened. + */ + hbus->wslot_res_allocated = 255; + ret = hv_pci_bus_exit(hdev, true); + + if (ret == 0) + goto retry; + + dev_err(&hdev->device, + "Retrying D0 failed with ret %d\n", ret); + } if (ret) goto free_irq_domain; diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index 459009c8a4a0..719c19fe2bfb 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -37,11 +37,11 @@ static void bridge_class_quirk(struct pci_dev *dev) { dev->class = PCI_CLASS_BRIDGE_PCI << 8; } -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_0, bridge_class_quirk); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_1, bridge_class_quirk); -DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_LOONGSON, +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_PCIE_PORT_2, bridge_class_quirk); static void system_bus_quirk(struct pci_dev *pdev) @@ -218,14 +218,6 @@ static int loongson_pci_probe(struct platform_device *pdev) } } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "failed to get bridge resources\n"); - return err; - } - - bridge->dev.parent = dev; bridge->sysdata = priv; bridge->ops = &loongson_pci_ops; bridge->map_irq = loongson_map_irq; diff --git a/drivers/pci/controller/pci-mvebu.c b/drivers/pci/controller/pci-mvebu.c index 153a64676bc9..c39978b750ec 100644 --- a/drivers/pci/controller/pci-mvebu.c +++ b/drivers/pci/controller/pci-mvebu.c @@ -71,7 +71,6 @@ struct mvebu_pcie { struct platform_device *pdev; struct mvebu_pcie_port *ports; struct msi_controller *msi; - struct list_head resources; struct resource io; struct resource realio; struct resource mem; @@ 
-105,6 +104,7 @@ struct mvebu_pcie_port { struct mvebu_pcie_window memwin; struct mvebu_pcie_window iowin; u32 saved_pcie_stat; + struct resource regs; }; static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg) @@ -149,7 +149,9 @@ static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr) /* * Setup PCIE BARs and Address Decode Wins: - * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks + * BAR[0] -> internal registers (needed for MSI) + * BAR[1] -> covers all DRAM banks + * BAR[2] -> Disabled * WIN[0-3] -> DRAM bank[0-3] */ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) @@ -203,6 +205,12 @@ static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port) mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1)); mvebu_writel(port, ((size - 1) & 0xffff0000) | 1, PCIE_BAR_CTRL_OFF(1)); + + /* + * Point BAR[0] to the device's internal registers. + */ + mvebu_writel(port, round_down(port->regs.start, SZ_1M), PCIE_BAR_LO_OFF(0)); + mvebu_writel(port, 0, PCIE_BAR_HI_OFF(0)); } static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port) @@ -708,14 +716,13 @@ static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev, struct device_node *np, struct mvebu_pcie_port *port) { - struct resource regs; int ret = 0; - ret = of_address_to_resource(np, 0, ®s); + ret = of_address_to_resource(np, 0, &port->regs); if (ret) return (void __iomem *)ERR_PTR(ret); - return devm_ioremap_resource(&pdev->dev, ®s); + return devm_ioremap_resource(&pdev->dev, &port->regs); } #define DT_FLAGS_TO_TYPE(flags) (((flags) >> 24) & 0x03) @@ -961,17 +968,16 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) { struct device *dev = &pcie->pdev->dev; struct device_node *np = dev->of_node; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); int ret; - INIT_LIST_HEAD(&pcie->resources); - /* Get the bus range */ ret = of_pci_parse_bus_range(np, &pcie->busn); if (ret) { dev_err(dev, "failed to parse bus-range property: %d\n", ret); return ret; } - pci_add_resource(&pcie->resources, &pcie->busn); + pci_add_resource(&bridge->windows, &pcie->busn); /* Get the PCIe memory aperture */ mvebu_mbus_get_pcie_mem_aperture(&pcie->mem); @@ -981,7 +987,7 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) } pcie->mem.name = "PCI MEM"; - pci_add_resource(&pcie->resources, &pcie->mem); + pci_add_resource(&bridge->windows, &pcie->mem); /* Get the PCIe IO aperture */ mvebu_mbus_get_pcie_io_aperture(&pcie->io); @@ -994,10 +1000,10 @@ static int mvebu_pcie_parse_request_resources(struct mvebu_pcie *pcie) resource_size(&pcie->io) - 1); pcie->realio.name = "PCI I/O"; - pci_add_resource(&pcie->resources, &pcie->realio); + pci_add_resource(&bridge->windows, &pcie->realio); } - return devm_request_pci_bus_resources(dev, &pcie->resources); + return devm_request_pci_bus_resources(dev, &bridge->windows); } /* @@ -1118,13 +1124,8 @@ static int mvebu_pcie_probe(struct platform_device *pdev) pcie->nports = i; - list_splice_init(&pcie->resources, &bridge->windows); - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = 0; bridge->ops = &mvebu_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; bridge->align_resource = mvebu_pcie_align_resource; bridge->msi = pcie->msi; diff --git a/drivers/pci/controller/pci-rcar-gen2.c b/drivers/pci/controller/pci-rcar-gen2.c index 326171cb1a97..c9530038ca9a 100644 --- a/drivers/pci/controller/pci-rcar-gen2.c +++ 
b/drivers/pci/controller/pci-rcar-gen2.c @@ -98,22 +98,17 @@ struct rcar_pci_priv { void __iomem *reg; struct resource mem_res; struct resource *cfg_res; - unsigned busnr; int irq; - unsigned long window_size; - unsigned long window_addr; - unsigned long window_pci; }; /* PCI configuration space operations */ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, int where) { - struct pci_sys_data *sys = bus->sysdata; - struct rcar_pci_priv *priv = sys->private_data; + struct rcar_pci_priv *priv = bus->sysdata; int slot, val; - if (sys->busnr != bus->number || PCI_FUNC(devfn)) + if (!pci_is_root_bus(bus) || PCI_FUNC(devfn)) return NULL; /* Only one EHCI/OHCI device built-in */ @@ -132,20 +127,6 @@ static void __iomem *rcar_pci_cfg_base(struct pci_bus *bus, unsigned int devfn, return priv->reg + (slot >> 1) * 0x100 + where; } -/* PCI interrupt mapping */ -static int rcar_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) -{ - struct pci_sys_data *sys = dev->bus->sysdata; - struct rcar_pci_priv *priv = sys->private_data; - int irq; - - irq = of_irq_parse_and_map_pci(dev, slot, pin); - if (!irq) - irq = priv->irq; - - return irq; -} - #ifdef CONFIG_PCI_DEBUG /* if debug enabled, then attach an error handler irq to the bridge */ @@ -189,19 +170,33 @@ static inline void rcar_pci_setup_errirq(struct rcar_pci_priv *priv) { } #endif /* PCI host controller setup */ -static int rcar_pci_setup(int nr, struct pci_sys_data *sys) +static void rcar_pci_setup(struct rcar_pci_priv *priv) { - struct rcar_pci_priv *priv = sys->private_data; + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(priv); struct device *dev = priv->dev; void __iomem *reg = priv->reg; + struct resource_entry *entry; + unsigned long window_size; + unsigned long window_addr; + unsigned long window_pci; u32 val; - int ret; + + entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM); + if (!entry) { + window_addr = 0x40000000; + window_pci = 0x40000000; + window_size = SZ_1G; + } else { + window_addr = entry->res->start; + window_pci = entry->res->start - entry->offset; + window_size = resource_size(entry->res); + } pm_runtime_enable(dev); pm_runtime_get_sync(dev); val = ioread32(reg + RCAR_PCI_UNIT_REV_REG); - dev_info(dev, "PCI: bus%u revision %x\n", sys->busnr, val); + dev_info(dev, "PCI: revision %x\n", val); /* Disable Direct Power Down State and assert reset */ val = ioread32(reg + RCAR_USBCTR_REG) & ~RCAR_USBCTR_DIRPD; @@ -214,7 +209,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) RCAR_USBCTR_USBH_RST | RCAR_USBCTR_PLL_RST); /* Setup PCIAHB window1 size */ - switch (priv->window_size) { + switch (window_size) { case SZ_2G: val |= RCAR_USBCTR_PCIAHB_WIN1_2G; break; @@ -226,8 +221,8 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) break; default: pr_warn("unknown window size %ld - defaulting to 256M\n", - priv->window_size); - priv->window_size = SZ_256M; + window_size); + window_size = SZ_256M; /* fall-through */ case SZ_256M: val |= RCAR_USBCTR_PCIAHB_WIN1_256M; @@ -245,7 +240,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(val, reg + RCAR_PCI_ARBITER_CTR_REG); /* PCI-AHB mapping */ - iowrite32(priv->window_addr | RCAR_PCIAHB_PREFETCH16, + iowrite32(window_addr | RCAR_PCIAHB_PREFETCH16, reg + RCAR_PCIAHB_WIN1_CTR_REG); /* AHB-PCI mapping: OHCI/EHCI registers */ @@ -256,7 +251,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(RCAR_AHBPCI_WIN1_HOST | RCAR_AHBPCI_WIN_CTR_CFG, reg + 
RCAR_AHBPCI_WIN1_CTR_REG); /* Set PCI-AHB Window1 address */ - iowrite32(priv->window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, + iowrite32(window_pci | PCI_BASE_ADDRESS_MEM_PREFETCH, reg + PCI_BASE_ADDRESS_1); /* Set AHB-PCI bridge PCI communication area address */ val = priv->cfg_res->start + RCAR_AHBPCI_PCICOM_OFFSET; @@ -271,18 +266,7 @@ static int rcar_pci_setup(int nr, struct pci_sys_data *sys) iowrite32(RCAR_PCI_INT_A | RCAR_PCI_INT_B | RCAR_PCI_INT_PME, reg + RCAR_PCI_INT_ENABLE_REG); - if (priv->irq > 0) - rcar_pci_setup_errirq(priv); - - /* Add PCI resources */ - pci_add_resource(&sys->resources, &priv->mem_res); - ret = devm_request_pci_bus_resources(dev, &sys->resources); - if (ret < 0) - return ret; - - /* Setup bus number based on platform device id / of bus-range */ - sys->busnr = priv->busnr; - return 1; + rcar_pci_setup_errirq(priv); } static struct pci_ops rcar_pci_ops = { @@ -291,55 +275,20 @@ static struct pci_ops rcar_pci_ops = { .write = pci_generic_config_write, }; -static int rcar_pci_parse_map_dma_ranges(struct rcar_pci_priv *pci, - struct device_node *np) -{ - struct device *dev = pci->dev; - struct of_pci_range range; - struct of_pci_range_parser parser; - int index = 0; - - /* Failure to parse is ok as we fall back to defaults */ - if (of_pci_dma_range_parser_init(&parser, np)) - return 0; - - /* Get the dma-ranges from DT */ - for_each_of_pci_range(&parser, &range) { - /* Hardware only allows one inbound 32-bit range */ - if (index) - return -EINVAL; - - pci->window_addr = (unsigned long)range.cpu_addr; - pci->window_pci = (unsigned long)range.pci_addr; - pci->window_size = (unsigned long)range.size; - - /* Catch HW limitations */ - if (!(range.flags & IORESOURCE_PREFETCH)) { - dev_err(dev, "window must be prefetchable\n"); - return -EINVAL; - } - if (pci->window_addr) { - u32 lowaddr = 1 << (ffs(pci->window_addr) - 1); - - if (lowaddr < pci->window_size) { - dev_err(dev, "invalid window size/addr\n"); - return -EINVAL; - } - } - index++; - } - - return 0; -} - static int rcar_pci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *cfg_res, *mem_res; struct rcar_pci_priv *priv; + struct pci_host_bridge *bridge; void __iomem *reg; - struct hw_pci hw; - void *hw_private[1]; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*priv)); + if (!bridge) + return -ENOMEM; + + priv = pci_host_bridge_priv(bridge); + bridge->sysdata = priv; cfg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); reg = devm_ioremap_resource(dev, cfg_res); @@ -353,10 +302,6 @@ static int rcar_pci_probe(struct platform_device *pdev) if (mem_res->start & 0xFFFF) return -EINVAL; - priv = devm_kzalloc(dev, sizeof(struct rcar_pci_priv), GFP_KERNEL); - if (!priv) - return -ENOMEM; - priv->mem_res = *mem_res; priv->cfg_res = cfg_res; @@ -369,44 +314,13 @@ static int rcar_pci_probe(struct platform_device *pdev) return priv->irq; } - /* default window addr and size if not specified in DT */ - priv->window_addr = 0x40000000; - priv->window_pci = 0x40000000; - priv->window_size = SZ_1G; - - if (dev->of_node) { - struct resource busnr; - int ret; - - ret = of_pci_parse_bus_range(dev->of_node, &busnr); - if (ret < 0) { - dev_err(dev, "failed to parse bus-range\n"); - return ret; - } - - priv->busnr = busnr.start; - if (busnr.end != busnr.start) - dev_warn(dev, "only one bus number supported\n"); - - ret = rcar_pci_parse_map_dma_ranges(priv, dev->of_node); - if (ret < 0) { - dev_err(dev, "failed to parse dma-range\n"); - return ret; - } - } else { - priv->busnr = 
pdev->id; - } + bridge->ops = &rcar_pci_ops; + + pci_add_flags(PCI_REASSIGN_ALL_BUS); + + rcar_pci_setup(priv); - hw_private[0] = priv; - memset(&hw, 0, sizeof(hw)); - hw.nr_controllers = ARRAY_SIZE(hw_private); - hw.io_optional = 1; - hw.private_data = hw_private; - hw.map_irq = rcar_pci_map_irq; - hw.ops = &rcar_pci_ops; - hw.setup = rcar_pci_setup; - pci_common_init_dev(dev, &hw); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id rcar_pci_of_match[] = { diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 235b456698fc..c1d34353c29b 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -181,13 +181,6 @@ #define AFI_PEXBIAS_CTRL_0 0x168 -#define RP_PRIV_XP_DL 0x00000494 -#define RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD (0x1ff << 1) - -#define RP_RX_HDR_LIMIT 0x00000e00 -#define RP_RX_HDR_LIMIT_PW_MASK (0xff << 8) -#define RP_RX_HDR_LIMIT_PW (0x0e << 8) - #define RP_ECTL_2_R1 0x00000e84 #define RP_ECTL_2_R1_RX_CTLE_1C_MASK 0xffff @@ -323,7 +316,6 @@ struct tegra_pcie_soc { bool program_uphy; bool update_clamp_threshold; bool program_deskew_time; - bool raw_violation_fixup; bool update_fc_timer; bool has_cache_bars; struct { @@ -659,23 +651,6 @@ static void tegra_pcie_apply_sw_fixup(struct tegra_pcie_port *port) writel(value, port->base + RP_VEND_CTL0); } - /* Fixup for read after write violation. */ - if (soc->raw_violation_fixup) { - value = readl(port->base + RP_RX_HDR_LIMIT); - value &= ~RP_RX_HDR_LIMIT_PW_MASK; - value |= RP_RX_HDR_LIMIT_PW; - writel(value, port->base + RP_RX_HDR_LIMIT); - - value = readl(port->base + RP_PRIV_XP_DL); - value |= RP_PRIV_XP_DL_GEN2_UPD_FC_TSHOLD; - writel(value, port->base + RP_PRIV_XP_DL); - - value = readl(port->base + RP_VEND_XP); - value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; - value |= soc->update_fc_threshold; - writel(value, port->base + RP_VEND_XP); - } - if (soc->update_fc_timer) { value = readl(port->base + RP_VEND_XP); value &= ~RP_VEND_XP_UPDATE_FC_THRESHOLD_MASK; @@ -1462,7 +1437,7 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) { struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); - struct resource *pads, *afi, *res; + struct resource *res; const struct tegra_pcie_soc *soc = pcie->soc; int err; @@ -1486,15 +1461,13 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) } } - pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads"); - pcie->pads = devm_ioremap_resource(dev, pads); + pcie->pads = devm_platform_ioremap_resource_byname(pdev, "pads"); if (IS_ERR(pcie->pads)) { err = PTR_ERR(pcie->pads); goto phys_put; } - afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi"); - pcie->afi = devm_ioremap_resource(dev, afi); + pcie->afi = devm_platform_ioremap_resource_byname(pdev, "afi"); if (IS_ERR(pcie->afi)) { err = PTR_ERR(pcie->afi); goto phys_put; @@ -1520,10 +1493,8 @@ static int tegra_pcie_get_resources(struct tegra_pcie *pcie) /* request interrupt */ err = platform_get_irq_byname(pdev, "intr"); - if (err < 0) { - dev_err(dev, "failed to get IRQ: %d\n", err); + if (err < 0) goto phys_put; - } pcie->irq = err; @@ -1738,10 +1709,8 @@ static int tegra_pcie_msi_setup(struct tegra_pcie *pcie) } err = platform_get_irq_byname(pdev, "msi"); - if (err < 0) { - dev_err(dev, "failed to get IRQ: %d\n", err); + if (err < 0) goto free_irq_domain; - } msi->irq = err; @@ -2025,7 +1994,7 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) 
pcie->supplies[i++].supply = "hvdd-pex"; pcie->supplies[i++].supply = "vddio-pexctl-aud"; } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) { - pcie->num_supplies = 6; + pcie->num_supplies = 3; pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies, sizeof(*pcie->supplies), @@ -2033,14 +2002,11 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) if (!pcie->supplies) return -ENOMEM; - pcie->supplies[i++].supply = "avdd-pll-uerefe"; pcie->supplies[i++].supply = "hvddio-pex"; pcie->supplies[i++].supply = "dvddio-pex"; - pcie->supplies[i++].supply = "dvdd-pex-pll"; - pcie->supplies[i++].supply = "hvdd-pex-pll-e"; pcie->supplies[i++].supply = "vddio-pex-ctl"; } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) { - pcie->num_supplies = 7; + pcie->num_supplies = 4; pcie->supplies = devm_kcalloc(dev, pcie->num_supplies, sizeof(*pcie->supplies), @@ -2050,11 +2016,8 @@ static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask) pcie->supplies[i++].supply = "avddio-pex"; pcie->supplies[i++].supply = "dvddio-pex"; - pcie->supplies[i++].supply = "avdd-pex-pll"; pcie->supplies[i++].supply = "hvdd-pex"; - pcie->supplies[i++].supply = "hvdd-pex-pll-e"; pcie->supplies[i++].supply = "vddio-pex-ctl"; - pcie->supplies[i++].supply = "avdd-pll-erefe"; } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) { bool need_pexa = false, need_pexb = false; @@ -2416,7 +2379,6 @@ static const struct tegra_pcie_soc tegra20_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = true, .ectl.enable = false, @@ -2446,7 +2408,6 @@ static const struct tegra_pcie_soc tegra30_pcie = { .program_uphy = true, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2459,8 +2420,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .pads_pll_ctl = PADS_PLL_CTL_TEGRA30, .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN, .pads_refclk_cfg0 = 0x44ac44ac, - /* FC threshold is bit[25:18] */ - .update_fc_threshold = 0x03fc0000, .has_pex_clkreq_en = true, .has_pex_bias_ctrl = true, .has_intr_prsnt_sense = true, @@ -2470,7 +2429,6 @@ static const struct tegra_pcie_soc tegra124_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = false, - .raw_violation_fixup = true, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2494,7 +2452,6 @@ static const struct tegra_pcie_soc tegra210_pcie = { .program_uphy = true, .update_clamp_threshold = true, .program_deskew_time = true, - .raw_violation_fixup = false, .update_fc_timer = true, .has_cache_bars = false, .ectl = { @@ -2536,7 +2493,6 @@ static const struct tegra_pcie_soc tegra186_pcie = { .program_uphy = false, .update_clamp_threshold = false, .program_deskew_time = false, - .raw_violation_fixup = false, .update_fc_timer = false, .has_cache_bars = false, .ectl.enable = false, @@ -2670,8 +2626,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct pci_host_bridge *host; struct tegra_pcie *pcie; - struct pci_bus *child; - struct resource *bus; int err; host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie)); @@ -2686,12 +2640,6 @@ static int tegra_pcie_probe(struct platform_device *pdev) INIT_LIST_HEAD(&pcie->ports); pcie->dev = dev; - err = pci_parse_request_of_pci_ranges(dev, &host->windows, 
NULL, &bus); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - err = tegra_pcie_parse_dt(pcie); if (err < 0) return err; @@ -2715,26 +2663,15 @@ static int tegra_pcie_probe(struct platform_device *pdev) goto pm_runtime_put; } - host->busnr = bus->start; - host->dev.parent = &pdev->dev; host->ops = &tegra_pcie_ops; host->map_irq = tegra_pcie_map_irq; - host->swizzle_irq = pci_common_swizzle; - err = pci_scan_root_bus_bridge(host); + err = pci_host_probe(host); if (err < 0) { dev_err(dev, "failed to register host: %d\n", err); goto pm_runtime_put; } - pci_bus_size_bridges(host->bus); - pci_bus_assign_resources(host->bus); - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - if (IS_ENABLED(CONFIG_DEBUG_FS)) { err = tegra_pcie_debugfs_init(pcie); if (err < 0) diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c index 3681e5af3878..1f54334f09f7 100644 --- a/drivers/pci/controller/pci-v3-semi.c +++ b/drivers/pci/controller/pci-v3-semi.c @@ -239,7 +239,6 @@ struct v3_pci { struct device *dev; void __iomem *base; void __iomem *config_base; - struct pci_bus *bus; u32 config_mem; u32 non_pre_mem; u32 pre_mem; @@ -585,8 +584,6 @@ static int v3_pci_setup_resource(struct v3_pci *v3, } break; case IORESOURCE_BUS: - dev_dbg(dev, "BUS %pR\n", win->res); - host->busnr = win->res->start; break; default: dev_info(dev, "Unknown resource type %lu\n", @@ -724,12 +721,7 @@ static int v3_pci_probe(struct platform_device *pdev) if (!host) return -ENOMEM; - host->dev.parent = dev; host->ops = &v3_pci_ops; - host->busnr = 0; - host->msi = NULL; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; v3 = pci_host_bridge_priv(host); host->sysdata = v3; v3->dev = dev; @@ -770,17 +762,11 @@ static int v3_pci_probe(struct platform_device *pdev) if (IS_ERR(v3->config_base)) return PTR_ERR(v3->config_base); - ret = pci_parse_request_of_pci_ranges(dev, &host->windows, - &host->dma_ranges, NULL); - if (ret) - return ret; - /* Get and request error IRQ resource */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "unable to obtain PCIv3 error IRQ\n"); + if (irq < 0) return irq; - } + ret = devm_request_irq(dev, irq, v3_irq, 0, "PCIv3 error", v3); if (ret < 0) { @@ -904,17 +890,7 @@ static int v3_pci_probe(struct platform_device *pdev) val |= V3_SYSTEM_M_LOCK; writew(val, v3->base + V3_SYSTEM); - ret = pci_scan_root_bus_bridge(host); - if (ret) { - dev_err(dev, "failed to register host: %d\n", ret); - return ret; - } - v3->bus = host->bus; - - pci_bus_assign_resources(v3->bus); - pci_bus_add_devices(v3->bus); - - return 0; + return pci_host_probe(host); } static const struct of_device_id v3_pci_of_match[] = { diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c index b911359b6d81..653d5d0ecf81 100644 --- a/drivers/pci/controller/pci-versatile.c +++ b/drivers/pci/controller/pci-versatile.c @@ -67,23 +67,20 @@ static int versatile_pci_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct resource *res; struct resource_entry *entry; - int ret, i, myslot = -1, mem = 1; + int i, myslot = -1, mem = 1; u32 val; void __iomem *local_pci_cfg_base; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; bridge = devm_pci_alloc_host_bridge(dev, 0); if (!bridge) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - versatile_pci_base = 
devm_ioremap_resource(dev, res); + versatile_pci_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(versatile_pci_base)) return PTR_ERR(versatile_pci_base); - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - versatile_cfg_base[0] = devm_ioremap_resource(dev, res); + versatile_cfg_base[0] = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(versatile_cfg_base[0])) return PTR_ERR(versatile_cfg_base[0]); @@ -92,11 +89,6 @@ static int versatile_pci_probe(struct platform_device *pdev) if (IS_ERR(versatile_cfg_base[1])) return PTR_ERR(versatile_cfg_base[1]); - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - NULL, NULL); - if (ret) - return ret; - resource_list_for_each_entry(entry, &bridge->windows) { if (resource_type(entry->res) == IORESOURCE_MEM) { writel(entry->res->start >> 28, PCI_IMAP(mem)); @@ -154,28 +146,11 @@ static int versatile_pci_probe(struct platform_device *pdev) */ writel(0, versatile_cfg_base[0] + PCI_INTERRUPT_LINE); - pci_add_flags(PCI_ENABLE_PROC_DOMAINS); pci_add_flags(PCI_REASSIGN_ALL_BUS); - bridge->dev.parent = dev; - bridge->sysdata = NULL; - bridge->busnr = 0; bridge->ops = &pci_versatile_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id versatile_pci_of_match[] = { diff --git a/drivers/pci/controller/pci-xgene-msi.c b/drivers/pci/controller/pci-xgene-msi.c index f4c02da84e59..02271c6d17a1 100644 --- a/drivers/pci/controller/pci-xgene-msi.c +++ b/drivers/pci/controller/pci-xgene-msi.c @@ -478,8 +478,6 @@ static int xgene_msi_probe(struct platform_device *pdev) for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { virt_msir = platform_get_irq(pdev, irq_index); if (virt_msir < 0) { - dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", - irq_index); rc = virt_msir; goto error; } diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c index d1efa8ffbae1..8e0db84f089d 100644 --- a/drivers/pci/controller/pci-xgene.c +++ b/drivers/pci/controller/pci-xgene.c @@ -355,8 +355,7 @@ static int xgene_pcie_map_reg(struct xgene_pcie_port *port, if (IS_ERR(port->csr_base)) return PTR_ERR(port->csr_base); - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); - port->cfg_base = devm_ioremap_resource(dev, res); + port->cfg_base = devm_platform_ioremap_resource_byname(pdev, "cfg"); if (IS_ERR(port->cfg_base)) return PTR_ERR(port->cfg_base); port->cfg_addr = res->start; @@ -591,7 +590,6 @@ static int xgene_pcie_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *dn = dev->of_node; struct xgene_pcie_port *port; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; int ret; @@ -616,33 +614,14 @@ static int xgene_pcie_probe(struct platform_device *pdev) if (ret) return ret; - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - ret = xgene_pcie_setup(port); if (ret) return ret; - bridge->dev.parent = dev; bridge->sysdata = port; - bridge->busnr = 0; bridge->ops = &xgene_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - 
return ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id xgene_pcie_match_table[] = { diff --git a/drivers/pci/controller/pcie-altera-msi.c b/drivers/pci/controller/pcie-altera-msi.c index 16d938920ca5..e1636f7714ca 100644 --- a/drivers/pci/controller/pcie-altera-msi.c +++ b/drivers/pci/controller/pcie-altera-msi.c @@ -228,8 +228,7 @@ static int altera_msi_probe(struct platform_device *pdev) mutex_init(&msi->lock); msi->pdev = pdev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr"); - msi->csr_base = devm_ioremap_resource(&pdev->dev, res); + msi->csr_base = devm_platform_ioremap_resource_byname(pdev, "csr"); if (IS_ERR(msi->csr_base)) { dev_err(&pdev->dev, "failed to map csr memory\n"); return PTR_ERR(msi->csr_base); @@ -256,7 +255,6 @@ static int altera_msi_probe(struct platform_device *pdev) msi->irq = platform_get_irq(pdev, 0); if (msi->irq < 0) { - dev_err(&pdev->dev, "failed to map IRQ: %d\n", msi->irq); ret = msi->irq; goto err; } diff --git a/drivers/pci/controller/pcie-altera.c b/drivers/pci/controller/pcie-altera.c index 24cb1c331058..523bd928b380 100644 --- a/drivers/pci/controller/pcie-altera.c +++ b/drivers/pci/controller/pcie-altera.c @@ -694,29 +694,23 @@ static void altera_pcie_irq_teardown(struct altera_pcie *pcie) static int altera_pcie_parse_dt(struct altera_pcie *pcie) { - struct device *dev = &pcie->pdev->dev; struct platform_device *pdev = pcie->pdev; - struct resource *cra; - struct resource *hip; - cra = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Cra"); - pcie->cra_base = devm_ioremap_resource(dev, cra); + pcie->cra_base = devm_platform_ioremap_resource_byname(pdev, "Cra"); if (IS_ERR(pcie->cra_base)) return PTR_ERR(pcie->cra_base); if (pcie->pcie_data->version == ALTERA_PCIE_V2) { - hip = platform_get_resource_byname(pdev, IORESOURCE_MEM, "Hip"); - pcie->hip_base = devm_ioremap_resource(&pdev->dev, hip); + pcie->hip_base = + devm_platform_ioremap_resource_byname(pdev, "Hip"); if (IS_ERR(pcie->hip_base)) return PTR_ERR(pcie->hip_base); } /* setup IRQ */ pcie->irq = platform_get_irq(pdev, 0); - if (pcie->irq < 0) { - dev_err(dev, "failed to get IRQ: %d\n", pcie->irq); + if (pcie->irq < 0) return pcie->irq; - } irq_set_chained_handler_and_data(pcie->irq, altera_pcie_isr, pcie); return 0; @@ -773,8 +767,6 @@ static int altera_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct altera_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; struct pci_host_bridge *bridge; int ret; const struct of_device_id *match; @@ -799,13 +791,6 @@ static int altera_pcie_probe(struct platform_device *pdev) return ret; } - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "Failed add resources\n"); - return ret; - } - ret = altera_pcie_init_irq_domain(pcie); if (ret) { dev_err(dev, "Failed creating IRQ Domain\n"); @@ -818,27 +803,11 @@ static int altera_pcie_probe(struct platform_device *pdev) cra_writel(pcie, P2A_INT_ENA_ALL, P2A_INT_ENABLE); altera_pcie_host_init(pcie); - bridge->dev.parent = dev; bridge->sysdata = pcie; bridge->busnr = pcie->root_bus_nr; bridge->ops = &altera_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return 
ret; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - - /* Configure PCI Express setting. */ - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - return ret; + return pci_host_probe(bridge); } static int altera_pcie_remove(struct platform_device *pdev) diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 7730ea845ff2..85fa7d54f11f 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -172,7 +172,6 @@ struct brcm_pcie { struct device *dev; void __iomem *base; struct clk *clk; - struct pci_bus *root_bus; struct device_node *np; bool ssc; int gen; @@ -919,9 +918,10 @@ static void __brcm_pcie_remove(struct brcm_pcie *pcie) static int brcm_pcie_remove(struct platform_device *pdev) { struct brcm_pcie *pcie = platform_get_drvdata(pdev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie); - pci_stop_root_bus(pcie->root_bus); - pci_remove_root_bus(pcie->root_bus); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); __brcm_pcie_remove(pcie); return 0; @@ -933,8 +933,6 @@ static int brcm_pcie_probe(struct platform_device *pdev) struct pci_host_bridge *bridge; struct device_node *fw_np; struct brcm_pcie *pcie; - struct pci_bus *child; - struct resource *res; int ret; /* @@ -959,8 +957,7 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->dev = &pdev->dev; pcie->np = np; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - pcie->base = devm_ioremap_resource(&pdev->dev, res); + pcie->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(pcie->base)) return PTR_ERR(pcie->base); @@ -973,11 +970,6 @@ static int brcm_pcie_probe(struct platform_device *pdev) pcie->ssc = of_property_read_bool(np, "brcm,enable-ssc"); - ret = pci_parse_request_of_pci_ranges(pcie->dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) - return ret; - ret = clk_prepare_enable(pcie->clk); if (ret) { dev_err(&pdev->dev, "could not enable clock\n"); @@ -997,27 +989,12 @@ static int brcm_pcie_probe(struct platform_device *pdev) } } - bridge->dev.parent = &pdev->dev; - bridge->busnr = 0; bridge->ops = &brcm_pcie_ops; bridge->sysdata = pcie; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) { - dev_err(pcie->dev, "Scanning root bridge failed\n"); - goto fail; - } - - pci_assign_unassigned_bus_resources(bridge->bus); - list_for_each_entry(child, &bridge->bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bridge->bus); platform_set_drvdata(pdev, pcie); - pcie->root_bus = bridge->bus; - return 0; + return pci_host_probe(bridge); fail: __brcm_pcie_remove(pcie); return ret; diff --git a/drivers/pci/controller/pcie-iproc-platform.c b/drivers/pci/controller/pcie-iproc-platform.c index ff0a81a632a1..a956b0c18bd1 100644 --- a/drivers/pci/controller/pcie-iproc-platform.c +++ b/drivers/pci/controller/pcie-iproc-platform.c @@ -95,20 +95,14 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev) if (IS_ERR(pcie->phy)) return PTR_ERR(pcie->phy); - ret = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (ret) { - dev_err(dev, "unable to get PCI host bridge resources\n"); - return ret; - } - /* PAXC doesn't support legacy IRQs, skip mapping */ switch (pcie->type) { case IPROC_PCIE_PAXC: case IPROC_PCIE_PAXC_V2: + pcie->map_irq = 0; 
break; default: - pcie->map_irq = of_irq_parse_and_map_pci; + break; } ret = iproc_pcie_setup(pcie, &bridge->windows); diff --git a/drivers/pci/controller/pcie-iproc.c b/drivers/pci/controller/pcie-iproc.c index 8c7f875acf7f..905e93808243 100644 --- a/drivers/pci/controller/pcie-iproc.c +++ b/drivers/pci/controller/pcie-iproc.c @@ -1470,7 +1470,6 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) { struct device *dev; int ret; - struct pci_bus *child; struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); dev = pcie->dev; @@ -1524,28 +1523,16 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) if (iproc_pcie_msi_enable(pcie)) dev_info(dev, "not using iProc MSI\n"); - host->busnr = 0; - host->dev.parent = dev; host->ops = &iproc_pcie_ops; host->sysdata = pcie; host->map_irq = pcie->map_irq; - host->swizzle_irq = pci_common_swizzle; - ret = pci_scan_root_bus_bridge(host); + ret = pci_host_probe(host); if (ret < 0) { dev_err(dev, "failed to scan host: %d\n", ret); goto err_power_off_phy; } - pci_assign_unassigned_bus_resources(host->bus); - - pcie->root_bus = host->bus; - - list_for_each_entry(child, &host->bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(host->bus); - return 0; err_power_off_phy: @@ -1558,8 +1545,10 @@ EXPORT_SYMBOL(iproc_pcie_setup); int iproc_pcie_remove(struct iproc_pcie *pcie) { - pci_stop_root_bus(pcie->root_bus); - pci_remove_root_bus(pcie->root_bus); + struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); + + pci_stop_root_bus(host->bus); + pci_remove_root_bus(host->bus); iproc_pcie_msi_disable(pcie); diff --git a/drivers/pci/controller/pcie-iproc.h b/drivers/pci/controller/pcie-iproc.h index 4f03ea539805..c2676e442f55 100644 --- a/drivers/pci/controller/pcie-iproc.h +++ b/drivers/pci/controller/pcie-iproc.h @@ -54,7 +54,6 @@ struct iproc_msi; * @reg_offsets: register offsets * @base: PCIe host controller I/O register base * @base_addr: PCIe host controller register base physical address - * @root_bus: pointer to root bus * @phy: optional PHY device that controls the Serdes * @map_irq: function callback to map interrupts * @ep_is_internal: indicates an internal emulated endpoint device is connected @@ -85,7 +84,6 @@ struct iproc_pcie { void __iomem *base; phys_addr_t base_addr; struct resource mem; - struct pci_bus *root_bus; struct phy *phy; int (*map_irq)(const struct pci_dev *, u8, u8); bool ep_is_internal; diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c index ebfa7d5a4e2d..cf4c18f0c25a 100644 --- a/drivers/pci/controller/pcie-mediatek.c +++ b/drivers/pci/controller/pcie-mediatek.c @@ -209,7 +209,6 @@ struct mtk_pcie_port { * @mem: non-prefetchable memory resource * @ports: pointer to PCIe port information * @soc: pointer to SoC-dependent operations - * @busnr: root bus number */ struct mtk_pcie { struct device *dev; @@ -218,7 +217,6 @@ struct mtk_pcie { struct list_head ports; const struct mtk_pcie_soc *soc; - unsigned int busnr; }; static void mtk_pcie_subsys_powerdown(struct mtk_pcie *pcie) @@ -905,7 +903,6 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, int slot) { struct mtk_pcie_port *port; - struct resource *regs; struct device *dev = pcie->dev; struct platform_device *pdev = to_platform_device(dev); char name[10]; @@ -916,8 +913,7 @@ static int mtk_pcie_parse_port(struct mtk_pcie *pcie, return -ENOMEM; snprintf(name, sizeof(name), "port%d", slot); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, 
name); - port->base = devm_ioremap_resource(dev, regs); + port->base = devm_platform_ioremap_resource_byname(pdev, name); if (IS_ERR(port->base)) { dev_err(dev, "failed to map port%d base\n", slot); return PTR_ERR(port->base); @@ -1031,18 +1027,8 @@ static int mtk_pcie_setup(struct mtk_pcie *pcie) struct device *dev = pcie->dev; struct device_node *node = dev->of_node, *child; struct mtk_pcie_port *port, *tmp; - struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie); - struct list_head *windows = &host->windows; - struct resource *bus; int err; - err = pci_parse_request_of_pci_ranges(dev, windows, - &host->dma_ranges, &bus); - if (err) - return err; - - pcie->busnr = bus->start; - for_each_available_child_of_node(node, child) { int slot; @@ -1096,11 +1082,7 @@ static int mtk_pcie_probe(struct platform_device *pdev) if (err) return err; - host->busnr = pcie->busnr; - host->dev.parent = pcie->dev; host->ops = pcie->soc->ops; - host->map_irq = of_irq_parse_and_map_pci; - host->swizzle_irq = pci_common_swizzle; host->sysdata = pcie; err = pci_host_probe(host); diff --git a/drivers/pci/controller/pcie-rcar-host.c b/drivers/pci/controller/pcie-rcar-host.c index d210a36561be..cdc0963f154e 100644 --- a/drivers/pci/controller/pcie-rcar-host.c +++ b/drivers/pci/controller/pcie-rcar-host.c @@ -53,8 +53,6 @@ struct rcar_pcie_host { struct device *dev; struct phy *phy; void __iomem *base; - struct list_head resources; - int root_bus_nr; struct clk *bus_clk; struct rcar_msi msi; int (*phy_init_fn)(struct rcar_pcie_host *host); @@ -100,22 +98,14 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, if (dev != 0) return PCIBIOS_DEVICE_NOT_FOUND; - if (access_type == RCAR_PCI_ACCESS_READ) { + if (access_type == RCAR_PCI_ACCESS_READ) *data = rcar_pci_read_reg(pcie, PCICONF(index)); - } else { - /* Keep an eye out for changes to the root bus number */ - if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS)) - host->root_bus_nr = *data & 0xff; - + else rcar_pci_write_reg(pcie, *data, PCICONF(index)); - } return PCIBIOS_SUCCESSFUL; } - if (host->root_bus_nr < 0) - return PCIBIOS_DEVICE_NOT_FOUND; - /* Clear errors */ rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR); @@ -124,7 +114,7 @@ static int rcar_pcie_config_access(struct rcar_pcie_host *host, PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR); /* Enable the configuration access */ - if (bus->parent->number == host->root_bus_nr) + if (pci_is_root_bus(bus->parent)) rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR); else rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR); @@ -212,38 +202,6 @@ static struct pci_ops rcar_pcie_ops = { .write = rcar_pcie_write_conf, }; -static int rcar_pcie_setup(struct list_head *resource, - struct rcar_pcie_host *host) -{ - struct resource_entry *win; - int i = 0; - - /* Setup PCI resources */ - resource_list_for_each_entry(win, &host->resources) { - struct resource *res = win->res; - - if (!res->flags) - continue; - - switch (resource_type(res)) { - case IORESOURCE_IO: - case IORESOURCE_MEM: - rcar_pcie_set_outbound(&host->pcie, i, win); - i++; - break; - case IORESOURCE_BUS: - host->root_bus_nr = res->start; - break; - default: - continue; - } - - pci_add_resource(resource, res); - } - - return 1; -} - static void rcar_pcie_force_speedup(struct rcar_pcie *pcie) { struct device *dev = pcie->dev; @@ -301,6 +259,7 @@ done: static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) { struct rcar_pcie *pcie = &host->pcie; + struct pci_host_bridge 
*bridge = pci_host_bridge_from_priv(host); struct resource_entry *win; LIST_HEAD(res); int i = 0; @@ -309,7 +268,7 @@ static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) rcar_pcie_force_speedup(pcie); /* Setup PCI resources */ - resource_list_for_each_entry(win, &host->resources) { + resource_list_for_each_entry(win, &bridge->windows) { struct resource *res = win->res; if (!res->flags) @@ -328,42 +287,17 @@ static void rcar_pcie_hw_enable(struct rcar_pcie_host *host) static int rcar_pcie_enable(struct rcar_pcie_host *host) { struct pci_host_bridge *bridge = pci_host_bridge_from_priv(host); - struct rcar_pcie *pcie = &host->pcie; - struct device *dev = pcie->dev; - struct pci_bus *bus, *child; - int ret; - - /* Try setting 5 GT/s link speed */ - rcar_pcie_force_speedup(pcie); - rcar_pcie_setup(&bridge->windows, host); + rcar_pcie_hw_enable(host); pci_add_flags(PCI_REASSIGN_ALL_BUS); - bridge->dev.parent = dev; bridge->sysdata = host; - bridge->busnr = host->root_bus_nr; bridge->ops = &rcar_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) bridge->msi = &host->msi.chip; - ret = pci_scan_root_bus_bridge(bridge); - if (ret < 0) - return ret; - - bus = bridge->bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); - - return 0; + return pci_host_probe(bridge); } static int phy_wait_for_ack(struct rcar_pcie *pcie) @@ -968,7 +902,7 @@ static int rcar_pcie_probe(struct platform_device *pdev) int err; struct pci_host_bridge *bridge; - bridge = pci_alloc_host_bridge(sizeof(*host)); + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*host)); if (!bridge) return -ENOMEM; @@ -977,16 +911,11 @@ static int rcar_pcie_probe(struct platform_device *pdev) pcie->dev = dev; platform_set_drvdata(pdev, host); - err = pci_parse_request_of_pci_ranges(dev, &host->resources, - &bridge->dma_ranges, NULL); - if (err) - goto err_free_bridge; - pm_runtime_enable(pcie->dev); err = pm_runtime_get_sync(pcie->dev); if (err < 0) { dev_err(pcie->dev, "pm_runtime_get_sync failed\n"); - goto err_pm_disable; + goto err_pm_put; } err = rcar_pcie_get_resources(host); @@ -1057,13 +986,7 @@ err_unmap_msi_irqs: err_pm_put: pm_runtime_put(dev); - -err_pm_disable: pm_runtime_disable(dev); - pci_free_resource_list(&host->resources); - -err_free_bridge: - pci_free_host_bridge(bridge); return err; } diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 5eaf36629a75..7631dc3961c1 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -22,6 +22,7 @@ /** * struct rockchip_pcie_ep - private data for PCIe endpoint controller driver * @rockchip: Rockchip PCIe controller + * @epc: PCI EPC device * @max_regions: maximum number of regions supported by hardware * @ob_region_map: bitmask of mapped outbound regions * @ob_addr: base addresses in the AXI bus where the outbound regions start diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 94af6f5828a3..0bb2fb3e8a0b 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -72,14 +72,14 @@ static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, struct pci_bus *bus, int dev) { /* access only one slot on each root port */ - if (bus->number == 
rockchip->root_bus_nr && dev > 0) + if (pci_is_root_bus(bus) && dev > 0) return 0; /* * do not read more than one device on the bus directly attached * to RC's downstream side. */ - if (bus->primary == rockchip->root_bus_nr && dev > 0) + if (pci_is_root_bus(bus->parent) && dev > 0) return 0; return 1; @@ -170,7 +170,7 @@ static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, return PCIBIOS_BAD_REGISTER_NUMBER; } - if (bus->parent->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else @@ -201,7 +201,7 @@ static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, if (!IS_ALIGNED(busdev, size)) return PCIBIOS_BAD_REGISTER_NUMBER; - if (bus->parent->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else @@ -230,7 +230,7 @@ static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, return PCIBIOS_DEVICE_NOT_FOUND; } - if (bus->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus)) return rockchip_pcie_rd_own_conf(rockchip, where, size, val); return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, @@ -245,7 +245,7 @@ static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; - if (bus->number == rockchip->root_bus_nr) + if (pci_is_root_bus(bus)) return rockchip_pcie_wr_own_conf(rockchip, where, size, val); return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, @@ -549,10 +549,8 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) struct platform_device *pdev = to_platform_device(dev); irq = platform_get_irq_byname(pdev, "sys"); - if (irq < 0) { - dev_err(dev, "missing sys IRQ resource\n"); + if (irq < 0) return irq; - } err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler, IRQF_SHARED, "pcie-sys", rockchip); @@ -562,20 +560,16 @@ static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip) } irq = platform_get_irq_byname(pdev, "legacy"); - if (irq < 0) { - dev_err(dev, "missing legacy IRQ resource\n"); + if (irq < 0) return irq; - } irq_set_chained_handler_and_data(irq, rockchip_pcie_legacy_int_handler, rockchip); irq = platform_get_irq_byname(pdev, "client"); - if (irq < 0) { - dev_err(dev, "missing client IRQ resource\n"); + if (irq < 0) return irq; - } err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler, IRQF_SHARED, "pcie-client", rockchip); @@ -949,9 +943,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) { struct rockchip_pcie *rockchip; struct device *dev = &pdev->dev; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; - struct resource *bus_res; int err; if (!dev->of_node) @@ -991,13 +983,6 @@ static int rockchip_pcie_probe(struct platform_device *pdev) if (err < 0) goto err_deinit_port; - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, &bus_res); - if (err) - goto err_remove_irq_domain; - - rockchip->root_bus_nr = bus_res->start; - err = rockchip_pcie_cfg_atu(rockchip); if (err) goto err_remove_irq_domain; @@ -1008,27 +993,13 @@ static int rockchip_pcie_probe(struct platform_device *pdev) goto err_remove_irq_domain; } - bridge->dev.parent = dev; bridge->sysdata = rockchip; - bridge->busnr = 0; bridge->ops = &rockchip_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = 
pci_common_swizzle; - err = pci_scan_root_bus_bridge(bridge); + err = pci_host_probe(bridge); if (err < 0) goto err_remove_irq_domain; - bus = bridge->bus; - - rockchip->root_bus = bus; - - pci_bus_size_bridges(bus); - pci_bus_assign_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - - pci_bus_add_devices(bus); return 0; err_remove_irq_domain: @@ -1051,9 +1022,10 @@ static int rockchip_pcie_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rockchip_pcie *rockchip = dev_get_drvdata(dev); + struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip); - pci_stop_root_bus(rockchip->root_bus); - pci_remove_root_bus(rockchip->root_bus); + pci_stop_root_bus(bridge->bus); + pci_remove_root_bus(bridge->bus); irq_domain_remove(rockchip->irq_domain); rockchip_pcie_deinit_phys(rockchip); diff --git a/drivers/pci/controller/pcie-rockchip.c b/drivers/pci/controller/pcie-rockchip.c index c53d1322a3d6..904dec0d3a88 100644 --- a/drivers/pci/controller/pcie-rockchip.c +++ b/drivers/pci/controller/pcie-rockchip.c @@ -45,9 +45,8 @@ int rockchip_pcie_parse_dt(struct rockchip_pcie *rockchip) return -EINVAL; } - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "apb-base"); - rockchip->apb_base = devm_ioremap_resource(dev, regs); + rockchip->apb_base = + devm_platform_ioremap_resource_byname(pdev, "apb-base"); if (IS_ERR(rockchip->apb_base)) return PTR_ERR(rockchip->apb_base); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index d90dfb354573..c7d0178fc8c2 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -298,12 +298,10 @@ struct rockchip_pcie { struct gpio_desc *ep_gpio; u32 lanes; u8 lanes_map; - u8 root_bus_nr; int link_gen; struct device *dev; struct irq_domain *irq_domain; int offset; - struct pci_bus *root_bus; void __iomem *msg_region; phys_addr_t msg_bus_addr; bool is_rc; diff --git a/drivers/pci/controller/pcie-tango.c b/drivers/pci/controller/pcie-tango.c index 8f640c70f936..d093a8ce4bb1 100644 --- a/drivers/pci/controller/pcie-tango.c +++ b/drivers/pci/controller/pcie-tango.c @@ -273,10 +273,8 @@ static int tango_pcie_probe(struct platform_device *pdev) writel_relaxed(0, pcie->base + SMP8759_ENABLE + offset); virq = platform_get_irq(pdev, 1); - if (virq < 0) { - dev_err(dev, "Failed to map IRQ\n"); + if (virq < 0) return virq; - } irq_dom = irq_domain_create_linear(fwnode, MSI_MAX, &dom_ops, pcie); if (!irq_dom) { diff --git a/drivers/pci/controller/pcie-xilinx-cpm.c b/drivers/pci/controller/pcie-xilinx-cpm.c new file mode 100644 index 000000000000..f3082de44e8a --- /dev/null +++ b/drivers/pci/controller/pcie-xilinx-cpm.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * PCIe host controller driver for Xilinx Versal CPM DMA Bridge + * + * (C) Copyright 2019 - 2020, Xilinx, Inc. 
+ */ + +#include <linux/bitfield.h> +#include <linux/interrupt.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/of_irq.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/pci-ecam.h> + +#include "../pci.h" + +/* Register definitions */ +#define XILINX_CPM_PCIE_REG_IDR 0x00000E10 +#define XILINX_CPM_PCIE_REG_IMR 0x00000E14 +#define XILINX_CPM_PCIE_REG_PSCR 0x00000E1C +#define XILINX_CPM_PCIE_REG_RPSC 0x00000E20 +#define XILINX_CPM_PCIE_REG_RPEFR 0x00000E2C +#define XILINX_CPM_PCIE_REG_IDRN 0x00000E38 +#define XILINX_CPM_PCIE_REG_IDRN_MASK 0x00000E3C +#define XILINX_CPM_PCIE_MISC_IR_STATUS 0x00000340 +#define XILINX_CPM_PCIE_MISC_IR_ENABLE 0x00000348 +#define XILINX_CPM_PCIE_MISC_IR_LOCAL BIT(1) + +/* Interrupt registers definitions */ +#define XILINX_CPM_PCIE_INTR_LINK_DOWN 0 +#define XILINX_CPM_PCIE_INTR_HOT_RESET 3 +#define XILINX_CPM_PCIE_INTR_CFG_PCIE_TIMEOUT 4 +#define XILINX_CPM_PCIE_INTR_CFG_TIMEOUT 8 +#define XILINX_CPM_PCIE_INTR_CORRECTABLE 9 +#define XILINX_CPM_PCIE_INTR_NONFATAL 10 +#define XILINX_CPM_PCIE_INTR_FATAL 11 +#define XILINX_CPM_PCIE_INTR_CFG_ERR_POISON 12 +#define XILINX_CPM_PCIE_INTR_PME_TO_ACK_RCVD 15 +#define XILINX_CPM_PCIE_INTR_INTX 16 +#define XILINX_CPM_PCIE_INTR_PM_PME_RCVD 17 +#define XILINX_CPM_PCIE_INTR_SLV_UNSUPP 20 +#define XILINX_CPM_PCIE_INTR_SLV_UNEXP 21 +#define XILINX_CPM_PCIE_INTR_SLV_COMPL 22 +#define XILINX_CPM_PCIE_INTR_SLV_ERRP 23 +#define XILINX_CPM_PCIE_INTR_SLV_CMPABT 24 +#define XILINX_CPM_PCIE_INTR_SLV_ILLBUR 25 +#define XILINX_CPM_PCIE_INTR_MST_DECERR 26 +#define XILINX_CPM_PCIE_INTR_MST_SLVERR 27 +#define XILINX_CPM_PCIE_INTR_SLV_PCIE_TIMEOUT 28 + +#define IMR(x) BIT(XILINX_CPM_PCIE_INTR_ ##x) + +#define XILINX_CPM_PCIE_IMR_ALL_MASK \ + ( \ + IMR(LINK_DOWN) | \ + IMR(HOT_RESET) | \ + IMR(CFG_PCIE_TIMEOUT) | \ + IMR(CFG_TIMEOUT) | \ + IMR(CORRECTABLE) | \ + IMR(NONFATAL) | \ + IMR(FATAL) | \ + IMR(CFG_ERR_POISON) | \ + IMR(PME_TO_ACK_RCVD) | \ + IMR(INTX) | \ + IMR(PM_PME_RCVD) | \ + IMR(SLV_UNSUPP) | \ + IMR(SLV_UNEXP) | \ + IMR(SLV_COMPL) | \ + IMR(SLV_ERRP) | \ + IMR(SLV_CMPABT) | \ + IMR(SLV_ILLBUR) | \ + IMR(MST_DECERR) | \ + IMR(MST_SLVERR) | \ + IMR(SLV_PCIE_TIMEOUT) \ + ) + +#define XILINX_CPM_PCIE_IDR_ALL_MASK 0xFFFFFFFF +#define XILINX_CPM_PCIE_IDRN_MASK GENMASK(19, 16) +#define XILINX_CPM_PCIE_IDRN_SHIFT 16 + +/* Root Port Error FIFO Read Register definitions */ +#define XILINX_CPM_PCIE_RPEFR_ERR_VALID BIT(18) +#define XILINX_CPM_PCIE_RPEFR_REQ_ID GENMASK(15, 0) +#define XILINX_CPM_PCIE_RPEFR_ALL_MASK 0xFFFFFFFF + +/* Root Port Status/control Register definitions */ +#define XILINX_CPM_PCIE_REG_RPSC_BEN BIT(0) + +/* Phy Status/Control Register definitions */ +#define XILINX_CPM_PCIE_REG_PSCR_LNKUP BIT(11) + +/** + * struct xilinx_cpm_pcie_port - PCIe port information + * @reg_base: Bridge Register Base + * @cpm_base: CPM System Level Control and Status Register(SLCR) Base + * @dev: Device pointer + * @intx_domain: Legacy IRQ domain pointer + * @cpm_domain: CPM IRQ domain pointer + * @cfg: Holds mappings of config space window + * @intx_irq: legacy interrupt number + * @irq: Error interrupt number + * @lock: lock protecting shared register access + */ +struct xilinx_cpm_pcie_port { + void __iomem *reg_base; + void __iomem *cpm_base; + struct device *dev; + struct 
irq_domain *intx_domain; + struct irq_domain *cpm_domain; + struct pci_config_window *cfg; + int intx_irq; + int irq; + raw_spinlock_t lock; +}; + +static u32 pcie_read(struct xilinx_cpm_pcie_port *port, u32 reg) +{ + return readl_relaxed(port->reg_base + reg); +} + +static void pcie_write(struct xilinx_cpm_pcie_port *port, + u32 val, u32 reg) +{ + writel_relaxed(val, port->reg_base + reg); +} + +static bool cpm_pcie_link_up(struct xilinx_cpm_pcie_port *port) +{ + return (pcie_read(port, XILINX_CPM_PCIE_REG_PSCR) & + XILINX_CPM_PCIE_REG_PSCR_LNKUP); +} + +static void cpm_pcie_clear_err_interrupts(struct xilinx_cpm_pcie_port *port) +{ + unsigned long val = pcie_read(port, XILINX_CPM_PCIE_REG_RPEFR); + + if (val & XILINX_CPM_PCIE_RPEFR_ERR_VALID) { + dev_dbg(port->dev, "Requester ID %lu\n", + val & XILINX_CPM_PCIE_RPEFR_REQ_ID); + pcie_write(port, XILINX_CPM_PCIE_RPEFR_ALL_MASK, + XILINX_CPM_PCIE_REG_RPEFR); + } +} + +static void xilinx_cpm_mask_leg_irq(struct irq_data *data) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long flags; + u32 mask; + u32 val; + + mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT); + raw_spin_lock_irqsave(&port->lock, flags); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK); + pcie_write(port, (val & (~mask)), XILINX_CPM_PCIE_REG_IDRN_MASK); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static void xilinx_cpm_unmask_leg_irq(struct irq_data *data) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(data); + unsigned long flags; + u32 mask; + u32 val; + + mask = BIT(data->hwirq + XILINX_CPM_PCIE_IDRN_SHIFT); + raw_spin_lock_irqsave(&port->lock, flags); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDRN_MASK); + pcie_write(port, (val | mask), XILINX_CPM_PCIE_REG_IDRN_MASK); + raw_spin_unlock_irqrestore(&port->lock, flags); +} + +static struct irq_chip xilinx_cpm_leg_irq_chip = { + .name = "INTx", + .irq_mask = xilinx_cpm_mask_leg_irq, + .irq_unmask = xilinx_cpm_unmask_leg_irq, +}; + +/** + * xilinx_cpm_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid + * @domain: IRQ domain + * @irq: Virtual IRQ number + * @hwirq: HW interrupt number + * + * Return: Always returns 0. 
+ */ +static int xilinx_cpm_pcie_intx_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &xilinx_cpm_leg_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + + return 0; +} + +/* INTx IRQ Domain operations */ +static const struct irq_domain_ops intx_domain_ops = { + .map = xilinx_cpm_pcie_intx_map, +}; + +static void xilinx_cpm_pcie_intx_flow(struct irq_desc *desc) +{ + struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long val; + int i; + + chained_irq_enter(chip, desc); + + val = FIELD_GET(XILINX_CPM_PCIE_IDRN_MASK, + pcie_read(port, XILINX_CPM_PCIE_REG_IDRN)); + + for_each_set_bit(i, &val, PCI_NUM_INTX) + generic_handle_irq(irq_find_mapping(port->intx_domain, i)); + + chained_irq_exit(chip, desc); +} + +static void xilinx_cpm_mask_event_irq(struct irq_data *d) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d); + u32 val; + + raw_spin_lock(&port->lock); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR); + val &= ~BIT(d->hwirq); + pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR); + raw_spin_unlock(&port->lock); +} + +static void xilinx_cpm_unmask_event_irq(struct irq_data *d) +{ + struct xilinx_cpm_pcie_port *port = irq_data_get_irq_chip_data(d); + u32 val; + + raw_spin_lock(&port->lock); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IMR); + val |= BIT(d->hwirq); + pcie_write(port, val, XILINX_CPM_PCIE_REG_IMR); + raw_spin_unlock(&port->lock); +} + +static struct irq_chip xilinx_cpm_event_irq_chip = { + .name = "RC-Event", + .irq_mask = xilinx_cpm_mask_event_irq, + .irq_unmask = xilinx_cpm_unmask_event_irq, +}; + +static int xilinx_cpm_pcie_event_map(struct irq_domain *domain, + unsigned int irq, irq_hw_number_t hwirq) +{ + irq_set_chip_and_handler(irq, &xilinx_cpm_event_irq_chip, + handle_level_irq); + irq_set_chip_data(irq, domain->host_data); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; +} + +static const struct irq_domain_ops event_domain_ops = { + .map = xilinx_cpm_pcie_event_map, +}; + +static void xilinx_cpm_pcie_event_flow(struct irq_desc *desc) +{ + struct xilinx_cpm_pcie_port *port = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned long val; + int i; + + chained_irq_enter(chip, desc); + val = pcie_read(port, XILINX_CPM_PCIE_REG_IDR); + val &= pcie_read(port, XILINX_CPM_PCIE_REG_IMR); + for_each_set_bit(i, &val, 32) + generic_handle_irq(irq_find_mapping(port->cpm_domain, i)); + pcie_write(port, val, XILINX_CPM_PCIE_REG_IDR); + + /* + * XILINX_CPM_PCIE_MISC_IR_STATUS register is mapped to + * CPM SLCR block. 
+ */ + val = readl_relaxed(port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS); + if (val) + writel_relaxed(val, + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_STATUS); + + chained_irq_exit(chip, desc); +} + +#define _IC(x, s) \ + [XILINX_CPM_PCIE_INTR_ ## x] = { __stringify(x), s } + +static const struct { + const char *sym; + const char *str; +} intr_cause[32] = { + _IC(LINK_DOWN, "Link Down"), + _IC(HOT_RESET, "Hot reset"), + _IC(CFG_TIMEOUT, "ECAM access timeout"), + _IC(CORRECTABLE, "Correctable error message"), + _IC(NONFATAL, "Non fatal error message"), + _IC(FATAL, "Fatal error message"), + _IC(SLV_UNSUPP, "Slave unsupported request"), + _IC(SLV_UNEXP, "Slave unexpected completion"), + _IC(SLV_COMPL, "Slave completion timeout"), + _IC(SLV_ERRP, "Slave Error Poison"), + _IC(SLV_CMPABT, "Slave Completer Abort"), + _IC(SLV_ILLBUR, "Slave Illegal Burst"), + _IC(MST_DECERR, "Master decode error"), + _IC(MST_SLVERR, "Master slave error"), + _IC(CFG_PCIE_TIMEOUT, "PCIe ECAM access timeout"), + _IC(CFG_ERR_POISON, "ECAM poisoned completion received"), + _IC(PME_TO_ACK_RCVD, "PME_TO_ACK message received"), + _IC(PM_PME_RCVD, "PM_PME message received"), + _IC(SLV_PCIE_TIMEOUT, "PCIe completion timeout received"), +}; + +static irqreturn_t xilinx_cpm_pcie_intr_handler(int irq, void *dev_id) +{ + struct xilinx_cpm_pcie_port *port = dev_id; + struct device *dev = port->dev; + struct irq_data *d; + + d = irq_domain_get_irq_data(port->cpm_domain, irq); + + switch (d->hwirq) { + case XILINX_CPM_PCIE_INTR_CORRECTABLE: + case XILINX_CPM_PCIE_INTR_NONFATAL: + case XILINX_CPM_PCIE_INTR_FATAL: + cpm_pcie_clear_err_interrupts(port); + fallthrough; + + default: + if (intr_cause[d->hwirq].str) + dev_warn(dev, "%s\n", intr_cause[d->hwirq].str); + else + dev_warn(dev, "Unknown IRQ %ld\n", d->hwirq); + } + + return IRQ_HANDLED; +} + +static void xilinx_cpm_free_irq_domains(struct xilinx_cpm_pcie_port *port) +{ + if (port->intx_domain) { + irq_domain_remove(port->intx_domain); + port->intx_domain = NULL; + } + + if (port->cpm_domain) { + irq_domain_remove(port->cpm_domain); + port->cpm_domain = NULL; + } +} + +/** + * xilinx_cpm_pcie_init_irq_domain - Initialize IRQ domain + * @port: PCIe port information + * + * Return: '0' on success and error value on failure + */ +static int xilinx_cpm_pcie_init_irq_domain(struct xilinx_cpm_pcie_port *port) +{ + struct device *dev = port->dev; + struct device_node *node = dev->of_node; + struct device_node *pcie_intc_node; + + /* Setup INTx */ + pcie_intc_node = of_get_next_child(node, NULL); + if (!pcie_intc_node) { + dev_err(dev, "No PCIe Intc node found\n"); + return -EINVAL; + } + + port->cpm_domain = irq_domain_add_linear(pcie_intc_node, 32, + &event_domain_ops, + port); + if (!port->cpm_domain) + goto out; + + irq_domain_update_bus_token(port->cpm_domain, DOMAIN_BUS_NEXUS); + + port->intx_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX, + &intx_domain_ops, + port); + if (!port->intx_domain) + goto out; + + irq_domain_update_bus_token(port->intx_domain, DOMAIN_BUS_WIRED); + + of_node_put(pcie_intc_node); + raw_spin_lock_init(&port->lock); + + return 0; +out: + xilinx_cpm_free_irq_domains(port); + dev_err(dev, "Failed to allocate IRQ domains\n"); + + return -ENOMEM; +} + +static int xilinx_cpm_setup_irq(struct xilinx_cpm_pcie_port *port) +{ + struct device *dev = port->dev; + struct platform_device *pdev = to_platform_device(dev); + int i, irq; + + port->irq = platform_get_irq(pdev, 0); + if (port->irq < 0) + return port->irq; + + for (i = 0; i < 
ARRAY_SIZE(intr_cause); i++) { + int err; + + if (!intr_cause[i].str) + continue; + + irq = irq_create_mapping(port->cpm_domain, i); + if (!irq) { + dev_err(dev, "Failed to map interrupt\n"); + return -ENXIO; + } + + err = devm_request_irq(dev, irq, xilinx_cpm_pcie_intr_handler, + 0, intr_cause[i].sym, port); + if (err) { + dev_err(dev, "Failed to request IRQ %d\n", irq); + return err; + } + } + + port->intx_irq = irq_create_mapping(port->cpm_domain, + XILINX_CPM_PCIE_INTR_INTX); + if (!port->intx_irq) { + dev_err(dev, "Failed to map INTx interrupt\n"); + return -ENXIO; + } + + /* Plug the INTx chained handler */ + irq_set_chained_handler_and_data(port->intx_irq, + xilinx_cpm_pcie_intx_flow, port); + + /* Plug the main event chained handler */ + irq_set_chained_handler_and_data(port->irq, + xilinx_cpm_pcie_event_flow, port); + + return 0; +} + +/** + * xilinx_cpm_pcie_init_port - Initialize hardware + * @port: PCIe port information + */ +static void xilinx_cpm_pcie_init_port(struct xilinx_cpm_pcie_port *port) +{ + if (cpm_pcie_link_up(port)) + dev_info(port->dev, "PCIe Link is UP\n"); + else + dev_info(port->dev, "PCIe Link is DOWN\n"); + + /* Disable all interrupts */ + pcie_write(port, ~XILINX_CPM_PCIE_IDR_ALL_MASK, + XILINX_CPM_PCIE_REG_IMR); + + /* Clear pending interrupts */ + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_IDR) & + XILINX_CPM_PCIE_IMR_ALL_MASK, + XILINX_CPM_PCIE_REG_IDR); + + /* + * XILINX_CPM_PCIE_MISC_IR_ENABLE register is mapped to + * CPM SLCR block. + */ + writel(XILINX_CPM_PCIE_MISC_IR_LOCAL, + port->cpm_base + XILINX_CPM_PCIE_MISC_IR_ENABLE); + /* Enable the Bridge enable bit */ + pcie_write(port, pcie_read(port, XILINX_CPM_PCIE_REG_RPSC) | + XILINX_CPM_PCIE_REG_RPSC_BEN, + XILINX_CPM_PCIE_REG_RPSC); +} + +/** + * xilinx_cpm_pcie_parse_dt - Parse Device tree + * @port: PCIe port information + * @bus_range: Bus resource + * + * Return: '0' on success and error value on failure + */ +static int xilinx_cpm_pcie_parse_dt(struct xilinx_cpm_pcie_port *port, + struct resource *bus_range) +{ + struct device *dev = port->dev; + struct platform_device *pdev = to_platform_device(dev); + struct resource *res; + + port->cpm_base = devm_platform_ioremap_resource_byname(pdev, + "cpm_slcr"); + if (IS_ERR(port->cpm_base)) + return PTR_ERR(port->cpm_base); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg"); + if (!res) + return -ENXIO; + + port->cfg = pci_ecam_create(dev, res, bus_range, + &pci_generic_ecam_ops); + if (IS_ERR(port->cfg)) + return PTR_ERR(port->cfg); + + port->reg_base = port->cfg->win; + + return 0; +} + +static void xilinx_cpm_free_interrupts(struct xilinx_cpm_pcie_port *port) +{ + irq_set_chained_handler_and_data(port->intx_irq, NULL, NULL); + irq_set_chained_handler_and_data(port->irq, NULL, NULL); +} + +/** + * xilinx_cpm_pcie_probe - Probe function + * @pdev: Platform device pointer + * + * Return: '0' on success and error value on failure + */ +static int xilinx_cpm_pcie_probe(struct platform_device *pdev) +{ + struct xilinx_cpm_pcie_port *port; + struct device *dev = &pdev->dev; + struct pci_host_bridge *bridge; + struct resource_entry *bus; + int err; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port)); + if (!bridge) + return -ENODEV; + + port = pci_host_bridge_priv(bridge); + + port->dev = dev; + + err = xilinx_cpm_pcie_init_irq_domain(port); + if (err) + return err; + + bus = resource_list_first_type(&bridge->windows, IORESOURCE_BUS); + if (!bus) + return -ENODEV; + + err = xilinx_cpm_pcie_parse_dt(port, bus->res); + if 
(err) { + dev_err(dev, "Parsing DT failed\n"); + goto err_parse_dt; + } + + xilinx_cpm_pcie_init_port(port); + + err = xilinx_cpm_setup_irq(port); + if (err) { + dev_err(dev, "Failed to set up interrupts\n"); + goto err_setup_irq; + } + + bridge->dev.parent = dev; + bridge->sysdata = port->cfg; + bridge->busnr = port->cfg->busr.start; + bridge->ops = (struct pci_ops *)&pci_generic_ecam_ops.pci_ops; + bridge->map_irq = of_irq_parse_and_map_pci; + bridge->swizzle_irq = pci_common_swizzle; + + err = pci_host_probe(bridge); + if (err < 0) + goto err_host_bridge; + + return 0; + +err_host_bridge: + xilinx_cpm_free_interrupts(port); +err_setup_irq: + pci_ecam_free(port->cfg); +err_parse_dt: + xilinx_cpm_free_irq_domains(port); + return err; +} + +static const struct of_device_id xilinx_cpm_pcie_of_match[] = { + { .compatible = "xlnx,versal-cpm-host-1.00", }, + {} +}; + +static struct platform_driver xilinx_cpm_pcie_driver = { + .driver = { + .name = "xilinx-cpm-pcie", + .of_match_table = xilinx_cpm_pcie_of_match, + .suppress_bind_attrs = true, + }, + .probe = xilinx_cpm_pcie_probe, +}; + +builtin_platform_driver(xilinx_cpm_pcie_driver); diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c index 9bd1427f2fd6..f3cf7d61924f 100644 --- a/drivers/pci/controller/pcie-xilinx-nwl.c +++ b/drivers/pci/controller/pcie-xilinx-nwl.c @@ -166,7 +166,6 @@ struct nwl_pcie { int irq_misc; u32 ecam_value; u8 last_busno; - u8 root_busno; struct nwl_msi msi; struct irq_domain *legacy_irq_domain; raw_spinlock_t leg_mask_lock; @@ -217,13 +216,11 @@ static bool nwl_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) struct nwl_pcie *pcie = bus->sysdata; /* Check link before accessing downstream ports */ - if (bus->number != pcie->root_busno) { + if (!pci_is_root_bus(bus)) { if (!nwl_pcie_link_up(pcie)) return false; - } - - /* Only one device down on each root port */ - if (bus->number == pcie->root_busno && devfn > 0) + } else if (devfn > 0) + /* Only one device down on each root port */ return false; return true; @@ -586,7 +583,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) /* Get msi_1 IRQ number */ msi->irq_msi1 = platform_get_irq_byname(pdev, "msi1"); if (msi->irq_msi1 < 0) { - dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi1); ret = -EINVAL; goto err; } @@ -597,7 +593,6 @@ static int nwl_pcie_enable_msi(struct nwl_pcie *pcie) /* Get msi_0 IRQ number */ msi->irq_msi0 = platform_get_irq_byname(pdev, "msi0"); if (msi->irq_msi0 < 0) { - dev_err(dev, "failed to get IRQ#%d\n", msi->irq_msi0); ret = -EINVAL; goto err; } @@ -728,11 +723,8 @@ static int nwl_pcie_bridge_init(struct nwl_pcie *pcie) /* Get misc IRQ number */ pcie->irq_misc = platform_get_irq_byname(pdev, "misc"); - if (pcie->irq_misc < 0) { - dev_err(dev, "failed to get misc IRQ %d\n", - pcie->irq_misc); + if (pcie->irq_misc < 0) return -EINVAL; - } err = devm_request_irq(dev, pcie->irq_misc, nwl_pcie_misc_handler, IRQF_SHARED, @@ -797,10 +789,8 @@ static int nwl_pcie_parse_dt(struct nwl_pcie *pcie, /* Get intx IRQ number */ pcie->irq_intx = platform_get_irq_byname(pdev, "intx"); - if (pcie->irq_intx < 0) { - dev_err(dev, "failed to get intx IRQ %d\n", pcie->irq_intx); + if (pcie->irq_intx < 0) return pcie->irq_intx; - } irq_set_chained_handler_and_data(pcie->irq_intx, nwl_pcie_leg_handler, pcie); @@ -817,8 +807,6 @@ static int nwl_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct nwl_pcie *pcie; - struct pci_bus *bus; - struct pci_bus *child; struct 
pci_host_bridge *bridge; int err; @@ -843,25 +831,14 @@ static int nwl_pcie_probe(struct platform_device *pdev) return err; } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - err = nwl_pcie_init_irq_domain(pcie); if (err) { dev_err(dev, "Failed creating IRQ Domain\n"); return err; } - bridge->dev.parent = dev; bridge->sysdata = pcie; - bridge->busnr = pcie->root_busno; bridge->ops = &nwl_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; if (IS_ENABLED(CONFIG_PCI_MSI)) { err = nwl_pcie_enable_msi(pcie); @@ -871,17 +848,7 @@ static int nwl_pcie_probe(struct platform_device *pdev) } } - err = pci_scan_root_bus_bridge(bridge); - if (err) - return err; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static struct platform_driver nwl_pcie_driver = { diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c index 98e55297815b..8523be61bba5 100644 --- a/drivers/pci/controller/pcie-xilinx.c +++ b/drivers/pci/controller/pcie-xilinx.c @@ -98,7 +98,6 @@ * @reg_base: IO Mapped Register Base * @irq: Interrupt number * @msi_pages: MSI pages - * @root_busno: Root Bus number * @dev: Device pointer * @msi_domain: MSI IRQ domain pointer * @leg_domain: Legacy IRQ domain pointer @@ -108,7 +107,6 @@ struct xilinx_pcie_port { void __iomem *reg_base; u32 irq; unsigned long msi_pages; - u8 root_busno; struct device *dev; struct irq_domain *msi_domain; struct irq_domain *leg_domain; @@ -162,14 +160,13 @@ static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) struct xilinx_pcie_port *port = bus->sysdata; /* Check if link is up when trying to access downstream ports */ - if (bus->number != port->root_busno) + if (!pci_is_root_bus(bus)) { if (!xilinx_pcie_link_up(port)) return false; - - /* Only one device down on each root port */ - if (bus->number == port->root_busno && devfn > 0) + } else if (devfn > 0) { + /* Only one device down on each root port */ return false; - + } return true; } @@ -616,7 +613,6 @@ static int xilinx_pcie_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct xilinx_pcie_port *port; - struct pci_bus *bus, *child; struct pci_host_bridge *bridge; int err; @@ -645,35 +641,14 @@ static int xilinx_pcie_probe(struct platform_device *pdev) return err; } - err = pci_parse_request_of_pci_ranges(dev, &bridge->windows, - &bridge->dma_ranges, NULL); - if (err) { - dev_err(dev, "Getting bridge resources failed\n"); - return err; - } - - bridge->dev.parent = dev; bridge->sysdata = port; - bridge->busnr = 0; bridge->ops = &xilinx_pcie_ops; - bridge->map_irq = of_irq_parse_and_map_pci; - bridge->swizzle_irq = pci_common_swizzle; #ifdef CONFIG_PCI_MSI xilinx_pcie_msi_chip.dev = dev; bridge->msi = &xilinx_pcie_msi_chip; #endif - err = pci_scan_root_bus_bridge(bridge); - if (err < 0) - return err; - - bus = bridge->bus; - - pci_assign_unassigned_bus_resources(bus); - list_for_each_entry(child, &bus->children, node) - pcie_bus_configure_settings(child); - pci_bus_add_devices(bus); - return 0; + return pci_host_probe(bridge); } static const struct of_device_id xilinx_pcie_of_match[] = { diff --git a/drivers/pci/controller/vmd.c b/drivers/pci/controller/vmd.c index 
ebec0a6e77ed..f69ef8c89f72 100644 --- a/drivers/pci/controller/vmd.c +++ b/drivers/pci/controller/vmd.c @@ -40,13 +40,19 @@ enum vmd_features { * membars, in order to allow proper address translation during * resource assignment to enable guest virtualization */ - VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), + VMD_FEAT_HAS_MEMBAR_SHADOW = (1 << 0), /* * Device may provide root port configuration information which limits * bus numbering */ - VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), + VMD_FEAT_HAS_BUS_RESTRICTIONS = (1 << 1), + + /* + * Device contains physical location shadow registers in + * vendor-specific capability space + */ + VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP = (1 << 2), }; /* @@ -454,6 +460,28 @@ static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features) } } + if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) { + int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR); + u32 reg, regu; + + pci_read_config_dword(vmd->dev, pos + 4, &reg); + + /* "SHDW" */ + if (pos && reg == 0x53484457) { + pci_read_config_dword(vmd->dev, pos + 8, &reg); + pci_read_config_dword(vmd->dev, pos + 12, &regu); + offset[0] = vmd->dev->resource[VMD_MEMBAR1].start - + (((u64) regu << 32 | reg) & + PCI_BASE_ADDRESS_MEM_MASK); + + pci_read_config_dword(vmd->dev, pos + 16, &reg); + pci_read_config_dword(vmd->dev, pos + 20, &regu); + offset[1] = vmd->dev->resource[VMD_MEMBAR2].start - + (((u64) regu << 32 | reg) & + PCI_BASE_ADDRESS_MEM_MASK); + } + } + /* * Certain VMD devices may have a root port configuration option which * limits the bus range to between 0-127, 128-255, or 224-255 @@ -720,16 +748,20 @@ static int vmd_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume); static const struct pci_device_id vmd_ids[] = { - {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),}, + {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D), + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0), .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW | VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f), - .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d), - .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B), - .driver_data = VMD_FEAT_HAS_BUS_RESTRICTIONS,}, + .driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP | + VMD_FEAT_HAS_BUS_RESTRICTIONS,}, {0,} }; MODULE_DEVICE_TABLE(pci, vmd_ids); diff --git a/drivers/pci/endpoint/functions/pci-epf-test.c b/drivers/pci/endpoint/functions/pci-epf-test.c index c89a9561439f..e4e51d884553 100644 --- a/drivers/pci/endpoint/functions/pci-epf-test.c +++ b/drivers/pci/endpoint/functions/pci-epf-test.c @@ -181,7 +181,7 @@ static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) /** * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel - * @epf: the EPF test device that performs data transfer operation + * @epf_test: the EPF test device that performs data transfer operation * * Helper to cleanup EPF test DMA channel.
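Aside, not part of the patch: the VMD membar-shadow hunk above assembles a 64-bit host-physical base from two vendor-capability dwords and derives the translation offset applied to child device BARs. A minimal sketch of that arithmetic, assuming hypothetical lo/hi variables holding the dwords read at pos + 8/12 (pos + 16/20 for MEMBAR2):

/* Sketch only: lo/hi stand in for the shadow base dwords read above. */
u64 host_base = (((u64)hi << 32) | lo) & PCI_BASE_ADDRESS_MEM_MASK;
resource_size_t membar1_offset =
        vmd->dev->resource[VMD_MEMBAR1].start - host_base;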
*/ diff --git a/drivers/pci/endpoint/pci-ep-cfs.c b/drivers/pci/endpoint/pci-ep-cfs.c index 55edce50be96..3710adf51912 100644 --- a/drivers/pci/endpoint/pci-ep-cfs.c +++ b/drivers/pci/endpoint/pci-ep-cfs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * configfs to configure the PCI endpoint * * Copyright (C) 2017 Texas Instruments diff --git a/drivers/pci/endpoint/pci-epc-core.c b/drivers/pci/endpoint/pci-epc-core.c index 82ba0dc7f2f5..cadd3db0cbb0 100644 --- a/drivers/pci/endpoint/pci-epc-core.c +++ b/drivers/pci/endpoint/pci-epc-core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * PCI Endpoint *Controller* (EPC) library * * Copyright (C) 2017 Texas Instruments diff --git a/drivers/pci/endpoint/pci-epc-mem.c b/drivers/pci/endpoint/pci-epc-mem.c index 80c46f3a4590..a97b56a6d2db 100644 --- a/drivers/pci/endpoint/pci-epc-mem.c +++ b/drivers/pci/endpoint/pci-epc-mem.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * PCI Endpoint *Controller* Address Space Management * * Copyright (C) 2017 Texas Instruments diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c index 244e00f48c5c..c977cf9dce56 100644 --- a/drivers/pci/endpoint/pci-epf-core.c +++ b/drivers/pci/endpoint/pci-epf-core.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/** +/* * PCI Endpoint *Function* (EPF) library * * Copyright (C) 2017 Texas Instruments @@ -71,6 +71,7 @@ EXPORT_SYMBOL_GPL(pci_epf_bind); /** * pci_epf_free_space() - free the allocated PCI EPF register space + * @epf: the EPF device from whom to free the memory * @addr: the virtual address of the PCI EPF register space * @bar: the BAR number corresponding to the register space * @@ -96,6 +97,7 @@ EXPORT_SYMBOL_GPL(pci_epf_free_space); /** * pci_epf_alloc_space() - allocate memory for the PCI EPF register space + * @epf: the EPF device to whom allocate the memory * @size: the size of the memory that has to be allocated * @bar: the BAR number corresponding to the allocated register space * @align: alignment size for the allocation region diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 6b7c1ed58e7e..2750a64cecd3 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -61,7 +61,7 @@ static acpi_status acpi_run_oshp(acpi_handle handle) /** * acpi_get_hp_hw_control_from_firmware - * @dev: the pci_dev of the bridge that has a hotplug controller + * @pdev: the pci_dev of the bridge that has a hotplug controller * * Attempt to take hotplug control from firmware. */ @@ -191,7 +191,7 @@ check_hotplug(acpi_handle handle, u32 lvl, void *context, void **rv) /** * acpi_pci_detect_ejectable - check if the PCI bus has ejectable slots - * @handle - handle of the PCI bus to scan + * @handle: handle of the PCI bus to scan * * Returns 1 if the PCI bus has ACPI based ejectable slots, 0 otherwise. */ diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index bf779f291f15..ad3393930ecb 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -153,6 +153,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) /** * pciehp_check_presence() - synthesize event if presence has changed + * @ctrl: controller to check * * On probe and resume, an explicit presence check is necessary to bring up an * occupied slot or bring down an unoccupied slot. 
This can't be triggered by diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index c5eb509c72f0..f979b7098acf 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -352,7 +352,7 @@ static int dlpar_remove_vio_slot(char *drc_name, struct device_node *dn) * -ENODEV Not a valid drc_name * -EIO Internal PCI Error */ -int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) +static int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn) { struct pci_bus *bus; struct slot *slot; @@ -458,7 +458,7 @@ static inline int is_dlpar_capable(void) return (int) (rc != RTAS_UNKNOWN_SERVICE); } -int __init rpadlpar_io_init(void) +static int __init rpadlpar_io_init(void) { if (!is_dlpar_capable()) { @@ -470,7 +470,7 @@ int __init rpadlpar_io_init(void) return dlpar_sysfs_init(); } -void rpadlpar_io_exit(void) +static void __exit rpadlpar_io_exit(void) { dlpar_sysfs_exit(); } diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c index a1de501a2729..12ecd0aaa28d 100644 --- a/drivers/pci/irq.c +++ b/drivers/pci/irq.c @@ -6,61 +6,11 @@ * Copyright (C) 2017 Christoph Hellwig. */ -#include <linux/acpi.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/pci.h> -static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) -{ - struct pci_dev *parent = to_pci_dev(pdev->dev.parent); - - pci_err(pdev, "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", - dev_name(&parent->dev), parent->vendor, parent->device); - pci_err(pdev, "%s\n", reason); - pci_err(pdev, "Please report to linux-kernel@vger.kernel.org\n"); - WARN_ON(1); -} - -/** - * pci_lost_interrupt - reports a lost PCI interrupt - * @pdev: device whose interrupt is lost - * - * The primary function of this routine is to report a lost interrupt - * in a standard way which users can recognise (instead of blaming the - * driver). - * - * Returns: - * a suggestion for fixing it (although the driver is not required to - * act on this). 
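Aside, not part of the patch: the msi.c hunk a little further on folds the separate msix_vecs/msi_vecs locals into a single nvecs return value. The caller-visible pattern is unchanged; a hedged sketch with an arbitrary vector range, assuming a struct pci_dev *pdev:

/* Sketch only: prefer MSI-X, fall back to MSI, then to legacy INTx. */
int nvecs = pci_alloc_irq_vectors(pdev, 1, 8,
                                  PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
if (nvecs < 0)
        return nvecs;   /* negative errno, e.g. -ENOSPC, on failure */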
- */ -enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev) -{ - if (pdev->msi_enabled || pdev->msix_enabled) { - enum pci_lost_interrupt_reason ret; - - if (pdev->msix_enabled) { - pci_note_irq_problem(pdev, "MSIX routing failure"); - ret = PCI_LOST_IRQ_DISABLE_MSIX; - } else { - pci_note_irq_problem(pdev, "MSI routing failure"); - ret = PCI_LOST_IRQ_DISABLE_MSI; - } - return ret; - } -#ifdef CONFIG_ACPI - if (!(acpi_disabled || acpi_noirq)) { - pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq"); - /* currently no way to fix acpi on the fly */ - return PCI_LOST_IRQ_DISABLE_ACPI; - } -#endif - pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)"); - return PCI_LOST_IRQ_NO_INFORMATION; -} -EXPORT_SYMBOL(pci_lost_interrupt); - /** * pci_request_irq - allocate an interrupt line for a PCI device * @dev: PCI device to operate on diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 19aeadb22f11..30ae4ffda5c1 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1191,8 +1191,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, struct irq_affinity *affd) { struct irq_affinity msi_default_affd = {0}; - int msix_vecs = -ENOSPC; - int msi_vecs = -ENOSPC; + int nvecs = -ENOSPC; if (flags & PCI_IRQ_AFFINITY) { if (!affd) @@ -1203,17 +1202,16 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } if (flags & PCI_IRQ_MSIX) { - msix_vecs = __pci_enable_msix_range(dev, NULL, min_vecs, - max_vecs, affd, flags); - if (msix_vecs > 0) - return msix_vecs; + nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs, + affd, flags); + if (nvecs > 0) + return nvecs; } if (flags & PCI_IRQ_MSI) { - msi_vecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, - affd); - if (msi_vecs > 0) - return msi_vecs; + nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd); + if (nvecs > 0) + return nvecs; } /* use legacy IRQ if allowed */ @@ -1231,9 +1229,7 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, } } - if (msix_vecs == -ENOSPC) - return -ENOSPC; - return msi_vecs; + return nvecs; } EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity); diff --git a/drivers/pci/of.c b/drivers/pci/of.c index 27839cd2459f..ac24cd5439a9 100644 --- a/drivers/pci/of.c +++ b/drivers/pci/of.c @@ -42,7 +42,7 @@ void pci_set_bus_of_node(struct pci_bus *bus) } else { node = of_node_get(bus->self->dev.of_node); if (node && of_property_read_bool(node, "external-facing")) - bus->self->untrusted = true; + bus->self->external_facing = true; } bus->dev.of_node = node; @@ -243,6 +243,8 @@ EXPORT_SYMBOL_GPL(of_pci_check_probe_only); * @busno: bus number associated with the bridge root bus * @bus_max: maximum number of buses for this bridge * @resources: list where the range of resources will be added after DT parsing + * @ib_resources: list where the range of inbound resources (with addresses + * from 'dma-ranges') will be added after DT parsing * @io_base: pointer to a variable that will contain on return the physical * address for the start of the I/O range. Can be NULL if the caller doesn't * expect I/O ranges to be present in the device tree. 
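Aside, not part of the patch: once devm_of_pci_bridge_init() is called from devm_pci_alloc_host_bridge() (see the probe.c hunk further below), a DT-based host controller probe can reduce to roughly the shape sketched here, matching the xilinx conversions above. This is a sketch under stated assumptions: foo_pcie and foo_pcie_ops are hypothetical, only the PCI core calls are real.

struct foo_pcie {
        void __iomem *base;             /* hypothetical register window */
};

static int foo_pcie_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct pci_host_bridge *bridge;
        struct foo_pcie *pcie;

        /* DT windows/dma_ranges are now parsed and requested here */
        bridge = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
        if (!bridge)
                return -ENOMEM;

        pcie = pci_host_bridge_priv(bridge);
        /* ... controller-specific init elided ... */

        bridge->sysdata = pcie;
        bridge->ops = &foo_pcie_ops;    /* hypothetical struct pci_ops */

        /* scans the root bus, assigns resources, adds devices */
        return pci_host_probe(bridge);
}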
@@ -521,28 +523,26 @@ int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin) EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci); #endif /* CONFIG_OF_IRQ */ -int pci_parse_request_of_pci_ranges(struct device *dev, - struct list_head *resources, - struct list_head *ib_resources, - struct resource **bus_range) +static int pci_parse_request_of_pci_ranges(struct device *dev, + struct pci_host_bridge *bridge) { int err, res_valid = 0; resource_size_t iobase; struct resource_entry *win, *tmp; - INIT_LIST_HEAD(resources); - if (ib_resources) - INIT_LIST_HEAD(ib_resources); - err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, resources, - ib_resources, &iobase); + INIT_LIST_HEAD(&bridge->windows); + INIT_LIST_HEAD(&bridge->dma_ranges); + + err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows, + &bridge->dma_ranges, &iobase); if (err) return err; - err = devm_request_pci_bus_resources(dev, resources); + err = devm_request_pci_bus_resources(dev, &bridge->windows); if (err) - goto out_release_res; + return err; - resource_list_for_each_entry_safe(win, tmp, resources) { + resource_list_for_each_entry_safe(win, tmp, &bridge->windows) { struct resource *res = win->res; switch (resource_type(res)) { @@ -557,24 +557,25 @@ int pci_parse_request_of_pci_ranges(struct device *dev, case IORESOURCE_MEM: res_valid |= !(res->flags & IORESOURCE_PREFETCH); break; - case IORESOURCE_BUS: - if (bus_range) - *bus_range = res; - break; } } - if (res_valid) + if (!res_valid) + dev_warn(dev, "non-prefetchable memory resource required\n"); + + return 0; +} + +int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) +{ + if (!dev->of_node) return 0; - dev_err(dev, "non-prefetchable memory resource required\n"); - err = -EINVAL; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = of_irq_parse_and_map_pci; - out_release_res: - pci_free_resource_list(resources); - return err; + return pci_parse_request_of_pci_ranges(dev, bridge); } -EXPORT_SYMBOL_GPL(pci_parse_request_of_pci_ranges); #endif /* CONFIG_PCI */ diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c index e8e444eeb1cd..64ebed129dbf 100644 --- a/drivers/pci/p2pdma.c +++ b/drivers/pci/p2pdma.c @@ -253,7 +253,7 @@ static int pci_bridge_has_acs_redir(struct pci_dev *pdev) int pos; u16 ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return 0; @@ -273,6 +273,19 @@ static void seq_buf_print_bus_devfn(struct seq_buf *buf, struct pci_dev *pdev) seq_buf_printf(buf, "%s;", pci_name(pdev)); } +static bool cpu_supports_p2pdma(void) +{ +#ifdef CONFIG_X86 + struct cpuinfo_x86 *c = &cpu_data(0); + + /* Any AMD CPU whose family ID is Zen or newer supports p2pdma */ + if (c->x86_vendor == X86_VENDOR_AMD && c->x86 >= 0x17) + return true; +#endif + + return false; +} + static const struct pci_p2pdma_whitelist_entry { unsigned short vendor; unsigned short device; @@ -280,11 +293,6 @@ static const struct pci_p2pdma_whitelist_entry { REQ_SAME_HOST_BRIDGE = 1 << 0, } flags; } pci_p2pdma_whitelist[] = { - /* AMD ZEN */ - {PCI_VENDOR_ID_AMD, 0x1450, 0}, - {PCI_VENDOR_ID_AMD, 0x15d0, 0}, - {PCI_VENDOR_ID_AMD, 0x1630, 0}, - /* Intel Xeon E5/Core i7 */ {PCI_VENDOR_ID_INTEL, 0x3c00, REQ_SAME_HOST_BRIDGE}, {PCI_VENDOR_ID_INTEL, 0x3c01, REQ_SAME_HOST_BRIDGE}, @@ -473,7 +481,8 @@ upstream_bridge_distance(struct pci_dev *provider, struct pci_dev *client, acs_redirects, acs_list); if (map_type == PCI_P2PDMA_MAP_THRU_HOST_BRIDGE) { - if (!host_bridge_whitelist(provider, 
client)) + if (!cpu_supports_p2pdma() && + !host_bridge_whitelist(provider, client)) map_type = PCI_P2PDMA_MAP_NOT_SUPPORTED; } diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 0d85025c55fd..d5869a03f748 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1213,7 +1213,7 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev, ACPI_FREE(obj); } -static void pci_acpi_set_untrusted(struct pci_dev *dev) +static void pci_acpi_set_external_facing(struct pci_dev *dev) { u8 val; @@ -1224,11 +1224,10 @@ static void pci_acpi_set_untrusted(struct pci_dev *dev) /* * These root ports expose PCIe (including DMA) outside of the - * system so make sure we treat them and everything behind as - * untrusted. + * system. Everything downstream from them is external. */ if (val) - dev->untrusted = 1; + dev->external_facing = 1; } static void pci_acpi_setup(struct device *dev) @@ -1240,7 +1239,7 @@ static void pci_acpi_setup(struct device *dev) return; pci_acpi_optimize_delay(pci_dev, adev->handle); - pci_acpi_set_untrusted(pci_dev); + pci_acpi_set_external_facing(pci_dev); pci_acpi_add_edr_notifier(pci_dev); pci_acpi_add_pm_notifier(adev, pci_dev); diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c index 707dd9808676..781e45cf60d1 100644 --- a/drivers/pci/pci-label.c +++ b/drivers/pci/pci-label.c @@ -18,7 +18,7 @@ * the instance number and string from the type 41 record and exports * it to sysfs. * - * Please see http://linux.dell.com/files/biosdevname/ for more + * Please see https://linux.dell.com/files/biosdevname/ for more * information. */ diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c index ef293e735c55..a0b2bd6c918a 100644 --- a/drivers/pci/pci-pf-stub.c +++ b/drivers/pci/pci-pf-stub.c @@ -9,7 +9,7 @@ #include <linux/module.h> #include <linux/pci.h> -/** +/* * pci_pf_stub_whitelist - White list of devices to bind pci-pf-stub onto * * This table provides the list of IDs this driver is supposed to bind diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index c9338f914a0e..a458c46d7e39 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -777,6 +777,133 @@ int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask) return 0; } +static int pci_acs_enable; + +/** + * pci_request_acs - ask for ACS to be enabled if supported + */ +void pci_request_acs(void) +{ + pci_acs_enable = 1; +} + +static const char *disable_acs_redir_param; + +/** + * pci_disable_acs_redir - disable ACS redirect capabilities + * @dev: the PCI device + * + * For only devices specified in the disable_acs_redir parameter. 
+ */ +static void pci_disable_acs_redir(struct pci_dev *dev) +{ + int ret = 0; + const char *p; + int pos; + u16 ctrl; + + if (!disable_acs_redir_param) + return; + + p = disable_acs_redir_param; + while (*p) { + ret = pci_dev_str_match(dev, p, &p); + if (ret < 0) { + pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", + disable_acs_redir_param); + + break; + } else if (ret == 1) { + /* Found a match */ + break; + } + + if (*p != ';' && *p != ',') { + /* End of param or invalid format */ + break; + } + p++; + } + + if (ret != 1) + return; + + if (!pci_dev_specific_disable_acs_redir(dev)) + return; + + pos = dev->acs_cap; + if (!pos) { + pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); + return; + } + + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); + + /* P2P Request & Completion Redirect */ + ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); + + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); + + pci_info(dev, "disabled ACS redirect\n"); +} + +/** + * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities + * @dev: the PCI device + */ +static void pci_std_enable_acs(struct pci_dev *dev) +{ + int pos; + u16 cap; + u16 ctrl; + + pos = dev->acs_cap; + if (!pos) + return; + + pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); + pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); + + /* Source Validation */ + ctrl |= (cap & PCI_ACS_SV); + + /* P2P Request Redirect */ + ctrl |= (cap & PCI_ACS_RR); + + /* P2P Completion Redirect */ + ctrl |= (cap & PCI_ACS_CR); + + /* Upstream Forwarding */ + ctrl |= (cap & PCI_ACS_UF); + + pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); +} + +/** + * pci_enable_acs - enable ACS if hardware support it + * @dev: the PCI device + */ +static void pci_enable_acs(struct pci_dev *dev) +{ + if (!pci_acs_enable) + goto disable_acs_redir; + + if (!pci_dev_specific_enable_acs(dev)) + goto disable_acs_redir; + + pci_std_enable_acs(dev); + +disable_acs_redir: + /* + * Note: pci_disable_acs_redir() must be called even if ACS was not + * enabled by the kernel because it may have been enabled by + * platform firmware. So if we are told to disable it, we should + * always disable it after setting the kernel's default + * preferences. + */ + pci_disable_acs_redir(dev); +} + /** * pci_restore_bars - restore a device's BAR values (e.g. after wake-up) * @dev: PCI device to have its BARs restored @@ -2046,6 +2173,14 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) } EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); +void pcie_clear_device_status(struct pci_dev *dev) +{ + u16 sta; + + pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); + pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); +} + /** * pcie_clear_root_pme_status - Clear root port PME interrupt status. * @dev: PCIe root port or event collector. @@ -3230,139 +3365,12 @@ void pci_configure_ari(struct pci_dev *dev) } } -static int pci_acs_enable; - -/** - * pci_request_acs - ask for ACS to be enabled if supported - */ -void pci_request_acs(void) -{ - pci_acs_enable = 1; -} - -static const char *disable_acs_redir_param; - -/** - * pci_disable_acs_redir - disable ACS redirect capabilities - * @dev: the PCI device - * - * For only devices specified in the disable_acs_redir parameter. 
- */ -static void pci_disable_acs_redir(struct pci_dev *dev) -{ - int ret = 0; - const char *p; - int pos; - u16 ctrl; - - if (!disable_acs_redir_param) - return; - - p = disable_acs_redir_param; - while (*p) { - ret = pci_dev_str_match(dev, p, &p); - if (ret < 0) { - pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n", - disable_acs_redir_param); - - break; - } else if (ret == 1) { - /* Found a match */ - break; - } - - if (*p != ';' && *p != ',') { - /* End of param or invalid format */ - break; - } - p++; - } - - if (ret != 1) - return; - - if (!pci_dev_specific_disable_acs_redir(dev)) - return; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (!pos) { - pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n"); - return; - } - - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); - - /* P2P Request & Completion Redirect */ - ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC); - - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); - - pci_info(dev, "disabled ACS redirect\n"); -} - -/** - * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities - * @dev: the PCI device - */ -static void pci_std_enable_acs(struct pci_dev *dev) -{ - int pos; - u16 cap; - u16 ctrl; - - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); - if (!pos) - return; - - pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap); - pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl); - - /* Source Validation */ - ctrl |= (cap & PCI_ACS_SV); - - /* P2P Request Redirect */ - ctrl |= (cap & PCI_ACS_RR); - - /* P2P Completion Redirect */ - ctrl |= (cap & PCI_ACS_CR); - - /* Upstream Forwarding */ - ctrl |= (cap & PCI_ACS_UF); - - pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl); -} - -/** - * pci_enable_acs - enable ACS if hardware support it - * @dev: the PCI device - */ -void pci_enable_acs(struct pci_dev *dev) -{ - if (!pci_acs_enable) - goto disable_acs_redir; - - if (!pci_dev_specific_enable_acs(dev)) - goto disable_acs_redir; - - pci_std_enable_acs(dev); - -disable_acs_redir: - /* - * Note: pci_disable_acs_redir() must be called even if ACS was not - * enabled by the kernel because it may have been enabled by - * platform firmware. So if we are told to disable it, we should - * always disable it after setting the kernel's default - * preferences. 
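Aside, not part of the patch: the theme of this pci.c hunk (and of the quirks.c changes later) is that pci_acs_init() caches the ACS extended-capability offset in dev->acs_cap at enumeration time, so later users avoid repeated pci_find_ext_capability() walks. A minimal sketch of a consumer, assuming an already-enumerated struct pci_dev *pdev:

/* Sketch only: read the ACS control word via the cached offset. */
u16 ctrl;
int pos = pdev->acs_cap;        /* 0 when the device has no ACS capability */

if (pos)
        pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);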
- */ - pci_disable_acs_redir(dev); -} - static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags) { int pos; u16 cap, ctrl; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS); + pos = pdev->acs_cap; if (!pos) return false; @@ -3488,6 +3496,18 @@ bool pci_acs_path_enabled(struct pci_dev *start, } /** + * pci_acs_init - Initialize ACS if hardware supports it + * @dev: the PCI device + */ +void pci_acs_init(struct pci_dev *dev) +{ + dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + + if (dev->acs_cap) + pci_enable_acs(dev); +} + +/** * pci_rebar_find_pos - find position of resize ctrl reg for BAR * @pdev: PCI device * @bar: BAR to find @@ -5676,6 +5696,7 @@ EXPORT_SYMBOL(pcie_get_readrq); int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; + int ret; if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -5694,8 +5715,10 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); + + return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_readrq); @@ -5726,6 +5749,7 @@ EXPORT_SYMBOL(pcie_get_mps); int pcie_set_mps(struct pci_dev *dev, int mps) { u16 v; + int ret; if (mps < 128 || mps > 4096 || !is_power_of_2(mps)) return -EINVAL; @@ -5735,8 +5759,10 @@ int pcie_set_mps(struct pci_dev *dev, int mps) return -EINVAL; v <<= 5; - return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, + ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_PAYLOAD, v); + + return pcibios_err_to_errno(ret); } EXPORT_SYMBOL(pcie_set_mps); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 6d3f75867106..fa12f7cbc1a0 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -92,6 +92,7 @@ void pci_refresh_power_state(struct pci_dev *dev); int pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); +void pcie_clear_device_status(struct pci_dev *dev); void pcie_clear_root_pme_status(struct pci_dev *dev); bool pci_check_pme_status(struct pci_dev *dev); void pci_pme_wakeup_bus(struct pci_bus *bus); @@ -532,7 +533,7 @@ static inline resource_size_t pci_resource_alignment(struct pci_dev *dev, return resource_alignment(res); } -void pci_enable_acs(struct pci_dev *dev); +void pci_acs_init(struct pci_dev *dev); #ifdef CONFIG_PCI_QUIRKS int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags); int pci_dev_specific_enable_acs(struct pci_dev *dev); @@ -555,7 +556,7 @@ static inline int pci_dev_specific_disable_acs_redir(struct pci_dev *dev) /* PCI error reporting and recovery */ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, pci_ers_result_t (*reset_link)(struct pci_dev *pdev)); bool pcie_wait_for_link(struct pci_dev *pdev, bool active); @@ -627,6 +628,8 @@ void pci_release_of_node(struct pci_dev *dev); void pci_set_bus_of_node(struct pci_bus *bus); void pci_release_bus_of_node(struct pci_bus *bus); +int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge); + #else static inline int of_pci_parse_bus_range(struct device_node *node, struct resource *res) @@ -650,6 +653,12 @@ static inline void pci_set_of_node(struct pci_dev *dev) { } static inline void pci_release_of_node(struct pci_dev *dev) { } static inline void pci_set_bus_of_node(struct pci_bus *bus) { } static inline void 
pci_release_bus_of_node(struct pci_bus *bus) { } + +static inline int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge) +{ + return 0; +} + #endif /* CONFIG_OF */ #ifdef CONFIG_PCIEAER @@ -658,7 +667,6 @@ void pci_aer_init(struct pci_dev *dev); void pci_aer_exit(struct pci_dev *dev); extern const struct attribute_group aer_stats_attr_group; void pci_aer_clear_fatal_status(struct pci_dev *dev); -void pci_aer_clear_device_status(struct pci_dev *dev); int pci_aer_clear_status(struct pci_dev *dev); int pci_aer_raw_clear_status(struct pci_dev *dev); #else @@ -666,7 +674,6 @@ static inline void pci_no_aer(void) { } static inline void pci_aer_init(struct pci_dev *d) { } static inline void pci_aer_exit(struct pci_dev *d) { } static inline void pci_aer_clear_fatal_status(struct pci_dev *dev) { } -static inline void pci_aer_clear_device_status(struct pci_dev *dev) { } static inline int pci_aer_clear_status(struct pci_dev *dev) { return -EINVAL; } static inline int pci_aer_raw_clear_status(struct pci_dev *dev) { return -EINVAL; } #endif diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig index 9cd31331aee9..3946555a6042 100644 --- a/drivers/pci/pcie/Kconfig +++ b/drivers/pci/pcie/Kconfig @@ -43,7 +43,7 @@ config PCIEAER_INJECT error injection can fake almost all kinds of errors with the help of a user space helper tool aer-inject, which can be gotten from: - http://www.kernel.org/pub/linux/utils/pci/aer-inject/ + https://www.kernel.org/pub/linux/utils/pci/aer-inject/ # # PCI Express ECRC diff --git a/drivers/pci/pcie/aer.c b/drivers/pci/pcie/aer.c index 14af4c97c626..65dff5f3457a 100644 --- a/drivers/pci/pcie/aer.c +++ b/drivers/pci/pcie/aer.c @@ -224,31 +224,28 @@ int pcie_aer_is_native(struct pci_dev *dev) int pci_enable_pcie_error_reporting(struct pci_dev *dev) { + int rc; + if (!pcie_aer_is_native(dev)) return -EIO; - return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + rc = pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); } EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); int pci_disable_pcie_error_reporting(struct pci_dev *dev) { + int rc; + if (!pcie_aer_is_native(dev)) return -EIO; - return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, - PCI_EXP_AER_FLAGS); + rc = pcie_capability_clear_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS); + return pcibios_err_to_errno(rc); } EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); -void pci_aer_clear_device_status(struct pci_dev *dev) -{ - u16 sta; - - pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta); - pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta); -} - int pci_aer_clear_nonfatal_status(struct pci_dev *dev) { int aer = dev->aer_cap; @@ -447,7 +444,7 @@ static const char *aer_error_layer[] = { "Transaction Layer" }; -static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = { +static const char *aer_correctable_error_string[] = { "RxErr", /* Bit Position 0 */ NULL, NULL, @@ -464,9 +461,25 @@ static const char *aer_correctable_error_string[AER_MAX_TYPEOF_COR_ERRS] = { "NonFatalErr", /* Bit Position 13 */ "CorrIntErr", /* Bit Position 14 */ "HeaderOF", /* Bit Position 15 */ + NULL, /* Bit Position 16 */ + NULL, /* Bit Position 17 */ + NULL, /* Bit Position 18 */ + NULL, /* Bit Position 19 */ + NULL, /* Bit Position 20 */ + NULL, /* Bit Position 21 */ + NULL, /* Bit Position 22 */ + NULL, /* Bit Position 23 */ + NULL, /* Bit Position 24 */ + NULL, /* Bit Position 25 */ + NULL, /* Bit Position 26 */ + NULL, /* Bit 
Position 27 */ + NULL, /* Bit Position 28 */ + NULL, /* Bit Position 29 */ + NULL, /* Bit Position 30 */ + NULL, /* Bit Position 31 */ }; -static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = { +static const char *aer_uncorrectable_error_string[] = { "Undefined", /* Bit Position 0 */ NULL, NULL, @@ -494,6 +507,11 @@ static const char *aer_uncorrectable_error_string[AER_MAX_TYPEOF_UNCOR_ERRS] = { "AtomicOpBlocked", /* Bit Position 24 */ "TLPBlockedErr", /* Bit Position 25 */ "PoisonTLPBlocked", /* Bit Position 26 */ + NULL, /* Bit Position 27 */ + NULL, /* Bit Position 28 */ + NULL, /* Bit Position 29 */ + NULL, /* Bit Position 30 */ + NULL, /* Bit Position 31 */ }; static const char *aer_agent_string[] = { @@ -650,24 +668,26 @@ static void __print_tlp_header(struct pci_dev *dev, static void __aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { + const char **strings; unsigned long status = info->status & ~info->mask; - const char *errmsg = NULL; + const char *level, *errmsg; int i; + if (info->severity == AER_CORRECTABLE) { + strings = aer_correctable_error_string; + level = KERN_WARNING; + } else { + strings = aer_uncorrectable_error_string; + level = KERN_ERR; + } + for_each_set_bit(i, &status, 32) { - if (info->severity == AER_CORRECTABLE) - errmsg = i < ARRAY_SIZE(aer_correctable_error_string) ? - aer_correctable_error_string[i] : NULL; - else - errmsg = i < ARRAY_SIZE(aer_uncorrectable_error_string) ? - aer_uncorrectable_error_string[i] : NULL; + errmsg = strings[i]; + if (!errmsg) + errmsg = "Unknown Error Bit"; - if (errmsg) - pci_err(dev, " [%2d] %-22s%s\n", i, errmsg, + pci_printk(level, dev, " [%2d] %-22s%s\n", i, errmsg, info->first_error == i ? " (First)" : ""); - else - pci_err(dev, " [%2d] Unknown Error Bit%s\n", - i, info->first_error == i ? " (First)" : ""); } pci_dev_aer_stats_incr(dev, info); } @@ -676,6 +696,7 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) { int layer, agent; int id = ((dev->bus->number << 8) | dev->devfn); + const char *level; if (!info->status) { pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n", @@ -686,13 +707,14 @@ void aer_print_error(struct pci_dev *dev, struct aer_err_info *info) layer = AER_GET_LAYER_ERROR(info->severity, info->status); agent = AER_GET_AGENT(info->severity, info->status); - pci_err(dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n", - aer_error_severity_string[info->severity], - aer_error_layer[layer], aer_agent_string[agent]); + level = (info->severity == AER_CORRECTABLE) ? 
KERN_WARNING : KERN_ERR; + + pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n", + aer_error_severity_string[info->severity], + aer_error_layer[layer], aer_agent_string[agent]); - pci_err(dev, " device [%04x:%04x] error status/mask=%08x/%08x\n", - dev->vendor, dev->device, - info->status, info->mask); + pci_printk(level, dev, " device [%04x:%04x] error status/mask=%08x/%08x\n", + dev->vendor, dev->device, info->status, info->mask); __aer_print_error(dev, info); @@ -922,7 +944,8 @@ static void handle_error_source(struct pci_dev *dev, struct aer_err_info *info) if (aer) pci_write_config_dword(dev, aer + PCI_ERR_COR_STATUS, info->status); - pci_aer_clear_device_status(dev); + if (pcie_aer_is_native(dev)) + pcie_clear_device_status(dev); } else if (info->severity == AER_NONFATAL) pcie_do_recovery(dev, pci_channel_io_normal, aer_root_reset); else if (info->severity == AER_FATAL) diff --git a/drivers/pci/pcie/aer_inject.c b/drivers/pci/pcie/aer_inject.c index 21cc3d3387f7..c2cbf425afc5 100644 --- a/drivers/pci/pcie/aer_inject.c +++ b/drivers/pci/pcie/aer_inject.c @@ -6,7 +6,7 @@ * trigger various real hardware errors. Software based error * injection can fake almost all kinds of errors with the help of a * user space helper tool aer-inject, which can be gotten from: - * http://www.kernel.org/pub/linux/utils/pci/aer-inject/ + * https://www.kernel.org/pub/linux/utils/pci/aer-inject/ * * Copyright 2009 Intel Corporation. * Huang Ying <ying.huang@intel.com> diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index b17e5ffd31b1..253c30cc1967 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -1182,6 +1182,7 @@ static int pcie_aspm_get_policy(char *buffer, const struct kernel_param *kp) cnt += sprintf(buffer + cnt, "[%s] ", policy_str[i]); else cnt += sprintf(buffer + cnt, "%s ", policy_str[i]); + cnt += sprintf(buffer + cnt, "\n"); return cnt; } diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c index 14bb8f54723e..c543f419d8f9 100644 --- a/drivers/pci/pcie/err.c +++ b/drivers/pci/pcie/err.c @@ -46,7 +46,7 @@ static pci_ers_result_t merge_result(enum pci_ers_result orig, } static int report_error_detected(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, enum pci_ers_result *result) { pci_ers_result_t vote; @@ -147,7 +147,7 @@ out: } pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, - enum pci_channel_state state, + pci_channel_state_t state, pci_ers_result_t (*reset_link)(struct pci_dev *pdev)) { pci_ers_result_t status = PCI_ERS_RESULT_CAN_RECOVER; @@ -197,7 +197,8 @@ pci_ers_result_t pcie_do_recovery(struct pci_dev *dev, pci_dbg(dev, "broadcast resume message\n"); pci_walk_bus(bus, report_resume, &status); - pci_aer_clear_device_status(dev); + if (pcie_aer_is_native(dev)) + pcie_clear_device_status(dev); pci_aer_clear_nonfatal_status(dev); pci_info(dev, "device recovery successful\n"); return status; diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 3acf151ae015..3a3ce40ae1ab 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -146,7 +146,7 @@ static void pcie_portdrv_remove(struct pci_dev *dev) } static pci_ers_result_t pcie_portdrv_error_detected(struct pci_dev *dev, - enum pci_channel_state error) + pci_channel_state_t error) { /* Root Port has no impact. Always recovers. 
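Aside, not part of the patch: err.c and portdrv_pci.c here switch from enum pci_channel_state to the pci_channel_state_t typedef. An ordinary driver's error handler spells the same type as sketched below; the foo_* names are hypothetical.

static pci_ers_result_t foo_error_detected(struct pci_dev *pdev,
                                           pci_channel_state_t state)
{
        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_NEED_RESET;
}

static const struct pci_error_handlers foo_err_handler = {
        .error_detected = foo_error_detected,
};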
*/ return PCI_ERS_RESULT_CAN_RECOVER; diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 2f66988cea25..03d37128a24f 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -628,11 +628,17 @@ struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev, if (!bridge) return NULL; + bridge->dev.parent = dev; + ret = devm_add_action_or_reset(dev, devm_pci_alloc_host_bridge_release, bridge); if (ret) return NULL; + ret = devm_of_pci_bridge_init(dev, bridge); + if (ret) + return NULL; + return bridge; } EXPORT_SYMBOL(devm_pci_alloc_host_bridge); @@ -1552,7 +1558,7 @@ static void set_pcie_untrusted(struct pci_dev *dev) * untrusted as well. */ parent = pci_upstream_bridge(dev); - if (parent && parent->untrusted) + if (parent && (parent->untrusted || parent->external_facing)) dev->untrusted = true; } @@ -1802,9 +1808,6 @@ int pci_setup_device(struct pci_dev *dev) dev->revision = class & 0xff; dev->class = class >> 8; /* upper 3 bytes */ - pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", - dev->vendor, dev->device, dev->hdr_type, dev->class); - if (pci_early_dump) early_dump_pci_device(dev); @@ -1822,6 +1825,9 @@ int pci_setup_device(struct pci_dev *dev) /* Early fixups, before probing the BARs */ pci_fixup_device(pci_fixup_early, dev); + pci_info(dev, "[%04x:%04x] type %02x class %#08x\n", + dev->vendor, dev->device, dev->hdr_type, dev->class); + /* Device class may be changed after fixup */ class = dev->class >> 8; @@ -2390,7 +2396,7 @@ static void pci_init_capabilities(struct pci_dev *dev) pci_ats_init(dev); /* Address Translation Services */ pci_pri_init(dev); /* Page Request Interface */ pci_pasid_init(dev); /* Process Address Space ID */ - pci_enable_acs(dev); /* Enable ACS P2P upstream forwarding */ + pci_acs_init(dev); /* Access Control Services */ pci_ptm_init(dev); /* Precision Time Measurement */ pci_aer_init(dev); /* Advanced Error Reporting */ pci_dpc_init(dev); /* Downstream Port Containment */ @@ -3086,6 +3092,7 @@ int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge) resource_list_for_each_entry(window, &bridge->windows) if (window->res->flags & IORESOURCE_BUS) { + bridge->busnr = window->res->start; found = true; break; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 2ea61abd5830..bdf9b52567e0 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3562,7 +3562,7 @@ static void quirk_no_bus_reset(struct pci_dev *dev) * The device will throw a Link Down error on AER-capable systems and * regardless of AER, config space of the device is never accessible again * and typically causes the system to hang or reset when access is attempted. - * http://www.spinics.net/lists/linux-pci/msg34797.html + * https://lore.kernel.org/r/20140923210318.498dacbd@dualc.maya.org/ */ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0032, quirk_no_bus_reset); @@ -4391,9 +4391,9 @@ static int pci_acs_ctrl_enabled(u16 acs_ctrl_req, u16 acs_ctrl_ena) * redirect (CR) since all transactions are redirected to the upstream * root complex. 
* - * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94086 - * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/94102 - * http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/99402 + * https://lore.kernel.org/r/201207111426.q6BEQTbh002928@mail.maya.org/ + * https://lore.kernel.org/r/20120711165854.GM25282@amd.com/ + * https://lore.kernel.org/r/20121005130857.GX4009@amd.com/ * * 1002:4385 SBx00 SMBus Controller * 1002:439c SB7x0/SB8x0/SB9x0 IDE Controller @@ -4422,6 +4422,8 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags) if (ACPI_FAILURE(status)) return -ENODEV; + acpi_put_table(header); + /* Filter out flags not applicable to multifunction */ acs_flags &= (PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC | PCI_ACS_DT); @@ -4633,11 +4635,11 @@ static int pci_quirk_al_acs(struct pci_dev *dev, u16 acs_flags) * * 0x9d10-0x9d1b PCI Express Root port #{1-12} * - * [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html - * [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html - * [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html - * [4] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html - * [5] http://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html + * [1] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html + * [2] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html + * [3] https://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html + * [4] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-spec-update.html + * [5] https://www.intel.com/content/www/us/en/chipsets/200-series-chipset-pch-datasheet-vol-1.html * [6] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-spec-update.html * [7] https://www.intel.com/content/www/us/en/processors/core/7th-gen-core-family-mobile-u-y-processor-lines-i-o-datasheet-vol-1.html */ @@ -4666,7 +4668,7 @@ static int pci_quirk_intel_spt_pch_acs(struct pci_dev *dev, u16 acs_flags) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -4974,7 +4976,7 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -5001,7 +5003,7 @@ static int pci_quirk_disable_intel_spt_pch_acs_redir(struct pci_dev *dev) if (!pci_quirk_intel_spt_pch_acs_match(dev)) return -ENOTTY; - pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS); + pos = dev->acs_cap; if (!pos) return -ENOTTY; @@ -5205,7 +5207,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags); */ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) { - if (pdev->device == 0x7340 && pdev->revision != 0xc5) + if ((pdev->device == 0x7312 && pdev->revision != 0x00) || + (pdev->device == 0x7340 && pdev->revision != 0xc5)) return; pci_info(pdev, "disabling ATS\n"); @@ -5216,6 +5219,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats); /* AMD Iceland dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, 
quirk_amd_harvest_no_ats); +/* AMD Navi10 dGPU */ +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats); /* AMD Navi14 dGPU */ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats); #endif /* CONFIG_PCI_ATS */ @@ -5368,7 +5373,7 @@ int pci_idt_bus_quirk(struct pci_bus *bus, int devfn, u32 *l, int timeout) bool found; struct pci_dev *bridge = bus->self; - pos = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ACS); + pos = bridge->acs_cap; /* Disable ACS SV before initial config reads */ if (pos) { diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 9b94b1f16d80..3951e02b7ded 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -55,6 +55,7 @@ static void free_list(struct list_head *head) * @dev: Device to which the resource belongs * @res: Resource to be tracked * @add_size: Additional size to be optionally added to the resource + * @min_align: Minimum memory window alignment */ static int add_to_list(struct list_head *head, struct pci_dev *dev, struct resource *res, resource_size_t add_size, @@ -152,7 +153,7 @@ static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head) tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) - panic("pdev_sort_resources(): kmalloc() failed!\n"); + panic("%s: kzalloc() failed!\n", __func__); tmp->res = r; tmp->dev = dev; diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index d21fa04fa44d..43eda101fcf4 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -73,7 +73,8 @@ static void pci_std_update_resource(struct pci_dev *dev, int resno) /* * Apparently some Matrox devices have ROM BARs that read * as zero when disabled, so don't update ROM BARs unless - * they're enabled. See https://lkml.org/lkml/2005/8/30/138. + * they're enabled. 
See + * https://lore.kernel.org/r/43147B3D.1030309@vc.cvut.cz/ */ if (!(res->flags & IORESOURCE_ROM_ENABLE)) return; diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index cc386ef2fa12..3861505741e6 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -268,13 +268,16 @@ placeholder: slot_name = make_slot_name(name); if (!slot_name) { err = -ENOMEM; + kfree(slot); goto err; } err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, "%s", slot_name); - if (err) + if (err) { + kobject_put(&slot->kobj); goto err; + } INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); @@ -293,7 +296,6 @@ out: mutex_unlock(&pci_slot_mutex); return slot; err: - kfree(slot); slot = ERR_PTR(err); goto out; } diff --git a/drivers/pci/switch/switchtec.c b/drivers/pci/switch/switchtec.c index 850cfeb74608..ba52459928f7 100644 --- a/drivers/pci/switch/switchtec.c +++ b/drivers/pci/switch/switchtec.c @@ -940,7 +940,7 @@ static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev, size_t off; if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS) - return ERR_PTR(-EINVAL); + return (u32 __iomem *)ERR_PTR(-EINVAL); off = event_regs[event_id].offset; @@ -948,10 +948,10 @@ static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev, if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX) index = stdev->partition; else if (index < 0 || index >= stdev->partition_count) - return ERR_PTR(-EINVAL); + return (u32 __iomem *)ERR_PTR(-EINVAL); } else if (event_regs[event_id].map_reg == pff_ev_reg) { if (index < 0 || index >= stdev->pff_csr_count) - return ERR_PTR(-EINVAL); + return (u32 __iomem *)ERR_PTR(-EINVAL); } return event_regs[event_id].map_reg(stdev, off, index); @@ -1057,11 +1057,11 @@ static int ioctl_event_ctl(struct switchtec_dev *stdev, } static int ioctl_pff_to_port(struct switchtec_dev *stdev, - struct switchtec_ioctl_pff_port *up) + struct switchtec_ioctl_pff_port __user *up) { int i, part; u32 reg; - struct part_cfg_regs *pcfg; + struct part_cfg_regs __iomem *pcfg; struct switchtec_ioctl_pff_port p; if (copy_from_user(&p, up, sizeof(p))) @@ -1104,10 +1104,10 @@ static int ioctl_pff_to_port(struct switchtec_dev *stdev, } static int ioctl_port_to_pff(struct switchtec_dev *stdev, - struct switchtec_ioctl_pff_port *up) + struct switchtec_ioctl_pff_port __user *up) { struct switchtec_ioctl_pff_port p; - struct part_cfg_regs *pcfg; + struct part_cfg_regs __iomem *pcfg; if (copy_from_user(&p, up, sizeof(p))) return -EFAULT; @@ -1484,7 +1484,7 @@ static void init_pff(struct switchtec_dev *stdev) { int i; u32 reg; - struct part_cfg_regs *pcfg = stdev->mmio_part_cfg; + struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg; for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) { reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id); diff --git a/drivers/pci/vc.c b/drivers/pci/vc.c index 5486f8768c86..5fc59ac31145 100644 --- a/drivers/pci/vc.c +++ b/drivers/pci/vc.c @@ -172,7 +172,6 @@ enable: * @dev: device * @pos: starting position of VC capability (VC/VC9/MFVC) * @save_state: buffer for save/restore - * @name: for error message * @save: if provided a buffer, this indicates what to do with it * * Walking Virtual Channel config space to size, save, or restore it diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index a9261cf48293..7305d57d1890 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -82,6 +82,7 @@ config FSL_IMX8_DDR_PMU config QCOM_L2_PMU bool "Qualcomm Technologies L2-cache PMU" depends on ARCH_QCOM && ARM64 && ACPI + select QCOM_KRYO_L2_ACCESSORS help Provides 
support for the L2 cache performance monitor unit (PMU) in Qualcomm Technologies processors. diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index 4da37f650f98..23a0e008dafa 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -23,6 +23,7 @@ #include <asm/barrier.h> #include <asm/local64.h> #include <asm/sysreg.h> +#include <soc/qcom/kryo-l2-accessors.h> #define MAX_L2_CTRS 9 @@ -79,8 +80,6 @@ #define L2_COUNTER_RELOAD BIT_ULL(31) #define L2_CYCLE_COUNTER_RELOAD BIT_ULL(63) -#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) -#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) #define reg_idx(reg, i) (((i) * IA_L2_REG_OFFSET) + reg##_BASE) @@ -99,48 +98,7 @@ #define L2_EVENT_STREX 0x421 #define L2_EVENT_CLREX 0x422 -static DEFINE_RAW_SPINLOCK(l2_access_lock); -/** - * set_l2_indirect_reg: write value to an L2 register - * @reg: Address of L2 register. - * @value: Value to be written to register. - * - * Use architecturally required barriers for ordering between system register - * accesses - */ -static void set_l2_indirect_reg(u64 reg, u64 val) -{ - unsigned long flags; - - raw_spin_lock_irqsave(&l2_access_lock, flags); - write_sysreg_s(reg, L2CPUSRSELR_EL1); - isb(); - write_sysreg_s(val, L2CPUSRDR_EL1); - isb(); - raw_spin_unlock_irqrestore(&l2_access_lock, flags); -} - -/** - * get_l2_indirect_reg: read an L2 register value - * @reg: Address of L2 register. - * - * Use architecturally required barriers for ordering between system register - * accesses - */ -static u64 get_l2_indirect_reg(u64 reg) -{ - u64 val; - unsigned long flags; - - raw_spin_lock_irqsave(&l2_access_lock, flags); - write_sysreg_s(reg, L2CPUSRSELR_EL1); - isb(); - val = read_sysreg_s(L2CPUSRDR_EL1); - raw_spin_unlock_irqrestore(&l2_access_lock, flags); - - return val; -} struct cluster_pmu; @@ -211,28 +169,28 @@ static inline struct cluster_pmu *get_cluster_pmu( static void cluster_pmu_reset(void) { /* Reset all counters */ - set_l2_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); - set_l2_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); - set_l2_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); - set_l2_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_RESET_ALL); + kryo_l2_set_indirect_reg(L2PMCNTENCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMINTENCLR, l2_counter_present_mask); + kryo_l2_set_indirect_reg(L2PMOVSCLR, l2_counter_present_mask); } static inline void cluster_pmu_enable(void) { - set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_ENABLE); } static inline void cluster_pmu_disable(void) { - set_l2_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); + kryo_l2_set_indirect_reg(L2PMCR, L2PMCR_COUNTERS_DISABLE); } static inline void cluster_pmu_counter_set_value(u32 idx, u64 value) { if (idx == l2_cycle_ctr_idx) - set_l2_indirect_reg(L2PMCCNTR, value); + kryo_l2_set_indirect_reg(L2PMCCNTR, value); else - set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx), value); } static inline u64 cluster_pmu_counter_get_value(u32 idx) @@ -240,46 +198,46 @@ static inline u64 cluster_pmu_counter_get_value(u32 idx) u64 value; if (idx == l2_cycle_ctr_idx) - value = get_l2_indirect_reg(L2PMCCNTR); + value = kryo_l2_get_indirect_reg(L2PMCCNTR); else - value = get_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); + value = kryo_l2_get_indirect_reg(reg_idx(IA_L2PMXEVCNTR, idx)); return value; } static inline void 
cluster_pmu_counter_enable(u32 idx) { - set_l2_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMCNTENSET, idx_to_reg_bit(idx)); } static inline void cluster_pmu_counter_disable(u32 idx) { - set_l2_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMCNTENCLR, idx_to_reg_bit(idx)); } static inline void cluster_pmu_counter_enable_interrupt(u32 idx) { - set_l2_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMINTENSET, idx_to_reg_bit(idx)); } static inline void cluster_pmu_counter_disable_interrupt(u32 idx) { - set_l2_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); + kryo_l2_set_indirect_reg(L2PMINTENCLR, idx_to_reg_bit(idx)); } static inline void cluster_pmu_set_evccntcr(u32 val) { - set_l2_indirect_reg(L2PMCCNTCR, val); + kryo_l2_set_indirect_reg(L2PMCCNTCR, val); } static inline void cluster_pmu_set_evcntcr(u32 ctr, u32 val) { - set_l2_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVCNTCR, ctr), val); } static inline void cluster_pmu_set_evtyper(u32 ctr, u32 val) { - set_l2_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVTYPER, ctr), val); } static void cluster_pmu_set_resr(struct cluster_pmu *cluster, @@ -295,11 +253,11 @@ static void cluster_pmu_set_resr(struct cluster_pmu *cluster, spin_lock_irqsave(&cluster->pmu_lock, flags); - resr_val = get_l2_indirect_reg(L2PMRESR); + resr_val = kryo_l2_get_indirect_reg(L2PMRESR); resr_val &= ~(L2PMRESR_GROUP_MASK << shift); resr_val |= field; resr_val |= L2PMRESR_EN; - set_l2_indirect_reg(L2PMRESR, resr_val); + kryo_l2_set_indirect_reg(L2PMRESR, resr_val); spin_unlock_irqrestore(&cluster->pmu_lock, flags); } @@ -315,14 +273,14 @@ static inline void cluster_pmu_set_evfilter_sys_mode(u32 ctr) L2PMXEVFILTER_ORGFILTER_IDINDEP | L2PMXEVFILTER_ORGFILTER_ALL; - set_l2_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); + kryo_l2_set_indirect_reg(reg_idx(IA_L2PMXEVFILTER, ctr), val); } static inline u32 cluster_pmu_getreset_ovsr(void) { - u32 result = get_l2_indirect_reg(L2PMOVSSET); + u32 result = kryo_l2_get_indirect_reg(L2PMOVSSET); - set_l2_indirect_reg(L2PMOVSCLR, result); + kryo_l2_set_indirect_reg(L2PMOVSCLR, result); return result; } @@ -767,7 +725,7 @@ static int get_num_counters(void) { int val; - val = get_l2_indirect_reg(L2PMCR); + val = kryo_l2_get_indirect_reg(L2PMCR); /* * Read number of counters from L2PMCR and add 1 diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c index 5a0c8e87aa7c..7efdfb4f3e9b 100644 --- a/drivers/pinctrl/actions/pinctrl-owl.c +++ b/drivers/pinctrl/actions/pinctrl-owl.c @@ -35,8 +35,12 @@ * @pctrldev: pinctrl handle * @chip: gpio chip * @lock: spinlock to protect registers + * @clk: clock control * @soc: reference to soc_data * @base: pinctrl register base address + * @irq_chip: IRQ chip information + * @num_irq: number of possible interrupts + * @irq: interrupt numbers */ struct owl_pinctrl { struct device *dev; diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index fa32c3e9c9d1..7efe6dbe4398 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -46,6 +46,7 @@ #define SCU634 0x634 /* Disable GPIO Internal Pull-Down #5 */ #define SCU638 0x638 /* Disable GPIO Internal Pull-Down #6 */ #define SCU694 0x694 /* Multi-function Pin Control #25 */ +#define SCU69C 0x69C /* Multi-function Pin Control #27 
*/ #define SCUC20 0xC20 /* PCIE configuration Setting Control */ #define ASPEED_G6_NR_PINS 256 @@ -819,11 +820,13 @@ FUNC_DECL_2(PWM14, PWM14G0, PWM14G1); #define Y23 127 SIG_EXPR_LIST_DECL_SEMG(Y23, PWM15, PWM15G1, PWM15, SIG_DESC_SET(SCU41C, 31)); SIG_EXPR_LIST_DECL_SESG(Y23, THRUOUT3, THRU3, SIG_DESC_SET(SCU4BC, 31)); -PIN_DECL_2(Y23, GPIOP7, PWM15, THRUOUT3); +SIG_EXPR_LIST_DECL_SESG(Y23, HEARTBEAT, HEARTBEAT, SIG_DESC_SET(SCU69C, 31)); +PIN_DECL_3(Y23, GPIOP7, PWM15, THRUOUT3, HEARTBEAT); GROUP_DECL(PWM15G1, Y23); FUNC_DECL_2(PWM15, PWM15G0, PWM15G1); FUNC_GROUP_DECL(THRU3, AB24, Y23); +FUNC_GROUP_DECL(HEARTBEAT, Y23); #define AA25 128 SSSF_PIN_DECL(AA25, GPIOQ0, TACH0, SIG_DESC_SET(SCU430, 0)); @@ -1920,6 +1923,7 @@ static const struct aspeed_pin_group aspeed_g6_groups[] = { ASPEED_PINCTRL_GROUP(GPIU5), ASPEED_PINCTRL_GROUP(GPIU6), ASPEED_PINCTRL_GROUP(GPIU7), + ASPEED_PINCTRL_GROUP(HEARTBEAT), ASPEED_PINCTRL_GROUP(HVI3C3), ASPEED_PINCTRL_GROUP(HVI3C4), ASPEED_PINCTRL_GROUP(I2C1), @@ -2158,6 +2162,7 @@ static const struct aspeed_pin_function aspeed_g6_functions[] = { ASPEED_PINCTRL_FUNC(GPIU5), ASPEED_PINCTRL_FUNC(GPIU6), ASPEED_PINCTRL_FUNC(GPIU7), + ASPEED_PINCTRL_FUNC(HEARTBEAT), ASPEED_PINCTRL_FUNC(I2C1), ASPEED_PINCTRL_FUNC(I2C10), ASPEED_PINCTRL_FUNC(I2C11), diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed.c b/drivers/pinctrl/aspeed/pinctrl-aspeed.c index b625a657171e..53f3f8aec695 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed.c @@ -76,6 +76,9 @@ static int aspeed_sig_expr_enable(struct aspeed_pinmux_data *ctx, { int ret; + pr_debug("Enabling signal %s for %s\n", expr->signal, + expr->function); + ret = aspeed_sig_expr_eval(ctx, expr, true); if (ret < 0) return ret; @@ -91,6 +94,9 @@ static int aspeed_sig_expr_disable(struct aspeed_pinmux_data *ctx, { int ret; + pr_debug("Disabling signal %s for %s\n", expr->signal, + expr->function); + ret = aspeed_sig_expr_eval(ctx, expr, true); if (ret < 0) return ret; @@ -229,7 +235,7 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, const struct aspeed_sig_expr **funcs; const struct aspeed_sig_expr ***prios; - pr_debug("Muxing pin %d for %s\n", pin, pfunc->name); + pr_debug("Muxing pin %s for %s\n", pdesc->name, pfunc->name); if (!pdesc) return -EINVAL; @@ -269,6 +275,9 @@ int aspeed_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, ret = aspeed_sig_expr_enable(&pdata->pinmux, expr); if (ret) return ret; + + pr_debug("Muxed pin %s as %s for %s\n", pdesc->name, expr->signal, + expr->function); } return 0; @@ -317,6 +326,8 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev, if (!prios) return -ENXIO; + pr_debug("Muxing pin %s for GPIO\n", pdesc->name); + /* Disable any functions of higher priority than GPIO */ while ((funcs = *prios)) { if (aspeed_gpio_in_exprs(funcs)) @@ -346,14 +357,22 @@ int aspeed_gpio_request_enable(struct pinctrl_dev *pctldev, * lowest-priority signal type. As such it has no associated * expression. 
*/ - if (!expr) + if (!expr) { + pr_debug("Muxed pin %s as GPIO\n", pdesc->name); return 0; + } /* * If GPIO is not the lowest priority signal type, assume there is only * one expression defined to enable the GPIO function */ - return aspeed_sig_expr_enable(&pdata->pinmux, expr); + ret = aspeed_sig_expr_enable(&pdata->pinmux, expr); + if (ret) + return ret; + + pr_debug("Muxed pin %s as %s\n", pdesc->name, expr->signal); + + return 0; } int aspeed_pinctrl_probe(struct platform_device *pdev, diff --git a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c index 71e666178300..9ab1f427286a 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm281xx.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm281xx.c @@ -59,7 +59,7 @@ #define BCM281XX_HDMI_PIN_REG_MODE_MASK 0x0010 #define BCM281XX_HDMI_PIN_REG_MODE_SHIFT 4 -/** +/* * bcm281xx_pin_type - types of pin register */ enum bcm281xx_pin_type { @@ -73,7 +73,7 @@ static enum bcm281xx_pin_type std_pin = BCM281XX_PIN_TYPE_STD; static enum bcm281xx_pin_type i2c_pin = BCM281XX_PIN_TYPE_I2C; static enum bcm281xx_pin_type hdmi_pin = BCM281XX_PIN_TYPE_HDMI; -/** +/* * bcm281xx_pin_function- define pin function */ struct bcm281xx_pin_function { @@ -82,7 +82,7 @@ struct bcm281xx_pin_function { const unsigned ngroups; }; -/** +/* * bcm281xx_pinctrl_data - Broadcom-specific pinctrl data * @reg_base - base of pinctrl registers */ diff --git a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c index a38f0d5f47ce..e2bd2dce6bb4 100644 --- a/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-iproc-gpio.c @@ -131,7 +131,7 @@ static inline unsigned iproc_pin_to_gpio(unsigned pin) * iproc_set_bit - set or clear one bit (corresponding to the GPIO pin) in a * Iproc GPIO register * - * @iproc_gpio: Iproc GPIO device + * @chip: Iproc GPIO device * @reg: register offset * @gpio: GPIO pin * @set: set or clear diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c index bed0124388c0..a00a42a61a90 100644 --- a/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c +++ b/drivers/pinctrl/bcm/pinctrl-nsp-gpio.c @@ -154,15 +154,9 @@ static irqreturn_t nsp_gpio_irq_handler(int irq, void *data) level &= readl(chip->base + NSP_GPIO_INT_MASK); int_bits = level | event; - for_each_set_bit(bit, &int_bits, gc->ngpio) { - /* - * Clear the interrupt before invoking the - * handler, so we do not leave any window - */ - writel(BIT(bit), chip->base + NSP_GPIO_EVENT); + for_each_set_bit(bit, &int_bits, gc->ngpio) generic_handle_irq( irq_linear_revmap(gc->irq.domain, bit)); - } } return int_bits ? 
IRQ_HANDLED : IRQ_NONE; @@ -178,7 +172,7 @@ static void nsp_gpio_irq_ack(struct irq_data *d) trigger_type = irq_get_trigger_type(d->irq); if (trigger_type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) - nsp_set_bit(chip, REG, NSP_GPIO_EVENT, gpio, val); + writel(val, chip->base + NSP_GPIO_EVENT); } /* @@ -262,6 +256,12 @@ static int nsp_gpio_irq_set_type(struct irq_data *d, unsigned int type) nsp_set_bit(chip, REG, NSP_GPIO_EVENT_INT_POLARITY, gpio, falling); nsp_set_bit(chip, REG, NSP_GPIO_INT_POLARITY, gpio, level_low); + + if (type & IRQ_TYPE_EDGE_BOTH) + irq_set_handler_locked(d, handle_edge_irq); + else + irq_set_handler_locked(d, handle_level_irq); + raw_spin_unlock_irqrestore(&chip->lock, flags); dev_dbg(chip->dev, "gpio:%u level_low:%s falling:%s\n", gpio, @@ -691,7 +691,7 @@ static int nsp_gpio_probe(struct platform_device *pdev) girq->num_parents = 0; girq->parents = NULL; girq->default_type = IRQ_TYPE_NONE; - girq->handler = handle_simple_irq; + girq->handler = handle_bad_irq; } ret = devm_gpiochip_add_data(dev, gc, chip); diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 821242bb4b16..3663d87f51a0 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -27,6 +27,7 @@ #include <linux/pinctrl/machine.h> #ifdef CONFIG_GPIOLIB +#include "../gpio/gpiolib.h" #include <asm-generic/gpio.h> #endif @@ -161,7 +162,7 @@ int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name) /** * pin_get_name_from_id() - look up a pin name from a pin id * @pctldev: the pin control device to lookup the pin on - * @name: the name of the pin to look up + * @pin: pin number/id to look up */ const char *pin_get_name(struct pinctrl_dev *pctldev, const unsigned pin) { @@ -577,7 +578,7 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_get_group_pins); /** * pinctrl_generic_get_group() - returns a pin group based on the number * @pctldev: pin controller device - * @gselector: group number + * @selector: group number */ struct group_desc *pinctrl_generic_get_group(struct pinctrl_dev *pctldev, unsigned int selector) @@ -1329,7 +1330,7 @@ static void devm_pinctrl_release(struct device *dev, void *res) } /** - * struct devm_pinctrl_get() - Resource managed pinctrl_get() + * devm_pinctrl_get() - Resource managed pinctrl_get() * @dev: the device to obtain the handle for * * If there is a need to explicitly destroy the returned struct pinctrl, @@ -1451,7 +1452,7 @@ EXPORT_SYMBOL_GPL(pinctrl_register_mappings); /** * pinctrl_unregister_mappings() - unregister a set of pin controller mappings - * @maps: the pincontrol mappings table passed to pinctrl_register_mappings() + * @map: the pincontrol mappings table passed to pinctrl_register_mappings() * when registering the mappings. 
*/ void pinctrl_unregister_mappings(const struct pinctrl_map *map) @@ -1601,6 +1602,9 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) struct pinctrl_dev *pctldev = s->private; const struct pinctrl_ops *ops = pctldev->desc->pctlops; unsigned i, pin; + struct pinctrl_gpio_range *range; + unsigned int gpio_num; + struct gpio_chip *chip; seq_printf(s, "registered pins: %d\n", pctldev->desc->npins); @@ -1618,6 +1622,23 @@ static int pinctrl_pins_show(struct seq_file *s, void *what) seq_printf(s, "pin %d (%s) ", pin, desc->name); +#ifdef CONFIG_GPIOLIB + gpio_num = 0; + list_for_each_entry(range, &pctldev->gpio_ranges, node) { + if ((pin >= range->pin_base) && + (pin < (range->pin_base + range->npins))) { + gpio_num = range->base + (pin - range->pin_base); + break; + } + } + chip = gpio_to_chip(gpio_num); + if (chip && chip->gpiodev && chip->gpiodev->base) + seq_printf(s, "%u:%s ", gpio_num - + chip->gpiodev->base, chip->label); + else + seq_puts(s, "0:? "); +#endif + /* Driver-specific info per pin */ if (ops->pin_dbg_show) ops->pin_dbg_show(pctldev, s, pin); @@ -2226,9 +2247,9 @@ EXPORT_SYMBOL_GPL(devm_pinctrl_register); * @dev: parent device for this pin controller * @pctldesc: descriptor for this pin controller * @driver_data: private pin controller data for this pin controller + * @pctldev: pin controller device * - * Returns an error pointer if pincontrol register failed. Otherwise - * it returns valid pinctrl handle. + * Returns zero on success or an error number on failure. * * The pinctrl device will be automatically released when the device is unbound. */ diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index c6fe7d64c913..5eff8c296552 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -17,7 +17,8 @@ * struct pinctrl_dt_map - mapping table chunk parsed from device tree * @node: list node for struct pinctrl's @dt_maps field * @pctldev: the pin controller that allocated this struct, and will free it - * @maps: the mapping table entries + * @map: the mapping table entries + * @num_maps: number of mapping table entries */ struct pinctrl_dt_map { struct list_head node; @@ -397,7 +398,7 @@ static int pinctrl_copy_args(const struct device_node *np, * @np: pointer to device node with the property * @list_name: property that contains the list * @index: index within the list - * @out_arts: entries in the list pointed by index + * @out_args: entries in the list pointed by index * * Finds the selected element in a pinctrl array consisting of an index * within the controller and a number of u32 entries specified for each diff --git a/drivers/pinctrl/freescale/Kconfig b/drivers/pinctrl/freescale/Kconfig index 4ca44dd69e53..08fcf5c79296 100644 --- a/drivers/pinctrl/freescale/Kconfig +++ b/drivers/pinctrl/freescale/Kconfig @@ -124,49 +124,49 @@ config PINCTRL_IMX7ULP Say Y here to enable the imx7ulp pinctrl driver config PINCTRL_IMX8MM - bool "IMX8MM pinctrl driver" + tristate "IMX8MM pinctrl driver" depends on ARCH_MXC select PINCTRL_IMX help Say Y here to enable the imx8mm pinctrl driver config PINCTRL_IMX8MN - bool "IMX8MN pinctrl driver" + tristate "IMX8MN pinctrl driver" depends on ARCH_MXC select PINCTRL_IMX help Say Y here to enable the imx8mn pinctrl driver config PINCTRL_IMX8MP - bool "IMX8MP pinctrl driver" + tristate "IMX8MP pinctrl driver" depends on ARCH_MXC select PINCTRL_IMX help Say Y here to enable the imx8mp pinctrl driver config PINCTRL_IMX8MQ - bool "IMX8MQ pinctrl driver" + tristate "IMX8MQ pinctrl driver" 
depends on ARCH_MXC select PINCTRL_IMX help Say Y here to enable the imx8mq pinctrl driver config PINCTRL_IMX8QM - bool "IMX8QM pinctrl driver" + tristate "IMX8QM pinctrl driver" depends on IMX_SCU && ARCH_MXC && ARM64 select PINCTRL_IMX_SCU help Say Y here to enable the imx8qm pinctrl driver config PINCTRL_IMX8QXP - bool "IMX8QXP pinctrl driver" + tristate "IMX8QXP pinctrl driver" depends on IMX_SCU && ARCH_MXC && ARM64 select PINCTRL_IMX_SCU help Say Y here to enable the imx8qxp pinctrl driver config PINCTRL_IMX8DXL - bool "IMX8DXL pinctrl driver" + tristate "IMX8DXL pinctrl driver" depends on IMX_SCU && ARCH_MXC && ARM64 select PINCTRL_IMX_SCU help diff --git a/drivers/pinctrl/freescale/pinctrl-imx.c b/drivers/pinctrl/freescale/pinctrl-imx.c index 1f81569c7ae3..507e4affcd73 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx.c +++ b/drivers/pinctrl/freescale/pinctrl-imx.c @@ -877,6 +877,7 @@ int imx_pinctrl_probe(struct platform_device *pdev, return pinctrl_enable(ipctl->pctl); } +EXPORT_SYMBOL_GPL(imx_pinctrl_probe); static int __maybe_unused imx_pinctrl_suspend(struct device *dev) { @@ -896,3 +897,4 @@ const struct dev_pm_ops imx_pinctrl_pm_ops = { SET_LATE_SYSTEM_SLEEP_PM_OPS(imx_pinctrl_suspend, imx_pinctrl_resume) }; +EXPORT_SYMBOL_GPL(imx_pinctrl_pm_ops); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c index 7f32e57b7f6a..12b97daa0407 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8dxl.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8dxl.c @@ -165,6 +165,7 @@ static const struct of_device_id imx8dxl_pinctrl_of_match[] = { { .compatible = "fsl,imx8dxl-iomuxc", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8dxl_pinctrl_of_match); static int imx8dxl_pinctrl_probe(struct platform_device *pdev) { @@ -191,3 +192,7 @@ static int __init imx8dxl_pinctrl_init(void) return platform_driver_register(&imx8dxl_pinctrl_driver); } arch_initcall(imx8dxl_pinctrl_init); + +MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); +MODULE_DESCRIPTION("NXP i.MX8DXL pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mm.c b/drivers/pinctrl/freescale/pinctrl-imx8mm.c index 6d1038af59f4..31c5d8861406 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8mm.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8mm.c @@ -5,6 +5,7 @@ #include <linux/err.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> #include <linux/platform_device.h> @@ -326,6 +327,7 @@ static const struct of_device_id imx8mm_pinctrl_of_match[] = { { .compatible = "fsl,imx8mm-iomuxc", .data = &imx8mm_pinctrl_info, }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8mm_pinctrl_of_match); static int imx8mm_pinctrl_probe(struct platform_device *pdev) { @@ -346,3 +348,7 @@ static int __init imx8mm_pinctrl_init(void) return platform_driver_register(&imx8mm_pinctrl_driver); } arch_initcall(imx8mm_pinctrl_init); + +MODULE_AUTHOR("Bai Ping <ping.bai@nxp.com>"); +MODULE_DESCRIPTION("NXP i.MX8MM pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mn.c b/drivers/pinctrl/freescale/pinctrl-imx8mn.c index 100ed8c1039a..14c9deb51fec 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8mn.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8mn.c @@ -5,6 +5,7 @@ #include <linux/err.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/pinctrl.h> #include <linux/platform_device.h> @@ -326,6 +327,7 @@ static const struct 
of_device_id imx8mn_pinctrl_of_match[] = { { .compatible = "fsl,imx8mn-iomuxc", .data = &imx8mn_pinctrl_info, }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8mn_pinctrl_of_match); static int imx8mn_pinctrl_probe(struct platform_device *pdev) { @@ -346,3 +348,7 @@ static int __init imx8mn_pinctrl_init(void) return platform_driver_register(&imx8mn_pinctrl_driver); } arch_initcall(imx8mn_pinctrl_init); + +MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); +MODULE_DESCRIPTION("NXP i.MX8MN pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mp.c b/drivers/pinctrl/freescale/pinctrl-imx8mp.c index e3f644c2ec13..bf4bbb5e2446 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8mp.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8mp.c @@ -5,6 +5,7 @@ #include <linux/err.h> #include <linux/init.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/pinctrl.h> #include <linux/platform_device.h> @@ -324,6 +325,7 @@ static const struct of_device_id imx8mp_pinctrl_of_match[] = { { .compatible = "fsl,imx8mp-iomuxc", .data = &imx8mp_pinctrl_info, }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8mp_pinctrl_of_match); static int imx8mp_pinctrl_probe(struct platform_device *pdev) { @@ -343,3 +345,7 @@ static int __init imx8mp_pinctrl_init(void) return platform_driver_register(&imx8mp_pinctrl_driver); } arch_initcall(imx8mp_pinctrl_init); + +MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); +MODULE_DESCRIPTION("NXP i.MX8MP pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8mq.c b/drivers/pinctrl/freescale/pinctrl-imx8mq.c index 50aa1c00c4b2..ae3ea5b5c204 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8mq.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8mq.c @@ -8,6 +8,7 @@ #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> +#include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/pinctrl.h> @@ -329,6 +330,7 @@ static const struct of_device_id imx8mq_pinctrl_of_match[] = { { .compatible = "fsl,imx8mq-iomuxc", .data = &imx8mq_pinctrl_info, }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8mq_pinctrl_of_match); static int imx8mq_pinctrl_probe(struct platform_device *pdev) { @@ -350,3 +352,7 @@ static int __init imx8mq_pinctrl_init(void) return platform_driver_register(&imx8mq_pinctrl_driver); } arch_initcall(imx8mq_pinctrl_init); + +MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>"); +MODULE_DESCRIPTION("NXP i.MX8MQ pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qm.c b/drivers/pinctrl/freescale/pinctrl-imx8qm.c index 0b6029b29731..095acf494641 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8qm.c +++ b/drivers/pinctrl/freescale/pinctrl-imx8qm.c @@ -298,6 +298,7 @@ static const struct of_device_id imx8qm_pinctrl_of_match[] = { { .compatible = "fsl,imx8qm-iomuxc", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8qm_pinctrl_of_match); static int imx8qm_pinctrl_probe(struct platform_device *pdev) { @@ -324,3 +325,7 @@ static int __init imx8qm_pinctrl_init(void) return platform_driver_register(&imx8qm_pinctrl_driver); } arch_initcall(imx8qm_pinctrl_init); + +MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>"); +MODULE_DESCRIPTION("NXP i.MX8QM pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c index 1131dc3c084e..81ebd4c952ec 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx8qxp.c 
+++ b/drivers/pinctrl/freescale/pinctrl-imx8qxp.c @@ -204,6 +204,7 @@ static const struct of_device_id imx8qxp_pinctrl_of_match[] = { { .compatible = "fsl,imx8qxp-iomuxc", }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx8qxp_pinctrl_of_match); static int imx8qxp_pinctrl_probe(struct platform_device *pdev) { @@ -230,3 +231,7 @@ static int __init imx8qxp_pinctrl_init(void) return platform_driver_register(&imx8qxp_pinctrl_driver); } arch_initcall(imx8qxp_pinctrl_init); + +MODULE_AUTHOR("Aisheng Dong <aisheng.dong@nxp.com>"); +MODULE_DESCRIPTION("NXP i.MX8QXP pinctrl driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/freescale/pinctrl-scu.c b/drivers/pinctrl/freescale/pinctrl-scu.c index 23cf04bdfc55..9df45d3e3226 100644 --- a/drivers/pinctrl/freescale/pinctrl-scu.c +++ b/drivers/pinctrl/freescale/pinctrl-scu.c @@ -41,6 +41,7 @@ int imx_pinctrl_sc_ipc_init(struct platform_device *pdev) { return imx_scu_get_handle(&pinctrl_ipc_handle); } +EXPORT_SYMBOL_GPL(imx_pinctrl_sc_ipc_init); int imx_pinconf_get_scu(struct pinctrl_dev *pctldev, unsigned pin_id, unsigned long *config) @@ -66,6 +67,7 @@ int imx_pinconf_get_scu(struct pinctrl_dev *pctldev, unsigned pin_id, return 0; } +EXPORT_SYMBOL_GPL(imx_pinconf_get_scu); int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id, unsigned long *configs, unsigned num_configs) @@ -101,6 +103,7 @@ int imx_pinconf_set_scu(struct pinctrl_dev *pctldev, unsigned pin_id, return ret; } +EXPORT_SYMBOL_GPL(imx_pinconf_set_scu); void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl, unsigned int *pin_id, struct imx_pin *pin, @@ -119,3 +122,4 @@ void imx_pinctrl_parse_pin_scu(struct imx_pinctrl *ipctl, dev_dbg(ipctl->dev, "%s: 0x%x 0x%08lx", info->pins[pin->pin].name, pin_scu->mux_mode, pin_scu->config); } +EXPORT_SYMBOL_GPL(imx_pinctrl_parse_pin_scu); diff --git a/drivers/pinctrl/intel/Kconfig b/drivers/pinctrl/intel/Kconfig index 787833e343a4..b3e6060db52d 100644 --- a/drivers/pinctrl/intel/Kconfig +++ b/drivers/pinctrl/intel/Kconfig @@ -95,6 +95,14 @@ config PINCTRL_DENVERTON This pinctrl driver provides an interface that allows configuring of Intel Denverton SoC pins and using them as GPIOs. +config PINCTRL_EMMITSBURG + tristate "Intel Emmitsburg pinctrl and GPIO driver" + depends on ACPI + select PINCTRL_INTEL + help + This pinctrl driver provides an interface that allows configuring + of Intel Emmitsburg pins and using them as GPIOs. 
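Since the new PINCTRL_EMMITSBURG symbol is tristate and selects PINCTRL_INTEL, the driver can be built in or as a loadable module; a minimal, illustrative config fragment (not part of this diff) would be:

# Illustrative only: build the Emmitsburg pin controller as a module
CONFIG_PINCTRL_EMMITSBURG=m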
+ config PINCTRL_GEMINILAKE tristate "Intel Gemini Lake SoC pinctrl and GPIO driver" depends on ACPI diff --git a/drivers/pinctrl/intel/Makefile b/drivers/pinctrl/intel/Makefile index f6f63eb8100f..1c1c316f98b9 100644 --- a/drivers/pinctrl/intel/Makefile +++ b/drivers/pinctrl/intel/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PINCTRL_BROXTON) += pinctrl-broxton.o obj-$(CONFIG_PINCTRL_CANNONLAKE) += pinctrl-cannonlake.o obj-$(CONFIG_PINCTRL_CEDARFORK) += pinctrl-cedarfork.o obj-$(CONFIG_PINCTRL_DENVERTON) += pinctrl-denverton.o +obj-$(CONFIG_PINCTRL_EMMITSBURG) += pinctrl-emmitsburg.o obj-$(CONFIG_PINCTRL_GEMINILAKE) += pinctrl-geminilake.o obj-$(CONFIG_PINCTRL_ICELAKE) += pinctrl-icelake.o obj-$(CONFIG_PINCTRL_JASPERLAKE) += pinctrl-jasperlake.o diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c index 615174a9d1e0..d6e35cba3065 100644 --- a/drivers/pinctrl/intel/pinctrl-baytrail.c +++ b/drivers/pinctrl/intel/pinctrl-baytrail.c @@ -1372,13 +1372,13 @@ static void byt_irq_unmask(struct irq_data *d) switch (irqd_get_trigger_type(d)) { case IRQ_TYPE_LEVEL_HIGH: value |= BYT_TRIG_LVL; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_RISING: value |= BYT_TRIG_POS; break; case IRQ_TYPE_LEVEL_LOW: value |= BYT_TRIG_LVL; - /* fall through */ + fallthrough; case IRQ_TYPE_EDGE_FALLING: value |= BYT_TRIG_NEG; break; @@ -1796,9 +1796,8 @@ static struct platform_driver byt_gpio_driver = { .driver = { .name = "byt_gpio", .pm = &byt_gpio_pm_ops, + .acpi_match_table = byt_gpio_acpi_match, .suppress_bind_attrs = true, - - .acpi_match_table = ACPI_PTR(byt_gpio_acpi_match), }, }; diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 8e3953a223d0..9ef246145bde 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -2,7 +2,7 @@ /* * Cherryview/Braswell pinctrl driver * - * Copyright (C) 2014, Intel Corporation + * Copyright (C) 2014, 2020 Intel Corporation * Author: Mika Westerberg <mika.westerberg@linux.intel.com> * * This driver is based on the original Cherryview GPIO driver by @@ -67,35 +67,7 @@ #define CHV_PADCTRL1_INTWAKECFG_BOTH 3 #define CHV_PADCTRL1_INTWAKECFG_LEVEL 4 -/** - * struct chv_community - A community specific configuration - * @uid: ACPI _UID used to match the community - * @pins: All pins in this community - * @npins: Number of pins - * @groups: All groups in this community - * @ngroups: Number of groups - * @functions: All functions in this community - * @nfunctions: Number of functions - * @gpps: Pad groups - * @ngpps: Number of pad groups in this community - * @nirqs: Total number of IRQs this community can generate - * @acpi_space_id: An address space ID for ACPI OpRegion handler - */ -struct chv_community { - const char *uid; - const struct pinctrl_pin_desc *pins; - size_t npins; - const struct intel_pingroup *groups; - size_t ngroups; - const struct intel_function *functions; - size_t nfunctions; - const struct intel_padgroup *gpps; - size_t ngpps; - size_t nirqs; - acpi_adr_space_type acpi_space_id; -}; - -struct chv_pin_context { +struct intel_pad_context { u32 padctrl0; u32 padctrl1; }; @@ -107,13 +79,13 @@ struct chv_pin_context { * @pctldev: Pointer to the pin controller device * @chip: GPIO chip in this pin controller * @irqchip: IRQ chip in this pin controller - * @regs: MMIO registers + * @soc: Community specific pin configuration data + * @communities: All communities in this pin controller + * @ncommunities: Number of 
communities in this pin controller + * @context: Configuration saved over system sleep * @irq: Our parent irq - * @intr_lines: Stores mapping between 16 HW interrupt wires and GPIO - * offset (in GPIO number space) - * @community: Community this pinctrl instance represents + * @intr_lines: Mapping between 16 HW interrupt wires and GPIO offset (in GPIO number space) * @saved_intmask: Interrupt mask saved for system sleep - * @saved_pin_context: Pointer to a context of the pins saved for system sleep * * The first group in @groups is expected to contain all pins that can be * used as GPIOs. @@ -124,24 +96,34 @@ struct chv_pinctrl { struct pinctrl_dev *pctldev; struct gpio_chip chip; struct irq_chip irqchip; - void __iomem *regs; - unsigned int irq; + const struct intel_pinctrl_soc_data *soc; + struct intel_community *communities; + size_t ncommunities; + struct intel_pinctrl_context context; + int irq; + unsigned int intr_lines[16]; - const struct chv_community *community; u32 saved_intmask; - struct chv_pin_context *saved_pin_context; }; #define PINMODE_INVERT_OE BIT(15) #define PINMODE(m, i) ((m) | ((i) * PINMODE_INVERT_OE)) -#define CHV_GPP(start, end) \ +#define CHV_GPP(start, end) \ { \ .base = (start), \ .size = (end) - (start) + 1, \ } +#define CHV_COMMUNITY(g, i, a) \ + { \ + .gpps = (g), \ + .ngpps = ARRAY_SIZE(g), \ + .nirqs = (i), \ + .acpi_space_id = (a), \ + } + static const struct pinctrl_pin_desc southwest_pins[] = { PINCTRL_PIN(0, "FST_SPI_D2"), PINCTRL_PIN(1, "FST_SPI_D0"), @@ -303,7 +285,15 @@ static const struct intel_padgroup southwest_gpps[] = { CHV_GPP(90, 97), }; -static const struct chv_community southwest_community = { +/* + * Southwest community can generate GPIO interrupts only for the first 8 + * interrupts. The upper half (8-15) can only be used to trigger GPEs. + */ +static const struct intel_community southwest_communities[] = { + CHV_COMMUNITY(southwest_gpps, 8, 0x91), +}; + +static const struct intel_pinctrl_soc_data southwest_soc_data = { .uid = "1", .pins = southwest_pins, .npins = ARRAY_SIZE(southwest_pins), @@ -311,15 +301,8 @@ static const struct chv_community southwest_community = { .ngroups = ARRAY_SIZE(southwest_groups), .functions = southwest_functions, .nfunctions = ARRAY_SIZE(southwest_functions), - .gpps = southwest_gpps, - .ngpps = ARRAY_SIZE(southwest_gpps), - /* - * Southwest community can generate GPIO interrupts only for the - * first 8 interrupts. The upper half (8-15) can only be used to - * trigger GPEs. - */ - .nirqs = 8, - .acpi_space_id = 0x91, + .communities = southwest_communities, + .ncommunities = ARRAY_SIZE(southwest_communities), }; static const struct pinctrl_pin_desc north_pins[] = { @@ -396,19 +379,20 @@ static const struct intel_padgroup north_gpps[] = { CHV_GPP(60, 72), }; -static const struct chv_community north_community = { +/* + * North community can generate GPIO interrupts only for the first 8 + * interrupts. The upper half (8-15) can only be used to trigger GPEs. + */ +static const struct intel_community north_communities[] = { + CHV_COMMUNITY(north_gpps, 8, 0x92), +}; + +static const struct intel_pinctrl_soc_data north_soc_data = { .uid = "2", .pins = north_pins, .npins = ARRAY_SIZE(north_pins), - .gpps = north_gpps, - .ngpps = ARRAY_SIZE(north_gpps), - /* - * North community can generate GPIO interrupts only for the first - * 8 interrupts. The upper half (8-15) can only be used to trigger - * GPEs. 
- */ - .nirqs = 8, - .acpi_space_id = 0x92, + .communities = north_communities, + .ncommunities = ARRAY_SIZE(north_communities), }; static const struct pinctrl_pin_desc east_pins[] = { @@ -444,14 +428,16 @@ static const struct intel_padgroup east_gpps[] = { CHV_GPP(15, 26), }; -static const struct chv_community east_community = { +static const struct intel_community east_communities[] = { + CHV_COMMUNITY(east_gpps, 16, 0x93), +}; + +static const struct intel_pinctrl_soc_data east_soc_data = { .uid = "3", .pins = east_pins, .npins = ARRAY_SIZE(east_pins), - .gpps = east_gpps, - .ngpps = ARRAY_SIZE(east_gpps), - .nirqs = 16, - .acpi_space_id = 0x93, + .communities = east_communities, + .ncommunities = ARRAY_SIZE(east_communities), }; static const struct pinctrl_pin_desc southeast_pins[] = { @@ -566,7 +552,11 @@ static const struct intel_padgroup southeast_gpps[] = { CHV_GPP(75, 85), }; -static const struct chv_community southeast_community = { +static const struct intel_community southeast_communities[] = { + CHV_COMMUNITY(southeast_gpps, 16, 0x94), +}; + +static const struct intel_pinctrl_soc_data southeast_soc_data = { .uid = "4", .pins = southeast_pins, .npins = ARRAY_SIZE(southeast_pins), @@ -574,17 +564,16 @@ static const struct chv_community southeast_community = { .ngroups = ARRAY_SIZE(southeast_groups), .functions = southeast_functions, .nfunctions = ARRAY_SIZE(southeast_functions), - .gpps = southeast_gpps, - .ngpps = ARRAY_SIZE(southeast_gpps), - .nirqs = 16, - .acpi_space_id = 0x94, + .communities = southeast_communities, + .ncommunities = ARRAY_SIZE(southeast_communities), }; -static const struct chv_community *chv_communities[] = { - &southwest_community, - &north_community, - &east_community, - &southeast_community, +static const struct intel_pinctrl_soc_data *chv_soc_data[] = { + &southwest_soc_data, + &north_soc_data, + &east_soc_data, + &southeast_soc_data, + NULL }; /* @@ -598,39 +587,60 @@ static const struct chv_community *chv_communities[] = { */ static DEFINE_RAW_SPINLOCK(chv_lock); +static u32 chv_pctrl_readl(struct chv_pinctrl *pctrl, unsigned int offset) +{ + const struct intel_community *community = &pctrl->communities[0]; + + return readl(community->regs + offset); +} + +static void chv_pctrl_writel(struct chv_pinctrl *pctrl, unsigned int offset, u32 value) +{ + const struct intel_community *community = &pctrl->communities[0]; + void __iomem *reg = community->regs + offset; + + /* Write and simple read back to confirm the bus transferring done */ + writel(value, reg); + readl(reg); +} + static void __iomem *chv_padreg(struct chv_pinctrl *pctrl, unsigned int offset, unsigned int reg) { + const struct intel_community *community = &pctrl->communities[0]; unsigned int family_no = offset / MAX_FAMILY_PAD_GPIO_NO; unsigned int pad_no = offset % MAX_FAMILY_PAD_GPIO_NO; - offset = FAMILY_PAD_REGS_OFF + FAMILY_PAD_REGS_SIZE * family_no + - GPIO_REGS_SIZE * pad_no; + offset = FAMILY_PAD_REGS_SIZE * family_no + GPIO_REGS_SIZE * pad_no; - return pctrl->regs + offset + reg; + return community->pad_regs + offset + reg; } -static void chv_writel(u32 value, void __iomem *reg) +static u32 chv_readl(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int offset) { + return readl(chv_padreg(pctrl, pin, offset)); +} + +static void chv_writel(struct chv_pinctrl *pctrl, unsigned int pin, unsigned int offset, u32 value) +{ + void __iomem *reg = chv_padreg(pctrl, pin, offset); + + /* Write and simple read back to confirm the bus transferring done */ writel(value, reg); - /* simple 
readback to confirm the bus transferring done */ readl(reg); } /* When Pad Cfg is locked, driver can only change GPIOTXState or GPIORXState */ static bool chv_pad_locked(struct chv_pinctrl *pctrl, unsigned int offset) { - void __iomem *reg; - - reg = chv_padreg(pctrl, offset, CHV_PADCTRL1); - return readl(reg) & CHV_PADCTRL1_CFGLOCK; + return chv_readl(pctrl, offset, CHV_PADCTRL1) & CHV_PADCTRL1_CFGLOCK; } static int chv_get_groups_count(struct pinctrl_dev *pctldev) { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - return pctrl->community->ngroups; + return pctrl->soc->ngroups; } static const char *chv_get_group_name(struct pinctrl_dev *pctldev, @@ -638,7 +648,7 @@ static const char *chv_get_group_name(struct pinctrl_dev *pctldev, { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - return pctrl->community->groups[group].name; + return pctrl->soc->groups[group].name; } static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, @@ -646,8 +656,8 @@ static int chv_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - *pins = pctrl->community->groups[group].pins; - *npins = pctrl->community->groups[group].npins; + *pins = pctrl->soc->groups[group].pins; + *npins = pctrl->soc->groups[group].npins; return 0; } @@ -661,8 +671,8 @@ static void chv_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, raw_spin_lock_irqsave(&chv_lock, flags); - ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); - ctrl1 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL1)); + ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0); + ctrl1 = chv_readl(pctrl, offset, CHV_PADCTRL1); locked = chv_pad_locked(pctrl, offset); raw_spin_unlock_irqrestore(&chv_lock, flags); @@ -695,7 +705,7 @@ static int chv_get_functions_count(struct pinctrl_dev *pctldev) { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - return pctrl->community->nfunctions; + return pctrl->soc->nfunctions; } static const char *chv_get_function_name(struct pinctrl_dev *pctldev, @@ -703,7 +713,7 @@ static const char *chv_get_function_name(struct pinctrl_dev *pctldev, { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - return pctrl->community->functions[function].name; + return pctrl->soc->functions[function].name; } static int chv_get_function_groups(struct pinctrl_dev *pctldev, @@ -713,8 +723,8 @@ static int chv_get_function_groups(struct pinctrl_dev *pctldev, { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - *groups = pctrl->community->functions[function].groups; - *ngroups = pctrl->community->functions[function].ngroups; + *groups = pctrl->soc->functions[function].groups; + *ngroups = pctrl->soc->functions[function].ngroups; return 0; } @@ -726,7 +736,7 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned long flags; int i; - grp = &pctrl->community->groups[group]; + grp = &pctrl->soc->groups[group]; raw_spin_lock_irqsave(&chv_lock, flags); @@ -742,7 +752,6 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, for (i = 0; i < grp->npins; i++) { int pin = grp->pins[i]; - void __iomem *reg; unsigned int mode; bool invert_oe; u32 value; @@ -757,21 +766,19 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, invert_oe = mode & PINMODE_INVERT_OE; mode &= ~PINMODE_INVERT_OE; - reg = chv_padreg(pctrl, pin, CHV_PADCTRL0); - value = readl(reg); + value = chv_readl(pctrl, pin, CHV_PADCTRL0); /* Disable GPIO mode */ value &= ~CHV_PADCTRL0_GPIOEN; /* Set to desired 
mode */ value &= ~CHV_PADCTRL0_PMODE_MASK; value |= mode << CHV_PADCTRL0_PMODE_SHIFT; - chv_writel(value, reg); + chv_writel(pctrl, pin, CHV_PADCTRL0, value); /* Update for invert_oe */ - reg = chv_padreg(pctrl, pin, CHV_PADCTRL1); - value = readl(reg) & ~CHV_PADCTRL1_INVRXTX_MASK; + value = chv_readl(pctrl, pin, CHV_PADCTRL1) & ~CHV_PADCTRL1_INVRXTX_MASK; if (invert_oe) value |= CHV_PADCTRL1_INVRXTX_TXENABLE; - chv_writel(value, reg); + chv_writel(pctrl, pin, CHV_PADCTRL1, value); dev_dbg(pctrl->dev, "configured pin %u mode %u OE %sinverted\n", pin, mode, invert_oe ? "" : "not "); @@ -785,14 +792,12 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, static void chv_gpio_clear_triggering(struct chv_pinctrl *pctrl, unsigned int offset) { - void __iomem *reg; u32 value; - reg = chv_padreg(pctrl, offset, CHV_PADCTRL1); - value = readl(reg); + value = chv_readl(pctrl, offset, CHV_PADCTRL1); value &= ~CHV_PADCTRL1_INTWAKECFG_MASK; value &= ~CHV_PADCTRL1_INVRXTX_MASK; - chv_writel(value, reg); + chv_writel(pctrl, offset, CHV_PADCTRL1, value); } static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, @@ -801,13 +806,12 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); unsigned long flags; - void __iomem *reg; u32 value; raw_spin_lock_irqsave(&chv_lock, flags); if (chv_pad_locked(pctrl, offset)) { - value = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); + value = chv_readl(pctrl, offset, CHV_PADCTRL0); if (!(value & CHV_PADCTRL0_GPIOEN)) { /* Locked so cannot enable */ raw_spin_unlock_irqrestore(&chv_lock, flags); @@ -827,8 +831,7 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, /* Disable interrupt generation */ chv_gpio_clear_triggering(pctrl, offset); - reg = chv_padreg(pctrl, offset, CHV_PADCTRL0); - value = readl(reg); + value = chv_readl(pctrl, offset, CHV_PADCTRL0); /* * If the pin is in HiZ mode (both TX and RX buffers are @@ -837,13 +840,12 @@ static int chv_gpio_request_enable(struct pinctrl_dev *pctldev, if ((value & CHV_PADCTRL0_GPIOCFG_MASK) == (CHV_PADCTRL0_GPIOCFG_HIZ << CHV_PADCTRL0_GPIOCFG_SHIFT)) { value &= ~CHV_PADCTRL0_GPIOCFG_MASK; - value |= CHV_PADCTRL0_GPIOCFG_GPI << - CHV_PADCTRL0_GPIOCFG_SHIFT; + value |= CHV_PADCTRL0_GPIOCFG_GPI << CHV_PADCTRL0_GPIOCFG_SHIFT; } /* Switch to a GPIO mode */ value |= CHV_PADCTRL0_GPIOEN; - chv_writel(value, reg); + chv_writel(pctrl, offset, CHV_PADCTRL0, value); } raw_spin_unlock_irqrestore(&chv_lock, flags); @@ -871,18 +873,17 @@ static int chv_gpio_set_direction(struct pinctrl_dev *pctldev, unsigned int offset, bool input) { struct chv_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - void __iomem *reg = chv_padreg(pctrl, offset, CHV_PADCTRL0); unsigned long flags; u32 ctrl0; raw_spin_lock_irqsave(&chv_lock, flags); - ctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIOCFG_MASK; + ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0) & ~CHV_PADCTRL0_GPIOCFG_MASK; if (input) ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPI << CHV_PADCTRL0_GPIOCFG_SHIFT; else ctrl0 |= CHV_PADCTRL0_GPIOCFG_GPO << CHV_PADCTRL0_GPIOCFG_SHIFT; - chv_writel(ctrl0, reg); + chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0); raw_spin_unlock_irqrestore(&chv_lock, flags); @@ -910,8 +911,8 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin, u32 term; raw_spin_lock_irqsave(&chv_lock, flags); - ctrl0 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); - ctrl1 = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); + ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0); + ctrl1 = 
chv_readl(pctrl, pin, CHV_PADCTRL1); raw_spin_unlock_irqrestore(&chv_lock, flags); term = (ctrl0 & CHV_PADCTRL0_TERM_MASK) >> CHV_PADCTRL0_TERM_SHIFT; @@ -982,12 +983,11 @@ static int chv_config_get(struct pinctrl_dev *pctldev, unsigned int pin, static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin, enum pin_config_param param, u32 arg) { - void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL0); unsigned long flags; u32 ctrl0, pull; raw_spin_lock_irqsave(&chv_lock, flags); - ctrl0 = readl(reg); + ctrl0 = chv_readl(pctrl, pin, CHV_PADCTRL0); switch (param) { case PIN_CONFIG_BIAS_DISABLE: @@ -1039,7 +1039,7 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin, return -EINVAL; } - chv_writel(ctrl0, reg); + chv_writel(pctrl, pin, CHV_PADCTRL0, ctrl0); raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; @@ -1048,19 +1048,18 @@ static int chv_config_set_pull(struct chv_pinctrl *pctrl, unsigned int pin, static int chv_config_set_oden(struct chv_pinctrl *pctrl, unsigned int pin, bool enable) { - void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1); unsigned long flags; u32 ctrl1; raw_spin_lock_irqsave(&chv_lock, flags); - ctrl1 = readl(reg); + ctrl1 = chv_readl(pctrl, pin, CHV_PADCTRL1); if (enable) ctrl1 |= CHV_PADCTRL1_ODEN; else ctrl1 &= ~CHV_PADCTRL1_ODEN; - chv_writel(ctrl1, reg); + chv_writel(pctrl, pin, CHV_PADCTRL1, ctrl1); raw_spin_unlock_irqrestore(&chv_lock, flags); return 0; @@ -1175,7 +1174,7 @@ static int chv_gpio_get(struct gpio_chip *chip, unsigned int offset) u32 ctrl0, cfg; raw_spin_lock_irqsave(&chv_lock, flags); - ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); + ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0); raw_spin_unlock_irqrestore(&chv_lock, flags); cfg = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; @@ -1190,20 +1189,18 @@ static void chv_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct chv_pinctrl *pctrl = gpiochip_get_data(chip); unsigned long flags; - void __iomem *reg; u32 ctrl0; raw_spin_lock_irqsave(&chv_lock, flags); - reg = chv_padreg(pctrl, offset, CHV_PADCTRL0); - ctrl0 = readl(reg); + ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0); if (value) ctrl0 |= CHV_PADCTRL0_GPIOTXSTATE; else ctrl0 &= ~CHV_PADCTRL0_GPIOTXSTATE; - chv_writel(ctrl0, reg); + chv_writel(pctrl, offset, CHV_PADCTRL0, ctrl0); raw_spin_unlock_irqrestore(&chv_lock, flags); } @@ -1215,7 +1212,7 @@ static int chv_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) unsigned long flags; raw_spin_lock_irqsave(&chv_lock, flags); - ctrl0 = readl(chv_padreg(pctrl, offset, CHV_PADCTRL0)); + ctrl0 = chv_readl(pctrl, offset, CHV_PADCTRL0); raw_spin_unlock_irqrestore(&chv_lock, flags); direction = ctrl0 & CHV_PADCTRL0_GPIOCFG_MASK; @@ -1259,10 +1256,10 @@ static void chv_gpio_irq_ack(struct irq_data *d) raw_spin_lock(&chv_lock); - intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0); intr_line &= CHV_PADCTRL0_INTSEL_MASK; intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; - chv_writel(BIT(intr_line), pctrl->regs + CHV_INTSTAT); + chv_pctrl_writel(pctrl, CHV_INTSTAT, BIT(intr_line)); raw_spin_unlock(&chv_lock); } @@ -1277,16 +1274,16 @@ static void chv_gpio_irq_mask_unmask(struct irq_data *d, bool mask) raw_spin_lock_irqsave(&chv_lock, flags); - intr_line = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intr_line = chv_readl(pctrl, pin, CHV_PADCTRL0); intr_line &= CHV_PADCTRL0_INTSEL_MASK; intr_line >>= CHV_PADCTRL0_INTSEL_SHIFT; - value = readl(pctrl->regs + CHV_INTMASK); + 
value = chv_pctrl_readl(pctrl, CHV_INTMASK); if (mask) value &= ~BIT(intr_line); else value |= BIT(intr_line); - chv_writel(value, pctrl->regs + CHV_INTMASK); + chv_pctrl_writel(pctrl, CHV_INTMASK, value); raw_spin_unlock_irqrestore(&chv_lock, flags); } @@ -1322,11 +1319,11 @@ static unsigned chv_gpio_irq_startup(struct irq_data *d) u32 intsel, value; raw_spin_lock_irqsave(&chv_lock, flags); - intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + intsel = chv_readl(pctrl, pin, CHV_PADCTRL0); intsel &= CHV_PADCTRL0_INTSEL_MASK; intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; - value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1)); + value = chv_readl(pctrl, pin, CHV_PADCTRL1); if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL) handler = handle_level_irq; else @@ -1367,9 +1364,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type) * Driver programs the IntWakeCfg bits and save the mapping. */ if (!chv_pad_locked(pctrl, pin)) { - void __iomem *reg = chv_padreg(pctrl, pin, CHV_PADCTRL1); - - value = readl(reg); + value = chv_readl(pctrl, pin, CHV_PADCTRL1); value &= ~CHV_PADCTRL1_INTWAKECFG_MASK; value &= ~CHV_PADCTRL1_INVRXTX_MASK; @@ -1386,10 +1381,10 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned int type) value |= CHV_PADCTRL1_INVRXTX_RXDATA; } - chv_writel(value, reg); + chv_writel(pctrl, pin, CHV_PADCTRL1, value); } - value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0)); + value = chv_readl(pctrl, pin, CHV_PADCTRL0); value &= CHV_PADCTRL0_INTSEL_MASK; value >>= CHV_PADCTRL0_INTSEL_SHIFT; @@ -1409,6 +1404,7 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) { struct gpio_chip *gc = irq_desc_get_handler_data(desc); struct chv_pinctrl *pctrl = gpiochip_get_data(gc); + const struct intel_community *community = &pctrl->communities[0]; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned long pending; unsigned long flags; @@ -1417,10 +1413,10 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) chained_irq_enter(chip, desc); raw_spin_lock_irqsave(&chv_lock, flags); - pending = readl(pctrl->regs + CHV_INTSTAT); + pending = chv_pctrl_readl(pctrl, CHV_INTSTAT); raw_spin_unlock_irqrestore(&chv_lock, flags); - for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) { + for_each_set_bit(intr_line, &pending, community->nirqs) { unsigned int irq, offset; offset = pctrl->intr_lines[intr_line]; @@ -1477,17 +1473,17 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip, unsigned int ngpios) { struct chv_pinctrl *pctrl = gpiochip_get_data(chip); - const struct chv_community *community = pctrl->community; + const struct intel_community *community = &pctrl->communities[0]; int i; /* Do not add GPIOs that can only generate GPEs to the IRQ domain */ - for (i = 0; i < community->npins; i++) { + for (i = 0; i < pctrl->soc->npins; i++) { const struct pinctrl_pin_desc *desc; u32 intsel; - desc = &community->pins[i]; + desc = &pctrl->soc->pins[i]; - intsel = readl(chv_padreg(pctrl, desc->number, CHV_PADCTRL0)); + intsel = chv_readl(pctrl, desc->number, CHV_PADCTRL0); intsel &= CHV_PADCTRL0_INTSEL_MASK; intsel >>= CHV_PADCTRL0_INTSEL_SHIFT; @@ -1499,6 +1495,7 @@ static void chv_init_irq_valid_mask(struct gpio_chip *chip, static int chv_gpio_irq_init_hw(struct gpio_chip *chip) { struct chv_pinctrl *pctrl = gpiochip_get_data(chip); + const struct intel_community *community = &pctrl->communities[0]; /* * The same set of machines in chv_no_valid_mask[] have incorrectly @@ -1512,12 +1509,11 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip) * Mask all interrupts the 
community is able to generate * but leave the ones that can only generate GPEs unmasked. */ - chv_writel(GENMASK(31, pctrl->community->nirqs), - pctrl->regs + CHV_INTMASK); + chv_pctrl_writel(pctrl, CHV_INTMASK, GENMASK(31, community->nirqs)); } /* Clear all interrupts */ - chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); + chv_pctrl_writel(pctrl, CHV_INTSTAT, 0xffff); return 0; } @@ -1525,7 +1521,7 @@ static int chv_gpio_irq_init_hw(struct gpio_chip *chip) static int chv_gpio_add_pin_ranges(struct gpio_chip *chip) { struct chv_pinctrl *pctrl = gpiochip_get_data(chip); - const struct chv_community *community = pctrl->community; + const struct intel_community *community = &pctrl->communities[0]; const struct intel_padgroup *gpp; int ret, i; @@ -1545,15 +1541,15 @@ static int chv_gpio_add_pin_ranges(struct gpio_chip *chip) static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) { + const struct intel_community *community = &pctrl->communities[0]; const struct intel_padgroup *gpp; struct gpio_chip *chip = &pctrl->chip; bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); - const struct chv_community *community = pctrl->community; int ret, i, irq_base; *chip = chv_gpio_chip; - chip->ngpio = community->pins[community->npins - 1].number + 1; + chip->ngpio = pctrl->soc->pins[pctrl->soc->npins - 1].number + 1; chip->label = dev_name(pctrl->dev); chip->add_pin_ranges = chv_gpio_add_pin_ranges; chip->parent = pctrl->dev; @@ -1579,7 +1575,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) chip->irq.init_valid_mask = chv_init_irq_valid_mask; } else { irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, - community->npins, NUMA_NO_NODE); + pctrl->soc->npins, NUMA_NO_NODE); if (irq_base < 0) { dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); return irq_base; @@ -1616,9 +1612,9 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function, raw_spin_lock_irqsave(&chv_lock, flags); if (function == ACPI_WRITE) - chv_writel((u32)(*value), pctrl->regs + (u32)address); + chv_pctrl_writel(pctrl, address, *value); else if (function == ACPI_READ) - *value = readl(pctrl->regs + (u32)address); + *value = chv_pctrl_readl(pctrl, address); else ret = AE_BAD_PARAMETER; @@ -1629,6 +1625,10 @@ static acpi_status chv_pinctrl_mmio_access_handler(u32 function, static int chv_pinctrl_probe(struct platform_device *pdev) { + const struct intel_pinctrl_soc_data *soc_data = NULL; + const struct intel_pinctrl_soc_data **soc_table; + struct intel_community *community; + struct device *dev = &pdev->dev; struct chv_pinctrl *pctrl; struct acpi_device *adev; acpi_status status; @@ -1638,40 +1638,53 @@ static int chv_pinctrl_probe(struct platform_device *pdev) if (!adev) return -ENODEV; - pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL); - if (!pctrl) - return -ENOMEM; - - for (i = 0; i < ARRAY_SIZE(chv_communities); i++) - if (!strcmp(adev->pnp.unique_id, chv_communities[i]->uid)) { - pctrl->community = chv_communities[i]; + soc_table = (const struct intel_pinctrl_soc_data **)device_get_match_data(dev); + for (i = 0; soc_table[i]; i++) { + if (!strcmp(adev->pnp.unique_id, soc_table[i]->uid)) { + soc_data = soc_table[i]; break; } - if (i == ARRAY_SIZE(chv_communities)) + } + if (!soc_data) return -ENODEV; + pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); + if (!pctrl) + return -ENOMEM; + pctrl->dev = &pdev->dev; + pctrl->soc = soc_data; + + pctrl->ncommunities = pctrl->soc->ncommunities; + pctrl->communities = devm_kmemdup(dev, pctrl->soc->communities, + pctrl->ncommunities * 
sizeof(*pctrl->communities), + GFP_KERNEL); + if (!pctrl->communities) + return -ENOMEM; + + community = &pctrl->communities[0]; + community->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(community->regs)) + return PTR_ERR(community->regs); + + community->pad_regs = community->regs + FAMILY_PAD_REGS_OFF; #ifdef CONFIG_PM_SLEEP - pctrl->saved_pin_context = devm_kcalloc(pctrl->dev, - pctrl->community->npins, sizeof(*pctrl->saved_pin_context), - GFP_KERNEL); - if (!pctrl->saved_pin_context) + pctrl->context.pads = devm_kcalloc(dev, pctrl->soc->npins, + sizeof(*pctrl->context.pads), + GFP_KERNEL); + if (!pctrl->context.pads) return -ENOMEM; #endif - pctrl->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(pctrl->regs)) - return PTR_ERR(pctrl->regs); - irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; pctrl->pctldesc = chv_pinctrl_desc; pctrl->pctldesc.name = dev_name(&pdev->dev); - pctrl->pctldesc.pins = pctrl->community->pins; - pctrl->pctldesc.npins = pctrl->community->npins; + pctrl->pctldesc.pins = pctrl->soc->pins; + pctrl->pctldesc.npins = pctrl->soc->npins; pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc, pctrl); @@ -1685,7 +1698,7 @@ static int chv_pinctrl_probe(struct platform_device *pdev) return ret; status = acpi_install_address_space_handler(adev->handle, - pctrl->community->acpi_space_id, + community->acpi_space_id, chv_pinctrl_mmio_access_handler, NULL, pctrl); if (ACPI_FAILURE(status)) @@ -1699,9 +1712,10 @@ static int chv_pinctrl_probe(struct platform_device *pdev) static int chv_pinctrl_remove(struct platform_device *pdev) { struct chv_pinctrl *pctrl = platform_get_drvdata(pdev); + const struct intel_community *community = &pctrl->communities[0]; acpi_remove_address_space_handler(ACPI_COMPANION(&pdev->dev), - pctrl->community->acpi_space_id, + community->acpi_space_id, chv_pinctrl_mmio_access_handler); return 0; @@ -1716,24 +1730,20 @@ static int chv_pinctrl_suspend_noirq(struct device *dev) raw_spin_lock_irqsave(&chv_lock, flags); - pctrl->saved_intmask = readl(pctrl->regs + CHV_INTMASK); + pctrl->saved_intmask = chv_pctrl_readl(pctrl, CHV_INTMASK); - for (i = 0; i < pctrl->community->npins; i++) { + for (i = 0; i < pctrl->soc->npins; i++) { const struct pinctrl_pin_desc *desc; - struct chv_pin_context *ctx; - void __iomem *reg; + struct intel_pad_context *ctx = &pctrl->context.pads[i]; - desc = &pctrl->community->pins[i]; + desc = &pctrl->soc->pins[i]; if (chv_pad_locked(pctrl, desc->number)) continue; - ctx = &pctrl->saved_pin_context[i]; - - reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL0); - ctx->padctrl0 = readl(reg) & ~CHV_PADCTRL0_GPIORXSTATE; + ctx->padctrl0 = chv_readl(pctrl, desc->number, CHV_PADCTRL0); + ctx->padctrl0 &= ~CHV_PADCTRL0_GPIORXSTATE; - reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL1); - ctx->padctrl1 = readl(reg); + ctx->padctrl1 = chv_readl(pctrl, desc->number, CHV_PADCTRL1); } raw_spin_unlock_irqrestore(&chv_lock, flags); @@ -1754,35 +1764,31 @@ static int chv_pinctrl_resume_noirq(struct device *dev) * registers because we don't know in which state BIOS left them * upon exiting suspend. 
*/ - chv_writel(0, pctrl->regs + CHV_INTMASK); + chv_pctrl_writel(pctrl, CHV_INTMASK, 0x0000); - for (i = 0; i < pctrl->community->npins; i++) { + for (i = 0; i < pctrl->soc->npins; i++) { const struct pinctrl_pin_desc *desc; - const struct chv_pin_context *ctx; - void __iomem *reg; + struct intel_pad_context *ctx = &pctrl->context.pads[i]; u32 val; - desc = &pctrl->community->pins[i]; + desc = &pctrl->soc->pins[i]; if (chv_pad_locked(pctrl, desc->number)) continue; - ctx = &pctrl->saved_pin_context[i]; - /* Only restore if our saved state differs from the current */ - reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL0); - val = readl(reg) & ~CHV_PADCTRL0_GPIORXSTATE; + val = chv_readl(pctrl, desc->number, CHV_PADCTRL0); + val &= ~CHV_PADCTRL0_GPIORXSTATE; if (ctx->padctrl0 != val) { - chv_writel(ctx->padctrl0, reg); + chv_writel(pctrl, desc->number, CHV_PADCTRL0, ctx->padctrl0); dev_dbg(pctrl->dev, "restored pin %2u ctrl0 0x%08x\n", - desc->number, readl(reg)); + desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL0)); } - reg = chv_padreg(pctrl, desc->number, CHV_PADCTRL1); - val = readl(reg); + val = chv_readl(pctrl, desc->number, CHV_PADCTRL1); if (ctx->padctrl1 != val) { - chv_writel(ctx->padctrl1, reg); + chv_writel(pctrl, desc->number, CHV_PADCTRL1, ctx->padctrl1); dev_dbg(pctrl->dev, "restored pin %2u ctrl1 0x%08x\n", - desc->number, readl(reg)); + desc->number, chv_readl(pctrl, desc->number, CHV_PADCTRL1)); } } @@ -1790,8 +1796,8 @@ static int chv_pinctrl_resume_noirq(struct device *dev) * Now that all pins are restored to known state, we can restore * the interrupt mask register as well. */ - chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); - chv_writel(pctrl->saved_intmask, pctrl->regs + CHV_INTMASK); + chv_pctrl_writel(pctrl, CHV_INTSTAT, 0xffff); + chv_pctrl_writel(pctrl, CHV_INTMASK, pctrl->saved_intmask); raw_spin_unlock_irqrestore(&chv_lock, flags); @@ -1805,7 +1811,7 @@ static const struct dev_pm_ops chv_pinctrl_pm_ops = { }; static const struct acpi_device_id chv_pinctrl_acpi_match[] = { - { "INT33FF" }, + { "INT33FF", (kernel_ulong_t)chv_soc_data }, { } }; MODULE_DEVICE_TABLE(acpi, chv_pinctrl_acpi_match); diff --git a/drivers/pinctrl/intel/pinctrl-emmitsburg.c b/drivers/pinctrl/intel/pinctrl-emmitsburg.c new file mode 100644 index 000000000000..f6114dbf7520 --- /dev/null +++ b/drivers/pinctrl/intel/pinctrl-emmitsburg.c @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Emmitsburg PCH pinctrl/GPIO driver + * + * Copyright (C) 2020, Intel Corporation + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + */ + +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <linux/pinctrl/pinctrl.h> + +#include "pinctrl-intel.h" + +#define EBG_PAD_OWN 0x0a0 +#define EBG_PADCFGLOCK 0x100 +#define EBG_HOSTSW_OWN 0x130 +#define EBG_GPI_IS 0x200 +#define EBG_GPI_IE 0x210 + +#define EBG_GPP(r, s, e) \ + { \ + .reg_num = (r), \ + .base = (s), \ + .size = ((e) - (s) + 1), \ + } + +#define EBG_COMMUNITY(b, s, e, g) \ + { \ + .barno = (b), \ + .padown_offset = EBG_PAD_OWN, \ + .padcfglock_offset = EBG_PADCFGLOCK, \ + .hostown_offset = EBG_HOSTSW_OWN, \ + .is_offset = EBG_GPI_IS, \ + .ie_offset = EBG_GPI_IE, \ + .pin_base = (s), \ + .npins = ((e) - (s) + 1), \ + .gpps = (g), \ + .ngpps = ARRAY_SIZE(g), \ + } + +/* Emmitsburg */ +static const struct pinctrl_pin_desc ebg_pins[] = { + /* GPP_A */ + PINCTRL_PIN(0, "ESPI_ALERT0B"), + PINCTRL_PIN(1, "ESPI_ALERT1B"), + PINCTRL_PIN(2, "ESPI_IO_0"), + PINCTRL_PIN(3, 
"ESPI_IO_1"), + PINCTRL_PIN(4, "ESPI_IO_2"), + PINCTRL_PIN(5, "ESPI_IO_3"), + PINCTRL_PIN(6, "ESPI_CS0B"), + PINCTRL_PIN(7, "ESPI_CS1B"), + PINCTRL_PIN(8, "ESPI_RESETB"), + PINCTRL_PIN(9, "ESPI_CLK"), + PINCTRL_PIN(10, "SRCCLKREQB_0"), + PINCTRL_PIN(11, "SRCCLKREQB_1"), + PINCTRL_PIN(12, "SRCCLKREQB_2"), + PINCTRL_PIN(13, "SRCCLKREQB_3"), + PINCTRL_PIN(14, "SRCCLKREQB_4"), + PINCTRL_PIN(15, "SRCCLKREQB_5"), + PINCTRL_PIN(16, "SRCCLKREQB_6"), + PINCTRL_PIN(17, "SRCCLKREQB_7"), + PINCTRL_PIN(18, "SRCCLKREQB_8"), + PINCTRL_PIN(19, "SRCCLKREQB_9"), + PINCTRL_PIN(20, "ESPI_CLK_LOOPBK"), + /* GPP_B */ + PINCTRL_PIN(21, "GSXDOUT"), + PINCTRL_PIN(22, "GSXSLOAD"), + PINCTRL_PIN(23, "GSXDIN"), + PINCTRL_PIN(24, "GSXSRESETB"), + PINCTRL_PIN(25, "GSXCLK"), + PINCTRL_PIN(26, "USB2_OCB_0"), + PINCTRL_PIN(27, "USB2_OCB_1"), + PINCTRL_PIN(28, "USB2_OCB_2"), + PINCTRL_PIN(29, "USB2_OCB_3"), + PINCTRL_PIN(30, "USB2_OCB_4"), + PINCTRL_PIN(31, "USB2_OCB_5"), + PINCTRL_PIN(32, "USB2_OCB_6"), + PINCTRL_PIN(33, "HS_UART0_RXD"), + PINCTRL_PIN(34, "HS_UART0_TXD"), + PINCTRL_PIN(35, "HS_UART0_RTSB"), + PINCTRL_PIN(36, "HS_UART0_CTSB"), + PINCTRL_PIN(37, "HS_UART1_RXD"), + PINCTRL_PIN(38, "HS_UART1_TXD"), + PINCTRL_PIN(39, "HS_UART1_RTSB"), + PINCTRL_PIN(40, "HS_UART1_CTSB"), + PINCTRL_PIN(41, "GPPC_B_20"), + PINCTRL_PIN(42, "GPPC_B_21"), + PINCTRL_PIN(43, "GPPC_B_22"), + PINCTRL_PIN(44, "PS_ONB"), + /* SPI */ + PINCTRL_PIN(45, "SPI0_IO_2"), + PINCTRL_PIN(46, "SPI0_IO_3"), + PINCTRL_PIN(47, "SPI0_MOSI_IO_0"), + PINCTRL_PIN(48, "SPI0_MISO_IO_1"), + PINCTRL_PIN(49, "SPI0_TPM_CSB"), + PINCTRL_PIN(50, "SPI0_FLASH_0_CSB"), + PINCTRL_PIN(51, "SPI0_FLASH_1_CSB"), + PINCTRL_PIN(52, "SPI0_CLK"), + PINCTRL_PIN(53, "TIME_SYNC_0"), + PINCTRL_PIN(54, "SPKR"), + PINCTRL_PIN(55, "CPU_GP_0"), + PINCTRL_PIN(56, "CPU_GP_1"), + PINCTRL_PIN(57, "CPU_GP_2"), + PINCTRL_PIN(58, "CPU_GP_3"), + PINCTRL_PIN(59, "SUSWARNB_SUSPWRDNACK"), + PINCTRL_PIN(60, "SUSACKB"), + PINCTRL_PIN(61, "NMIB"), + PINCTRL_PIN(62, "SMIB"), + PINCTRL_PIN(63, "GPPC_S_10"), + PINCTRL_PIN(64, "GPPC_S_11"), + PINCTRL_PIN(65, "SPI_CLK_LOOPBK"), + /* GPP_C */ + PINCTRL_PIN(66, "ME_SML0CLK"), + PINCTRL_PIN(67, "ME_SML0DATA"), + PINCTRL_PIN(68, "ME_SML0ALERTB"), + PINCTRL_PIN(69, "ME_SML0BDATA"), + PINCTRL_PIN(70, "ME_SML0BCLK"), + PINCTRL_PIN(71, "ME_SML0BALERTB"), + PINCTRL_PIN(72, "ME_SML1CLK"), + PINCTRL_PIN(73, "ME_SML1DATA"), + PINCTRL_PIN(74, "ME_SML1ALERTB"), + PINCTRL_PIN(75, "ME_SML2CLK"), + PINCTRL_PIN(76, "ME_SML2DATA"), + PINCTRL_PIN(77, "ME_SML2ALERTB"), + PINCTRL_PIN(78, "ME_SML3CLK"), + PINCTRL_PIN(79, "ME_SML3DATA"), + PINCTRL_PIN(80, "ME_SML3ALERTB"), + PINCTRL_PIN(81, "ME_SML4CLK"), + PINCTRL_PIN(82, "ME_SML4DATA"), + PINCTRL_PIN(83, "ME_SML4ALERTB"), + PINCTRL_PIN(84, "GPPC_C_18"), + PINCTRL_PIN(85, "MC_SMBCLK"), + PINCTRL_PIN(86, "MC_SMBDATA"), + PINCTRL_PIN(87, "MC_SMBALERTB"), + /* GPP_D */ + PINCTRL_PIN(88, "HS_SMBCLK"), + PINCTRL_PIN(89, "HS_SMBDATA"), + PINCTRL_PIN(90, "HS_SMBALERTB"), + PINCTRL_PIN(91, "GBE_SMB_ALRT_N"), + PINCTRL_PIN(92, "GBE_SMB_CLK"), + PINCTRL_PIN(93, "GBE_SMB_DATA"), + PINCTRL_PIN(94, "GBE_GPIO10"), + PINCTRL_PIN(95, "GBE_GPIO11"), + PINCTRL_PIN(96, "CRASHLOG_TRIG_N"), + PINCTRL_PIN(97, "PMEB"), + PINCTRL_PIN(98, "BM_BUSYB"), + PINCTRL_PIN(99, "PLTRSTB"), + PINCTRL_PIN(100, "PCHHOTB"), + PINCTRL_PIN(101, "ADR_COMPLETE"), + PINCTRL_PIN(102, "ADR_TRIGGER_N"), + PINCTRL_PIN(103, "VRALERTB"), + PINCTRL_PIN(104, "ADR_ACK"), + PINCTRL_PIN(105, "THERMTRIP_N"), + PINCTRL_PIN(106, "MEMTRIP_N"), + PINCTRL_PIN(107, "MSMI_N"), + 
PINCTRL_PIN(108, "CATERR_N"), + PINCTRL_PIN(109, "GLB_RST_WARN_B"), + PINCTRL_PIN(110, "USB2_OCB_7"), + PINCTRL_PIN(111, "GPP_D_23"), + /* GPP_E */ + PINCTRL_PIN(112, "SATA1_XPCIE_0"), + PINCTRL_PIN(113, "SATA1_XPCIE_1"), + PINCTRL_PIN(114, "SATA1_XPCIE_2"), + PINCTRL_PIN(115, "SATA1_XPCIE_3"), + PINCTRL_PIN(116, "SATA0_XPCIE_2"), + PINCTRL_PIN(117, "SATA0_XPCIE_3"), + PINCTRL_PIN(118, "SATA0_USB3_XPCIE_0"), + PINCTRL_PIN(119, "SATA0_USB3_XPCIE_1"), + PINCTRL_PIN(120, "SATA0_SCLOCK"), + PINCTRL_PIN(121, "SATA0_SLOAD"), + PINCTRL_PIN(122, "SATA0_SDATAOUT"), + PINCTRL_PIN(123, "SATA1_SCLOCK"), + PINCTRL_PIN(124, "SATA1_SLOAD"), + PINCTRL_PIN(125, "SATA1_SDATAOUT"), + PINCTRL_PIN(126, "SATA2_SCLOCK"), + PINCTRL_PIN(127, "SATA2_SLOAD"), + PINCTRL_PIN(128, "SATA2_SDATAOUT"), + PINCTRL_PIN(129, "ERR0_N"), + PINCTRL_PIN(130, "ERR1_N"), + PINCTRL_PIN(131, "ERR2_N"), + PINCTRL_PIN(132, "GBE_UART_RXD"), + PINCTRL_PIN(133, "GBE_UART_TXD"), + PINCTRL_PIN(134, "GBE_UART_RTSB"), + PINCTRL_PIN(135, "GBE_UART_CTSB"), + /* JTAG */ + PINCTRL_PIN(136, "JTAG_TDO"), + PINCTRL_PIN(137, "JTAG_TDI"), + PINCTRL_PIN(138, "JTAG_TCK"), + PINCTRL_PIN(139, "JTAG_TMS"), + PINCTRL_PIN(140, "JTAGX"), + PINCTRL_PIN(141, "PRDYB"), + PINCTRL_PIN(142, "PREQB"), + PINCTRL_PIN(143, "GLB_PC_DISABLE"), + PINCTRL_PIN(144, "DBG_PMODE"), + PINCTRL_PIN(145, "GLB_EXT_ACC_DISABLE"), + /* GPP_H */ + PINCTRL_PIN(146, "GBE_GPIO12"), + PINCTRL_PIN(147, "GBE_GPIO13"), + PINCTRL_PIN(148, "GBE_SDP_TIMESYNC0_S2N"), + PINCTRL_PIN(149, "GBE_SDP_TIMESYNC1_S2N"), + PINCTRL_PIN(150, "GBE_SDP_TIMESYNC2_S2N"), + PINCTRL_PIN(151, "GBE_SDP_TIMESYNC3_S2N"), + PINCTRL_PIN(152, "GPPC_H_6"), + PINCTRL_PIN(153, "GPPC_H_7"), + PINCTRL_PIN(154, "NCSI_CLK_IN"), + PINCTRL_PIN(155, "NCSI_CRS_DV"), + PINCTRL_PIN(156, "NCSI_RXD0"), + PINCTRL_PIN(157, "NCSI_RXD1"), + PINCTRL_PIN(158, "NCSI_TX_EN"), + PINCTRL_PIN(159, "NCSI_TXD0"), + PINCTRL_PIN(160, "NCSI_TXD1"), + PINCTRL_PIN(161, "NAC_NCSI_CLK_OUT_0"), + PINCTRL_PIN(162, "NAC_NCSI_CLK_OUT_1"), + PINCTRL_PIN(163, "NAC_NCSI_CLK_OUT_2"), + PINCTRL_PIN(164, "PMCALERTB"), + PINCTRL_PIN(165, "GPPC_H_19"), + /* GPP_J */ + PINCTRL_PIN(166, "CPUPWRGD"), + PINCTRL_PIN(167, "CPU_THRMTRIP_N"), + PINCTRL_PIN(168, "PLTRST_CPUB"), + PINCTRL_PIN(169, "TRIGGER0_N"), + PINCTRL_PIN(170, "TRIGGER1_N"), + PINCTRL_PIN(171, "CPU_PWR_DEBUG_N"), + PINCTRL_PIN(172, "CPU_MEMTRIP_N"), + PINCTRL_PIN(173, "CPU_MSMI_N"), + PINCTRL_PIN(174, "ME_PECI"), + PINCTRL_PIN(175, "NAC_SPARE0"), + PINCTRL_PIN(176, "NAC_SPARE1"), + PINCTRL_PIN(177, "NAC_SPARE2"), + PINCTRL_PIN(178, "CPU_ERR0_N"), + PINCTRL_PIN(179, "CPU_CATERR_N"), + PINCTRL_PIN(180, "CPU_ERR1_N"), + PINCTRL_PIN(181, "CPU_ERR2_N"), + PINCTRL_PIN(182, "GPP_J_16"), + PINCTRL_PIN(183, "GPP_J_17"), + /* GPP_I */ + PINCTRL_PIN(184, "GBE_GPIO4"), + PINCTRL_PIN(185, "GBE_GPIO5"), + PINCTRL_PIN(186, "GBE_GPIO6"), + PINCTRL_PIN(187, "GBE_GPIO7"), + PINCTRL_PIN(188, "GBE1_LED1"), + PINCTRL_PIN(189, "GBE1_LED2"), + PINCTRL_PIN(190, "GBE2_LED0"), + PINCTRL_PIN(191, "GBE2_LED1"), + PINCTRL_PIN(192, "GBE2_LED2"), + PINCTRL_PIN(193, "GBE3_LED0"), + PINCTRL_PIN(194, "GBE3_LED1"), + PINCTRL_PIN(195, "GBE3_LED2"), + PINCTRL_PIN(196, "GBE0_I2C_CLK"), + PINCTRL_PIN(197, "GBE0_I2C_DATA"), + PINCTRL_PIN(198, "GBE1_I2C_CLK"), + PINCTRL_PIN(199, "GBE1_I2C_DATA"), + PINCTRL_PIN(200, "GBE2_I2C_CLK"), + PINCTRL_PIN(201, "GBE2_I2C_DATA"), + PINCTRL_PIN(202, "GBE3_I2C_CLK"), + PINCTRL_PIN(203, "GBE3_I2C_DATA"), + PINCTRL_PIN(204, "GBE4_I2C_CLK"), + PINCTRL_PIN(205, "GBE4_I2C_DATA"), + PINCTRL_PIN(206, "GBE_GPIO8"), + 
PINCTRL_PIN(207, "GBE_GPIO9"), + /* GPP_L */ + PINCTRL_PIN(208, "PM_SYNC_0"), + PINCTRL_PIN(209, "PM_DOWN_0"), + PINCTRL_PIN(210, "PM_SYNC_CLK_0"), + PINCTRL_PIN(211, "GPP_L_3"), + PINCTRL_PIN(212, "GPP_L_4"), + PINCTRL_PIN(213, "GPP_L_5"), + PINCTRL_PIN(214, "GPP_L_6"), + PINCTRL_PIN(215, "GPP_L_7"), + PINCTRL_PIN(216, "GPP_L_8"), + PINCTRL_PIN(217, "NAC_GBE_GPIO0_S2N"), + PINCTRL_PIN(218, "NAC_GBE_GPIO1_S2N"), + PINCTRL_PIN(219, "NAC_GBE_GPIO2_S2N"), + PINCTRL_PIN(220, "NAC_GBE_GPIO3_S2N"), + PINCTRL_PIN(221, "NAC_GBE_SMB_DATA_IN"), + PINCTRL_PIN(222, "NAC_GBE_SMB_DATA_OUT"), + PINCTRL_PIN(223, "NAC_GBE_SMB_ALRT_N"), + PINCTRL_PIN(224, "NAC_GBE_SMB_CLK_IN"), + PINCTRL_PIN(225, "NAC_GBE_SMB_CLK_OUT"), + /* GPP_M */ + PINCTRL_PIN(226, "GPP_M_0"), + PINCTRL_PIN(227, "GPP_M_1"), + PINCTRL_PIN(228, "GPP_M_2"), + PINCTRL_PIN(229, "GPP_M_3"), + PINCTRL_PIN(230, "NAC_WAKE_N"), + PINCTRL_PIN(231, "GPP_M_5"), + PINCTRL_PIN(232, "GPP_M_6"), + PINCTRL_PIN(233, "GPP_M_7"), + PINCTRL_PIN(234, "GPP_M_8"), + PINCTRL_PIN(235, "NAC_SBLINK_S2N"), + PINCTRL_PIN(236, "NAC_SBLINK_N2S"), + PINCTRL_PIN(237, "NAC_SBLINK_CLK_N2S"), + PINCTRL_PIN(238, "NAC_SBLINK_CLK_S2N"), + PINCTRL_PIN(239, "NAC_XTAL_VALID"), + PINCTRL_PIN(240, "NAC_RESET_NAC_N"), + PINCTRL_PIN(241, "GPP_M_15"), + PINCTRL_PIN(242, "GPP_M_16"), + PINCTRL_PIN(243, "GPP_M_17"), + /* GPP_N */ + PINCTRL_PIN(244, "GPP_N_0"), + PINCTRL_PIN(245, "NAC_NCSI_TXD0"), + PINCTRL_PIN(246, "GPP_N_2"), + PINCTRL_PIN(247, "GPP_N_3"), + PINCTRL_PIN(248, "NAC_NCSI_REFCLK_IN"), + PINCTRL_PIN(249, "GPP_N_5"), + PINCTRL_PIN(250, "GPP_N_6"), + PINCTRL_PIN(251, "GPP_N_7"), + PINCTRL_PIN(252, "NAC_NCSI_RXD0"), + PINCTRL_PIN(253, "NAC_NCSI_RXD1"), + PINCTRL_PIN(254, "NAC_NCSI_CRS_DV"), + PINCTRL_PIN(255, "NAC_NCSI_CLK_IN"), + PINCTRL_PIN(256, "NAC_NCSI_REFCLK_OUT"), + PINCTRL_PIN(257, "NAC_NCSI_TX_EN"), + PINCTRL_PIN(258, "NAC_NCSI_TXD1"), + PINCTRL_PIN(259, "NAC_NCSI_OE_N"), + PINCTRL_PIN(260, "NAC_GR_N"), + PINCTRL_PIN(261, "NAC_INIT_SX_WAKE_N"), +}; + +static const struct intel_padgroup ebg_community0_gpps[] = { + EBG_GPP(0, 0, 20), /* GPP_A */ + EBG_GPP(1, 21, 44), /* GPP_B */ + EBG_GPP(2, 45, 65), /* SPI */ +}; + +static const struct intel_padgroup ebg_community1_gpps[] = { + EBG_GPP(0, 66, 87), /* GPP_C */ + EBG_GPP(1, 88, 111), /* GPP_D */ +}; + +static const struct intel_padgroup ebg_community3_gpps[] = { + EBG_GPP(0, 112, 135), /* GPP_E */ + EBG_GPP(1, 136, 145), /* JTAG */ +}; + +static const struct intel_padgroup ebg_community4_gpps[] = { + EBG_GPP(0, 146, 165), /* GPP_H */ + EBG_GPP(1, 166, 183), /* GPP_J */ +}; + +static const struct intel_padgroup ebg_community5_gpps[] = { + EBG_GPP(0, 184, 207), /* GPP_I */ + EBG_GPP(1, 208, 225), /* GPP_L */ + EBG_GPP(2, 226, 243), /* GPP_M */ + EBG_GPP(3, 244, 261), /* GPP_N */ +}; + +static const struct intel_community ebg_communities[] = { + EBG_COMMUNITY(0, 0, 65, ebg_community0_gpps), + EBG_COMMUNITY(1, 66, 111, ebg_community1_gpps), + EBG_COMMUNITY(2, 112, 145, ebg_community3_gpps), + EBG_COMMUNITY(3, 146, 183, ebg_community4_gpps), + EBG_COMMUNITY(4, 184, 261, ebg_community5_gpps), +}; + +static const struct intel_pinctrl_soc_data ebg_soc_data = { + .pins = ebg_pins, + .npins = ARRAY_SIZE(ebg_pins), + .communities = ebg_communities, + .ncommunities = ARRAY_SIZE(ebg_communities), +}; + +static const struct acpi_device_id ebg_pinctrl_acpi_match[] = { + { "INTC1071", (kernel_ulong_t)&ebg_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, ebg_pinctrl_acpi_match); + +static INTEL_PINCTRL_PM_OPS(ebg_pinctrl_pm_ops); + 
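For reference, a minimal sketch of what the EBG_GPP() helper above expands to, assuming the struct intel_padgroup layout declared in pinctrl-intel.h; the name ebg_gpp_a_example is purely illustrative and not part of the driver. EBG_GPP(0, 0, 20), the GPP_A entry of ebg_community0_gpps[], is equivalent to:

	static const struct intel_padgroup ebg_gpp_a_example = {
		.reg_num = 0,	/* first register group in the community */
		.base = 0,	/* first pin covered by this group */
		.size = 21,	/* (20 - 0 + 1), i.e. the 21 GPP_A pads */
	};

EBG_COMMUNITY() derives .npins from its start/end pins in the same way and takes ARRAY_SIZE() of the pad-group table, so the ebg_communities[] entries above only need to spell out the pin ranges.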
+static struct platform_driver ebg_pinctrl_driver = { + .probe = intel_pinctrl_probe_by_hid, + .driver = { + .name = "emmitsburg-pinctrl", + .acpi_match_table = ebg_pinctrl_acpi_match, + .pm = &ebg_pinctrl_pm_ops, + }, +}; + +module_platform_driver(ebg_pinctrl_driver); + +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); +MODULE_DESCRIPTION("Intel Emmitsburg PCH pinctrl/GPIO driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 6a274e20d926..b64997b303e0 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -435,11 +435,20 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) { u32 value; + value = readl(padcfg0); + /* Put the pad into GPIO mode */ - value = readl(padcfg0) & ~PADCFG0_PMODE_MASK; + value &= ~PADCFG0_PMODE_MASK; + value |= PADCFG0_PMODE_GPIO; + + /* Disable input and output buffers */ + value &= ~PADCFG0_GPIORXDIS; + value &= ~PADCFG0_GPIOTXDIS; + /* Disable SCI/SMI/NMI generation */ value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI); value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI); + writel(value, padcfg0); } @@ -451,6 +460,8 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, void __iomem *padcfg0; unsigned long flags; + padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); + raw_spin_lock_irqsave(&pctrl->lock, flags); if (!intel_pad_owned_by_host(pctrl, pin)) { @@ -463,8 +474,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, return 0; } - padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - /* * If pin is already configured in GPIO mode, we assume that * firmware provides correct settings. In such case we avoid @@ -494,11 +503,10 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev, void __iomem *padcfg0; unsigned long flags; - raw_spin_lock_irqsave(&pctrl->lock, flags); - padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0); - __intel_gpio_set_direction(padcfg0, input); + raw_spin_lock_irqsave(&pctrl->lock, flags); + __intel_gpio_set_direction(padcfg0, input); raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -513,20 +521,21 @@ static const struct pinmux_ops intel_pinmux_ops = { .gpio_set_direction = intel_gpio_set_direction, }; -static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin, - unsigned long *config) +static int intel_config_get_pull(struct intel_pinctrl *pctrl, unsigned int pin, + enum pin_config_param param, u32 *arg) { - struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); - enum pin_config_param param = pinconf_to_config_param(*config); const struct intel_community *community; + void __iomem *padcfg1; + unsigned long flags; u32 value, term; - u32 arg = 0; - - if (!intel_pad_owned_by_host(pctrl, pin)) - return -ENOTSUPP; community = intel_get_community(pctrl, pin); - value = readl(intel_get_padcfg(pctrl, pin, PADCFG1)); + padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + value = readl(padcfg1); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + term = (value & PADCFG1_TERM_MASK) >> PADCFG1_TERM_SHIFT; switch (param) { @@ -541,16 +550,16 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin, switch (term) { case PADCFG1_TERM_1K: - arg = 1000; + *arg = 1000; break; case PADCFG1_TERM_2K: - arg = 2000; + *arg = 2000; break; case PADCFG1_TERM_5K: - arg = 5000; + *arg = 5000; break; case PADCFG1_TERM_20K: - arg = 20000; + *arg = 20000; break; } @@ 
-564,35 +573,74 @@ static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin, case PADCFG1_TERM_1K: if (!(community->features & PINCTRL_FEATURE_1K_PD)) return -EINVAL; - arg = 1000; + *arg = 1000; break; case PADCFG1_TERM_5K: - arg = 5000; + *arg = 5000; break; case PADCFG1_TERM_20K: - arg = 20000; + *arg = 20000; break; } break; - case PIN_CONFIG_INPUT_DEBOUNCE: { - void __iomem *padcfg2; - u32 v; + default: + return -EINVAL; + } - padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2); - if (!padcfg2) - return -ENOTSUPP; + return 0; +} - v = readl(padcfg2); - if (!(v & PADCFG2_DEBEN)) - return -EINVAL; +static int intel_config_get_debounce(struct intel_pinctrl *pctrl, unsigned int pin, + enum pin_config_param param, u32 *arg) +{ + void __iomem *padcfg2; + unsigned long flags; + unsigned long v; + u32 value2; + + padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2); + if (!padcfg2) + return -ENOTSUPP; + + raw_spin_lock_irqsave(&pctrl->lock, flags); + value2 = readl(padcfg2); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + if (!(value2 & PADCFG2_DEBEN)) + return -EINVAL; + + v = (value2 & PADCFG2_DEBOUNCE_MASK) >> PADCFG2_DEBOUNCE_SHIFT; + *arg = BIT(v) * DEBOUNCE_PERIOD_NSEC / NSEC_PER_USEC; + + return 0; +} + +static int intel_config_get(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *config) +{ + struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + enum pin_config_param param = pinconf_to_config_param(*config); + u32 arg = 0; + int ret; - v = (v & PADCFG2_DEBOUNCE_MASK) >> PADCFG2_DEBOUNCE_SHIFT; - arg = BIT(v) * DEBOUNCE_PERIOD_NSEC / NSEC_PER_USEC; + if (!intel_pad_owned_by_host(pctrl, pin)) + return -ENOTSUPP; + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + ret = intel_config_get_pull(pctrl, pin, param, &arg); + if (ret) + return ret; + break; + + case PIN_CONFIG_INPUT_DEBOUNCE: + ret = intel_config_get_debounce(pctrl, pin, param, &arg); + if (ret) + return ret; break; - } default: return -ENOTSUPP; @@ -613,10 +661,11 @@ static int intel_config_set_pull(struct intel_pinctrl *pctrl, unsigned int pin, int ret = 0; u32 value; - raw_spin_lock_irqsave(&pctrl->lock, flags); - community = intel_get_community(pctrl, pin); padcfg1 = intel_get_padcfg(pctrl, pin, PADCFG1); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + value = readl(padcfg1); switch (param) { @@ -686,7 +735,6 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl, void __iomem *padcfg0, *padcfg2; unsigned long flags; u32 value0, value2; - int ret = 0; padcfg2 = intel_get_padcfg(pctrl, pin, PADCFG2); if (!padcfg2) @@ -708,23 +756,22 @@ static int intel_config_set_debounce(struct intel_pinctrl *pctrl, v = order_base_2(debounce * NSEC_PER_USEC / DEBOUNCE_PERIOD_NSEC); if (v < 3 || v > 15) { - ret = -EINVAL; - goto exit_unlock; - } else { - /* Enable glitch filter and debouncer */ - value0 |= PADCFG0_PREGFRXSEL; - value2 |= v << PADCFG2_DEBOUNCE_SHIFT; - value2 |= PADCFG2_DEBEN; + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + return -EINVAL; } + + /* Enable glitch filter and debouncer */ + value0 |= PADCFG0_PREGFRXSEL; + value2 |= v << PADCFG2_DEBOUNCE_SHIFT; + value2 |= PADCFG2_DEBEN; } writel(value0, padcfg0); writel(value2, padcfg2); -exit_unlock: raw_spin_unlock_irqrestore(&pctrl->lock, flags); - return ret; + return 0; } static int intel_config_set(struct pinctrl_dev *pctldev, unsigned int pin, @@ -894,6 +941,7 @@ static void intel_gpio_set(struct gpio_chip *chip, unsigned int offset, static int 
intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { struct intel_pinctrl *pctrl = gpiochip_get_data(chip); + unsigned long flags; void __iomem *reg; u32 padcfg0; int pin; @@ -906,8 +954,9 @@ static int intel_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) if (!reg) return -EINVAL; + raw_spin_lock_irqsave(&pctrl->lock, flags); padcfg0 = readl(reg); - + raw_spin_unlock_irqrestore(&pctrl->lock, flags); if (padcfg0 & PADCFG0_PMODE_MASK) return -EINVAL; @@ -1036,6 +1085,9 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type) intel_gpio_set_gpio_mode(reg); + /* Disable TX buffer and enable RX (this will be input) */ + __intel_gpio_set_direction(reg, true); + value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); @@ -1081,22 +1133,27 @@ static int intel_gpio_irq_wake(struct irq_data *d, unsigned int on) return 0; } -static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, - const struct intel_community *community) +static int intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, + const struct intel_community *community) { struct gpio_chip *gc = &pctrl->chip; - irqreturn_t ret = IRQ_NONE; - int gpp; + unsigned int gpp; + int ret = 0; for (gpp = 0; gpp < community->ngpps; gpp++) { const struct intel_padgroup *padgrp = &community->gpps[gpp]; unsigned long pending, enabled, gpp_offset; + unsigned long flags; + + raw_spin_lock_irqsave(&pctrl->lock, flags); pending = readl(community->regs + community->is_offset + padgrp->reg_num * 4); enabled = readl(community->regs + community->ie_offset + padgrp->reg_num * 4); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); + /* Only interrupts that are enabled */ pending &= enabled; @@ -1106,9 +1163,9 @@ static irqreturn_t intel_gpio_community_irq_handler(struct intel_pinctrl *pctrl, irq = irq_find_mapping(gc->irq.domain, padgrp->gpio_base + gpp_offset); generic_handle_irq(irq); - - ret |= IRQ_HANDLED; } + + ret += pending ? 
1 : 0; } return ret; @@ -1118,16 +1175,16 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) { const struct intel_community *community; struct intel_pinctrl *pctrl = data; - irqreturn_t ret = IRQ_NONE; - int i; + unsigned int i; + int ret = 0; /* Need to check all communities for pending interrupts */ for (i = 0; i < pctrl->ncommunities; i++) { community = &pctrl->communities[i]; - ret |= intel_gpio_community_irq_handler(pctrl, community); + ret += intel_gpio_community_irq_handler(pctrl, community); } - return ret; + return IRQ_RETVAL(ret); } static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl, @@ -1571,19 +1628,6 @@ static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) } } -static u32 -intel_gpio_is_requested(struct gpio_chip *chip, int base, unsigned int size) -{ - u32 requested = 0; - unsigned int i; - - for (i = 0; i < size; i++) - if (gpiochip_is_requested(chip, base + i)) - requested |= BIT(i); - - return requested; -} - static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value) { u32 curr, updated; @@ -1604,12 +1648,16 @@ static void intel_restore_hostown(struct intel_pinctrl *pctrl, unsigned int c, const struct intel_community *community = &pctrl->communities[c]; const struct intel_padgroup *padgrp = &community->gpps[gpp]; struct device *dev = pctrl->dev; - u32 requested; + const char *dummy; + u32 requested = 0; + unsigned int i; if (padgrp->gpio_base == INTEL_GPIO_BASE_NOMAP) return; - requested = intel_gpio_is_requested(&pctrl->chip, padgrp->gpio_base, padgrp->size); + for_each_requested_gpio_in_range(&pctrl->chip, i, padgrp->gpio_base, padgrp->size, dummy) + requested |= BIT(i); + if (!intel_gpio_update_reg(base + gpp * 4, requested, saved)) return; diff --git a/drivers/pinctrl/intel/pinctrl-intel.h b/drivers/pinctrl/intel/pinctrl-intel.h index cc78c483518f..4e17308d33e9 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.h +++ b/drivers/pinctrl/intel/pinctrl-intel.h @@ -103,6 +103,8 @@ enum { * @gpps: Pad groups if the controller has variable size pad groups * @ngpps: Number of pad groups in this community * @pad_map: Optional non-linear mapping of the pads + * @nirqs: Optional total number of IRQs this community can generate + * @acpi_space_id: Optional address space ID for ACPI OpRegion handler * @regs: Community specific common registers (reserved for core driver) * @pad_regs: Community specific pad registers (reserved for core driver) * @@ -127,6 +129,8 @@ struct intel_community { const struct intel_padgroup *gpps; size_t ngpps; const unsigned int *pad_map; + unsigned short nirqs; + unsigned short acpi_space_id; /* Reserved for the core driver */ void __iomem *regs; diff --git a/drivers/pinctrl/intel/pinctrl-lynxpoint.c b/drivers/pinctrl/intel/pinctrl-lynxpoint.c index a45b8f2182fd..96589d01fe35 100644 --- a/drivers/pinctrl/intel/pinctrl-lynxpoint.c +++ b/drivers/pinctrl/intel/pinctrl-lynxpoint.c @@ -386,6 +386,16 @@ static int lp_pinmux_set_mux(struct pinctrl_dev *pctldev, return 0; } +static void lp_gpio_enable_input(void __iomem *reg) +{ + iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg); +} + +static void lp_gpio_disable_input(void __iomem *reg) +{ + iowrite32(ioread32(reg) | GPINDIS_BIT, reg); +} + static int lp_gpio_request_enable(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, unsigned int pin) @@ -411,7 +421,7 @@ static int lp_gpio_request_enable(struct pinctrl_dev *pctldev, } /* Enable input sensing */ - iowrite32(ioread32(conf2) & ~GPINDIS_BIT, conf2); + lp_gpio_enable_input(conf2); 
raw_spin_unlock_irqrestore(&lg->lock, flags); @@ -429,7 +439,7 @@ static void lp_gpio_disable_free(struct pinctrl_dev *pctldev, raw_spin_lock_irqsave(&lg->lock, flags); /* Disable input sensing */ - iowrite32(ioread32(conf2) | GPINDIS_BIT, conf2); + lp_gpio_disable_input(conf2); raw_spin_unlock_irqrestore(&lg->lock, flags); @@ -919,16 +929,14 @@ static int lp_gpio_runtime_resume(struct device *dev) static int lp_gpio_resume(struct device *dev) { struct intel_pinctrl *lg = dev_get_drvdata(dev); - void __iomem *reg; + struct gpio_chip *chip = &lg->chip; + const char *dummy; int i; /* on some hardware suspend clears input sensing, re-enable it here */ - for (i = 0; i < lg->chip.ngpio; i++) { - if (gpiochip_is_requested(&lg->chip, i) != NULL) { - reg = lp_gpio_reg(&lg->chip, i, LP_CONFIG2); - iowrite32(ioread32(reg) & ~GPINDIS_BIT, reg); - } - } + for_each_requested_gpio(chip, i, dummy) + lp_gpio_enable_input(lp_gpio_reg(chip, i, LP_CONFIG2)); + return 0; } @@ -951,7 +959,7 @@ static struct platform_driver lp_gpio_driver = { .driver = { .name = "lp_gpio", .pm = &lp_gpio_pm_ops, - .acpi_match_table = ACPI_PTR(lynxpoint_gpio_acpi_match), + .acpi_match_table = lynxpoint_gpio_acpi_match, }, }; diff --git a/drivers/pinctrl/intel/pinctrl-merrifield.c b/drivers/pinctrl/intel/pinctrl-merrifield.c index 04ca8ae95df8..e4ff8da1b894 100644 --- a/drivers/pinctrl/intel/pinctrl-merrifield.c +++ b/drivers/pinctrl/intel/pinctrl-merrifield.c @@ -135,7 +135,7 @@ static const struct pinctrl_pin_desc mrfld_pins[] = { PINCTRL_PIN(43, "GP83_SD_D3"), PINCTRL_PIN(44, "GP84_SD_LS_CLK_FB"), PINCTRL_PIN(45, "GP85_SD_LS_CMD_DIR"), - PINCTRL_PIN(46, "GP86_SD_LVL_D_DIR"), + PINCTRL_PIN(46, "GP86_SD_LS_D_DIR"), PINCTRL_PIN(47, "GP88_SD_LS_SEL"), PINCTRL_PIN(48, "GP87_SD_PD"), PINCTRL_PIN(49, "GP89_SD_WP"), @@ -171,28 +171,28 @@ static const struct pinctrl_pin_desc mrfld_pins[] = { PINCTRL_PIN(77, "GP42_I2S_2_RXD"), PINCTRL_PIN(78, "GP43_I2S_2_TXD"), /* Family 6: GP SSP (22 pins) */ - PINCTRL_PIN(79, "GP120_SPI_3_CLK"), - PINCTRL_PIN(80, "GP121_SPI_3_SS"), - PINCTRL_PIN(81, "GP122_SPI_3_RXD"), - PINCTRL_PIN(82, "GP123_SPI_3_TXD"), - PINCTRL_PIN(83, "GP102_SPI_4_CLK"), - PINCTRL_PIN(84, "GP103_SPI_4_SS_0"), - PINCTRL_PIN(85, "GP104_SPI_4_SS_1"), - PINCTRL_PIN(86, "GP105_SPI_4_SS_2"), - PINCTRL_PIN(87, "GP106_SPI_4_SS_3"), - PINCTRL_PIN(88, "GP107_SPI_4_RXD"), - PINCTRL_PIN(89, "GP108_SPI_4_TXD"), - PINCTRL_PIN(90, "GP109_SPI_5_CLK"), - PINCTRL_PIN(91, "GP110_SPI_5_SS_0"), - PINCTRL_PIN(92, "GP111_SPI_5_SS_1"), - PINCTRL_PIN(93, "GP112_SPI_5_SS_2"), - PINCTRL_PIN(94, "GP113_SPI_5_SS_3"), - PINCTRL_PIN(95, "GP114_SPI_5_RXD"), - PINCTRL_PIN(96, "GP115_SPI_5_TXD"), - PINCTRL_PIN(97, "GP116_SPI_6_CLK"), - PINCTRL_PIN(98, "GP117_SPI_6_SS"), - PINCTRL_PIN(99, "GP118_SPI_6_RXD"), - PINCTRL_PIN(100, "GP119_SPI_6_TXD"), + PINCTRL_PIN(79, "GP120_SPI_0_CLK"), + PINCTRL_PIN(80, "GP121_SPI_0_SS"), + PINCTRL_PIN(81, "GP122_SPI_0_RXD"), + PINCTRL_PIN(82, "GP123_SPI_0_TXD"), + PINCTRL_PIN(83, "GP102_SPI_1_CLK"), + PINCTRL_PIN(84, "GP103_SPI_1_SS0"), + PINCTRL_PIN(85, "GP104_SPI_1_SS1"), + PINCTRL_PIN(86, "GP105_SPI_1_SS2"), + PINCTRL_PIN(87, "GP106_SPI_1_SS3"), + PINCTRL_PIN(88, "GP107_SPI_1_RXD"), + PINCTRL_PIN(89, "GP108_SPI_1_TXD"), + PINCTRL_PIN(90, "GP109_SPI_2_CLK"), + PINCTRL_PIN(91, "GP110_SPI_2_SS0"), + PINCTRL_PIN(92, "GP111_SPI_2_SS1"), + PINCTRL_PIN(93, "GP112_SPI_2_SS2"), + PINCTRL_PIN(94, "GP113_SPI_2_SS3"), + PINCTRL_PIN(95, "GP114_SPI_2_RXD"), + PINCTRL_PIN(96, "GP115_SPI_2_TXD"), + PINCTRL_PIN(97, "GP116_SPI_3_CLK"), + 
PINCTRL_PIN(98, "GP117_SPI_3_SS"), + PINCTRL_PIN(99, "GP118_SPI_3_RXD"), + PINCTRL_PIN(100, "GP119_SPI_3_TXD"), /* Family 7: I2C (14 pins) */ PINCTRL_PIN(101, "GP19_I2C_1_SCL"), PINCTRL_PIN(102, "GP20_I2C_1_SDA"), @@ -340,6 +340,7 @@ static const struct pinctrl_pin_desc mrfld_pins[] = { }; static const unsigned int mrfld_sdio_pins[] = { 50, 51, 52, 53, 54, 55, 56 }; +static const unsigned int mrfld_i2s2_pins[] = { 75, 76, 77, 78 }; static const unsigned int mrfld_spi5_pins[] = { 90, 91, 92, 93, 94, 95, 96 }; static const unsigned int mrfld_uart0_pins[] = { 115, 116, 117, 118 }; static const unsigned int mrfld_uart1_pins[] = { 119, 120, 121, 122 }; @@ -351,6 +352,7 @@ static const unsigned int mrfld_pwm3_pins[] = { 133 }; static const struct intel_pingroup mrfld_groups[] = { PIN_GROUP("sdio_grp", mrfld_sdio_pins, 1), + PIN_GROUP("i2s2_grp", mrfld_i2s2_pins, 1), PIN_GROUP("spi5_grp", mrfld_spi5_pins, 1), PIN_GROUP("uart0_grp", mrfld_uart0_pins, 1), PIN_GROUP("uart1_grp", mrfld_uart1_pins, 1), @@ -362,6 +364,7 @@ static const struct intel_pingroup mrfld_groups[] = { }; static const char * const mrfld_sdio_groups[] = { "sdio_grp" }; +static const char * const mrfld_i2s2_groups[] = { "i2s2_grp" }; static const char * const mrfld_spi5_groups[] = { "spi5_grp" }; static const char * const mrfld_uart0_groups[] = { "uart0_grp" }; static const char * const mrfld_uart1_groups[] = { "uart1_grp" }; @@ -373,6 +376,7 @@ static const char * const mrfld_pwm3_groups[] = { "pwm3_grp" }; static const struct intel_function mrfld_functions[] = { FUNCTION("sdio", mrfld_sdio_groups), + FUNCTION("i2s2", mrfld_i2s2_groups), FUNCTION("spi5", mrfld_spi5_groups), FUNCTION("uart0", mrfld_uart0_groups), FUNCTION("uart1", mrfld_uart1_groups), diff --git a/drivers/pinctrl/intel/pinctrl-tigerlake.c b/drivers/pinctrl/intel/pinctrl-tigerlake.c index bcfd7548e282..8c162dd5f5a1 100644 --- a/drivers/pinctrl/intel/pinctrl-tigerlake.c +++ b/drivers/pinctrl/intel/pinctrl-tigerlake.c @@ -380,8 +380,366 @@ static const struct intel_pinctrl_soc_data tgllp_soc_data = { .ncommunities = ARRAY_SIZE(tgllp_communities), }; +/* Tiger Lake-H */ +static const struct pinctrl_pin_desc tglh_pins[] = { + /* GPP_A */ + PINCTRL_PIN(0, "SPI0_IO_2"), + PINCTRL_PIN(1, "SPI0_IO_3"), + PINCTRL_PIN(2, "SPI0_MOSI_IO_0"), + PINCTRL_PIN(3, "SPI0_MISO_IO_1"), + PINCTRL_PIN(4, "SPI0_TPM_CSB"), + PINCTRL_PIN(5, "SPI0_FLASH_0_CSB"), + PINCTRL_PIN(6, "SPI0_FLASH_1_CSB"), + PINCTRL_PIN(7, "SPI0_CLK"), + PINCTRL_PIN(8, "ESPI_IO_0"), + PINCTRL_PIN(9, "ESPI_IO_1"), + PINCTRL_PIN(10, "ESPI_IO_2"), + PINCTRL_PIN(11, "ESPI_IO_3"), + PINCTRL_PIN(12, "ESPI_CS0B"), + PINCTRL_PIN(13, "ESPI_CLK"), + PINCTRL_PIN(14, "ESPI_RESETB"), + PINCTRL_PIN(15, "ESPI_CS1B"), + PINCTRL_PIN(16, "ESPI_CS2B"), + PINCTRL_PIN(17, "ESPI_CS3B"), + PINCTRL_PIN(18, "ESPI_ALERT0B"), + PINCTRL_PIN(19, "ESPI_ALERT1B"), + PINCTRL_PIN(20, "ESPI_ALERT2B"), + PINCTRL_PIN(21, "ESPI_ALERT3B"), + PINCTRL_PIN(22, "GPPC_A_14"), + PINCTRL_PIN(23, "SPI0_CLK_LOOPBK"), + PINCTRL_PIN(24, "ESPI_CLK_LOOPBK"), + /* GPP_R */ + PINCTRL_PIN(25, "HDA_BCLK"), + PINCTRL_PIN(26, "HDA_SYNC"), + PINCTRL_PIN(27, "HDA_SDO"), + PINCTRL_PIN(28, "HDA_SDI_0"), + PINCTRL_PIN(29, "HDA_RSTB"), + PINCTRL_PIN(30, "HDA_SDI_1"), + PINCTRL_PIN(31, "GPP_R_6"), + PINCTRL_PIN(32, "GPP_R_7"), + PINCTRL_PIN(33, "GPP_R_8"), + PINCTRL_PIN(34, "PCIE_LNK_DOWN"), + PINCTRL_PIN(35, "ISH_UART0_RTSB"), + PINCTRL_PIN(36, "SX_EXIT_HOLDOFFB"), + PINCTRL_PIN(37, "CLKOUT_48"), + PINCTRL_PIN(38, "ISH_GP_7"), + PINCTRL_PIN(39, "ISH_GP_0"), + PINCTRL_PIN(40, 
"ISH_GP_1"), + PINCTRL_PIN(41, "ISH_GP_2"), + PINCTRL_PIN(42, "ISH_GP_3"), + PINCTRL_PIN(43, "ISH_GP_4"), + PINCTRL_PIN(44, "ISH_GP_5"), + /* GPP_B */ + PINCTRL_PIN(45, "GSPI0_CS1B"), + PINCTRL_PIN(46, "GSPI1_CS1B"), + PINCTRL_PIN(47, "VRALERTB"), + PINCTRL_PIN(48, "CPU_GP_2"), + PINCTRL_PIN(49, "CPU_GP_3"), + PINCTRL_PIN(50, "SRCCLKREQB_0"), + PINCTRL_PIN(51, "SRCCLKREQB_1"), + PINCTRL_PIN(52, "SRCCLKREQB_2"), + PINCTRL_PIN(53, "SRCCLKREQB_3"), + PINCTRL_PIN(54, "SRCCLKREQB_4"), + PINCTRL_PIN(55, "SRCCLKREQB_5"), + PINCTRL_PIN(56, "I2S_MCLK"), + PINCTRL_PIN(57, "SLP_S0B"), + PINCTRL_PIN(58, "PLTRSTB"), + PINCTRL_PIN(59, "SPKR"), + PINCTRL_PIN(60, "GSPI0_CS0B"), + PINCTRL_PIN(61, "GSPI0_CLK"), + PINCTRL_PIN(62, "GSPI0_MISO"), + PINCTRL_PIN(63, "GSPI0_MOSI"), + PINCTRL_PIN(64, "GSPI1_CS0B"), + PINCTRL_PIN(65, "GSPI1_CLK"), + PINCTRL_PIN(66, "GSPI1_MISO"), + PINCTRL_PIN(67, "GSPI1_MOSI"), + PINCTRL_PIN(68, "SML1ALERTB"), + PINCTRL_PIN(69, "GSPI0_CLK_LOOPBK"), + PINCTRL_PIN(70, "GSPI1_CLK_LOOPBK"), + /* vGPIO_0 */ + PINCTRL_PIN(71, "ESPI_USB_OCB_0"), + PINCTRL_PIN(72, "ESPI_USB_OCB_1"), + PINCTRL_PIN(73, "ESPI_USB_OCB_2"), + PINCTRL_PIN(74, "ESPI_USB_OCB_3"), + PINCTRL_PIN(75, "USB_CPU_OCB_0"), + PINCTRL_PIN(76, "USB_CPU_OCB_1"), + PINCTRL_PIN(77, "USB_CPU_OCB_2"), + PINCTRL_PIN(78, "USB_CPU_OCB_3"), + /* GPP_D */ + PINCTRL_PIN(79, "SPI1_CSB"), + PINCTRL_PIN(80, "SPI1_CLK"), + PINCTRL_PIN(81, "SPI1_MISO_IO_1"), + PINCTRL_PIN(82, "SPI1_MOSI_IO_0"), + PINCTRL_PIN(83, "SML1CLK"), + PINCTRL_PIN(84, "I2S2_SFRM"), + PINCTRL_PIN(85, "I2S2_TXD"), + PINCTRL_PIN(86, "I2S2_RXD"), + PINCTRL_PIN(87, "I2S2_SCLK"), + PINCTRL_PIN(88, "SML0CLK"), + PINCTRL_PIN(89, "SML0DATA"), + PINCTRL_PIN(90, "GPP_D_11"), + PINCTRL_PIN(91, "ISH_UART0_CTSB"), + PINCTRL_PIN(92, "SPI1_IO_2"), + PINCTRL_PIN(93, "SPI1_IO_3"), + PINCTRL_PIN(94, "SML1DATA"), + PINCTRL_PIN(95, "GSPI3_CS0B"), + PINCTRL_PIN(96, "GSPI3_CLK"), + PINCTRL_PIN(97, "GSPI3_MISO"), + PINCTRL_PIN(98, "GSPI3_MOSI"), + PINCTRL_PIN(99, "UART3_RXD"), + PINCTRL_PIN(100, "UART3_TXD"), + PINCTRL_PIN(101, "UART3_RTSB"), + PINCTRL_PIN(102, "UART3_CTSB"), + PINCTRL_PIN(103, "SPI1_CLK_LOOPBK"), + PINCTRL_PIN(104, "GSPI3_CLK_LOOPBK"), + /* GPP_C */ + PINCTRL_PIN(105, "SMBCLK"), + PINCTRL_PIN(106, "SMBDATA"), + PINCTRL_PIN(107, "SMBALERTB"), + PINCTRL_PIN(108, "ISH_UART0_RXD"), + PINCTRL_PIN(109, "ISH_UART0_TXD"), + PINCTRL_PIN(110, "SML0ALERTB"), + PINCTRL_PIN(111, "ISH_I2C2_SDA"), + PINCTRL_PIN(112, "ISH_I2C2_SCL"), + PINCTRL_PIN(113, "UART0_RXD"), + PINCTRL_PIN(114, "UART0_TXD"), + PINCTRL_PIN(115, "UART0_RTSB"), + PINCTRL_PIN(116, "UART0_CTSB"), + PINCTRL_PIN(117, "UART1_RXD"), + PINCTRL_PIN(118, "UART1_TXD"), + PINCTRL_PIN(119, "UART1_RTSB"), + PINCTRL_PIN(120, "UART1_CTSB"), + PINCTRL_PIN(121, "I2C0_SDA"), + PINCTRL_PIN(122, "I2C0_SCL"), + PINCTRL_PIN(123, "I2C1_SDA"), + PINCTRL_PIN(124, "I2C1_SCL"), + PINCTRL_PIN(125, "UART2_RXD"), + PINCTRL_PIN(126, "UART2_TXD"), + PINCTRL_PIN(127, "UART2_RTSB"), + PINCTRL_PIN(128, "UART2_CTSB"), + /* GPP_S */ + PINCTRL_PIN(129, "SNDW1_CLK"), + PINCTRL_PIN(130, "SNDW1_DATA"), + PINCTRL_PIN(131, "SNDW2_CLK"), + PINCTRL_PIN(132, "SNDW2_DATA"), + PINCTRL_PIN(133, "SNDW3_CLK"), + PINCTRL_PIN(134, "SNDW3_DATA"), + PINCTRL_PIN(135, "SNDW4_CLK"), + PINCTRL_PIN(136, "SNDW4_DATA"), + /* GPP_G */ + PINCTRL_PIN(137, "DDPA_CTRLCLK"), + PINCTRL_PIN(138, "DDPA_CTRLDATA"), + PINCTRL_PIN(139, "DNX_FORCE_RELOAD"), + PINCTRL_PIN(140, "GMII_MDC_0"), + PINCTRL_PIN(141, "GMII_MDIO_0"), + PINCTRL_PIN(142, "SLP_DRAMB"), + PINCTRL_PIN(143, "GPPC_G_6"), + 
PINCTRL_PIN(144, "GPPC_G_7"), + PINCTRL_PIN(145, "ISH_SPI_CSB"), + PINCTRL_PIN(146, "ISH_SPI_CLK"), + PINCTRL_PIN(147, "ISH_SPI_MISO"), + PINCTRL_PIN(148, "ISH_SPI_MOSI"), + PINCTRL_PIN(149, "DDP1_CTRLCLK"), + PINCTRL_PIN(150, "DDP1_CTRLDATA"), + PINCTRL_PIN(151, "DDP2_CTRLCLK"), + PINCTRL_PIN(152, "DDP2_CTRLDATA"), + PINCTRL_PIN(153, "GSPI2_CLK_LOOPBK"), + /* vGPIO */ + PINCTRL_PIN(154, "CNV_BTEN"), + PINCTRL_PIN(155, "CNV_BT_HOST_WAKEB"), + PINCTRL_PIN(156, "CNV_BT_IF_SELECT"), + PINCTRL_PIN(157, "vCNV_BT_UART_TXD"), + PINCTRL_PIN(158, "vCNV_BT_UART_RXD"), + PINCTRL_PIN(159, "vCNV_BT_UART_CTS_B"), + PINCTRL_PIN(160, "vCNV_BT_UART_RTS_B"), + PINCTRL_PIN(161, "vCNV_MFUART1_TXD"), + PINCTRL_PIN(162, "vCNV_MFUART1_RXD"), + PINCTRL_PIN(163, "vCNV_MFUART1_CTS_B"), + PINCTRL_PIN(164, "vCNV_MFUART1_RTS_B"), + PINCTRL_PIN(165, "vUART0_TXD"), + PINCTRL_PIN(166, "vUART0_RXD"), + PINCTRL_PIN(167, "vUART0_CTS_B"), + PINCTRL_PIN(168, "vUART0_RTS_B"), + PINCTRL_PIN(169, "vISH_UART0_TXD"), + PINCTRL_PIN(170, "vISH_UART0_RXD"), + PINCTRL_PIN(171, "vISH_UART0_CTS_B"), + PINCTRL_PIN(172, "vISH_UART0_RTS_B"), + PINCTRL_PIN(173, "vCNV_BT_I2S_BCLK"), + PINCTRL_PIN(174, "vCNV_BT_I2S_WS_SYNC"), + PINCTRL_PIN(175, "vCNV_BT_I2S_SDO"), + PINCTRL_PIN(176, "vCNV_BT_I2S_SDI"), + PINCTRL_PIN(177, "vI2S2_SCLK"), + PINCTRL_PIN(178, "vI2S2_SFRM"), + PINCTRL_PIN(179, "vI2S2_TXD"), + PINCTRL_PIN(180, "vI2S2_RXD"), + /* GPP_E */ + PINCTRL_PIN(181, "SATAXPCIE_0"), + PINCTRL_PIN(182, "SATAXPCIE_1"), + PINCTRL_PIN(183, "SATAXPCIE_2"), + PINCTRL_PIN(184, "CPU_GP_0"), + PINCTRL_PIN(185, "SATA_DEVSLP_0"), + PINCTRL_PIN(186, "SATA_DEVSLP_1"), + PINCTRL_PIN(187, "SATA_DEVSLP_2"), + PINCTRL_PIN(188, "CPU_GP_1"), + PINCTRL_PIN(189, "SATA_LEDB"), + PINCTRL_PIN(190, "USB2_OCB_0"), + PINCTRL_PIN(191, "USB2_OCB_1"), + PINCTRL_PIN(192, "USB2_OCB_2"), + PINCTRL_PIN(193, "USB2_OCB_3"), + /* GPP_F */ + PINCTRL_PIN(194, "SATAXPCIE_3"), + PINCTRL_PIN(195, "SATAXPCIE_4"), + PINCTRL_PIN(196, "SATAXPCIE_5"), + PINCTRL_PIN(197, "SATAXPCIE_6"), + PINCTRL_PIN(198, "SATAXPCIE_7"), + PINCTRL_PIN(199, "SATA_DEVSLP_3"), + PINCTRL_PIN(200, "SATA_DEVSLP_4"), + PINCTRL_PIN(201, "SATA_DEVSLP_5"), + PINCTRL_PIN(202, "SATA_DEVSLP_6"), + PINCTRL_PIN(203, "SATA_DEVSLP_7"), + PINCTRL_PIN(204, "SATA_SCLOCK"), + PINCTRL_PIN(205, "SATA_SLOAD"), + PINCTRL_PIN(206, "SATA_SDATAOUT1"), + PINCTRL_PIN(207, "SATA_SDATAOUT0"), + PINCTRL_PIN(208, "PS_ONB"), + PINCTRL_PIN(209, "M2_SKT2_CFG_0"), + PINCTRL_PIN(210, "M2_SKT2_CFG_1"), + PINCTRL_PIN(211, "M2_SKT2_CFG_2"), + PINCTRL_PIN(212, "M2_SKT2_CFG_3"), + PINCTRL_PIN(213, "L_VDDEN"), + PINCTRL_PIN(214, "L_BKLTEN"), + PINCTRL_PIN(215, "L_BKLTCTL"), + PINCTRL_PIN(216, "VNN_CTRL"), + PINCTRL_PIN(217, "GPP_F_23"), + /* GPP_H */ + PINCTRL_PIN(218, "SRCCLKREQB_6"), + PINCTRL_PIN(219, "SRCCLKREQB_7"), + PINCTRL_PIN(220, "SRCCLKREQB_8"), + PINCTRL_PIN(221, "SRCCLKREQB_9"), + PINCTRL_PIN(222, "SRCCLKREQB_10"), + PINCTRL_PIN(223, "SRCCLKREQB_11"), + PINCTRL_PIN(224, "SRCCLKREQB_12"), + PINCTRL_PIN(225, "SRCCLKREQB_13"), + PINCTRL_PIN(226, "SRCCLKREQB_14"), + PINCTRL_PIN(227, "SRCCLKREQB_15"), + PINCTRL_PIN(228, "SML2CLK"), + PINCTRL_PIN(229, "SML2DATA"), + PINCTRL_PIN(230, "SML2ALERTB"), + PINCTRL_PIN(231, "SML3CLK"), + PINCTRL_PIN(232, "SML3DATA"), + PINCTRL_PIN(233, "SML3ALERTB"), + PINCTRL_PIN(234, "SML4CLK"), + PINCTRL_PIN(235, "SML4DATA"), + PINCTRL_PIN(236, "SML4ALERTB"), + PINCTRL_PIN(237, "ISH_I2C0_SDA"), + PINCTRL_PIN(238, "ISH_I2C0_SCL"), + PINCTRL_PIN(239, "ISH_I2C1_SDA"), + PINCTRL_PIN(240, "ISH_I2C1_SCL"), + 
PINCTRL_PIN(241, "TIME_SYNC_0"), + /* GPP_J */ + PINCTRL_PIN(242, "CNV_PA_BLANKING"), + PINCTRL_PIN(243, "CPU_C10_GATEB"), + PINCTRL_PIN(244, "CNV_BRI_DT"), + PINCTRL_PIN(245, "CNV_BRI_RSP"), + PINCTRL_PIN(246, "CNV_RGI_DT"), + PINCTRL_PIN(247, "CNV_RGI_RSP"), + PINCTRL_PIN(248, "CNV_MFUART2_RXD"), + PINCTRL_PIN(249, "CNV_MFUART2_TXD"), + PINCTRL_PIN(250, "GPP_J_8"), + PINCTRL_PIN(251, "GPP_J_9"), + /* GPP_K */ + PINCTRL_PIN(252, "GSXDOUT"), + PINCTRL_PIN(253, "GSXSLOAD"), + PINCTRL_PIN(254, "GSXDIN"), + PINCTRL_PIN(255, "GSXSRESETB"), + PINCTRL_PIN(256, "GSXCLK"), + PINCTRL_PIN(257, "ADR_COMPLETE"), + PINCTRL_PIN(258, "DDSP_HPD_A"), + PINCTRL_PIN(259, "DDSP_HPD_B"), + PINCTRL_PIN(260, "CORE_VID_0"), + PINCTRL_PIN(261, "CORE_VID_1"), + PINCTRL_PIN(262, "DDSP_HPD_C"), + PINCTRL_PIN(263, "GPP_K_11"), + PINCTRL_PIN(264, "SYS_PWROK"), + PINCTRL_PIN(265, "SYS_RESETB"), + PINCTRL_PIN(266, "MLK_RSTB"), + /* GPP_I */ + PINCTRL_PIN(267, "PMCALERTB"), + PINCTRL_PIN(268, "DDSP_HPD_1"), + PINCTRL_PIN(269, "DDSP_HPD_2"), + PINCTRL_PIN(270, "DDSP_HPD_3"), + PINCTRL_PIN(271, "DDSP_HPD_4"), + PINCTRL_PIN(272, "DDPB_CTRLCLK"), + PINCTRL_PIN(273, "DDPB_CTRLDATA"), + PINCTRL_PIN(274, "DDPC_CTRLCLK"), + PINCTRL_PIN(275, "DDPC_CTRLDATA"), + PINCTRL_PIN(276, "FUSA_DIAGTEST_EN"), + PINCTRL_PIN(277, "FUSA_DIAGTEST_MODE"), + PINCTRL_PIN(278, "USB2_OCB_4"), + PINCTRL_PIN(279, "USB2_OCB_5"), + PINCTRL_PIN(280, "USB2_OCB_6"), + PINCTRL_PIN(281, "USB2_OCB_7"), + /* JTAG */ + PINCTRL_PIN(282, "JTAG_TDO"), + PINCTRL_PIN(283, "JTAGX"), + PINCTRL_PIN(284, "PRDYB"), + PINCTRL_PIN(285, "PREQB"), + PINCTRL_PIN(286, "JTAG_TDI"), + PINCTRL_PIN(287, "JTAG_TMS"), + PINCTRL_PIN(288, "JTAG_TCK"), + PINCTRL_PIN(289, "DBG_PMODE"), + PINCTRL_PIN(290, "CPU_TRSTB"), +}; + +static const struct intel_padgroup tglh_community0_gpps[] = { + TGL_GPP(0, 0, 24, 0), /* GPP_A */ + TGL_GPP(1, 25, 44, 128), /* GPP_R */ + TGL_GPP(2, 45, 70, 32), /* GPP_B */ + TGL_GPP(3, 71, 78, INTEL_GPIO_BASE_NOMAP), /* vGPIO_0 */ +}; + +static const struct intel_padgroup tglh_community1_gpps[] = { + TGL_GPP(0, 79, 104, 96), /* GPP_D */ + TGL_GPP(1, 105, 128, 64), /* GPP_C */ + TGL_GPP(2, 129, 136, 160), /* GPP_S */ + TGL_GPP(3, 137, 153, 192), /* GPP_G */ + TGL_GPP(4, 154, 180, 224), /* vGPIO */ +}; + +static const struct intel_padgroup tglh_community3_gpps[] = { + TGL_GPP(0, 181, 193, 256), /* GPP_E */ + TGL_GPP(1, 194, 217, 288), /* GPP_F */ +}; + +static const struct intel_padgroup tglh_community4_gpps[] = { + TGL_GPP(0, 218, 241, 320), /* GPP_H */ + TGL_GPP(1, 242, 251, 384), /* GPP_J */ + TGL_GPP(2, 252, 266, 352), /* GPP_K */ +}; + +static const struct intel_padgroup tglh_community5_gpps[] = { + TGL_GPP(0, 267, 281, 416), /* GPP_I */ + TGL_GPP(1, 282, 290, INTEL_GPIO_BASE_NOMAP), /* JTAG */ +}; + +static const struct intel_community tglh_communities[] = { + TGL_COMMUNITY(0, 0, 78, tglh_community0_gpps), + TGL_COMMUNITY(1, 79, 180, tglh_community1_gpps), + TGL_COMMUNITY(2, 181, 217, tglh_community3_gpps), + TGL_COMMUNITY(3, 218, 266, tglh_community4_gpps), + TGL_COMMUNITY(4, 267, 290, tglh_community5_gpps), +}; + +static const struct intel_pinctrl_soc_data tglh_soc_data = { + .pins = tglh_pins, + .npins = ARRAY_SIZE(tglh_pins), + .communities = tglh_communities, + .ncommunities = ARRAY_SIZE(tglh_communities), +}; + static const struct acpi_device_id tgl_pinctrl_acpi_match[] = { { "INT34C5", (kernel_ulong_t)&tgllp_soc_data }, + { "INT34C6", (kernel_ulong_t)&tglh_soc_data }, { } }; MODULE_DEVICE_TABLE(acpi, tgl_pinctrl_acpi_match); diff --git 
a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index f32d3644c509..1cedc5f2aadb 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig @@ -93,6 +93,18 @@ config PINCTRL_MT6765 default ARM64 && ARCH_MEDIATEK select PINCTRL_MTK_PARIS +config PINCTRL_MT6779 + tristate "Mediatek MT6779 pin control" + depends on OF + depends on ARM64 || COMPILE_TEST + default ARM64 && ARCH_MEDIATEK + select PINCTRL_MTK_PARIS + help + Say yes here to support the pin controller and GPIO driver + on the Mediatek MT6779 SoC. + On MTK platforms, we support virtual GPIOs and use them to + map specific EINTs which do not have a real GPIO pin. + config PINCTRL_MT6797 bool "Mediatek MT6797 pin control" depends on OF diff --git a/drivers/pinctrl/mediatek/Makefile b/drivers/pinctrl/mediatek/Makefile index 4b7132876e71..b0b07c541d11 100644 --- a/drivers/pinctrl/mediatek/Makefile +++ b/drivers/pinctrl/mediatek/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_MT2712) += pinctrl-mt2712.o obj-$(CONFIG_PINCTRL_MT8135) += pinctrl-mt8135.o obj-$(CONFIG_PINCTRL_MT8127) += pinctrl-mt8127.o obj-$(CONFIG_PINCTRL_MT6765) += pinctrl-mt6765.o +obj-$(CONFIG_PINCTRL_MT6779) += pinctrl-mt6779.o obj-$(CONFIG_PINCTRL_MT6797) += pinctrl-mt6797.o obj-$(CONFIG_PINCTRL_MT7622) += pinctrl-mt7622.o obj-$(CONFIG_PINCTRL_MT7623) += pinctrl-mt7623.o diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6779.c b/drivers/pinctrl/mediatek/pinctrl-mt6779.c new file mode 100644 index 000000000000..bb0851c73304 --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mt6779.c @@ -0,0 +1,785 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 MediaTek Inc. + * Author: Andy Teng <andy.teng@mediatek.com> + * + */ + +#include <linux/module.h> +#include "pinctrl-mtk-mt6779.h" +#include "pinctrl-paris.h" + +/* MT6779 has multiple bases for programming pin configuration, listed below: + * gpio:0x10005000, iocfg_rm:0x11C20000, iocfg_br:0x11D10000, + * iocfg_lm:0x11E20000, iocfg_lb:0x11E70000, iocfg_rt:0x11EA0000, + * iocfg_lt:0x11F20000, iocfg_tl:0x11F30000 + * The i_base argument indicates which base the pin should be mapped into.
+ */ + +#define PIN_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \ + PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \ + 32, 0) + +#define PINS_FIELD_BASE(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits) \ + PIN_FIELD_CALC(s_pin, e_pin, i_base, s_addr, x_addrs, s_bit, x_bits, \ + 32, 1) + +static const struct mtk_pin_field_calc mt6779_pin_mode_range[] = { + PIN_FIELD_BASE(0, 7, 0, 0x0300, 0x10, 0, 4), + PIN_FIELD_BASE(8, 15, 0, 0x0310, 0x10, 0, 4), + PIN_FIELD_BASE(16, 23, 0, 0x0320, 0x10, 0, 4), + PIN_FIELD_BASE(24, 31, 0, 0x0330, 0x10, 0, 4), + PIN_FIELD_BASE(32, 39, 0, 0x0340, 0x10, 0, 4), + PIN_FIELD_BASE(40, 47, 0, 0x0350, 0x10, 0, 4), + PIN_FIELD_BASE(48, 55, 0, 0x0360, 0x10, 0, 4), + PIN_FIELD_BASE(56, 63, 0, 0x0370, 0x10, 0, 4), + PIN_FIELD_BASE(64, 71, 0, 0x0380, 0x10, 0, 4), + PIN_FIELD_BASE(72, 79, 0, 0x0390, 0x10, 0, 4), + PIN_FIELD_BASE(80, 87, 0, 0x03A0, 0x10, 0, 4), + PIN_FIELD_BASE(88, 95, 0, 0x03B0, 0x10, 0, 4), + PIN_FIELD_BASE(96, 103, 0, 0x03C0, 0x10, 0, 4), + PIN_FIELD_BASE(104, 111, 0, 0x03D0, 0x10, 0, 4), + PIN_FIELD_BASE(112, 119, 0, 0x03E0, 0x10, 0, 4), + PIN_FIELD_BASE(120, 127, 0, 0x03F0, 0x10, 0, 4), + PIN_FIELD_BASE(128, 135, 0, 0x0400, 0x10, 0, 4), + PIN_FIELD_BASE(136, 143, 0, 0x0410, 0x10, 0, 4), + PIN_FIELD_BASE(144, 151, 0, 0x0420, 0x10, 0, 4), + PIN_FIELD_BASE(152, 159, 0, 0x0430, 0x10, 0, 4), + PIN_FIELD_BASE(160, 167, 0, 0x0440, 0x10, 0, 4), + PIN_FIELD_BASE(168, 175, 0, 0x0450, 0x10, 0, 4), + PIN_FIELD_BASE(176, 183, 0, 0x0460, 0x10, 0, 4), + PIN_FIELD_BASE(184, 191, 0, 0x0470, 0x10, 0, 4), + PIN_FIELD_BASE(192, 199, 0, 0x0480, 0x10, 0, 4), + PIN_FIELD_BASE(200, 202, 0, 0x0490, 0x10, 0, 4), +}; + +static const struct mtk_pin_field_calc mt6779_pin_dir_range[] = { + PIN_FIELD_BASE(0, 31, 0, 0x0000, 0x10, 0, 1), + PIN_FIELD_BASE(32, 63, 0, 0x0010, 0x10, 0, 1), + PIN_FIELD_BASE(64, 95, 0, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(96, 127, 0, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(128, 159, 0, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(160, 191, 0, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(192, 202, 0, 0x0060, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_di_range[] = { + PIN_FIELD_BASE(0, 31, 0, 0x0200, 0x10, 0, 1), + PIN_FIELD_BASE(32, 63, 0, 0x0210, 0x10, 0, 1), + PIN_FIELD_BASE(64, 95, 0, 0x0220, 0x10, 0, 1), + PIN_FIELD_BASE(96, 127, 0, 0x0230, 0x10, 0, 1), + PIN_FIELD_BASE(128, 159, 0, 0x0240, 0x10, 0, 1), + PIN_FIELD_BASE(160, 191, 0, 0x0250, 0x10, 0, 1), + PIN_FIELD_BASE(192, 202, 0, 0x0260, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_do_range[] = { + PIN_FIELD_BASE(0, 31, 0, 0x0100, 0x10, 0, 1), + PIN_FIELD_BASE(32, 63, 0, 0x0110, 0x10, 0, 1), + PIN_FIELD_BASE(64, 95, 0, 0x0120, 0x10, 0, 1), + PIN_FIELD_BASE(96, 127, 0, 0x0130, 0x10, 0, 1), + PIN_FIELD_BASE(128, 159, 0, 0x0140, 0x10, 0, 1), + PIN_FIELD_BASE(160, 191, 0, 0x0150, 0x10, 0, 1), + PIN_FIELD_BASE(192, 202, 0, 0x0160, 0x10, 0, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_ies_range[] = { + PIN_FIELD_BASE(0, 9, 6, 0x0030, 0x10, 3, 1), + PIN_FIELD_BASE(10, 16, 3, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(17, 18, 6, 0x0030, 0x10, 28, 1), + PIN_FIELD_BASE(19, 19, 6, 0x0030, 0x10, 27, 1), + PIN_FIELD_BASE(20, 20, 6, 0x0030, 0x10, 26, 1), + PIN_FIELD_BASE(21, 24, 6, 0x0030, 0x10, 19, 1), + PIN_FIELD_BASE(25, 25, 6, 0x0030, 0x10, 30, 1), + PIN_FIELD_BASE(26, 26, 6, 0x0030, 0x10, 23, 1), + PIN_FIELD_BASE(27, 27, 6, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(28, 29, 6, 0x0030, 0x10, 24, 1), + PIN_FIELD_BASE(30, 
30, 6, 0x0030, 0x10, 16, 1), + PIN_FIELD_BASE(31, 31, 6, 0x0030, 0x10, 13, 1), + PIN_FIELD_BASE(32, 32, 6, 0x0030, 0x10, 15, 1), + PIN_FIELD_BASE(33, 33, 6, 0x0030, 0x10, 17, 1), + PIN_FIELD_BASE(34, 34, 6, 0x0030, 0x10, 14, 1), + PIN_FIELD_BASE(35, 35, 6, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(36, 36, 6, 0x0030, 0x10, 31, 1), + PIN_FIELD_BASE(37, 37, 6, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(38, 41, 6, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(42, 43, 6, 0x0030, 0x10, 1, 1), + PIN_FIELD_BASE(44, 44, 6, 0x0030, 0x10, 18, 1), + PIN_FIELD_BASE(45, 45, 3, 0x0050, 0x10, 14, 1), + PIN_FIELD_BASE(46, 46, 3, 0x0050, 0x10, 22, 1), + PIN_FIELD_BASE(47, 47, 3, 0x0050, 0x10, 25, 1), + PIN_FIELD_BASE(48, 48, 3, 0x0050, 0x10, 24, 1), + PIN_FIELD_BASE(49, 49, 3, 0x0050, 0x10, 26, 1), + PIN_FIELD_BASE(50, 50, 3, 0x0050, 0x10, 23, 1), + PIN_FIELD_BASE(51, 51, 3, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(52, 52, 3, 0x0050, 0x10, 19, 1), + PIN_FIELD_BASE(53, 54, 3, 0x0050, 0x10, 27, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0050, 0x10, 21, 1), + PIN_FIELD_BASE(57, 57, 3, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(58, 58, 3, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(59, 60, 3, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(62, 62, 3, 0x0050, 0x10, 20, 1), + PIN_FIELD_BASE(63, 63, 3, 0x0050, 0x10, 17, 1), + PIN_FIELD_BASE(64, 64, 3, 0x0050, 0x10, 16, 1), + PIN_FIELD_BASE(65, 65, 3, 0x0050, 0x10, 18, 1), + PIN_FIELD_BASE(66, 66, 3, 0x0050, 0x10, 15, 1), + PIN_FIELD_BASE(67, 67, 2, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(68, 68, 2, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(69, 69, 2, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(70, 71, 2, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(72, 72, 4, 0x0020, 0x10, 3, 1), + PIN_FIELD_BASE(73, 73, 4, 0x0020, 0x10, 2, 1), + PIN_FIELD_BASE(74, 74, 4, 0x0020, 0x10, 1, 1), + PIN_FIELD_BASE(75, 75, 4, 0x0020, 0x10, 4, 1), + PIN_FIELD_BASE(76, 76, 4, 0x0020, 0x10, 12, 1), + PIN_FIELD_BASE(77, 77, 4, 0x0020, 0x10, 11, 1), + PIN_FIELD_BASE(78, 78, 2, 0x0050, 0x10, 18, 1), + PIN_FIELD_BASE(79, 79, 2, 0x0050, 0x10, 17, 1), + PIN_FIELD_BASE(80, 81, 2, 0x0050, 0x10, 19, 1), + PIN_FIELD_BASE(82, 88, 2, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(89, 89, 2, 0x0050, 0x10, 16, 1), + PIN_FIELD_BASE(90, 90, 2, 0x0050, 0x10, 15, 1), + PIN_FIELD_BASE(91, 91, 2, 0x0050, 0x10, 14, 1), + PIN_FIELD_BASE(92, 92, 2, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(93, 93, 4, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(94, 94, 2, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(95, 95, 4, 0x0020, 0x10, 7, 1), + PIN_FIELD_BASE(96, 96, 4, 0x0020, 0x10, 5, 1), + PIN_FIELD_BASE(97, 97, 4, 0x0020, 0x10, 8, 1), + PIN_FIELD_BASE(98, 98, 4, 0x0020, 0x10, 6, 1), + PIN_FIELD_BASE(99, 99, 2, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(100, 100, 2, 0x0060, 0x10, 12, 1), + PIN_FIELD_BASE(101, 101, 2, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(102, 102, 2, 0x0060, 0x10, 13, 1), + PIN_FIELD_BASE(103, 103, 2, 0x0060, 0x10, 11, 1), + PIN_FIELD_BASE(104, 104, 2, 0x0060, 0x10, 14, 1), + PIN_FIELD_BASE(105, 105, 2, 0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(106, 106, 2, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(107, 108, 2, 0x0050, 0x10, 12, 1), + PIN_FIELD_BASE(109, 109, 2, 0x0050, 0x10, 11, 1), + PIN_FIELD_BASE(110, 110, 2, 0x0060, 0x10, 16, 1), + PIN_FIELD_BASE(111, 111, 2, 0x0060, 0x10, 18, 1), + PIN_FIELD_BASE(112, 112, 2, 0x0060, 0x10, 15, 1), + PIN_FIELD_BASE(113, 113, 2, 0x0060, 0x10, 17, 1), + PIN_FIELD_BASE(114, 115, 2, 0x0050, 0x10, 26, 1), + PIN_FIELD_BASE(116, 117, 2, 0x0050, 0x10, 21, 1), + 
PIN_FIELD_BASE(118, 118, 2, 0x0050, 0x10, 31, 1), + PIN_FIELD_BASE(119, 119, 2, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(120, 121, 2, 0x0050, 0x10, 23, 1), + PIN_FIELD_BASE(122, 123, 2, 0x0050, 0x10, 28, 1), + PIN_FIELD_BASE(124, 125, 2, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(126, 127, 1, 0x0030, 0x10, 8, 1), + PIN_FIELD_BASE(128, 129, 1, 0x0030, 0x10, 17, 1), + PIN_FIELD_BASE(130, 130, 1, 0x0030, 0x10, 16, 1), + PIN_FIELD_BASE(131, 131, 1, 0x0030, 0x10, 19, 1), + PIN_FIELD_BASE(132, 132, 1, 0x0030, 0x10, 21, 1), + PIN_FIELD_BASE(133, 133, 1, 0x0030, 0x10, 20, 1), + PIN_FIELD_BASE(134, 135, 1, 0x0030, 0x10, 2, 1), + PIN_FIELD_BASE(136, 136, 1, 0x0030, 0x10, 7, 1), + PIN_FIELD_BASE(137, 137, 1, 0x0030, 0x10, 4, 1), + PIN_FIELD_BASE(138, 138, 1, 0x0030, 0x10, 6, 1), + PIN_FIELD_BASE(139, 139, 1, 0x0030, 0x10, 5, 1), + PIN_FIELD_BASE(140, 141, 1, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 1, 0x0030, 0x10, 15, 1), + PIN_FIELD_BASE(143, 143, 5, 0x0020, 0x10, 15, 1), + PIN_FIELD_BASE(144, 144, 5, 0x0020, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 5, 0x0020, 0x10, 16, 1), + PIN_FIELD_BASE(146, 146, 5, 0x0020, 0x10, 12, 1), + PIN_FIELD_BASE(147, 155, 5, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(156, 157, 5, 0x0020, 0x10, 22, 1), + PIN_FIELD_BASE(158, 158, 5, 0x0020, 0x10, 21, 1), + PIN_FIELD_BASE(159, 159, 5, 0x0020, 0x10, 24, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0020, 0x10, 19, 1), + PIN_FIELD_BASE(162, 166, 5, 0x0020, 0x10, 25, 1), + PIN_FIELD_BASE(167, 168, 7, 0x0010, 0x10, 1, 1), + PIN_FIELD_BASE(169, 169, 7, 0x0010, 0x10, 4, 1), + PIN_FIELD_BASE(170, 170, 7, 0x0010, 0x10, 6, 1), + PIN_FIELD_BASE(171, 171, 7, 0x0010, 0x10, 8, 1), + PIN_FIELD_BASE(172, 172, 7, 0x0010, 0x10, 3, 1), + PIN_FIELD_BASE(173, 173, 7, 0x0010, 0x10, 7, 1), + PIN_FIELD_BASE(174, 175, 7, 0x0010, 0x10, 9, 1), + PIN_FIELD_BASE(176, 176, 7, 0x0010, 0x10, 0, 1), + PIN_FIELD_BASE(177, 177, 7, 0x0010, 0x10, 5, 1), + PIN_FIELD_BASE(178, 178, 7, 0x0010, 0x10, 11, 1), + PIN_FIELD_BASE(179, 179, 4, 0x0020, 0x10, 13, 1), + PIN_FIELD_BASE(180, 180, 4, 0x0020, 0x10, 10, 1), + PIN_FIELD_BASE(181, 183, 1, 0x0030, 0x10, 22, 1), + PIN_FIELD_BASE(184, 184, 1, 0x0030, 0x10, 12, 1), + PIN_FIELD_BASE(185, 185, 1, 0x0030, 0x10, 11, 1), + PIN_FIELD_BASE(186, 186, 1, 0x0030, 0x10, 13, 1), + PIN_FIELD_BASE(187, 187, 1, 0x0030, 0x10, 10, 1), + PIN_FIELD_BASE(188, 188, 1, 0x0030, 0x10, 14, 1), + PIN_FIELD_BASE(189, 189, 5, 0x0020, 0x10, 9, 1), + PIN_FIELD_BASE(190, 190, 5, 0x0020, 0x10, 18, 1), + PIN_FIELD_BASE(191, 192, 5, 0x0020, 0x10, 13, 1), + PIN_FIELD_BASE(193, 194, 5, 0x0020, 0x10, 10, 1), + PIN_FIELD_BASE(195, 195, 2, 0x0050, 0x10, 30, 1), + PIN_FIELD_BASE(196, 196, 2, 0x0050, 0x10, 25, 1), + PIN_FIELD_BASE(197, 197, 2, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(198, 199, 4, 0x0020, 0x10, 14, 1), + PIN_FIELD_BASE(200, 201, 6, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(202, 202, 4, 0x0020, 0x10, 9, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_smt_range[] = { + PINS_FIELD_BASE(0, 9, 6, 0x00c0, 0x10, 3, 1), + PIN_FIELD_BASE(10, 11, 3, 0x00e0, 0x10, 0, 1), + PINS_FIELD_BASE(12, 15, 3, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(16, 16, 3, 0x00e0, 0x10, 3, 1), + PINS_FIELD_BASE(17, 20, 6, 0x00c0, 0x10, 11, 1), + PINS_FIELD_BASE(21, 24, 6, 0x00c0, 0x10, 7, 1), + PIN_FIELD_BASE(25, 25, 6, 0x00c0, 0x10, 12, 1), + PIN_FIELD_BASE(26, 26, 6, 0x00c0, 0x10, 8, 1), + PIN_FIELD_BASE(27, 27, 6, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(28, 29, 6, 0x00c0, 0x10, 9, 1), + PINS_FIELD_BASE(30, 32, 6, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(33, 33, 6, 0x00c0, 0x10, 5, 1), + 
PIN_FIELD_BASE(34, 34, 6, 0x00c0, 0x10, 4, 1), + PINS_FIELD_BASE(35, 41, 6, 0x00c0, 0x10, 13, 1), + PIN_FIELD_BASE(42, 43, 6, 0x00c0, 0x10, 1, 1), + PIN_FIELD_BASE(44, 44, 6, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(45, 45, 3, 0x00e0, 0x10, 8, 1), + PIN_FIELD_BASE(46, 46, 3, 0x00e0, 0x10, 13, 1), + PINS_FIELD_BASE(47, 50, 3, 0x00e0, 0x10, 14, 1), + PIN_FIELD_BASE(51, 51, 3, 0x00e0, 0x10, 5, 1), + PIN_FIELD_BASE(52, 52, 3, 0x00e0, 0x10, 10, 1), + PIN_FIELD_BASE(53, 54, 3, 0x00e0, 0x10, 15, 1), + PIN_FIELD_BASE(55, 55, 3, 0x00e0, 0x10, 7, 1), + PIN_FIELD_BASE(56, 56, 3, 0x00e0, 0x10, 12, 1), + PINS_FIELD_BASE(57, 60, 3, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(61, 61, 3, 0x00e0, 0x10, 6, 1), + PIN_FIELD_BASE(62, 62, 3, 0x00e0, 0x10, 11, 1), + PINS_FIELD_BASE(63, 66, 3, 0x00e0, 0x10, 9, 1), + PINS_FIELD_BASE(67, 69, 2, 0x00e0, 0x10, 11, 1), + PIN_FIELD_BASE(70, 71, 2, 0x00e0, 0x10, 10, 1), + PINS_FIELD_BASE(72, 75, 4, 0x0070, 0x10, 1, 1), + PINS_FIELD_BASE(76, 77, 4, 0x0070, 0x10, 4, 1), + PINS_FIELD_BASE(78, 86, 2, 0x00e0, 0x10, 1, 1), + PINS_FIELD_BASE(87, 92, 2, 0x00e0, 0x10, 2, 1), + PIN_FIELD_BASE(93, 93, 4, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(94, 94, 2, 0x00e0, 0x10, 2, 1), + PINS_FIELD_BASE(95, 98, 4, 0x0070, 0x10, 2, 1), + PINS_FIELD_BASE(99, 104, 2, 0x00e0, 0x10, 12, 1), + PINS_FIELD_BASE(105, 109, 2, 0x00e0, 0x10, 0, 1), + PIN_FIELD_BASE(110, 110, 2, 0x00e0, 0x10, 14, 1), + PIN_FIELD_BASE(111, 111, 2, 0x00e0, 0x10, 16, 1), + PIN_FIELD_BASE(112, 112, 2, 0x00e0, 0x10, 13, 1), + PIN_FIELD_BASE(113, 113, 2, 0x00e0, 0x10, 15, 1), + PINS_FIELD_BASE(114, 115, 2, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(116, 117, 2, 0x00e0, 0x10, 5, 1), + PINS_FIELD_BASE(118, 119, 2, 0x00e0, 0x10, 4, 1), + PIN_FIELD_BASE(120, 121, 2, 0x00e0, 0x10, 7, 1), + PINS_FIELD_BASE(122, 125, 2, 0x00e0, 0x10, 3, 1), + PINS_FIELD_BASE(126, 127, 1, 0x00c0, 0x10, 5, 1), + PINS_FIELD_BASE(128, 130, 1, 0x00c0, 0x10, 9, 1), + PINS_FIELD_BASE(131, 133, 1, 0x00c0, 0x10, 10, 1), + PIN_FIELD_BASE(134, 135, 1, 0x00c0, 0x10, 2, 1), + PINS_FIELD_BASE(136, 139, 1, 0x00c0, 0x10, 4, 1), + PIN_FIELD_BASE(140, 141, 1, 0x00c0, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 1, 0x00c0, 0x10, 8, 1), + PINS_FIELD_BASE(143, 146, 5, 0x0060, 0x10, 1, 1), + PINS_FIELD_BASE(147, 155, 5, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(156, 157, 5, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(158, 158, 5, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(159, 159, 5, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0060, 0x10, 3, 1), + PINS_FIELD_BASE(162, 166, 5, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(167, 167, 7, 0x0060, 0x10, 1, 1), + PINS_FIELD_BASE(168, 174, 7, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(175, 175, 7, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(176, 176, 7, 0x0060, 0x10, 0, 1), + PINS_FIELD_BASE(177, 178, 7, 0x0060, 0x10, 2, 1), + PINS_FIELD_BASE(179, 180, 4, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(181, 183, 1, 0x00c0, 0x10, 11, 1), + PINS_FIELD_BASE(184, 187, 1, 0x00c0, 0x10, 6, 1), + PIN_FIELD_BASE(188, 188, 1, 0x00c0, 0x10, 7, 1), + PINS_FIELD_BASE(189, 194, 5, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(195, 195, 2, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(196, 196, 2, 0x00e0, 0x10, 9, 1), + PIN_FIELD_BASE(197, 197, 2, 0x00e0, 0x10, 3, 1), + PIN_FIELD_BASE(198, 199, 4, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(200, 201, 6, 0x00c0, 0x10, 14, 1), + PIN_FIELD_BASE(202, 202, 4, 0x0070, 0x10, 3, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_pu_range[] = { + PIN_FIELD_BASE(0, 9, 6, 0x0070, 0x10, 3, 1), + PIN_FIELD_BASE(16, 16, 3, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(17, 18, 6, 
0x0070, 0x10, 28, 1), + PIN_FIELD_BASE(19, 19, 6, 0x0070, 0x10, 27, 1), + PIN_FIELD_BASE(20, 20, 6, 0x0070, 0x10, 26, 1), + PIN_FIELD_BASE(21, 24, 6, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(25, 25, 6, 0x0070, 0x10, 30, 1), + PIN_FIELD_BASE(26, 26, 6, 0x0070, 0x10, 23, 1), + PIN_FIELD_BASE(27, 27, 6, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(28, 29, 6, 0x0070, 0x10, 24, 1), + PIN_FIELD_BASE(30, 30, 6, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(31, 31, 6, 0x0070, 0x10, 13, 1), + PIN_FIELD_BASE(32, 32, 6, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(33, 33, 6, 0x0070, 0x10, 17, 1), + PIN_FIELD_BASE(34, 34, 6, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(35, 35, 6, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(36, 36, 6, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(37, 37, 6, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(38, 41, 6, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(42, 43, 6, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(44, 44, 6, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(45, 45, 3, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(46, 46, 3, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(47, 47, 3, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(48, 48, 3, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(49, 49, 3, 0x0080, 0x10, 16, 1), + PIN_FIELD_BASE(50, 50, 3, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(51, 51, 3, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(52, 52, 3, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(53, 54, 3, 0x0080, 0x10, 18, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(62, 62, 3, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(63, 63, 3, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(64, 64, 3, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(65, 65, 3, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(66, 66, 3, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(67, 67, 2, 0x00a0, 0x10, 7, 1), + PIN_FIELD_BASE(68, 68, 2, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(69, 69, 2, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(70, 71, 2, 0x00a0, 0x10, 4, 1), + PIN_FIELD_BASE(72, 72, 4, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(73, 73, 4, 0x0040, 0x10, 2, 1), + PIN_FIELD_BASE(74, 74, 4, 0x0040, 0x10, 1, 1), + PIN_FIELD_BASE(75, 75, 4, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(76, 76, 4, 0x0040, 0x10, 12, 1), + PIN_FIELD_BASE(77, 77, 4, 0x0040, 0x10, 11, 1), + PIN_FIELD_BASE(78, 78, 2, 0x0090, 0x10, 18, 1), + PIN_FIELD_BASE(79, 79, 2, 0x0090, 0x10, 17, 1), + PIN_FIELD_BASE(80, 81, 2, 0x0090, 0x10, 19, 1), + PIN_FIELD_BASE(82, 88, 2, 0x0090, 0x10, 1, 1), + PIN_FIELD_BASE(89, 89, 2, 0x0090, 0x10, 16, 1), + PIN_FIELD_BASE(90, 90, 2, 0x0090, 0x10, 15, 1), + PIN_FIELD_BASE(91, 91, 2, 0x0090, 0x10, 14, 1), + PIN_FIELD_BASE(92, 92, 2, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(93, 93, 4, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(94, 94, 2, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(95, 95, 4, 0x0040, 0x10, 7, 1), + PIN_FIELD_BASE(96, 96, 4, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(97, 97, 4, 0x0040, 0x10, 8, 1), + PIN_FIELD_BASE(98, 98, 4, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(99, 99, 2, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(100, 100, 2, 0x00a0, 0x10, 12, 1), + PIN_FIELD_BASE(101, 101, 2, 0x00a0, 0x10, 10, 1), + PIN_FIELD_BASE(102, 102, 2, 0x00a0, 0x10, 13, 1), + PIN_FIELD_BASE(103, 103, 2, 0x00a0, 0x10, 11, 1), + PIN_FIELD_BASE(104, 104, 2, 0x00a0, 0x10, 14, 1), + PIN_FIELD_BASE(105, 105, 2, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(106, 106, 2, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(107, 108, 2, 0x0090, 0x10, 12, 1), + PIN_FIELD_BASE(109, 109, 2, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(110, 110, 2, 0x00a0, 0x10, 16, 1), + PIN_FIELD_BASE(111, 111, 2, 0x00a0, 0x10, 
18, 1), + PIN_FIELD_BASE(112, 112, 2, 0x00a0, 0x10, 15, 1), + PIN_FIELD_BASE(113, 113, 2, 0x00a0, 0x10, 17, 1), + PIN_FIELD_BASE(114, 115, 2, 0x0090, 0x10, 26, 1), + PIN_FIELD_BASE(116, 117, 2, 0x0090, 0x10, 21, 1), + PIN_FIELD_BASE(118, 118, 2, 0x0090, 0x10, 31, 1), + PIN_FIELD_BASE(119, 119, 2, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(120, 121, 2, 0x0090, 0x10, 23, 1), + PIN_FIELD_BASE(122, 123, 2, 0x0090, 0x10, 28, 1), + PIN_FIELD_BASE(124, 125, 2, 0x00a0, 0x10, 1, 1), + PIN_FIELD_BASE(126, 127, 1, 0x0070, 0x10, 2, 1), + PIN_FIELD_BASE(140, 141, 1, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 1, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(143, 143, 5, 0x0040, 0x10, 15, 1), + PIN_FIELD_BASE(144, 144, 5, 0x0040, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 5, 0x0040, 0x10, 16, 1), + PIN_FIELD_BASE(146, 146, 5, 0x0040, 0x10, 12, 1), + PIN_FIELD_BASE(147, 155, 5, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(156, 157, 5, 0x0040, 0x10, 22, 1), + PIN_FIELD_BASE(158, 158, 5, 0x0040, 0x10, 21, 1), + PIN_FIELD_BASE(159, 159, 5, 0x0040, 0x10, 24, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0040, 0x10, 19, 1), + PIN_FIELD_BASE(162, 166, 5, 0x0040, 0x10, 25, 1), + PIN_FIELD_BASE(179, 179, 4, 0x0040, 0x10, 13, 1), + PIN_FIELD_BASE(180, 180, 4, 0x0040, 0x10, 10, 1), + PIN_FIELD_BASE(181, 183, 1, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(184, 184, 1, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(185, 185, 1, 0x0070, 0x10, 5, 1), + PIN_FIELD_BASE(186, 186, 1, 0x0070, 0x10, 7, 1), + PIN_FIELD_BASE(187, 187, 1, 0x0070, 0x10, 4, 1), + PIN_FIELD_BASE(188, 188, 1, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(189, 189, 5, 0x0040, 0x10, 9, 1), + PIN_FIELD_BASE(190, 190, 5, 0x0040, 0x10, 18, 1), + PIN_FIELD_BASE(191, 192, 5, 0x0040, 0x10, 13, 1), + PIN_FIELD_BASE(193, 194, 5, 0x0040, 0x10, 10, 1), + PIN_FIELD_BASE(195, 195, 2, 0x0090, 0x10, 30, 1), + PIN_FIELD_BASE(196, 196, 2, 0x0090, 0x10, 25, 1), + PIN_FIELD_BASE(197, 197, 2, 0x00a0, 0x10, 3, 1), + PIN_FIELD_BASE(198, 199, 4, 0x0040, 0x10, 14, 1), + PIN_FIELD_BASE(200, 201, 6, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(202, 202, 4, 0x0040, 0x10, 9, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_pd_range[] = { + PIN_FIELD_BASE(0, 9, 6, 0x0050, 0x10, 3, 1), + PIN_FIELD_BASE(16, 16, 3, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(17, 18, 6, 0x0050, 0x10, 28, 1), + PIN_FIELD_BASE(19, 19, 6, 0x0050, 0x10, 27, 1), + PIN_FIELD_BASE(20, 20, 6, 0x0050, 0x10, 26, 1), + PIN_FIELD_BASE(21, 24, 6, 0x0050, 0x10, 19, 1), + PIN_FIELD_BASE(25, 25, 6, 0x0050, 0x10, 30, 1), + PIN_FIELD_BASE(26, 26, 6, 0x0050, 0x10, 23, 1), + PIN_FIELD_BASE(27, 27, 6, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(28, 29, 6, 0x0050, 0x10, 24, 1), + PIN_FIELD_BASE(30, 30, 6, 0x0050, 0x10, 16, 1), + PIN_FIELD_BASE(31, 31, 6, 0x0050, 0x10, 13, 1), + PIN_FIELD_BASE(32, 32, 6, 0x0050, 0x10, 15, 1), + PIN_FIELD_BASE(33, 33, 6, 0x0050, 0x10, 17, 1), + PIN_FIELD_BASE(34, 34, 6, 0x0050, 0x10, 14, 1), + PIN_FIELD_BASE(35, 35, 6, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(36, 36, 6, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(37, 37, 6, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(38, 41, 6, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(42, 43, 6, 0x0050, 0x10, 1, 1), + PIN_FIELD_BASE(44, 44, 6, 0x0050, 0x10, 18, 1), + PIN_FIELD_BASE(45, 45, 3, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(46, 46, 3, 0x0060, 0x10, 12, 1), + PIN_FIELD_BASE(47, 47, 3, 0x0060, 0x10, 15, 1), + PIN_FIELD_BASE(48, 48, 3, 0x0060, 0x10, 14, 1), + PIN_FIELD_BASE(49, 49, 3, 0x0060, 0x10, 16, 1), + PIN_FIELD_BASE(50, 50, 3, 0x0060, 0x10, 13, 1), + PIN_FIELD_BASE(51, 51, 3, 0x0060, 0x10, 1, 1), + PIN_FIELD_BASE(52, 
52, 3, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(53, 54, 3, 0x0060, 0x10, 18, 1), + PIN_FIELD_BASE(55, 55, 3, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(56, 56, 3, 0x0060, 0x10, 11, 1), + PIN_FIELD_BASE(61, 61, 3, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(62, 62, 3, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(63, 63, 3, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(64, 64, 3, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(65, 65, 3, 0x0060, 0x10, 8, 1), + PIN_FIELD_BASE(66, 66, 3, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(67, 67, 2, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(68, 68, 2, 0x0080, 0x10, 6, 1), + PIN_FIELD_BASE(69, 69, 2, 0x0080, 0x10, 8, 1), + PIN_FIELD_BASE(70, 71, 2, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(72, 72, 4, 0x0030, 0x10, 3, 1), + PIN_FIELD_BASE(73, 73, 4, 0x0030, 0x10, 2, 1), + PIN_FIELD_BASE(74, 74, 4, 0x0030, 0x10, 1, 1), + PIN_FIELD_BASE(75, 75, 4, 0x0030, 0x10, 4, 1), + PIN_FIELD_BASE(76, 76, 4, 0x0030, 0x10, 12, 1), + PIN_FIELD_BASE(77, 77, 4, 0x0030, 0x10, 11, 1), + PIN_FIELD_BASE(78, 78, 2, 0x0070, 0x10, 18, 1), + PIN_FIELD_BASE(79, 79, 2, 0x0070, 0x10, 17, 1), + PIN_FIELD_BASE(80, 81, 2, 0x0070, 0x10, 19, 1), + PIN_FIELD_BASE(82, 88, 2, 0x0070, 0x10, 1, 1), + PIN_FIELD_BASE(89, 89, 2, 0x0070, 0x10, 16, 1), + PIN_FIELD_BASE(90, 90, 2, 0x0070, 0x10, 15, 1), + PIN_FIELD_BASE(91, 91, 2, 0x0070, 0x10, 14, 1), + PIN_FIELD_BASE(92, 92, 2, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(93, 93, 4, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(94, 94, 2, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(95, 95, 4, 0x0030, 0x10, 7, 1), + PIN_FIELD_BASE(96, 96, 4, 0x0030, 0x10, 5, 1), + PIN_FIELD_BASE(97, 97, 4, 0x0030, 0x10, 8, 1), + PIN_FIELD_BASE(98, 98, 4, 0x0030, 0x10, 6, 1), + PIN_FIELD_BASE(99, 99, 2, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(100, 100, 2, 0x0080, 0x10, 12, 1), + PIN_FIELD_BASE(101, 101, 2, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(102, 102, 2, 0x0080, 0x10, 13, 1), + PIN_FIELD_BASE(103, 103, 2, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(104, 104, 2, 0x0080, 0x10, 14, 1), + PIN_FIELD_BASE(105, 105, 2, 0x0070, 0x10, 10, 1), + PIN_FIELD_BASE(106, 106, 2, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(107, 108, 2, 0x0070, 0x10, 12, 1), + PIN_FIELD_BASE(109, 109, 2, 0x0070, 0x10, 11, 1), + PIN_FIELD_BASE(110, 110, 2, 0x0080, 0x10, 16, 1), + PIN_FIELD_BASE(111, 111, 2, 0x0080, 0x10, 18, 1), + PIN_FIELD_BASE(112, 112, 2, 0x0080, 0x10, 15, 1), + PIN_FIELD_BASE(113, 113, 2, 0x0080, 0x10, 17, 1), + PIN_FIELD_BASE(114, 115, 2, 0x0070, 0x10, 26, 1), + PIN_FIELD_BASE(116, 117, 2, 0x0070, 0x10, 21, 1), + PIN_FIELD_BASE(118, 118, 2, 0x0070, 0x10, 31, 1), + PIN_FIELD_BASE(119, 119, 2, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(120, 121, 2, 0x0070, 0x10, 23, 1), + PIN_FIELD_BASE(122, 123, 2, 0x0070, 0x10, 28, 1), + PIN_FIELD_BASE(124, 125, 2, 0x0080, 0x10, 1, 1), + PIN_FIELD_BASE(126, 127, 1, 0x0050, 0x10, 2, 1), + PIN_FIELD_BASE(140, 141, 1, 0x0050, 0x10, 0, 1), + PIN_FIELD_BASE(142, 142, 1, 0x0050, 0x10, 9, 1), + PIN_FIELD_BASE(143, 143, 5, 0x0030, 0x10, 15, 1), + PIN_FIELD_BASE(144, 144, 5, 0x0030, 0x10, 17, 1), + PIN_FIELD_BASE(145, 145, 5, 0x0030, 0x10, 16, 1), + PIN_FIELD_BASE(146, 146, 5, 0x0030, 0x10, 12, 1), + PIN_FIELD_BASE(147, 155, 5, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(156, 157, 5, 0x0030, 0x10, 22, 1), + PIN_FIELD_BASE(158, 158, 5, 0x0030, 0x10, 21, 1), + PIN_FIELD_BASE(159, 159, 5, 0x0030, 0x10, 24, 1), + PIN_FIELD_BASE(160, 161, 5, 0x0030, 0x10, 19, 1), + PIN_FIELD_BASE(162, 166, 5, 0x0030, 0x10, 25, 1), + PIN_FIELD_BASE(179, 179, 4, 0x0030, 0x10, 13, 1), + PIN_FIELD_BASE(180, 180, 4, 0x0030, 0x10, 10, 1), + PIN_FIELD_BASE(181, 183, 1, 
0x0050, 0x10, 10, 1), + PIN_FIELD_BASE(184, 184, 1, 0x0050, 0x10, 6, 1), + PIN_FIELD_BASE(185, 185, 1, 0x0050, 0x10, 5, 1), + PIN_FIELD_BASE(186, 186, 1, 0x0050, 0x10, 7, 1), + PIN_FIELD_BASE(187, 187, 1, 0x0050, 0x10, 4, 1), + PIN_FIELD_BASE(188, 188, 1, 0x0050, 0x10, 8, 1), + PIN_FIELD_BASE(189, 189, 5, 0x0030, 0x10, 9, 1), + PIN_FIELD_BASE(190, 190, 5, 0x0030, 0x10, 18, 1), + PIN_FIELD_BASE(191, 192, 5, 0x0030, 0x10, 13, 1), + PIN_FIELD_BASE(193, 194, 5, 0x0030, 0x10, 10, 1), + PIN_FIELD_BASE(195, 195, 2, 0x0070, 0x10, 30, 1), + PIN_FIELD_BASE(196, 196, 2, 0x0070, 0x10, 25, 1), + PIN_FIELD_BASE(197, 197, 2, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(198, 199, 4, 0x0030, 0x10, 14, 1), + PIN_FIELD_BASE(200, 201, 6, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(202, 202, 4, 0x0030, 0x10, 9, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_drv_range[] = { + PINS_FIELD_BASE(0, 9, 6, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(10, 16, 3, 0x0000, 0x10, 0, 3), + PINS_FIELD_BASE(17, 19, 6, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(20, 20, 6, 0x0010, 0x10, 6, 3), + PINS_FIELD_BASE(21, 24, 6, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(25, 25, 6, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(26, 26, 6, 0x0000, 0x10, 24, 3), + PIN_FIELD_BASE(27, 27, 6, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(28, 28, 6, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(29, 29, 6, 0x0010, 0x10, 0, 3), + PINS_FIELD_BASE(30, 32, 6, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(33, 33, 6, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(34, 34, 6, 0x0000, 0x10, 12, 3), + PINS_FIELD_BASE(35, 41, 6, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(42, 43, 6, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(44, 44, 6, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(45, 45, 3, 0x0010, 0x10, 12, 3), + PIN_FIELD_BASE(46, 46, 3, 0x0020, 0x10, 0, 3), + PINS_FIELD_BASE(47, 49, 3, 0x0020, 0x10, 3, 3), + PIN_FIELD_BASE(50, 50, 3, 0x0020, 0x10, 6, 3), + PIN_FIELD_BASE(51, 51, 3, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(52, 52, 3, 0x0010, 0x10, 21, 3), + PINS_FIELD_BASE(53, 54, 3, 0x0020, 0x10, 9, 3), + PIN_FIELD_BASE(55, 55, 3, 0x0010, 0x10, 9, 3), + PIN_FIELD_BASE(56, 56, 3, 0x0010, 0x10, 27, 3), + PIN_FIELD_BASE(57, 57, 3, 0x0010, 0x10, 0, 3), + PIN_FIELD_BASE(58, 58, 3, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(59, 60, 3, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(61, 61, 3, 0x0010, 0x10, 6, 3), + PIN_FIELD_BASE(62, 62, 3, 0x0010, 0x10, 24, 3), + PINS_FIELD_BASE(63, 65, 3, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(66, 66, 3, 0x0010, 0x10, 18, 3), + PINS_FIELD_BASE(67, 69, 2, 0x0010, 0x10, 3, 3), + PIN_FIELD_BASE(70, 71, 2, 0x0010, 0x10, 0, 3), + PINS_FIELD_BASE(72, 75, 4, 0x0000, 0x10, 0, 3), + PINS_FIELD_BASE(76, 77, 4, 0x0000, 0x10, 15, 3), + PINS_FIELD_BASE(78, 86, 2, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(87, 92, 2, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(93, 93, 4, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(94, 94, 2, 0x0000, 0x10, 6, 3), + PINS_FIELD_BASE(95, 96, 4, 0x0000, 0x10, 6, 3), + PINS_FIELD_BASE(97, 98, 4, 0x0000, 0x10, 9, 3), + PINS_FIELD_BASE(99, 100, 2, 0x0010, 0x10, 6, 3), + PINS_FIELD_BASE(101, 102, 2, 0x0010, 0x10, 9, 3), + PINS_FIELD_BASE(103, 104, 2, 0x0010, 0x10, 12, 3), + PINS_FIELD_BASE(105, 109, 2, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(110, 110, 2, 0x0010, 0x10, 18, 3), + PIN_FIELD_BASE(111, 111, 2, 0x0010, 0x10, 24, 3), + PIN_FIELD_BASE(112, 112, 2, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(113, 113, 2, 0x0010, 0x10, 21, 3), + PINS_FIELD_BASE(114, 115, 2, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(116, 117, 2, 0x0000, 0x10, 15, 3), + PINS_FIELD_BASE(118, 119, 2, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(120, 121, 
2, 0x0000, 0x10, 21, 3), + PINS_FIELD_BASE(122, 125, 2, 0x0000, 0x10, 9, 3), + PINS_FIELD_BASE(126, 127, 1, 0x0000, 0x10, 12, 3), + PIN_FIELD_BASE(128, 128, 1, 0x0000, 0x10, 29, 2), + PIN_FIELD_BASE(129, 129, 1, 0x0010, 0x10, 0, 2), + PIN_FIELD_BASE(130, 130, 1, 0x0000, 0x10, 27, 2), + PIN_FIELD_BASE(131, 131, 1, 0x0010, 0x10, 2, 2), + PIN_FIELD_BASE(132, 132, 1, 0x0010, 0x10, 6, 2), + PIN_FIELD_BASE(133, 133, 1, 0x0010, 0x10, 4, 2), + PIN_FIELD_BASE(134, 135, 1, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(136, 139, 1, 0x0000, 0x10, 9, 3), + PINS_FIELD_BASE(140, 141, 1, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(142, 142, 1, 0x0000, 0x10, 24, 3), + PINS_FIELD_BASE(143, 146, 5, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(147, 155, 5, 0x0000, 0x10, 0, 3), + PIN_FIELD_BASE(156, 157, 5, 0x0000, 0x10, 21, 3), + PIN_FIELD_BASE(158, 158, 5, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(159, 159, 5, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(160, 161, 5, 0x0000, 0x10, 9, 3), + PINS_FIELD_BASE(162, 166, 5, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(167, 167, 7, 0x0000, 0x10, 3, 3), + PINS_FIELD_BASE(168, 174, 7, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(175, 175, 7, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(176, 176, 7, 0x0000, 0x10, 0, 3), + PINS_FIELD_BASE(177, 178, 7, 0x0000, 0x10, 6, 3), + PIN_FIELD_BASE(179, 180, 4, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(181, 183, 1, 0x0010, 0x10, 8, 3), + PINS_FIELD_BASE(184, 186, 1, 0x0000, 0x10, 15, 3), + PIN_FIELD_BASE(187, 188, 1, 0x0000, 0x10, 18, 3), + PIN_FIELD_BASE(189, 189, 5, 0x0000, 0x10, 6, 3), + PINS_FIELD_BASE(190, 194, 5, 0x0000, 0x10, 3, 3), + PIN_FIELD_BASE(195, 195, 2, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(196, 196, 2, 0x0000, 0x10, 27, 3), + PIN_FIELD_BASE(197, 197, 2, 0x0000, 0x10, 9, 3), + PIN_FIELD_BASE(198, 199, 4, 0x0000, 0x10, 21, 3), + PINS_FIELD_BASE(200, 201, 6, 0x0010, 0x10, 15, 3), + PIN_FIELD_BASE(202, 202, 4, 0x0000, 0x10, 12, 3), +}; + +static const struct mtk_pin_field_calc mt6779_pin_pupd_range[] = { + PIN_FIELD_BASE(10, 15, 3, 0x0070, 0x10, 0, 1), + PIN_FIELD_BASE(57, 57, 3, 0x0070, 0x10, 9, 1), + PIN_FIELD_BASE(58, 58, 3, 0x0070, 0x10, 8, 1), + PIN_FIELD_BASE(59, 60, 3, 0x0070, 0x10, 6, 1), + PIN_FIELD_BASE(128, 129, 1, 0x0060, 0x10, 7, 1), + PIN_FIELD_BASE(130, 130, 1, 0x0060, 0x10, 6, 1), + PIN_FIELD_BASE(131, 131, 1, 0x0060, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 1, 0x0060, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 1, 0x0060, 0x10, 10, 1), + PIN_FIELD_BASE(134, 135, 1, 0x0060, 0x10, 0, 1), + PIN_FIELD_BASE(136, 136, 1, 0x0060, 0x10, 5, 1), + PIN_FIELD_BASE(137, 137, 1, 0x0060, 0x10, 2, 1), + PIN_FIELD_BASE(138, 138, 1, 0x0060, 0x10, 4, 1), + PIN_FIELD_BASE(139, 139, 1, 0x0060, 0x10, 3, 1), + PIN_FIELD_BASE(167, 168, 7, 0x0020, 0x10, 1, 1), + PIN_FIELD_BASE(169, 169, 7, 0x0020, 0x10, 4, 1), + PIN_FIELD_BASE(170, 170, 7, 0x0020, 0x10, 6, 1), + PIN_FIELD_BASE(171, 171, 7, 0x0020, 0x10, 8, 1), + PIN_FIELD_BASE(172, 172, 7, 0x0020, 0x10, 3, 1), + PIN_FIELD_BASE(173, 173, 7, 0x0020, 0x10, 7, 1), + PIN_FIELD_BASE(174, 175, 7, 0x0020, 0x10, 9, 1), + PIN_FIELD_BASE(176, 176, 7, 0x0020, 0x10, 0, 1), + PIN_FIELD_BASE(177, 177, 7, 0x0020, 0x10, 5, 1), + PIN_FIELD_BASE(178, 178, 7, 0x0020, 0x10, 11, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_r0_range[] = { + PIN_FIELD_BASE(10, 15, 3, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(57, 57, 3, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(58, 58, 3, 0x0090, 0x10, 8, 1), + PIN_FIELD_BASE(59, 60, 3, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(128, 129, 1, 0x0080, 0x10, 7, 1), + PIN_FIELD_BASE(130, 130, 1, 0x0080, 0x10, 6, 1), + 
PIN_FIELD_BASE(131, 131, 1, 0x0080, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 1, 0x0080, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 1, 0x0080, 0x10, 10, 1), + PIN_FIELD_BASE(134, 135, 1, 0x0080, 0x10, 0, 1), + PIN_FIELD_BASE(136, 136, 1, 0x0080, 0x10, 5, 1), + PIN_FIELD_BASE(137, 137, 1, 0x0080, 0x10, 2, 1), + PIN_FIELD_BASE(138, 138, 1, 0x0080, 0x10, 4, 1), + PIN_FIELD_BASE(139, 139, 1, 0x0080, 0x10, 3, 1), + PIN_FIELD_BASE(167, 168, 7, 0x0030, 0x10, 1, 1), + PIN_FIELD_BASE(169, 169, 7, 0x0030, 0x10, 4, 1), + PIN_FIELD_BASE(170, 170, 7, 0x0030, 0x10, 6, 1), + PIN_FIELD_BASE(171, 171, 7, 0x0030, 0x10, 8, 1), + PIN_FIELD_BASE(172, 172, 7, 0x0030, 0x10, 3, 1), + PIN_FIELD_BASE(173, 173, 7, 0x0030, 0x10, 7, 1), + PIN_FIELD_BASE(174, 175, 7, 0x0030, 0x10, 9, 1), + PIN_FIELD_BASE(176, 176, 7, 0x0030, 0x10, 0, 1), + PIN_FIELD_BASE(177, 177, 7, 0x0030, 0x10, 5, 1), + PIN_FIELD_BASE(178, 178, 7, 0x0030, 0x10, 11, 1), +}; + +static const struct mtk_pin_field_calc mt6779_pin_r1_range[] = { + PIN_FIELD_BASE(10, 15, 3, 0x00a0, 0x10, 0, 1), + PIN_FIELD_BASE(57, 57, 3, 0x00a0, 0x10, 9, 1), + PIN_FIELD_BASE(58, 58, 3, 0x00a0, 0x10, 8, 1), + PIN_FIELD_BASE(59, 60, 3, 0x00a0, 0x10, 6, 1), + PIN_FIELD_BASE(128, 129, 1, 0x0090, 0x10, 7, 1), + PIN_FIELD_BASE(130, 130, 1, 0x0090, 0x10, 6, 1), + PIN_FIELD_BASE(131, 131, 1, 0x0090, 0x10, 9, 1), + PIN_FIELD_BASE(132, 132, 1, 0x0090, 0x10, 11, 1), + PIN_FIELD_BASE(133, 133, 1, 0x0090, 0x10, 10, 1), + PIN_FIELD_BASE(134, 135, 1, 0x0090, 0x10, 0, 1), + PIN_FIELD_BASE(136, 136, 1, 0x0090, 0x10, 5, 1), + PIN_FIELD_BASE(137, 137, 1, 0x0090, 0x10, 2, 1), + PIN_FIELD_BASE(138, 138, 1, 0x0090, 0x10, 4, 1), + PIN_FIELD_BASE(139, 139, 1, 0x0090, 0x10, 3, 1), + PIN_FIELD_BASE(167, 168, 7, 0x0040, 0x10, 1, 1), + PIN_FIELD_BASE(169, 169, 7, 0x0040, 0x10, 4, 1), + PIN_FIELD_BASE(170, 170, 7, 0x0040, 0x10, 6, 1), + PIN_FIELD_BASE(171, 171, 7, 0x0040, 0x10, 8, 1), + PIN_FIELD_BASE(172, 172, 7, 0x0040, 0x10, 3, 1), + PIN_FIELD_BASE(173, 173, 7, 0x0040, 0x10, 7, 1), + PIN_FIELD_BASE(174, 175, 7, 0x0040, 0x10, 9, 1), + PIN_FIELD_BASE(176, 176, 7, 0x0040, 0x10, 0, 1), + PIN_FIELD_BASE(177, 177, 7, 0x0040, 0x10, 5, 1), + PIN_FIELD_BASE(178, 178, 7, 0x0040, 0x10, 11, 1), +}; + +static const struct mtk_pin_reg_calc mt6779_reg_cals[PINCTRL_PIN_REG_MAX] = { + [PINCTRL_PIN_REG_MODE] = MTK_RANGE(mt6779_pin_mode_range), + [PINCTRL_PIN_REG_DIR] = MTK_RANGE(mt6779_pin_dir_range), + [PINCTRL_PIN_REG_DI] = MTK_RANGE(mt6779_pin_di_range), + [PINCTRL_PIN_REG_DO] = MTK_RANGE(mt6779_pin_do_range), + [PINCTRL_PIN_REG_SMT] = MTK_RANGE(mt6779_pin_smt_range), + [PINCTRL_PIN_REG_IES] = MTK_RANGE(mt6779_pin_ies_range), + [PINCTRL_PIN_REG_PU] = MTK_RANGE(mt6779_pin_pu_range), + [PINCTRL_PIN_REG_PD] = MTK_RANGE(mt6779_pin_pd_range), + [PINCTRL_PIN_REG_DRV] = MTK_RANGE(mt6779_pin_drv_range), + [PINCTRL_PIN_REG_PUPD] = MTK_RANGE(mt6779_pin_pupd_range), + [PINCTRL_PIN_REG_R0] = MTK_RANGE(mt6779_pin_r0_range), + [PINCTRL_PIN_REG_R1] = MTK_RANGE(mt6779_pin_r1_range), +}; + +static const char * const mt6779_pinctrl_register_base_names[] = { + "gpio", "iocfg_rm", "iocfg_br", "iocfg_lm", "iocfg_lb", + "iocfg_rt", "iocfg_lt", "iocfg_tl", +}; + +static const struct mtk_eint_hw mt6779_eint_hw = { + .port_mask = 7, + .ports = 6, + .ap_num = 195, + .db_cnt = 13, +}; + +static const struct mtk_pin_soc mt6779_data = { + .reg_cal = mt6779_reg_cals, + .pins = mtk_pins_mt6779, + .npins = ARRAY_SIZE(mtk_pins_mt6779), + .ngrps = ARRAY_SIZE(mtk_pins_mt6779), + .eint_hw = &mt6779_eint_hw, + .gpio_m = 0, + .ies_present = true, + 
.base_names = mt6779_pinctrl_register_base_names, + .nbase_names = ARRAY_SIZE(mt6779_pinctrl_register_base_names), + .bias_set_combo = mtk_pinconf_bias_set_combo, + .bias_get_combo = mtk_pinconf_bias_get_combo, + .drive_set = mtk_pinconf_drive_set_raw, + .drive_get = mtk_pinconf_drive_get_raw, + .adv_pull_get = mtk_pinconf_adv_pull_get, + .adv_pull_set = mtk_pinconf_adv_pull_set, +}; + +static const struct of_device_id mt6779_pinctrl_of_match[] = { + { .compatible = "mediatek,mt6779-pinctrl", }, + { } +}; + +static int mt6779_pinctrl_probe(struct platform_device *pdev) +{ + return mtk_paris_pinctrl_probe(pdev, &mt6779_data); +} + +static struct platform_driver mt6779_pinctrl_driver = { + .driver = { + .name = "mt6779-pinctrl", + .of_match_table = mt6779_pinctrl_of_match, + }, + .probe = mt6779_pinctrl_probe, +}; + +static int __init mt6779_pinctrl_init(void) +{ + return platform_driver_register(&mt6779_pinctrl_driver); +} +arch_initcall(mt6779_pinctrl_init); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("MediaTek MT6779 Pinctrl Driver"); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index b77b18fe5adc..2f3dfb56c3fa 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -243,6 +243,29 @@ static int mtk_xt_find_eint_num(struct mtk_pinctrl *hw, unsigned long eint_n) return EINT_NA; } +/* + * A virtual GPIO is used only inside the SoC and is never exported outside it. + * Some modules use a virtual GPIO as an EINT (e.g. pmif or usb). + * On MTK platforms, external interrupts (EINTs) and GPIOs map 1:1, + * so a GPIO can be set up as an EINT. + * However, some modules use a dedicated EINT that has no real GPIO pin + * behind it, so a virtual GPIO is used to map it. 
+ */ + +bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n) +{ + const struct mtk_pin_desc *desc; + bool virt_gpio = false; + + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n]; + + if (desc->funcs && !desc->funcs[desc->eint.eint_m].name) + virt_gpio = true; + + return virt_gpio; +} +EXPORT_SYMBOL_GPL(mtk_is_virt_gpio); + static int mtk_xt_get_gpio_n(void *data, unsigned long eint_n, unsigned int *gpio_n, struct gpio_chip **gpio_chip) @@ -295,6 +318,9 @@ static int mtk_xt_set_gpio_as_eint(void *data, unsigned long eint_n) if (err) return err; + if (mtk_is_virt_gpio(hw, gpio_n)) + return 0; + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n]; err = mtk_hw_set_value(hw, desc, PINCTRL_PIN_REG_MODE, diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h index 27df08736396..e2aae285b5fc 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.h @@ -80,7 +80,7 @@ enum { DRV_GRP_MAX, }; -static const char * const mtk_default_register_base_names[] = { +static const char * const mtk_default_register_base_names[] __maybe_unused = { "base", }; @@ -315,4 +315,5 @@ int mtk_pinconf_adv_drive_set(struct mtk_pinctrl *hw, int mtk_pinconf_adv_drive_get(struct mtk_pinctrl *hw, const struct mtk_pin_desc *desc, u32 *val); +bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n); #endif /* __PINCTRL_MTK_COMMON_V2_H */ diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-mt6779.h b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6779.h new file mode 100644 index 000000000000..0a48d6686ebb --- /dev/null +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-mt6779.h @@ -0,0 +1,2085 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 MediaTek Inc. 
+ * Author: Andy Teng <andy.teng@mediatek.com> + * + */ + +#ifndef __PINCTRL_MTK_MT6779_H +#define __PINCTRL_MTK_MT6779_H + +#include "pinctrl-paris.h" + +static const struct mtk_pin_desc mtk_pins_mt6779[] = { + MTK_PIN( + 0, "GPIO0", + MTK_EINT_FUNCTION(0, 0), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO0"), + MTK_FUNCTION(1, "SPI6_MI"), + MTK_FUNCTION(2, "I2S5_LRCK"), + MTK_FUNCTION(3, "TDM_LRCK_2ND"), + MTK_FUNCTION(4, "PCM1_SYNC"), + MTK_FUNCTION(5, "SCL_6306"), + MTK_FUNCTION(6, "TP_GPIO0_AO"), + MTK_FUNCTION(7, "PTA_RXD") + ), + MTK_PIN( + 1, "GPIO1", + MTK_EINT_FUNCTION(0, 1), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO1"), + MTK_FUNCTION(1, "SPI6_CSB"), + MTK_FUNCTION(2, "I2S5_DO"), + MTK_FUNCTION(3, "TDM_DATA0_2ND"), + MTK_FUNCTION(4, "PCM1_DO0"), + MTK_FUNCTION(5, "SDA_6306"), + MTK_FUNCTION(6, "TP_GPIO1_AO"), + MTK_FUNCTION(7, "PTA_TXD") + ), + MTK_PIN( + 2, "GPIO2", + MTK_EINT_FUNCTION(0, 2), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO2"), + MTK_FUNCTION(1, "SPI6_MO"), + MTK_FUNCTION(2, "I2S5_BCK"), + MTK_FUNCTION(3, "TDM_BCK_2ND"), + MTK_FUNCTION(4, "PCM1_CLK"), + MTK_FUNCTION(5, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(6, "TP_GPIO2_AO") + ), + MTK_PIN( + 3, "GPIO3", + MTK_EINT_FUNCTION(0, 3), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO3"), + MTK_FUNCTION(1, "SPI6_CLK"), + MTK_FUNCTION(2, "I2S5_MCK"), + MTK_FUNCTION(3, "TDM_MCK_2ND"), + MTK_FUNCTION(4, "EXT_FRAME_SYNC"), + MTK_FUNCTION(5, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(6, "TP_GPIO3_AO") + ), + MTK_PIN( + 4, "GPIO4", + MTK_EINT_FUNCTION(0, 4), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO4"), + MTK_FUNCTION(1, "SPI7_MI"), + MTK_FUNCTION(2, "I2S0_MCK"), + MTK_FUNCTION(3, "TDM_DATA1_2ND"), + MTK_FUNCTION(4, "PCM1_DO1"), + MTK_FUNCTION(5, "DMIC1_CLK"), + MTK_FUNCTION(6, "TP_GPIO4_AO"), + MTK_FUNCTION(7, "SCL8") + ), + MTK_PIN( + 5, "GPIO5", + MTK_EINT_FUNCTION(0, 5), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO5"), + MTK_FUNCTION(1, "SPI7_CSB"), + MTK_FUNCTION(2, "I2S0_BCK"), + MTK_FUNCTION(3, "TDM_DATA2_2ND"), + MTK_FUNCTION(4, "PCM1_DO2"), + MTK_FUNCTION(5, "DMIC1_DAT"), + MTK_FUNCTION(6, "TP_GPIO5_AO"), + MTK_FUNCTION(7, "SDA8") + ), + MTK_PIN( + 6, "GPIO6", + MTK_EINT_FUNCTION(0, 6), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO6"), + MTK_FUNCTION(1, "SPI7_MO"), + MTK_FUNCTION(2, "I2S0_LRCK"), + MTK_FUNCTION(3, "TDM_DATA3_2ND"), + MTK_FUNCTION(4, "PCM1_DI"), + MTK_FUNCTION(5, "DMIC_CLK"), + MTK_FUNCTION(6, "TP_GPIO6_AO"), + MTK_FUNCTION(7, "SCL9") + ), + MTK_PIN( + 7, "GPIO7", + MTK_EINT_FUNCTION(0, 7), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO7"), + MTK_FUNCTION(1, "SPI7_CLK"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(4, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(5, "DMIC_DAT"), + MTK_FUNCTION(6, "TP_GPIO7_AO"), + MTK_FUNCTION(7, "SDA9") + ), + MTK_PIN( + 8, "GPIO8", + MTK_EINT_FUNCTION(0, 8), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO8"), + MTK_FUNCTION(1, "PWM_0"), + MTK_FUNCTION(2, "I2S2_DI2"), + MTK_FUNCTION(3, "SRCLKENAI0"), + MTK_FUNCTION(4, "URXD1"), + MTK_FUNCTION(5, "I2S0_MCK"), + MTK_FUNCTION(6, "CONN_MCU_DBGACK_N"), + MTK_FUNCTION(7, "IDDIG") + ), + MTK_PIN( + 9, "GPIO9", + MTK_EINT_FUNCTION(0, 9), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO9"), + MTK_FUNCTION(1, "PWM_3"), + MTK_FUNCTION(2, "MD_INT0"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(4, "UTXD1"), + MTK_FUNCTION(5, "I2S0_BCK"), + MTK_FUNCTION(6, "CONN_MCU_TRST_B"), + MTK_FUNCTION(7, "USB_DRVVBUS") + ), + MTK_PIN( + 10, "GPIO10", + MTK_EINT_FUNCTION(0, 10), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO10"), + MTK_FUNCTION(1, "MSDC1_CLK_A"), + MTK_FUNCTION(2, "TP_URXD1_AO"), + MTK_FUNCTION(3, 
"I2S1_LRCK"), + MTK_FUNCTION(4, "UCTS0"), + MTK_FUNCTION(5, "DMIC1_CLK"), + MTK_FUNCTION(6, "KPCOL2"), + MTK_FUNCTION(7, "SCL8") + ), + MTK_PIN( + 11, "GPIO11", + MTK_EINT_FUNCTION(0, 11), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO11"), + MTK_FUNCTION(1, "MSDC1_CMD_A"), + MTK_FUNCTION(2, "TP_UTXD1_AO"), + MTK_FUNCTION(3, "I2S1_DO"), + MTK_FUNCTION(4, "URTS0"), + MTK_FUNCTION(5, "DMIC1_DAT"), + MTK_FUNCTION(6, "KPROW2"), + MTK_FUNCTION(7, "SDA8") + ), + MTK_PIN( + 12, "GPIO12", + MTK_EINT_FUNCTION(0, 12), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO12"), + MTK_FUNCTION(1, "MSDC1_DAT3_A"), + MTK_FUNCTION(2, "TP_URXD2_AO"), + MTK_FUNCTION(3, "I2S1_MCK"), + MTK_FUNCTION(4, "UCTS1"), + MTK_FUNCTION(5, "DMIC_CLK"), + MTK_FUNCTION(6, "ANT_SEL9"), + MTK_FUNCTION(7, "SCL9") + ), + MTK_PIN( + 13, "GPIO13", + MTK_EINT_FUNCTION(0, 13), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO13"), + MTK_FUNCTION(1, "MSDC1_DAT0_A"), + MTK_FUNCTION(2, "TP_UTXD2_AO"), + MTK_FUNCTION(3, "I2S1_BCK"), + MTK_FUNCTION(4, "URTS1"), + MTK_FUNCTION(5, "DMIC_DAT"), + MTK_FUNCTION(6, "ANT_SEL10"), + MTK_FUNCTION(7, "SDA9") + ), + MTK_PIN( + 14, "GPIO14", + MTK_EINT_FUNCTION(0, 14), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO14"), + MTK_FUNCTION(1, "MSDC1_DAT2_A"), + MTK_FUNCTION(2, "PWM_3"), + MTK_FUNCTION(3, "IDDIG"), + MTK_FUNCTION(4, "MD_INT0"), + MTK_FUNCTION(5, "PTA_RXD"), + MTK_FUNCTION(6, "ANT_SEL11") + ), + MTK_PIN( + 15, "GPIO15", + MTK_EINT_FUNCTION(0, 15), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO15"), + MTK_FUNCTION(1, "MSDC1_DAT1_A"), + MTK_FUNCTION(2, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(3, "USB_DRVVBUS"), + MTK_FUNCTION(4, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(5, "PTA_TXD"), + MTK_FUNCTION(6, "ANT_SEL12") + ), + MTK_PIN( + 16, "GPIO16", + MTK_EINT_FUNCTION(0, 16), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO16"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "EXT_FRAME_SYNC"), + MTK_FUNCTION(3, "MFG_EJTAG_TRSTN"), + MTK_FUNCTION(4, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(5, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(6, "PWM_2"), + MTK_FUNCTION(7, "JTRSTN_SEL1") + ), + MTK_PIN( + 17, "GPIO17", + MTK_EINT_FUNCTION(0, 17), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO17"), + MTK_FUNCTION(1, "SPI0_A_MI"), + MTK_FUNCTION(2, "SCP_SPI0_MI"), + MTK_FUNCTION(3, "MFG_EJTAG_TDO"), + MTK_FUNCTION(4, "DPI_HSYNC"), + MTK_FUNCTION(5, "MFG_DFD_JTAG_TDO"), + MTK_FUNCTION(6, "DFD_TDO"), + MTK_FUNCTION(7, "JTDO_SEL1") + ), + MTK_PIN( + 18, "GPIO18", + MTK_EINT_FUNCTION(0, 18), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO18"), + MTK_FUNCTION(1, "SPI0_A_MO"), + MTK_FUNCTION(2, "SCP_SPI0_MO"), + MTK_FUNCTION(3, "MFG_EJTAG_TDI"), + MTK_FUNCTION(4, "DPI_VSYNC"), + MTK_FUNCTION(5, "MFG_DFD_JTAG_TDI"), + MTK_FUNCTION(6, "DFD_TDI"), + MTK_FUNCTION(7, "JTDI_SEL1") + ), + MTK_PIN( + 19, "GPIO19", + MTK_EINT_FUNCTION(0, 19), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO19"), + MTK_FUNCTION(1, "SPI0_A_CSB"), + MTK_FUNCTION(2, "SCP_SPI0_CS"), + MTK_FUNCTION(3, "MFG_EJTAG_TMS"), + MTK_FUNCTION(4, "DPI_DE"), + MTK_FUNCTION(5, "MFG_DFD_JTAG_TMS"), + MTK_FUNCTION(6, "DFD_TMS"), + MTK_FUNCTION(7, "JTMS_SEL1") + ), + MTK_PIN( + 20, "GPIO20", + MTK_EINT_FUNCTION(0, 20), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO20"), + MTK_FUNCTION(1, "SPI0_A_CLK"), + MTK_FUNCTION(2, "SCP_SPI0_CK"), + MTK_FUNCTION(3, "MFG_EJTAG_TCK"), + MTK_FUNCTION(4, "DPI_CK"), + MTK_FUNCTION(5, "MFG_DFD_JTAG_TCK"), + MTK_FUNCTION(6, "DFD_TCK_XI"), + MTK_FUNCTION(7, "JTCK_SEL1") + ), + MTK_PIN( + 21, "GPIO21", + MTK_EINT_FUNCTION(0, 21), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO21"), + MTK_FUNCTION(1, "PWM_0"), + MTK_FUNCTION(2, "CMFLASH0"), + MTK_FUNCTION(3, 
"CMVREF2"), + MTK_FUNCTION(4, "CLKM0"), + MTK_FUNCTION(5, "ANT_SEL9"), + MTK_FUNCTION(6, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(7, "DBG_MON_A27") + ), + MTK_PIN( + 22, "GPIO22", + MTK_EINT_FUNCTION(0, 22), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO22"), + MTK_FUNCTION(1, "PWM_1"), + MTK_FUNCTION(2, "CMFLASH1"), + MTK_FUNCTION(3, "CMVREF3"), + MTK_FUNCTION(4, "CLKM1"), + MTK_FUNCTION(5, "ANT_SEL10"), + MTK_FUNCTION(7, "DBG_MON_A28") + ), + MTK_PIN( + 23, "GPIO23", + MTK_EINT_FUNCTION(0, 23), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO23"), + MTK_FUNCTION(1, "PWM_2"), + MTK_FUNCTION(2, "CMFLASH2"), + MTK_FUNCTION(3, "CMVREF0"), + MTK_FUNCTION(4, "CLKM2"), + MTK_FUNCTION(5, "ANT_SEL11"), + MTK_FUNCTION(7, "DBG_MON_A29") + ), + MTK_PIN( + 24, "GPIO24", + MTK_EINT_FUNCTION(0, 24), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO24"), + MTK_FUNCTION(1, "PWM_0"), + MTK_FUNCTION(2, "CMFLASH3"), + MTK_FUNCTION(3, "CMVREF1"), + MTK_FUNCTION(4, "CLKM3"), + MTK_FUNCTION(5, "ANT_SEL12"), + MTK_FUNCTION(7, "DBG_MON_A30") + ), + MTK_PIN( + 25, "GPIO25", + MTK_EINT_FUNCTION(0, 25), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO25"), + MTK_FUNCTION(1, "SRCLKENAI0"), + MTK_FUNCTION(2, "UCTS0"), + MTK_FUNCTION(3, "SCL8"), + MTK_FUNCTION(4, "CMVREF4"), + MTK_FUNCTION(5, "I2S0_LRCK"), + MTK_FUNCTION(6, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(7, "DBG_MON_A31") + ), + MTK_PIN( + 26, "GPIO26", + MTK_EINT_FUNCTION(0, 26), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO26"), + MTK_FUNCTION(1, "PWM_0"), + MTK_FUNCTION(2, "URTS0"), + MTK_FUNCTION(3, "SDA8"), + MTK_FUNCTION(4, "CLKM0"), + MTK_FUNCTION(5, "I2S0_DI"), + MTK_FUNCTION(6, "AGPS_SYNC"), + MTK_FUNCTION(7, "DBG_MON_A32") + ), + MTK_PIN( + 27, "GPIO27", + MTK_EINT_FUNCTION(0, 27), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO27"), + MTK_FUNCTION(1, "AP_GOOD") + ), + MTK_PIN( + 28, "GPIO28", + MTK_EINT_FUNCTION(0, 28), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO28"), + MTK_FUNCTION(1, "SCL5") + ), + MTK_PIN( + 29, "GPIO29", + MTK_EINT_FUNCTION(0, 29), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO29"), + MTK_FUNCTION(1, "SDA5") + ), + MTK_PIN( + 30, "GPIO30", + MTK_EINT_FUNCTION(0, 30), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO30"), + MTK_FUNCTION(1, "I2S1_MCK"), + MTK_FUNCTION(2, "I2S3_MCK"), + MTK_FUNCTION(3, "I2S2_MCK"), + MTK_FUNCTION(4, "DPI_D0"), + MTK_FUNCTION(5, "SPI4_MI"), + MTK_FUNCTION(6, "CONN_MCU_DBGI_N") + ), + MTK_PIN( + 31, "GPIO31", + MTK_EINT_FUNCTION(0, 31), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO31"), + MTK_FUNCTION(1, "I2S1_BCK"), + MTK_FUNCTION(2, "I2S3_BCK"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(4, "DPI_D1"), + MTK_FUNCTION(5, "SPI4_CSB"), + MTK_FUNCTION(6, "CONN_MCU_TDO") + ), + MTK_PIN( + 32, "GPIO32", + MTK_EINT_FUNCTION(0, 32), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO32"), + MTK_FUNCTION(1, "I2S1_LRCK"), + MTK_FUNCTION(2, "I2S3_LRCK"), + MTK_FUNCTION(3, "I2S2_LRCK"), + MTK_FUNCTION(4, "DPI_D2"), + MTK_FUNCTION(5, "SPI4_MO"), + MTK_FUNCTION(6, "CONN_MCU_TDI") + ), + MTK_PIN( + 33, "GPIO33", + MTK_EINT_FUNCTION(0, 33), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO33"), + MTK_FUNCTION(1, "I2S2_DI"), + MTK_FUNCTION(2, "I2S0_DI"), + MTK_FUNCTION(3, "I2S5_DO"), + MTK_FUNCTION(4, "DPI_D3"), + MTK_FUNCTION(5, "SPI4_CLK"), + MTK_FUNCTION(6, "CONN_MCU_TMS") + ), + MTK_PIN( + 34, "GPIO34", + MTK_EINT_FUNCTION(0, 34), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO34"), + MTK_FUNCTION(1, "I2S1_DO"), + MTK_FUNCTION(2, "I2S3_DO"), + MTK_FUNCTION(3, "I2S2_DI2"), + MTK_FUNCTION(4, "DPI_D4"), + MTK_FUNCTION(5, "AGPS_SYNC"), + MTK_FUNCTION(6, "CONN_MCU_TCK") + ), + MTK_PIN( + 35, "GPIO35", + MTK_EINT_FUNCTION(0, 35), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO35"), + 
MTK_FUNCTION(1, "TDM_LRCK"), + MTK_FUNCTION(2, "I2S1_LRCK"), + MTK_FUNCTION(3, "I2S5_LRCK"), + MTK_FUNCTION(4, "DPI_D5"), + MTK_FUNCTION(5, "SPI5_A_MO"), + MTK_FUNCTION(6, "IO_JTAG_TDI"), + MTK_FUNCTION(7, "PWM_2") + ), + MTK_PIN( + 36, "GPIO36", + MTK_EINT_FUNCTION(0, 36), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO36"), + MTK_FUNCTION(1, "TDM_BCK"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "I2S5_BCK"), + MTK_FUNCTION(4, "DPI_D6"), + MTK_FUNCTION(5, "SPI5_A_CSB"), + MTK_FUNCTION(6, "IO_JTAG_TRSTN"), + MTK_FUNCTION(7, "SRCLKENAI1") + ), + MTK_PIN( + 37, "GPIO37", + MTK_EINT_FUNCTION(0, 37), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO37"), + MTK_FUNCTION(1, "TDM_MCK"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "I2S5_MCK"), + MTK_FUNCTION(4, "DPI_D7"), + MTK_FUNCTION(5, "SPI5_A_MI"), + MTK_FUNCTION(6, "IO_JTAG_TCK"), + MTK_FUNCTION(7, "SRCLKENAI0") + ), + MTK_PIN( + 38, "GPIO38", + MTK_EINT_FUNCTION(0, 38), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO38"), + MTK_FUNCTION(1, "TDM_DATA0"), + MTK_FUNCTION(2, "I2S2_DI"), + MTK_FUNCTION(3, "I2S5_DO"), + MTK_FUNCTION(4, "DPI_D8"), + MTK_FUNCTION(5, "SPI5_A_CLK"), + MTK_FUNCTION(6, "IO_JTAG_TDO"), + MTK_FUNCTION(7, "CONN_TCXOENA_REQ") + ), + MTK_PIN( + 39, "GPIO39", + MTK_EINT_FUNCTION(0, 39), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO39"), + MTK_FUNCTION(1, "TDM_DATA1"), + MTK_FUNCTION(2, "I2S1_DO"), + MTK_FUNCTION(3, "I2S2_DI2"), + MTK_FUNCTION(4, "DPI_D9"), + MTK_FUNCTION(5, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(6, "IO_JTAG_TMS"), + MTK_FUNCTION(7, "IDDIG") + ), + MTK_PIN( + 40, "GPIO40", + MTK_EINT_FUNCTION(0, 40), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO40"), + MTK_FUNCTION(1, "TDM_DATA2"), + MTK_FUNCTION(2, "SCL9"), + MTK_FUNCTION(3, "PWM_3"), + MTK_FUNCTION(4, "DPI_D10"), + MTK_FUNCTION(5, "SRCLKENAI0"), + MTK_FUNCTION(6, "DAP_MD32_SWD"), + MTK_FUNCTION(7, "USB_DRVVBUS") + ), + MTK_PIN( + 41, "GPIO41", + MTK_EINT_FUNCTION(0, 41), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO41"), + MTK_FUNCTION(1, "TDM_DATA3"), + MTK_FUNCTION(2, "SDA9"), + MTK_FUNCTION(3, "PWM_1"), + MTK_FUNCTION(4, "DPI_D11"), + MTK_FUNCTION(5, "CLKM1"), + MTK_FUNCTION(6, "DAP_MD32_SWCK") + ), + MTK_PIN( + 42, "GPIO42", + MTK_EINT_FUNCTION(0, 42), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO42"), + MTK_FUNCTION(1, "DISP_PWM") + ), + MTK_PIN( + 43, "GPIO43", + MTK_EINT_FUNCTION(0, 43), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO43"), + MTK_FUNCTION(1, "DSI_TE") + ), + MTK_PIN( + 44, "GPIO44", + MTK_EINT_FUNCTION(0, 44), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO44"), + MTK_FUNCTION(1, "LCM_RST") + ), + MTK_PIN( + 45, "GPIO45", + MTK_EINT_FUNCTION(0, 45), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO45"), + MTK_FUNCTION(1, "SCL6"), + MTK_FUNCTION(2, "SCP_SCL0"), + MTK_FUNCTION(3, "SCP_SCL1"), + MTK_FUNCTION(4, "SCL_6306") + ), + MTK_PIN( + 46, "GPIO46", + MTK_EINT_FUNCTION(0, 46), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO46"), + MTK_FUNCTION(1, "SDA6"), + MTK_FUNCTION(2, "SCP_SDA0"), + MTK_FUNCTION(3, "SCP_SDA1"), + MTK_FUNCTION(4, "SDA_6306") + ), + MTK_PIN( + 47, "GPIO47", + MTK_EINT_FUNCTION(0, 47), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO47"), + MTK_FUNCTION(1, "SPI1_A_MI"), + MTK_FUNCTION(2, "SCP_SPI1_A_MI"), + MTK_FUNCTION(3, "KPCOL2"), + MTK_FUNCTION(4, "MD_URXD0"), + MTK_FUNCTION(5, "CONN_UART0_RXD"), + MTK_FUNCTION(6, "SSPM_URXD_AO"), + MTK_FUNCTION(7, "DBG_MON_B32") + ), + MTK_PIN( + 48, "GPIO48", + MTK_EINT_FUNCTION(0, 48), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO48"), + MTK_FUNCTION(1, "SPI1_A_CSB"), + MTK_FUNCTION(2, "SCP_SPI1_A_CS"), + MTK_FUNCTION(3, "KPROW2"), + MTK_FUNCTION(4, "MD_UTXD0"), + MTK_FUNCTION(5, "CONN_UART0_TXD"), + MTK_FUNCTION(6, 
"SSPM_UTXD_AO"), + MTK_FUNCTION(7, "DBG_MON_B31") + ), + MTK_PIN( + 49, "GPIO49", + MTK_EINT_FUNCTION(0, 49), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO49"), + MTK_FUNCTION(1, "SPI1_A_MO"), + MTK_FUNCTION(2, "SCP_SPI1_A_MO"), + MTK_FUNCTION(3, "UCTS0"), + MTK_FUNCTION(4, "MD_URXD1"), + MTK_FUNCTION(5, "PWM_1"), + MTK_FUNCTION(6, "TP_URXD2_AO"), + MTK_FUNCTION(7, "DBG_MON_B30") + ), + MTK_PIN( + 50, "GPIO50", + MTK_EINT_FUNCTION(0, 50), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO50"), + MTK_FUNCTION(1, "SPI1_A_CLK"), + MTK_FUNCTION(2, "SCP_SPI1_A_CK"), + MTK_FUNCTION(3, "URTS0"), + MTK_FUNCTION(4, "MD_UTXD1"), + MTK_FUNCTION(5, "WIFI_TXD"), + MTK_FUNCTION(6, "TP_UTXD2_AO"), + MTK_FUNCTION(7, "DBG_MON_B29") + ), + MTK_PIN( + 51, "GPIO51", + MTK_EINT_FUNCTION(0, 51), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO51"), + MTK_FUNCTION(1, "SCL0") + ), + MTK_PIN( + 52, "GPIO52", + MTK_EINT_FUNCTION(0, 52), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO52"), + MTK_FUNCTION(1, "SDA0") + ), + MTK_PIN( + 53, "GPIO53", + MTK_EINT_FUNCTION(0, 53), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO53"), + MTK_FUNCTION(1, "URXD0"), + MTK_FUNCTION(2, "UTXD0"), + MTK_FUNCTION(3, "MD_URXD0"), + MTK_FUNCTION(4, "MD_URXD1"), + MTK_FUNCTION(5, "SSPM_URXD_AO"), + MTK_FUNCTION(7, "CONN_UART0_RXD") + ), + MTK_PIN( + 54, "GPIO54", + MTK_EINT_FUNCTION(0, 54), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO54"), + MTK_FUNCTION(1, "UTXD0"), + MTK_FUNCTION(2, "URXD0"), + MTK_FUNCTION(3, "MD_UTXD0"), + MTK_FUNCTION(4, "MD_UTXD1"), + MTK_FUNCTION(5, "SSPM_UTXD_AO"), + MTK_FUNCTION(6, "WIFI_TXD"), + MTK_FUNCTION(7, "CONN_UART0_TXD") + ), + MTK_PIN( + 55, "GPIO55", + MTK_EINT_FUNCTION(0, 55), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO55"), + MTK_FUNCTION(1, "SCL3"), + MTK_FUNCTION(2, "SCP_SCL0"), + MTK_FUNCTION(3, "SCP_SCL1"), + MTK_FUNCTION(4, "SCL_6306") + ), + MTK_PIN( + 56, "GPIO56", + MTK_EINT_FUNCTION(0, 56), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO56"), + MTK_FUNCTION(1, "SDA3"), + MTK_FUNCTION(2, "SCP_SDA0"), + MTK_FUNCTION(3, "SCP_SDA1"), + MTK_FUNCTION(4, "SDA_6306") + ), + MTK_PIN( + 57, "GPIO57", + MTK_EINT_FUNCTION(0, 57), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO57"), + MTK_FUNCTION(1, "KPROW1"), + MTK_FUNCTION(2, "PWM_1"), + MTK_FUNCTION(3, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(4, "CLKM1"), + MTK_FUNCTION(5, "IDDIG"), + MTK_FUNCTION(6, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(7, "MBISTREADEN_TRIGGER") + ), + MTK_PIN( + 58, "GPIO58", + MTK_EINT_FUNCTION(0, 58), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO58"), + MTK_FUNCTION(1, "KPROW0"), + MTK_FUNCTION(7, "DBG_MON_B28") + ), + MTK_PIN( + 59, "GPIO59", + MTK_EINT_FUNCTION(0, 59), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO59"), + MTK_FUNCTION(1, "KPCOL0"), + MTK_FUNCTION(7, "DBG_MON_B27") + ), + MTK_PIN( + 60, "GPIO60", + MTK_EINT_FUNCTION(0, 60), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO60"), + MTK_FUNCTION(1, "KPCOL1"), + MTK_FUNCTION(2, "PWM_2"), + MTK_FUNCTION(3, "UCTS1"), + MTK_FUNCTION(4, "CLKM2"), + MTK_FUNCTION(5, "USB_DRVVBUS"), + MTK_FUNCTION(7, "MBISTWRITEEN_TRIGGER") + ), + MTK_PIN( + 61, "GPIO61", + MTK_EINT_FUNCTION(0, 61), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO61"), + MTK_FUNCTION(1, "SCL1"), + MTK_FUNCTION(2, "SCP_SCL0"), + MTK_FUNCTION(3, "SCP_SCL1") + ), + MTK_PIN( + 62, "GPIO62", + MTK_EINT_FUNCTION(0, 62), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO62"), + MTK_FUNCTION(1, "SDA1"), + MTK_FUNCTION(2, "SCP_SDA0"), + MTK_FUNCTION(3, "SCP_SDA1") + ), + MTK_PIN( + 63, "GPIO63", + MTK_EINT_FUNCTION(0, 63), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO63"), + MTK_FUNCTION(1, "SPI2_MI"), + MTK_FUNCTION(2, "SCP_SPI2_MI"), + MTK_FUNCTION(3, "KPCOL2"), + MTK_FUNCTION(4, "MRG_DI"), + 
MTK_FUNCTION(5, "MD_URXD0"), + MTK_FUNCTION(6, "CONN_UART0_RXD"), + MTK_FUNCTION(7, "DBG_MON_B26") + ), + MTK_PIN( + 64, "GPIO64", + MTK_EINT_FUNCTION(0, 64), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO64"), + MTK_FUNCTION(1, "SPI2_CSB"), + MTK_FUNCTION(2, "SCP_SPI2_CS"), + MTK_FUNCTION(3, "KPROW2"), + MTK_FUNCTION(4, "MRG_SYNC"), + MTK_FUNCTION(5, "MD_UTXD0"), + MTK_FUNCTION(6, "CONN_UART0_TXD"), + MTK_FUNCTION(7, "DBG_MON_B25") + ), + MTK_PIN( + 65, "GPIO65", + MTK_EINT_FUNCTION(0, 65), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO65"), + MTK_FUNCTION(1, "SPI2_MO"), + MTK_FUNCTION(2, "SCP_SPI2_MO"), + MTK_FUNCTION(3, "SCP_SDA1"), + MTK_FUNCTION(4, "MRG_DO"), + MTK_FUNCTION(5, "MD_URXD1"), + MTK_FUNCTION(6, "PWM_3") + ), + MTK_PIN( + 66, "GPIO66", + MTK_EINT_FUNCTION(0, 66), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO66"), + MTK_FUNCTION(1, "SPI2_CLK"), + MTK_FUNCTION(2, "SCP_SPI2_CK"), + MTK_FUNCTION(3, "SCP_SCL1"), + MTK_FUNCTION(4, "MRG_CLK"), + MTK_FUNCTION(5, "MD_UTXD1"), + MTK_FUNCTION(6, "WIFI_TXD") + ), + MTK_PIN( + 67, "GPIO67", + MTK_EINT_FUNCTION(0, 67), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO67"), + MTK_FUNCTION(1, "I2S3_LRCK"), + MTK_FUNCTION(2, "I2S1_LRCK"), + MTK_FUNCTION(3, "URXD1"), + MTK_FUNCTION(4, "PCM0_SYNC"), + MTK_FUNCTION(5, "I2S5_LRCK"), + MTK_FUNCTION(6, "ANT_SEL9"), + MTK_FUNCTION(7, "DBG_MON_B10") + ), + MTK_PIN( + 68, "GPIO68", + MTK_EINT_FUNCTION(0, 68), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO68"), + MTK_FUNCTION(1, "I2S3_DO"), + MTK_FUNCTION(2, "I2S1_DO"), + MTK_FUNCTION(3, "UTXD1"), + MTK_FUNCTION(4, "PCM0_DO"), + MTK_FUNCTION(5, "I2S5_DO"), + MTK_FUNCTION(6, "ANT_SEL10"), + MTK_FUNCTION(7, "DBG_MON_B9") + ), + MTK_PIN( + 69, "GPIO69", + MTK_EINT_FUNCTION(0, 69), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO69"), + MTK_FUNCTION(1, "I2S3_MCK"), + MTK_FUNCTION(2, "I2S1_MCK"), + MTK_FUNCTION(3, "URTS1"), + MTK_FUNCTION(4, "AGPS_SYNC"), + MTK_FUNCTION(5, "I2S5_MCK"), + MTK_FUNCTION(6, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(7, "DBG_MON_B8") + ), + MTK_PIN( + 70, "GPIO70", + MTK_EINT_FUNCTION(0, 70), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO70"), + MTK_FUNCTION(1, "I2S0_DI"), + MTK_FUNCTION(2, "I2S2_DI"), + MTK_FUNCTION(3, "KPCOL2"), + MTK_FUNCTION(4, "PCM0_DI"), + MTK_FUNCTION(5, "I2S2_DI2"), + MTK_FUNCTION(6, "ANT_SEL11"), + MTK_FUNCTION(7, "DBG_MON_B7") + ), + MTK_PIN( + 71, "GPIO71", + MTK_EINT_FUNCTION(0, 71), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO71"), + MTK_FUNCTION(1, "I2S3_BCK"), + MTK_FUNCTION(2, "I2S1_BCK"), + MTK_FUNCTION(3, "KPROW2"), + MTK_FUNCTION(4, "PCM0_CLK"), + MTK_FUNCTION(5, "I2S5_BCK"), + MTK_FUNCTION(6, "ANT_SEL12"), + MTK_FUNCTION(7, "DBG_MON_B6") + ), + MTK_PIN( + 72, "GPIO72", + MTK_EINT_FUNCTION(0, 72), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO72"), + MTK_FUNCTION(1, "BPI_BUS19_OLAT0"), + MTK_FUNCTION(2, "CONN_BPI_BUS19_OLAT0") + ), + MTK_PIN( + 73, "GPIO73", + MTK_EINT_FUNCTION(0, 73), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO73"), + MTK_FUNCTION(1, "BPI_BUS18_PA_VM1"), + MTK_FUNCTION(2, "CONN_MIPI5_SCLK"), + MTK_FUNCTION(3, "MIPI5_SCLK") + ), + MTK_PIN( + 74, "GPIO74", + MTK_EINT_FUNCTION(0, 74), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO74"), + MTK_FUNCTION(1, "BPI_BUS17_PA_VM0"), + MTK_FUNCTION(2, "CONN_MIPI5_SDATA"), + MTK_FUNCTION(3, "MIPI5_SDATA") + ), + MTK_PIN( + 75, "GPIO75", + MTK_EINT_FUNCTION(0, 75), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO75"), + MTK_FUNCTION(1, "BPI_BUS20_OLAT1"), + MTK_FUNCTION(2, "CONN_BPI_BUS20_OLAT1"), + MTK_FUNCTION(3, "RFIC0_BSI_D2") + ), + MTK_PIN( + 76, "GPIO76", + MTK_EINT_FUNCTION(0, 76), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO76"), + MTK_FUNCTION(1, "RFIC0_BSI_D1") + ), + 
MTK_PIN( + 77, "GPIO77", + MTK_EINT_FUNCTION(0, 77), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO77"), + MTK_FUNCTION(1, "RFIC0_BSI_D0") + ), + MTK_PIN( + 78, "GPIO78", + MTK_EINT_FUNCTION(0, 78), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO78"), + MTK_FUNCTION(1, "BPI_BUS7"), + MTK_FUNCTION(7, "DBG_MON_B24") + ), + MTK_PIN( + 79, "GPIO79", + MTK_EINT_FUNCTION(0, 79), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO79"), + MTK_FUNCTION(1, "BPI_BUS6"), + MTK_FUNCTION(7, "DBG_MON_B23") + ), + MTK_PIN( + 80, "GPIO80", + MTK_EINT_FUNCTION(0, 80), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO80"), + MTK_FUNCTION(1, "BPI_BUS8"), + MTK_FUNCTION(7, "DBG_MON_B22") + ), + MTK_PIN( + 81, "GPIO81", + MTK_EINT_FUNCTION(0, 81), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO81"), + MTK_FUNCTION(1, "BPI_BUS9"), + MTK_FUNCTION(7, "DBG_MON_B21") + ), + MTK_PIN( + 82, "GPIO82", + MTK_EINT_FUNCTION(0, 82), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO82"), + MTK_FUNCTION(1, "BPI_BUS10"), + MTK_FUNCTION(7, "DBG_MON_B20") + ), + MTK_PIN( + 83, "GPIO83", + MTK_EINT_FUNCTION(0, 83), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO83"), + MTK_FUNCTION(1, "BPI_BUS11"), + MTK_FUNCTION(7, "DBG_MON_B19") + ), + MTK_PIN( + 84, "GPIO84", + MTK_EINT_FUNCTION(0, 84), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO84"), + MTK_FUNCTION(1, "BPI_BUS12"), + MTK_FUNCTION(2, "CONN_BPI_BUS12") + ), + MTK_PIN( + 85, "GPIO85", + MTK_EINT_FUNCTION(0, 85), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO85"), + MTK_FUNCTION(1, "BPI_BUS13"), + MTK_FUNCTION(2, "CONN_BPI_BUS13") + ), + MTK_PIN( + 86, "GPIO86", + MTK_EINT_FUNCTION(0, 86), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO86"), + MTK_FUNCTION(1, "BPI_BUS14"), + MTK_FUNCTION(2, "CONN_BPI_BUS14") + ), + MTK_PIN( + 87, "GPIO87", + MTK_EINT_FUNCTION(0, 87), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO87"), + MTK_FUNCTION(1, "BPI_BUS15"), + MTK_FUNCTION(2, "CONN_BPI_BUS15") + ), + MTK_PIN( + 88, "GPIO88", + MTK_EINT_FUNCTION(0, 88), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO88"), + MTK_FUNCTION(1, "BPI_BUS16"), + MTK_FUNCTION(2, "CONN_BPI_BUS16") + ), + MTK_PIN( + 89, "GPIO89", + MTK_EINT_FUNCTION(0, 89), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO89"), + MTK_FUNCTION(1, "BPI_BUS5"), + MTK_FUNCTION(7, "DBG_MON_B18") + ), + MTK_PIN( + 90, "GPIO90", + MTK_EINT_FUNCTION(0, 90), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO90"), + MTK_FUNCTION(1, "BPI_BUS4"), + MTK_FUNCTION(7, "DBG_MON_B17") + ), + MTK_PIN( + 91, "GPIO91", + MTK_EINT_FUNCTION(0, 91), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO91"), + MTK_FUNCTION(1, "BPI_BUS3") + ), + MTK_PIN( + 92, "GPIO92", + MTK_EINT_FUNCTION(0, 92), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO92"), + MTK_FUNCTION(1, "BPI_BUS2"), + MTK_FUNCTION(7, "DBG_MON_B16") + ), + MTK_PIN( + 93, "GPIO93", + MTK_EINT_FUNCTION(0, 93), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO93"), + MTK_FUNCTION(1, "BPI_BUS1") + ), + MTK_PIN( + 94, "GPIO94", + MTK_EINT_FUNCTION(0, 94), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO94"), + MTK_FUNCTION(1, "BPI_BUS0"), + MTK_FUNCTION(7, "DBG_MON_B15") + ), + MTK_PIN( + 95, "GPIO95", + MTK_EINT_FUNCTION(0, 95), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO95"), + MTK_FUNCTION(1, "MIPI0_SDATA") + ), + MTK_PIN( + 96, "GPIO96", + MTK_EINT_FUNCTION(0, 96), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO96"), + MTK_FUNCTION(1, "MIPI0_SCLK") + ), + MTK_PIN( + 97, "GPIO97", + MTK_EINT_FUNCTION(0, 97), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO97"), + MTK_FUNCTION(1, "MIPI1_SDATA") + ), + MTK_PIN( + 98, "GPIO98", + MTK_EINT_FUNCTION(0, 98), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO98"), + MTK_FUNCTION(1, "MIPI1_SCLK") + ), + MTK_PIN( + 99, "GPIO99", + MTK_EINT_FUNCTION(0, 99), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO99"), + MTK_FUNCTION(1, "MIPI2_SCLK"), 
+ MTK_FUNCTION(7, "DBG_MON_B14") + ), + MTK_PIN( + 100, "GPIO100", + MTK_EINT_FUNCTION(0, 100), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO100"), + MTK_FUNCTION(1, "MIPI2_SDATA"), + MTK_FUNCTION(7, "DBG_MON_B13") + ), + MTK_PIN( + 101, "GPIO101", + MTK_EINT_FUNCTION(0, 101), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO101"), + MTK_FUNCTION(1, "MIPI3_SCLK"), + MTK_FUNCTION(7, "DBG_MON_B12") + ), + MTK_PIN( + 102, "GPIO102", + MTK_EINT_FUNCTION(0, 102), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO102"), + MTK_FUNCTION(1, "MIPI3_SDATA"), + MTK_FUNCTION(7, "DBG_MON_B11") + ), + MTK_PIN( + 103, "GPIO103", + MTK_EINT_FUNCTION(0, 103), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO103"), + MTK_FUNCTION(1, "MIPI4_SCLK"), + MTK_FUNCTION(2, "CONN_MIPI4_SCLK") + ), + MTK_PIN( + 104, "GPIO104", + MTK_EINT_FUNCTION(0, 104), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO104"), + MTK_FUNCTION(1, "MIPI4_SDATA"), + MTK_FUNCTION(2, "CONN_MIPI4_SDATA") + ), + MTK_PIN( + 105, "GPIO105", + MTK_EINT_FUNCTION(0, 105), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO105"), + MTK_FUNCTION(1, "BPI_BUS22_OLAT3"), + MTK_FUNCTION(2, "CONN_BPI_BUS22_OLAT3") + ), + MTK_PIN( + 106, "GPIO106", + MTK_EINT_FUNCTION(0, 106), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO106"), + MTK_FUNCTION(1, "BPI_BUS21_OLAT2"), + MTK_FUNCTION(2, "CONN_BPI_BUS21_OLAT2") + ), + MTK_PIN( + 107, "GPIO107", + MTK_EINT_FUNCTION(0, 107), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO107"), + MTK_FUNCTION(1, "BPI_BUS24_ANT1"), + MTK_FUNCTION(2, "CONN_BPI_BUS24_ANT1") + ), + MTK_PIN( + 108, "GPIO108", + MTK_EINT_FUNCTION(0, 108), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO108"), + MTK_FUNCTION(1, "BPI_BUS25_ANT2"), + MTK_FUNCTION(2, "CONN_BPI_BUS25_ANT2") + ), + MTK_PIN( + 109, "GPIO109", + MTK_EINT_FUNCTION(0, 109), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO109"), + MTK_FUNCTION(1, "BPI_BUS23_ANT0"), + MTK_FUNCTION(2, "CONN_BPI_BUS23_ANT0") + ), + MTK_PIN( + 110, "GPIO110", + MTK_EINT_FUNCTION(0, 110), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO110"), + MTK_FUNCTION(1, "SCL4") + ), + MTK_PIN( + 111, "GPIO111", + MTK_EINT_FUNCTION(0, 111), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO111"), + MTK_FUNCTION(1, "SDA4") + ), + MTK_PIN( + 112, "GPIO112", + MTK_EINT_FUNCTION(0, 112), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO112"), + MTK_FUNCTION(1, "SCL2") + ), + MTK_PIN( + 113, "GPIO113", + MTK_EINT_FUNCTION(0, 113), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO113"), + MTK_FUNCTION(1, "SDA2") + ), + MTK_PIN( + 114, "GPIO114", + MTK_EINT_FUNCTION(0, 114), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO114"), + MTK_FUNCTION(1, "CLKM0"), + MTK_FUNCTION(2, "SPI3_MI"), + MTK_FUNCTION(7, "DBG_MON_B5") + ), + MTK_PIN( + 115, "GPIO115", + MTK_EINT_FUNCTION(0, 115), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO115"), + MTK_FUNCTION(1, "CLKM1"), + MTK_FUNCTION(2, "SPI3_CSB"), + MTK_FUNCTION(7, "DBG_MON_B4") + ), + MTK_PIN( + 116, "GPIO116", + MTK_EINT_FUNCTION(0, 116), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO116"), + MTK_FUNCTION(1, "CMMCLK0"), + MTK_FUNCTION(7, "DBG_MON_B3") + ), + MTK_PIN( + 117, "GPIO117", + MTK_EINT_FUNCTION(0, 117), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO117"), + MTK_FUNCTION(1, "CMMCLK1"), + MTK_FUNCTION(2, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(7, "DBG_MON_B2") + ), + MTK_PIN( + 118, "GPIO118", + MTK_EINT_FUNCTION(0, 118), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO118"), + MTK_FUNCTION(1, "CLKM2"), + MTK_FUNCTION(2, "SPI3_MO"), + MTK_FUNCTION(7, "DBG_MON_B1") + ), + MTK_PIN( + 119, "GPIO119", + MTK_EINT_FUNCTION(0, 119), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO119"), + MTK_FUNCTION(1, "CLKM3"), + MTK_FUNCTION(2, "SPI3_CLK"), + MTK_FUNCTION(7, "DBG_MON_B0") + ), + MTK_PIN( + 120, "GPIO120", + MTK_EINT_FUNCTION(0, 
120), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO120"), + MTK_FUNCTION(1, "CMMCLK2"), + MTK_FUNCTION(2, "CLKM2"), + MTK_FUNCTION(6, "ANT_SEL12"), + MTK_FUNCTION(7, "TP_UCTS2_AO") + ), + MTK_PIN( + 121, "GPIO121", + MTK_EINT_FUNCTION(0, 121), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO121"), + MTK_FUNCTION(1, "CMMCLK3"), + MTK_FUNCTION(2, "CLKM3"), + MTK_FUNCTION(3, "DVFSRC_EXT_REQ"), + MTK_FUNCTION(6, "ANT_SEL11"), + MTK_FUNCTION(7, "TP_URTS2_AO") + ), + MTK_PIN( + 122, "GPIO122", + MTK_EINT_FUNCTION(0, 122), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO122"), + MTK_FUNCTION(1, "CMVREF1"), + MTK_FUNCTION(2, "PCM0_SYNC"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(4, "AGPS_SYNC"), + MTK_FUNCTION(5, "PWM_1"), + MTK_FUNCTION(6, "ANT_SEL9"), + MTK_FUNCTION(7, "TP_UCTS1_AO") + ), + MTK_PIN( + 123, "GPIO123", + MTK_EINT_FUNCTION(0, 123), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO123"), + MTK_FUNCTION(2, "PCM0_DI"), + MTK_FUNCTION(3, "ADSP_JTAG_TRSTN"), + MTK_FUNCTION(4, "VPU_UDI_NTRST"), + MTK_FUNCTION(5, "SPM_JTAG_TRSTN"), + MTK_FUNCTION(6, "SSPM_JTAG_TRSTN") + ), + MTK_PIN( + 124, "GPIO124", + MTK_EINT_FUNCTION(0, 124), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO124"), + MTK_FUNCTION(1, "CMVREF2"), + MTK_FUNCTION(2, "PCM0_CLK"), + MTK_FUNCTION(3, "MD_INT0"), + MTK_FUNCTION(4, "EXT_FRAME_SYNC"), + MTK_FUNCTION(5, "PWM_2"), + MTK_FUNCTION(6, "ANT_SEL10"), + MTK_FUNCTION(7, "TP_URTS1_AO") + ), + MTK_PIN( + 125, "GPIO125", + MTK_EINT_FUNCTION(0, 125), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO125"), + MTK_FUNCTION(1, "CMVREF3"), + MTK_FUNCTION(2, "PCM0_DO"), + MTK_FUNCTION(3, "ADSP_JTAG_TMS"), + MTK_FUNCTION(4, "VPU_UDI_TMS"), + MTK_FUNCTION(5, "SPM_JTAG_TMS"), + MTK_FUNCTION(6, "SSPM_JTAG_TMS") + ), + MTK_PIN( + 126, "GPIO126", + MTK_EINT_FUNCTION(0, 126), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO126"), + MTK_FUNCTION(1, "CMVREF4"), + MTK_FUNCTION(2, "CMFLASH0"), + MTK_FUNCTION(6, "CONN_MCU_AICE_TMSC") + ), + MTK_PIN( + 127, "GPIO127", + MTK_EINT_FUNCTION(0, 127), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO127"), + MTK_FUNCTION(1, "CMVREF0"), + MTK_FUNCTION(2, "CMFLASH1"), + MTK_FUNCTION(6, "CONN_MCU_AICE_TCKC") + ), + MTK_PIN( + 128, "GPIO128", + MTK_EINT_FUNCTION(0, 128), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO128"), + MTK_FUNCTION(1, "MD1_SIM1_SIO"), + MTK_FUNCTION(2, "MD1_SIM2_SIO"), + MTK_FUNCTION(3, "CCU_JTAG_TRST"), + MTK_FUNCTION(4, "CONN_DSP_JINTP"), + MTK_FUNCTION(5, "SCP_JTAG_TRSTN"), + MTK_FUNCTION(6, "LVTS_FOUT"), + MTK_FUNCTION(7, "DBG_MON_A3") + ), + MTK_PIN( + 129, "GPIO129", + MTK_EINT_FUNCTION(0, 129), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO129"), + MTK_FUNCTION(1, "MD1_SIM1_SRST"), + MTK_FUNCTION(2, "MD1_SIM2_SRST"), + MTK_FUNCTION(3, "CCU_JTAG_TCK"), + MTK_FUNCTION(4, "CONN_DSP_JCK"), + MTK_FUNCTION(5, "SCP_JTAG_TCK"), + MTK_FUNCTION(6, "LVTS_SDO"), + MTK_FUNCTION(7, "DBG_MON_A4") + ), + MTK_PIN( + 130, "GPIO130", + MTK_EINT_FUNCTION(0, 130), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO130"), + MTK_FUNCTION(1, "MD1_SIM1_SCLK"), + MTK_FUNCTION(2, "MD1_SIM2_SCLK"), + MTK_FUNCTION(6, "LVTS_26M"), + MTK_FUNCTION(7, "DBG_MON_A5") + ), + MTK_PIN( + 131, "GPIO131", + MTK_EINT_FUNCTION(0, 131), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO131"), + MTK_FUNCTION(1, "MD1_SIM2_SCLK"), + MTK_FUNCTION(2, "MD1_SIM1_SCLK"), + MTK_FUNCTION(3, "CCU_JTAG_TDI"), + MTK_FUNCTION(4, "CONN_DSP_JDI"), + MTK_FUNCTION(5, "SCP_JTAG_TDI"), + MTK_FUNCTION(6, "LVTS_SCK"), + MTK_FUNCTION(7, "DBG_MON_A0") + ), + MTK_PIN( + 132, "GPIO132", + MTK_EINT_FUNCTION(0, 132), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO132"), + MTK_FUNCTION(1, "MD1_SIM2_SRST"), + MTK_FUNCTION(2, "MD1_SIM1_SRST"), + 
MTK_FUNCTION(3, "CCU_JTAG_TMS"), + MTK_FUNCTION(4, "CONN_DSP_JMS"), + MTK_FUNCTION(5, "SCP_JTAG_TMS"), + MTK_FUNCTION(6, "LVTS_SDI"), + MTK_FUNCTION(7, "DBG_MON_A1") + ), + MTK_PIN( + 133, "GPIO133", + MTK_EINT_FUNCTION(0, 133), + DRV_GRP0, + MTK_FUNCTION(0, "GPIO133"), + MTK_FUNCTION(1, "MD1_SIM2_SIO"), + MTK_FUNCTION(2, "MD1_SIM1_SIO"), + MTK_FUNCTION(3, "CCU_JTAG_TDO"), + MTK_FUNCTION(4, "CONN_DSP_JDO"), + MTK_FUNCTION(5, "SCP_JTAG_TDO"), + MTK_FUNCTION(6, "LVTS_SCF"), + MTK_FUNCTION(7, "DBG_MON_A2") + ), + MTK_PIN( + 134, "GPIO134", + MTK_EINT_FUNCTION(0, 134), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO134"), + MTK_FUNCTION(1, "MSDC1_CLK"), + MTK_FUNCTION(2, "PCM1_CLK"), + MTK_FUNCTION(3, "SPI5_B_MI"), + MTK_FUNCTION(4, "UDI_TCK"), + MTK_FUNCTION(5, "CONN_DSP_JCK"), + MTK_FUNCTION(6, "IPU_JTAG_TCK"), + MTK_FUNCTION(7, "JTCK_SEL3") + ), + MTK_PIN( + 135, "GPIO135", + MTK_EINT_FUNCTION(0, 135), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO135"), + MTK_FUNCTION(1, "MSDC1_CMD"), + MTK_FUNCTION(2, "PCM1_SYNC"), + MTK_FUNCTION(3, "SPI5_B_CSB"), + MTK_FUNCTION(4, "UDI_TMS"), + MTK_FUNCTION(5, "CONN_DSP_JMS"), + MTK_FUNCTION(6, "IPU_JTAG_TMS"), + MTK_FUNCTION(7, "JTMS_SEL3") + ), + MTK_PIN( + 136, "GPIO136", + MTK_EINT_FUNCTION(0, 136), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO136"), + MTK_FUNCTION(1, "MSDC1_DAT3"), + MTK_FUNCTION(2, "PCM1_DI"), + MTK_FUNCTION(3, "SPI5_B_MO"), + MTK_FUNCTION(4, "CONN_TCXOENA_REQ"), + MTK_FUNCTION(5, "CONN_DSP_JINTP"), + MTK_FUNCTION(6, "CONN_MCU_AICE_TMSC") + ), + MTK_PIN( + 137, "GPIO137", + MTK_EINT_FUNCTION(0, 137), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO137"), + MTK_FUNCTION(1, "MSDC1_DAT0"), + MTK_FUNCTION(2, "PCM1_DO0"), + MTK_FUNCTION(3, "SPI5_B_CLK"), + MTK_FUNCTION(4, "UDI_TDI"), + MTK_FUNCTION(5, "CONN_DSP_JDI"), + MTK_FUNCTION(6, "IPU_JTAG_TDI"), + MTK_FUNCTION(7, "JTDI_SEL3") + ), + MTK_PIN( + 138, "GPIO138", + MTK_EINT_FUNCTION(0, 138), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO138"), + MTK_FUNCTION(1, "MSDC1_DAT2"), + MTK_FUNCTION(2, "PCM1_DO2"), + MTK_FUNCTION(3, "ANT_SEL11"), + MTK_FUNCTION(4, "UDI_NTRST"), + MTK_FUNCTION(5, "CONN_MCU_AICE_TCKC"), + MTK_FUNCTION(6, "IPU_JTAG_TRST"), + MTK_FUNCTION(7, "JTRSTN_SEL3") + ), + MTK_PIN( + 139, "GPIO139", + MTK_EINT_FUNCTION(0, 139), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO139"), + MTK_FUNCTION(1, "MSDC1_DAT1"), + MTK_FUNCTION(2, "PCM1_DO1"), + MTK_FUNCTION(3, "ANT_SEL12"), + MTK_FUNCTION(4, "UDI_TDO"), + MTK_FUNCTION(5, "CONN_DSP_JDO"), + MTK_FUNCTION(6, "IPU_JTAG_TDO"), + MTK_FUNCTION(7, "JTDO_SEL3") + ), + MTK_PIN( + 140, "GPIO140", + MTK_EINT_FUNCTION(0, 140), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO140"), + MTK_FUNCTION(1, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(2, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(3, "ADSP_URXD0"), + MTK_FUNCTION(4, "SCL_6306"), + MTK_FUNCTION(5, "PTA_RXD"), + MTK_FUNCTION(6, "SSPM_URXD_AO") + ), + MTK_PIN( + 141, "GPIO141", + MTK_EINT_FUNCTION(0, 141), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO141"), + MTK_FUNCTION(1, "MD_INT2_C2K_UIM1_HOT_PLUG"), + MTK_FUNCTION(2, "MD_INT1_C2K_UIM0_HOT_PLUG"), + MTK_FUNCTION(3, "ADSP_UTXD0"), + MTK_FUNCTION(4, "SDA_6306"), + MTK_FUNCTION(5, "PTA_TXD"), + MTK_FUNCTION(6, "SSPM_UTXD_AO") + ), + MTK_PIN( + 142, "GPIO142", + MTK_EINT_FUNCTION(0, 142), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO142"), + MTK_FUNCTION(1, "SCP_VREQ_VAO"), + MTK_FUNCTION(2, "DVFSRC_EXT_REQ") + ), + MTK_PIN( + 143, "GPIO143", + MTK_EINT_FUNCTION(0, 143), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO143"), + MTK_FUNCTION(1, "AUD_DAT_MOSI2"), + MTK_FUNCTION(7, "DBG_MON_A9") + ), + MTK_PIN( + 144, "GPIO144", + 
MTK_EINT_FUNCTION(0, 144), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO144"), + MTK_FUNCTION(1, "AUD_NLE_MOSI1"), + MTK_FUNCTION(2, "AUD_CLK_MISO"), + MTK_FUNCTION(3, "I2S2_MCK"), + MTK_FUNCTION(5, "UDI_TCK"), + MTK_FUNCTION(6, "UFS_UNIPRO_SDA"), + MTK_FUNCTION(7, "DBG_MON_A10") + ), + MTK_PIN( + 145, "GPIO145", + MTK_EINT_FUNCTION(0, 145), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO145"), + MTK_FUNCTION(1, "AUD_NLE_MOSI0"), + MTK_FUNCTION(2, "AUD_SYNC_MISO"), + MTK_FUNCTION(3, "I2S2_BCK"), + MTK_FUNCTION(5, "UDI_TMS"), + MTK_FUNCTION(7, "DBG_MON_A11") + ), + MTK_PIN( + 146, "GPIO146", + MTK_EINT_FUNCTION(0, 146), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO146"), + MTK_FUNCTION(1, "AUD_DAT_MISO2"), + MTK_FUNCTION(3, "I2S2_DI2"), + MTK_FUNCTION(5, "UDI_TDO"), + MTK_FUNCTION(7, "DBG_MON_A14") + ), + MTK_PIN( + 147, "GPIO147", + MTK_EINT_FUNCTION(0, 147), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO147"), + MTK_FUNCTION(1, "ANT_SEL0"), + MTK_FUNCTION(2, "PWM_3") + ), + MTK_PIN( + 148, "GPIO148", + MTK_EINT_FUNCTION(0, 148), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO148"), + MTK_FUNCTION(1, "ANT_SEL1"), + MTK_FUNCTION(2, "SPI0_B_MI"), + MTK_FUNCTION(3, "SSPM_URXD_AO"), + MTK_FUNCTION(5, "TP_UCTS2_AO"), + MTK_FUNCTION(6, "CLKM0") + ), + MTK_PIN( + 149, "GPIO149", + MTK_EINT_FUNCTION(0, 149), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO149"), + MTK_FUNCTION(1, "ANT_SEL2"), + MTK_FUNCTION(2, "SPI0_B_CSB"), + MTK_FUNCTION(3, "SSPM_UTXD_AO"), + MTK_FUNCTION(5, "TP_URTS2_AO"), + MTK_FUNCTION(6, "CONN_TCXOENA_REQ") + ), + MTK_PIN( + 150, "GPIO150", + MTK_EINT_FUNCTION(0, 150), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO150"), + MTK_FUNCTION(1, "ANT_SEL3"), + MTK_FUNCTION(2, "SPI0_B_MO"), + MTK_FUNCTION(3, "UCTS1"), + MTK_FUNCTION(5, "TP_UCTS1_AO"), + MTK_FUNCTION(6, "IDDIG"), + MTK_FUNCTION(7, "SCL9") + ), + MTK_PIN( + 151, "GPIO151", + MTK_EINT_FUNCTION(0, 151), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO151"), + MTK_FUNCTION(1, "ANT_SEL4"), + MTK_FUNCTION(2, "SPI0_B_CLK"), + MTK_FUNCTION(3, "URTS1"), + MTK_FUNCTION(5, "TP_URTS1_AO"), + MTK_FUNCTION(6, "USB_DRVVBUS"), + MTK_FUNCTION(7, "SDA9") + ), + MTK_PIN( + 152, "GPIO152", + MTK_EINT_FUNCTION(0, 152), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO152"), + MTK_FUNCTION(1, "ANT_SEL5"), + MTK_FUNCTION(2, "SPI1_B_MI"), + MTK_FUNCTION(3, "CLKM3"), + MTK_FUNCTION(5, "TP_URXD1_AO"), + MTK_FUNCTION(6, "SCP_SPI1_B_MI"), + MTK_FUNCTION(7, "SCL8") + ), + MTK_PIN( + 153, "GPIO153", + MTK_EINT_FUNCTION(0, 153), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO153"), + MTK_FUNCTION(1, "ANT_SEL6"), + MTK_FUNCTION(2, "SPI1_B_CSB"), + MTK_FUNCTION(3, "SRCLKENAI0"), + MTK_FUNCTION(4, "PWM_0"), + MTK_FUNCTION(5, "TP_UTXD1_AO"), + MTK_FUNCTION(6, "SCP_SPI1_B_CS"), + MTK_FUNCTION(7, "SDA8") + ), + MTK_PIN( + 154, "GPIO154", + MTK_EINT_FUNCTION(0, 154), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO154"), + MTK_FUNCTION(1, "ANT_SEL7"), + MTK_FUNCTION(2, "SPI1_B_MO"), + MTK_FUNCTION(3, "SRCLKENAI1"), + MTK_FUNCTION(5, "TP_URXD2_AO"), + MTK_FUNCTION(6, "SCP_SPI1_B_MO") + ), + MTK_PIN( + 155, "GPIO155", + MTK_EINT_FUNCTION(0, 155), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO155"), + MTK_FUNCTION(1, "ANT_SEL8"), + MTK_FUNCTION(2, "SPI1_B_CLK"), + MTK_FUNCTION(3, "MD_INT0"), + MTK_FUNCTION(5, "TP_UTXD2_AO"), + MTK_FUNCTION(6, "SCP_SPI1_B_CK"), + MTK_FUNCTION(7, "DBG_MON_A15") + ), + MTK_PIN( + 156, "GPIO156", + MTK_EINT_FUNCTION(0, 156), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO156"), + MTK_FUNCTION(1, "CONN_TOP_CLK"), + MTK_FUNCTION(2, "AUXIF_CLK0"), + MTK_FUNCTION(7, "DBG_MON_A16") + ), + MTK_PIN( + 157, "GPIO157", + MTK_EINT_FUNCTION(0, 157), + DRV_GRP4, + MTK_FUNCTION(0, 
"GPIO157"), + MTK_FUNCTION(1, "CONN_TOP_DATA"), + MTK_FUNCTION(2, "AUXIF_ST0"), + MTK_FUNCTION(7, "DBG_MON_A17") + ), + MTK_PIN( + 158, "GPIO158", + MTK_EINT_FUNCTION(0, 158), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO158"), + MTK_FUNCTION(1, "CONN_HRST_B"), + MTK_FUNCTION(7, "DBG_MON_A18") + ), + MTK_PIN( + 159, "GPIO159", + MTK_EINT_FUNCTION(0, 159), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO159"), + MTK_FUNCTION(1, "CONN_WB_PTA"), + MTK_FUNCTION(7, "DBG_MON_A19") + ), + MTK_PIN( + 160, "GPIO160", + MTK_EINT_FUNCTION(0, 160), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO160"), + MTK_FUNCTION(1, "CONN_BT_CLK"), + MTK_FUNCTION(2, "AUXIF_CLK1"), + MTK_FUNCTION(7, "DBG_MON_A20") + ), + MTK_PIN( + 161, "GPIO161", + MTK_EINT_FUNCTION(0, 161), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO161"), + MTK_FUNCTION(1, "CONN_BT_DATA"), + MTK_FUNCTION(2, "AUXIF_ST1"), + MTK_FUNCTION(7, "DBG_MON_A21") + ), + MTK_PIN( + 162, "GPIO162", + MTK_EINT_FUNCTION(0, 162), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO162"), + MTK_FUNCTION(1, "CONN_WF_CTRL0"), + MTK_FUNCTION(7, "DBG_MON_A22") + ), + MTK_PIN( + 163, "GPIO163", + MTK_EINT_FUNCTION(0, 163), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO163"), + MTK_FUNCTION(1, "CONN_WF_CTRL1"), + MTK_FUNCTION(2, "UFS_MPHY_SCL"), + MTK_FUNCTION(7, "DBG_MON_A23") + ), + MTK_PIN( + 164, "GPIO164", + MTK_EINT_FUNCTION(0, 164), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO164"), + MTK_FUNCTION(1, "CONN_WF_CTRL2"), + MTK_FUNCTION(2, "UFS_MPHY_SDA"), + MTK_FUNCTION(7, "DBG_MON_A24") + ), + MTK_PIN( + 165, "GPIO165", + MTK_EINT_FUNCTION(0, 165), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO165"), + MTK_FUNCTION(1, "CONN_WF_CTRL3"), + MTK_FUNCTION(2, "UFS_UNIPRO_SDA"), + MTK_FUNCTION(7, "DBG_MON_A25") + ), + MTK_PIN( + 166, "GPIO166", + MTK_EINT_FUNCTION(0, 166), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO166"), + MTK_FUNCTION(1, "CONN_WF_CTRL4"), + MTK_FUNCTION(2, "UFS_UNIPRO_SCL"), + MTK_FUNCTION(7, "DBG_MON_A26") + ), + MTK_PIN( + 167, "GPIO167", + MTK_EINT_FUNCTION(0, 167), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO167"), + MTK_FUNCTION(1, "MSDC0_CMD") + ), + MTK_PIN( + 168, "GPIO168", + MTK_EINT_FUNCTION(0, 168), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO168"), + MTK_FUNCTION(1, "MSDC0_DAT0") + ), + MTK_PIN( + 169, "GPIO169", + MTK_EINT_FUNCTION(0, 169), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO169"), + MTK_FUNCTION(1, "MSDC0_DAT2") + ), + MTK_PIN( + 170, "GPIO170", + MTK_EINT_FUNCTION(0, 170), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO170"), + MTK_FUNCTION(1, "MSDC0_DAT4") + ), + MTK_PIN( + 171, "GPIO171", + MTK_EINT_FUNCTION(0, 171), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO171"), + MTK_FUNCTION(1, "MSDC0_DAT6") + ), + MTK_PIN( + 172, "GPIO172", + MTK_EINT_FUNCTION(0, 172), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO172"), + MTK_FUNCTION(1, "MSDC0_DAT1") + ), + MTK_PIN( + 173, "GPIO173", + MTK_EINT_FUNCTION(0, 173), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO173"), + MTK_FUNCTION(1, "MSDC0_DAT5") + ), + MTK_PIN( + 174, "GPIO174", + MTK_EINT_FUNCTION(0, 174), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO174"), + MTK_FUNCTION(1, "MSDC0_DAT7") + ), + MTK_PIN( + 175, "GPIO175", + MTK_EINT_FUNCTION(0, 175), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO175"), + MTK_FUNCTION(1, "MSDC0_DSL"), + MTK_FUNCTION(2, "ANT_SEL9") + ), + MTK_PIN( + 176, "GPIO176", + MTK_EINT_FUNCTION(0, 176), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO176"), + MTK_FUNCTION(1, "MSDC0_CLK"), + MTK_FUNCTION(2, "ANT_SEL10") + ), + MTK_PIN( + 177, "GPIO177", + MTK_EINT_FUNCTION(0, 177), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO177"), + MTK_FUNCTION(1, "MSDC0_DAT3") + ), + MTK_PIN( + 178, "GPIO178", + MTK_EINT_FUNCTION(0, 178), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO178"), + 
MTK_FUNCTION(1, "MSDC0_RSTB") + ), + MTK_PIN( + 179, "GPIO179", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO179"), + MTK_FUNCTION(1, "RFIC0_BSI_EN") + ), + MTK_PIN( + 180, "GPIO180", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO180"), + MTK_FUNCTION(1, "RFIC0_BSI_CK") + ), + MTK_PIN( + 181, "GPIO181", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO181"), + MTK_FUNCTION(1, "SRCLKENA0") + ), + MTK_PIN( + 182, "GPIO182", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO182"), + MTK_FUNCTION(1, "SRCLKENA1") + ), + MTK_PIN( + 183, "GPIO183", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO183"), + MTK_FUNCTION(1, "WATCHDOG") + ), + MTK_PIN( + 184, "GPIO184", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO184"), + MTK_FUNCTION(1, "PWRAP_SPI0_MI"), + MTK_FUNCTION(2, "PWRAP_SPI0_MO") + ), + MTK_PIN( + 185, "GPIO185", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO185"), + MTK_FUNCTION(1, "PWRAP_SPI0_CSN") + ), + MTK_PIN( + 186, "GPIO186", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO186"), + MTK_FUNCTION(1, "PWRAP_SPI0_MO"), + MTK_FUNCTION(2, "PWRAP_SPI0_MI") + ), + MTK_PIN( + 187, "GPIO187", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO187"), + MTK_FUNCTION(1, "PWRAP_SPI0_CK") + ), + MTK_PIN( + 188, "GPIO188", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO188"), + MTK_FUNCTION(1, "RTC32K_CK") + ), + MTK_PIN( + 189, "GPIO189", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO189"), + MTK_FUNCTION(1, "AUD_CLK_MOSI"), + MTK_FUNCTION(3, "I2S1_MCK"), + MTK_FUNCTION(6, "UFS_UNIPRO_SCL") + ), + MTK_PIN( + 190, "GPIO190", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO190"), + MTK_FUNCTION(1, "AUD_SYNC_MOSI"), + MTK_FUNCTION(3, "I2S1_BCK"), + MTK_FUNCTION(7, "DBG_MON_A6") + ), + MTK_PIN( + 191, "GPIO191", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO191"), + MTK_FUNCTION(1, "AUD_DAT_MOSI0"), + MTK_FUNCTION(3, "I2S1_LRCK"), + MTK_FUNCTION(7, "DBG_MON_A7") + ), + MTK_PIN( + 192, "GPIO192", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO192"), + MTK_FUNCTION(1, "AUD_DAT_MOSI1"), + MTK_FUNCTION(3, "I2S1_DO"), + MTK_FUNCTION(6, "UFS_MPHY_SDA"), + MTK_FUNCTION(7, "DBG_MON_A8") + ), + MTK_PIN( + 193, "GPIO193", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO193"), + MTK_FUNCTION(1, "AUD_DAT_MISO0"), + MTK_FUNCTION(2, "VOW_DAT_MISO"), + MTK_FUNCTION(3, "I2S2_LRCK"), + MTK_FUNCTION(5, "UDI_TDI"), + MTK_FUNCTION(7, "DBG_MON_A12") + ), + MTK_PIN( + 194, "GPIO194", + MTK_EINT_FUNCTION(NO_EINT_SUPPORT, NO_EINT_SUPPORT), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO194"), + MTK_FUNCTION(1, "AUD_DAT_MISO1"), + MTK_FUNCTION(2, "VOW_CLK_MISO"), + MTK_FUNCTION(3, "I2S2_DI"), + MTK_FUNCTION(5, "UDI_NTRST"), + MTK_FUNCTION(6, "UFS_MPHY_SCL"), + MTK_FUNCTION(7, "DBG_MON_A13") + ), + MTK_PIN( + 195, "GPIO195", + MTK_EINT_FUNCTION(0, 179), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO195"), + MTK_FUNCTION(3, "ADSP_JTAG_TCK"), + MTK_FUNCTION(4, "VPU_UDI_TCK"), + MTK_FUNCTION(5, "SPM_JTAG_TCK"), + 
MTK_FUNCTION(6, "SSPM_JTAG_TCK") + ), + MTK_PIN( + 196, "GPIO196", + MTK_EINT_FUNCTION(0, 180), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO196"), + MTK_FUNCTION(1, "CMMCLK4"), + MTK_FUNCTION(3, "ADSP_JTAG_TDI"), + MTK_FUNCTION(4, "VPU_UDI_TDI"), + MTK_FUNCTION(5, "SPM_JTAG_TDI"), + MTK_FUNCTION(6, "SSPM_JTAG_TDI") + ), + MTK_PIN( + 197, "GPIO197", + MTK_EINT_FUNCTION(0, 181), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO197"), + MTK_FUNCTION(3, "ADSP_JTAG_TDO"), + MTK_FUNCTION(4, "VPU_UDI_TDO"), + MTK_FUNCTION(5, "SPM_JTAG_TDO"), + MTK_FUNCTION(6, "SSPM_JTAG_TDO") + ), + MTK_PIN( + 198, "GPIO198", + MTK_EINT_FUNCTION(0, 182), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO198"), + MTK_FUNCTION(1, "SCL7") + ), + MTK_PIN( + 199, "GPIO199", + MTK_EINT_FUNCTION(0, 183), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO199"), + MTK_FUNCTION(1, "SDA7") + ), + MTK_PIN( + 200, "GPIO200", + MTK_EINT_FUNCTION(0, 184), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO200"), + MTK_FUNCTION(1, "URXD1"), + MTK_FUNCTION(2, "ADSP_URXD0"), + MTK_FUNCTION(3, "TP_URXD1_AO"), + MTK_FUNCTION(4, "SSPM_URXD_AO"), + MTK_FUNCTION(5, "TP_URXD2_AO"), + MTK_FUNCTION(6, "MBISTREADEN_TRIGGER") + ), + MTK_PIN( + 201, "GPIO201", + MTK_EINT_FUNCTION(0, 185), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO201"), + MTK_FUNCTION(1, "UTXD1"), + MTK_FUNCTION(2, "ADSP_UTXD0"), + MTK_FUNCTION(3, "TP_UTXD1_AO"), + MTK_FUNCTION(4, "SSPM_UTXD_AO"), + MTK_FUNCTION(5, "TP_UTXD2_AO"), + MTK_FUNCTION(6, "MBISTWRITEEN_TRIGGER") + ), + MTK_PIN( + 202, "GPIO202", + MTK_EINT_FUNCTION(0, 186), + DRV_GRP4, + MTK_FUNCTION(0, "GPIO202"), + MTK_FUNCTION(1, "PWM_3"), + MTK_FUNCTION(2, "CLKM3") + ), + MTK_PIN( + 203, "GPIO203", + MTK_EINT_FUNCTION(0, 187), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 204, "GPIO204", + MTK_EINT_FUNCTION(0, 188), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 205, "GPIO205", + MTK_EINT_FUNCTION(0, 189), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 206, "GPIO206", + MTK_EINT_FUNCTION(0, 190), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 207, "GPIO207", + MTK_EINT_FUNCTION(0, 191), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 208, "GPIO208", + MTK_EINT_FUNCTION(0, 193), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), + MTK_PIN( + 209, "GPIO209", + MTK_EINT_FUNCTION(0, 194), + DRV_GRP4, + MTK_FUNCTION(0, NULL) + ), +}; + +#endif /* __PINCTRL-MTK-MT6779_H */ diff --git a/drivers/pinctrl/mediatek/pinctrl-paris.c b/drivers/pinctrl/mediatek/pinctrl-paris.c index 90a432bf9fed..a23c18251965 100644 --- a/drivers/pinctrl/mediatek/pinctrl-paris.c +++ b/drivers/pinctrl/mediatek/pinctrl-paris.c @@ -769,6 +769,13 @@ static int mtk_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio) if (gpio >= hw->soc->npins) return -EINVAL; + /* + * "Virtual" GPIOs are always and only used for interrupts + * Since they are only used for interrupts, they are always inputs + */ + if (mtk_is_virt_gpio(hw, gpio)) + return 1; + desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio]; err = mtk_hw_get_value(hw, desc, PINCTRL_PIN_REG_DIR, &value); diff --git a/drivers/pinctrl/meson/pinctrl-meson-a1.c b/drivers/pinctrl/meson/pinctrl-meson-a1.c index 0bcec03f344a..8abf750eac7e 100644 --- a/drivers/pinctrl/meson/pinctrl-meson-a1.c +++ b/drivers/pinctrl/meson/pinctrl-meson-a1.c @@ -746,11 +746,6 @@ static const char * const i2c3_groups[] = { "i2c3_sck_x", "i2c3_sda_x", "i2c3_sck_f", "i2c3_sda_f", }; -static const char * const i2c_slave_groups[] = { - "i2c_slave_sda_a", "i2c_slave_sck_a", - "i2c_slave_sda_f", "i2c_slave_sck_f", -}; - static const char * const spi_a_groups[] = { 
"spi_a_mosi_x2", "spi_a_ss0_x3", "spi_a_sclk_x4", "spi_a_miso_x5", "spi_a_mosi_x7", "spi_a_miso_x8", "spi_a_ss0_x9", "spi_a_sclk_x10", diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 079f8ee8d353..20683cd072bb 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -56,6 +56,10 @@ #include "../pinctrl-utils.h" #include "pinctrl-meson.h" +static const unsigned int meson_bit_strides[] = { + 1, 1, 1, 1, 1, 2, 1 +}; + /** * meson_get_bank() - find the bank containing a given pin * @@ -96,8 +100,9 @@ static void meson_calc_reg_and_bit(struct meson_bank *bank, unsigned int pin, { struct meson_reg_desc *desc = &bank->regs[reg_type]; - *reg = desc->reg * 4; - *bit = desc->bit + pin - bank->first; + *bit = (desc->bit + pin - bank->first) * meson_bit_strides[reg_type]; + *reg = (desc->reg + (*bit / 32)) * 4; + *bit &= 0x1f; } static int meson_get_groups_count(struct pinctrl_dev *pcdev) @@ -314,7 +319,6 @@ static int meson_pinconf_set_drive_strength(struct meson_pinctrl *pc, return ret; meson_calc_reg_and_bit(bank, pin, REG_DS, ®, &bit); - bit = bit << 1; if (drive_strength_ua <= 500) { ds_val = MESON_PINCONF_DRV_500UA; @@ -441,7 +445,6 @@ static int meson_pinconf_get_drive_strength(struct meson_pinctrl *pc, return ret; meson_calc_reg_and_bit(bank, pin, REG_DS, ®, &bit); - bit = bit << 1; ret = regmap_read(pc->reg_ds, reg, &val); if (ret) diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index 5f125bd6279d..953126bf6657 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -45,13 +45,14 @@ * The pins of a pinmux groups are composed of one or two groups of contiguous * pins. * @name: Name of the pin group, used to lookup the group. - * @start_pins: Index of the first pin of the main range of pins belonging to + * @start_pin: Index of the first pin of the main range of pins belonging to * the group * @npins: Number of pins included in the first range * @reg_mask: Bit mask matching the group in the selection register - * @extra_pins: Index of the first pin of the optional second range of pins + * @val: Value to write to the registers for a given function + * @extra_pin: Index of the first pin of the optional second range of pins * belonging to the group - * @npins: Number of pins included in the second optional range + * @extra_npins:Number of pins included in the second optional range * @funcs: A list of pinmux functions that can be selected for this group. * @pins: List of the pins included in the group */ diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index dfef471201f6..1e225d513988 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c @@ -231,9 +231,10 @@ static void parse_dt_cfg(struct device_node *np, * pinconf_generic_parse_dt_config() * parse the config properties into generic pinconfig values. * @np: node containing the pinconfig properties + * @pctldev: pincontrol device * @configs: array with nconfigs entries containing the generic pinconf values * must be freed when no longer necessary. 
- * @nconfigs: umber of configurations + * @nconfigs: number of configurations */ int pinconf_generic_parse_dt_config(struct device_node *np, struct pinctrl_dev *pctldev, diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 1fe62a35bb12..9a760f5cd7ed 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -417,22 +417,13 @@ static int amd_gpio_irq_set_type(struct irq_data *d, unsigned int type) { int ret = 0; u32 pin_reg, pin_reg_irq_en, mask; - unsigned long flags, irq_flags; + unsigned long flags; struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct amd_gpio *gpio_dev = gpiochip_get_data(gc); raw_spin_lock_irqsave(&gpio_dev->lock, flags); pin_reg = readl(gpio_dev->base + (d->hwirq)*4); - /* Ignore the settings coming from the client and - * read the values from the ACPI tables - * while setting the trigger type - */ - - irq_flags = irq_get_trigger_type(d->irq); - if (irq_flags != IRQ_TYPE_NONE) - type = irq_flags; - switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: pin_reg &= ~BIT(LEVEL_TRIG_OFF); @@ -855,6 +846,7 @@ static int amd_gpio_probe(struct platform_device *pdev) int irq_base; struct resource *res; struct amd_gpio *gpio_dev; + struct gpio_irq_chip *girq; gpio_dev = devm_kzalloc(&pdev->dev, sizeof(struct amd_gpio), GFP_KERNEL); @@ -916,6 +908,15 @@ static int amd_gpio_probe(struct platform_device *pdev) return PTR_ERR(gpio_dev->pctrl); } + girq = &gpio_dev->gc.irq; + girq->chip = &amd_gpio_irqchip; + /* This will let us handle the parent IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; + ret = gpiochip_add_data(&gpio_dev->gc, gpio_dev); if (ret) return ret; @@ -927,17 +928,6 @@ static int amd_gpio_probe(struct platform_device *pdev) goto out2; } - ret = gpiochip_irqchip_add(&gpio_dev->gc, - &amd_gpio_irqchip, - 0, - handle_simple_irq, - IRQ_TYPE_NONE); - if (ret) { - dev_err(&pdev->dev, "could not add irqchip\n"); - ret = -ENODEV; - goto out2; - } - ret = devm_request_irq(&pdev->dev, irq_base, amd_gpio_irq_handler, IRQF_SHARED, KBUILD_MODNAME, gpio_dev); if (ret) @@ -965,12 +955,14 @@ static int amd_gpio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_ACPI static const struct acpi_device_id amd_gpio_acpi_match[] = { { "AMD0030", 0 }, { "AMDI0030", 0}, { }, }; MODULE_DEVICE_TABLE(acpi, amd_gpio_acpi_match); +#endif static struct platform_driver amd_gpio_driver = { .driver = { diff --git a/drivers/pinctrl/pinctrl-at91-pio4.c b/drivers/pinctrl/pinctrl-at91-pio4.c index 54222ccddfb1..8e5a5053a47e 100644 --- a/drivers/pinctrl/pinctrl-at91-pio4.c +++ b/drivers/pinctrl/pinctrl-at91-pio4.c @@ -106,6 +106,8 @@ struct atmel_pin { * @irq_domain: irq domain for the gpio controller. * @irqs: table containing the hw irq number of the bank. The index of the * table is the bank id. + * @pm_wakeup_sources: bitmap of wakeup sources (lines) + * @pm_suspend_backup: backup/restore register values on suspend/resume * @dev: device entry for the Atmel PIO controller. * @node: node of the Atmel PIO controller. 
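Several drivers in this series (pinctrl-amd above, and mcp23s08, stmfx and sx150x further down) stop calling gpiochip_irqchip_add()/gpiochip_irqchip_add_nested() after registration and instead fill in the gpio_irq_chip template before gpiochip_add_data(), letting gpiolib register the irqchip as part of chip registration. Condensed to its essentials, the pattern looks like the sketch below; it is kernel driver code (not a standalone program) and the function and parameter names are placeholders, not taken from any of these drivers.

#include <linux/gpio/driver.h>
#include <linux/irq.h>

static int example_register_gpiochip(struct device *dev, struct gpio_chip *gc,
				     struct irq_chip *ic)
{
	struct gpio_irq_chip *girq = &gc->irq;

	girq->chip = ic;
	/* No parent handler: the driver demultiplexes its own (threaded) IRQ */
	girq->parent_handler = NULL;
	girq->num_parents = 0;
	girq->parents = NULL;
	girq->default_type = IRQ_TYPE_NONE;
	girq->handler = handle_simple_irq;
	girq->threaded = true;	/* nested/threaded cases (mcp23s08, stmfx, sx150x) */

	/* The irqchip is now set up together with the gpiochip */
	return devm_gpiochip_add_data(dev, gc, NULL);
}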
*/ diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c index 9c5213087659..72edc675431c 100644 --- a/drivers/pinctrl/pinctrl-at91.c +++ b/drivers/pinctrl/pinctrl-at91.c @@ -65,7 +65,7 @@ static int gpio_banks; #define DEBOUNCE_VAL_SHIFT 17 #define DEBOUNCE_VAL (0x3fff << DEBOUNCE_VAL_SHIFT) -/** +/* * These defines will translated the dt binding settings to our internal * settings. They are not necessarily the same value as the register setting. * The actual drive strength current of low, medium and high must be looked up @@ -161,6 +161,10 @@ struct at91_pin_group { * @set_pulldown: enable/disable pulldown * @get_schmitt_trig: get schmitt trigger status * @disable_schmitt_trig: disable schmitt trigger + * @get_drivestrength: get driver strength + * @set_drivestrength: set driver strength + * @get_slewrate: get slew rate + * @set_slewrate: set slew rate * @irq_type: return irq type */ struct at91_pinctrl_mux_ops { diff --git a/drivers/pinctrl/pinctrl-bm1880.c b/drivers/pinctrl/pinctrl-bm1880.c index d1a7d9836787..a8e267237435 100644 --- a/drivers/pinctrl/pinctrl-bm1880.c +++ b/drivers/pinctrl/pinctrl-bm1880.c @@ -22,12 +22,12 @@ /** * struct bm1880_pinctrl - driver data * @base: Pinctrl base address - * @pctrl: Pinctrl device + * @pctrldev: Pinctrl device * @groups: Pingroups * @ngroups: Number of @groups * @funcs: Pinmux functions * @nfuncs: Number of @funcs - * @pconf: Pinconf data + * @pinconf: Pinconf data */ struct bm1880_pinctrl { void __iomem *base; diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 6a8d44504f94..a8d1b53ec4c1 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -124,6 +124,7 @@ static int jz4740_nand_cs1_pins[] = { 0x39, }; static int jz4740_nand_cs2_pins[] = { 0x3a, }; static int jz4740_nand_cs3_pins[] = { 0x3b, }; static int jz4740_nand_cs4_pins[] = { 0x3c, }; +static int jz4740_nand_fre_fwe_pins[] = { 0x5c, 0x5d, }; static int jz4740_pwm_pwm0_pins[] = { 0x77, }; static int jz4740_pwm_pwm1_pins[] = { 0x78, }; static int jz4740_pwm_pwm2_pins[] = { 0x79, }; @@ -146,6 +147,7 @@ static int jz4740_nand_cs1_funcs[] = { 0, }; static int jz4740_nand_cs2_funcs[] = { 0, }; static int jz4740_nand_cs3_funcs[] = { 0, }; static int jz4740_nand_cs4_funcs[] = { 0, }; +static int jz4740_nand_fre_fwe_funcs[] = { 0, 0, }; static int jz4740_pwm_pwm0_funcs[] = { 0, }; static int jz4740_pwm_pwm1_funcs[] = { 0, }; static int jz4740_pwm_pwm2_funcs[] = { 0, }; @@ -178,6 +180,7 @@ static const struct group_desc jz4740_groups[] = { INGENIC_PIN_GROUP("nand-cs2", jz4740_nand_cs2), INGENIC_PIN_GROUP("nand-cs3", jz4740_nand_cs3), INGENIC_PIN_GROUP("nand-cs4", jz4740_nand_cs4), + INGENIC_PIN_GROUP("nand-fre-fwe", jz4740_nand_fre_fwe), INGENIC_PIN_GROUP("pwm0", jz4740_pwm_pwm0), INGENIC_PIN_GROUP("pwm1", jz4740_pwm_pwm1), INGENIC_PIN_GROUP("pwm2", jz4740_pwm_pwm2), @@ -195,7 +198,7 @@ static const char *jz4740_lcd_groups[] = { "lcd-8bit", "lcd-16bit", "lcd-18bit", "lcd-18bit-tft", "lcd-no-pins", }; static const char *jz4740_nand_groups[] = { - "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", + "nand-cs1", "nand-cs2", "nand-cs3", "nand-cs4", "nand-fre-fwe", }; static const char *jz4740_pwm0_groups[] = { "pwm0", }; static const char *jz4740_pwm1_groups[] = { "pwm1", }; @@ -1810,9 +1813,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) */ high = ingenic_gpio_get_value(jzgc, irq); if (high) - irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING); + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW); else - 
irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING); + irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH); } if (jzgc->jzpc->info->version >= ID_JZ4760) @@ -1848,7 +1851,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type) */ bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq); - type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING; + type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH; } irq_set_type(jzgc, irqd->hwirq, type); @@ -1955,7 +1958,8 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) unsigned int pin = gc->base + offset; if (jzpc->info->version >= ID_JZ4760) { - if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1)) + if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) || + ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; } @@ -2292,6 +2296,7 @@ static const struct regmap_config ingenic_pinctrl_regmap_config = { static const struct of_device_id ingenic_gpio_of_match[] __initconst = { { .compatible = "ingenic,jz4740-gpio", }, + { .compatible = "ingenic,jz4725b-gpio", }, { .compatible = "ingenic,jz4760-gpio", }, { .compatible = "ingenic,jz4770-gpio", }, { .compatible = "ingenic,jz4780-gpio", }, diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index e4677546aec4..7b2f885e68bd 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c @@ -838,11 +838,11 @@ static int lpc18xx_pconf_get_pin(struct pinctrl_dev *pctldev, unsigned param, *arg = (reg & LPC18XX_SCU_PIN_EHD_MASK) >> LPC18XX_SCU_PIN_EHD_POS; switch (*arg) { case 3: *arg += 5; - /* fall through */ + fallthrough; case 2: *arg += 5; - /* fall through */ + fallthrough; case 1: *arg += 3; - /* fall through */ + fallthrough; case 0: *arg += 4; } break; @@ -1057,11 +1057,11 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, unsigned param, switch (param_val) { case 20: param_val -= 5; - /* fall through */ + fallthrough; case 14: param_val -= 5; - /* fall through */ + fallthrough; case 8: param_val -= 3; - /* fall through */ + fallthrough; case 4: param_val -= 4; break; default: diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 151931b593f6..42b12ea14d6b 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c @@ -522,29 +522,6 @@ static int mcp23s08_irq_setup(struct mcp23s08 *mcp) return 0; } -static int mcp23s08_irqchip_setup(struct mcp23s08 *mcp) -{ - struct gpio_chip *chip = &mcp->chip; - int err; - - err = gpiochip_irqchip_add_nested(chip, - &mcp->irq_chip, - 0, - handle_simple_irq, - IRQ_TYPE_NONE); - if (err) { - dev_err(chip->parent, - "could not connect irqchip to gpiochip: %d\n", err); - return err; - } - - gpiochip_set_nested_irqchip(chip, - &mcp->irq_chip, - mcp->irq); - - return 0; -} - /*----------------------------------------------------------------------*/ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, @@ -589,10 +566,6 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, if (ret < 0) goto fail; - ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); - if (ret < 0) - goto fail; - mcp->irq_controller = device_property_read_bool(dev, "interrupt-controller"); if (mcp->irq && mcp->irq_controller) { @@ -629,11 +602,22 @@ int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, } if (mcp->irq && mcp->irq_controller) { - ret = mcp23s08_irqchip_setup(mcp); - if (ret) - goto fail; + struct gpio_irq_chip 
*girq = &mcp->chip.irq; + + girq->chip = &mcp->irq_chip; + /* This will let us handle the parent IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_simple_irq; + girq->threaded = true; } + ret = devm_gpiochip_add_data(dev, &mcp->chip, mcp); + if (ret < 0) + goto fail; + mcp->pinctrl_desc.pctlops = &mcp_pinctrl_ops; mcp->pinctrl_desc.confops = &mcp_pinconf_ops; mcp->pinctrl_desc.npins = mcp->chip.ngpio; diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 95c225bc7572..425a3d764f00 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -25,6 +25,23 @@ #include "pinconf.h" #include "pinmux.h" +#define ocelot_clrsetbits(addr, clear, set) \ + writel((readl(addr) & ~(clear)) | (set), (addr)) + +/* PINCONFIG bits (sparx5 only) */ +enum { + PINCONF_BIAS, + PINCONF_SCHMITT, + PINCONF_DRIVE_STRENGTH, +}; + +#define BIAS_PD_BIT BIT(4) +#define BIAS_PU_BIT BIT(3) +#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT) +#define SCHMITT_BIT BIT(2) +#define DRIVE_BITS GENMASK(1, 0) + +/* GPIO standard registers */ #define OCELOT_GPIO_OUT_SET 0x0 #define OCELOT_GPIO_OUT_CLR 0x4 #define OCELOT_GPIO_OUT 0x8 @@ -42,12 +59,17 @@ enum { FUNC_NONE, FUNC_GPIO, + FUNC_IRQ0, FUNC_IRQ0_IN, FUNC_IRQ0_OUT, + FUNC_IRQ1, FUNC_IRQ1_IN, FUNC_IRQ1_OUT, + FUNC_EXT_IRQ, FUNC_MIIM, + FUNC_PHY_LED, FUNC_PCI_WAKE, + FUNC_MD, FUNC_PTP0, FUNC_PTP1, FUNC_PTP2, @@ -59,24 +81,36 @@ enum { FUNC_SG1, FUNC_SG2, FUNC_SI, + FUNC_SI2, FUNC_TACHO, FUNC_TWI, FUNC_TWI2, + FUNC_TWI3, FUNC_TWI_SCL_M, FUNC_UART, FUNC_UART2, + FUNC_UART3, + FUNC_PLL_STAT, + FUNC_EMMC, + FUNC_REF_CLK, + FUNC_RCVRD_CLK, FUNC_MAX }; static const char *const ocelot_function_names[] = { [FUNC_NONE] = "none", [FUNC_GPIO] = "gpio", + [FUNC_IRQ0] = "irq0", [FUNC_IRQ0_IN] = "irq0_in", [FUNC_IRQ0_OUT] = "irq0_out", + [FUNC_IRQ1] = "irq1", [FUNC_IRQ1_IN] = "irq1_in", [FUNC_IRQ1_OUT] = "irq1_out", + [FUNC_EXT_IRQ] = "ext_irq", [FUNC_MIIM] = "miim", + [FUNC_PHY_LED] = "phy_led", [FUNC_PCI_WAKE] = "pci_wake", + [FUNC_MD] = "md", [FUNC_PTP0] = "ptp0", [FUNC_PTP1] = "ptp1", [FUNC_PTP2] = "ptp2", @@ -88,12 +122,19 @@ static const char *const ocelot_function_names[] = { [FUNC_SG1] = "sg1", [FUNC_SG2] = "sg2", [FUNC_SI] = "si", + [FUNC_SI2] = "si2", [FUNC_TACHO] = "tacho", [FUNC_TWI] = "twi", [FUNC_TWI2] = "twi2", + [FUNC_TWI3] = "twi3", [FUNC_TWI_SCL_M] = "twi_scl_m", [FUNC_UART] = "uart", [FUNC_UART2] = "uart2", + [FUNC_UART3] = "uart3", + [FUNC_PLL_STAT] = "pll_stat", + [FUNC_EMMC] = "emmc", + [FUNC_REF_CLK] = "ref_clk", + [FUNC_RCVRD_CLK] = "rcvrd_clk", }; struct ocelot_pmx_func { @@ -111,6 +152,7 @@ struct ocelot_pinctrl { struct pinctrl_dev *pctl; struct gpio_chip gpio_chip; struct regmap *map; + void __iomem *pincfg; struct pinctrl_desc *desc; struct ocelot_pmx_func func[FUNC_MAX]; u8 stride; @@ -324,6 +366,152 @@ static const struct pinctrl_pin_desc jaguar2_pins[] = { JAGUAR2_PIN(63), }; +#define SPARX5_P(p, f0, f1, f2) \ +static struct ocelot_pin_caps sparx5_pin_##p = { \ + .pin = p, \ + .functions = { \ + FUNC_GPIO, FUNC_##f0, FUNC_##f1, FUNC_##f2 \ + }, \ +} + +SPARX5_P(0, SG0, PLL_STAT, NONE); +SPARX5_P(1, SG0, NONE, NONE); +SPARX5_P(2, SG0, NONE, NONE); +SPARX5_P(3, SG0, NONE, NONE); +SPARX5_P(4, SG1, NONE, NONE); +SPARX5_P(5, SG1, NONE, NONE); +SPARX5_P(6, IRQ0_IN, IRQ0_OUT, SFP); +SPARX5_P(7, IRQ1_IN, IRQ1_OUT, SFP); +SPARX5_P(8, PTP0, NONE, SFP); +SPARX5_P(9, PTP1, SFP, TWI_SCL_M); +SPARX5_P(10, 
UART, NONE, NONE); +SPARX5_P(11, UART, NONE, NONE); +SPARX5_P(12, SG1, NONE, NONE); +SPARX5_P(13, SG1, NONE, NONE); +SPARX5_P(14, TWI, TWI_SCL_M, NONE); +SPARX5_P(15, TWI, NONE, NONE); +SPARX5_P(16, SI, TWI_SCL_M, SFP); +SPARX5_P(17, SI, TWI_SCL_M, SFP); +SPARX5_P(18, SI, TWI_SCL_M, SFP); +SPARX5_P(19, PCI_WAKE, TWI_SCL_M, SFP); +SPARX5_P(20, IRQ0_OUT, TWI_SCL_M, SFP); +SPARX5_P(21, IRQ1_OUT, TACHO, SFP); +SPARX5_P(22, TACHO, IRQ0_OUT, TWI_SCL_M); +SPARX5_P(23, PWM, UART3, TWI_SCL_M); +SPARX5_P(24, PTP2, UART3, TWI_SCL_M); +SPARX5_P(25, PTP3, SI, TWI_SCL_M); +SPARX5_P(26, UART2, SI, TWI_SCL_M); +SPARX5_P(27, UART2, SI, TWI_SCL_M); +SPARX5_P(28, TWI2, SI, SFP); +SPARX5_P(29, TWI2, SI, SFP); +SPARX5_P(30, SG2, SI, PWM); +SPARX5_P(31, SG2, SI, TWI_SCL_M); +SPARX5_P(32, SG2, SI, TWI_SCL_M); +SPARX5_P(33, SG2, SI, SFP); +SPARX5_P(34, NONE, TWI_SCL_M, EMMC); +SPARX5_P(35, SFP, TWI_SCL_M, EMMC); +SPARX5_P(36, SFP, TWI_SCL_M, EMMC); +SPARX5_P(37, SFP, NONE, EMMC); +SPARX5_P(38, NONE, TWI_SCL_M, EMMC); +SPARX5_P(39, SI2, TWI_SCL_M, EMMC); +SPARX5_P(40, SI2, TWI_SCL_M, EMMC); +SPARX5_P(41, SI2, TWI_SCL_M, EMMC); +SPARX5_P(42, SI2, TWI_SCL_M, EMMC); +SPARX5_P(43, SI2, TWI_SCL_M, EMMC); +SPARX5_P(44, SI, SFP, EMMC); +SPARX5_P(45, SI, SFP, EMMC); +SPARX5_P(46, NONE, SFP, EMMC); +SPARX5_P(47, NONE, SFP, EMMC); +SPARX5_P(48, TWI3, SI, SFP); +SPARX5_P(49, TWI3, NONE, SFP); +SPARX5_P(50, SFP, NONE, TWI_SCL_M); +SPARX5_P(51, SFP, SI, TWI_SCL_M); +SPARX5_P(52, SFP, MIIM, TWI_SCL_M); +SPARX5_P(53, SFP, MIIM, TWI_SCL_M); +SPARX5_P(54, SFP, PTP2, TWI_SCL_M); +SPARX5_P(55, SFP, PTP3, PCI_WAKE); +SPARX5_P(56, MIIM, SFP, TWI_SCL_M); +SPARX5_P(57, MIIM, SFP, TWI_SCL_M); +SPARX5_P(58, MIIM, SFP, TWI_SCL_M); +SPARX5_P(59, MIIM, SFP, NONE); +SPARX5_P(60, RECO_CLK, NONE, NONE); +SPARX5_P(61, RECO_CLK, NONE, NONE); +SPARX5_P(62, RECO_CLK, PLL_STAT, NONE); +SPARX5_P(63, RECO_CLK, NONE, NONE); + +#define SPARX5_PIN(n) { \ + .number = n, \ + .name = "GPIO_"#n, \ + .drv_data = &sparx5_pin_##n \ +} + +static const struct pinctrl_pin_desc sparx5_pins[] = { + SPARX5_PIN(0), + SPARX5_PIN(1), + SPARX5_PIN(2), + SPARX5_PIN(3), + SPARX5_PIN(4), + SPARX5_PIN(5), + SPARX5_PIN(6), + SPARX5_PIN(7), + SPARX5_PIN(8), + SPARX5_PIN(9), + SPARX5_PIN(10), + SPARX5_PIN(11), + SPARX5_PIN(12), + SPARX5_PIN(13), + SPARX5_PIN(14), + SPARX5_PIN(15), + SPARX5_PIN(16), + SPARX5_PIN(17), + SPARX5_PIN(18), + SPARX5_PIN(19), + SPARX5_PIN(20), + SPARX5_PIN(21), + SPARX5_PIN(22), + SPARX5_PIN(23), + SPARX5_PIN(24), + SPARX5_PIN(25), + SPARX5_PIN(26), + SPARX5_PIN(27), + SPARX5_PIN(28), + SPARX5_PIN(29), + SPARX5_PIN(30), + SPARX5_PIN(31), + SPARX5_PIN(32), + SPARX5_PIN(33), + SPARX5_PIN(34), + SPARX5_PIN(35), + SPARX5_PIN(36), + SPARX5_PIN(37), + SPARX5_PIN(38), + SPARX5_PIN(39), + SPARX5_PIN(40), + SPARX5_PIN(41), + SPARX5_PIN(42), + SPARX5_PIN(43), + SPARX5_PIN(44), + SPARX5_PIN(45), + SPARX5_PIN(46), + SPARX5_PIN(47), + SPARX5_PIN(48), + SPARX5_PIN(49), + SPARX5_PIN(50), + SPARX5_PIN(51), + SPARX5_PIN(52), + SPARX5_PIN(53), + SPARX5_PIN(54), + SPARX5_PIN(55), + SPARX5_PIN(56), + SPARX5_PIN(57), + SPARX5_PIN(58), + SPARX5_PIN(59), + SPARX5_PIN(60), + SPARX5_PIN(61), + SPARX5_PIN(62), + SPARX5_PIN(63), +}; + static int ocelot_get_functions_count(struct pinctrl_dev *pctldev) { return ARRAY_SIZE(ocelot_function_names); @@ -382,6 +570,7 @@ static int ocelot_pinmux_set_mux(struct pinctrl_dev *pctldev, * ALT[1] * This is racy because both registers can't be updated at the same time * but it doesn't matter much for now. 
+ * Note: ALT0/ALT1 are organized specially for 64 gpio targets */ regmap_update_bits(info->map, REG_ALT(0, info, pin->pin), BIT(p), f << p); @@ -458,6 +647,219 @@ static int ocelot_pctl_get_group_pins(struct pinctrl_dev *pctldev, return 0; } +static int ocelot_hw_get_value(struct ocelot_pinctrl *info, + unsigned int pin, + unsigned int reg, + int *val) +{ + int ret = -EOPNOTSUPP; + + if (info->pincfg) { + u32 regcfg = readl(info->pincfg + (pin * sizeof(u32))); + + ret = 0; + switch (reg) { + case PINCONF_BIAS: + *val = regcfg & BIAS_BITS; + break; + + case PINCONF_SCHMITT: + *val = regcfg & SCHMITT_BIT; + break; + + case PINCONF_DRIVE_STRENGTH: + *val = regcfg & DRIVE_BITS; + break; + + default: + ret = -EOPNOTSUPP; + break; + } + } + return ret; +} + +static int ocelot_hw_set_value(struct ocelot_pinctrl *info, + unsigned int pin, + unsigned int reg, + int val) +{ + int ret = -EOPNOTSUPP; + + if (info->pincfg) { + void __iomem *regaddr = info->pincfg + (pin * sizeof(u32)); + + ret = 0; + switch (reg) { + case PINCONF_BIAS: + ocelot_clrsetbits(regaddr, BIAS_BITS, val); + break; + + case PINCONF_SCHMITT: + ocelot_clrsetbits(regaddr, SCHMITT_BIT, val); + break; + + case PINCONF_DRIVE_STRENGTH: + if (val <= 3) + ocelot_clrsetbits(regaddr, DRIVE_BITS, val); + else + ret = -EINVAL; + break; + + default: + ret = -EOPNOTSUPP; + break; + } + } + return ret; +} + +static int ocelot_pinconf_get(struct pinctrl_dev *pctldev, + unsigned int pin, unsigned long *config) +{ + struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + u32 param = pinconf_to_config_param(*config); + int val, err; + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + err = ocelot_hw_get_value(info, pin, PINCONF_BIAS, &val); + if (err) + return err; + if (param == PIN_CONFIG_BIAS_DISABLE) + val = (val == 0 ? true : false); + else if (param == PIN_CONFIG_BIAS_PULL_DOWN) + val = (val & BIAS_PD_BIT ? true : false); + else /* PIN_CONFIG_BIAS_PULL_UP */ + val = (val & BIAS_PU_BIT ? true : false); + break; + + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val); + if (err) + return err; + + val = (val & SCHMITT_BIT ? true : false); + break; + + case PIN_CONFIG_DRIVE_STRENGTH: + err = ocelot_hw_get_value(info, pin, PINCONF_DRIVE_STRENGTH, + &val); + if (err) + return err; + break; + + case PIN_CONFIG_OUTPUT: + err = regmap_read(info->map, REG(OCELOT_GPIO_OUT, info, pin), + &val); + if (err) + return err; + val = !!(val & BIT(pin % 32)); + break; + + case PIN_CONFIG_INPUT_ENABLE: + case PIN_CONFIG_OUTPUT_ENABLE: + err = regmap_read(info->map, REG(OCELOT_GPIO_OE, info, pin), + &val); + if (err) + return err; + val = val & BIT(pin % 32); + if (param == PIN_CONFIG_OUTPUT_ENABLE) + val = !!val; + else + val = !val; + break; + + default: + return -EOPNOTSUPP; + } + + *config = pinconf_to_config_packed(param, val); + + return 0; +} + +static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + u32 param, arg, p; + int cfg, err = 0; + + for (cfg = 0; cfg < num_configs; cfg++) { + param = pinconf_to_config_param(configs[cfg]); + arg = pinconf_to_config_argument(configs[cfg]); + + switch (param) { + case PIN_CONFIG_BIAS_DISABLE: + case PIN_CONFIG_BIAS_PULL_UP: + case PIN_CONFIG_BIAS_PULL_DOWN: + arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 : + (param == PIN_CONFIG_BIAS_PULL_UP) ? 
BIAS_PU_BIT : + BIAS_PD_BIT; + + err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg); + if (err) + goto err; + + break; + + case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + arg = arg ? SCHMITT_BIT : 0; + err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT, + arg); + if (err) + goto err; + + break; + + case PIN_CONFIG_DRIVE_STRENGTH: + err = ocelot_hw_set_value(info, pin, + PINCONF_DRIVE_STRENGTH, + arg); + if (err) + goto err; + + break; + + case PIN_CONFIG_OUTPUT_ENABLE: + case PIN_CONFIG_INPUT_ENABLE: + case PIN_CONFIG_OUTPUT: + p = pin % 32; + if (arg) + regmap_write(info->map, + REG(OCELOT_GPIO_OUT_SET, info, + pin), + BIT(p)); + else + regmap_write(info->map, + REG(OCELOT_GPIO_OUT_CLR, info, + pin), + BIT(p)); + regmap_update_bits(info->map, + REG(OCELOT_GPIO_OE, info, pin), + BIT(p), + param == PIN_CONFIG_INPUT_ENABLE ? + 0 : BIT(p)); + break; + + default: + err = -EOPNOTSUPP; + } + } +err: + return err; +} + +static const struct pinconf_ops ocelot_confops = { + .is_generic = true, + .pin_config_get = ocelot_pinconf_get, + .pin_config_set = ocelot_pinconf_set, + .pin_config_config_dbg_show = pinconf_generic_dump_config, +}; + static const struct pinctrl_ops ocelot_pctl_ops = { .get_groups_count = ocelot_pctl_get_groups_count, .get_group_name = ocelot_pctl_get_group_name, @@ -484,6 +886,16 @@ static struct pinctrl_desc jaguar2_desc = { .owner = THIS_MODULE, }; +static struct pinctrl_desc sparx5_desc = { + .name = "sparx5-pinctrl", + .pins = sparx5_pins, + .npins = ARRAY_SIZE(sparx5_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .confops = &ocelot_confops, + .owner = THIS_MODULE, +}; + static int ocelot_create_group_func_map(struct device *dev, struct ocelot_pinctrl *info) { @@ -511,7 +923,8 @@ static int ocelot_create_group_func_map(struct device *dev, } for (i = 0; i < npins; i++) - info->func[f].groups[i] = info->desc->pins[pins[i]].name; + info->func[f].groups[i] = + info->desc->pins[pins[i]].name; } kfree(pins); @@ -744,6 +1157,7 @@ static int ocelot_gpiochip_register(struct platform_device *pdev, static const struct of_device_id ocelot_pinctrl_of_match[] = { { .compatible = "mscc,ocelot-pinctrl", .data = &ocelot_desc }, { .compatible = "mscc,jaguar2-pinctrl", .data = &jaguar2_desc }, + { .compatible = "microchip,sparx5-pinctrl", .data = &sparx5_desc }, {}, }; @@ -752,6 +1166,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct ocelot_pinctrl *info; void __iomem *base; + struct resource *res; int ret; struct regmap_config regmap_config = { .reg_bits = 32, @@ -773,6 +1188,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) } info->stride = 1 + (info->desc->npins - 1) / 32; + regmap_config.max_register = OCELOT_GPIO_SD_MAP * info->stride + 15 * 4; info->map = devm_regmap_init_mmio(dev, base, ®map_config); @@ -783,6 +1199,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) dev_set_drvdata(dev, info->map); info->dev = dev; + /* Pinconf registers */ + if (info->desc->confops) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + dev_dbg(dev, "Failed to ioremap config registers (no extended pinconf)\n"); + else + info->pincfg = base; + } + ret = ocelot_pinctrl_register(pdev, info); if (ret) return ret; @@ -791,6 +1217,8 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) if (ret) return ret; + dev_info(dev, "driver registered\n"); + return 0; } diff --git a/drivers/pinctrl/pinctrl-rockchip.c 
b/drivers/pinctrl/pinctrl-rockchip.c index c07324d1f265..0401c1da79dd 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -9,7 +9,7 @@ * Copyright (c) 2012 Samsung Electronics Co., Ltd. * http://www.samsung.com * Copyright (c) 2012 Linaro Ltd - * http://www.linaro.org + * https://www.linaro.org * * and pinctrl-at91: * Copyright (C) 2011-2012 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com> @@ -63,7 +63,7 @@ enum rockchip_pinctrl_type { RK3399, }; -/** +/* * Encode variants of iomux registers into a type variable */ #define IOMUX_GPIO_ONLY BIT(0) @@ -74,6 +74,7 @@ enum rockchip_pinctrl_type { #define IOMUX_WIDTH_2BIT BIT(5) /** + * struct rockchip_iomux * @type: iomux variant using IOMUX_* constants * @offset: if initialized to -1 it will be autocalculated, by specifying * an initial offset value the relevant source offset can be reset @@ -84,7 +85,7 @@ struct rockchip_iomux { int offset; }; -/** +/* * enum type index corresponding to rockchip_perpin_drv_list arrays index. */ enum rockchip_pin_drv_type { @@ -96,7 +97,7 @@ enum rockchip_pin_drv_type { DRV_TYPE_MAX }; -/** +/* * enum type index corresponding to rockchip_pull_list arrays index. */ enum rockchip_pin_pull_type { @@ -106,6 +107,7 @@ enum rockchip_pin_pull_type { }; /** + * struct rockchip_drv * @drv_type: drive strength variant using rockchip_perpin_drv_type * @offset: if initialized to -1 it will be autocalculated, by specifying * an initial offset value the relevant source offset can be reset @@ -119,8 +121,9 @@ struct rockchip_drv { }; /** + * struct rockchip_pin_bank * @reg_base: register base of the gpio bank - * @reg_pull: optional separate register for additional pull settings + * @regmap_pull: optional separate register for additional pull settings * @clk: clock of the gpio bank * @irq: interrupt of the gpio bank * @saved_masks: Saved content of GPIO_INTEN at suspend time. @@ -138,6 +141,8 @@ struct rockchip_drv { * @gpio_chip: gpiolib chip * @grange: gpio range * @slock: spinlock for the gpio bank + * @toggle_edge_mode: bit mask to toggle (falling/rising) edge mode + * @recalced_mask: bit mask to indicate a need to recalulate the mask * @route_mask: bits describing the routing pins of per bank */ struct rockchip_pin_bank { @@ -312,6 +317,7 @@ enum rockchip_mux_route_location { * @bank_num: bank number. * @pin: index at register or used to calc index. * @func: the min pin. + * @route_location: the mux route location (same, pmu, grf). * @route_offset: the max pin. * @route_val: the register offset. */ @@ -324,8 +330,6 @@ struct rockchip_mux_route_data { u32 route_val; }; -/** - */ struct rockchip_pin_ctrl { struct rockchip_pin_bank *pin_banks; u32 nr_banks; @@ -363,9 +367,7 @@ struct rockchip_pin_config { * @name: name of the pin group, used to lookup the group. * @pins: the pins included in this group. * @npins: number of pins included in this group. - * @func: the mux function number to be programmed when selected. - * @configs: the config values to be set for each pin - * @nconfigs: number of configs for each pin + * @data: local pin configuration */ struct rockchip_pin_group { const char *name; @@ -378,7 +380,7 @@ struct rockchip_pin_group { * struct rockchip_pmx_func: represent a pin function. * @name: name of the pin function, used to lookup the function. * @groups: one or more names of pin groups that provide this function. - * @num_groups: number of groups included in @groups. + * @ngroups: number of groups included in @groups. 
*/ struct rockchip_pmx_func { const char *name; diff --git a/drivers/pinctrl/pinctrl-rza1.c b/drivers/pinctrl/pinctrl-rza1.c index 38a14bbced5f..511f232ab7bc 100644 --- a/drivers/pinctrl/pinctrl-rza1.c +++ b/drivers/pinctrl/pinctrl-rza1.c @@ -75,7 +75,7 @@ * RZ/A1 pinmux flags */ -/** +/* * rza1_bidir_pin - describe a single pin that needs bidir flag applied. */ struct rza1_bidir_pin { @@ -83,7 +83,7 @@ struct rza1_bidir_pin { u8 func: 4; }; -/** +/* * rza1_bidir_entry - describe a list of pins that needs bidir flag applied. * Each struct rza1_bidir_entry describes a port. */ @@ -92,7 +92,7 @@ struct rza1_bidir_entry { const struct rza1_bidir_pin *pins; }; -/** +/* * rza1_swio_pin - describe a single pin that needs swio flag applied. */ struct rza1_swio_pin { @@ -102,7 +102,7 @@ struct rza1_swio_pin { u16 input: 1; }; -/** +/* * rza1_swio_entry - describe a list of pins that needs swio flag applied */ struct rza1_swio_entry { @@ -110,7 +110,7 @@ struct rza1_swio_entry { const struct rza1_swio_pin *pins; }; -/** +/* * rza1_pinmux_conf - group together bidir and swio pinmux flag tables */ struct rza1_pinmux_conf { @@ -431,7 +431,7 @@ static const struct rza1_pinmux_conf rza1l_pmx_conf = { * RZ/A1 types */ /** - * rza1_mux_conf - describes a pin multiplexing operation + * struct rza1_mux_conf - describes a pin multiplexing operation * * @id: the pin identifier from 0 to RZA1_NPINS * @port: the port where pin sits on @@ -450,7 +450,7 @@ struct rza1_mux_conf { }; /** - * rza1_port - describes a pin port + * struct rza1_port - describes a pin port * * This is mostly useful to lock register writes per-bank and not globally. * @@ -467,12 +467,12 @@ struct rza1_port { }; /** - * rza1_pinctrl - RZ pincontroller device + * struct rza1_pinctrl - RZ pincontroller device * * @dev: parent device structure * @mutex: protect [pinctrl|pinmux]_generic functions * @base: logical address base - * @nports: number of pin controller ports + * @nport: number of pin controller ports * @ports: pin controller banks * @pins: pin array for pinctrl core * @desc: pincontroller desc for pinctrl core @@ -536,7 +536,7 @@ static inline int rza1_pinmux_get_swio(unsigned int port, return -ENOENT; } -/** +/* * rza1_pinmux_get_flags() - return pinmux flags associated to a pin */ static unsigned int rza1_pinmux_get_flags(unsigned int port, unsigned int pin, @@ -566,7 +566,7 @@ static unsigned int rza1_pinmux_get_flags(unsigned int port, unsigned int pin, * RZ/A1 SoC operations */ -/** +/* * rza1_set_bit() - un-locked set/clear a single bit in pin configuration * registers */ @@ -664,7 +664,7 @@ static inline int rza1_pin_get(struct rza1_port *port, unsigned int pin) /** * rza1_pin_mux_single() - configure pin multiplexing on a single pin * - * @pinctrl: RZ/A1 pin controller device + * @rza1_pctl: RZ/A1 pin controller device * @mux_conf: pin multiplexing descriptor */ static int rza1_pin_mux_single(struct rza1_pinctrl *rza1_pctl, diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index f3a8a465d27e..efe41abc5d47 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c @@ -42,6 +42,7 @@ * struct pcs_func_vals - mux function register offset and value pair * @reg: register virtual address * @val: register value + * @mask: mask */ struct pcs_func_vals { void __iomem *reg; @@ -83,6 +84,8 @@ struct pcs_conf_type { * @nvals: number of entries in vals array * @pgnames: array of pingroup names the function uses * @npgnames: number of pingroup names the function uses + * @conf: array 
of pin configurations + * @nconfs: number of pin configurations available * @node: list node */ struct pcs_function { @@ -560,7 +563,7 @@ static int pcs_pinconf_set(struct pinctrl_dev *pctldev, case PIN_CONFIG_BIAS_PULL_UP: if (arg) pcs_pinconf_clear_bias(pctldev, pin); - /* fall through */ + fallthrough; case PIN_CONFIG_INPUT_SCHMITT_ENABLE: data &= ~func->conf[i].mask; if (arg) @@ -653,6 +656,7 @@ static const struct pinconf_ops pcs_pinconf_ops = { * pcs_add_pin() - add a pin to the static per controller pin array * @pcs: pcs driver instance * @offset: register offset from base + * @pin_pos: unused */ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset, unsigned pin_pos) @@ -916,7 +920,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, /* If pinconf isn't supported, don't parse properties in below. */ if (!PCS_HAS_PINCONF) - return 0; + return -ENOTSUPP; /* cacluate how much properties are supported in current node */ for (i = 0; i < ARRAY_SIZE(prop2); i++) { @@ -928,7 +932,7 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, nconfs++; } if (!nconfs) - return 0; + return -ENOTSUPP; func->conf = devm_kcalloc(pcs->dev, nconfs, sizeof(struct pcs_conf_vals), @@ -959,7 +963,6 @@ static int pcs_parse_pinconf(struct pcs_device *pcs, struct device_node *np, /** * pcs_parse_one_pinctrl_entry() - parses a device tree mux entry - * @pctldev: pin controller device * @pcs: pinctrl driver instance * @np: device node of the mux entry * @map: map entry @@ -1017,10 +1020,17 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, break; } - /* Index plus one value cell */ offset = pinctrl_spec.args[0]; vals[found].reg = pcs->base + offset; - vals[found].val = pinctrl_spec.args[1]; + + switch (pinctrl_spec.args_count) { + case 2: + vals[found].val = pinctrl_spec.args[1]; + break; + case 3: + vals[found].val = (pinctrl_spec.args[1] | pinctrl_spec.args[2]); + break; + } dev_dbg(pcs->dev, "%pOFn index: 0x%x value: 0x%x\n", pinctrl_spec.np, offset, pinctrl_spec.args[1]); @@ -1056,9 +1066,12 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, if (PCS_HAS_PINCONF && function) { res = pcs_parse_pinconf(pcs, np, function, map); - if (res) + if (res == 0) + *num_maps = 2; + else if (res == -ENOTSUPP) + *num_maps = 1; + else goto free_pingroups; - *num_maps = 2; } else { *num_maps = 1; } @@ -1343,7 +1356,9 @@ static int pcs_add_gpio_func(struct device_node *node, struct pcs_device *pcs) } return ret; } + /** + * struct pcs_interrupt * @reg: virtual address of interrupt register * @hwirq: hardware irq number * @irq: virtual irq number @@ -1358,6 +1373,9 @@ struct pcs_interrupt { /** * pcs_irq_set() - enables or disables an interrupt + * @pcs_soc: SoC specific settings + * @irq: interrupt + * @enable: enable or disable the interrupt * * Note that this currently assumes one interrupt per pinctrl * register that is typically used for wake-up events. @@ -1438,7 +1456,7 @@ static int pcs_irq_set_wake(struct irq_data *d, unsigned int state) /** * pcs_irq_handle() - common interrupt handler - * @pcs_irq: interrupt data + * @pcs_soc: SoC specific settings * * Note that this currently assumes we have one interrupt bit per * mux register. This interrupt is typically used for wake-up events. 
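The pcs_parse_one_pinctrl_entry() hunk above now accepts both the classic two-cell <offset value> form and a three-cell form in which the two value cells are simply OR'd together before being written (while pcs_parse_pinconf() returning -ENOTSUPP lets the caller fall back to a single map). A standalone illustration of the value folding, with made-up cell contents:

#include <stdio.h>

/* Illustration only: fold a 2- or 3-cell pinctrl-single,pins entry
 * (register offset plus one or two value cells) into one register value. */
static unsigned int pcs_fold_entry(const unsigned int *cells, int ncells)
{
	switch (ncells) {
	case 2:
		return cells[1];		/* <offset value> */
	case 3:
		return cells[1] | cells[2];	/* <offset val1 val2>, OR'd together */
	default:
		return 0;			/* malformed entry */
	}
}

int main(void)
{
	unsigned int two[]   = { 0x40, 0x0107 };
	unsigned int three[] = { 0x40, 0x0100, 0x7 };	/* e.g. conf and mux cells kept separate */

	printf("0x%x 0x%x\n", pcs_fold_entry(two, 2), pcs_fold_entry(three, 3));
	return 0;
}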
@@ -1486,7 +1504,6 @@ static irqreturn_t pcs_irq_handler(int irq, void *d) /** * pcs_irq_handle() - handler for the dedicated chained interrupt case - * @irq: interrupt * @desc: interrupt descriptor * * Use this if you have a separate interrupt for each diff --git a/drivers/pinctrl/pinctrl-stmfx.c b/drivers/pinctrl/pinctrl-stmfx.c index 1aae803c12cd..008c83107a3c 100644 --- a/drivers/pinctrl/pinctrl-stmfx.c +++ b/drivers/pinctrl/pinctrl-stmfx.c @@ -616,6 +616,7 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) struct stmfx *stmfx = dev_get_drvdata(pdev->dev.parent); struct device_node *np = pdev->dev.of_node; struct stmfx_pinctrl *pctl; + struct gpio_irq_chip *girq; int irq, ret; pctl = devm_kzalloc(stmfx->dev, sizeof(*pctl), GFP_KERNEL); @@ -674,16 +675,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) pctl->gpio_chip.can_sleep = true; pctl->gpio_chip.of_node = np; - ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl); - if (ret) { - dev_err(pctl->dev, "gpio_chip registration failed\n"); - return ret; - } - - ret = stmfx_pinctrl_gpio_function_enable(pctl); - if (ret) - return ret; - pctl->irq_chip.name = dev_name(pctl->dev); pctl->irq_chip.irq_mask = stmfx_pinctrl_irq_mask; pctl->irq_chip.irq_unmask = stmfx_pinctrl_irq_unmask; @@ -693,13 +684,26 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) pctl->irq_chip.irq_request_resources = stmfx_gpio_irq_request_resources; pctl->irq_chip.irq_release_resources = stmfx_gpio_irq_release_resources; - ret = gpiochip_irqchip_add_nested(&pctl->gpio_chip, &pctl->irq_chip, - 0, handle_bad_irq, IRQ_TYPE_NONE); + girq = &pctl->gpio_chip.irq; + girq->chip = &pctl->irq_chip; + /* This will let us handle the parent IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + girq->threaded = true; + + ret = devm_gpiochip_add_data(pctl->dev, &pctl->gpio_chip, pctl); if (ret) { - dev_err(pctl->dev, "cannot add irqchip to gpiochip\n"); + dev_err(pctl->dev, "gpio_chip registration failed\n"); return ret; } + ret = stmfx_pinctrl_gpio_function_enable(pctl); + if (ret) + return ret; + ret = devm_request_threaded_irq(pctl->dev, irq, NULL, stmfx_pinctrl_irq_thread_fn, IRQF_ONESHOT, @@ -709,8 +713,6 @@ static int stmfx_pinctrl_probe(struct platform_device *pdev) return ret; } - gpiochip_set_nested_irqchip(&pctl->gpio_chip, &pctl->irq_chip, irq); - dev_info(pctl->dev, "%ld GPIOs available\n", hweight_long(pctl->gpio_valid_mask)); diff --git a/drivers/pinctrl/pinctrl-sx150x.c b/drivers/pinctrl/pinctrl-sx150x.c index 708bc91862fe..b325a136ac48 100644 --- a/drivers/pinctrl/pinctrl-sx150x.c +++ b/drivers/pinctrl/pinctrl-sx150x.c @@ -1187,17 +1187,10 @@ static int sx150x_probe(struct i2c_client *client, if (pctl->data->model != SX150X_789) pctl->gpio.set_multiple = sx150x_gpio_set_multiple; - ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl); - if (ret) - return ret; - - ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev), - 0, 0, pctl->data->npins); - if (ret) - return ret; - /* Add Interrupt support if an irq is specified */ if (client->irq > 0) { + struct gpio_irq_chip *girq; + pctl->irq_chip.irq_mask = sx150x_irq_mask; pctl->irq_chip.irq_unmask = sx150x_irq_unmask; pctl->irq_chip.irq_set_type = sx150x_irq_set_type; @@ -1213,8 +1206,8 @@ static int sx150x_probe(struct i2c_client *client, /* * Because sx150x_irq_threaded_fn invokes all of the - * nested interrrupt handlers via handle_nested_irq, - * 
any "handler" passed to gpiochip_irqchip_add() + * nested interrupt handlers via handle_nested_irq, + * any "handler" assigned to struct gpio_irq_chip * below is going to be ignored, so the choice of the * function does not matter that much. * @@ -1222,13 +1215,15 @@ static int sx150x_probe(struct i2c_client *client, * plus it will be instantly noticeable if it is ever * called (should not happen) */ - ret = gpiochip_irqchip_add_nested(&pctl->gpio, - &pctl->irq_chip, 0, - handle_bad_irq, IRQ_TYPE_NONE); - if (ret) { - dev_err(dev, "could not connect irqchip to gpiochip\n"); - return ret; - } + girq = &pctl->gpio.irq; + girq->chip = &pctl->irq_chip; + /* This will let us handle the parent IRQ in the driver */ + girq->parent_handler = NULL; + girq->num_parents = 0; + girq->parents = NULL; + girq->default_type = IRQ_TYPE_NONE; + girq->handler = handle_bad_irq; + girq->threaded = true; ret = devm_request_threaded_irq(dev, client->irq, NULL, sx150x_irq_thread_fn, @@ -1237,12 +1232,17 @@ static int sx150x_probe(struct i2c_client *client, pctl->irq_chip.name, pctl); if (ret < 0) return ret; - - gpiochip_set_nested_irqchip(&pctl->gpio, - &pctl->irq_chip, - client->irq); } + ret = devm_gpiochip_add_data(dev, &pctl->gpio, pctl); + if (ret) + return ret; + + ret = gpiochip_add_pin_range(&pctl->gpio, dev_name(dev), + 0, 0, pctl->data->npins); + if (ret) + return ret; + return 0; } diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 9503ddf2edc7..bab888fe3f8e 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c @@ -74,6 +74,7 @@ int pinmux_validate_map(const struct pinctrl_map *map, int i) * pinmux_can_be_used_for_gpio() - check if a specific pin * is either muxed to a different function or used as gpio. * + * @pctldev: the associated pin controller device * @pin: the pin number in the global pin space * * Controllers not defined as strict will always return true, @@ -96,6 +97,7 @@ bool pinmux_can_be_used_for_gpio(struct pinctrl_dev *pctldev, unsigned pin) /** * pin_request() - request a single pin to be muxed in, typically for GPIO + * @pctldev: the associated pin controller device * @pin: the pin number in the global pin space * @owner: a representation of the owner of this pin; typically the device * name that controls its mux function, or the requested GPIO name @@ -254,6 +256,7 @@ static const char *pin_free(struct pinctrl_dev *pctldev, int pin, * @pctldev: pin controller device affected * @pin: the pin to mux in for GPIO * @range: the applicable GPIO range + * @gpio: number of requested GPIO */ int pinmux_request_gpio(struct pinctrl_dev *pctldev, struct pinctrl_gpio_range *range, @@ -744,7 +747,7 @@ EXPORT_SYMBOL_GPL(pinmux_generic_get_function_groups); /** * pinmux_generic_get_function() - returns a function based on the number * @pctldev: pin controller device - * @group_selector: function number + * @selector: function number */ struct function_desc *pinmux_generic_get_function(struct pinctrl_dev *pctldev, unsigned int selector) diff --git a/drivers/pinctrl/qcom/pinctrl-ipq4019.c b/drivers/pinctrl/qcom/pinctrl-ipq4019.c index 8bdb5bd393d2..63915cb210ff 100644 --- a/drivers/pinctrl/qcom/pinctrl-ipq4019.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq4019.c @@ -254,6 +254,7 @@ DECLARE_QCA_GPIO_PINS(99); .mux_bit = 2, \ .pull_bit = 0, \ .drv_bit = 6, \ + .od_bit = 12, \ .oe_bit = 9, \ .in_bit = 0, \ .out_bit = 1, \ diff --git a/drivers/pinctrl/qcom/pinctrl-ipq8074.c b/drivers/pinctrl/qcom/pinctrl-ipq8074.c index 0edd41cdc64f..aec68b1c9f53 100644 --- 
a/drivers/pinctrl/qcom/pinctrl-ipq8074.c +++ b/drivers/pinctrl/qcom/pinctrl-ipq8074.c @@ -50,6 +50,7 @@ .intr_enable_bit = 0, \ .intr_status_bit = 0, \ .intr_target_bit = 5, \ + .intr_target_kpss_val = 3, \ .intr_raw_status_bit = 4, \ .intr_polarity_bit = 1, \ .intr_detection_bit = 2, \ diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index c322f30a2064..a2567e772cd5 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -40,16 +40,20 @@ * @dev: device handle. * @pctrl: pinctrl handle. * @chip: gpiochip handle. + * @desc: pin controller descriptor * @restart_nb: restart notifier block. + * @irq_chip: irq chip information * @irq: parent irq for the TLMM irq_chip. + * @intr_target_use_scm: route irq to application cpu using scm calls * @lock: Spinlock to protect register resources as well * as msm_pinctrl data structures. * @enabled_irqs: Bitmap of currently enabled irqs. * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge * detection. * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller - * @soc; Reference to soc_data of platform specific data. + * @soc: Reference to soc_data of platform specific data. * @regs: Base addresses for the TLMM tiles. + * @phys_base: Physical base address */ struct msm_pinctrl { struct device *dev; @@ -233,6 +237,10 @@ static int msm_config_reg(struct msm_pinctrl *pctrl, *bit = g->pull_bit; *mask = 3; break; + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + *bit = g->od_bit; + *mask = 1; + break; case PIN_CONFIG_DRIVE_STRENGTH: *bit = g->drv_bit; *mask = 7; @@ -310,6 +318,12 @@ static int msm_config_group_get(struct pinctrl_dev *pctldev, if (!arg) return -EINVAL; break; + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + /* Pin is not open-drain */ + if (!arg) + return -EINVAL; + arg = 1; + break; case PIN_CONFIG_DRIVE_STRENGTH: arg = msm_regval_to_drive(arg); break; @@ -382,6 +396,9 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, else arg = MSM_PULL_UP; break; + case PIN_CONFIG_DRIVE_OPEN_DRAIN: + arg = 1; + break; case PIN_CONFIG_DRIVE_STRENGTH: /* Check for invalid values */ if (arg > 16 || arg < 2 || (arg % 2) != 0) diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 7486fe08eb9b..333f99243c43 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -38,6 +38,7 @@ struct msm_function { * @mux_bit: Offset in @ctl_reg for the pinmux function selection. * @pull_bit: Offset in @ctl_reg for the bias configuration. * @drv_bit: Offset in @ctl_reg for the drive strength configuration. + * @od_bit: Offset in @ctl_reg for controlling open drain. * @oe_bit: Offset in @ctl_reg for controlling output enable. * @in_bit: Offset in @io_reg for the input bit value. * @out_bit: Offset in @io_reg for the output bit value. 
@@ -75,6 +76,7 @@ struct msm_pingroup { unsigned pull_bit:5; unsigned drv_bit:5; + unsigned od_bit:5; unsigned oe_bit:5; unsigned in_bit:5; unsigned out_bit:5; diff --git a/drivers/pinctrl/qcom/pinctrl-msm8976.c b/drivers/pinctrl/qcom/pinctrl-msm8976.c index 183f0b2d9f8e..ec43edf9b660 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm8976.c +++ b/drivers/pinctrl/qcom/pinctrl-msm8976.c @@ -799,9 +799,6 @@ static const char * const pa_indicator_groups[] = { static const char * const modem_tsync_groups[] = { "gpio93", }; -static const char * const nav_tsync_groups[] = { - "gpio93", -}; static const char * const ssbi_wtr1_groups[] = { "gpio79", "gpio94", }; diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c index 092a48e4dff5..17441388ce8f 100644 --- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c @@ -794,13 +794,13 @@ static int pmic_gpio_populate(struct pmic_gpio_state *state, switch (subtype) { case PMIC_GPIO_SUBTYPE_GPIO_4CH: pad->have_buffer = true; - /* Fall through */ + fallthrough; case PMIC_GPIO_SUBTYPE_GPIOC_4CH: pad->num_sources = 4; break; case PMIC_GPIO_SUBTYPE_GPIO_8CH: pad->have_buffer = true; - /* Fall through */ + fallthrough; case PMIC_GPIO_SUBTYPE_GPIOC_8CH: pad->num_sources = 8; break; @@ -1117,6 +1117,10 @@ static const struct of_device_id pmic_gpio_of_match[] = { { .compatible = "qcom,pma8084-gpio", .data = (void *) 22 }, /* pms405 has 12 GPIOs with holes on 1, 9, and 10 */ { .compatible = "qcom,pms405-gpio", .data = (void *) 12 }, + /* pm660 has 13 GPIOs with holes on 1, 5, 6, 7, 8 and 10 */ + { .compatible = "qcom,pm660-gpio", .data = (void *) 13 }, + /* pm660l has 12 GPIOs with holes on 1, 2, 10, 11 and 12 */ + { .compatible = "qcom,pm660l-gpio", .data = (void *) 12 }, /* pm8150 has 10 GPIOs with holes on 2, 5, 7 and 8 */ { .compatible = "qcom,pm8150-gpio", .data = (void *) 10 }, /* pm8150b has 12 GPIOs with holes on 3, r and 7 */ diff --git a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c index 338a15d08629..b5949f766a7a 100644 --- a/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c +++ b/drivers/pinctrl/qcom/pinctrl-ssbi-gpio.c @@ -346,7 +346,7 @@ static int pm8xxx_pin_config_set(struct pinctrl_dev *pctldev, return -EINVAL; } pin->pull_up_strength = arg; - /* FALLTHROUGH */ + fallthrough; case PIN_CONFIG_BIAS_PULL_UP: pin->bias = pin->pull_up_strength; banks |= BIT(2); diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c index 84501c785473..b9ea09fabf84 100644 --- a/drivers/pinctrl/samsung/pinctrl-exynos.c +++ b/drivers/pinctrl/samsung/pinctrl-exynos.c @@ -38,7 +38,7 @@ struct exynos_irq_chip { u32 eint_con; u32 eint_mask; u32 eint_pend; - u32 eint_wake_mask_value; + u32 *eint_wake_mask_value; u32 eint_wake_mask_reg; void (*set_eint_wakeup_mask)(struct samsung_pinctrl_drv_data *drvdata, struct exynos_irq_chip *irq_chip); @@ -207,7 +207,7 @@ static void exynos_irq_release_resources(struct irq_data *irqd) /* * irq_chip for gpio interrupts. */ -static struct exynos_irq_chip exynos_gpio_irq_chip = { +static const struct exynos_irq_chip exynos_gpio_irq_chip __initconst = { .chip = { .name = "exynos_gpio_irq_chip", .irq_unmask = exynos_irq_unmask, @@ -274,7 +274,7 @@ struct exynos_eint_gpio_save { * exynos_eint_gpio_init() - setup handling of external gpio interrupts. * @d: driver data of samsung pinctrl driver. 
*/ -int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) +__init int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) { struct samsung_pin_bank *bank; struct device *dev = d->dev; @@ -297,6 +297,15 @@ int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) for (i = 0; i < d->nr_banks; ++i, ++bank) { if (bank->eint_type != EINT_TYPE_GPIO) continue; + + bank->irq_chip = devm_kmemdup(dev, &exynos_gpio_irq_chip, + sizeof(*bank->irq_chip), GFP_KERNEL); + if (!bank->irq_chip) { + ret = -ENOMEM; + goto err_domains; + } + bank->irq_chip->chip.name = bank->name; + bank->irq_domain = irq_domain_add_linear(bank->of_node, bank->nr_pins, &exynos_eint_irqd_ops, bank); if (!bank->irq_domain) { @@ -313,7 +322,6 @@ int exynos_eint_gpio_init(struct samsung_pinctrl_drv_data *d) goto err_domains; } - bank->irq_chip = &exynos_gpio_irq_chip; } return 0; @@ -338,9 +346,9 @@ static int exynos_wkup_irq_set_wake(struct irq_data *irqd, unsigned int on) pr_info("wake %s for irq %d\n", on ? "enabled" : "disabled", irqd->irq); if (!on) - our_chip->eint_wake_mask_value |= bit; + *our_chip->eint_wake_mask_value |= bit; else - our_chip->eint_wake_mask_value &= ~bit; + *our_chip->eint_wake_mask_value &= ~bit; return 0; } @@ -360,10 +368,10 @@ exynos_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, pmu_regs = drvdata->retention_ctrl->priv; dev_info(drvdata->dev, "Setting external wakeup interrupt mask: 0x%x\n", - irq_chip->eint_wake_mask_value); + *irq_chip->eint_wake_mask_value); regmap_write(pmu_regs, irq_chip->eint_wake_mask_reg, - irq_chip->eint_wake_mask_value); + *irq_chip->eint_wake_mask_value); } static void @@ -382,10 +390,11 @@ s5pv210_pinctrl_set_eint_wakeup_mask(struct samsung_pinctrl_drv_data *drvdata, clk_base = (void __iomem *) drvdata->retention_ctrl->priv; - __raw_writel(irq_chip->eint_wake_mask_value, + __raw_writel(*irq_chip->eint_wake_mask_value, clk_base + irq_chip->eint_wake_mask_reg); } +static u32 eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED; /* * irq_chip for wakeup interrupts */ @@ -403,7 +412,7 @@ static const struct exynos_irq_chip s5pv210_wkup_irq_chip __initconst = { .eint_con = EXYNOS_WKUP_ECON_OFFSET, .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, + .eint_wake_mask_value = &eint_wake_mask_value, /* Only differences with exynos4210_wkup_irq_chip: */ .eint_wake_mask_reg = S5PV210_EINT_WAKEUP_MASK, .set_eint_wakeup_mask = s5pv210_pinctrl_set_eint_wakeup_mask, @@ -423,7 +432,7 @@ static const struct exynos_irq_chip exynos4210_wkup_irq_chip __initconst = { .eint_con = EXYNOS_WKUP_ECON_OFFSET, .eint_mask = EXYNOS_WKUP_EMASK_OFFSET, .eint_pend = EXYNOS_WKUP_EPEND_OFFSET, - .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, + .eint_wake_mask_value = &eint_wake_mask_value, .eint_wake_mask_reg = EXYNOS_EINT_WAKEUP_MASK, .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, }; @@ -442,7 +451,7 @@ static const struct exynos_irq_chip exynos7_wkup_irq_chip __initconst = { .eint_con = EXYNOS7_WKUP_ECON_OFFSET, .eint_mask = EXYNOS7_WKUP_EMASK_OFFSET, .eint_pend = EXYNOS7_WKUP_EPEND_OFFSET, - .eint_wake_mask_value = EXYNOS_EINT_WAKEUP_MASK_DISABLED, + .eint_wake_mask_value = &eint_wake_mask_value, .eint_wake_mask_reg = EXYNOS5433_EINT_WAKEUP_MASK, .set_eint_wakeup_mask = exynos_pinctrl_set_eint_wakeup_mask, }; @@ -513,7 +522,7 @@ static void exynos_irq_demux_eint16_31(struct irq_desc *desc) * exynos_eint_wkup_init() - setup handling of external 
wakeup interrupts. * @d: driver data of samsung pinctrl driver. */ -int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) +__init int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) { struct device *dev = d->dev; struct device_node *wkup_np = NULL; @@ -521,7 +530,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) struct samsung_pin_bank *bank; struct exynos_weint_data *weint_data; struct exynos_muxed_weint_data *muxed_data; - struct exynos_irq_chip *irq_chip; + const struct exynos_irq_chip *irq_chip; unsigned int muxed_banks = 0; unsigned int i; int idx, irq; @@ -531,12 +540,7 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) match = of_match_node(exynos_wkup_irq_ids, np); if (match) { - irq_chip = kmemdup(match->data, - sizeof(*irq_chip), GFP_KERNEL); - if (!irq_chip) { - of_node_put(np); - return -ENOMEM; - } + irq_chip = match->data; wkup_np = np; break; } @@ -549,6 +553,14 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) if (bank->eint_type != EINT_TYPE_WKUP) continue; + bank->irq_chip = devm_kmemdup(dev, irq_chip, sizeof(*irq_chip), + GFP_KERNEL); + if (!bank->irq_chip) { + of_node_put(wkup_np); + return -ENOMEM; + } + bank->irq_chip->chip.name = bank->name; + bank->irq_domain = irq_domain_add_linear(bank->of_node, bank->nr_pins, &exynos_eint_irqd_ops, bank); if (!bank->irq_domain) { @@ -557,8 +569,6 @@ int exynos_eint_wkup_init(struct samsung_pinctrl_drv_data *d) return -ENXIO; } - bank->irq_chip = irq_chip; - if (!of_find_property(bank->of_node, "interrupts", NULL)) { bank->eint_type = EINT_TYPE_WKUP_MUX; ++muxed_banks; @@ -657,10 +667,6 @@ void exynos_pinctrl_suspend(struct samsung_pinctrl_drv_data *drvdata) irq_chip = bank->irq_chip; irq_chip->set_eint_wakeup_mask(drvdata, irq_chip); - } else if (bank->irq_chip != irq_chip) { - dev_warn(drvdata->dev, - "More than one external wakeup interrupt chip configured (bank: %s). 
This is not supported by hardware nor by driver.\n", - bank->name); } } } diff --git a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c index 9bd0a3de101d..5e24838a582f 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c24xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c24xx.c @@ -80,7 +80,7 @@ static const struct samsung_pin_bank_type bank_type_2bit = { } /** - * struct s3c24xx_eint_data: EINT common data + * struct s3c24xx_eint_data - EINT common data * @drvdata: pin controller driver data * @domains: IRQ domains of particular EINT interrupts * @parents: mapped parent irqs in the main interrupt controller @@ -92,10 +92,10 @@ struct s3c24xx_eint_data { }; /** - * struct s3c24xx_eint_domain_data: per irq-domain data + * struct s3c24xx_eint_domain_data - per irq-domain data * @bank: pin bank related to the domain * @eint_data: common data - * eint0_3_parent_only: live eints 0-3 only in the main intc + * @eint0_3_parent_only: live eints 0-3 only in the main intc */ struct s3c24xx_eint_domain_data { struct samsung_pin_bank *bank; diff --git a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c index f97f8179f2b1..b8166e3fe4ce 100644 --- a/drivers/pinctrl/samsung/pinctrl-s3c64xx.c +++ b/drivers/pinctrl/samsung/pinctrl-s3c64xx.c @@ -193,7 +193,7 @@ static const struct samsung_pin_bank_type bank_type_2bit_alive = { } /** - * struct s3c64xx_eint0_data: EINT0 common data + * struct s3c64xx_eint0_data - EINT0 common data * @drvdata: pin controller driver data * @domains: IRQ domains of particular EINT0 interrupts * @pins: pin offsets inside of banks of particular EINT0 interrupts @@ -205,7 +205,7 @@ struct s3c64xx_eint0_data { }; /** - * struct s3c64xx_eint0_domain_data: EINT0 per-domain data + * struct s3c64xx_eint0_domain_data - EINT0 per-domain data * @bank: pin bank related to the domain * @eints: EINT0 interrupts related to the domain */ @@ -215,7 +215,7 @@ struct s3c64xx_eint0_domain_data { }; /** - * struct s3c64xx_eint_gpio_data: GPIO EINT data + * struct s3c64xx_eint_gpio_data - GPIO EINT data * @drvdata: pin controller driver data * @domains: array of domains related to EINT interrupt groups */ diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index f26574ef234a..608eb5a07248 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c @@ -1140,7 +1140,7 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) return 0; } -/** +/* * samsung_pinctrl_suspend - save pinctrl state for suspend * * Save data for all banks handled by this device. @@ -1187,7 +1187,7 @@ static int __maybe_unused samsung_pinctrl_suspend(struct device *dev) return 0; } -/** +/* * samsung_pinctrl_resume - restore pinctrl state from suspend * * Restore one of the banks that was saved during suspend. 
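The pinctrl-exynos.c hunks above stop pointing every bank at one shared, statically named irq_chip: each bank now carries its own devm_kmemdup() copy, renamed after the bank, while the wakeup mask moves into a single shared variable that every chip references through a pointer. A minimal stand-alone sketch of that duplicate-and-rename pattern follows; the types and helpers below are simplified stand-ins for the kernel pieces (struct exynos_irq_chip, struct samsung_pin_bank, devm_kmemdup()), not the real API.

/*
 * Sketch only: model the per-bank irq_chip duplication done in
 * exynos_eint_gpio_init()/exynos_eint_wkup_init() above.  The structs
 * here are simplified stand-ins, not the kernel definitions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_irq_chip {                 /* stands in for struct exynos_irq_chip */
	char name[32];
};

struct fake_pin_bank {                 /* stands in for struct samsung_pin_bank */
	const char *name;
	struct fake_irq_chip *irq_chip;
};

/* stands in for devm_kmemdup(): one private copy of the template per bank */
static struct fake_irq_chip *chip_dup(const struct fake_irq_chip *tmpl)
{
	struct fake_irq_chip *c = malloc(sizeof(*c));

	if (c)
		memcpy(c, tmpl, sizeof(*c));
	return c;
}

int main(void)
{
	static const struct fake_irq_chip tmpl = { .name = "exynos_gpio_irq_chip" };
	struct fake_pin_bank banks[] = { { .name = "gpa0" }, { .name = "gpa1" } };
	size_t i;

	for (i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
		banks[i].irq_chip = chip_dup(&tmpl);
		if (!banks[i].irq_chip)
			return 1;
		/* per-bank name, as the patch does via bank->irq_chip->chip.name */
		snprintf(banks[i].irq_chip->name, sizeof(banks[i].irq_chip->name),
			 "%s", banks[i].name);
		printf("bank %s -> irq_chip \"%s\"\n",
		       banks[i].name, banks[i].irq_chip->name);
	}
	return 0;
}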
diff --git a/drivers/pinctrl/sh-pfc/Kconfig b/drivers/pinctrl/sh-pfc/Kconfig index c461a2f1927a..7fdc7ed8bd2e 100644 --- a/drivers/pinctrl/sh-pfc/Kconfig +++ b/drivers/pinctrl/sh-pfc/Kconfig @@ -20,6 +20,7 @@ config PINCTRL_SH_PFC select PINCTRL_PFC_R8A774A1 if ARCH_R8A774A1 select PINCTRL_PFC_R8A774B1 if ARCH_R8A774B1 select PINCTRL_PFC_R8A774C0 if ARCH_R8A774C0 + select PINCTRL_PFC_R8A774E1 if ARCH_R8A774E1 select PINCTRL_PFC_R8A7778 if ARCH_R8A7778 select PINCTRL_PFC_R8A7779 if ARCH_R8A7779 select PINCTRL_PFC_R8A7790 if ARCH_R8A7790 @@ -99,6 +100,9 @@ config PINCTRL_PFC_R8A774B1 config PINCTRL_PFC_R8A774C0 bool "RZ/G2E pin control support" if COMPILE_TEST +config PINCTRL_PFC_R8A774E1 + bool "RZ/G2H pin control support" if COMPILE_TEST + config PINCTRL_PFC_R8A7778 bool "R-Car M1A pin control support" if COMPILE_TEST diff --git a/drivers/pinctrl/sh-pfc/Makefile b/drivers/pinctrl/sh-pfc/Makefile index 3855d82069c9..7bb99187cd8e 100644 --- a/drivers/pinctrl/sh-pfc/Makefile +++ b/drivers/pinctrl/sh-pfc/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_PINCTRL_PFC_R8A77470) += pfc-r8a77470.o obj-$(CONFIG_PINCTRL_PFC_R8A774A1) += pfc-r8a7796.o obj-$(CONFIG_PINCTRL_PFC_R8A774B1) += pfc-r8a77965.o obj-$(CONFIG_PINCTRL_PFC_R8A774C0) += pfc-r8a77990.o +obj-$(CONFIG_PINCTRL_PFC_R8A774E1) += pfc-r8a77951.o obj-$(CONFIG_PINCTRL_PFC_R8A7778) += pfc-r8a7778.o obj-$(CONFIG_PINCTRL_PFC_R8A7779) += pfc-r8a7779.o obj-$(CONFIG_PINCTRL_PFC_R8A7790) += pfc-r8a7790.o diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c index f368383cba61..c528c124fb0e 100644 --- a/drivers/pinctrl/sh-pfc/core.c +++ b/drivers/pinctrl/sh-pfc/core.c @@ -533,6 +533,12 @@ static const struct of_device_id sh_pfc_of_table[] = { .data = &r8a774c0_pinmux_info, }, #endif +#ifdef CONFIG_PINCTRL_PFC_R8A774E1 + { + .compatible = "renesas,pfc-r8a774e1", + .data = &r8a774e1_pinmux_info, + }, +#endif #ifdef CONFIG_PINCTRL_PFC_R8A7778 { .compatible = "renesas,pfc-r8a7778", diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77951.c b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c index 256fab4b03d3..a94ebe0bf5d0 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77951.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77951.c @@ -4157,357 +4157,365 @@ static const unsigned int vin5_clk_mux[] = { VI5_CLK_MARK, }; -static const struct sh_pfc_pin_group pinmux_groups[] = { - SH_PFC_PIN_GROUP(audio_clk_a_a), - SH_PFC_PIN_GROUP(audio_clk_a_b), - SH_PFC_PIN_GROUP(audio_clk_a_c), - SH_PFC_PIN_GROUP(audio_clk_b_a), - SH_PFC_PIN_GROUP(audio_clk_b_b), - SH_PFC_PIN_GROUP(audio_clk_c_a), - SH_PFC_PIN_GROUP(audio_clk_c_b), - SH_PFC_PIN_GROUP(audio_clkout_a), - SH_PFC_PIN_GROUP(audio_clkout_b), - SH_PFC_PIN_GROUP(audio_clkout_c), - SH_PFC_PIN_GROUP(audio_clkout_d), - SH_PFC_PIN_GROUP(audio_clkout1_a), - SH_PFC_PIN_GROUP(audio_clkout1_b), - SH_PFC_PIN_GROUP(audio_clkout2_a), - SH_PFC_PIN_GROUP(audio_clkout2_b), - SH_PFC_PIN_GROUP(audio_clkout3_a), - SH_PFC_PIN_GROUP(audio_clkout3_b), - SH_PFC_PIN_GROUP(avb_link), - SH_PFC_PIN_GROUP(avb_magic), - SH_PFC_PIN_GROUP(avb_phy_int), - SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */ - SH_PFC_PIN_GROUP(avb_mdio), - SH_PFC_PIN_GROUP(avb_mii), - SH_PFC_PIN_GROUP(avb_avtp_pps), - SH_PFC_PIN_GROUP(avb_avtp_match_a), - SH_PFC_PIN_GROUP(avb_avtp_capture_a), - SH_PFC_PIN_GROUP(avb_avtp_match_b), - SH_PFC_PIN_GROUP(avb_avtp_capture_b), - SH_PFC_PIN_GROUP(can0_data_a), - SH_PFC_PIN_GROUP(can0_data_b), - SH_PFC_PIN_GROUP(can1_data), - SH_PFC_PIN_GROUP(can_clk), - SH_PFC_PIN_GROUP(canfd0_data_a), - SH_PFC_PIN_GROUP(canfd0_data_b), - 
SH_PFC_PIN_GROUP(canfd1_data), - SH_PFC_PIN_GROUP(drif0_ctrl_a), - SH_PFC_PIN_GROUP(drif0_data0_a), - SH_PFC_PIN_GROUP(drif0_data1_a), - SH_PFC_PIN_GROUP(drif0_ctrl_b), - SH_PFC_PIN_GROUP(drif0_data0_b), - SH_PFC_PIN_GROUP(drif0_data1_b), - SH_PFC_PIN_GROUP(drif0_ctrl_c), - SH_PFC_PIN_GROUP(drif0_data0_c), - SH_PFC_PIN_GROUP(drif0_data1_c), - SH_PFC_PIN_GROUP(drif1_ctrl_a), - SH_PFC_PIN_GROUP(drif1_data0_a), - SH_PFC_PIN_GROUP(drif1_data1_a), - SH_PFC_PIN_GROUP(drif1_ctrl_b), - SH_PFC_PIN_GROUP(drif1_data0_b), - SH_PFC_PIN_GROUP(drif1_data1_b), - SH_PFC_PIN_GROUP(drif1_ctrl_c), - SH_PFC_PIN_GROUP(drif1_data0_c), - SH_PFC_PIN_GROUP(drif1_data1_c), - SH_PFC_PIN_GROUP(drif2_ctrl_a), - SH_PFC_PIN_GROUP(drif2_data0_a), - SH_PFC_PIN_GROUP(drif2_data1_a), - SH_PFC_PIN_GROUP(drif2_ctrl_b), - SH_PFC_PIN_GROUP(drif2_data0_b), - SH_PFC_PIN_GROUP(drif2_data1_b), - SH_PFC_PIN_GROUP(drif3_ctrl_a), - SH_PFC_PIN_GROUP(drif3_data0_a), - SH_PFC_PIN_GROUP(drif3_data1_a), - SH_PFC_PIN_GROUP(drif3_ctrl_b), - SH_PFC_PIN_GROUP(drif3_data0_b), - SH_PFC_PIN_GROUP(drif3_data1_b), - SH_PFC_PIN_GROUP(du_rgb666), - SH_PFC_PIN_GROUP(du_rgb888), - SH_PFC_PIN_GROUP(du_clk_out_0), - SH_PFC_PIN_GROUP(du_clk_out_1), - SH_PFC_PIN_GROUP(du_sync), - SH_PFC_PIN_GROUP(du_oddf), - SH_PFC_PIN_GROUP(du_cde), - SH_PFC_PIN_GROUP(du_disp), - SH_PFC_PIN_GROUP(hscif0_data), - SH_PFC_PIN_GROUP(hscif0_clk), - SH_PFC_PIN_GROUP(hscif0_ctrl), - SH_PFC_PIN_GROUP(hscif1_data_a), - SH_PFC_PIN_GROUP(hscif1_clk_a), - SH_PFC_PIN_GROUP(hscif1_ctrl_a), - SH_PFC_PIN_GROUP(hscif1_data_b), - SH_PFC_PIN_GROUP(hscif1_clk_b), - SH_PFC_PIN_GROUP(hscif1_ctrl_b), - SH_PFC_PIN_GROUP(hscif2_data_a), - SH_PFC_PIN_GROUP(hscif2_clk_a), - SH_PFC_PIN_GROUP(hscif2_ctrl_a), - SH_PFC_PIN_GROUP(hscif2_data_b), - SH_PFC_PIN_GROUP(hscif2_clk_b), - SH_PFC_PIN_GROUP(hscif2_ctrl_b), - SH_PFC_PIN_GROUP(hscif2_data_c), - SH_PFC_PIN_GROUP(hscif2_clk_c), - SH_PFC_PIN_GROUP(hscif2_ctrl_c), - SH_PFC_PIN_GROUP(hscif3_data_a), - SH_PFC_PIN_GROUP(hscif3_clk), - SH_PFC_PIN_GROUP(hscif3_ctrl), - SH_PFC_PIN_GROUP(hscif3_data_b), - SH_PFC_PIN_GROUP(hscif3_data_c), - SH_PFC_PIN_GROUP(hscif3_data_d), - SH_PFC_PIN_GROUP(hscif4_data_a), - SH_PFC_PIN_GROUP(hscif4_clk), - SH_PFC_PIN_GROUP(hscif4_ctrl), - SH_PFC_PIN_GROUP(hscif4_data_b), - SH_PFC_PIN_GROUP(i2c0), - SH_PFC_PIN_GROUP(i2c1_a), - SH_PFC_PIN_GROUP(i2c1_b), - SH_PFC_PIN_GROUP(i2c2_a), - SH_PFC_PIN_GROUP(i2c2_b), - SH_PFC_PIN_GROUP(i2c3), - SH_PFC_PIN_GROUP(i2c5), - SH_PFC_PIN_GROUP(i2c6_a), - SH_PFC_PIN_GROUP(i2c6_b), - SH_PFC_PIN_GROUP(i2c6_c), - SH_PFC_PIN_GROUP(intc_ex_irq0), - SH_PFC_PIN_GROUP(intc_ex_irq1), - SH_PFC_PIN_GROUP(intc_ex_irq2), - SH_PFC_PIN_GROUP(intc_ex_irq3), - SH_PFC_PIN_GROUP(intc_ex_irq4), - SH_PFC_PIN_GROUP(intc_ex_irq5), - SH_PFC_PIN_GROUP(msiof0_clk), - SH_PFC_PIN_GROUP(msiof0_sync), - SH_PFC_PIN_GROUP(msiof0_ss1), - SH_PFC_PIN_GROUP(msiof0_ss2), - SH_PFC_PIN_GROUP(msiof0_txd), - SH_PFC_PIN_GROUP(msiof0_rxd), - SH_PFC_PIN_GROUP(msiof1_clk_a), - SH_PFC_PIN_GROUP(msiof1_sync_a), - SH_PFC_PIN_GROUP(msiof1_ss1_a), - SH_PFC_PIN_GROUP(msiof1_ss2_a), - SH_PFC_PIN_GROUP(msiof1_txd_a), - SH_PFC_PIN_GROUP(msiof1_rxd_a), - SH_PFC_PIN_GROUP(msiof1_clk_b), - SH_PFC_PIN_GROUP(msiof1_sync_b), - SH_PFC_PIN_GROUP(msiof1_ss1_b), - SH_PFC_PIN_GROUP(msiof1_ss2_b), - SH_PFC_PIN_GROUP(msiof1_txd_b), - SH_PFC_PIN_GROUP(msiof1_rxd_b), - SH_PFC_PIN_GROUP(msiof1_clk_c), - SH_PFC_PIN_GROUP(msiof1_sync_c), - SH_PFC_PIN_GROUP(msiof1_ss1_c), - SH_PFC_PIN_GROUP(msiof1_ss2_c), - SH_PFC_PIN_GROUP(msiof1_txd_c), - 
SH_PFC_PIN_GROUP(msiof1_rxd_c), - SH_PFC_PIN_GROUP(msiof1_clk_d), - SH_PFC_PIN_GROUP(msiof1_sync_d), - SH_PFC_PIN_GROUP(msiof1_ss1_d), - SH_PFC_PIN_GROUP(msiof1_ss2_d), - SH_PFC_PIN_GROUP(msiof1_txd_d), - SH_PFC_PIN_GROUP(msiof1_rxd_d), - SH_PFC_PIN_GROUP(msiof1_clk_e), - SH_PFC_PIN_GROUP(msiof1_sync_e), - SH_PFC_PIN_GROUP(msiof1_ss1_e), - SH_PFC_PIN_GROUP(msiof1_ss2_e), - SH_PFC_PIN_GROUP(msiof1_txd_e), - SH_PFC_PIN_GROUP(msiof1_rxd_e), - SH_PFC_PIN_GROUP(msiof1_clk_f), - SH_PFC_PIN_GROUP(msiof1_sync_f), - SH_PFC_PIN_GROUP(msiof1_ss1_f), - SH_PFC_PIN_GROUP(msiof1_ss2_f), - SH_PFC_PIN_GROUP(msiof1_txd_f), - SH_PFC_PIN_GROUP(msiof1_rxd_f), - SH_PFC_PIN_GROUP(msiof1_clk_g), - SH_PFC_PIN_GROUP(msiof1_sync_g), - SH_PFC_PIN_GROUP(msiof1_ss1_g), - SH_PFC_PIN_GROUP(msiof1_ss2_g), - SH_PFC_PIN_GROUP(msiof1_txd_g), - SH_PFC_PIN_GROUP(msiof1_rxd_g), - SH_PFC_PIN_GROUP(msiof2_clk_a), - SH_PFC_PIN_GROUP(msiof2_sync_a), - SH_PFC_PIN_GROUP(msiof2_ss1_a), - SH_PFC_PIN_GROUP(msiof2_ss2_a), - SH_PFC_PIN_GROUP(msiof2_txd_a), - SH_PFC_PIN_GROUP(msiof2_rxd_a), - SH_PFC_PIN_GROUP(msiof2_clk_b), - SH_PFC_PIN_GROUP(msiof2_sync_b), - SH_PFC_PIN_GROUP(msiof2_ss1_b), - SH_PFC_PIN_GROUP(msiof2_ss2_b), - SH_PFC_PIN_GROUP(msiof2_txd_b), - SH_PFC_PIN_GROUP(msiof2_rxd_b), - SH_PFC_PIN_GROUP(msiof2_clk_c), - SH_PFC_PIN_GROUP(msiof2_sync_c), - SH_PFC_PIN_GROUP(msiof2_ss1_c), - SH_PFC_PIN_GROUP(msiof2_ss2_c), - SH_PFC_PIN_GROUP(msiof2_txd_c), - SH_PFC_PIN_GROUP(msiof2_rxd_c), - SH_PFC_PIN_GROUP(msiof2_clk_d), - SH_PFC_PIN_GROUP(msiof2_sync_d), - SH_PFC_PIN_GROUP(msiof2_ss1_d), - SH_PFC_PIN_GROUP(msiof2_ss2_d), - SH_PFC_PIN_GROUP(msiof2_txd_d), - SH_PFC_PIN_GROUP(msiof2_rxd_d), - SH_PFC_PIN_GROUP(msiof3_clk_a), - SH_PFC_PIN_GROUP(msiof3_sync_a), - SH_PFC_PIN_GROUP(msiof3_ss1_a), - SH_PFC_PIN_GROUP(msiof3_ss2_a), - SH_PFC_PIN_GROUP(msiof3_txd_a), - SH_PFC_PIN_GROUP(msiof3_rxd_a), - SH_PFC_PIN_GROUP(msiof3_clk_b), - SH_PFC_PIN_GROUP(msiof3_sync_b), - SH_PFC_PIN_GROUP(msiof3_ss1_b), - SH_PFC_PIN_GROUP(msiof3_ss2_b), - SH_PFC_PIN_GROUP(msiof3_txd_b), - SH_PFC_PIN_GROUP(msiof3_rxd_b), - SH_PFC_PIN_GROUP(msiof3_clk_c), - SH_PFC_PIN_GROUP(msiof3_sync_c), - SH_PFC_PIN_GROUP(msiof3_txd_c), - SH_PFC_PIN_GROUP(msiof3_rxd_c), - SH_PFC_PIN_GROUP(msiof3_clk_d), - SH_PFC_PIN_GROUP(msiof3_sync_d), - SH_PFC_PIN_GROUP(msiof3_ss1_d), - SH_PFC_PIN_GROUP(msiof3_txd_d), - SH_PFC_PIN_GROUP(msiof3_rxd_d), - SH_PFC_PIN_GROUP(msiof3_clk_e), - SH_PFC_PIN_GROUP(msiof3_sync_e), - SH_PFC_PIN_GROUP(msiof3_ss1_e), - SH_PFC_PIN_GROUP(msiof3_ss2_e), - SH_PFC_PIN_GROUP(msiof3_txd_e), - SH_PFC_PIN_GROUP(msiof3_rxd_e), - SH_PFC_PIN_GROUP(pwm0), - SH_PFC_PIN_GROUP(pwm1_a), - SH_PFC_PIN_GROUP(pwm1_b), - SH_PFC_PIN_GROUP(pwm2_a), - SH_PFC_PIN_GROUP(pwm2_b), - SH_PFC_PIN_GROUP(pwm3_a), - SH_PFC_PIN_GROUP(pwm3_b), - SH_PFC_PIN_GROUP(pwm4_a), - SH_PFC_PIN_GROUP(pwm4_b), - SH_PFC_PIN_GROUP(pwm5_a), - SH_PFC_PIN_GROUP(pwm5_b), - SH_PFC_PIN_GROUP(pwm6_a), - SH_PFC_PIN_GROUP(pwm6_b), - SH_PFC_PIN_GROUP(sata0_devslp_a), - SH_PFC_PIN_GROUP(sata0_devslp_b), - SH_PFC_PIN_GROUP(scif0_data), - SH_PFC_PIN_GROUP(scif0_clk), - SH_PFC_PIN_GROUP(scif0_ctrl), - SH_PFC_PIN_GROUP(scif1_data_a), - SH_PFC_PIN_GROUP(scif1_clk), - SH_PFC_PIN_GROUP(scif1_ctrl), - SH_PFC_PIN_GROUP(scif1_data_b), - SH_PFC_PIN_GROUP(scif2_data_a), - SH_PFC_PIN_GROUP(scif2_clk), - SH_PFC_PIN_GROUP(scif2_data_b), - SH_PFC_PIN_GROUP(scif3_data_a), - SH_PFC_PIN_GROUP(scif3_clk), - SH_PFC_PIN_GROUP(scif3_ctrl), - SH_PFC_PIN_GROUP(scif3_data_b), - SH_PFC_PIN_GROUP(scif4_data_a), - SH_PFC_PIN_GROUP(scif4_clk_a), - 
SH_PFC_PIN_GROUP(scif4_ctrl_a), - SH_PFC_PIN_GROUP(scif4_data_b), - SH_PFC_PIN_GROUP(scif4_clk_b), - SH_PFC_PIN_GROUP(scif4_ctrl_b), - SH_PFC_PIN_GROUP(scif4_data_c), - SH_PFC_PIN_GROUP(scif4_clk_c), - SH_PFC_PIN_GROUP(scif4_ctrl_c), - SH_PFC_PIN_GROUP(scif5_data_a), - SH_PFC_PIN_GROUP(scif5_clk_a), - SH_PFC_PIN_GROUP(scif5_data_b), - SH_PFC_PIN_GROUP(scif5_clk_b), - SH_PFC_PIN_GROUP(scif_clk_a), - SH_PFC_PIN_GROUP(scif_clk_b), - SH_PFC_PIN_GROUP(sdhi0_data1), - SH_PFC_PIN_GROUP(sdhi0_data4), - SH_PFC_PIN_GROUP(sdhi0_ctrl), - SH_PFC_PIN_GROUP(sdhi0_cd), - SH_PFC_PIN_GROUP(sdhi0_wp), - SH_PFC_PIN_GROUP(sdhi1_data1), - SH_PFC_PIN_GROUP(sdhi1_data4), - SH_PFC_PIN_GROUP(sdhi1_ctrl), - SH_PFC_PIN_GROUP(sdhi1_cd), - SH_PFC_PIN_GROUP(sdhi1_wp), - SH_PFC_PIN_GROUP(sdhi2_data1), - SH_PFC_PIN_GROUP(sdhi2_data4), - SH_PFC_PIN_GROUP(sdhi2_data8), - SH_PFC_PIN_GROUP(sdhi2_ctrl), - SH_PFC_PIN_GROUP(sdhi2_cd_a), - SH_PFC_PIN_GROUP(sdhi2_wp_a), - SH_PFC_PIN_GROUP(sdhi2_cd_b), - SH_PFC_PIN_GROUP(sdhi2_wp_b), - SH_PFC_PIN_GROUP(sdhi2_ds), - SH_PFC_PIN_GROUP(sdhi3_data1), - SH_PFC_PIN_GROUP(sdhi3_data4), - SH_PFC_PIN_GROUP(sdhi3_data8), - SH_PFC_PIN_GROUP(sdhi3_ctrl), - SH_PFC_PIN_GROUP(sdhi3_cd), - SH_PFC_PIN_GROUP(sdhi3_wp), - SH_PFC_PIN_GROUP(sdhi3_ds), - SH_PFC_PIN_GROUP(ssi0_data), - SH_PFC_PIN_GROUP(ssi01239_ctrl), - SH_PFC_PIN_GROUP(ssi1_data_a), - SH_PFC_PIN_GROUP(ssi1_data_b), - SH_PFC_PIN_GROUP(ssi1_ctrl_a), - SH_PFC_PIN_GROUP(ssi1_ctrl_b), - SH_PFC_PIN_GROUP(ssi2_data_a), - SH_PFC_PIN_GROUP(ssi2_data_b), - SH_PFC_PIN_GROUP(ssi2_ctrl_a), - SH_PFC_PIN_GROUP(ssi2_ctrl_b), - SH_PFC_PIN_GROUP(ssi3_data), - SH_PFC_PIN_GROUP(ssi349_ctrl), - SH_PFC_PIN_GROUP(ssi4_data), - SH_PFC_PIN_GROUP(ssi4_ctrl), - SH_PFC_PIN_GROUP(ssi5_data), - SH_PFC_PIN_GROUP(ssi5_ctrl), - SH_PFC_PIN_GROUP(ssi6_data), - SH_PFC_PIN_GROUP(ssi6_ctrl), - SH_PFC_PIN_GROUP(ssi7_data), - SH_PFC_PIN_GROUP(ssi78_ctrl), - SH_PFC_PIN_GROUP(ssi8_data), - SH_PFC_PIN_GROUP(ssi9_data_a), - SH_PFC_PIN_GROUP(ssi9_data_b), - SH_PFC_PIN_GROUP(ssi9_ctrl_a), - SH_PFC_PIN_GROUP(ssi9_ctrl_b), - SH_PFC_PIN_GROUP(tmu_tclk1_a), - SH_PFC_PIN_GROUP(tmu_tclk1_b), - SH_PFC_PIN_GROUP(tmu_tclk2_a), - SH_PFC_PIN_GROUP(tmu_tclk2_b), - SH_PFC_PIN_GROUP(tpu_to0), - SH_PFC_PIN_GROUP(tpu_to1), - SH_PFC_PIN_GROUP(tpu_to2), - SH_PFC_PIN_GROUP(tpu_to3), - SH_PFC_PIN_GROUP(usb0), - SH_PFC_PIN_GROUP(usb1), - SH_PFC_PIN_GROUP(usb2), - SH_PFC_PIN_GROUP(usb2_ch3), - SH_PFC_PIN_GROUP(usb30), - VIN_DATA_PIN_GROUP(vin4_data, 8, _a), - VIN_DATA_PIN_GROUP(vin4_data, 10, _a), - VIN_DATA_PIN_GROUP(vin4_data, 12, _a), - VIN_DATA_PIN_GROUP(vin4_data, 16, _a), - SH_PFC_PIN_GROUP(vin4_data18_a), - VIN_DATA_PIN_GROUP(vin4_data, 20, _a), - VIN_DATA_PIN_GROUP(vin4_data, 24, _a), - VIN_DATA_PIN_GROUP(vin4_data, 8, _b), - VIN_DATA_PIN_GROUP(vin4_data, 10, _b), - VIN_DATA_PIN_GROUP(vin4_data, 12, _b), - VIN_DATA_PIN_GROUP(vin4_data, 16, _b), - SH_PFC_PIN_GROUP(vin4_data18_b), - VIN_DATA_PIN_GROUP(vin4_data, 20, _b), - VIN_DATA_PIN_GROUP(vin4_data, 24, _b), - SH_PFC_PIN_GROUP(vin4_sync), - SH_PFC_PIN_GROUP(vin4_field), - SH_PFC_PIN_GROUP(vin4_clkenb), - SH_PFC_PIN_GROUP(vin4_clk), - VIN_DATA_PIN_GROUP(vin5_data, 8), - VIN_DATA_PIN_GROUP(vin5_data, 10), - VIN_DATA_PIN_GROUP(vin5_data, 12), - VIN_DATA_PIN_GROUP(vin5_data, 16), - SH_PFC_PIN_GROUP(vin5_sync), - SH_PFC_PIN_GROUP(vin5_field), - SH_PFC_PIN_GROUP(vin5_clkenb), - SH_PFC_PIN_GROUP(vin5_clk), +static const struct { + struct sh_pfc_pin_group common[320]; + struct sh_pfc_pin_group automotive[30]; +} pinmux_groups = { + .common = { + 
SH_PFC_PIN_GROUP(audio_clk_a_a), + SH_PFC_PIN_GROUP(audio_clk_a_b), + SH_PFC_PIN_GROUP(audio_clk_a_c), + SH_PFC_PIN_GROUP(audio_clk_b_a), + SH_PFC_PIN_GROUP(audio_clk_b_b), + SH_PFC_PIN_GROUP(audio_clk_c_a), + SH_PFC_PIN_GROUP(audio_clk_c_b), + SH_PFC_PIN_GROUP(audio_clkout_a), + SH_PFC_PIN_GROUP(audio_clkout_b), + SH_PFC_PIN_GROUP(audio_clkout_c), + SH_PFC_PIN_GROUP(audio_clkout_d), + SH_PFC_PIN_GROUP(audio_clkout1_a), + SH_PFC_PIN_GROUP(audio_clkout1_b), + SH_PFC_PIN_GROUP(audio_clkout2_a), + SH_PFC_PIN_GROUP(audio_clkout2_b), + SH_PFC_PIN_GROUP(audio_clkout3_a), + SH_PFC_PIN_GROUP(audio_clkout3_b), + SH_PFC_PIN_GROUP(avb_link), + SH_PFC_PIN_GROUP(avb_magic), + SH_PFC_PIN_GROUP(avb_phy_int), + SH_PFC_PIN_GROUP_ALIAS(avb_mdc, avb_mdio), /* Deprecated */ + SH_PFC_PIN_GROUP(avb_mdio), + SH_PFC_PIN_GROUP(avb_mii), + SH_PFC_PIN_GROUP(avb_avtp_pps), + SH_PFC_PIN_GROUP(avb_avtp_match_a), + SH_PFC_PIN_GROUP(avb_avtp_capture_a), + SH_PFC_PIN_GROUP(avb_avtp_match_b), + SH_PFC_PIN_GROUP(avb_avtp_capture_b), + SH_PFC_PIN_GROUP(can0_data_a), + SH_PFC_PIN_GROUP(can0_data_b), + SH_PFC_PIN_GROUP(can1_data), + SH_PFC_PIN_GROUP(can_clk), + SH_PFC_PIN_GROUP(canfd0_data_a), + SH_PFC_PIN_GROUP(canfd0_data_b), + SH_PFC_PIN_GROUP(canfd1_data), + SH_PFC_PIN_GROUP(du_rgb666), + SH_PFC_PIN_GROUP(du_rgb888), + SH_PFC_PIN_GROUP(du_clk_out_0), + SH_PFC_PIN_GROUP(du_clk_out_1), + SH_PFC_PIN_GROUP(du_sync), + SH_PFC_PIN_GROUP(du_oddf), + SH_PFC_PIN_GROUP(du_cde), + SH_PFC_PIN_GROUP(du_disp), + SH_PFC_PIN_GROUP(hscif0_data), + SH_PFC_PIN_GROUP(hscif0_clk), + SH_PFC_PIN_GROUP(hscif0_ctrl), + SH_PFC_PIN_GROUP(hscif1_data_a), + SH_PFC_PIN_GROUP(hscif1_clk_a), + SH_PFC_PIN_GROUP(hscif1_ctrl_a), + SH_PFC_PIN_GROUP(hscif1_data_b), + SH_PFC_PIN_GROUP(hscif1_clk_b), + SH_PFC_PIN_GROUP(hscif1_ctrl_b), + SH_PFC_PIN_GROUP(hscif2_data_a), + SH_PFC_PIN_GROUP(hscif2_clk_a), + SH_PFC_PIN_GROUP(hscif2_ctrl_a), + SH_PFC_PIN_GROUP(hscif2_data_b), + SH_PFC_PIN_GROUP(hscif2_clk_b), + SH_PFC_PIN_GROUP(hscif2_ctrl_b), + SH_PFC_PIN_GROUP(hscif2_data_c), + SH_PFC_PIN_GROUP(hscif2_clk_c), + SH_PFC_PIN_GROUP(hscif2_ctrl_c), + SH_PFC_PIN_GROUP(hscif3_data_a), + SH_PFC_PIN_GROUP(hscif3_clk), + SH_PFC_PIN_GROUP(hscif3_ctrl), + SH_PFC_PIN_GROUP(hscif3_data_b), + SH_PFC_PIN_GROUP(hscif3_data_c), + SH_PFC_PIN_GROUP(hscif3_data_d), + SH_PFC_PIN_GROUP(hscif4_data_a), + SH_PFC_PIN_GROUP(hscif4_clk), + SH_PFC_PIN_GROUP(hscif4_ctrl), + SH_PFC_PIN_GROUP(hscif4_data_b), + SH_PFC_PIN_GROUP(i2c0), + SH_PFC_PIN_GROUP(i2c1_a), + SH_PFC_PIN_GROUP(i2c1_b), + SH_PFC_PIN_GROUP(i2c2_a), + SH_PFC_PIN_GROUP(i2c2_b), + SH_PFC_PIN_GROUP(i2c3), + SH_PFC_PIN_GROUP(i2c5), + SH_PFC_PIN_GROUP(i2c6_a), + SH_PFC_PIN_GROUP(i2c6_b), + SH_PFC_PIN_GROUP(i2c6_c), + SH_PFC_PIN_GROUP(intc_ex_irq0), + SH_PFC_PIN_GROUP(intc_ex_irq1), + SH_PFC_PIN_GROUP(intc_ex_irq2), + SH_PFC_PIN_GROUP(intc_ex_irq3), + SH_PFC_PIN_GROUP(intc_ex_irq4), + SH_PFC_PIN_GROUP(intc_ex_irq5), + SH_PFC_PIN_GROUP(msiof0_clk), + SH_PFC_PIN_GROUP(msiof0_sync), + SH_PFC_PIN_GROUP(msiof0_ss1), + SH_PFC_PIN_GROUP(msiof0_ss2), + SH_PFC_PIN_GROUP(msiof0_txd), + SH_PFC_PIN_GROUP(msiof0_rxd), + SH_PFC_PIN_GROUP(msiof1_clk_a), + SH_PFC_PIN_GROUP(msiof1_sync_a), + SH_PFC_PIN_GROUP(msiof1_ss1_a), + SH_PFC_PIN_GROUP(msiof1_ss2_a), + SH_PFC_PIN_GROUP(msiof1_txd_a), + SH_PFC_PIN_GROUP(msiof1_rxd_a), + SH_PFC_PIN_GROUP(msiof1_clk_b), + SH_PFC_PIN_GROUP(msiof1_sync_b), + SH_PFC_PIN_GROUP(msiof1_ss1_b), + SH_PFC_PIN_GROUP(msiof1_ss2_b), + SH_PFC_PIN_GROUP(msiof1_txd_b), + SH_PFC_PIN_GROUP(msiof1_rxd_b), + 
SH_PFC_PIN_GROUP(msiof1_clk_c), + SH_PFC_PIN_GROUP(msiof1_sync_c), + SH_PFC_PIN_GROUP(msiof1_ss1_c), + SH_PFC_PIN_GROUP(msiof1_ss2_c), + SH_PFC_PIN_GROUP(msiof1_txd_c), + SH_PFC_PIN_GROUP(msiof1_rxd_c), + SH_PFC_PIN_GROUP(msiof1_clk_d), + SH_PFC_PIN_GROUP(msiof1_sync_d), + SH_PFC_PIN_GROUP(msiof1_ss1_d), + SH_PFC_PIN_GROUP(msiof1_ss2_d), + SH_PFC_PIN_GROUP(msiof1_txd_d), + SH_PFC_PIN_GROUP(msiof1_rxd_d), + SH_PFC_PIN_GROUP(msiof1_clk_e), + SH_PFC_PIN_GROUP(msiof1_sync_e), + SH_PFC_PIN_GROUP(msiof1_ss1_e), + SH_PFC_PIN_GROUP(msiof1_ss2_e), + SH_PFC_PIN_GROUP(msiof1_txd_e), + SH_PFC_PIN_GROUP(msiof1_rxd_e), + SH_PFC_PIN_GROUP(msiof1_clk_f), + SH_PFC_PIN_GROUP(msiof1_sync_f), + SH_PFC_PIN_GROUP(msiof1_ss1_f), + SH_PFC_PIN_GROUP(msiof1_ss2_f), + SH_PFC_PIN_GROUP(msiof1_txd_f), + SH_PFC_PIN_GROUP(msiof1_rxd_f), + SH_PFC_PIN_GROUP(msiof1_clk_g), + SH_PFC_PIN_GROUP(msiof1_sync_g), + SH_PFC_PIN_GROUP(msiof1_ss1_g), + SH_PFC_PIN_GROUP(msiof1_ss2_g), + SH_PFC_PIN_GROUP(msiof1_txd_g), + SH_PFC_PIN_GROUP(msiof1_rxd_g), + SH_PFC_PIN_GROUP(msiof2_clk_a), + SH_PFC_PIN_GROUP(msiof2_sync_a), + SH_PFC_PIN_GROUP(msiof2_ss1_a), + SH_PFC_PIN_GROUP(msiof2_ss2_a), + SH_PFC_PIN_GROUP(msiof2_txd_a), + SH_PFC_PIN_GROUP(msiof2_rxd_a), + SH_PFC_PIN_GROUP(msiof2_clk_b), + SH_PFC_PIN_GROUP(msiof2_sync_b), + SH_PFC_PIN_GROUP(msiof2_ss1_b), + SH_PFC_PIN_GROUP(msiof2_ss2_b), + SH_PFC_PIN_GROUP(msiof2_txd_b), + SH_PFC_PIN_GROUP(msiof2_rxd_b), + SH_PFC_PIN_GROUP(msiof2_clk_c), + SH_PFC_PIN_GROUP(msiof2_sync_c), + SH_PFC_PIN_GROUP(msiof2_ss1_c), + SH_PFC_PIN_GROUP(msiof2_ss2_c), + SH_PFC_PIN_GROUP(msiof2_txd_c), + SH_PFC_PIN_GROUP(msiof2_rxd_c), + SH_PFC_PIN_GROUP(msiof2_clk_d), + SH_PFC_PIN_GROUP(msiof2_sync_d), + SH_PFC_PIN_GROUP(msiof2_ss1_d), + SH_PFC_PIN_GROUP(msiof2_ss2_d), + SH_PFC_PIN_GROUP(msiof2_txd_d), + SH_PFC_PIN_GROUP(msiof2_rxd_d), + SH_PFC_PIN_GROUP(msiof3_clk_a), + SH_PFC_PIN_GROUP(msiof3_sync_a), + SH_PFC_PIN_GROUP(msiof3_ss1_a), + SH_PFC_PIN_GROUP(msiof3_ss2_a), + SH_PFC_PIN_GROUP(msiof3_txd_a), + SH_PFC_PIN_GROUP(msiof3_rxd_a), + SH_PFC_PIN_GROUP(msiof3_clk_b), + SH_PFC_PIN_GROUP(msiof3_sync_b), + SH_PFC_PIN_GROUP(msiof3_ss1_b), + SH_PFC_PIN_GROUP(msiof3_ss2_b), + SH_PFC_PIN_GROUP(msiof3_txd_b), + SH_PFC_PIN_GROUP(msiof3_rxd_b), + SH_PFC_PIN_GROUP(msiof3_clk_c), + SH_PFC_PIN_GROUP(msiof3_sync_c), + SH_PFC_PIN_GROUP(msiof3_txd_c), + SH_PFC_PIN_GROUP(msiof3_rxd_c), + SH_PFC_PIN_GROUP(msiof3_clk_d), + SH_PFC_PIN_GROUP(msiof3_sync_d), + SH_PFC_PIN_GROUP(msiof3_ss1_d), + SH_PFC_PIN_GROUP(msiof3_txd_d), + SH_PFC_PIN_GROUP(msiof3_rxd_d), + SH_PFC_PIN_GROUP(msiof3_clk_e), + SH_PFC_PIN_GROUP(msiof3_sync_e), + SH_PFC_PIN_GROUP(msiof3_ss1_e), + SH_PFC_PIN_GROUP(msiof3_ss2_e), + SH_PFC_PIN_GROUP(msiof3_txd_e), + SH_PFC_PIN_GROUP(msiof3_rxd_e), + SH_PFC_PIN_GROUP(pwm0), + SH_PFC_PIN_GROUP(pwm1_a), + SH_PFC_PIN_GROUP(pwm1_b), + SH_PFC_PIN_GROUP(pwm2_a), + SH_PFC_PIN_GROUP(pwm2_b), + SH_PFC_PIN_GROUP(pwm3_a), + SH_PFC_PIN_GROUP(pwm3_b), + SH_PFC_PIN_GROUP(pwm4_a), + SH_PFC_PIN_GROUP(pwm4_b), + SH_PFC_PIN_GROUP(pwm5_a), + SH_PFC_PIN_GROUP(pwm5_b), + SH_PFC_PIN_GROUP(pwm6_a), + SH_PFC_PIN_GROUP(pwm6_b), + SH_PFC_PIN_GROUP(sata0_devslp_a), + SH_PFC_PIN_GROUP(sata0_devslp_b), + SH_PFC_PIN_GROUP(scif0_data), + SH_PFC_PIN_GROUP(scif0_clk), + SH_PFC_PIN_GROUP(scif0_ctrl), + SH_PFC_PIN_GROUP(scif1_data_a), + SH_PFC_PIN_GROUP(scif1_clk), + SH_PFC_PIN_GROUP(scif1_ctrl), + SH_PFC_PIN_GROUP(scif1_data_b), + SH_PFC_PIN_GROUP(scif2_data_a), + SH_PFC_PIN_GROUP(scif2_clk), + SH_PFC_PIN_GROUP(scif2_data_b), + 
SH_PFC_PIN_GROUP(scif3_data_a), + SH_PFC_PIN_GROUP(scif3_clk), + SH_PFC_PIN_GROUP(scif3_ctrl), + SH_PFC_PIN_GROUP(scif3_data_b), + SH_PFC_PIN_GROUP(scif4_data_a), + SH_PFC_PIN_GROUP(scif4_clk_a), + SH_PFC_PIN_GROUP(scif4_ctrl_a), + SH_PFC_PIN_GROUP(scif4_data_b), + SH_PFC_PIN_GROUP(scif4_clk_b), + SH_PFC_PIN_GROUP(scif4_ctrl_b), + SH_PFC_PIN_GROUP(scif4_data_c), + SH_PFC_PIN_GROUP(scif4_clk_c), + SH_PFC_PIN_GROUP(scif4_ctrl_c), + SH_PFC_PIN_GROUP(scif5_data_a), + SH_PFC_PIN_GROUP(scif5_clk_a), + SH_PFC_PIN_GROUP(scif5_data_b), + SH_PFC_PIN_GROUP(scif5_clk_b), + SH_PFC_PIN_GROUP(scif_clk_a), + SH_PFC_PIN_GROUP(scif_clk_b), + SH_PFC_PIN_GROUP(sdhi0_data1), + SH_PFC_PIN_GROUP(sdhi0_data4), + SH_PFC_PIN_GROUP(sdhi0_ctrl), + SH_PFC_PIN_GROUP(sdhi0_cd), + SH_PFC_PIN_GROUP(sdhi0_wp), + SH_PFC_PIN_GROUP(sdhi1_data1), + SH_PFC_PIN_GROUP(sdhi1_data4), + SH_PFC_PIN_GROUP(sdhi1_ctrl), + SH_PFC_PIN_GROUP(sdhi1_cd), + SH_PFC_PIN_GROUP(sdhi1_wp), + SH_PFC_PIN_GROUP(sdhi2_data1), + SH_PFC_PIN_GROUP(sdhi2_data4), + SH_PFC_PIN_GROUP(sdhi2_data8), + SH_PFC_PIN_GROUP(sdhi2_ctrl), + SH_PFC_PIN_GROUP(sdhi2_cd_a), + SH_PFC_PIN_GROUP(sdhi2_wp_a), + SH_PFC_PIN_GROUP(sdhi2_cd_b), + SH_PFC_PIN_GROUP(sdhi2_wp_b), + SH_PFC_PIN_GROUP(sdhi2_ds), + SH_PFC_PIN_GROUP(sdhi3_data1), + SH_PFC_PIN_GROUP(sdhi3_data4), + SH_PFC_PIN_GROUP(sdhi3_data8), + SH_PFC_PIN_GROUP(sdhi3_ctrl), + SH_PFC_PIN_GROUP(sdhi3_cd), + SH_PFC_PIN_GROUP(sdhi3_wp), + SH_PFC_PIN_GROUP(sdhi3_ds), + SH_PFC_PIN_GROUP(ssi0_data), + SH_PFC_PIN_GROUP(ssi01239_ctrl), + SH_PFC_PIN_GROUP(ssi1_data_a), + SH_PFC_PIN_GROUP(ssi1_data_b), + SH_PFC_PIN_GROUP(ssi1_ctrl_a), + SH_PFC_PIN_GROUP(ssi1_ctrl_b), + SH_PFC_PIN_GROUP(ssi2_data_a), + SH_PFC_PIN_GROUP(ssi2_data_b), + SH_PFC_PIN_GROUP(ssi2_ctrl_a), + SH_PFC_PIN_GROUP(ssi2_ctrl_b), + SH_PFC_PIN_GROUP(ssi3_data), + SH_PFC_PIN_GROUP(ssi349_ctrl), + SH_PFC_PIN_GROUP(ssi4_data), + SH_PFC_PIN_GROUP(ssi4_ctrl), + SH_PFC_PIN_GROUP(ssi5_data), + SH_PFC_PIN_GROUP(ssi5_ctrl), + SH_PFC_PIN_GROUP(ssi6_data), + SH_PFC_PIN_GROUP(ssi6_ctrl), + SH_PFC_PIN_GROUP(ssi7_data), + SH_PFC_PIN_GROUP(ssi78_ctrl), + SH_PFC_PIN_GROUP(ssi8_data), + SH_PFC_PIN_GROUP(ssi9_data_a), + SH_PFC_PIN_GROUP(ssi9_data_b), + SH_PFC_PIN_GROUP(ssi9_ctrl_a), + SH_PFC_PIN_GROUP(ssi9_ctrl_b), + SH_PFC_PIN_GROUP(tmu_tclk1_a), + SH_PFC_PIN_GROUP(tmu_tclk1_b), + SH_PFC_PIN_GROUP(tmu_tclk2_a), + SH_PFC_PIN_GROUP(tmu_tclk2_b), + SH_PFC_PIN_GROUP(tpu_to0), + SH_PFC_PIN_GROUP(tpu_to1), + SH_PFC_PIN_GROUP(tpu_to2), + SH_PFC_PIN_GROUP(tpu_to3), + SH_PFC_PIN_GROUP(usb0), + SH_PFC_PIN_GROUP(usb1), + SH_PFC_PIN_GROUP(usb2), + SH_PFC_PIN_GROUP(usb2_ch3), + SH_PFC_PIN_GROUP(usb30), + VIN_DATA_PIN_GROUP(vin4_data, 8, _a), + VIN_DATA_PIN_GROUP(vin4_data, 10, _a), + VIN_DATA_PIN_GROUP(vin4_data, 12, _a), + VIN_DATA_PIN_GROUP(vin4_data, 16, _a), + SH_PFC_PIN_GROUP(vin4_data18_a), + VIN_DATA_PIN_GROUP(vin4_data, 20, _a), + VIN_DATA_PIN_GROUP(vin4_data, 24, _a), + VIN_DATA_PIN_GROUP(vin4_data, 8, _b), + VIN_DATA_PIN_GROUP(vin4_data, 10, _b), + VIN_DATA_PIN_GROUP(vin4_data, 12, _b), + VIN_DATA_PIN_GROUP(vin4_data, 16, _b), + SH_PFC_PIN_GROUP(vin4_data18_b), + VIN_DATA_PIN_GROUP(vin4_data, 20, _b), + VIN_DATA_PIN_GROUP(vin4_data, 24, _b), + SH_PFC_PIN_GROUP(vin4_sync), + SH_PFC_PIN_GROUP(vin4_field), + SH_PFC_PIN_GROUP(vin4_clkenb), + SH_PFC_PIN_GROUP(vin4_clk), + VIN_DATA_PIN_GROUP(vin5_data, 8), + VIN_DATA_PIN_GROUP(vin5_data, 10), + VIN_DATA_PIN_GROUP(vin5_data, 12), + VIN_DATA_PIN_GROUP(vin5_data, 16), + SH_PFC_PIN_GROUP(vin5_sync), + SH_PFC_PIN_GROUP(vin5_field), + 
SH_PFC_PIN_GROUP(vin5_clkenb), + SH_PFC_PIN_GROUP(vin5_clk), + }, + .automotive = { + SH_PFC_PIN_GROUP(drif0_ctrl_a), + SH_PFC_PIN_GROUP(drif0_data0_a), + SH_PFC_PIN_GROUP(drif0_data1_a), + SH_PFC_PIN_GROUP(drif0_ctrl_b), + SH_PFC_PIN_GROUP(drif0_data0_b), + SH_PFC_PIN_GROUP(drif0_data1_b), + SH_PFC_PIN_GROUP(drif0_ctrl_c), + SH_PFC_PIN_GROUP(drif0_data0_c), + SH_PFC_PIN_GROUP(drif0_data1_c), + SH_PFC_PIN_GROUP(drif1_ctrl_a), + SH_PFC_PIN_GROUP(drif1_data0_a), + SH_PFC_PIN_GROUP(drif1_data1_a), + SH_PFC_PIN_GROUP(drif1_ctrl_b), + SH_PFC_PIN_GROUP(drif1_data0_b), + SH_PFC_PIN_GROUP(drif1_data1_b), + SH_PFC_PIN_GROUP(drif1_ctrl_c), + SH_PFC_PIN_GROUP(drif1_data0_c), + SH_PFC_PIN_GROUP(drif1_data1_c), + SH_PFC_PIN_GROUP(drif2_ctrl_a), + SH_PFC_PIN_GROUP(drif2_data0_a), + SH_PFC_PIN_GROUP(drif2_data1_a), + SH_PFC_PIN_GROUP(drif2_ctrl_b), + SH_PFC_PIN_GROUP(drif2_data0_b), + SH_PFC_PIN_GROUP(drif2_data1_b), + SH_PFC_PIN_GROUP(drif3_ctrl_a), + SH_PFC_PIN_GROUP(drif3_data0_a), + SH_PFC_PIN_GROUP(drif3_data1_a), + SH_PFC_PIN_GROUP(drif3_ctrl_b), + SH_PFC_PIN_GROUP(drif3_data0_b), + SH_PFC_PIN_GROUP(drif3_data1_b), + } + }; static const char * const audio_clk_groups[] = { @@ -5031,64 +5039,72 @@ static const char * const vin5_groups[] = { "vin5_clk", }; -static const struct sh_pfc_function pinmux_functions[] = { - SH_PFC_FUNCTION(audio_clk), - SH_PFC_FUNCTION(avb), - SH_PFC_FUNCTION(can0), - SH_PFC_FUNCTION(can1), - SH_PFC_FUNCTION(can_clk), - SH_PFC_FUNCTION(canfd0), - SH_PFC_FUNCTION(canfd1), - SH_PFC_FUNCTION(drif0), - SH_PFC_FUNCTION(drif1), - SH_PFC_FUNCTION(drif2), - SH_PFC_FUNCTION(drif3), - SH_PFC_FUNCTION(du), - SH_PFC_FUNCTION(hscif0), - SH_PFC_FUNCTION(hscif1), - SH_PFC_FUNCTION(hscif2), - SH_PFC_FUNCTION(hscif3), - SH_PFC_FUNCTION(hscif4), - SH_PFC_FUNCTION(i2c0), - SH_PFC_FUNCTION(i2c1), - SH_PFC_FUNCTION(i2c2), - SH_PFC_FUNCTION(i2c3), - SH_PFC_FUNCTION(i2c5), - SH_PFC_FUNCTION(i2c6), - SH_PFC_FUNCTION(intc_ex), - SH_PFC_FUNCTION(msiof0), - SH_PFC_FUNCTION(msiof1), - SH_PFC_FUNCTION(msiof2), - SH_PFC_FUNCTION(msiof3), - SH_PFC_FUNCTION(pwm0), - SH_PFC_FUNCTION(pwm1), - SH_PFC_FUNCTION(pwm2), - SH_PFC_FUNCTION(pwm3), - SH_PFC_FUNCTION(pwm4), - SH_PFC_FUNCTION(pwm5), - SH_PFC_FUNCTION(pwm6), - SH_PFC_FUNCTION(sata0), - SH_PFC_FUNCTION(scif0), - SH_PFC_FUNCTION(scif1), - SH_PFC_FUNCTION(scif2), - SH_PFC_FUNCTION(scif3), - SH_PFC_FUNCTION(scif4), - SH_PFC_FUNCTION(scif5), - SH_PFC_FUNCTION(scif_clk), - SH_PFC_FUNCTION(sdhi0), - SH_PFC_FUNCTION(sdhi1), - SH_PFC_FUNCTION(sdhi2), - SH_PFC_FUNCTION(sdhi3), - SH_PFC_FUNCTION(ssi), - SH_PFC_FUNCTION(tmu), - SH_PFC_FUNCTION(tpu), - SH_PFC_FUNCTION(usb0), - SH_PFC_FUNCTION(usb1), - SH_PFC_FUNCTION(usb2), - SH_PFC_FUNCTION(usb2_ch3), - SH_PFC_FUNCTION(usb30), - SH_PFC_FUNCTION(vin4), - SH_PFC_FUNCTION(vin5), +static const struct { + struct sh_pfc_function common[53]; + struct sh_pfc_function automotive[4]; +} pinmux_functions = { + .common = { + SH_PFC_FUNCTION(audio_clk), + SH_PFC_FUNCTION(avb), + SH_PFC_FUNCTION(can0), + SH_PFC_FUNCTION(can1), + SH_PFC_FUNCTION(can_clk), + SH_PFC_FUNCTION(canfd0), + SH_PFC_FUNCTION(canfd1), + SH_PFC_FUNCTION(du), + SH_PFC_FUNCTION(hscif0), + SH_PFC_FUNCTION(hscif1), + SH_PFC_FUNCTION(hscif2), + SH_PFC_FUNCTION(hscif3), + SH_PFC_FUNCTION(hscif4), + SH_PFC_FUNCTION(i2c0), + SH_PFC_FUNCTION(i2c1), + SH_PFC_FUNCTION(i2c2), + SH_PFC_FUNCTION(i2c3), + SH_PFC_FUNCTION(i2c5), + SH_PFC_FUNCTION(i2c6), + SH_PFC_FUNCTION(intc_ex), + SH_PFC_FUNCTION(msiof0), + SH_PFC_FUNCTION(msiof1), + SH_PFC_FUNCTION(msiof2), + 
SH_PFC_FUNCTION(msiof3), + SH_PFC_FUNCTION(pwm0), + SH_PFC_FUNCTION(pwm1), + SH_PFC_FUNCTION(pwm2), + SH_PFC_FUNCTION(pwm3), + SH_PFC_FUNCTION(pwm4), + SH_PFC_FUNCTION(pwm5), + SH_PFC_FUNCTION(pwm6), + SH_PFC_FUNCTION(sata0), + SH_PFC_FUNCTION(scif0), + SH_PFC_FUNCTION(scif1), + SH_PFC_FUNCTION(scif2), + SH_PFC_FUNCTION(scif3), + SH_PFC_FUNCTION(scif4), + SH_PFC_FUNCTION(scif5), + SH_PFC_FUNCTION(scif_clk), + SH_PFC_FUNCTION(sdhi0), + SH_PFC_FUNCTION(sdhi1), + SH_PFC_FUNCTION(sdhi2), + SH_PFC_FUNCTION(sdhi3), + SH_PFC_FUNCTION(ssi), + SH_PFC_FUNCTION(tmu), + SH_PFC_FUNCTION(tpu), + SH_PFC_FUNCTION(usb0), + SH_PFC_FUNCTION(usb1), + SH_PFC_FUNCTION(usb2), + SH_PFC_FUNCTION(usb2_ch3), + SH_PFC_FUNCTION(usb30), + SH_PFC_FUNCTION(vin4), + SH_PFC_FUNCTION(vin5), + }, + .automotive = { + SH_PFC_FUNCTION(drif0), + SH_PFC_FUNCTION(drif1), + SH_PFC_FUNCTION(drif2), + SH_PFC_FUNCTION(drif3), + } + }; static const struct pinmux_cfg_reg pinmux_config_regs[] = { @@ -5777,7 +5793,9 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { PIN_DU_DOTCLKIN1, 0, 2 }, /* DU_DOTCLKIN1 */ } }, { PINMUX_DRIVE_REG("DRVCTRL12", 0xe6060330) { +#ifdef CONFIG_PINCTRL_PFC_R8A77951 { PIN_DU_DOTCLKIN2, 28, 2 }, /* DU_DOTCLKIN2 */ +#endif { PIN_DU_DOTCLKIN3, 24, 2 }, /* DU_DOTCLKIN3 */ { PIN_FSCLKST_N, 20, 2 }, /* FSCLKST# */ { PIN_TMS, 4, 2 }, /* TMS */ @@ -5898,8 +5916,8 @@ static const struct pinmux_drive_reg pinmux_drive_regs[] = { { RCAR_GP_PIN(6, 27), 20, 3 }, /* USB1_OVC */ { RCAR_GP_PIN(6, 28), 16, 3 }, /* USB30_PWEN */ { RCAR_GP_PIN(6, 29), 12, 3 }, /* USB30_OVC */ - { RCAR_GP_PIN(6, 30), 8, 3 }, /* USB2_CH3_PWEN */ - { RCAR_GP_PIN(6, 31), 4, 3 }, /* USB2_CH3_OVC */ + { RCAR_GP_PIN(6, 30), 8, 3 }, /* GP6_30/USB2_CH3_PWEN */ + { RCAR_GP_PIN(6, 31), 4, 3 }, /* GP6_31/USB2_CH3_OVC */ } }, { }, }; @@ -6220,6 +6238,32 @@ static const struct sh_pfc_soc_operations r8a77951_pinmux_ops = { .set_bias = r8a77951_pinmux_set_bias, }; +#ifdef CONFIG_PINCTRL_PFC_R8A774E1 +const struct sh_pfc_soc_info r8a774e1_pinmux_info = { + .name = "r8a774e1_pfc", + .ops = &r8a77951_pinmux_ops, + .unlock_reg = 0xe6060000, /* PMMR */ + + .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, + + .pins = pinmux_pins, + .nr_pins = ARRAY_SIZE(pinmux_pins), + .groups = pinmux_groups.common, + .nr_groups = ARRAY_SIZE(pinmux_groups.common), + .functions = pinmux_functions.common, + .nr_functions = ARRAY_SIZE(pinmux_functions.common), + + .cfg_regs = pinmux_config_regs, + .drive_regs = pinmux_drive_regs, + .bias_regs = pinmux_bias_regs, + .ioctrl_regs = pinmux_ioctrl_regs, + + .pinmux_data = pinmux_data, + .pinmux_data_size = ARRAY_SIZE(pinmux_data), +}; +#endif + +#ifdef CONFIG_PINCTRL_PFC_R8A77951 const struct sh_pfc_soc_info r8a77951_pinmux_info = { .name = "r8a77951_pfc", .ops = &r8a77951_pinmux_ops, @@ -6229,10 +6273,12 @@ const struct sh_pfc_soc_info r8a77951_pinmux_info = { .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), - .groups = pinmux_groups, - .nr_groups = ARRAY_SIZE(pinmux_groups), - .functions = pinmux_functions, - .nr_functions = ARRAY_SIZE(pinmux_functions), + .groups = pinmux_groups.common, + .nr_groups = ARRAY_SIZE(pinmux_groups.common) + + ARRAY_SIZE(pinmux_groups.automotive), + .functions = pinmux_functions.common, + .nr_functions = ARRAY_SIZE(pinmux_functions.common) + + ARRAY_SIZE(pinmux_functions.automotive), .cfg_regs = pinmux_config_regs, .drive_regs = pinmux_drive_regs, @@ -6242,3 +6288,4 @@ const struct sh_pfc_soc_info r8a77951_pinmux_info = { .pinmux_data = pinmux_data, .pinmux_data_size = 
ARRAY_SIZE(pinmux_data), }; +#endif diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c index 25e27b6bee89..9f7d9c9238fc 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c @@ -1416,6 +1416,64 @@ static const unsigned int qspi1_data4_mux[] = { QSPI1_IO2_MARK, QSPI1_IO3_MARK }; +/* - RPC -------------------------------------------------------------------- */ +static const unsigned int rpc_clk1_pins[] = { + /* Octal-SPI flash: C/SCLK */ + RCAR_GP_PIN(5, 0), +}; +static const unsigned int rpc_clk1_mux[] = { + QSPI0_SPCLK_MARK, +}; +static const unsigned int rpc_clk2_pins[] = { + /* HyperFlash: CK, CK# */ + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 6), +}; +static const unsigned int rpc_clk2_mux[] = { + QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK, +}; +static const unsigned int rpc_ctrl_pins[] = { + /* Octal-SPI flash: S#/CS, DQS */ + /* HyperFlash: CS#, RDS */ + RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11), +}; +static const unsigned int rpc_ctrl_mux[] = { + QSPI0_SSL_MARK, QSPI1_SSL_MARK, +}; +static const unsigned int rpc_data_pins[] = { + /* DQ[0:7] */ + RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2), + RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4), + RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8), + RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), +}; +static const unsigned int rpc_data_mux[] = { + QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, + QSPI0_IO2_MARK, QSPI0_IO3_MARK, + QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK, + QSPI1_IO2_MARK, QSPI1_IO3_MARK, +}; +static const unsigned int rpc_reset_pins[] = { + /* RPC_RESET# */ + RCAR_GP_PIN(5, 12), +}; +static const unsigned int rpc_reset_mux[] = { + RPC_RESET_N_MARK, +}; +static const unsigned int rpc_int_pins[] = { + /* RPC_INT# */ + RCAR_GP_PIN(5, 14), +}; +static const unsigned int rpc_int_mux[] = { + RPC_INT_N_MARK, +}; +static const unsigned int rpc_wp_pins[] = { + /* RPC_WP# */ + RCAR_GP_PIN(5, 13), +}; +static const unsigned int rpc_wp_mux[] = { + RPC_WP_N_MARK, +}; + /* - SCIF Clock ------------------------------------------------------------- */ static const unsigned int scif_clk_a_pins[] = { /* SCIF_CLK */ @@ -1750,6 +1808,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(qspi1_ctrl), SH_PFC_PIN_GROUP(qspi1_data2), SH_PFC_PIN_GROUP(qspi1_data4), + SH_PFC_PIN_GROUP(rpc_clk1), + SH_PFC_PIN_GROUP(rpc_clk2), + SH_PFC_PIN_GROUP(rpc_ctrl), + SH_PFC_PIN_GROUP(rpc_data), + SH_PFC_PIN_GROUP(rpc_reset), + SH_PFC_PIN_GROUP(rpc_int), + SH_PFC_PIN_GROUP(rpc_wp), SH_PFC_PIN_GROUP(scif_clk_a), SH_PFC_PIN_GROUP(scif_clk_b), SH_PFC_PIN_GROUP(scif0_data), @@ -1954,6 +2019,16 @@ static const char * const qspi1_groups[] = { "qspi1_data4", }; +static const char * const rpc_groups[] = { + "rpc_clk1", + "rpc_clk2", + "rpc_ctrl", + "rpc_data", + "rpc_reset", + "rpc_int", + "rpc_wp", +}; + static const char * const scif_clk_groups[] = { "scif_clk_a", "scif_clk_b", @@ -2039,6 +2114,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(pwm4), SH_PFC_FUNCTION(qspi0), SH_PFC_FUNCTION(qspi1), + SH_PFC_FUNCTION(rpc), SH_PFC_FUNCTION(scif_clk), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c index 14fe4032a52d..1055f9853404 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77980.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77980.c @@ -1710,6 +1710,64 @@ static const unsigned int qspi1_data4_mux[] = { QSPI1_IO2_MARK, QSPI1_IO3_MARK }; +/* - RPC -------------------------------------------------------------------- */ 
+static const unsigned int rpc_clk1_pins[] = { + /* Octal-SPI flash: C/SCLK */ + RCAR_GP_PIN(5, 0), +}; +static const unsigned int rpc_clk1_mux[] = { + QSPI0_SPCLK_MARK, +}; +static const unsigned int rpc_clk2_pins[] = { + /* HyperFlash: CK, CK# */ + RCAR_GP_PIN(5, 0), RCAR_GP_PIN(5, 6), +}; +static const unsigned int rpc_clk2_mux[] = { + QSPI0_SPCLK_MARK, QSPI1_SPCLK_MARK, +}; +static const unsigned int rpc_ctrl_pins[] = { + /* Octal-SPI flash: S#/CS, DQS */ + /* HyperFlash: CS#, RDS */ + RCAR_GP_PIN(5, 5), RCAR_GP_PIN(5, 11), +}; +static const unsigned int rpc_ctrl_mux[] = { + QSPI0_SSL_MARK, QSPI1_SSL_MARK, +}; +static const unsigned int rpc_data_pins[] = { + /* DQ[0:7] */ + RCAR_GP_PIN(5, 1), RCAR_GP_PIN(5, 2), + RCAR_GP_PIN(5, 3), RCAR_GP_PIN(5, 4), + RCAR_GP_PIN(5, 7), RCAR_GP_PIN(5, 8), + RCAR_GP_PIN(5, 9), RCAR_GP_PIN(5, 10), +}; +static const unsigned int rpc_data_mux[] = { + QSPI0_MOSI_IO0_MARK, QSPI0_MISO_IO1_MARK, + QSPI0_IO2_MARK, QSPI0_IO3_MARK, + QSPI1_MOSI_IO0_MARK, QSPI1_MISO_IO1_MARK, + QSPI1_IO2_MARK, QSPI1_IO3_MARK, +}; +static const unsigned int rpc_reset_pins[] = { + /* RPC_RESET# */ + RCAR_GP_PIN(5, 12), +}; +static const unsigned int rpc_reset_mux[] = { + RPC_RESET_N_MARK, +}; +static const unsigned int rpc_int_pins[] = { + /* RPC_INT# */ + RCAR_GP_PIN(5, 14), +}; +static const unsigned int rpc_int_mux[] = { + RPC_INT_N_MARK, +}; +static const unsigned int rpc_wp_pins[] = { + /* RPC_WP# */ + RCAR_GP_PIN(5, 13), +}; +static const unsigned int rpc_wp_mux[] = { + RPC_WP_N_MARK, +}; + /* - SCIF0 ------------------------------------------------------------------ */ static const unsigned int scif0_data_pins[] = { /* RX0, TX0 */ @@ -2126,6 +2184,13 @@ static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(qspi1_ctrl), SH_PFC_PIN_GROUP(qspi1_data2), SH_PFC_PIN_GROUP(qspi1_data4), + SH_PFC_PIN_GROUP(rpc_clk1), + SH_PFC_PIN_GROUP(rpc_clk2), + SH_PFC_PIN_GROUP(rpc_ctrl), + SH_PFC_PIN_GROUP(rpc_data), + SH_PFC_PIN_GROUP(rpc_reset), + SH_PFC_PIN_GROUP(rpc_int), + SH_PFC_PIN_GROUP(rpc_wp), SH_PFC_PIN_GROUP(scif0_data), SH_PFC_PIN_GROUP(scif0_clk), SH_PFC_PIN_GROUP(scif0_ctrl), @@ -2362,6 +2427,16 @@ static const char * const qspi1_groups[] = { "qspi1_data4", }; +static const char * const rpc_groups[] = { + "rpc_clk1", + "rpc_clk2", + "rpc_ctrl", + "rpc_data", + "rpc_reset", + "rpc_int", + "rpc_wp", +}; + static const char * const scif0_groups[] = { "scif0_data", "scif0_clk", @@ -2460,6 +2535,7 @@ static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(pwm4), SH_PFC_FUNCTION(qspi0), SH_PFC_FUNCTION(qspi1), + SH_PFC_FUNCTION(rpc), SH_PFC_FUNCTION(scif0), SH_PFC_FUNCTION(scif1), SH_PFC_FUNCTION(scif3), diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index 0f013827baf9..eff1bb872325 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h @@ -312,6 +312,7 @@ extern const struct sh_pfc_soc_info r8a77470_pinmux_info; extern const struct sh_pfc_soc_info r8a774a1_pinmux_info; extern const struct sh_pfc_soc_info r8a774b1_pinmux_info; extern const struct sh_pfc_soc_info r8a774c0_pinmux_info; +extern const struct sh_pfc_soc_info r8a774e1_pinmux_info; extern const struct sh_pfc_soc_info r8a7778_pinmux_info; extern const struct sh_pfc_soc_info r8a7779_pinmux_info; extern const struct sh_pfc_soc_info r8a7790_pinmux_info; diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c index 50df9e084414..e54a6e3cafd2 100644 --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c +++ 
b/drivers/pinctrl/sirf/pinctrl-atlas7.c @@ -169,7 +169,7 @@ struct dt_params { /** * struct atlas7_pad_conf - Atlas7 Pad Configuration - * @id The ID of this Pad. + * @id: The ID of this Pad. * @type: The type of this Pad. * @mux_reg: The mux register offset. * This register contains the mux. @@ -210,7 +210,7 @@ struct atlas7_pad_config { .ad_ctrl_bit = adb, \ } -/** +/* * struct atlas7_pad_status - Atlas7 Pad status */ struct atlas7_pad_status { @@ -355,10 +355,6 @@ struct atlas7_gpio_chip { struct atlas7_gpio_bank banks[]; }; -/** - * @dev: a pointer back to containing device - * @virtbase: the offset to the controller in virtual memory - */ struct atlas7_pmx { struct device *dev; struct pinctrl_dev *pctl; @@ -376,7 +372,7 @@ struct atlas7_pmx { * refer to A7DA IO Summary - CS-314158-DD-4E.xls */ -/*Pads in IOC RTC & TOP */ +/* Pads in IOC RTC & TOP */ static const struct pinctrl_pin_desc atlas7_ioc_pads[] = { /* RTC PADs */ PINCTRL_PIN(0, "rtc_gpio_0"), @@ -4781,10 +4777,10 @@ struct map_data { /** * struct atlas7_pull_info - Atlas7 Pad pull info - * @type:The type of this Pad. - * @mask:The mas value of this pin's pull bits. - * @v2s: The map of pull register value to pull status. - * @s2v: The map of pull status to pull register value. + * @pad_type: The type of this Pad. + * @mask: The mas value of this pin's pull bits. + * @v2s: The map of pull register value to pull status. + * @s2v: The map of pull status to pull register value. */ struct atlas7_pull_info { u8 pad_type; @@ -4908,6 +4904,7 @@ static const struct atlas7_ds_ma_info atlas7_ma2ds_map[] = { * @type: The type of this Pad. * @mask: The mask value of this pin's pull bits. * @imval: The immediate value of drives trength register. + * @reserved: Reserved space */ struct atlas7_ds_info { u8 type; @@ -5609,7 +5606,7 @@ static int __init atlas7_pinmux_init(void) arch_initcall(atlas7_pinmux_init); -/** +/* * The Following is GPIO Code */ static inline struct diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index a657cd829ce6..7d9bdedcd71b 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -64,7 +64,7 @@ #define gpio_range_to_bank(chip) \ container_of(chip, struct stm32_gpio_bank, range) -#define HWSPINLOCK_TIMEOUT 5 /* msec */ +#define HWSPNLCK_TIMEOUT 1000 /* usec */ static const char * const stm32_gpio_functions[] = { "gpio", "af0", "af1", @@ -84,6 +84,7 @@ struct stm32_pinctrl_group { struct stm32_gpio_bank { void __iomem *base; struct clk *clk; + struct reset_control *rstc; spinlock_t lock; struct gpio_chip gpio_chip; struct pinctrl_gpio_range range; @@ -302,6 +303,7 @@ static const struct gpio_chip stm32_gpio_template = { .direction_output = stm32_gpio_direction_output, .to_irq = stm32_gpio_to_irq, .get_direction = stm32_gpio_get_direction, + .set_config = gpiochip_generic_config, }; static void stm32_gpio_irq_trigger(struct irq_data *d) @@ -420,12 +422,14 @@ static int stm32_gpio_domain_activate(struct irq_domain *d, * to avoid overriding. 
*/ spin_lock_irqsave(&pctl->irqmux_lock, flags); - if (pctl->hwlock) - ret = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT); - if (ret) { - dev_err(pctl->dev, "Can't get hwspinlock\n"); - goto unlock; + if (pctl->hwlock) { + ret = hwspin_lock_timeout_in_atomic(pctl->hwlock, + HWSPNLCK_TIMEOUT); + if (ret) { + dev_err(pctl->dev, "Can't get hwspinlock\n"); + goto unlock; + } } if (pctl->irqmux_map & BIT(irq_data->hwirq)) { @@ -433,7 +437,7 @@ static int stm32_gpio_domain_activate(struct irq_domain *d, irq_data->hwirq); ret = -EBUSY; if (pctl->hwlock) - hwspin_unlock(pctl->hwlock); + hwspin_unlock_in_atomic(pctl->hwlock); goto unlock; } else { pctl->irqmux_map |= BIT(irq_data->hwirq); @@ -442,7 +446,7 @@ static int stm32_gpio_domain_activate(struct irq_domain *d, regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_ioport_nr); if (pctl->hwlock) - hwspin_unlock(pctl->hwlock); + hwspin_unlock_in_atomic(pctl->hwlock); unlock: spin_unlock_irqrestore(&pctl->irqmux_lock, flags); @@ -750,12 +754,13 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank, clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); - if (pctl->hwlock) - err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT); - - if (err) { - dev_err(pctl->dev, "Can't get hwspinlock\n"); - goto unlock; + if (pctl->hwlock) { + err = hwspin_lock_timeout_in_atomic(pctl->hwlock, + HWSPNLCK_TIMEOUT); + if (err) { + dev_err(pctl->dev, "Can't get hwspinlock\n"); + goto unlock; + } } val = readl_relaxed(bank->base + alt_offset); @@ -769,7 +774,7 @@ static int stm32_pmx_set_mode(struct stm32_gpio_bank *bank, writel_relaxed(val, bank->base + STM32_GPIO_MODER); if (pctl->hwlock) - hwspin_unlock(pctl->hwlock); + hwspin_unlock_in_atomic(pctl->hwlock); stm32_gpio_backup_mode(bank, pin, mode, alt); @@ -869,12 +874,13 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank, clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); - if (pctl->hwlock) - err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT); - - if (err) { - dev_err(pctl->dev, "Can't get hwspinlock\n"); - goto unlock; + if (pctl->hwlock) { + err = hwspin_lock_timeout_in_atomic(pctl->hwlock, + HWSPNLCK_TIMEOUT); + if (err) { + dev_err(pctl->dev, "Can't get hwspinlock\n"); + goto unlock; + } } val = readl_relaxed(bank->base + STM32_GPIO_TYPER); @@ -883,7 +889,7 @@ static int stm32_pconf_set_driving(struct stm32_gpio_bank *bank, writel_relaxed(val, bank->base + STM32_GPIO_TYPER); if (pctl->hwlock) - hwspin_unlock(pctl->hwlock); + hwspin_unlock_in_atomic(pctl->hwlock); stm32_gpio_backup_driving(bank, offset, drive); @@ -923,12 +929,13 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank, clk_enable(bank->clk); spin_lock_irqsave(&bank->lock, flags); - if (pctl->hwlock) - err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT); - - if (err) { - dev_err(pctl->dev, "Can't get hwspinlock\n"); - goto unlock; + if (pctl->hwlock) { + err = hwspin_lock_timeout_in_atomic(pctl->hwlock, + HWSPNLCK_TIMEOUT); + if (err) { + dev_err(pctl->dev, "Can't get hwspinlock\n"); + goto unlock; + } } val = readl_relaxed(bank->base + STM32_GPIO_SPEEDR); @@ -937,7 +944,7 @@ static int stm32_pconf_set_speed(struct stm32_gpio_bank *bank, writel_relaxed(val, bank->base + STM32_GPIO_SPEEDR); if (pctl->hwlock) - hwspin_unlock(pctl->hwlock); + hwspin_unlock_in_atomic(pctl->hwlock); stm32_gpio_backup_speed(bank, offset, speed); @@ -977,12 +984,13 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank, clk_enable(bank->clk); 
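[Editor's note] The hwspinlock change above repeats one pattern in every stm32 pinmux/pinconf helper: the hardware semaphore is now taken with the *_in_atomic variants, which busy-wait for a microsecond timeout instead of sleeping, so they are legal inside a spin_lock_irqsave() region. A minimal sketch of that pattern, with hypothetical names (struct my_bank, my_set_bits, MY_HWSPNLCK_TIMEOUT) standing in for the stm32 specifics:

        #include <linux/hwspinlock.h>
        #include <linux/io.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        #define MY_HWSPNLCK_TIMEOUT     1000    /* usec, as in the change above */

        struct my_bank {
                void __iomem *base;
                spinlock_t lock;
                struct hwspinlock *hwlock;      /* optional, may be NULL */
        };

        static int my_set_bits(struct my_bank *bank, u32 reg, u32 bits)
        {
                unsigned long flags;
                int err = 0;
                u32 val;

                spin_lock_irqsave(&bank->lock, flags);

                if (bank->hwlock) {
                        /* Busy-waits, never sleeps: safe with IRQs off. */
                        err = hwspin_lock_timeout_in_atomic(bank->hwlock,
                                                            MY_HWSPNLCK_TIMEOUT);
                        if (err)
                                goto unlock;
                }

                val = readl_relaxed(bank->base + reg);
                writel_relaxed(val | bits, bank->base + reg);

                if (bank->hwlock)
                        hwspin_unlock_in_atomic(bank->hwlock);
        unlock:
                spin_unlock_irqrestore(&bank->lock, flags);
                return err;
        }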
spin_lock_irqsave(&bank->lock, flags); - if (pctl->hwlock) - err = hwspin_lock_timeout(pctl->hwlock, HWSPINLOCK_TIMEOUT); - - if (err) { - dev_err(pctl->dev, "Can't get hwspinlock\n"); - goto unlock; + if (pctl->hwlock) { + err = hwspin_lock_timeout_in_atomic(pctl->hwlock, + HWSPNLCK_TIMEOUT); + if (err) { + dev_err(pctl->dev, "Can't get hwspinlock\n"); + goto unlock; + } } val = readl_relaxed(bank->base + STM32_GPIO_PUPDR); @@ -991,7 +999,7 @@ static int stm32_pconf_set_bias(struct stm32_gpio_bank *bank, writel_relaxed(val, bank->base + STM32_GPIO_PUPDR); if (pctl->hwlock) - hwspin_unlock(pctl->hwlock); + hwspin_unlock_in_atomic(pctl->hwlock); stm32_gpio_backup_bias(bank, offset, bias); @@ -1051,7 +1059,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, struct stm32_gpio_bank *bank; int offset, ret = 0; - range = pinctrl_find_gpio_range_from_pin(pctldev, pin); + range = pinctrl_find_gpio_range_from_pin_nolock(pctldev, pin); if (!range) { dev_err(pctl->dev, "No gpio range defined.\n"); return -EINVAL; @@ -1084,7 +1092,7 @@ static int stm32_pconf_parse_conf(struct pinctrl_dev *pctldev, ret = stm32_pmx_gpio_set_direction(pctldev, range, pin, false); break; default: - ret = -EINVAL; + ret = -ENOTSUPP; } return ret; @@ -1109,9 +1117,11 @@ static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, int i, ret; for (i = 0; i < num_configs; i++) { + mutex_lock(&pctldev->mutex); ret = stm32_pconf_parse_conf(pctldev, g->pin, pinconf_to_config_param(configs[i]), pinconf_to_config_argument(configs[i])); + mutex_unlock(&pctldev->mutex); if (ret < 0) return ret; @@ -1121,6 +1131,22 @@ static int stm32_pconf_group_set(struct pinctrl_dev *pctldev, unsigned group, return 0; } +static int stm32_pconf_set(struct pinctrl_dev *pctldev, unsigned int pin, + unsigned long *configs, unsigned int num_configs) +{ + int i, ret; + + for (i = 0; i < num_configs; i++) { + ret = stm32_pconf_parse_conf(pctldev, pin, + pinconf_to_config_param(configs[i]), + pinconf_to_config_argument(configs[i])); + if (ret < 0) + return ret; + } + + return 0; +} + static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) @@ -1186,10 +1212,10 @@ static void stm32_pconf_dbg_show(struct pinctrl_dev *pctldev, } } - static const struct pinconf_ops stm32_pconf_ops = { .pin_config_group_get = stm32_pconf_group_get, .pin_config_group_set = stm32_pconf_group_set, + .pin_config_set = stm32_pconf_set, .pin_config_dbg_show = stm32_pconf_dbg_show, }; @@ -1202,13 +1228,11 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, struct of_phandle_args args; struct device *dev = pctl->dev; struct resource res; - struct reset_control *rstc; int npins = STM32_GPIO_PINS_PER_BANK; int bank_nr, err; - rstc = of_reset_control_get_exclusive(np, NULL); - if (!IS_ERR(rstc)) - reset_control_deassert(rstc); + if (!IS_ERR(bank->rstc)) + reset_control_deassert(bank->rstc); if (of_address_to_resource(np, 0, &res)) return -ENODEV; @@ -1217,12 +1241,6 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, if (IS_ERR(bank->base)) return PTR_ERR(bank->base); - bank->clk = of_clk_get_by_name(np, NULL); - if (IS_ERR(bank->clk)) { - dev_err(dev, "failed to get clk (%ld)\n", PTR_ERR(bank->clk)); - return PTR_ERR(bank->clk); - } - err = clk_prepare(bank->clk); if (err) { dev_err(dev, "failed to prepare clk (%d)\n", err); @@ -1517,6 +1535,28 @@ int stm32_pctl_probe(struct platform_device *pdev) if (!pctl->banks) return -ENOMEM; + i = 0; + 
for_each_available_child_of_node(np, child) { + struct stm32_gpio_bank *bank = &pctl->banks[i]; + + if (of_property_read_bool(child, "gpio-controller")) { + bank->rstc = of_reset_control_get_exclusive(child, + NULL); + if (PTR_ERR(bank->rstc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + bank->clk = of_clk_get_by_name(child, NULL); + if (IS_ERR(bank->clk)) { + if (PTR_ERR(bank->clk) != -EPROBE_DEFER) + dev_err(dev, + "failed to get clk (%ld)\n", + PTR_ERR(bank->clk)); + return PTR_ERR(bank->clk); + } + i++; + } + } + for_each_available_child_of_node(np, child) { if (of_property_read_bool(child, "gpio-controller")) { ret = stm32_gpiolib_register_bank(pctl, child); diff --git a/drivers/pinctrl/tegra/pinctrl-tegra194.c b/drivers/pinctrl/tegra/pinctrl-tegra194.c index 2e0b5f7bb095..c94ba17243c8 100644 --- a/drivers/pinctrl/tegra/pinctrl-tegra194.c +++ b/drivers/pinctrl/tegra/pinctrl-tegra194.c @@ -98,7 +98,6 @@ static struct tegra_function tegra194_functions[] = { .sfsel_bit = 10, \ .schmitt_bit = schmitt_b, \ .drvtype_bit = 13, \ - .drv_reg = -1, \ .parked_bitmask = 0 #define drive_pex_l5_clkreq_n_pgg0 \ diff --git a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c index b522ca010332..cfb924228d87 100644 --- a/drivers/pinctrl/ti/pinctrl-ti-iodelay.c +++ b/drivers/pinctrl/ti/pinctrl-ti-iodelay.c @@ -2,7 +2,7 @@ * Support for configuration of IO Delay module found on Texas Instruments SoCs * such as DRA7 * - * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015-2017 Texas Instruments Incorporated - https://www.ti.com/ * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index cf072153bdc5..a056031dee81 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -218,6 +218,7 @@ config CROS_EC_TYPEC tristate "ChromeOS EC Type-C Connector Control" depends on MFD_CROS_EC_DEV && TYPEC depends on CROS_USBPD_NOTIFY + depends on USB_ROLE_SWITCH default MFD_CROS_EC_DEV help If you say Y here, you get support for accessing Type C connector diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c index ecfada00e6c5..272c89837d74 100644 --- a/drivers/platform/chrome/cros_ec_debugfs.c +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -242,6 +242,25 @@ static ssize_t cros_ec_pdinfo_read(struct file *file, read_buf, p - read_buf); } +static bool cros_ec_uptime_is_supported(struct cros_ec_device *ec_dev) +{ + struct { + struct cros_ec_command cmd; + struct ec_response_uptime_info resp; + } __packed msg = {}; + int ret; + + msg.cmd.command = EC_CMD_GET_UPTIME_INFO; + msg.cmd.insize = sizeof(msg.resp); + + ret = cros_ec_cmd_xfer_status(ec_dev, &msg.cmd); + if (ret == -EPROTO && msg.cmd.result == EC_RES_INVALID_COMMAND) + return false; + + /* Other errors maybe a transient error, do not rule about support. 
*/ + return true; +} + static ssize_t cros_ec_uptime_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -444,8 +463,9 @@ static int cros_ec_debugfs_probe(struct platform_device *pd) debugfs_create_file("pdinfo", 0444, debug_info->dir, debug_info, &cros_ec_pdinfo_fops); - debugfs_create_file("uptime", 0444, debug_info->dir, debug_info, - &cros_ec_uptime_fops); + if (cros_ec_uptime_is_supported(ec->ec_dev)) + debugfs_create_file("uptime", 0444, debug_info->dir, debug_info, + &cros_ec_uptime_fops); debugfs_create_x32("last_resume_result", 0444, debug_info->dir, &ec->ec_dev->last_resume_result); diff --git a/drivers/platform/chrome/cros_ec_ishtp.c b/drivers/platform/chrome/cros_ec_ishtp.c index ed794a7ddba9..81364029af36 100644 --- a/drivers/platform/chrome/cros_ec_ishtp.c +++ b/drivers/platform/chrome/cros_ec_ishtp.c @@ -681,8 +681,10 @@ static int cros_ec_ishtp_probe(struct ishtp_cl_device *cl_device) /* Register croc_ec_dev mfd */ rv = cros_ec_dev_init(client_data); - if (rv) + if (rv) { + down_write(&init_lock); goto end_cros_ec_dev_init_error; + } return 0; diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 3e745e0fe092..8d52b3b4bd4e 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -208,6 +208,12 @@ static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, msg->insize = sizeof(*r); ret = send_command(ec_dev, msg); + if (ret >= 0) { + if (msg->result == EC_RES_INVALID_COMMAND) + return -EOPNOTSUPP; + if (msg->result != EC_RES_SUCCESS) + return -EPROTO; + } if (ret > 0) { r = (struct ec_response_host_event_mask *)msg->data; *mask = r->mask; @@ -469,14 +475,33 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev) &ver_mask); ec_dev->host_sleep_v1 = (ret >= 0 && (ver_mask & EC_VER_MASK(1))); - /* - * Get host event wake mask, assume all events are wake events - * if unavailable. - */ + /* Get host event wake mask. */ ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg, &ec_dev->host_event_wake_mask); - if (ret < 0) - ec_dev->host_event_wake_mask = U32_MAX; + if (ret < 0) { + /* + * If the EC doesn't support EC_CMD_HOST_EVENT_GET_WAKE_MASK, + * use a reasonable default. Note that we ignore various + * battery, AC status, and power-state events, because (a) + * those can be quite common (e.g., when sitting at full + * charge, on AC) and (b) these are not actionable wake events; + * if anything, we'd like to continue suspending (to save + * power), not wake up. + */ + ec_dev->host_event_wake_mask = U32_MAX & + ~(BIT(EC_HOST_EVENT_AC_DISCONNECTED) | + BIT(EC_HOST_EVENT_BATTERY_LOW) | + BIT(EC_HOST_EVENT_BATTERY_CRITICAL) | + BIT(EC_HOST_EVENT_PD_MCU) | + BIT(EC_HOST_EVENT_BATTERY_STATUS)); + /* + * Old ECs may not support this command. Complain about all + * other errors. + */ + if (ret != -EOPNOTSUPP) + dev_err(ec_dev->dev, + "failed to retrieve wake mask: %d\n", ret); + } ret = 0; @@ -496,8 +521,8 @@ EXPORT_SYMBOL(cros_ec_query_all); * * Return: 0 on success or negative error code. */ -int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, - struct cros_ec_command *msg) +static int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg) { int ret; @@ -541,7 +566,6 @@ int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev, return ret; } -EXPORT_SYMBOL(cros_ec_cmd_xfer); /** * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC. 
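[Editor's note] The wake-mask fallback above hinges on how the EC's own result code is folded into an errno: EC_RES_INVALID_COMMAND means the firmware simply predates the command (handled quietly with a default mask), while any other non-success result is treated as a protocol error. A compact sketch of that convention; cros_ec_result_to_errno() is a hypothetical name, not part of the cros_ec API:

        #include <linux/errno.h>
        #include <linux/platform_data/cros_ec_commands.h>
        #include <linux/types.h>

        /* Hypothetical helper; mirrors the checks added above. */
        static int cros_ec_result_to_errno(int xfer_ret, u32 ec_result)
        {
                if (xfer_ret < 0)
                        return xfer_ret;        /* transport-level failure */
                if (ec_result == EC_RES_INVALID_COMMAND)
                        return -EOPNOTSUPP;     /* EC does not know the command */
                if (ec_result != EC_RES_SUCCESS)
                        return -EPROTO;         /* EC rejected the command */
                return 0;
        }

The same distinction drives cros_ec_uptime_is_supported() above: only the -EPROTO/EC_RES_INVALID_COMMAND case hides the debugfs file, while transient transport errors do not rule out support.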
diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c index 7e8629e3db74..30d0ba3b8889 100644 --- a/drivers/platform/chrome/cros_ec_rpmsg.c +++ b/drivers/platform/chrome/cros_ec_rpmsg.c @@ -38,6 +38,9 @@ struct cros_ec_rpmsg_response { * @rpdev: rpmsg device we are connected to * @xfer_ack: completion for host command transfer. * @host_event_work: Work struct for pending host event. + * @ept: The rpmsg endpoint of this channel. + * @has_pending_host_event: Boolean used to check if there is a pending event. + * @probe_done: Flag to indicate that probe is done. */ struct cros_ec_rpmsg { struct rpmsg_device *rpdev; diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c index 24e48d96ed76..8921f24e83ba 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub_ring.c +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -419,9 +419,7 @@ cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub, * Disable filtering since we might add more jitter * if b is in a random point in time. */ - new_timestamp = fifo_timestamp - - fifo_info->timestamp * 1000 + - in->timestamp * 1000; + new_timestamp = c - b * 1000 + a * 1000; /* * The timestamp can be stale if we had to use the fifo * info timestamp. @@ -675,29 +673,22 @@ done_with_this_batch: * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then * add to ringbuffer (legacy). * - * Note: This assumes we're running old firmware, where every sample's timestamp - * is after the sample. Run if tight_timestamps == false. - * - * If there is a sample with a proper timestamp + * Note: This assumes we're running old firmware, where timestamp + * is inserted after its sample(s)e. There can be several samples between + * timestamps, so several samples can have the same timestamp. * * timestamp | count * ----------------- - * older_unprocess_out --> TS1 | 1 - * TS1 | 2 - * out --> TS1 | 3 - * next_out --> TS2 | - * - * We spread time for the samples [older_unprocess_out .. out] - * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2]. + * 1st sample --> TS1 | 1 + * TS2 | 2 + * TS2 | 3 + * TS3 | 4 + * last_out --> * - * If we reach the end of the samples, we compare with the - * current timestamp: * - * older_unprocess_out --> TS1 | 1 - * TS1 | 2 - * out --> TS1 | 3 + * We spread time for the samples using perod p = (current - TS1)/4. + * between TS1 and TS2: [TS1+p/4, TS1+2p/4, TS1+3p/4, current_timestamp]. * - * We know have [TS1+1/3, TS1+2/3, current timestamp] */ static void cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub, @@ -710,58 +701,37 @@ cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub, int i; for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) { - s64 older_timestamp; s64 timestamp; - struct cros_ec_sensors_ring_sample *older_unprocess_out = - sensorhub->ring; - struct cros_ec_sensors_ring_sample *next_out; - int count = 1; - - for (out = sensorhub->ring; out < last_out; out = next_out) { - s64 time_period; + int count = 0; + s64 time_period; - next_out = out + 1; + for (out = sensorhub->ring; out < last_out; out++) { if (out->sensor_id != i) continue; /* Timestamp to start with */ - older_timestamp = out->timestamp; - - /* Find next sample. */ - while (next_out < last_out && next_out->sensor_id != i) - next_out++; + timestamp = out->timestamp; + out++; + count = 1; + break; + } + for (; out < last_out; out++) { + /* Find last sample. 
*/ + if (out->sensor_id != i) + continue; + count++; + } + if (count == 0) + continue; - if (next_out >= last_out) { - timestamp = current_timestamp; - } else { - timestamp = next_out->timestamp; - if (timestamp == older_timestamp) { - count++; - continue; - } - } + /* Spread uniformly between the first and last samples. */ + time_period = div_s64(current_timestamp - timestamp, count); - /* - * The next sample has a new timestamp, spread the - * unprocessed samples. - */ - if (next_out < last_out) - count++; - time_period = div_s64(timestamp - older_timestamp, - count); - - for (; older_unprocess_out <= out; - older_unprocess_out++) { - if (older_unprocess_out->sensor_id != i) - continue; - older_timestamp += time_period; - older_unprocess_out->timestamp = - older_timestamp; - } - count = 1; - /* The next_out sample has a valid timestamp, skip. */ - next_out++; - older_unprocess_out = next_out; + for (out = sensorhub->ring; out < last_out; out++) { + if (out->sensor_id != i) + continue; + timestamp += time_period; + out->timestamp = timestamp; } } diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index d09260382550..dfa1f816a45f 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -148,6 +148,10 @@ static int terminate_request(struct cros_ec_device *ec_dev) * receive_n_bytes - receive n bytes from the EC. * * Assumes buf is a pointer into the ec_dev->din buffer + * + * @ec_dev: ChromeOS EC device. + * @buf: Pointer to the buffer receiving the data. + * @n: Number of bytes received. */ static int receive_n_bytes(struct cros_ec_device *ec_dev, u8 *buf, int n) { diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c index 66b8d21092af..3fcd27ec9ad8 100644 --- a/drivers/platform/chrome/cros_ec_typec.c +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -14,9 +14,21 @@ #include <linux/platform_data/cros_usbpd_notify.h> #include <linux/platform_device.h> #include <linux/usb/typec.h> +#include <linux/usb/typec_altmode.h> +#include <linux/usb/typec_dp.h> +#include <linux/usb/typec_mux.h> +#include <linux/usb/typec_tbt.h> +#include <linux/usb/role.h> #define DRV_NAME "cros-ec-typec" +/* Supported alt modes. */ +enum { + CROS_EC_ALTMODE_DP = 0, + CROS_EC_ALTMODE_TBT, + CROS_EC_ALTMODE_MAX, +}; + /* Per port data. */ struct cros_typec_port { struct typec_port *port; @@ -25,6 +37,16 @@ struct cros_typec_port { struct typec_partner *partner; /* Port partner PD identity info. */ struct usb_pd_identity p_identity; + struct typec_switch *ori_sw; + struct typec_mux *mux; + struct usb_role_switch *role_sw; + + /* Variables keeping track of switch state. */ + struct typec_mux_state state; + uint8_t mux_flags; + + /* Port alt modes. */ + struct typec_altmode p_altmode[CROS_EC_ALTMODE_MAX]; }; /* Platform-specific data for the Chrome OS EC Type C controller. */ @@ -32,10 +54,11 @@ struct cros_typec_data { struct device *dev; struct cros_ec_device *ec; int num_ports; - unsigned int cmd_ver; + unsigned int pd_ctrl_ver; /* Array of ports, indexed by port number. 
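[Editor's note] Returning to the cros_ec_sensorhub_ring rewrite above: the legacy path now timestamps each sensor's pending samples by spreading them uniformly between the first recorded timestamp and the current one. With made-up numbers, 4 samples sharing TS1 = 1000 us and current_timestamp = 1400 us give a 100 us period and rewritten timestamps of 1100, 1200, 1300 and 1400 us; the last sample only lands exactly on the current timestamp when the interval divides evenly. A hypothetical stand-alone version of that spread:

        #include <linux/math64.h>
        #include <linux/types.h>

        /* Hypothetical helper; not part of the sensorhub driver. */
        static void spread_uniform(s64 *ts, int count, s64 first, s64 current)
        {
                s64 period = div_s64(current - first, count);
                s64 t = first;
                int i;

                for (i = 0; i < count; i++) {
                        t += period;
                        ts[i] = t;
                }
        }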
*/ struct cros_typec_port *ports[EC_USB_PD_MAX_PORTS]; struct notifier_block nb; + struct work_struct port_work; }; static int cros_typec_parse_port_props(struct typec_capability *cap, @@ -84,6 +107,81 @@ static int cros_typec_parse_port_props(struct typec_capability *cap, return 0; } +static int cros_typec_get_switch_handles(struct cros_typec_port *port, + struct fwnode_handle *fwnode, + struct device *dev) +{ + port->mux = fwnode_typec_mux_get(fwnode, NULL); + if (IS_ERR(port->mux)) { + dev_dbg(dev, "Mux handle not found.\n"); + goto mux_err; + } + + port->ori_sw = fwnode_typec_switch_get(fwnode); + if (IS_ERR(port->ori_sw)) { + dev_dbg(dev, "Orientation switch handle not found.\n"); + goto ori_sw_err; + } + + port->role_sw = fwnode_usb_role_switch_get(fwnode); + if (IS_ERR(port->role_sw)) { + dev_dbg(dev, "USB role switch handle not found.\n"); + goto role_sw_err; + } + + return 0; + +role_sw_err: + usb_role_switch_put(port->role_sw); +ori_sw_err: + typec_switch_put(port->ori_sw); +mux_err: + typec_mux_put(port->mux); + + return -ENODEV; +} + +static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num, + bool pd_en) +{ + struct cros_typec_port *port = typec->ports[port_num]; + struct typec_partner_desc p_desc = { + .usb_pd = pd_en, + }; + int ret = 0; + + /* + * Fill an initial PD identity, which will then be updated with info + * from the EC. + */ + p_desc.identity = &port->p_identity; + + port->partner = typec_register_partner(port->port, &p_desc); + if (IS_ERR(port->partner)) { + ret = PTR_ERR(port->partner); + port->partner = NULL; + } + + return ret; +} + +static void cros_typec_remove_partner(struct cros_typec_data *typec, + int port_num) +{ + struct cros_typec_port *port = typec->ports[port_num]; + + port->state.alt = NULL; + port->state.mode = TYPEC_STATE_USB; + port->state.data = NULL; + + usb_role_switch_set_role(port->role_sw, USB_ROLE_NONE); + typec_switch_set(port->ori_sw, TYPEC_ORIENTATION_NONE); + typec_mux_set(port->mux, &port->state); + + typec_unregister_partner(port->partner); + port->partner = NULL; +} + static void cros_unregister_ports(struct cros_typec_data *typec) { int i; @@ -91,10 +189,40 @@ static void cros_unregister_ports(struct cros_typec_data *typec) for (i = 0; i < typec->num_ports; i++) { if (!typec->ports[i]) continue; + cros_typec_remove_partner(typec, i); + usb_role_switch_put(typec->ports[i]->role_sw); + typec_switch_put(typec->ports[i]->ori_sw); + typec_mux_put(typec->ports[i]->mux); typec_unregister_port(typec->ports[i]->port); } } +/* + * Fake the alt mode structs until we actually start registering Type C port + * and partner alt modes. + */ +static void cros_typec_register_port_altmodes(struct cros_typec_data *typec, + int port_num) +{ + struct cros_typec_port *port = typec->ports[port_num]; + + /* All PD capable CrOS devices are assumed to support DP altmode. */ + port->p_altmode[CROS_EC_ALTMODE_DP].svid = USB_TYPEC_DP_SID; + port->p_altmode[CROS_EC_ALTMODE_DP].mode = USB_TYPEC_DP_MODE; + + /* + * Register TBT compatibility alt mode. The EC will not enter the mode + * if it doesn't support it, so it's safe to register it unconditionally + * here for now. 
+ */ + port->p_altmode[CROS_EC_ALTMODE_TBT].svid = USB_TYPEC_TBT_SID; + port->p_altmode[CROS_EC_ALTMODE_TBT].mode = TYPEC_ANY_MODE; + + port->state.alt = NULL; + port->state.mode = TYPEC_STATE_USB; + port->state.data = NULL; +} + static int cros_typec_init_ports(struct cros_typec_data *typec) { struct device *dev = typec->dev; @@ -153,6 +281,13 @@ static int cros_typec_init_ports(struct cros_typec_data *typec) ret = PTR_ERR(cros_port->port); goto unregister_ports; } + + ret = cros_typec_get_switch_handles(cros_port, fwnode, dev); + if (ret) + dev_dbg(dev, "No switch control for port %d\n", + port_num); + + cros_typec_register_port_altmodes(typec, port_num); } return 0; @@ -193,30 +328,6 @@ static int cros_typec_ec_command(struct cros_typec_data *typec, return ret; } -static int cros_typec_add_partner(struct cros_typec_data *typec, int port_num, - bool pd_en) -{ - struct cros_typec_port *port = typec->ports[port_num]; - struct typec_partner_desc p_desc = { - .usb_pd = pd_en, - }; - int ret = 0; - - /* - * Fill an initial PD identity, which will then be updated with info - * from the EC. - */ - p_desc.identity = &port->p_identity; - - port->partner = typec_register_partner(port->port, &p_desc); - if (IS_ERR(port->partner)) { - ret = PTR_ERR(port->partner); - port->partner = NULL; - } - - return ret; -} - static void cros_typec_set_port_params_v0(struct cros_typec_data *typec, int port_num, struct ec_response_usb_pd_control *resp) { @@ -270,16 +381,166 @@ static void cros_typec_set_port_params_v1(struct cros_typec_data *typec, } else { if (!typec->ports[port_num]->partner) return; + cros_typec_remove_partner(typec, port_num); + } +} - typec_unregister_partner(typec->ports[port_num]->partner); - typec->ports[port_num]->partner = NULL; +static int cros_typec_get_mux_info(struct cros_typec_data *typec, int port_num, + struct ec_response_usb_pd_mux_info *resp) +{ + struct ec_params_usb_pd_mux_info req = { + .port = port_num, + }; + + return cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_MUX_INFO, &req, + sizeof(req), resp, sizeof(*resp)); +} + +static int cros_typec_usb_safe_state(struct cros_typec_port *port) +{ + port->state.mode = TYPEC_STATE_SAFE; + + return typec_mux_set(port->mux, &port->state); +} + +/* + * Spoof the VDOs that were likely communicated by the partner for TBT alt + * mode. 
+ */ +static int cros_typec_enable_tbt(struct cros_typec_data *typec, + int port_num, + struct ec_response_usb_pd_control_v2 *pd_ctrl) +{ + struct cros_typec_port *port = typec->ports[port_num]; + struct typec_thunderbolt_data data; + int ret; + + if (typec->pd_ctrl_ver < 2) { + dev_err(typec->dev, + "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver); + return -ENOTSUPP; + } + + /* Device Discover Mode VDO */ + data.device_mode = TBT_MODE; + + if (pd_ctrl->control_flags & USB_PD_CTRL_TBT_LEGACY_ADAPTER) + data.device_mode = TBT_SET_ADAPTER(TBT_ADAPTER_TBT3); + + /* Cable Discover Mode VDO */ + data.cable_mode = TBT_MODE; + data.cable_mode |= TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed); + + if (pd_ctrl->control_flags & USB_PD_CTRL_OPTICAL_CABLE) + data.cable_mode |= TBT_CABLE_OPTICAL; + + if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_LINK_UNIDIR) + data.cable_mode |= TBT_CABLE_LINK_TRAINING; + + if (pd_ctrl->cable_gen) + data.cable_mode |= TBT_CABLE_ROUNDED; + + /* Enter Mode VDO */ + data.enter_vdo = TBT_SET_CABLE_SPEED(pd_ctrl->cable_speed); + + if (pd_ctrl->control_flags & USB_PD_CTRL_ACTIVE_CABLE) + data.enter_vdo |= TBT_ENTER_MODE_ACTIVE_CABLE; + + if (!port->state.alt) { + port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_TBT]; + ret = cros_typec_usb_safe_state(port); + if (ret) + return ret; + } + + port->state.data = &data; + port->state.mode = TYPEC_TBT_MODE; + + return typec_mux_set(port->mux, &port->state); +} + +/* Spoof the VDOs that were likely communicated by the partner. */ +static int cros_typec_enable_dp(struct cros_typec_data *typec, + int port_num, + struct ec_response_usb_pd_control_v2 *pd_ctrl) +{ + struct cros_typec_port *port = typec->ports[port_num]; + struct typec_displayport_data dp_data; + int ret; + + if (typec->pd_ctrl_ver < 2) { + dev_err(typec->dev, + "PD_CTRL version too old: %d\n", typec->pd_ctrl_ver); + return -ENOTSUPP; + } + + /* Status VDO. */ + dp_data.status = DP_STATUS_ENABLED; + if (port->mux_flags & USB_PD_MUX_HPD_IRQ) + dp_data.status |= DP_STATUS_IRQ_HPD; + if (port->mux_flags & USB_PD_MUX_HPD_LVL) + dp_data.status |= DP_STATUS_HPD_STATE; + + /* Configuration VDO. 
*/ + dp_data.conf = DP_CONF_SET_PIN_ASSIGN(pd_ctrl->dp_mode); + if (!port->state.alt) { + port->state.alt = &port->p_altmode[CROS_EC_ALTMODE_DP]; + ret = cros_typec_usb_safe_state(port); + if (ret) + return ret; } + + port->state.data = &dp_data; + port->state.mode = TYPEC_MODAL_STATE(ffs(pd_ctrl->dp_mode)); + + return typec_mux_set(port->mux, &port->state); +} + +static int cros_typec_configure_mux(struct cros_typec_data *typec, int port_num, + uint8_t mux_flags, + struct ec_response_usb_pd_control_v2 *pd_ctrl) +{ + struct cros_typec_port *port = typec->ports[port_num]; + enum typec_orientation orientation; + int ret; + + if (!port->partner) + return 0; + + if (mux_flags & USB_PD_MUX_POLARITY_INVERTED) + orientation = TYPEC_ORIENTATION_REVERSE; + else + orientation = TYPEC_ORIENTATION_NORMAL; + + ret = typec_switch_set(port->ori_sw, orientation); + if (ret) + return ret; + + if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED) { + ret = cros_typec_enable_tbt(typec, port_num, pd_ctrl); + } else if (mux_flags & USB_PD_MUX_DP_ENABLED) { + ret = cros_typec_enable_dp(typec, port_num, pd_ctrl); + } else if (mux_flags & USB_PD_MUX_SAFE_MODE) { + ret = cros_typec_usb_safe_state(port); + } else if (mux_flags & USB_PD_MUX_USB_ENABLED) { + port->state.alt = NULL; + port->state.mode = TYPEC_STATE_USB; + ret = typec_mux_set(port->mux, &port->state); + } else { + dev_info(typec->dev, + "Unsupported mode requested, mux flags: %x\n", + mux_flags); + ret = -ENOTSUPP; + } + + return ret; } static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) { struct ec_params_usb_pd_control req; - struct ec_response_usb_pd_control_v1 resp; + struct ec_response_usb_pd_control_v2 resp; + struct ec_response_usb_pd_mux_info mux_resp; int ret; if (port_num < 0 || port_num >= typec->num_ports) { @@ -293,7 +554,7 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) req.mux = USB_PD_CTRL_MUX_NO_CHANGE; req.swap = USB_PD_CTRL_SWAP_NONE; - ret = cros_typec_ec_command(typec, typec->cmd_ver, + ret = cros_typec_ec_command(typec, typec->pd_ctrl_ver, EC_CMD_USB_PD_CONTROL, &req, sizeof(req), &resp, sizeof(resp)); if (ret < 0) @@ -304,13 +565,33 @@ static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity); dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state); - if (typec->cmd_ver == 1) - cros_typec_set_port_params_v1(typec, port_num, &resp); + if (typec->pd_ctrl_ver != 0) + cros_typec_set_port_params_v1(typec, port_num, + (struct ec_response_usb_pd_control_v1 *)&resp); else cros_typec_set_port_params_v0(typec, port_num, (struct ec_response_usb_pd_control *) &resp); - return 0; + /* Update the switches if they exist, according to requested state */ + ret = cros_typec_get_mux_info(typec, port_num, &mux_resp); + if (ret < 0) { + dev_warn(typec->dev, + "Failed to get mux info for port: %d, err = %d\n", + port_num, ret); + return 0; + } + + /* No change needs to be made, let's exit early. 
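[Editor's note] The cros_typec_configure_mux() dispatch above checks the EC's mux flags in a fixed order, so one flag combination resolves to exactly one connector state: Thunderbolt compatibility wins over DP, DP over the USB safe state, safe state over plain USB, and anything else is reported as unsupported. A minimal sketch of that precedence with a hypothetical enum (my_mux_target, my_pick_mux_target) rather than the real typec_mux plumbing:

        #include <linux/platform_data/cros_ec_commands.h>
        #include <linux/types.h>

        enum my_mux_target { MY_MUX_TBT, MY_MUX_DP, MY_MUX_SAFE, MY_MUX_USB, MY_MUX_NONE };

        static enum my_mux_target my_pick_mux_target(u8 mux_flags)
        {
                if (mux_flags & USB_PD_MUX_TBT_COMPAT_ENABLED)
                        return MY_MUX_TBT;
                if (mux_flags & USB_PD_MUX_DP_ENABLED)
                        return MY_MUX_DP;
                if (mux_flags & USB_PD_MUX_SAFE_MODE)
                        return MY_MUX_SAFE;
                if (mux_flags & USB_PD_MUX_USB_ENABLED)
                        return MY_MUX_USB;
                return MY_MUX_NONE;     /* configure_mux() logs this and returns -ENOTSUPP */
        }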
*/ + if (typec->ports[port_num]->mux_flags == mux_resp.flags) + return 0; + + typec->ports[port_num]->mux_flags = mux_resp.flags; + ret = cros_typec_configure_mux(typec, port_num, mux_resp.flags, &resp); + if (ret) + dev_warn(typec->dev, "Configure muxes failed, err = %d\n", ret); + + return usb_role_switch_set_role(typec->ports[port_num]->role_sw, + !!(resp.role & PD_CTRL_RESP_ROLE_DATA)); } static int cros_typec_get_cmd_version(struct cros_typec_data *typec) @@ -327,22 +608,22 @@ static int cros_typec_get_cmd_version(struct cros_typec_data *typec) if (ret < 0) return ret; - if (resp.version_mask & EC_VER_MASK(1)) - typec->cmd_ver = 1; + if (resp.version_mask & EC_VER_MASK(2)) + typec->pd_ctrl_ver = 2; + else if (resp.version_mask & EC_VER_MASK(1)) + typec->pd_ctrl_ver = 1; else - typec->cmd_ver = 0; + typec->pd_ctrl_ver = 0; dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n", - typec->cmd_ver); + typec->pd_ctrl_ver); return 0; } -static int cros_ec_typec_event(struct notifier_block *nb, - unsigned long host_event, void *_notify) +static void cros_typec_port_work(struct work_struct *work) { - struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, - nb); + struct cros_typec_data *typec = container_of(work, struct cros_typec_data, port_work); int ret, i; for (i = 0; i < typec->num_ports; i++) { @@ -350,6 +631,14 @@ static int cros_ec_typec_event(struct notifier_block *nb, if (ret < 0) dev_warn(typec->dev, "Update failed for port: %d\n", i); } +} + +static int cros_ec_typec_event(struct notifier_block *nb, + unsigned long host_event, void *_notify) +{ + struct cros_typec_data *typec = container_of(nb, struct cros_typec_data, nb); + + schedule_work(&typec->port_work); return NOTIFY_OK; } @@ -408,6 +697,12 @@ static int cros_typec_probe(struct platform_device *pdev) if (ret < 0) return ret; + INIT_WORK(&typec->port_work, cros_typec_port_work); + + /* + * Safe to call port update here, since we haven't registered the + * PD notifier yet. + */ for (i = 0; i < typec->num_ports; i++) { ret = cros_typec_port_update(typec, i); if (ret < 0) @@ -426,11 +721,35 @@ unregister_ports: return ret; } +static int __maybe_unused cros_typec_suspend(struct device *dev) +{ + struct cros_typec_data *typec = dev_get_drvdata(dev); + + cancel_work_sync(&typec->port_work); + + return 0; +} + +static int __maybe_unused cros_typec_resume(struct device *dev) +{ + struct cros_typec_data *typec = dev_get_drvdata(dev); + + /* Refresh port state. */ + schedule_work(&typec->port_work); + + return 0; +} + +static const struct dev_pm_ops cros_typec_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(cros_typec_suspend, cros_typec_resume) +}; + static struct platform_driver cros_typec_driver = { .driver = { .name = DRV_NAME, .acpi_match_table = ACPI_PTR(cros_typec_acpi_id), .of_match_table = of_match_ptr(cros_typec_of_match), + .pm = &cros_typec_pm_ops, }, .probe = cros_typec_probe, }; diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 5739a9669b29..bbc4e71a16ff 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -625,7 +625,10 @@ static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring, vdev_id = VIRTIO_ID_NET; hdr_len = sizeof(struct virtio_net_hdr); config = &fifo->vdev[vdev_id]->config.net; - if (ntohs(hdr.len) > config->mtu + + /* A legacy-only interface for now. 
*/ + if (ntohs(hdr.len) > + __virtio16_to_cpu(virtio_legacy_is_little_endian(), + config->mtu) + MLXBF_TMFIFO_NET_L2_OVERHEAD) return; } else { @@ -1231,8 +1234,12 @@ static int mlxbf_tmfifo_probe(struct platform_device *pdev) /* Create the network vdev. */ memset(&net_config, 0, sizeof(net_config)); - net_config.mtu = ETH_DATA_LEN; - net_config.status = VIRTIO_NET_S_LINK_UP; + + /* A legacy-only interface for now. */ + net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(), + ETH_DATA_LEN); + net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(), + VIRTIO_NET_S_LINK_UP); mlxbf_tmfifo_get_cfg_mac(net_config.mac); rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET, MLXBF_TMFIFO_NET_FEATURES, &net_config, diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 90bc7969b199..8cf8c1be2666 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -186,7 +186,9 @@ #define MLXPLAT_CPLD_WD_RESET_ACT_MASK GENMASK(7, 1) #define MLXPLAT_CPLD_WD_FAN_ACT_MASK (GENMASK(7, 0) & ~BIT(4)) #define MLXPLAT_CPLD_WD_COUNT_ACT_MASK (GENMASK(7, 0) & ~BIT(7)) +#define MLXPLAT_CPLD_WD_CPBLTY_MASK (GENMASK(7, 0) & ~BIT(6)) #define MLXPLAT_CPLD_WD_DFLT_TIMEOUT 30 +#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT 600 #define MLXPLAT_CPLD_WD_MAX_DEVS 2 /* mlxplat_priv - platform private data @@ -2084,6 +2086,84 @@ static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type2[] = { }, }; +/* Watchdog type3: hardware implementation version 3 + * Can be on all systems. It's differentiated by WD capability bit. + * Old systems (MSN2700, MSN2410, MSN2740, MSN2100 and MSN2140) + * still have only one main watchdog. + */ +static struct mlxreg_core_data mlxplat_mlxcpld_wd_main_regs_type3[] = { + { + .label = "action", + .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, + .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK, + .bit = 0, + }, + { + .label = "timeout", + .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET, + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, + .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT, + }, + { + .label = "timeleft", + .reg = MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET, + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, + }, + { + .label = "ping", + .reg = MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, + .mask = MLXPLAT_CPLD_WD_RESET_ACT_MASK, + .bit = 0, + }, + { + .label = "reset", + .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .bit = 6, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_wd_aux_regs_type3[] = { + { + .label = "action", + .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, + .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK, + .bit = 4, + }, + { + .label = "timeout", + .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET, + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, + .health_cntr = MLXPLAT_CPLD_WD3_DFLT_TIMEOUT, + }, + { + .label = "timeleft", + .reg = MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET, + .mask = MLXPLAT_CPLD_WD_TYPE2_TO_MASK, + }, + { + .label = "ping", + .reg = MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, + .mask = MLXPLAT_CPLD_WD_FAN_ACT_MASK, + .bit = 4, + }, +}; + +static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = { + { + .data = mlxplat_mlxcpld_wd_main_regs_type3, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_main_regs_type3), + .version = MLX_WDT_TYPE3, + .identity = "mlx-wdt-main", + }, + { + .data = mlxplat_mlxcpld_wd_aux_regs_type3, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_wd_aux_regs_type3), + .version = MLX_WDT_TYPE3, + .identity = "mlx-wdt-aux", + }, +}; + static bool 
mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -2114,8 +2194,10 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET: case MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET: case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET: + case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET: case MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET: case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET: + case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET: case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET: case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET: case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET: @@ -2742,6 +2824,27 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) return 0; } +static int mlxplat_mlxcpld_check_wd_capability(void *regmap) +{ + u32 regval; + int i, rc; + + rc = regmap_read(regmap, MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + ®val); + if (rc) + return rc; + + if (!(regval & ~MLXPLAT_CPLD_WD_CPBLTY_MASK)) { + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type3); i++) { + if (mlxplat_wd_data[i]) + mlxplat_wd_data[i] = + &mlxplat_mlxcpld_wd_set_type3[i]; + } + } + + return 0; +} + static int __init mlxplat_init(void) { struct mlxplat_priv *priv; @@ -2874,6 +2977,9 @@ static int __init mlxplat_init(void) } /* Add WD drivers. */ + err = mlxplat_mlxcpld_check_wd_capability(priv->regmap); + if (err) + goto fail_platform_wd_register; for (j = 0; j < MLXPLAT_CPLD_WD_MAX_DEVS; j++) { if (mlxplat_wd_data[j]) { mlxplat_wd_data[j]->regmap = priv->regmap; diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index f07b982c8dff..0a1fb5c74f83 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -99,6 +99,17 @@ config POWER_RESET_HISI help Reboot support for Hisilicon boards. +config POWER_RESET_LINKSTATION + tristate "Buffalo LinkStation power-off driver" + depends on ARCH_MVEBU || COMPILE_TEST + depends on OF_MDIO && PHYLIB + help + This driver supports turning off some Buffalo LinkStations by + setting an output pin at the ethernet PHY to the correct state. + It also makes the device compatible with the WoL function. + + Say Y here if you have a Buffalo LinkStation LS421D/E. + config POWER_RESET_MSM bool "Qualcomm MSM power-off driver" depends on ARCH_QCOM diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile index 5710ca469517..c51eceba9ea3 100644 --- a/drivers/power/reset/Makefile +++ b/drivers/power/reset/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_POWER_RESET_GEMINI_POWEROFF) += gemini-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO) += gpio-poweroff.o obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o +obj-${CONFIG_POWER_RESET_LINKSTATION} += linkstation-poweroff.o obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o obj-$(CONFIG_POWER_RESET_MT6323) += mt6323-poweroff.o obj-$(CONFIG_POWER_RESET_OXNAS) += oxnas-restart.o diff --git a/drivers/power/reset/keystone-reset.c b/drivers/power/reset/keystone-reset.c index ad11faae19c5..211eeef0c81a 100644 --- a/drivers/power/reset/keystone-reset.c +++ b/drivers/power/reset/keystone-reset.c @@ -2,7 +2,7 @@ /* * TI keystone reboot driver * - * Copyright (C) 2014 Texas Instruments Incorporated. http://www.ti.com/ + * Copyright (C) 2014 Texas Instruments Incorporated. 
https://www.ti.com/ * * Author: Ivan Khoronzhuk <ivan.khoronzhuk@ti.com> */ diff --git a/drivers/power/reset/linkstation-poweroff.c b/drivers/power/reset/linkstation-poweroff.c new file mode 100644 index 000000000000..39e89baedb5f --- /dev/null +++ b/drivers/power/reset/linkstation-poweroff.c @@ -0,0 +1,136 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LinkStation power off restart driver + * Copyright (C) 2020 Daniel González Cabanelas <dgcbueu@gmail.com> + */ + +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_platform.h> +#include <linux/reboot.h> +#include <linux/phy.h> + +/* Defines from the eth phy Marvell driver */ +#define MII_MARVELL_COPPER_PAGE 0 +#define MII_MARVELL_LED_PAGE 3 +#define MII_MARVELL_WOL_PAGE 17 +#define MII_MARVELL_PHY_PAGE 22 + +#define MII_PHY_LED_CTRL 16 +#define MII_88E1318S_PHY_LED_TCR 18 +#define MII_88E1318S_PHY_WOL_CTRL 16 +#define MII_M1011_IEVENT 19 + +#define MII_88E1318S_PHY_LED_TCR_INTn_ENABLE BIT(7) +#define MII_88E1318S_PHY_LED_TCR_FORCE_INT BIT(15) +#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12) +#define LED2_FORCE_ON (0x8 << 8) +#define LEDMASK GENMASK(11,8) + +static struct phy_device *phydev; + +static void mvphy_reg_intn(u16 data) +{ + int rc = 0, saved_page; + + saved_page = phy_select_page(phydev, MII_MARVELL_LED_PAGE); + if (saved_page < 0) + goto err; + + /* Force manual LED2 control to let INTn work */ + __phy_modify(phydev, MII_PHY_LED_CTRL, LEDMASK, LED2_FORCE_ON); + + /* Set the LED[2]/INTn pin to the required state */ + __phy_modify(phydev, MII_88E1318S_PHY_LED_TCR, + MII_88E1318S_PHY_LED_TCR_FORCE_INT, + MII_88E1318S_PHY_LED_TCR_INTn_ENABLE | data); + + if (!data) { + /* Clear interrupts to ensure INTn won't be holded in high state */ + __phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_MARVELL_COPPER_PAGE); + __phy_read(phydev, MII_M1011_IEVENT); + + /* If WOL was enabled and a magic packet was received before powering + * off, we won't be able to wake up by sending another magic packet. + * Clear WOL status. 
+ */ + __phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_MARVELL_WOL_PAGE); + __phy_set_bits(phydev, MII_88E1318S_PHY_WOL_CTRL, + MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS); + } +err: + rc = phy_restore_page(phydev, saved_page, rc); + if (rc < 0) + dev_err(&phydev->mdio.dev, "Write register failed, %d\n", rc); +} + +static int linkstation_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + if (action == SYS_RESTART) + mvphy_reg_intn(MII_88E1318S_PHY_LED_TCR_FORCE_INT); + + return NOTIFY_DONE; +} + +static struct notifier_block linkstation_reboot_nb = { + .notifier_call = linkstation_reboot_notifier, +}; + +static void linkstation_poweroff(void) +{ + unregister_reboot_notifier(&linkstation_reboot_nb); + mvphy_reg_intn(0); + + kernel_restart("Power off"); +} + +static const struct of_device_id ls_poweroff_of_match[] = { + { .compatible = "buffalo,ls421d" }, + { .compatible = "buffalo,ls421de" }, + { }, +}; + +static int __init linkstation_poweroff_init(void) +{ + struct mii_bus *bus; + struct device_node *dn; + + dn = of_find_matching_node(NULL, ls_poweroff_of_match); + if (!dn) + return -ENODEV; + of_node_put(dn); + + dn = of_find_node_by_name(NULL, "mdio"); + if (!dn) + return -ENODEV; + + bus = of_mdio_find_bus(dn); + of_node_put(dn); + if (!bus) + return -EPROBE_DEFER; + + phydev = phy_find_first(bus); + if (!phydev) + return -EPROBE_DEFER; + + register_reboot_notifier(&linkstation_reboot_nb); + pm_power_off = linkstation_poweroff; + + return 0; +} + +static void __exit linkstation_poweroff_exit(void) +{ + pm_power_off = NULL; + unregister_reboot_notifier(&linkstation_reboot_nb); +} + +module_init(linkstation_poweroff_init); +module_exit(linkstation_poweroff_exit); + +MODULE_AUTHOR("Daniel González Cabanelas <dgcbueu@gmail.com>"); +MODULE_DESCRIPTION("LinkStation power off driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/88pm860x_battery.c b/drivers/power/supply/88pm860x_battery.c index 1308f3a185f3..590da88a17a2 100644 --- a/drivers/power/supply/88pm860x_battery.c +++ b/drivers/power/supply/88pm860x_battery.c @@ -433,7 +433,7 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) int ret; int data; int bat_remove; - int soc; + int soc = 0; /* measure enable on GPADC1 */ data = MEAS1_GP1; @@ -496,7 +496,9 @@ static void pm860x_init_battery(struct pm860x_battery_info *info) } mutex_unlock(&info->lock); - calc_soc(info, OCV_MODE_ACTIVE, &soc); + ret = calc_soc(info, OCV_MODE_ACTIVE, &soc); + if (ret < 0) + goto out; data = pm860x_reg_read(info->i2c, PM8607_POWER_UP_LOG); bat_remove = data & BAT_WU_LOG; diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 44d3c8512fb8..faf2830aa152 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -610,6 +610,19 @@ config CHARGER_BQ24735 help Say Y to enable support for the TI BQ24735 battery charger. +config CHARGER_BQ2515X + tristate "TI BQ2515X battery charger family" + depends on I2C + depends on GPIOLIB || COMPILE_TEST + select REGMAP_I2C + help + Say Y to enable support for the TI BQ2515X family of battery + charging integrated circuits. The BQ2515X are highly integrated + battery charge management ICs that integrate the most common + functions for wearable devices, namely a charger, an output voltage + rail, ADC for battery and system monitoring, and push-button + controller. 
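[Editor's note] The linkstation-poweroff driver above relies on the phylib paged-access helpers: phy_select_page() switches the Marvell register page and takes the MDIO bus lock, the __phy_* accessors then operate on that page without re-locking, and phy_restore_page() restores the original page, releases the lock and merges the error codes. A stripped-down sketch of that sequence against a hypothetical page and register (MY_PAGE, MY_REG, my_paged_modify):

        #include <linux/phy.h>

        #define MY_PAGE         3       /* hypothetical page number */
        #define MY_REG          16      /* hypothetical paged register */

        static int my_paged_modify(struct phy_device *phydev, u16 mask, u16 set)
        {
                int ret = 0, saved_page;

                saved_page = phy_select_page(phydev, MY_PAGE);  /* locks the MDIO bus */
                if (saved_page < 0)
                        goto out;

                ret = __phy_modify(phydev, MY_REG, mask, set);
        out:
                /* Restores the original page, unlocks, and folds errors together. */
                return phy_restore_page(phydev, saved_page, ret);
        }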
+ config CHARGER_BQ25890 tristate "TI BQ25890 battery charger driver" depends on I2C diff --git a/drivers/power/supply/Makefile b/drivers/power/supply/Makefile index b9644663e435..b3c694a65114 100644 --- a/drivers/power/supply/Makefile +++ b/drivers/power/supply/Makefile @@ -82,6 +82,7 @@ obj-$(CONFIG_CHARGER_BQ2415X) += bq2415x_charger.o obj-$(CONFIG_CHARGER_BQ24190) += bq24190_charger.o obj-$(CONFIG_CHARGER_BQ24257) += bq24257_charger.o obj-$(CONFIG_CHARGER_BQ24735) += bq24735-charger.o +obj-$(CONFIG_CHARGER_BQ2515X) += bq2515x_charger.o obj-$(CONFIG_CHARGER_BQ25890) += bq25890_charger.o obj-$(CONFIG_CHARGER_SMB347) += smb347-charger.o obj-$(CONFIG_CHARGER_TPS65090) += tps65090-charger.o diff --git a/drivers/power/supply/axp20x_usb_power.c b/drivers/power/supply/axp20x_usb_power.c index 4fde24b5f35a..d01dc0332edc 100644 --- a/drivers/power/supply/axp20x_usb_power.c +++ b/drivers/power/supply/axp20x_usb_power.c @@ -78,7 +78,7 @@ static bool axp20x_usb_vbus_needs_polling(struct axp20x_usb_power *power) /* * Polling is only necessary while VBUS is offline. While online, a * present->absent transition implies an online->offline transition - * and will triger the VBUS_REMOVAL IRQ. + * and will trigger the VBUS_REMOVAL IRQ. */ if (power->axp20x_id >= AXP221_ID && !power->online) return true; diff --git a/drivers/power/supply/bq2415x_charger.c b/drivers/power/supply/bq2415x_charger.c index a1f00ae1c180..5724001e66b9 100644 --- a/drivers/power/supply/bq2415x_charger.c +++ b/drivers/power/supply/bq2415x_charger.c @@ -5,14 +5,14 @@ * Copyright (C) 2011-2013 Pali Rohár <pali@kernel.org> * * Datasheets: - * http://www.ti.com/product/bq24150 - * http://www.ti.com/product/bq24150a - * http://www.ti.com/product/bq24152 - * http://www.ti.com/product/bq24153 - * http://www.ti.com/product/bq24153a - * http://www.ti.com/product/bq24155 - * http://www.ti.com/product/bq24157s - * http://www.ti.com/product/bq24158 + * https://www.ti.com/product/bq24150 + * https://www.ti.com/product/bq24150a + * https://www.ti.com/product/bq24152 + * https://www.ti.com/product/bq24153 + * https://www.ti.com/product/bq24153a + * https://www.ti.com/product/bq24155 + * https://www.ti.com/product/bq24157s + * https://www.ti.com/product/bq24158 */ #include <linux/kernel.h> diff --git a/drivers/power/supply/bq24190_charger.c b/drivers/power/supply/bq24190_charger.c index 4540e913057f..d14186525e1e 100644 --- a/drivers/power/supply/bq24190_charger.c +++ b/drivers/power/supply/bq24190_charger.c @@ -481,8 +481,10 @@ static ssize_t bq24190_sysfs_store(struct device *dev, return ret; ret = pm_runtime_get_sync(bdi->dev); - if (ret < 0) + if (ret < 0) { + pm_runtime_put_noidle(bdi->dev); return ret; + } ret = bq24190_write_mask(bdi, info->reg, info->mask, info->shift, v); if (ret) diff --git a/drivers/power/supply/bq24257_charger.c b/drivers/power/supply/bq24257_charger.c index eb151687beb3..8e60cb0f3c3f 100644 --- a/drivers/power/supply/bq24257_charger.c +++ b/drivers/power/supply/bq24257_charger.c @@ -5,9 +5,9 @@ * Copyright (C) 2015 Intel Corporation * * Datasheets: - * http://www.ti.com/product/bq24250 - * http://www.ti.com/product/bq24251 - * http://www.ti.com/product/bq24257 + * https://www.ti.com/product/bq24250 + * https://www.ti.com/product/bq24251 + * https://www.ti.com/product/bq24257 */ #include <linux/module.h> diff --git a/drivers/power/supply/bq2515x_charger.c b/drivers/power/supply/bq2515x_charger.c new file mode 100644 index 000000000000..36b0c8c98d40 --- /dev/null +++ b/drivers/power/supply/bq2515x_charger.c @@ -0,0 
+1,1169 @@ +// SPDX-License-Identifier: GPL-2.0 +// BQ2515X Battery Charger Driver +// Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/gpio/consumer.h> +#include <linux/power_supply.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#define BQ2515X_MANUFACTURER "Texas Instruments" + +#define BQ2515X_STAT0 0x00 +#define BQ2515X_STAT1 0x01 +#define BQ2515X_STAT2 0x02 +#define BQ2515X_FLAG0 0x03 +#define BQ2515X_FLAG1 0x04 +#define BQ2515X_FLAG2 0x05 +#define BQ2515X_FLAG3 0x06 +#define BQ2515X_MASK0 0x07 +#define BQ2515X_MASK1 0x08 +#define BQ2515X_MASK2 0x09 +#define BQ2515X_MASK3 0x0a +#define BQ2515X_VBAT_CTRL 0x12 +#define BQ2515X_ICHG_CTRL 0x13 +#define BQ2515X_PCHRGCTRL 0x14 +#define BQ2515X_TERMCTRL 0x15 +#define BQ2515X_BUVLO 0x16 +#define BQ2515X_CHARGERCTRL0 0x17 +#define BQ2515X_CHARGERCTRL1 0x18 +#define BQ2515X_ILIMCTRL 0x19 +#define BQ2515X_LDOCTRL 0x1d +#define BQ2515X_MRCTRL 0x30 +#define BQ2515X_ICCTRL0 0x35 +#define BQ2515X_ICCTRL1 0x36 +#define BQ2515X_ICCTRL2 0x37 +#define BQ2515X_ADCCTRL0 0x40 +#define BQ2515X_ADCCTRL1 0x41 +#define BQ2515X_ADC_VBAT_M 0x42 +#define BQ2515X_ADC_VBAT_L 0x43 +#define BQ2515X_ADC_TS_M 0x44 +#define BQ2515X_ADC_TS_L 0x45 +#define BQ2515X_ADC_ICHG_M 0x46 +#define BQ2515X_ADC_ICHG_L 0x47 +#define BQ2515X_ADC_ADCIN_M 0x48 +#define BQ2515X_ADC_ADCIN_L 0x49 +#define BQ2515X_ADC_VIN_M 0x4a +#define BQ2515X_ADC_VIN_L 0x4b +#define BQ2515X_ADC_PMID_M 0x4c +#define BQ2515X_ADC_PMID_L 0x4d +#define BQ2515X_ADC_IIN_M 0x4e +#define BQ2515X_ADC_IIN_L 0x4f +#define BQ2515X_ADC_COMP1_M 0x52 +#define BQ2515X_ADC_COMP1_L 0X53 +#define BQ2515X_ADC_COMP2_M 0X54 +#define BQ2515X_ADC_COMP2_L 0x55 +#define BQ2515X_ADC_COMP3_M 0x56 +#define BQ2515X_ADC_COMP3_L 0x57 +#define BQ2515X_ADC_READ_EN 0x58 +#define BQ2515X_TS_FASTCHGCTRL 0x61 +#define BQ2515X_TS_COLD 0x62 +#define BQ2515X_TS_COOL 0x63 +#define BQ2515X_TS_WARM 0x64 +#define BQ2515X_TS_HOT 0x65 +#define BQ2515X_DEVICE_ID 0x6f + +#define BQ2515X_DEFAULT_ICHG_UA 10000 +#define BQ25150_DEFAULT_ILIM_UA 100000 +#define BQ25155_DEFAULT_ILIM_UA 500000 +#define BQ2515X_DEFAULT_VBAT_REG_UV 4200000 +#define BQ2515X_DEFAULT_IPRECHARGE_UA 2500 + +#define BQ2515X_DIVISOR 65536 +#define BQ2515X_VBAT_BASE_VOLT 3600000 +#define BQ2515X_VBAT_REG_MAX 4600000 +#define BQ2515X_VBAT_REG_MIN 3600000 +#define BQ2515X_VBAT_STEP_UV 10000 +#define BQ2515X_UV_FACTOR 1000000 +#define BQ2515X_VBAT_MULTIPLIER 6 +#define BQ2515X_ICHG_DIVISOR 52429 +#define BQ2515X_ICHG_CURR_STEP_THRESH_UA 318750 +#define BQ2515X_ICHG_MIN_UA 0 +#define BQ2515X_ICHG_MAX_UA 500000 +#define BQ2515X_ICHG_RNG_1B0_UA 1250 +#define BQ2515X_ICHG_RNG_1B1_UA 2500 +#define BQ2515X_VLOWV_SEL_1B0_UV 3000000 +#define BQ2515X_VLOWV_SEL_1B1_UV 2800000 +#define BQ2515X_PRECHRG_ICHRG_RNGE_1875_UA 18750 +#define BQ2515X_PRECHRG_ICHRG_RNGE_3750_UA 37500 +#define BQ2515X_TWAKE2_MIN_US 1700000 +#define BQ2515X_TWAKE2_MAX_US 2300000 + +#define BQ2515X_ILIM_150MA 0x2 +#define BQ2515X_ILIM_MASK 0x7 +#define BQ2515X_ILIM_MIN 50000 +#define BQ2515X_ILIM_MAX 600000 +#define BQ2515X_HEALTH_MASK 0xf +#define BQ2515X_ICHGRNG_MASK 0x80 +#define BQ2515X_STAT0_MASK 0x0f +#define BQ2515X_STAT1_MASK 0x1f +#define BQ2515X_PRECHARGE_MASK 0x1f + +#define BQ2515X_TS_HOT_STAT BIT(0) +#define BQ2515X_TS_WARM_STAT BIT(1) +#define BQ2515X_TS_COOL_STAT BIT(2) +#define BQ2515X_TS_COLD_STAT BIT(3) +#define 
BQ2515X_SAFETY_TIMER_EXP BIT(5) + +#define BQ2515X_EN_VBAT_READ BIT(3) +#define BQ2515X_EN_ICHG_READ BIT(5) + +#define BQ2515X_VIN_GOOD BIT(0) +#define BQ2515X_CHRG_DONE BIT(5) +#define BQ2515X_CV_CHRG_MODE BIT(6) + +#define BQ2515X_VIN_OVP_FAULT_STAT BIT(7) + +#define BQ2515X_WATCHDOG_DISABLE BIT(4) + +#define BQ2515X_ICHARGE_RANGE BIT(7) + +#define BQ2515X_VLOWV_SEL BIT(5) + +#define BQ2515X_CHARGER_DISABLE BIT(0) + +#define BQ2515X_HWRESET_14S_WD BIT(1) + +static const int bq2515x_ilim_lvl_values[] = { + 50000, 100000, 150000, 200000, 300000, 400000, 500000, 600000 +}; + +/** + * struct bq2515x_init_data - + * @ilim: input current limit + * @ichg: fast charge current + * @vbatreg: battery regulation voltage + * @iprechg: precharge current + */ +struct bq2515x_init_data { + int ilim; + int ichg; + int vbatreg; + int iprechg; +}; + +enum bq2515x_id { + BQ25150, + BQ25155, +}; + +/** + * struct bq2515x_device - + * @mains: mains properties + * @battery: battery properties + * @regmap: register map structure + * @dev: device structure + * + * @reset_gpio: manual reset (MR) pin + * @powerdown_gpio: low power mode pin + * @ac_detect_gpio: power good (PG) pin + * @ce_gpio: charge enable (CE) pin + * + * @model_name: string value describing device model + * @device_id: value of device_id + * @mains_online: boolean value indicating power supply online + * + * @bq2515x_init_data init_data: charger initialization data structure + */ +struct bq2515x_device { + struct power_supply *mains; + struct power_supply *battery; + struct regmap *regmap; + struct device *dev; + + struct gpio_desc *reset_gpio; + struct gpio_desc *powerdown_gpio; + struct gpio_desc *ac_detect_gpio; + struct gpio_desc *ce_gpio; + + char model_name[I2C_NAME_SIZE]; + int device_id; + bool mains_online; + + struct bq2515x_init_data init_data; +}; + +static struct reg_default bq25150_reg_defaults[] = { + {BQ2515X_FLAG0, 0x0}, + {BQ2515X_FLAG1, 0x0}, + {BQ2515X_FLAG2, 0x0}, + {BQ2515X_FLAG3, 0x0}, + {BQ2515X_MASK0, 0x0}, + {BQ2515X_MASK1, 0x0}, + {BQ2515X_MASK2, 0x71}, + {BQ2515X_MASK3, 0x0}, + {BQ2515X_VBAT_CTRL, 0x3C}, + {BQ2515X_ICHG_CTRL, 0x8}, + {BQ2515X_PCHRGCTRL, 0x2}, + {BQ2515X_TERMCTRL, 0x14}, + {BQ2515X_BUVLO, 0x0}, + {BQ2515X_CHARGERCTRL0, 0x82}, + {BQ2515X_CHARGERCTRL1, 0x42}, + {BQ2515X_ILIMCTRL, 0x1}, + {BQ2515X_LDOCTRL, 0xB0}, + {BQ2515X_MRCTRL, 0x2A}, + {BQ2515X_ICCTRL0, 0x10}, + {BQ2515X_ICCTRL1, 0x0}, + {BQ2515X_ICCTRL2, 0x0}, + {BQ2515X_ADCCTRL0, 0x2}, + {BQ2515X_ADCCTRL1, 0x40}, + {BQ2515X_ADC_COMP1_M, 0x23}, + {BQ2515X_ADC_COMP1_L, 0x20}, + {BQ2515X_ADC_COMP2_M, 0x38}, + {BQ2515X_ADC_COMP2_L, 0x90}, + {BQ2515X_ADC_COMP3_M, 0x0}, + {BQ2515X_ADC_COMP3_L, 0x0}, + {BQ2515X_ADC_READ_EN, 0x0}, + {BQ2515X_TS_FASTCHGCTRL, 0x34}, + {BQ2515X_TS_COLD, 0x7C}, + {BQ2515X_TS_COOL, 0x6D}, + {BQ2515X_TS_WARM, 0x38}, + {BQ2515X_TS_HOT, 0x27}, + {BQ2515X_DEVICE_ID, 0x20}, +}; + +static struct reg_default bq25155_reg_defaults[] = { + {BQ2515X_FLAG0, 0x0}, + {BQ2515X_FLAG1, 0x0}, + {BQ2515X_FLAG2, 0x0}, + {BQ2515X_FLAG3, 0x0}, + {BQ2515X_MASK0, 0x0}, + {BQ2515X_MASK1, 0x0}, + {BQ2515X_MASK2, 0x71}, + {BQ2515X_MASK3, 0x0}, + {BQ2515X_VBAT_CTRL, 0x3C}, + {BQ2515X_ICHG_CTRL, 0x8}, + {BQ2515X_PCHRGCTRL, 0x2}, + {BQ2515X_TERMCTRL, 0x14}, + {BQ2515X_BUVLO, 0x0}, + {BQ2515X_CHARGERCTRL0, 0x82}, + {BQ2515X_CHARGERCTRL1, 0xC2}, + {BQ2515X_ILIMCTRL, 0x6}, + {BQ2515X_LDOCTRL, 0xB0}, + {BQ2515X_MRCTRL, 0x2A}, + {BQ2515X_ICCTRL0, 0x10}, + {BQ2515X_ICCTRL1, 0x0}, + {BQ2515X_ICCTRL2, 0x40}, + {BQ2515X_ADCCTRL0, 0x2}, + {BQ2515X_ADCCTRL1, 0x40}, + 
{BQ2515X_ADC_COMP1_M, 0x23}, + {BQ2515X_ADC_COMP1_L, 0x20}, + {BQ2515X_ADC_COMP2_M, 0x38}, + {BQ2515X_ADC_COMP2_L, 0x90}, + {BQ2515X_ADC_COMP3_M, 0x0}, + {BQ2515X_ADC_COMP3_L, 0x0}, + {BQ2515X_ADC_READ_EN, 0x0}, + {BQ2515X_TS_FASTCHGCTRL, 0x34}, + {BQ2515X_TS_COLD, 0x7C}, + {BQ2515X_TS_COOL, 0x6D}, + {BQ2515X_TS_WARM, 0x38}, + {BQ2515X_TS_HOT, 0x27}, + {BQ2515X_DEVICE_ID, 0x35}, +}; + +static int bq2515x_wake_up(struct bq2515x_device *bq2515x) +{ + int ret; + int val; + + /* Read the STAT register if we can read it then the device is out + * of ship mode. If the register cannot be read then attempt to wake + * it up and enable the ADC. + */ + ret = regmap_read(bq2515x->regmap, BQ2515X_STAT0, &val); + if (ret) + return ret; + + /* Need to toggle LP and bring device out of ship mode. The device + * will exit the ship mode when the MR pin is held low for at least + * t_WAKE2 as shown in section 8.3.7.1 of the datasheet. + */ + gpiod_set_value_cansleep(bq2515x->powerdown_gpio, 0); + + gpiod_set_value_cansleep(bq2515x->reset_gpio, 0); + usleep_range(BQ2515X_TWAKE2_MIN_US, BQ2515X_TWAKE2_MAX_US); + gpiod_set_value_cansleep(bq2515x->reset_gpio, 1); + + return regmap_write(bq2515x->regmap, BQ2515X_ADC_READ_EN, + (BQ2515X_EN_VBAT_READ | BQ2515X_EN_ICHG_READ)); +} + +static int bq2515x_update_ps_status(struct bq2515x_device *bq2515x) +{ + bool dc = false; + unsigned int val; + int ret; + + if (bq2515x->ac_detect_gpio) + val = gpiod_get_value_cansleep(bq2515x->ac_detect_gpio); + else { + ret = regmap_read(bq2515x->regmap, BQ2515X_STAT0, &val); + if (ret) + return ret; + } + + dc = val & BQ2515X_VIN_GOOD; + + ret = bq2515x->mains_online != dc; + + bq2515x->mains_online = dc; + + return ret; +} + +static int bq2515x_disable_watchdog_timers(struct bq2515x_device *bq2515x) +{ + int ret; + + ret = regmap_update_bits(bq2515x->regmap, BQ2515X_CHARGERCTRL0, + BQ2515X_WATCHDOG_DISABLE, BQ2515X_WATCHDOG_DISABLE); + if (ret) + return ret; + + return regmap_update_bits(bq2515x->regmap, BQ2515X_ICCTRL2, + BQ2515X_HWRESET_14S_WD, 0); +} + +static int bq2515x_get_battery_voltage_now(struct bq2515x_device *bq2515x) +{ + int ret; + int vbat_msb; + int vbat_lsb; + uint32_t vbat_measurement; + + if (!bq2515x->mains_online) + bq2515x_wake_up(bq2515x); + + ret = regmap_read(bq2515x->regmap, BQ2515X_ADC_VBAT_M, &vbat_msb); + if (ret) + return ret; + + ret = regmap_read(bq2515x->regmap, BQ2515X_ADC_VBAT_L, &vbat_lsb); + if (ret) + return ret; + + vbat_measurement = (vbat_msb << 8) | vbat_lsb; + + return vbat_measurement * (BQ2515X_UV_FACTOR / BQ2515X_DIVISOR) * + BQ2515X_VBAT_MULTIPLIER; +} + +static int bq2515x_get_battery_current_now(struct bq2515x_device *bq2515x) +{ + int ret; + int ichg_msb; + int ichg_lsb; + uint32_t ichg_measurement; + u16 ichg_multiplier = BQ2515X_ICHG_RNG_1B0_UA; + unsigned int ichg_reg_code, reg_code; + unsigned int icharge_range = 0, pchrgctrl; + unsigned int buvlo, vlowv_sel, vlowv = BQ2515X_VLOWV_SEL_1B0_UV; + + if (!bq2515x->mains_online) + return -ENODATA; + + ret = regmap_read(bq2515x->regmap, BQ2515X_ADC_ICHG_M, &ichg_msb); + if (ret) + return ret; + + ret = regmap_read(bq2515x->regmap, BQ2515X_ADC_ICHG_L, &ichg_lsb); + if (ret) + return ret; + + ichg_measurement = (ichg_msb << 8) | ichg_lsb; + + ret = regmap_read(bq2515x->regmap, BQ2515X_BUVLO, &buvlo); + if (ret) + return ret; + + vlowv_sel = buvlo & BQ2515X_VLOWV_SEL; + + if (vlowv_sel) + vlowv = BQ2515X_VLOWV_SEL_1B1_UV; + + if (bq2515x_get_battery_voltage_now(bq2515x) < vlowv) { + ret = regmap_read(bq2515x->regmap, BQ2515X_PCHRGCTRL, 
+ &pchrgctrl); + if (ret) + return ret; + + reg_code = pchrgctrl & BQ2515X_PRECHARGE_MASK; + } else { + ret = regmap_read(bq2515x->regmap, BQ2515X_ICHG_CTRL, + &ichg_reg_code); + if (ret) + return ret; + + reg_code = ichg_reg_code; + } + + ret = regmap_read(bq2515x->regmap, BQ2515X_PCHRGCTRL, &pchrgctrl); + if (ret) + return ret; + + icharge_range = pchrgctrl & BQ2515X_ICHARGE_RANGE; + + if (icharge_range) + ichg_multiplier = BQ2515X_ICHG_RNG_1B1_UA; + + return reg_code * (ichg_multiplier * ichg_measurement / + BQ2515X_ICHG_DIVISOR); +} + +static bool bq2515x_get_charge_disable(struct bq2515x_device *bq2515x) +{ + int ret; + int ce_pin; + int icctrl2; + int charger_disable; + + ce_pin = gpiod_get_value_cansleep(bq2515x->ce_gpio); + + ret = regmap_read(bq2515x->regmap, BQ2515X_ICCTRL2, &icctrl2); + if (ret) + return ret; + + charger_disable = icctrl2 & BQ2515X_CHARGER_DISABLE; + + if (charger_disable || ce_pin) + return true; + + return false; +} + +static int bq2515x_set_charge_disable(struct bq2515x_device *bq2515x, int val) +{ + gpiod_set_value_cansleep(bq2515x->ce_gpio, val); + + return regmap_update_bits(bq2515x->regmap, BQ2515X_ICCTRL2, + BQ2515X_CHARGER_DISABLE, val); +} + +static int bq2515x_get_const_charge_current(struct bq2515x_device *bq2515x) +{ + int ret; + u16 ichg_multiplier = BQ2515X_ICHG_RNG_1B0_UA; + unsigned int ichg_reg_code; + unsigned int pchrgctrl; + unsigned int icharge_range; + + ret = regmap_read(bq2515x->regmap, BQ2515X_ICHG_CTRL, &ichg_reg_code); + if (ret) + return ret; + + ret = regmap_read(bq2515x->regmap, BQ2515X_PCHRGCTRL, &pchrgctrl); + if (ret) + return ret; + + icharge_range = pchrgctrl & BQ2515X_ICHARGE_RANGE; + + if (icharge_range) + ichg_multiplier = BQ2515X_ICHG_RNG_1B1_UA; + + return ichg_reg_code * ichg_multiplier; +} + +static int bq2515x_set_const_charge_current(struct bq2515x_device *bq2515x, + int val) +{ + int ret; + unsigned int ichg_reg_code; + u16 ichg_multiplier = BQ2515X_ICHG_RNG_1B0_UA; + unsigned int icharge_range = 0; + + if (val > BQ2515X_ICHG_MAX_UA || val < BQ2515X_ICHG_MIN_UA) + return -EINVAL; + + if (val > BQ2515X_ICHG_CURR_STEP_THRESH_UA) { + ichg_multiplier = BQ2515X_ICHG_RNG_1B1_UA; + icharge_range = BQ2515X_ICHARGE_RANGE; + } + + bq2515x_set_charge_disable(bq2515x, 1); + + ret = regmap_update_bits(bq2515x->regmap, BQ2515X_PCHRGCTRL, + BQ2515X_ICHARGE_RANGE, icharge_range); + if (ret) + return ret; + + ichg_reg_code = val / ichg_multiplier; + + ret = regmap_write(bq2515x->regmap, BQ2515X_ICHG_CTRL, ichg_reg_code); + if (ret) + return ret; + + return bq2515x_set_charge_disable(bq2515x, 0); +} + +static int bq2515x_get_precharge_current(struct bq2515x_device *bq2515x) +{ + int ret; + unsigned int pchrgctrl; + unsigned int icharge_range; + u16 precharge_multiplier = BQ2515X_ICHG_RNG_1B0_UA; + unsigned int precharge_reg_code; + + ret = regmap_read(bq2515x->regmap, BQ2515X_PCHRGCTRL, &pchrgctrl); + if (ret) + return ret; + + icharge_range = pchrgctrl & BQ2515X_ICHARGE_RANGE; + + if (icharge_range) + precharge_multiplier = BQ2515X_ICHG_RNG_1B1_UA; + + precharge_reg_code = pchrgctrl & BQ2515X_PRECHARGE_MASK; + + return precharge_reg_code * precharge_multiplier; +} + +static int bq2515x_set_precharge_current(struct bq2515x_device *bq2515x, + int val) +{ + int ret; + unsigned int pchrgctrl; + unsigned int icharge_range; + unsigned int precharge_reg_code; + unsigned int precharge_multiplier = BQ2515X_ICHG_RNG_1B0_UA; + unsigned int precharge_max_ua = BQ2515X_PRECHRG_ICHRG_RNGE_1875_UA; + + ret = regmap_read(bq2515x->regmap, 
BQ2515X_PCHRGCTRL, &pchrgctrl); + if (ret) + return ret; + + icharge_range = pchrgctrl & BQ2515X_ICHARGE_RANGE; + + if (icharge_range) { + precharge_max_ua = BQ2515X_PRECHRG_ICHRG_RNGE_3750_UA; + precharge_multiplier = BQ2515X_ICHG_RNG_1B1_UA; + } else { + precharge_max_ua = BQ2515X_PRECHRG_ICHRG_RNGE_1875_UA; + precharge_multiplier = BQ2515X_ICHG_RNG_1B0_UA; + } + if (val > precharge_max_ua || val < BQ2515X_ICHG_MIN_UA) + return -EINVAL; + + precharge_reg_code = val / precharge_multiplier; + + ret = bq2515x_set_charge_disable(bq2515x, 1); + if (ret) + return ret; + + ret = regmap_update_bits(bq2515x->regmap, BQ2515X_PCHRGCTRL, + BQ2515X_PRECHARGE_MASK, precharge_reg_code); + if (ret) + return ret; + + return bq2515x_set_charge_disable(bq2515x, 0); +} + +static int bq2515x_charging_status(struct bq2515x_device *bq2515x, + union power_supply_propval *val) +{ + bool status0_no_fault; + bool status1_no_fault; + bool ce_status; + bool charge_done; + unsigned int status; + int ret; + + if (!bq2515x->mains_online) { + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; + return 0; + } + + ret = regmap_read(bq2515x->regmap, BQ2515X_STAT0, &status); + if (ret) + return ret; + + /* + * The code block below is used to determine if any faults from the + * STAT0 register are disabling charging or if the charge has completed + * according to the CHARGE_DONE_STAT bit. + */ + if (((status & BQ2515X_STAT0_MASK) == true) & + ((status & BQ2515X_CHRG_DONE) == false)) { + status0_no_fault = true; + charge_done = false; + } else if (status & BQ2515X_CHRG_DONE) { + charge_done = true; + status0_no_fault = false; + } else { + status0_no_fault = false; + charge_done = false; + } + + ret = regmap_read(bq2515x->regmap, BQ2515X_STAT1, &status); + if (ret) + return ret; + /* + * The code block below is used to determine if any faults from the + * STAT1 register are disabling charging. + */ + if ((status & BQ2515X_STAT1_MASK) == false) + status1_no_fault = true; + else + status1_no_fault = false; + + ce_status = (!bq2515x_get_charge_disable(bq2515x)); + + /* + * If there are no faults and charging is enabled, then status is + * charging. Otherwise, if charging is complete, then status is full. 
+ * Otherwise, if a fault exists or charging is disabled, then status is + * not charging + */ + if (status0_no_fault & status1_no_fault & ce_status) + val->intval = POWER_SUPPLY_STATUS_CHARGING; + else if (charge_done) + val->intval = POWER_SUPPLY_STATUS_FULL; + else if (!(status0_no_fault & status1_no_fault & ce_status)) + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + + return 0; +} + +static int bq2515x_get_batt_reg(struct bq2515x_device *bq2515x) +{ + int vbat_reg_code; + int ret; + + ret = regmap_read(bq2515x->regmap, BQ2515X_VBAT_CTRL, &vbat_reg_code); + if (ret) + return ret; + + return BQ2515X_VBAT_BASE_VOLT + vbat_reg_code * BQ2515X_VBAT_STEP_UV; +} + +static int bq2515x_set_batt_reg(struct bq2515x_device *bq2515x, int val) +{ + int vbat_reg_code; + + if (val > BQ2515X_VBAT_REG_MAX || val < BQ2515X_VBAT_REG_MIN) + return -EINVAL; + + vbat_reg_code = (val - BQ2515X_VBAT_BASE_VOLT) / BQ2515X_VBAT_STEP_UV; + + return regmap_write(bq2515x->regmap, BQ2515X_VBAT_CTRL, vbat_reg_code); +} + +static int bq2515x_get_ilim_lvl(struct bq2515x_device *bq2515x) +{ + int ret; + int ilimctrl; + + ret = regmap_read(bq2515x->regmap, BQ2515X_ILIMCTRL, &ilimctrl); + if (ret) + return ret; + + return bq2515x_ilim_lvl_values[ilimctrl & BQ2515X_ILIM_MASK]; +} + +static int bq2515x_set_ilim_lvl(struct bq2515x_device *bq2515x, int val) +{ + int i = 0; + unsigned int array_size = ARRAY_SIZE(bq2515x_ilim_lvl_values); + + for (i = array_size - 1; i > 0; i--) { + if (val >= bq2515x_ilim_lvl_values[i]) + break; + } + return regmap_write(bq2515x->regmap, BQ2515X_ILIMCTRL, i); +} + +static int bq2515x_power_supply_property_is_writeable(struct power_supply *psy, + enum power_supply_property prop) +{ + switch (prop) { + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + return true; + default: + return false; + } +} + +static int bq2515x_charger_get_health(struct bq2515x_device *bq2515x, + union power_supply_propval *val) +{ + int health = POWER_SUPPLY_HEALTH_GOOD; + int ret; + unsigned int stat1; + unsigned int flag3; + + if (!bq2515x->mains_online) + bq2515x_wake_up(bq2515x); + + ret = regmap_read(bq2515x->regmap, BQ2515X_FLAG3, &flag3); + if (ret) + return ret; + + ret = regmap_read(bq2515x->regmap, BQ2515X_STAT1, &stat1); + if (ret) + return ret; + + if (stat1 & BQ2515X_HEALTH_MASK) { + switch (stat1 & BQ2515X_HEALTH_MASK) { + case BQ2515X_TS_HOT_STAT: + health = POWER_SUPPLY_HEALTH_HOT; + break; + case BQ2515X_TS_WARM_STAT: + health = POWER_SUPPLY_HEALTH_WARM; + break; + case BQ2515X_TS_COOL_STAT: + health = POWER_SUPPLY_HEALTH_COOL; + break; + case BQ2515X_TS_COLD_STAT: + health = POWER_SUPPLY_HEALTH_COLD; + break; + default: + health = POWER_SUPPLY_HEALTH_UNKNOWN; + break; + } + } + + if (stat1 & BQ2515X_VIN_OVP_FAULT_STAT) + health = POWER_SUPPLY_HEALTH_OVERVOLTAGE; + + if (flag3 & BQ2515X_SAFETY_TIMER_EXP) + health = POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE; + + val->intval = health; + return 0; +} + +static int bq2515x_mains_set_property(struct power_supply *psy, + enum power_supply_property prop, + const union power_supply_propval *val) +{ + struct bq2515x_device *bq2515x = power_supply_get_drvdata(psy); + int ret; + + switch (prop) { + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + ret = bq2515x_set_batt_reg(bq2515x, val->intval); + break; + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + ret = bq2515x_set_const_charge_current(bq2515x, val->intval); + 
break; + + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = bq2515x_set_ilim_lvl(bq2515x, val->intval); + break; + + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + ret = bq2515x_set_precharge_current(bq2515x, val->intval); + break; + + default: + return -EINVAL; + } + + return ret; +} + +static int bq2515x_mains_get_property(struct power_supply *psy, + enum power_supply_property prop, + union power_supply_propval *val) +{ + struct bq2515x_device *bq2515x = power_supply_get_drvdata(psy); + int ret = 0; + + switch (prop) { + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT: + ret = bq2515x_get_const_charge_current(bq2515x); + if (ret < 0) + return ret; + + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE: + ret = bq2515x_get_batt_reg(bq2515x); + if (ret < 0) + return ret; + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_PRECHARGE_CURRENT: + ret = bq2515x_get_precharge_current(bq2515x); + if (ret < 0) + return ret; + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_ONLINE: + val->intval = bq2515x->mains_online; + break; + + case POWER_SUPPLY_PROP_HEALTH: + ret = bq2515x_charger_get_health(bq2515x, val); + if (ret) + val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; + break; + + case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT: + ret = bq2515x_get_ilim_lvl(bq2515x); + if (ret < 0) + return ret; + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_MODEL_NAME: + val->strval = bq2515x->model_name; + break; + + case POWER_SUPPLY_PROP_MANUFACTURER: + val->strval = BQ2515X_MANUFACTURER; + break; + + case POWER_SUPPLY_PROP_STATUS: + ret = bq2515x_charging_status(bq2515x, val); + if (ret) + return ret; + break; + + default: + return -EINVAL; + } + + return ret; +} + +static int bq2515x_battery_get_property(struct power_supply *psy, + enum power_supply_property prop, + union power_supply_propval *val) +{ + struct bq2515x_device *bq2515x = power_supply_get_drvdata(psy); + int ret; + + ret = bq2515x_update_ps_status(bq2515x); + if (ret) + return ret; + + switch (prop) { + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX: + ret = bq2515x->init_data.vbatreg; + if (ret < 0) + return ret; + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX: + ret = bq2515x->init_data.ichg; + if (ret < 0) + return ret; + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + ret = bq2515x_get_battery_voltage_now(bq2515x); + if (ret < 0) + return ret; + val->intval = ret; + break; + + case POWER_SUPPLY_PROP_CURRENT_NOW: + ret = bq2515x_get_battery_current_now(bq2515x); + if (ret < 0) + return ret; + val->intval = ret; + break; + + default: + return -EINVAL; + } + return 0; +} + +static enum power_supply_property bq2515x_battery_properties[] = { + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, +}; + +static enum power_supply_property bq2515x_mains_properties[] = { + POWER_SUPPLY_PROP_ONLINE, + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, + POWER_SUPPLY_PROP_MODEL_NAME, + POWER_SUPPLY_PROP_MANUFACTURER, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, + POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT, + POWER_SUPPLY_PROP_PRECHARGE_CURRENT, +}; + +static struct power_supply_desc bq2515x_mains_desc = { + .name = "bq2515x-mains", + .type = POWER_SUPPLY_TYPE_MAINS, + .get_property = bq2515x_mains_get_property, + .set_property = bq2515x_mains_set_property, + .properties = 
bq2515x_mains_properties, + .num_properties = ARRAY_SIZE(bq2515x_mains_properties), + .property_is_writeable = bq2515x_power_supply_property_is_writeable, +}; + +static struct power_supply_desc bq2515x_battery_desc = { + .name = "bq2515x-battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .get_property = bq2515x_battery_get_property, + .properties = bq2515x_battery_properties, + .num_properties = ARRAY_SIZE(bq2515x_battery_properties), + .property_is_writeable = bq2515x_power_supply_property_is_writeable, +}; + +static int bq2515x_power_supply_register(struct bq2515x_device *bq2515x, + struct device *dev, struct power_supply_config psy_cfg) +{ + bq2515x->mains = devm_power_supply_register(bq2515x->dev, + &bq2515x_mains_desc, + &psy_cfg); + if (IS_ERR(bq2515x->mains)) + return -EINVAL; + + bq2515x->battery = devm_power_supply_register(bq2515x->dev, + &bq2515x_battery_desc, + &psy_cfg); + if (IS_ERR(bq2515x->battery)) + return -EINVAL; + + return 0; +} + +static int bq2515x_hw_init(struct bq2515x_device *bq2515x) +{ + int ret; + struct power_supply_battery_info bat_info = { }; + + ret = bq2515x_disable_watchdog_timers(bq2515x); + if (ret) + return ret; + + if (bq2515x->init_data.ilim) { + ret = bq2515x_set_ilim_lvl(bq2515x, bq2515x->init_data.ilim); + if (ret) + return ret; + } + + ret = power_supply_get_battery_info(bq2515x->mains, &bat_info); + if (ret) { + dev_warn(bq2515x->dev, "battery info missing, default values will be applied\n"); + + bq2515x->init_data.ichg = BQ2515X_DEFAULT_ICHG_UA; + + bq2515x->init_data.vbatreg = BQ2515X_DEFAULT_VBAT_REG_UV; + + bq2515x->init_data.iprechg = BQ2515X_DEFAULT_IPRECHARGE_UA; + + } else { + bq2515x->init_data.ichg = + bat_info.constant_charge_current_max_ua; + + bq2515x->init_data.vbatreg = + bat_info.constant_charge_voltage_max_uv; + + bq2515x->init_data.iprechg = + bat_info.precharge_current_ua; + } + + ret = bq2515x_set_const_charge_current(bq2515x, + bq2515x->init_data.ichg); + if (ret) + return ret; + + ret = bq2515x_set_batt_reg(bq2515x, bq2515x->init_data.vbatreg); + if (ret) + return ret; + + return bq2515x_set_precharge_current(bq2515x, + bq2515x->init_data.iprechg); +} + +static int bq2515x_read_properties(struct bq2515x_device *bq2515x) +{ + int ret; + + ret = device_property_read_u32(bq2515x->dev, + "input-current-limit-microamp", + &bq2515x->init_data.ilim); + if (ret) { + switch (bq2515x->device_id) { + case BQ25150: + bq2515x->init_data.ilim = BQ25150_DEFAULT_ILIM_UA; + break; + case BQ25155: + bq2515x->init_data.ilim = BQ25155_DEFAULT_ILIM_UA; + break; + } + } + + bq2515x->ac_detect_gpio = devm_gpiod_get_optional(bq2515x->dev, + "ac-detect", GPIOD_IN); + if (IS_ERR(bq2515x->ac_detect_gpio)) { + ret = PTR_ERR(bq2515x->ac_detect_gpio); + dev_err(bq2515x->dev, "Failed to get ac detect"); + return ret; + } + + bq2515x->reset_gpio = devm_gpiod_get_optional(bq2515x->dev, + "reset", GPIOD_OUT_LOW); + if (IS_ERR(bq2515x->reset_gpio)) { + ret = PTR_ERR(bq2515x->reset_gpio); + dev_err(bq2515x->dev, "Failed to get reset"); + return ret; + } + + bq2515x->powerdown_gpio = devm_gpiod_get_optional(bq2515x->dev, + "powerdown", GPIOD_OUT_LOW); + if (IS_ERR(bq2515x->powerdown_gpio)) { + ret = PTR_ERR(bq2515x->powerdown_gpio); + dev_err(bq2515x->dev, "Failed to get powerdown"); + return ret; + } + + bq2515x->ce_gpio = devm_gpiod_get_optional(bq2515x->dev, + "charge-enable", + GPIOD_OUT_LOW); + if (IS_ERR(bq2515x->ce_gpio)) { + ret = PTR_ERR(bq2515x->ce_gpio); + dev_err(bq2515x->dev, "Failed to get ce"); + return ret; + } + + return 0; +} + +static bool 
bq2515x_volatile_register(struct device *dev, unsigned int reg) +{ + switch (reg) { + case BQ2515X_STAT0 ... BQ2515X_FLAG3: + case BQ2515X_ADC_VBAT_M ... BQ2515X_ADC_IIN_L: + return true; + default: + return false; + } +} + +static const struct regmap_config bq25150_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = BQ2515X_DEVICE_ID, + .reg_defaults = bq25150_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(bq25150_reg_defaults), + .cache_type = REGCACHE_RBTREE, + .volatile_reg = bq2515x_volatile_register, +}; + +static const struct regmap_config bq25155_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = BQ2515X_DEVICE_ID, + .reg_defaults = bq25155_reg_defaults, + .num_reg_defaults = ARRAY_SIZE(bq25155_reg_defaults), + .cache_type = REGCACHE_RBTREE, + .volatile_reg = bq2515x_volatile_register, +}; + +static int bq2515x_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct bq2515x_device *bq2515x; + struct power_supply_config charger_cfg = {}; + int ret; + + bq2515x = devm_kzalloc(dev, sizeof(*bq2515x), GFP_KERNEL); + if (!bq2515x) + return -ENOMEM; + + bq2515x->dev = dev; + + strncpy(bq2515x->model_name, id->name, I2C_NAME_SIZE); + + bq2515x->device_id = id->driver_data; + + switch (bq2515x->device_id) { + case BQ25150: + bq2515x->regmap = devm_regmap_init_i2c(client, + &bq25150_regmap_config); + break; + case BQ25155: + bq2515x->regmap = devm_regmap_init_i2c(client, + &bq25155_regmap_config); + break; + } + + if (IS_ERR(bq2515x->regmap)) { + dev_err(dev, "failed to allocate register map\n"); + return PTR_ERR(bq2515x->regmap); + } + + i2c_set_clientdata(client, bq2515x); + + charger_cfg.drv_data = bq2515x; + charger_cfg.of_node = dev->of_node; + + ret = bq2515x_read_properties(bq2515x); + if (ret) { + dev_err(dev, "Failed to read device tree properties %d\n", + ret); + return ret; + } + + ret = bq2515x_power_supply_register(bq2515x, dev, charger_cfg); + if (ret) { + dev_err(dev, "failed to register power supply\n"); + return ret; + } + + ret = bq2515x_hw_init(bq2515x); + if (ret) { + dev_err(dev, "Cannot initialize the chip\n"); + return ret; + } + + return 0; +} + +static const struct i2c_device_id bq2515x_i2c_ids[] = { + { "bq25150", BQ25150, }, + { "bq25155", BQ25155, }, + {}, +}; +MODULE_DEVICE_TABLE(i2c, bq2515x_i2c_ids); + +static const struct of_device_id bq2515x_of_match[] = { + { .compatible = "ti,bq25150", }, + { .compatible = "ti,bq25155", }, + { }, +}; +MODULE_DEVICE_TABLE(of, bq2515x_of_match); + +static struct i2c_driver bq2515x_driver = { + .driver = { + .name = "bq2515x-charger", + .of_match_table = bq2515x_of_match, + }, + .probe = bq2515x_probe, + .id_table = bq2515x_i2c_ids, +}; +module_i2c_driver(bq2515x_driver); + +MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com>"); +MODULE_AUTHOR("Ricardo Rivera-Matos <r-rivera-matos@ti.com>"); +MODULE_DESCRIPTION("BQ2515X charger driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 942c92127b6d..a123f6e21f08 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -18,31 +18,33 @@ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
* * Datasheets: - * http://www.ti.com/product/bq27000 - * http://www.ti.com/product/bq27200 - * http://www.ti.com/product/bq27010 - * http://www.ti.com/product/bq27210 - * http://www.ti.com/product/bq27500 - * http://www.ti.com/product/bq27510-g1 - * http://www.ti.com/product/bq27510-g2 - * http://www.ti.com/product/bq27510-g3 - * http://www.ti.com/product/bq27520-g1 - * http://www.ti.com/product/bq27520-g2 - * http://www.ti.com/product/bq27520-g3 - * http://www.ti.com/product/bq27520-g4 - * http://www.ti.com/product/bq27530-g1 - * http://www.ti.com/product/bq27531-g1 - * http://www.ti.com/product/bq27541-g1 - * http://www.ti.com/product/bq27542-g1 - * http://www.ti.com/product/bq27546-g1 - * http://www.ti.com/product/bq27742-g1 - * http://www.ti.com/product/bq27545-g1 - * http://www.ti.com/product/bq27421-g1 - * http://www.ti.com/product/bq27425-g1 - * http://www.ti.com/product/bq27426 - * http://www.ti.com/product/bq27411-g1 - * http://www.ti.com/product/bq27441-g1 - * http://www.ti.com/product/bq27621-g1 + * https://www.ti.com/product/bq27000 + * https://www.ti.com/product/bq27200 + * https://www.ti.com/product/bq27010 + * https://www.ti.com/product/bq27210 + * https://www.ti.com/product/bq27500 + * https://www.ti.com/product/bq27510-g1 + * https://www.ti.com/product/bq27510-g2 + * https://www.ti.com/product/bq27510-g3 + * https://www.ti.com/product/bq27520-g1 + * https://www.ti.com/product/bq27520-g2 + * https://www.ti.com/product/bq27520-g3 + * https://www.ti.com/product/bq27520-g4 + * https://www.ti.com/product/bq27530-g1 + * https://www.ti.com/product/bq27531-g1 + * https://www.ti.com/product/bq27541-g1 + * https://www.ti.com/product/bq27542-g1 + * https://www.ti.com/product/bq27546-g1 + * https://www.ti.com/product/bq27742-g1 + * https://www.ti.com/product/bq27545-g1 + * https://www.ti.com/product/bq27421-g1 + * https://www.ti.com/product/bq27425-g1 + * https://www.ti.com/product/bq27426 + * https://www.ti.com/product/bq27411-g1 + * https://www.ti.com/product/bq27441-g1 + * https://www.ti.com/product/bq27621-g1 + * https://www.ti.com/product/bq27z561 + * https://www.ti.com/product/bq28z610 */ #include <linux/device.h> @@ -79,6 +81,11 @@ #define BQ27000_FLAG_FC BIT(5) #define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */ +/* BQ27Z561 has different layout for Flags register */ +#define BQ27Z561_FLAG_FDC BIT(4) /* Battery fully discharged */ +#define BQ27Z561_FLAG_FC BIT(5) /* Battery fully charged */ +#define BQ27Z561_FLAG_DIS_CH BIT(6) /* Battery is discharging */ + /* control register params */ #define BQ27XXX_SEALED 0x20 #define BQ27XXX_SET_CFGUPDATE 0x13 @@ -431,12 +438,52 @@ static u8 [BQ27XXX_REG_DCAP] = 0x3c, [BQ27XXX_REG_AP] = 0x18, BQ27XXX_DM_REG_ROWS, - }; + }, #define bq27411_regs bq27421_regs #define bq27425_regs bq27421_regs #define bq27426_regs bq27421_regs #define bq27441_regs bq27421_regs #define bq27621_regs bq27421_regs + bq27z561_regs[BQ27XXX_REG_MAX] = { + [BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x06, + [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, + [BQ27XXX_REG_VOLT] = 0x08, + [BQ27XXX_REG_AI] = 0x14, + [BQ27XXX_REG_FLAGS] = 0x0a, + [BQ27XXX_REG_TTE] = 0x16, + [BQ27XXX_REG_TTF] = 0x18, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = INVALID_REG_ADDR, + [BQ27XXX_REG_FCC] = 0x12, + [BQ27XXX_REG_CYCT] = 0x2a, + [BQ27XXX_REG_AE] = 0x22, + [BQ27XXX_REG_SOC] = 0x2c, + [BQ27XXX_REG_DCAP] = 0x3c, + [BQ27XXX_REG_AP] = 0x22, + BQ27XXX_DM_REG_ROWS, + }, + bq28z610_regs[BQ27XXX_REG_MAX] = { + 
[BQ27XXX_REG_CTRL] = 0x00, + [BQ27XXX_REG_TEMP] = 0x06, + [BQ27XXX_REG_INT_TEMP] = INVALID_REG_ADDR, + [BQ27XXX_REG_VOLT] = 0x08, + [BQ27XXX_REG_AI] = 0x14, + [BQ27XXX_REG_FLAGS] = 0x0a, + [BQ27XXX_REG_TTE] = 0x16, + [BQ27XXX_REG_TTF] = 0x18, + [BQ27XXX_REG_TTES] = INVALID_REG_ADDR, + [BQ27XXX_REG_TTECP] = INVALID_REG_ADDR, + [BQ27XXX_REG_NAC] = INVALID_REG_ADDR, + [BQ27XXX_REG_FCC] = 0x12, + [BQ27XXX_REG_CYCT] = 0x2a, + [BQ27XXX_REG_AE] = 0x22, + [BQ27XXX_REG_SOC] = 0x2c, + [BQ27XXX_REG_DCAP] = 0x3c, + [BQ27XXX_REG_AP] = 0x22, + BQ27XXX_DM_REG_ROWS, + }; static enum power_supply_property bq27000_props[] = { POWER_SUPPLY_PROP_STATUS, @@ -672,6 +719,44 @@ static enum power_supply_property bq27421_props[] = { #define bq27441_props bq27421_props #define bq27621_props bq27421_props +static enum power_supply_property bq27z561_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_POWER_AVG, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + +static enum power_supply_property bq28z610_props[] = { + POWER_SUPPLY_PROP_STATUS, + POWER_SUPPLY_PROP_PRESENT, + POWER_SUPPLY_PROP_VOLTAGE_NOW, + POWER_SUPPLY_PROP_CURRENT_NOW, + POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_LEVEL, + POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW, + POWER_SUPPLY_PROP_TIME_TO_FULL_NOW, + POWER_SUPPLY_PROP_TECHNOLOGY, + POWER_SUPPLY_PROP_CHARGE_FULL, + POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, + POWER_SUPPLY_PROP_CYCLE_COUNT, + POWER_SUPPLY_PROP_POWER_AVG, + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_MANUFACTURER, +}; + struct bq27xxx_dm_reg { u8 subclass_id; u8 offset; @@ -767,11 +852,15 @@ static struct bq27xxx_dm_reg bq27621_dm_regs[] = { #define bq27621_dm_regs 0 #endif +#define bq27z561_dm_regs 0 +#define bq28z610_dm_regs 0 + #define BQ27XXX_O_ZERO 0x00000001 #define BQ27XXX_O_OTDC 0x00000002 /* has OTC/OTD overtemperature flags */ #define BQ27XXX_O_UTOT 0x00000004 /* has OT overtemperature flag */ #define BQ27XXX_O_CFGUP 0x00000008 #define BQ27XXX_O_RAM 0x00000010 +#define BQ27Z561_O_BITS 0x00000020 #define BQ27XXX_DATA(ref, key, opt) { \ .opts = (opt), \ @@ -816,6 +905,8 @@ static struct { [BQ27426] = BQ27XXX_DATA(bq27426, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), [BQ27441] = BQ27XXX_DATA(bq27441, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), [BQ27621] = BQ27XXX_DATA(bq27621, 0x80008000, BQ27XXX_O_UTOT | BQ27XXX_O_CFGUP | BQ27XXX_O_RAM), + [BQ27Z561] = BQ27XXX_DATA(bq27z561, 0 , BQ27Z561_O_BITS), + [BQ28Z610] = BQ27XXX_DATA(bq28z610, 0 , BQ27Z561_O_BITS), }; static DEFINE_MUTEX(bq27xxx_list_lock); @@ -1551,6 +1642,8 @@ static bool bq27xxx_battery_dead(struct bq27xxx_device_info *di, u16 flags) { if (di->opts & BQ27XXX_O_ZERO) return flags & (BQ27000_FLAG_EDV1 | BQ27000_FLAG_EDVF); + else if (di->opts & BQ27Z561_O_BITS) + return flags & BQ27Z561_FLAG_FDC; else return flags & (BQ27XXX_FLAG_SOC1 | BQ27XXX_FLAG_SOCF); } @@ -1595,6 +1688,7 @@ void bq27xxx_battery_update(struct bq27xxx_device_info *di) cache.time_to_empty_avg = bq27xxx_battery_read_time(di, BQ27XXX_REG_TTECP); if (di->regs[BQ27XXX_REG_TTF] != INVALID_REG_ADDR) cache.time_to_full 
= bq27xxx_battery_read_time(di, BQ27XXX_REG_TTF); + cache.charge_full = bq27xxx_battery_read_fcc(di); cache.capacity = bq27xxx_battery_read_soc(di); if (di->regs[BQ27XXX_REG_AE] != INVALID_REG_ADDR) @@ -1682,6 +1776,13 @@ static int bq27xxx_battery_status(struct bq27xxx_device_info *di, status = POWER_SUPPLY_STATUS_NOT_CHARGING; else status = POWER_SUPPLY_STATUS_DISCHARGING; + } else if (di->opts & BQ27Z561_O_BITS) { + if (di->cache.flags & BQ27Z561_FLAG_FC) + status = POWER_SUPPLY_STATUS_FULL; + else if (di->cache.flags & BQ27Z561_FLAG_DIS_CH) + status = POWER_SUPPLY_STATUS_DISCHARGING; + else + status = POWER_SUPPLY_STATUS_CHARGING; } else { if (di->cache.flags & BQ27XXX_FLAG_FC) status = POWER_SUPPLY_STATUS_FULL; @@ -1710,6 +1811,13 @@ static int bq27xxx_battery_capacity_level(struct bq27xxx_device_info *di, level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; else level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; + } else if (di->opts & BQ27Z561_O_BITS) { + if (di->cache.flags & BQ27Z561_FLAG_FC) + level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + else if (di->cache.flags & BQ27Z561_FLAG_FDC) + level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + else + level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; } else { if (di->cache.flags & BQ27XXX_FLAG_FC) level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; diff --git a/drivers/power/supply/bq27xxx_battery_hdq.c b/drivers/power/supply/bq27xxx_battery_hdq.c index 9aff896c9802..29771967df2e 100644 --- a/drivers/power/supply/bq27xxx_battery_hdq.c +++ b/drivers/power/supply/bq27xxx_battery_hdq.c @@ -1,7 +1,7 @@ /* * BQ27xxx battery monitor HDQ/1-wire driver * - * Copyright (C) 2007-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2007-2017 Texas Instruments Incorporated - https://www.ti.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as diff --git a/drivers/power/supply/bq27xxx_battery_i2c.c b/drivers/power/supply/bq27xxx_battery_i2c.c index 2677c38a8a42..ab02456d69e5 100644 --- a/drivers/power/supply/bq27xxx_battery_i2c.c +++ b/drivers/power/supply/bq27xxx_battery_i2c.c @@ -1,7 +1,7 @@ /* * BQ27xxx battery monitor I2C driver * - * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/ + * Copyright (C) 2015 Texas Instruments Incorporated - https://www.ti.com/ * Andrew F. 
Davis <afd@ti.com> * * This program is free software; you can redistribute it and/or modify @@ -253,6 +253,8 @@ static const struct i2c_device_id bq27xxx_i2c_id_table[] = { { "bq27426", BQ27426 }, { "bq27441", BQ27441 }, { "bq27621", BQ27621 }, + { "bq27z561", BQ27Z561 }, + { "bq28z610", BQ28Z610 }, {}, }; MODULE_DEVICE_TABLE(i2c, bq27xxx_i2c_id_table); @@ -286,6 +288,8 @@ static const struct of_device_id bq27xxx_battery_i2c_of_match_table[] = { { .compatible = "ti,bq27426" }, { .compatible = "ti,bq27441" }, { .compatible = "ti,bq27621" }, + { .compatible = "ti,bq27z561" }, + { .compatible = "ti,bq28z610" }, {}, }; MODULE_DEVICE_TABLE(of, bq27xxx_battery_i2c_of_match_table); diff --git a/drivers/power/supply/cpcap-battery.c b/drivers/power/supply/cpcap-battery.c index 6e9392901b0a..90eba364664b 100644 --- a/drivers/power/supply/cpcap-battery.c +++ b/drivers/power/supply/cpcap-battery.c @@ -274,7 +274,7 @@ static int cpcap_battery_cc_to_ua(struct cpcap_battery_ddata *ddata, /** * cpcap_battery_read_accumulated - reads cpcap coulomb counter * @ddata: device driver data - * @regs: coulomb counter values + * @ccd: coulomb counter values * * Based on Motorola mapphone kernel function data_read_regs(). * Looking at the registers, the coulomb counter seems similar to diff --git a/drivers/power/supply/da9030_battery.c b/drivers/power/supply/da9030_battery.c index 88582423b87d..0deba48d22d3 100644 --- a/drivers/power/supply/da9030_battery.c +++ b/drivers/power/supply/da9030_battery.c @@ -172,17 +172,7 @@ static int bat_debug_show(struct seq_file *s, void *data) return 0; } -static int debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, bat_debug_show, inode->i_private); -} - -static const struct file_operations bat_debug_fops = { - .open = debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(bat_debug); static struct dentry *da9030_bat_create_debugfs(struct da9030_charger *charger) { diff --git a/drivers/power/supply/gpio-charger.c b/drivers/power/supply/gpio-charger.c index 1b959c7f8b0e..875735d50716 100644 --- a/drivers/power/supply/gpio-charger.c +++ b/drivers/power/supply/gpio-charger.c @@ -112,9 +112,14 @@ static int gpio_charger_get_irq(struct device *dev, void *dev_id, return irq; } +/* + * The entries will be overwritten by the driver's probe routine depending + * on the available features. This list ensures that the array is big + * enough for all optional features. + */ static enum power_supply_property gpio_charger_properties[] = { POWER_SUPPLY_PROP_ONLINE, - POWER_SUPPLY_PROP_STATUS /* Must always be last in the array. */ + POWER_SUPPLY_PROP_STATUS, }; static int gpio_charger_probe(struct platform_device *pdev) @@ -128,6 +133,7 @@ static int gpio_charger_probe(struct platform_device *pdev) int charge_status_irq; unsigned long flags; int ret; + int num_props = 0; if (!pdata && !dev->of_node) { dev_err(dev, "No platform data\n"); @@ -142,13 +148,13 @@ static int gpio_charger_probe(struct platform_device *pdev) * This will fetch a GPIO descriptor from device tree, ACPI or * boardfile descriptor tables. It's good to try this first. */ - gpio_charger->gpiod = devm_gpiod_get(dev, NULL, GPIOD_IN); + gpio_charger->gpiod = devm_gpiod_get_optional(dev, NULL, GPIOD_IN); /* - * If this fails and we're not using device tree, try the - * legacy platform data method. + * Fall back to the legacy platform data method if no GPIO is specified + * using boardfile descriptor tables. 
*/ - if (IS_ERR(gpio_charger->gpiod) && !dev->of_node) { + if (!gpio_charger->gpiod && pdata) { /* Non-DT: use legacy GPIO numbers */ if (!gpio_is_valid(pdata->gpio)) { dev_err(dev, "Invalid gpio pin in pdata\n"); @@ -173,17 +179,23 @@ static int gpio_charger_probe(struct platform_device *pdev) return PTR_ERR(gpio_charger->gpiod); } + if (gpio_charger->gpiod) { + gpio_charger_properties[num_props] = POWER_SUPPLY_PROP_ONLINE; + num_props++; + } + charge_status = devm_gpiod_get_optional(dev, "charge-status", GPIOD_IN); - gpio_charger->charge_status = charge_status; - if (IS_ERR(gpio_charger->charge_status)) - return PTR_ERR(gpio_charger->charge_status); + if (IS_ERR(charge_status)) + return PTR_ERR(charge_status); + if (charge_status) { + gpio_charger->charge_status = charge_status; + gpio_charger_properties[num_props] = POWER_SUPPLY_PROP_STATUS; + num_props++; + } charger_desc = &gpio_charger->charger_desc; charger_desc->properties = gpio_charger_properties; - charger_desc->num_properties = ARRAY_SIZE(gpio_charger_properties); - /* Remove POWER_SUPPLY_PROP_STATUS from the supported properties. */ - if (!gpio_charger->charge_status) - charger_desc->num_properties -= 1; + charger_desc->num_properties = num_props; charger_desc->get_property = gpio_charger_get_property; psy_cfg.of_node = dev->of_node; @@ -269,6 +281,6 @@ static struct platform_driver gpio_charger_driver = { module_platform_driver(gpio_charger_driver); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); -MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO"); +MODULE_DESCRIPTION("Driver for chargers only communicating via GPIO(s)"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:gpio-charger"); diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c index 48aa44665e2f..6cb31b9a958d 100644 --- a/drivers/power/supply/max17040_battery.c +++ b/drivers/power/supply/max17040_battery.c @@ -69,6 +69,9 @@ static int max17040_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_CAPACITY: val->intval = chip->soc; break; + case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN: + val->intval = chip->low_soc_alert; + break; default: return -EINVAL; } @@ -256,19 +259,57 @@ static int max17040_enable_alert_irq(struct max17040_chip *chip) return ret; } +static int max17040_prop_writeable(struct power_supply *psy, + enum power_supply_property psp) +{ + switch (psp) { + case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN: + return 1; + default: + return 0; + } +} + +static int max17040_set_property(struct power_supply *psy, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct max17040_chip *chip = power_supply_get_drvdata(psy); + int ret; + + switch (psp) { + case POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN: + /* alert threshold can be programmed from 1% up to 32% */ + if ((val->intval < 1) || (val->intval > 32)) { + ret = -EINVAL; + break; + } + ret = max17040_set_low_soc_alert(chip->client, val->intval); + chip->low_soc_alert = val->intval; + break; + default: + ret = -EINVAL; + } + + return ret; +} + static enum power_supply_property max17040_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CAPACITY, + POWER_SUPPLY_PROP_CAPACITY_ALERT_MIN, }; static const struct power_supply_desc max17040_battery_desc = { - .name = "battery", - .type = POWER_SUPPLY_TYPE_BATTERY, - .get_property = max17040_get_property, - .properties = max17040_battery_props, - .num_properties = 
ARRAY_SIZE(max17040_battery_props), + .name = "battery", + .type = POWER_SUPPLY_TYPE_BATTERY, + .get_property = max17040_get_property, + .set_property = max17040_set_property, + .property_is_writeable = max17040_prop_writeable, + .properties = max17040_battery_props, + .num_properties = ARRAY_SIZE(max17040_battery_props), }; static int max17040_probe(struct i2c_client *client, diff --git a/drivers/power/supply/max8998_charger.c b/drivers/power/supply/max8998_charger.c index 9a926c7c0f22..c26023b19f26 100644 --- a/drivers/power/supply/max8998_charger.c +++ b/drivers/power/supply/max8998_charger.c @@ -23,6 +23,7 @@ struct max8998_battery_data { static enum power_supply_property max8998_battery_props[] = { POWER_SUPPLY_PROP_PRESENT, /* the presence of battery */ POWER_SUPPLY_PROP_ONLINE, /* charger is active or not */ + POWER_SUPPLY_PROP_STATUS, /* charger is charging/discharging/full */ }; /* Note that the charger control is done by a current regulator "CHARGER" */ @@ -49,10 +50,28 @@ static int max8998_battery_get_property(struct power_supply *psy, ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, ®); if (ret) return ret; - if (reg & (1 << 3)) - val->intval = 0; - else + + if (reg & (1 << 5)) val->intval = 1; + else + val->intval = 0; + + break; + case POWER_SUPPLY_PROP_STATUS: + ret = max8998_read_reg(i2c, MAX8998_REG_STATUS2, ®); + if (ret) + return ret; + + if (!(reg & (1 << 5))) { + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; + } else { + if (reg & (1 << 6)) + val->intval = POWER_SUPPLY_STATUS_FULL; + else if (reg & (1 << 3)) + val->intval = POWER_SUPPLY_STATUS_CHARGING; + else + val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; + } break; default: return -EINVAL; diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 90e56736d479..ccbad435ed12 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -733,7 +733,7 @@ EXPORT_SYMBOL_GPL(power_supply_put_battery_info); * percent * @table: Pointer to battery resistance temperature table * @table_len: The table length - * @ocv: Current temperature + * @temp: Current temperature * * This helper function is used to look up battery internal resistance percent * according to current temperature value from the resistance temperature table, diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c index bc79560229b5..3d383086018c 100644 --- a/drivers/power/supply/power_supply_sysfs.c +++ b/drivers/power/supply/power_supply_sysfs.c @@ -87,6 +87,7 @@ static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = { [POWER_SUPPLY_CHARGE_TYPE_STANDARD] = "Standard", [POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE] = "Adaptive", [POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom", + [POWER_SUPPLY_CHARGE_TYPE_LONGLIFE] = "Long Life", }; static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { @@ -101,6 +102,9 @@ static const char * const POWER_SUPPLY_HEALTH_TEXT[] = { [POWER_SUPPLY_HEALTH_SAFETY_TIMER_EXPIRE] = "Safety timer expire", [POWER_SUPPLY_HEALTH_OVERCURRENT] = "Over current", [POWER_SUPPLY_HEALTH_CALIBRATION_REQUIRED] = "Calibration required", + [POWER_SUPPLY_HEALTH_WARM] = "Warm", + [POWER_SUPPLY_HEALTH_COOL] = "Cool", + [POWER_SUPPLY_HEALTH_HOT] = "Hot", }; static const char * const POWER_SUPPLY_TECHNOLOGY_TEXT[] = { @@ -343,7 +347,7 @@ static umode_t power_supply_attr_is_visible(struct kobject *kobj, struct attribute *attr, int attrno) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = 
kobj_to_dev(kobj); struct power_supply *psy = dev_get_drvdata(dev); umode_t mode = S_IRUSR | S_IRGRP | S_IROTH; int i; diff --git a/drivers/power/supply/rt5033_battery.c b/drivers/power/supply/rt5033_battery.c index d8667a9fc49b..f330452341f0 100644 --- a/drivers/power/supply/rt5033_battery.c +++ b/drivers/power/supply/rt5033_battery.c @@ -125,7 +125,7 @@ static int rt5033_battery_probe(struct i2c_client *client, battery = devm_kzalloc(&client->dev, sizeof(*battery), GFP_KERNEL); if (!battery) - return -EINVAL; + return -ENOMEM; battery->client = client; battery->regmap = devm_regmap_init_i2c(client, diff --git a/drivers/power/supply/sbs-battery.c b/drivers/power/supply/sbs-battery.c index 83b9924033bd..49c3508a6b79 100644 --- a/drivers/power/supply/sbs-battery.c +++ b/drivers/power/supply/sbs-battery.c @@ -51,6 +51,14 @@ enum { REG_CHARGE_VOLTAGE, }; +#define REG_ADDR_SPEC_INFO 0x1A +#define SPEC_INFO_VERSION_MASK GENMASK(7, 4) +#define SPEC_INFO_VERSION_SHIFT 4 + +#define SBS_VERSION_1_0 1 +#define SBS_VERSION_1_1 2 +#define SBS_VERSION_1_1_WITH_PEC 3 + #define REG_ADDR_MANUFACTURE_DATE 0x1B /* Battery Mode defines */ @@ -224,14 +232,57 @@ exit: static int sbs_update_presence(struct sbs_info *chip, bool is_present) { + struct i2c_client *client = chip->client; + int retries = chip->i2c_retry_count; + s32 ret = 0; + u8 version; + if (chip->is_present == is_present) return 0; if (!is_present) { chip->is_present = false; + /* Disable PEC when no device is present */ + client->flags &= ~I2C_CLIENT_PEC; return 0; } + /* Check if device supports packet error checking and use it */ + while (retries > 0) { + ret = i2c_smbus_read_word_data(client, REG_ADDR_SPEC_INFO); + if (ret >= 0) + break; + + /* + * Some batteries trigger the detection pin before the + * I2C bus is properly connected. This works around the + * issue. + */ + msleep(100); + + retries--; + } + + if (ret < 0) { + dev_dbg(&client->dev, "failed to read spec info: %d\n", ret); + + /* fallback to old behaviour */ + client->flags &= ~I2C_CLIENT_PEC; + chip->is_present = true; + + return ret; + } + + version = (ret & SPEC_INFO_VERSION_MASK) >> SPEC_INFO_VERSION_SHIFT; + + if (version == SBS_VERSION_1_1_WITH_PEC) + client->flags |= I2C_CLIENT_PEC; + else + client->flags &= ~I2C_CLIENT_PEC; + + dev_dbg(&client->dev, "PEC: %s\n", (client->flags & I2C_CLIENT_PEC) ? 
+ "enabled" : "disabled"); + if (!chip->is_present && is_present && !chip->charger_broadcasts) sbs_disable_charger_broadcasts(chip); @@ -263,8 +314,7 @@ static int sbs_read_word_data(struct i2c_client *client, u8 address) return ret; } -static int sbs_read_string_data(struct i2c_client *client, u8 address, - char *values) +static int sbs_read_string_data_fallback(struct i2c_client *client, u8 address, char *values) { struct sbs_info *chip = i2c_get_clientdata(client); s32 ret = 0, block_length = 0; @@ -274,6 +324,9 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address, retries_length = chip->i2c_retry_count; retries_block = chip->i2c_retry_count; + dev_warn_once(&client->dev, "I2C adapter does not support I2C_FUNC_SMBUS_READ_BLOCK_DATA.\n" + "Fallback method does not support PEC.\n"); + /* Adapter needs to support these two functions */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | @@ -329,6 +382,38 @@ static int sbs_read_string_data(struct i2c_client *client, u8 address, return ret; } +static int sbs_read_string_data(struct i2c_client *client, u8 address, char *values) +{ + struct sbs_info *chip = i2c_get_clientdata(client); + int retries = chip->i2c_retry_count; + int ret = 0; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BLOCK_DATA)) { + bool pec = client->flags & I2C_CLIENT_PEC; + client->flags &= ~I2C_CLIENT_PEC; + ret = sbs_read_string_data_fallback(client, address, values); + if (pec) + client->flags |= I2C_CLIENT_PEC; + return ret; + } + + while (retries > 0) { + ret = i2c_smbus_read_block_data(client, address, values); + if (ret >= 0) + break; + retries--; + } + + if (ret < 0) { + dev_dbg(&client->dev, "failed to read block 0x%x: %d\n", address, ret); + return ret; + } + + /* add string termination */ + values[ret] = '\0'; + return ret; +} + static int sbs_write_word_data(struct i2c_client *client, u8 address, u16 value) { diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index be42e814ea34..9c627618c224 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -5,6 +5,7 @@ #include <linux/iio/consumer.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/math64.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/of.h> @@ -133,14 +134,14 @@ static const char * const sc27xx_charger_supply_name[] = { "sc2723_charger", }; -static int sc27xx_fgu_adc_to_current(struct sc27xx_fgu_data *data, int adc) +static int sc27xx_fgu_adc_to_current(struct sc27xx_fgu_data *data, s64 adc) { - return DIV_ROUND_CLOSEST(adc * 1000, data->cur_1000ma_adc); + return DIV_S64_ROUND_CLOSEST(adc * 1000, data->cur_1000ma_adc); } -static int sc27xx_fgu_adc_to_voltage(struct sc27xx_fgu_data *data, int adc) +static int sc27xx_fgu_adc_to_voltage(struct sc27xx_fgu_data *data, s64 adc) { - return DIV_ROUND_CLOSEST(adc * 1000, data->vol_1000mv_adc); + return DIV_S64_ROUND_CLOSEST(adc * 1000, data->vol_1000mv_adc); } static int sc27xx_fgu_voltage_to_adc(struct sc27xx_fgu_data *data, int vol) diff --git a/drivers/power/supply/test_power.c b/drivers/power/supply/test_power.c index b3c05ff05783..04acd76bbaa1 100644 --- a/drivers/power/supply/test_power.c +++ b/drivers/power/supply/test_power.c @@ -34,7 +34,7 @@ static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION; static int battery_capacity = 50; static int battery_voltage = 3300; static int battery_charge_counter = -1000; -static int battery_current = 
1600; +static int battery_current = -1600; static bool module_initialized; diff --git a/drivers/power/supply/wilco-charger.c b/drivers/power/supply/wilco-charger.c index b3c6d7cdd731..98ade073ef05 100644 --- a/drivers/power/supply/wilco-charger.c +++ b/drivers/power/supply/wilco-charger.c @@ -27,6 +27,7 @@ enum charge_mode { CHARGE_MODE_AC = 3, /* Mostly AC use, used for Trickle */ CHARGE_MODE_AUTO = 4, /* Used for Adaptive */ CHARGE_MODE_CUSTOM = 5, /* Used for Custom */ + CHARGE_MODE_LONGLIFE = 6, /* Used for Long Life */ }; #define CHARGE_LOWER_LIMIT_MIN 50 @@ -48,6 +49,8 @@ static int psp_val_to_charge_mode(int psp_val) return CHARGE_MODE_AUTO; case POWER_SUPPLY_CHARGE_TYPE_CUSTOM: return CHARGE_MODE_CUSTOM; + case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE: + return CHARGE_MODE_LONGLIFE; default: return -EINVAL; } @@ -67,6 +70,8 @@ static int charge_mode_to_psp_val(enum charge_mode mode) return POWER_SUPPLY_CHARGE_TYPE_ADAPTIVE; case CHARGE_MODE_CUSTOM: return POWER_SUPPLY_CHARGE_TYPE_CUSTOM; + case CHARGE_MODE_LONGLIFE: + return POWER_SUPPLY_CHARGE_TYPE_LONGLIFE; default: return -EINVAL; } diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 004b2ea9b5fd..276e939a5684 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -510,12 +510,12 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, last->period > s2.period && last->period <= state->period) dev_warn(chip->dev, - ".apply didn't pick the best available period (requested: %u, applied: %u, possible: %u)\n", + ".apply didn't pick the best available period (requested: %llu, applied: %llu, possible: %llu)\n", state->period, s2.period, last->period); if (state->enabled && state->period < s2.period) dev_warn(chip->dev, - ".apply is supposed to round down period (requested: %u, applied: %u)\n", + ".apply is supposed to round down period (requested: %llu, applied: %llu)\n", state->period, s2.period); if (state->enabled && @@ -524,14 +524,14 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, last->duty_cycle > s2.duty_cycle && last->duty_cycle <= state->duty_cycle) dev_warn(chip->dev, - ".apply didn't pick the best available duty cycle (requested: %u/%u, applied: %u/%u, possible: %u/%u)\n", + ".apply didn't pick the best available duty cycle (requested: %llu/%llu, applied: %llu/%llu, possible: %llu/%llu)\n", state->duty_cycle, state->period, s2.duty_cycle, s2.period, last->duty_cycle, last->period); if (state->enabled && state->duty_cycle < s2.duty_cycle) dev_warn(chip->dev, - ".apply is supposed to round down duty_cycle (requested: %u/%u, applied: %u/%u)\n", + ".apply is supposed to round down duty_cycle (requested: %llu/%llu, applied: %llu/%llu)\n", state->duty_cycle, state->period, s2.duty_cycle, s2.period); @@ -558,7 +558,7 @@ static void pwm_apply_state_debug(struct pwm_device *pwm, (s1.enabled && s1.period != last->period) || (s1.enabled && s1.duty_cycle != last->duty_cycle)) { dev_err(chip->dev, - ".apply is not idempotent (ena=%d pol=%d %u/%u) -> (ena=%d pol=%d %u/%u)\n", + ".apply is not idempotent (ena=%d pol=%d %llu/%llu) -> (ena=%d pol=%d %llu/%llu)\n", s1.enabled, s1.polarity, s1.duty_cycle, s1.period, last->enabled, last->polarity, last->duty_cycle, last->period); @@ -1284,8 +1284,8 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s) if (state.enabled) seq_puts(s, " enabled"); - seq_printf(s, " period: %u ns", state.period); - seq_printf(s, " duty: %u ns", state.duty_cycle); + seq_printf(s, " period: %llu ns", state.period); + seq_printf(s, " duty: %llu ns", state.duty_cycle); 
seq_printf(s, " polarity: %s", state.polarity ? "inverse" : "normal"); diff --git a/drivers/pwm/pwm-bcm-iproc.c b/drivers/pwm/pwm-bcm-iproc.c index 1f829edd8ee7..79b1e58e946d 100644 --- a/drivers/pwm/pwm-bcm-iproc.c +++ b/drivers/pwm/pwm-bcm-iproc.c @@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, u64 tmp, multi, rate; u32 value, prescale; - rate = clk_get_rate(ip->clk); - value = readl(ip->base + IPROC_PWM_CTRL_OFFSET); if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm))) @@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm, else state->polarity = PWM_POLARITY_INVERSED; + rate = clk_get_rate(ip->clk); + if (rate == 0) { + state->period = 0; + state->duty_cycle = 0; + return; + } + value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET); prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm); prescale &= IPROC_PWM_PRESCALE_MAX; @@ -143,8 +148,7 @@ static int iproc_pwmc_apply(struct pwm_chip *chip, struct pwm_device *pwm, value = rate * state->duty_cycle; duty = div64_u64(value, div); - if (period < IPROC_PWM_PERIOD_MIN || - duty < IPROC_PWM_DUTY_CYCLE_MIN) + if (period < IPROC_PWM_PERIOD_MIN) return -EINVAL; if (period <= IPROC_PWM_PERIOD_MAX && diff --git a/drivers/pwm/pwm-bcm-kona.c b/drivers/pwm/pwm-bcm-kona.c index 81da91df2529..16c5898b934a 100644 --- a/drivers/pwm/pwm-bcm-kona.c +++ b/drivers/pwm/pwm-bcm-kona.c @@ -138,7 +138,7 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm, dc = div64_u64(val, div); /* If duty_ns or period_ns are not achievable then return */ - if (pc < PERIOD_COUNT_MIN || dc < DUTY_CYCLE_HIGH_MIN) + if (pc < PERIOD_COUNT_MIN) return -EINVAL; /* If pc and dc are in bounds, the calculation is done */ diff --git a/drivers/pwm/pwm-clps711x.c b/drivers/pwm/pwm-clps711x.c index 924d39a797cf..ba9500aca078 100644 --- a/drivers/pwm/pwm-clps711x.c +++ b/drivers/pwm/pwm-clps711x.c @@ -43,7 +43,7 @@ static void clps711x_pwm_update_val(struct clps711x_chip *priv, u32 n, u32 v) static unsigned int clps711x_get_duty(struct pwm_device *pwm, unsigned int v) { /* Duty cycle 0..15 max */ - return DIV_ROUND_CLOSEST(v * 0xf, pwm->args.period); + return DIV64_U64_ROUND_CLOSEST(v * 0xf, pwm->args.period); } static int clps711x_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) diff --git a/drivers/pwm/pwm-imx-tpm.c b/drivers/pwm/pwm-imx-tpm.c index 5f3d7f7e6aef..fcdf6befb838 100644 --- a/drivers/pwm/pwm-imx-tpm.c +++ b/drivers/pwm/pwm-imx-tpm.c @@ -124,7 +124,7 @@ static int pwm_imx_tpm_round_state(struct pwm_chip *chip, real_state->duty_cycle = state->duty_cycle; tmp = (u64)p->mod * real_state->duty_cycle; - p->val = DIV_ROUND_CLOSEST_ULL(tmp, real_state->period); + p->val = DIV64_U64_ROUND_CLOSEST(tmp, real_state->period); real_state->polarity = state->polarity; real_state->enabled = state->enabled; diff --git a/drivers/pwm/pwm-imx27.c b/drivers/pwm/pwm-imx27.c index 732a6f3701e8..c50d453552bd 100644 --- a/drivers/pwm/pwm-imx27.c +++ b/drivers/pwm/pwm-imx27.c @@ -202,7 +202,7 @@ static void pwm_imx27_wait_fifo_slot(struct pwm_chip *chip, sr = readl(imx->mmio_base + MX3_PWMSR); fifoav = FIELD_GET(MX3_PWMSR_FIFOAV, sr); if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) { - period_ms = DIV_ROUND_UP(pwm_get_period(pwm), + period_ms = DIV_ROUND_UP_ULL(pwm_get_period(pwm), NSEC_PER_MSEC); msleep(period_ms); diff --git a/drivers/pwm/pwm-iqs620a.c b/drivers/pwm/pwm-iqs620a.c index 674f0e238ba0..7d33e3646436 100644 --- a/drivers/pwm/pwm-iqs620a.c +++ 
b/drivers/pwm/pwm-iqs620a.c @@ -25,10 +25,10 @@ #include <linux/regmap.h> #include <linux/slab.h> -#define IQS620_PWR_SETTINGS 0xD2 +#define IQS620_PWR_SETTINGS 0xd2 #define IQS620_PWR_SETTINGS_PWM_OUT BIT(7) -#define IQS620_PWM_DUTY_CYCLE 0xD8 +#define IQS620_PWM_DUTY_CYCLE 0xd8 #define IQS620_PWM_PERIOD_NS 1000000 @@ -46,7 +46,8 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, { struct iqs620_pwm_private *iqs620_pwm; struct iqs62x_core *iqs62x; - int duty_scale, ret; + u64 duty_scale; + int ret; if (state->polarity != PWM_POLARITY_NORMAL) return -ENOTSUPP; @@ -69,7 +70,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, * For lower duty cycles (e.g. 0), the PWM output is simply disabled to * allow an external pull-down resistor to hold the GPIO3/LTX pin low. */ - duty_scale = state->duty_cycle * 256 / IQS620_PWM_PERIOD_NS; + duty_scale = div_u64(state->duty_cycle * 256, IQS620_PWM_PERIOD_NS); mutex_lock(&iqs620_pwm->lock); @@ -81,7 +82,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, } if (duty_scale) { - u8 duty_val = min(duty_scale - 1, 0xFF); + u8 duty_val = min_t(u64, duty_scale - 1, 0xff); ret = regmap_write(iqs62x->regmap, IQS620_PWM_DUTY_CYCLE, duty_val); @@ -93,7 +94,7 @@ static int iqs620_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, if (state->enabled && duty_scale) { ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, - IQS620_PWR_SETTINGS_PWM_OUT, 0xFF); + IQS620_PWR_SETTINGS_PWM_OUT, 0xff); if (ret) goto err_mutex; } @@ -159,7 +160,7 @@ static int iqs620_pwm_notifier(struct notifier_block *notifier, ret = regmap_update_bits(iqs62x->regmap, IQS620_PWR_SETTINGS, IQS620_PWR_SETTINGS_PWM_OUT, - iqs620_pwm->out_en ? 0xFF : 0); + iqs620_pwm->out_en ? 0xff : 0); err_mutex: mutex_unlock(&iqs620_pwm->lock); diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index b94e0d09c300..ab001ce55178 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -46,6 +46,7 @@ struct pwm_mediatek_of_data { * @clk_main: the clock used by PWM core * @clk_pwms: the clock used by each PWM channel * @clk_freq: the fix clock frequency of legacy MIPS SoC + * @soc: pointer to chip's platform data */ struct pwm_mediatek_chip { struct pwm_chip chip; diff --git a/drivers/pwm/pwm-omap-dmtimer.c b/drivers/pwm/pwm-omap-dmtimer.c index 0d31833db2e2..358db4ff9d4f 100644 --- a/drivers/pwm/pwm-omap-dmtimer.c +++ b/drivers/pwm/pwm-omap-dmtimer.c @@ -14,7 +14,7 @@ * with a timer counter that goes up. When it overflows it gets * reloaded with the load value and the pwm output goes up. * When counter matches with match register, the output goes down. - * Reference Manual: http://www.ti.com/lit/ug/spruh73q/spruh73q.pdf + * Reference Manual: https://www.ti.com/lit/ug/spruh73q/spruh73q.pdf * * Limitations: * - When PWM is stopped, timer counter gets stopped immediately. This @@ -58,7 +58,7 @@ * @mutex: Mutex to protect pwm apply state * @dm_timer: Pointer to omap dm timer. * @pdata: Pointer to omap dm timer ops. 
- * dm_timer_pdev: Pointer to omap dm timer platform device + * @dm_timer_pdev: Pointer to omap dm timer platform device */ struct pwm_omap_dmtimer_chip { struct pwm_chip chip; diff --git a/drivers/pwm/pwm-sifive.c b/drivers/pwm/pwm-sifive.c index cc63f9baa481..62de0bb85921 100644 --- a/drivers/pwm/pwm-sifive.c +++ b/drivers/pwm/pwm-sifive.c @@ -181,7 +181,7 @@ static int pwm_sifive_apply(struct pwm_chip *chip, struct pwm_device *pwm, * consecutively */ num = (u64)duty_cycle * (1U << PWM_SIFIVE_CMPWIDTH); - frac = DIV_ROUND_CLOSEST_ULL(num, state->period); + frac = DIV64_U64_ROUND_CLOSEST(num, state->period); /* The hardware cannot generate a 100% duty cycle */ frac = min(frac, (1U << PWM_SIFIVE_CMPWIDTH) - 1); diff --git a/drivers/pwm/pwm-stm32-lp.c b/drivers/pwm/pwm-stm32-lp.c index 67fca62524dc..134c14621ee0 100644 --- a/drivers/pwm/pwm-stm32-lp.c +++ b/drivers/pwm/pwm-stm32-lp.c @@ -61,7 +61,7 @@ static int stm32_pwm_lp_apply(struct pwm_chip *chip, struct pwm_device *pwm, do_div(div, NSEC_PER_SEC); if (!div) { /* Clock is too slow to achieve requested period. */ - dev_dbg(priv->chip.dev, "Can't reach %u ns\n", state->period); + dev_dbg(priv->chip.dev, "Can't reach %llu ns\n", state->period); return -EINVAL; } diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 18fbbe3277d0..961c59c99bb3 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -285,7 +285,7 @@ static int sun4i_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, val = (duty & PWM_DTY_MASK) | PWM_PRD(period); sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm)); sun4i_pwm->next_period[pwm->hwpwm] = jiffies + - usecs_to_jiffies(cstate.period / 1000 + 1); + nsecs_to_jiffies(cstate.period + 1000); if (state->polarity != PWM_POLARITY_NORMAL) ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm); diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index ab38c8203b79..683804c7d26c 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -2,7 +2,7 @@ /* * ECAP PWM driver * - * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments, Inc. - https://www.ti.com/ */ #include <linux/module.h> diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c index 7b4c770ce9d6..0846917ff2d2 100644 --- a/drivers/pwm/pwm-tiehrpwm.c +++ b/drivers/pwm/pwm-tiehrpwm.c @@ -2,7 +2,7 @@ /* * EHRPWM PWM driver * - * Copyright (C) 2012 Texas Instruments, Inc. - http://www.ti.com/ + * Copyright (C) 2012 Texas Instruments, Inc. 
- https://www.ti.com/ */ #include <linux/module.h> diff --git a/drivers/pwm/sysfs.c b/drivers/pwm/sysfs.c index 2389b8669846..449dbc0f49ed 100644 --- a/drivers/pwm/sysfs.c +++ b/drivers/pwm/sysfs.c @@ -42,7 +42,7 @@ static ssize_t period_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.period); + return sprintf(buf, "%llu\n", state.period); } static ssize_t period_store(struct device *child, @@ -52,10 +52,10 @@ static ssize_t period_store(struct device *child, struct pwm_export *export = child_to_pwm_export(child); struct pwm_device *pwm = export->pwm; struct pwm_state state; - unsigned int val; + u64 val; int ret; - ret = kstrtouint(buf, 0, &val); + ret = kstrtou64(buf, 0, &val); if (ret) return ret; @@ -77,7 +77,7 @@ static ssize_t duty_cycle_show(struct device *child, pwm_get_state(pwm, &state); - return sprintf(buf, "%u\n", state.duty_cycle); + return sprintf(buf, "%llu\n", state.duty_cycle); } static ssize_t duty_cycle_store(struct device *child, diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 451608e960a1..c07ceec3c6d4 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -981,7 +981,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) if (unlikely(copy_from_user(transfer, (void __user *)(uintptr_t)transaction.block, - transaction.count * sizeof(*transfer)))) { + array_size(sizeof(*transfer), transaction.count)))) { ret = -EFAULT; goto out_free; } @@ -994,7 +994,7 @@ static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg) if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block, transfer, - transaction.count * sizeof(*transfer)))) + array_size(sizeof(*transfer), transaction.count)))) ret = -EFAULT; out_free: @@ -1710,8 +1710,7 @@ static int rio_mport_add_riodev(struct mport_cdev_priv *priv, if (rval & RIO_PEF_SWITCH) { rio_mport_read_config_32(mport, destid, hopcount, RIO_SWP_INFO_CAR, &swpinfo); - size += (RIO_GET_TOTAL_PORTS(swpinfo) * - sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); + size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); } rdev = kzalloc(size, GFP_KERNEL); diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c index eb8ed28533f8..19b0c33f4a62 100644 --- a/drivers/rapidio/rio-scan.c +++ b/drivers/rapidio/rio-scan.c @@ -330,7 +330,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, size_t size; u32 swpinfo = 0; - size = sizeof(struct rio_dev); + size = sizeof(*rdev); if (rio_mport_read_config_32(port, destid, hopcount, RIO_PEF_CAR, &result)) return NULL; @@ -338,10 +338,8 @@ static struct rio_dev *rio_setup_device(struct rio_net *net, if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) { rio_mport_read_config_32(port, destid, hopcount, RIO_SWP_INFO_CAR, &swpinfo); - if (result & RIO_PEF_SWITCH) { - size += (RIO_GET_TOTAL_PORTS(swpinfo) * - sizeof(rswitch->nextdev[0])) + sizeof(*rswitch); - } + if (result & RIO_PEF_SWITCH) + size += struct_size(rswitch, nextdev, RIO_GET_TOTAL_PORTS(swpinfo)); } rdev = kzalloc(size, GFP_KERNEL); diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index c4d1731295eb..c6659dfea7c7 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig @@ -14,6 +14,15 @@ config REMOTEPROC if REMOTEPROC +config REMOTEPROC_CDEV + bool "Remoteproc character device interface" + help + Say y here to have a character device interface for the remoteproc + framework. 
Userspace can boot/shutdown remote processors through + this interface. + + It's safe to say N if you don't want to use this interface. + config IMX_REMOTEPROC tristate "IMX6/7 remoteproc support" depends on ARCH_MXC @@ -116,6 +125,9 @@ config KEYSTONE_REMOTEPROC It's safe to say N here if you're not interested in the Keystone DSPs or just want to use a bare minimum kernel. +config QCOM_PIL_INFO + tristate + config QCOM_RPROC_COMMON tristate @@ -132,6 +144,7 @@ config QCOM_Q6V5_ADSP depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n depends on QCOM_SYSMON || QCOM_SYSMON=n select MFD_SYSCON + select QCOM_PIL_INFO select QCOM_MDT_LOADER select QCOM_Q6V5_COMMON select QCOM_RPROC_COMMON @@ -148,8 +161,8 @@ config QCOM_Q6V5_MSS depends on QCOM_SYSMON || QCOM_SYSMON=n select MFD_SYSCON select QCOM_MDT_LOADER + select QCOM_PIL_INFO select QCOM_Q6V5_COMMON - select QCOM_Q6V5_IPA_NOTIFY select QCOM_RPROC_COMMON select QCOM_SCM help @@ -164,6 +177,7 @@ config QCOM_Q6V5_PAS depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n depends on QCOM_SYSMON || QCOM_SYSMON=n select MFD_SYSCON + select QCOM_PIL_INFO select QCOM_MDT_LOADER select QCOM_Q6V5_COMMON select QCOM_RPROC_COMMON @@ -182,6 +196,7 @@ config QCOM_Q6V5_WCSS depends on QCOM_SYSMON || QCOM_SYSMON=n select MFD_SYSCON select QCOM_MDT_LOADER + select QCOM_PIL_INFO select QCOM_Q6V5_COMMON select QCOM_RPROC_COMMON select QCOM_SCM @@ -189,9 +204,6 @@ config QCOM_Q6V5_WCSS Say y here to support the Qualcomm Peripheral Image Loader for the Hexagon V5 based WCSS remote processors. -config QCOM_Q6V5_IPA_NOTIFY - tristate - config QCOM_SYSMON tristate "Qualcomm sysmon driver" depends on RPMSG @@ -215,6 +227,7 @@ config QCOM_WCNSS_PIL depends on QCOM_SMEM depends on QCOM_SYSMON || QCOM_SYSMON=n select QCOM_MDT_LOADER + select QCOM_PIL_INFO select QCOM_RPROC_COMMON select QCOM_SCM help @@ -249,6 +262,19 @@ config STM32_RPROC This can be either built-in or a loadable module. +config TI_K3_DSP_REMOTEPROC + tristate "TI K3 DSP remoteproc support" + depends on ARCH_K3 + select MAILBOX + select OMAP2PLUS_MBOX + help + Say m here to support TI's C66x and C71x DSP remote processor + subsystems on various TI K3 family of SoCs through the remote + processor framework. + + It's safe to say N here if you're not interested in utilizing + the DSP slave processors. 
+ endif # REMOTEPROC endmenu diff --git a/drivers/remoteproc/Makefile b/drivers/remoteproc/Makefile index e8b886e511f0..3dfa28e6c701 100644 --- a/drivers/remoteproc/Makefile +++ b/drivers/remoteproc/Makefile @@ -5,10 +5,12 @@ obj-$(CONFIG_REMOTEPROC) += remoteproc.o remoteproc-y := remoteproc_core.o +remoteproc-y += remoteproc_coredump.o remoteproc-y += remoteproc_debugfs.o remoteproc-y += remoteproc_sysfs.o remoteproc-y += remoteproc_virtio.o remoteproc-y += remoteproc_elf_loader.o +obj-$(CONFIG_REMOTEPROC_CDEV) += remoteproc_cdev.o obj-$(CONFIG_IMX_REMOTEPROC) += imx_rproc.o obj-$(CONFIG_INGENIC_VPU_RPROC) += ingenic_rproc.o obj-$(CONFIG_MTK_SCP) += mtk_scp.o mtk_scp_ipi.o @@ -16,13 +18,13 @@ obj-$(CONFIG_OMAP_REMOTEPROC) += omap_remoteproc.o obj-$(CONFIG_WKUP_M3_RPROC) += wkup_m3_rproc.o obj-$(CONFIG_DA8XX_REMOTEPROC) += da8xx_remoteproc.o obj-$(CONFIG_KEYSTONE_REMOTEPROC) += keystone_remoteproc.o +obj-$(CONFIG_QCOM_PIL_INFO) += qcom_pil_info.o obj-$(CONFIG_QCOM_RPROC_COMMON) += qcom_common.o obj-$(CONFIG_QCOM_Q6V5_COMMON) += qcom_q6v5.o obj-$(CONFIG_QCOM_Q6V5_ADSP) += qcom_q6v5_adsp.o obj-$(CONFIG_QCOM_Q6V5_MSS) += qcom_q6v5_mss.o obj-$(CONFIG_QCOM_Q6V5_PAS) += qcom_q6v5_pas.o obj-$(CONFIG_QCOM_Q6V5_WCSS) += qcom_q6v5_wcss.o -obj-$(CONFIG_QCOM_Q6V5_IPA_NOTIFY) += qcom_q6v5_ipa_notify.o obj-$(CONFIG_QCOM_SYSMON) += qcom_sysmon.o obj-$(CONFIG_QCOM_WCNSS_PIL) += qcom_wcnss_pil.o qcom_wcnss_pil-y += qcom_wcnss.o @@ -30,3 +32,4 @@ qcom_wcnss_pil-y += qcom_wcnss_iris.o obj-$(CONFIG_ST_REMOTEPROC) += st_remoteproc.o obj-$(CONFIG_ST_SLIM_REMOTEPROC) += st_slim_rproc.o obj-$(CONFIG_STM32_RPROC) += stm32_rproc.o +obj-$(CONFIG_TI_K3_DSP_REMOTEPROC) += ti_k3_dsp_remoteproc.o diff --git a/drivers/remoteproc/ingenic_rproc.c b/drivers/remoteproc/ingenic_rproc.c index 189020d77b25..1c2b21a5d178 100644 --- a/drivers/remoteproc/ingenic_rproc.c +++ b/drivers/remoteproc/ingenic_rproc.c @@ -11,7 +11,6 @@ #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> -#include <linux/pm_runtime.h> #include <linux/remoteproc.h> #include "remoteproc_internal.h" @@ -62,6 +61,28 @@ struct vpu { struct device *dev; }; +static int ingenic_rproc_prepare(struct rproc *rproc) +{ + struct vpu *vpu = rproc->priv; + int ret; + + /* The clocks must be enabled for the firmware to be loaded in TCSM */ + ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks); + if (ret) + dev_err(vpu->dev, "Unable to start clocks: %d\n", ret); + + return ret; +} + +static int ingenic_rproc_unprepare(struct rproc *rproc) +{ + struct vpu *vpu = rproc->priv; + + clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks); + + return 0; +} + static int ingenic_rproc_start(struct rproc *rproc) { struct vpu *vpu = rproc->priv; @@ -115,6 +136,8 @@ static void *ingenic_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) } static struct rproc_ops ingenic_rproc_ops = { + .prepare = ingenic_rproc_prepare, + .unprepare = ingenic_rproc_unprepare, .start = ingenic_rproc_start, .stop = ingenic_rproc_stop, .kick = ingenic_rproc_kick, @@ -135,16 +158,6 @@ static irqreturn_t vpu_interrupt(int irq, void *data) return rproc_vq_interrupt(rproc, vring); } -static void ingenic_rproc_disable_clks(void *data) -{ - struct vpu *vpu = data; - - pm_runtime_resume(vpu->dev); - pm_runtime_disable(vpu->dev); - - clk_bulk_disable_unprepare(ARRAY_SIZE(vpu->clks), vpu->clks); -} - static int ingenic_rproc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -206,35 +219,13 @@ static int ingenic_rproc_probe(struct 
platform_device *pdev) disable_irq(vpu->irq); - /* The clocks must be enabled for the firmware to be loaded in TCSM */ - ret = clk_bulk_prepare_enable(ARRAY_SIZE(vpu->clks), vpu->clks); - if (ret) { - dev_err(dev, "Unable to start clocks\n"); - return ret; - } - - pm_runtime_irq_safe(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - pm_runtime_use_autosuspend(dev); - - ret = devm_add_action_or_reset(dev, ingenic_rproc_disable_clks, vpu); - if (ret) { - dev_err(dev, "Unable to register action\n"); - goto out_pm_put; - } - ret = devm_rproc_add(dev, rproc); if (ret) { dev_err(dev, "Failed to register remote processor\n"); - goto out_pm_put; + return ret; } -out_pm_put: - pm_runtime_put_autosuspend(dev); - - return ret; + return 0; } static const struct of_device_id ingenic_rproc_of_matches[] = { @@ -243,33 +234,10 @@ static const struct of_device_id ingenic_rproc_of_matches[] = { }; MODULE_DEVICE_TABLE(of, ingenic_rproc_of_matches); -static int __maybe_unused ingenic_rproc_suspend(struct device *dev) -{ - struct vpu *vpu = dev_get_drvdata(dev); - - clk_bulk_disable(ARRAY_SIZE(vpu->clks), vpu->clks); - - return 0; -} - -static int __maybe_unused ingenic_rproc_resume(struct device *dev) -{ - struct vpu *vpu = dev_get_drvdata(dev); - - return clk_bulk_enable(ARRAY_SIZE(vpu->clks), vpu->clks); -} - -static const struct dev_pm_ops __maybe_unused ingenic_rproc_pm = { - SET_RUNTIME_PM_OPS(ingenic_rproc_suspend, ingenic_rproc_resume, NULL) -}; - static struct platform_driver ingenic_rproc_driver = { .probe = ingenic_rproc_probe, .driver = { .name = "ingenic-vpu", -#ifdef CONFIG_PM - .pm = &ingenic_rproc_pm, -#endif .of_match_table = ingenic_rproc_of_matches, }, }; diff --git a/drivers/remoteproc/qcom_common.c b/drivers/remoteproc/qcom_common.c index 9028cea2d81e..085fd73fa23a 100644 --- a/drivers/remoteproc/qcom_common.c +++ b/drivers/remoteproc/qcom_common.c @@ -12,8 +12,10 @@ #include <linux/module.h> #include <linux/notifier.h> #include <linux/remoteproc.h> +#include <linux/remoteproc/qcom_rproc.h> #include <linux/rpmsg/qcom_glink.h> #include <linux/rpmsg/qcom_smd.h> +#include <linux/slab.h> #include <linux/soc/qcom/mdt_loader.h> #include "remoteproc_internal.h" @@ -23,7 +25,14 @@ #define to_smd_subdev(d) container_of(d, struct qcom_rproc_subdev, subdev) #define to_ssr_subdev(d) container_of(d, struct qcom_rproc_ssr, subdev) -static BLOCKING_NOTIFIER_HEAD(ssr_notifiers); +struct qcom_ssr_subsystem { + const char *name; + struct srcu_notifier_head notifier_list; + struct list_head list; +}; + +static LIST_HEAD(qcom_ssr_subsystem_list); +static DEFINE_MUTEX(qcom_ssr_subsys_lock); static int glink_subdev_start(struct rproc_subdev *subdev) { @@ -189,37 +198,122 @@ void qcom_remove_smd_subdev(struct rproc *rproc, struct qcom_rproc_subdev *smd) } EXPORT_SYMBOL_GPL(qcom_remove_smd_subdev); +static struct qcom_ssr_subsystem *qcom_ssr_get_subsys(const char *name) +{ + struct qcom_ssr_subsystem *info; + + mutex_lock(&qcom_ssr_subsys_lock); + /* Match in the global qcom_ssr_subsystem_list with name */ + list_for_each_entry(info, &qcom_ssr_subsystem_list, list) + if (!strcmp(info->name, name)) + goto out; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + info = ERR_PTR(-ENOMEM); + goto out; + } + info->name = kstrdup_const(name, GFP_KERNEL); + srcu_init_notifier_head(&info->notifier_list); + + /* Add to global notification list */ + list_add_tail(&info->list, &qcom_ssr_subsystem_list); + +out: + mutex_unlock(&qcom_ssr_subsys_lock); + return info; +} + 
/** * qcom_register_ssr_notifier() - register SSR notification handler - * @nb: notifier_block to notify for restart notifications + * @name: Subsystem's SSR name + * @nb: notifier_block to be invoked upon subsystem's state change * - * Returns 0 on success, negative errno on failure. + * This registers the @nb notifier block as part the notifier chain for a + * remoteproc associated with @name. The notifier block's callback + * will be invoked when the remote processor's SSR events occur + * (pre/post startup and pre/post shutdown). * - * This register the @notify function as handler for restart notifications. As - * remote processors are stopped this function will be called, with the SSR - * name passed as a parameter. + * Return: a subsystem cookie on success, ERR_PTR on failure. */ -int qcom_register_ssr_notifier(struct notifier_block *nb) +void *qcom_register_ssr_notifier(const char *name, struct notifier_block *nb) { - return blocking_notifier_chain_register(&ssr_notifiers, nb); + struct qcom_ssr_subsystem *info; + + info = qcom_ssr_get_subsys(name); + if (IS_ERR(info)) + return info; + + srcu_notifier_chain_register(&info->notifier_list, nb); + + return &info->notifier_list; } EXPORT_SYMBOL_GPL(qcom_register_ssr_notifier); /** * qcom_unregister_ssr_notifier() - unregister SSR notification handler + * @notify: subsystem cookie returned from qcom_register_ssr_notifier * @nb: notifier_block to unregister + * + * This function will unregister the notifier from the particular notifier + * chain. + * + * Return: 0 on success, %ENOENT otherwise. */ -void qcom_unregister_ssr_notifier(struct notifier_block *nb) +int qcom_unregister_ssr_notifier(void *notify, struct notifier_block *nb) { - blocking_notifier_chain_unregister(&ssr_notifiers, nb); + return srcu_notifier_chain_unregister(notify, nb); } EXPORT_SYMBOL_GPL(qcom_unregister_ssr_notifier); +static int ssr_notify_prepare(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_BEFORE_POWERUP, &data); + return 0; +} + +static int ssr_notify_start(struct rproc_subdev *subdev) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_AFTER_POWERUP, &data); + return 0; +} + +static void ssr_notify_stop(struct rproc_subdev *subdev, bool crashed) +{ + struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = crashed, + }; + + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_BEFORE_SHUTDOWN, &data); +} + static void ssr_notify_unprepare(struct rproc_subdev *subdev) { struct qcom_rproc_ssr *ssr = to_ssr_subdev(subdev); + struct qcom_ssr_notify_data data = { + .name = ssr->info->name, + .crashed = false, + }; - blocking_notifier_call_chain(&ssr_notifiers, 0, (void *)ssr->name); + srcu_notifier_call_chain(&ssr->info->notifier_list, + QCOM_SSR_AFTER_SHUTDOWN, &data); } /** @@ -229,12 +323,24 @@ static void ssr_notify_unprepare(struct rproc_subdev *subdev) * @ssr_name: identifier to use for notifications originating from @rproc * * As the @ssr is registered with the @rproc SSR events will be sent to all - * registered listeners in the system as the remoteproc is shut down. 
+ * registered listeners for the remoteproc when it's SSR events occur + * (pre/post startup and pre/post shutdown). */ void qcom_add_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr, const char *ssr_name) { - ssr->name = ssr_name; + struct qcom_ssr_subsystem *info; + + info = qcom_ssr_get_subsys(ssr_name); + if (IS_ERR(info)) { + dev_err(&rproc->dev, "Failed to add ssr subdevice\n"); + return; + } + + ssr->info = info; + ssr->subdev.prepare = ssr_notify_prepare; + ssr->subdev.start = ssr_notify_start; + ssr->subdev.stop = ssr_notify_stop; ssr->subdev.unprepare = ssr_notify_unprepare; rproc_add_subdev(rproc, &ssr->subdev); @@ -249,6 +355,7 @@ EXPORT_SYMBOL_GPL(qcom_add_ssr_subdev); void qcom_remove_ssr_subdev(struct rproc *rproc, struct qcom_rproc_ssr *ssr) { rproc_remove_subdev(rproc, &ssr->subdev); + ssr->info = NULL; } EXPORT_SYMBOL_GPL(qcom_remove_ssr_subdev); diff --git a/drivers/remoteproc/qcom_common.h b/drivers/remoteproc/qcom_common.h index 34e5188187dc..dfc641c3a98b 100644 --- a/drivers/remoteproc/qcom_common.h +++ b/drivers/remoteproc/qcom_common.h @@ -26,10 +26,11 @@ struct qcom_rproc_subdev { struct qcom_smd_edge *edge; }; +struct qcom_ssr_subsystem; + struct qcom_rproc_ssr { struct rproc_subdev subdev; - - const char *name; + struct qcom_ssr_subsystem *info; }; void qcom_add_glink_subdev(struct rproc *rproc, struct qcom_rproc_glink *glink, diff --git a/drivers/remoteproc/qcom_pil_info.c b/drivers/remoteproc/qcom_pil_info.c new file mode 100644 index 000000000000..5521c4437ffa --- /dev/null +++ b/drivers/remoteproc/qcom_pil_info.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2019-2020 Linaro Ltd. + */ +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_address.h> +#include "qcom_pil_info.h" + +/* + * The PIL relocation information region is used to communicate memory regions + * occupied by co-processor firmware for post mortem crash analysis. + * + * It consists of an array of entries with an 8 byte textual identifier of the + * region followed by a 64 bit base address and 32 bit size, both little + * endian. + */ +#define PIL_RELOC_NAME_LEN 8 +#define PIL_RELOC_ENTRY_SIZE (PIL_RELOC_NAME_LEN + sizeof(__le64) + sizeof(__le32)) + +struct pil_reloc { + void __iomem *base; + size_t num_entries; +}; + +static struct pil_reloc _reloc __read_mostly; +static DEFINE_MUTEX(pil_reloc_lock); + +static int qcom_pil_info_init(void) +{ + struct device_node *np; + struct resource imem; + void __iomem *base; + int ret; + + /* Already initialized? 
*/ + if (_reloc.base) + return 0; + + np = of_find_compatible_node(NULL, NULL, "qcom,pil-reloc-info"); + if (!np) + return -ENOENT; + + ret = of_address_to_resource(np, 0, &imem); + of_node_put(np); + if (ret < 0) + return ret; + + base = ioremap(imem.start, resource_size(&imem)); + if (!base) { + pr_err("failed to map PIL relocation info region\n"); + return -ENOMEM; + } + + memset_io(base, 0, resource_size(&imem)); + + _reloc.base = base; + _reloc.num_entries = resource_size(&imem) / PIL_RELOC_ENTRY_SIZE; + + return 0; +} + +/** + * qcom_pil_info_store() - store PIL information of image in IMEM + * @image: name of the image + * @base: base address of the loaded image + * @size: size of the loaded image + * + * Return: 0 on success, negative errno on failure + */ +int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size) +{ + char buf[PIL_RELOC_NAME_LEN]; + void __iomem *entry; + int ret; + int i; + + mutex_lock(&pil_reloc_lock); + ret = qcom_pil_info_init(); + if (ret < 0) { + mutex_unlock(&pil_reloc_lock); + return ret; + } + + for (i = 0; i < _reloc.num_entries; i++) { + entry = _reloc.base + i * PIL_RELOC_ENTRY_SIZE; + + memcpy_fromio(buf, entry, PIL_RELOC_NAME_LEN); + + /* + * An empty record means we didn't find it, given that the + * records are packed. + */ + if (!buf[0]) + goto found_unused; + + if (!strncmp(buf, image, PIL_RELOC_NAME_LEN)) + goto found_existing; + } + + pr_warn("insufficient PIL info slots\n"); + mutex_unlock(&pil_reloc_lock); + return -ENOMEM; + +found_unused: + memcpy_toio(entry, image, PIL_RELOC_NAME_LEN); +found_existing: + /* Use two writel() as base is only aligned to 4 bytes on odd entries */ + writel(base, entry + PIL_RELOC_NAME_LEN); + writel((u64)base >> 32, entry + PIL_RELOC_NAME_LEN + 4); + writel(size, entry + PIL_RELOC_NAME_LEN + sizeof(__le64)); + mutex_unlock(&pil_reloc_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(qcom_pil_info_store); + +static void __exit pil_reloc_exit(void) +{ + mutex_lock(&pil_reloc_lock); + iounmap(_reloc.base); + _reloc.base = NULL; + mutex_unlock(&pil_reloc_lock); +} +module_exit(pil_reloc_exit); + +MODULE_DESCRIPTION("Qualcomm PIL relocation info"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/remoteproc/qcom_pil_info.h b/drivers/remoteproc/qcom_pil_info.h new file mode 100644 index 000000000000..0dce6142935e --- /dev/null +++ b/drivers/remoteproc/qcom_pil_info.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __QCOM_PIL_INFO_H__ +#define __QCOM_PIL_INFO_H__ + +#include <linux/types.h> + +int qcom_pil_info_store(const char *image, phys_addr_t base, size_t size); + +#endif diff --git a/drivers/remoteproc/qcom_q6v5.c b/drivers/remoteproc/qcom_q6v5.c index 111a442c993c..fd6fd36268d9 100644 --- a/drivers/remoteproc/qcom_q6v5.c +++ b/drivers/remoteproc/qcom_q6v5.c @@ -153,6 +153,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5) { int ret; + q6v5->running = false; + qcom_smem_state_update_bits(q6v5->state, BIT(q6v5->stop_bit), BIT(q6v5->stop_bit)); diff --git a/drivers/remoteproc/qcom_q6v5_adsp.c b/drivers/remoteproc/qcom_q6v5_adsp.c index d2a2574dcf35..efb2c1aa80a3 100644 --- a/drivers/remoteproc/qcom_q6v5_adsp.c +++ b/drivers/remoteproc/qcom_q6v5_adsp.c @@ -26,6 +26,7 @@ #include <linux/soc/qcom/smem_state.h> #include "qcom_common.h" +#include "qcom_pil_info.h" #include "qcom_q6v5.h" #include "remoteproc_internal.h" @@ -82,6 +83,7 @@ struct qcom_adsp { unsigned int halt_lpass; int crash_reason_smem; + const char *info_name; struct completion start_done; struct completion stop_done; @@ 
-164,10 +166,17 @@ reset: static int adsp_load(struct rproc *rproc, const struct firmware *fw) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; + + ret = qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0, + adsp->mem_region, adsp->mem_phys, + adsp->mem_size, &adsp->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size); - return qcom_mdt_load_no_init(adsp->dev, fw, rproc->firmware, 0, - adsp->mem_region, adsp->mem_phys, adsp->mem_size, - &adsp->mem_reloc); + return 0; } static int adsp_start(struct rproc *rproc) @@ -436,6 +445,7 @@ static int adsp_probe(struct platform_device *pdev) adsp = (struct qcom_adsp *)rproc->priv; adsp->dev = &pdev->dev; adsp->rproc = rproc; + adsp->info_name = desc->sysmon_name; platform_set_drvdata(pdev, adsp); ret = adsp_alloc_memory_region(adsp); diff --git a/drivers/remoteproc/qcom_q6v5_ipa_notify.c b/drivers/remoteproc/qcom_q6v5_ipa_notify.c deleted file mode 100644 index e1c10a128bfd..000000000000 --- a/drivers/remoteproc/qcom_q6v5_ipa_notify.c +++ /dev/null @@ -1,85 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 - -/* - * Qualcomm IPA notification subdev support - * - * Copyright (C) 2019 Linaro Ltd. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/remoteproc.h> -#include <linux/remoteproc/qcom_q6v5_ipa_notify.h> - -static void -ipa_notify_common(struct rproc_subdev *subdev, enum qcom_rproc_event event) -{ - struct qcom_rproc_ipa_notify *ipa_notify; - qcom_ipa_notify_t notify; - - ipa_notify = container_of(subdev, struct qcom_rproc_ipa_notify, subdev); - notify = ipa_notify->notify; - if (notify) - notify(ipa_notify->data, event); -} - -static int ipa_notify_prepare(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_STARTING); - - return 0; -} - -static int ipa_notify_start(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_RUNNING); - - return 0; -} - -static void ipa_notify_stop(struct rproc_subdev *subdev, bool crashed) - -{ - ipa_notify_common(subdev, crashed ? 
MODEM_CRASHED : MODEM_STOPPING); -} - -static void ipa_notify_unprepare(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_OFFLINE); -} - -static void ipa_notify_removing(struct rproc_subdev *subdev) -{ - ipa_notify_common(subdev, MODEM_REMOVING); -} - -/* Register the IPA notification subdevice with the Q6V5 MSS remoteproc */ -void qcom_add_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify) -{ - ipa_notify->notify = NULL; - ipa_notify->data = NULL; - ipa_notify->subdev.prepare = ipa_notify_prepare; - ipa_notify->subdev.start = ipa_notify_start; - ipa_notify->subdev.stop = ipa_notify_stop; - ipa_notify->subdev.unprepare = ipa_notify_unprepare; - - rproc_add_subdev(rproc, &ipa_notify->subdev); -} -EXPORT_SYMBOL_GPL(qcom_add_ipa_notify_subdev); - -/* Remove the IPA notification subdevice */ -void qcom_remove_ipa_notify_subdev(struct rproc *rproc, - struct qcom_rproc_ipa_notify *ipa_notify) -{ - struct rproc_subdev *subdev = &ipa_notify->subdev; - - ipa_notify_removing(subdev); - - rproc_remove_subdev(rproc, subdev); - ipa_notify->notify = NULL; /* Make it obvious */ -} -EXPORT_SYMBOL_GPL(qcom_remove_ipa_notify_subdev); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("Qualcomm IPA notification remoteproc subdev"); diff --git a/drivers/remoteproc/qcom_q6v5_mss.c b/drivers/remoteproc/qcom_q6v5_mss.c index feb70283b6a2..c401bcc263fa 100644 --- a/drivers/remoteproc/qcom_q6v5_mss.c +++ b/drivers/remoteproc/qcom_q6v5_mss.c @@ -9,6 +9,7 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/devcoredump.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/kernel.h> @@ -22,19 +23,22 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/remoteproc.h> -#include "linux/remoteproc/qcom_q6v5_ipa_notify.h" #include <linux/reset.h> #include <linux/soc/qcom/mdt_loader.h> #include <linux/iopoll.h> +#include <linux/slab.h> #include "remoteproc_internal.h" #include "qcom_common.h" +#include "qcom_pil_info.h" #include "qcom_q6v5.h" #include <linux/qcom_scm.h> #define MPSS_CRASH_REASON_SMEM 421 +#define MBA_LOG_SIZE SZ_4K + /* RMB Status Register Values */ #define RMB_PBL_SUCCESS 0x1 @@ -111,8 +115,6 @@ #define QDSP6SS_SLEEP 0x3C #define QDSP6SS_BOOT_CORE_START 0x400 #define QDSP6SS_BOOT_CMD 0x404 -#define QDSP6SS_BOOT_STATUS 0x408 -#define BOOT_STATUS_TIMEOUT_US 200 #define BOOT_FSM_TIMEOUT 10000 struct reg_info { @@ -139,6 +141,7 @@ struct rproc_hexagon_res { int version; bool need_mem_protection; bool has_alt_reset; + bool has_mba_logs; bool has_spare_reg; }; @@ -178,15 +181,14 @@ struct q6v5 { int active_reg_count; int proxy_reg_count; - bool running; - bool dump_mba_loaded; - unsigned long dump_segment_mask; - unsigned long dump_complete_mask; + size_t current_dump_size; + size_t total_dump_size; phys_addr_t mba_phys; void *mba_region; size_t mba_size; + size_t dp_size; phys_addr_t mpss_phys; phys_addr_t mpss_reloc; @@ -195,10 +197,10 @@ struct q6v5 { struct qcom_rproc_glink glink_subdev; struct qcom_rproc_subdev smd_subdev; struct qcom_rproc_ssr ssr_subdev; - struct qcom_rproc_ipa_notify ipa_notify_subdev; struct qcom_sysmon *sysmon; bool need_mem_protection; bool has_alt_reset; + bool has_mba_logs; bool has_spare_reg; int mpss_perm; int mba_perm; @@ -403,11 +405,33 @@ static int q6v5_xfer_mem_ownership(struct q6v5 *qproc, int *current_perm, current_perm, next, perms); } +static void q6v5_debug_policy_load(struct q6v5 *qproc) +{ + const struct firmware *dp_fw; + + if 
(request_firmware_direct(&dp_fw, "msadp", qproc->dev)) + return; + + if (SZ_1M + dp_fw->size <= qproc->mba_size) { + memcpy(qproc->mba_region + SZ_1M, dp_fw->data, dp_fw->size); + qproc->dp_size = dp_fw->size; + } + + release_firmware(dp_fw); +} + static int q6v5_load(struct rproc *rproc, const struct firmware *fw) { struct q6v5 *qproc = rproc->priv; + /* MBA is restricted to a maximum size of 1M */ + if (fw->size > qproc->mba_size || fw->size > SZ_1M) { + dev_err(qproc->dev, "MBA firmware load failed\n"); + return -EINVAL; + } + memcpy(qproc->mba_region, fw->data, fw->size); + q6v5_debug_policy_load(qproc); return 0; } @@ -510,6 +534,26 @@ static int q6v5_rmb_mba_wait(struct q6v5 *qproc, u32 status, int ms) return val; } +static void q6v5_dump_mba_logs(struct q6v5 *qproc) +{ + struct rproc *rproc = qproc->rproc; + void *data; + + if (!qproc->has_mba_logs) + return; + + if (q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, + qproc->mba_size)) + return; + + data = vmalloc(MBA_LOG_SIZE); + if (!data) + return; + + memcpy(data, qproc->mba_region, MBA_LOG_SIZE); + dev_coredumpv(&rproc->dev, data, MBA_LOG_SIZE, GFP_KERNEL); +} + static int q6v5proc_reset(struct q6v5 *qproc) { u32 val; @@ -578,13 +622,15 @@ static int q6v5proc_reset(struct q6v5 *qproc) /* De-assert the Q6 stop core signal */ writel(1, qproc->reg_base + QDSP6SS_BOOT_CORE_START); + /* Wait for 10 us for any staggering logic to settle */ + usleep_range(10, 20); + /* Trigger the boot FSM to start the Q6 out-of-reset sequence */ writel(1, qproc->reg_base + QDSP6SS_BOOT_CMD); - /* Poll the QDSP6SS_BOOT_STATUS for FSM completion */ - ret = readl_poll_timeout(qproc->reg_base + QDSP6SS_BOOT_STATUS, - val, (val & BIT(0)) != 0, 1, - BOOT_STATUS_TIMEOUT_US); + /* Poll the MSS_STATUS for FSM completion */ + ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_MSS_STATUS, + val, (val & BIT(0)) != 0, 10, BOOT_FSM_TIMEOUT); if (ret) { dev_err(qproc->dev, "Boot FSM failed to complete.\n"); /* Reset the modem so that boot FSM is in reset state */ @@ -828,6 +874,7 @@ static int q6v5_mba_load(struct q6v5 *qproc) { int ret; int xfermemop_ret; + bool mba_load_err = false; qcom_q6v5_prepare(&qproc->q6v5); @@ -894,6 +941,10 @@ static int q6v5_mba_load(struct q6v5 *qproc) } writel(qproc->mba_phys, qproc->rmb_base + RMB_MBA_IMAGE_REG); + if (qproc->dp_size) { + writel(qproc->mba_phys + SZ_1M, qproc->rmb_base + RMB_PMI_CODE_START_REG); + writel(qproc->dp_size, qproc->rmb_base + RMB_PMI_CODE_LENGTH_REG); + } ret = q6v5proc_reset(qproc); if (ret) @@ -917,7 +968,7 @@ halt_axi_ports: q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_nc); - + mba_load_err = true; reclaim_mba: xfermemop_ret = q6v5_xfer_mem_ownership(qproc, &qproc->mba_perm, true, false, qproc->mba_phys, @@ -925,6 +976,8 @@ reclaim_mba: if (xfermemop_ret) { dev_err(qproc->dev, "Failed to reclaim mba buffer, system may become unstable\n"); + } else if (mba_load_err) { + q6v5_dump_mba_logs(qproc); } disable_active_clks: @@ -960,6 +1013,7 @@ static void q6v5_mba_reclaim(struct q6v5 *qproc) u32 val; qproc->dump_mba_loaded = false; + qproc->dp_size = 0; q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_q6); q6v5proc_halt_axi_port(qproc, qproc->halt_map, qproc->halt_modem); @@ -1138,15 +1192,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc) } else if (phdr->p_filesz) { /* Replace "xxx.xxx" with "xxx.bxx" */ sprintf(fw_name + 
fw_name_len - 3, "b%02d", i); - ret = request_firmware(&seg_fw, fw_name, qproc->dev); + ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev, + ptr, phdr->p_filesz); if (ret) { dev_err(qproc->dev, "failed to load %s\n", fw_name); iounmap(ptr); goto release_firmware; } - memcpy(ptr, seg_fw->data, seg_fw->size); - release_firmware(seg_fw); } @@ -1189,6 +1242,8 @@ static int q6v5_mpss_load(struct q6v5 *qproc) else if (ret < 0) dev_err(qproc->dev, "MPSS authentication failed: %d\n", ret); + qcom_pil_info_store("modem", qproc->mpss_phys, qproc->mpss_size); + release_firmware: release_firmware(fw); out: @@ -1199,11 +1254,10 @@ out: static void qcom_q6v5_dump_segment(struct rproc *rproc, struct rproc_dump_segment *segment, - void *dest) + void *dest, size_t cp_offset, size_t size) { int ret = 0; struct q6v5 *qproc = rproc->priv; - unsigned long mask = BIT((unsigned long)segment->priv); int offset = segment->da - qproc->mpss_reloc; void *ptr = NULL; @@ -1220,19 +1274,19 @@ static void qcom_q6v5_dump_segment(struct rproc *rproc, } if (!ret) - ptr = ioremap_wc(qproc->mpss_phys + offset, segment->size); + ptr = ioremap_wc(qproc->mpss_phys + offset + cp_offset, size); if (ptr) { - memcpy(dest, ptr, segment->size); + memcpy(dest, ptr, size); iounmap(ptr); } else { - memset(dest, 0xff, segment->size); + memset(dest, 0xff, size); } - qproc->dump_segment_mask |= mask; + qproc->current_dump_size += size; /* Reclaim mba after copying segments */ - if (qproc->dump_segment_mask == qproc->dump_complete_mask) { + if (qproc->current_dump_size == qproc->total_dump_size) { if (qproc->dump_mba_loaded) { /* Try to reset ownership back to Q6 */ q6v5_xfer_mem_ownership(qproc, &qproc->mpss_perm, @@ -1254,7 +1308,8 @@ static int q6v5_start(struct rproc *rproc) if (ret) return ret; - dev_info(qproc->dev, "MBA booted, loading mpss\n"); + dev_info(qproc->dev, "MBA booted with%s debug policy, loading mpss\n", + qproc->dp_size ? 
"" : "out"); ret = q6v5_mpss_load(qproc); if (ret) @@ -1274,13 +1329,13 @@ static int q6v5_start(struct rproc *rproc) "Failed to reclaim mba buffer system may become unstable\n"); /* Reset Dump Segment Mask */ - qproc->dump_segment_mask = 0; - qproc->running = true; + qproc->current_dump_size = 0; return 0; reclaim_mpss: q6v5_mba_reclaim(qproc); + q6v5_dump_mba_logs(qproc); return ret; } @@ -1290,8 +1345,6 @@ static int q6v5_stop(struct rproc *rproc) struct q6v5 *qproc = (struct q6v5 *)rproc->priv; int ret; - qproc->running = false; - ret = qcom_q6v5_request_stop(&qproc->q6v5); if (ret == -ETIMEDOUT) dev_err(qproc->dev, "timed out on wait\n"); @@ -1323,7 +1376,7 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc, ehdr = (struct elf32_hdr *)fw->data; phdrs = (struct elf32_phdr *)(ehdr + 1); - qproc->dump_complete_mask = 0; + qproc->total_dump_size = 0; for (i = 0; i < ehdr->e_phnum; i++) { phdr = &phdrs[i]; @@ -1334,11 +1387,11 @@ static int qcom_q6v5_register_dump_segments(struct rproc *rproc, ret = rproc_coredump_add_custom_segment(rproc, phdr->p_paddr, phdr->p_memsz, qcom_q6v5_dump_segment, - (void *)i); + NULL); if (ret) break; - qproc->dump_complete_mask |= BIT(i); + qproc->total_dump_size += phdr->p_memsz; } release_firmware(fw); @@ -1553,39 +1606,6 @@ static int q6v5_alloc_memory_region(struct q6v5 *qproc) return 0; } -#if IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) - -/* Register IPA notification function */ -int qcom_register_ipa_notify(struct rproc *rproc, qcom_ipa_notify_t notify, - void *data) -{ - struct qcom_rproc_ipa_notify *ipa_notify; - struct q6v5 *qproc = rproc->priv; - - if (!notify) - return -EINVAL; - - ipa_notify = &qproc->ipa_notify_subdev; - if (ipa_notify->notify) - return -EBUSY; - - ipa_notify->notify = notify; - ipa_notify->data = data; - - return 0; -} -EXPORT_SYMBOL_GPL(qcom_register_ipa_notify); - -/* Deregister IPA notification function */ -void qcom_deregister_ipa_notify(struct rproc *rproc) -{ - struct q6v5 *qproc = rproc->priv; - - qproc->ipa_notify_subdev.notify = NULL; -} -EXPORT_SYMBOL_GPL(qcom_deregister_ipa_notify); -#endif /* !IS_ENABLED(CONFIG_QCOM_Q6V5_IPA_NOTIFY) */ - static int q6v5_probe(struct platform_device *pdev) { const struct rproc_hexagon_res *desc; @@ -1700,6 +1720,7 @@ static int q6v5_probe(struct platform_device *pdev) qproc->version = desc->version; qproc->need_mem_protection = desc->need_mem_protection; + qproc->has_mba_logs = desc->has_mba_logs; ret = qcom_q6v5_init(&qproc->q6v5, pdev, rproc, MPSS_CRASH_REASON_SMEM, qcom_msa_handover); @@ -1711,7 +1732,6 @@ static int q6v5_probe(struct platform_device *pdev) qcom_add_glink_subdev(rproc, &qproc->glink_subdev, "mpss"); qcom_add_smd_subdev(rproc, &qproc->smd_subdev); qcom_add_ssr_subdev(rproc, &qproc->ssr_subdev, "mpss"); - qcom_add_ipa_notify_subdev(rproc, &qproc->ipa_notify_subdev); qproc->sysmon = qcom_add_sysmon_subdev(rproc, "modem", 0x12); if (IS_ERR(qproc->sysmon)) { ret = PTR_ERR(qproc->sysmon); @@ -1727,7 +1747,6 @@ static int q6v5_probe(struct platform_device *pdev) remove_sysmon_subdev: qcom_remove_sysmon_subdev(qproc->sysmon); remove_subdevs: - qcom_remove_ipa_notify_subdev(qproc->rproc, &qproc->ipa_notify_subdev); qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); @@ -1749,7 +1768,6 @@ static int q6v5_remove(struct platform_device *pdev) rproc_del(rproc); qcom_remove_sysmon_subdev(qproc->sysmon); - qcom_remove_ipa_notify_subdev(rproc, 
&qproc->ipa_notify_subdev); qcom_remove_ssr_subdev(rproc, &qproc->ssr_subdev); qcom_remove_smd_subdev(rproc, &qproc->smd_subdev); qcom_remove_glink_subdev(rproc, &qproc->glink_subdev); @@ -1791,6 +1809,7 @@ static const struct rproc_hexagon_res sc7180_mss = { }, .need_mem_protection = true, .has_alt_reset = false, + .has_mba_logs = true, .has_spare_reg = true, .version = MSS_SC7180, }; @@ -1826,6 +1845,7 @@ static const struct rproc_hexagon_res sdm845_mss = { }, .need_mem_protection = true, .has_alt_reset = true, + .has_mba_logs = false, .has_spare_reg = false, .version = MSS_SDM845, }; @@ -1853,6 +1873,7 @@ static const struct rproc_hexagon_res msm8998_mss = { }, .need_mem_protection = true, .has_alt_reset = false, + .has_mba_logs = false, .has_spare_reg = false, .version = MSS_MSM8998, }; @@ -1883,6 +1904,7 @@ static const struct rproc_hexagon_res msm8996_mss = { }, .need_mem_protection = true, .has_alt_reset = false, + .has_mba_logs = false, .has_spare_reg = false, .version = MSS_MSM8996, }; @@ -1916,6 +1938,7 @@ static const struct rproc_hexagon_res msm8916_mss = { }, .need_mem_protection = false, .has_alt_reset = false, + .has_mba_logs = false, .has_spare_reg = false, .version = MSS_MSM8916, }; @@ -1957,6 +1980,7 @@ static const struct rproc_hexagon_res msm8974_mss = { }, .need_mem_protection = false, .has_alt_reset = false, + .has_mba_logs = false, .has_spare_reg = false, .version = MSS_MSM8974, }; diff --git a/drivers/remoteproc/qcom_q6v5_pas.c b/drivers/remoteproc/qcom_q6v5_pas.c index 61791a03f648..3837f23995e0 100644 --- a/drivers/remoteproc/qcom_q6v5_pas.c +++ b/drivers/remoteproc/qcom_q6v5_pas.c @@ -25,6 +25,7 @@ #include <linux/soc/qcom/smem_state.h> #include "qcom_common.h" +#include "qcom_pil_info.h" #include "qcom_q6v5.h" #include "remoteproc_internal.h" @@ -64,6 +65,7 @@ struct qcom_adsp { int pas_id; int crash_reason_smem; bool has_aggre2_clk; + const char *info_name; struct completion start_done; struct completion stop_done; @@ -117,11 +119,17 @@ static void adsp_pds_disable(struct qcom_adsp *adsp, struct device **pds, static int adsp_load(struct rproc *rproc, const struct firmware *fw) { struct qcom_adsp *adsp = (struct qcom_adsp *)rproc->priv; + int ret; - return qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id, - adsp->mem_region, adsp->mem_phys, adsp->mem_size, - &adsp->mem_reloc); + ret = qcom_mdt_load(adsp->dev, fw, rproc->firmware, adsp->pas_id, + adsp->mem_region, adsp->mem_phys, adsp->mem_size, + &adsp->mem_reloc); + if (ret) + return ret; + qcom_pil_info_store(adsp->info_name, adsp->mem_phys, adsp->mem_size); + + return 0; } static int adsp_start(struct rproc *rproc) @@ -405,6 +413,7 @@ static int adsp_probe(struct platform_device *pdev) adsp->rproc = rproc; adsp->pas_id = desc->pas_id; adsp->has_aggre2_clk = desc->has_aggre2_clk; + adsp->info_name = desc->sysmon_name; platform_set_drvdata(pdev, adsp); device_wakeup_enable(adsp->dev); diff --git a/drivers/remoteproc/qcom_q6v5_wcss.c b/drivers/remoteproc/qcom_q6v5_wcss.c index 88c76b9417fa..8846ef0b0f1a 100644 --- a/drivers/remoteproc/qcom_q6v5_wcss.c +++ b/drivers/remoteproc/qcom_q6v5_wcss.c @@ -14,6 +14,7 @@ #include <linux/reset.h> #include <linux/soc/qcom/mdt_loader.h> #include "qcom_common.h" +#include "qcom_pil_info.h" #include "qcom_q6v5.h" #define WCSS_CRASH_REASON 421 @@ -424,10 +425,17 @@ static void *q6v5_wcss_da_to_va(struct rproc *rproc, u64 da, size_t len) static int q6v5_wcss_load(struct rproc *rproc, const struct firmware *fw) { struct q6v5_wcss *wcss = rproc->priv; + int ret; + + ret = 
qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware, + 0, wcss->mem_region, wcss->mem_phys, + wcss->mem_size, &wcss->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store("wcnss", wcss->mem_phys, wcss->mem_size); - return qcom_mdt_load_no_init(wcss->dev, fw, rproc->firmware, - 0, wcss->mem_region, wcss->mem_phys, - wcss->mem_size, &wcss->mem_reloc); + return ret; } static const struct rproc_ops q6v5_wcss_ops = { diff --git a/drivers/remoteproc/qcom_sysmon.c b/drivers/remoteproc/qcom_sysmon.c index 8d8996d714f0..9eb2f6bccea6 100644 --- a/drivers/remoteproc/qcom_sysmon.c +++ b/drivers/remoteproc/qcom_sysmon.c @@ -71,7 +71,7 @@ static LIST_HEAD(sysmon_list); /** * sysmon_send_event() - send notification of other remote's SSR event * @sysmon: sysmon context - * @name: other remote's name + * @event: sysmon event context */ static void sysmon_send_event(struct qcom_sysmon *sysmon, const struct sysmon_event *event) @@ -343,7 +343,7 @@ static void ssctl_request_shutdown(struct qcom_sysmon *sysmon) /** * ssctl_send_event() - send notification of other remote's SSR event * @sysmon: sysmon context - * @name: other remote's name + * @event: sysmon event context */ static void ssctl_send_event(struct qcom_sysmon *sysmon, const struct sysmon_event *event) diff --git a/drivers/remoteproc/qcom_wcnss.c b/drivers/remoteproc/qcom_wcnss.c index 5d65e1a9329a..e2573f79a137 100644 --- a/drivers/remoteproc/qcom_wcnss.c +++ b/drivers/remoteproc/qcom_wcnss.c @@ -27,6 +27,7 @@ #include "qcom_common.h" #include "remoteproc_internal.h" +#include "qcom_pil_info.h" #include "qcom_wcnss.h" #define WCNSS_CRASH_REASON_SMEM 422 @@ -145,10 +146,17 @@ void qcom_wcnss_assign_iris(struct qcom_wcnss *wcnss, static int wcnss_load(struct rproc *rproc, const struct firmware *fw) { struct qcom_wcnss *wcnss = (struct qcom_wcnss *)rproc->priv; + int ret; + + ret = qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID, + wcnss->mem_region, wcnss->mem_phys, + wcnss->mem_size, &wcnss->mem_reloc); + if (ret) + return ret; + + qcom_pil_info_store("wcnss", wcnss->mem_phys, wcnss->mem_size); - return qcom_mdt_load(wcnss->dev, fw, rproc->firmware, WCNSS_PAS_ID, - wcnss->mem_region, wcnss->mem_phys, - wcnss->mem_size, &wcnss->mem_reloc); + return 0; } static void wcnss_indicate_nv_download(struct qcom_wcnss *wcnss) diff --git a/drivers/remoteproc/remoteproc_cdev.c b/drivers/remoteproc/remoteproc_cdev.c new file mode 100644 index 000000000000..b19ea3057bde --- /dev/null +++ b/drivers/remoteproc/remoteproc_cdev.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Character device interface driver for Remoteproc framework. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/cdev.h> +#include <linux/compat.h> +#include <linux/fs.h> +#include <linux/module.h> +#include <linux/remoteproc.h> +#include <linux/uaccess.h> +#include <uapi/linux/remoteproc_cdev.h> + +#include "remoteproc_internal.h" + +#define NUM_RPROC_DEVICES 64 +static dev_t rproc_major; + +static ssize_t rproc_cdev_write(struct file *filp, const char __user *buf, size_t len, loff_t *pos) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + int ret = 0; + char cmd[10]; + + if (!len || len > sizeof(cmd)) + return -EINVAL; + + ret = copy_from_user(cmd, buf, len); + if (ret) + return -EFAULT; + + if (!strncmp(cmd, "start", len)) { + if (rproc->state == RPROC_RUNNING) + return -EBUSY; + + ret = rproc_boot(rproc); + } else if (!strncmp(cmd, "stop", len)) { + if (rproc->state != RPROC_RUNNING) + return -EINVAL; + + rproc_shutdown(rproc); + } else { + dev_err(&rproc->dev, "Unrecognized option\n"); + ret = -EINVAL; + } + + return ret ? ret : len; +} + +static long rproc_device_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + struct rproc *rproc = container_of(filp->f_inode->i_cdev, struct rproc, cdev); + void __user *argp = (void __user *)arg; + s32 param; + + switch (ioctl) { + case RPROC_SET_SHUTDOWN_ON_RELEASE: + if (copy_from_user(&param, argp, sizeof(s32))) + return -EFAULT; + + rproc->cdev_put_on_release = !!param; + break; + case RPROC_GET_SHUTDOWN_ON_RELEASE: + param = (s32)rproc->cdev_put_on_release; + if (copy_to_user(argp, &param, sizeof(s32))) + return -EFAULT; + + break; + default: + dev_err(&rproc->dev, "Unsupported ioctl\n"); + return -EINVAL; + } + + return 0; +} + +static int rproc_cdev_release(struct inode *inode, struct file *filp) +{ + struct rproc *rproc = container_of(inode->i_cdev, struct rproc, cdev); + + if (rproc->cdev_put_on_release && rproc->state == RPROC_RUNNING) + rproc_shutdown(rproc); + + return 0; +} + +static const struct file_operations rproc_fops = { + .write = rproc_cdev_write, + .unlocked_ioctl = rproc_device_ioctl, + .compat_ioctl = compat_ptr_ioctl, + .release = rproc_cdev_release, +}; + +int rproc_char_device_add(struct rproc *rproc) +{ + int ret; + + cdev_init(&rproc->cdev, &rproc_fops); + rproc->cdev.owner = THIS_MODULE; + + rproc->dev.devt = MKDEV(MAJOR(rproc_major), rproc->index); + cdev_set_parent(&rproc->cdev, &rproc->dev.kobj); + ret = cdev_add(&rproc->cdev, rproc->dev.devt, 1); + if (ret < 0) + dev_err(&rproc->dev, "Failed to add char dev for %s\n", rproc->name); + + return ret; +} + +void rproc_char_device_remove(struct rproc *rproc) +{ + __unregister_chrdev(MAJOR(rproc->dev.devt), rproc->index, 1, "remoteproc"); +} + +void __init rproc_init_cdev(void) +{ + int ret; + + ret = alloc_chrdev_region(&rproc_major, 0, NUM_RPROC_DEVICES, "remoteproc"); + if (ret < 0) + pr_err("Failed to alloc rproc_cdev region, err %d\n", ret); +} diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c index 9f04c30c4aaf..7f90eeea67e2 100644 --- a/drivers/remoteproc/remoteproc_core.c +++ b/drivers/remoteproc/remoteproc_core.c @@ -26,10 +26,8 @@ #include <linux/firmware.h> #include <linux/string.h> #include <linux/debugfs.h> -#include <linux/devcoredump.h> #include <linux/rculist.h> #include <linux/remoteproc.h> -#include <linux/pm_runtime.h> #include <linux/iommu.h> #include <linux/idr.h> #include <linux/elf.h> @@ -41,7 +39,6 @@ #include <linux/platform_device.h> #include "remoteproc_internal.h" -#include "remoteproc_elf_helpers.h" #define HIGH_BITS_MASK 0xFFFFFFFF00000000ULL @@
-244,6 +241,7 @@ EXPORT_SYMBOL(rproc_da_to_va); * * Return: a valid pointer on carveout entry on success or NULL on failure. */ +__printf(2, 3) struct rproc_mem_entry * rproc_find_carveout_by_name(struct rproc *rproc, const char *name, ...) { @@ -411,10 +409,22 @@ void rproc_free_vring(struct rproc_vring *rvring) idr_remove(&rproc->notifyids, rvring->notifyid); - /* reset resource entry info */ - rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset; - rsc->vring[idx].da = 0; - rsc->vring[idx].notifyid = -1; + /* + * At this point rproc_stop() has been called and the installed resource + * table in the remote processor memory may no longer be accessible. As + * such and as per rproc_stop(), rproc->table_ptr points to the cached + * resource table (rproc->cached_table). The cached resource table is + * only available when a remote processor has been booted by the + * remoteproc core, otherwise it is NULL. + * + * Based on the above, reset the virtio device section in the cached + * resource table only if there is one to work with. + */ + if (rproc->table_ptr) { + rsc = (void *)rproc->table_ptr + rvring->rvdev->rsc_offset; + rsc->vring[idx].da = 0; + rsc->vring[idx].notifyid = -1; + } } static int rproc_vdev_do_start(struct rproc_subdev *subdev) @@ -967,6 +977,7 @@ EXPORT_SYMBOL(rproc_add_carveout); * This function allocates a rproc_mem_entry struct and fill it with parameters * provided by client. */ +__printf(8, 9) struct rproc_mem_entry * rproc_mem_entry_init(struct device *dev, void *va, dma_addr_t dma, size_t len, u32 da, @@ -1010,6 +1021,7 @@ EXPORT_SYMBOL(rproc_mem_entry_init); * This function allocates a rproc_mem_entry struct and fill it with parameters * provided by client. */ +__printf(5, 6) struct rproc_mem_entry * rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, u32 da, const char *name, ...) @@ -1034,6 +1046,29 @@ rproc_of_resm_mem_entry_init(struct device *dev, u32 of_resm_idx, size_t len, } EXPORT_SYMBOL(rproc_of_resm_mem_entry_init); +/** + * rproc_of_parse_firmware() - parse and return the firmware-name + * @dev: pointer on device struct representing a rproc + * @index: index to use for the firmware-name retrieval + * @fw_name: pointer to a character string, in which the firmware + * name is returned on success and unmodified otherwise. + * + * This is an OF helper function that parses a device's DT node for + * the "firmware-name" property and returns the firmware name pointer + * in @fw_name on success. + * + * Return: 0 on success, or an appropriate failure. + */ +int rproc_of_parse_firmware(struct device *dev, int index, const char **fw_name) +{ + int ret; + + ret = of_property_read_string_index(dev->of_node, "firmware-name", + index, fw_name); + return ret ? ret : 0; +} +EXPORT_SYMBOL(rproc_of_parse_firmware); + /* * A lookup table for resource handlers. The indices are defined in * enum fw_resource_type. 
@@ -1239,19 +1274,6 @@ static int rproc_alloc_registered_carveouts(struct rproc *rproc) return 0; } -/** - * rproc_coredump_cleanup() - clean up dump_segments list - * @rproc: the remote processor handle - */ -static void rproc_coredump_cleanup(struct rproc *rproc) -{ - struct rproc_dump_segment *entry, *tmp; - - list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) { - list_del(&entry->node); - kfree(entry); - } -} /** * rproc_resource_cleanup() - clean up and free all acquired resources @@ -1260,7 +1282,7 @@ static void rproc_coredump_cleanup(struct rproc *rproc) * This function will free all resources acquired for @rproc, and it * is called whenever @rproc either shuts down or fails to boot. */ -static void rproc_resource_cleanup(struct rproc *rproc) +void rproc_resource_cleanup(struct rproc *rproc) { struct rproc_mem_entry *entry, *tmp; struct rproc_debug_trace *trace, *ttmp; @@ -1304,6 +1326,7 @@ static void rproc_resource_cleanup(struct rproc *rproc) rproc_coredump_cleanup(rproc); } +EXPORT_SYMBOL(rproc_resource_cleanup); static int rproc_start(struct rproc *rproc, const struct firmware *fw) { @@ -1370,6 +1393,48 @@ reset_table_ptr: return ret; } +static int rproc_attach(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + ret = rproc_prepare_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to prepare subdevices for %s: %d\n", + rproc->name, ret); + goto out; + } + + /* Attach to the remote processor */ + ret = rproc_attach_device(rproc); + if (ret) { + dev_err(dev, "can't attach to rproc %s: %d\n", + rproc->name, ret); + goto unprepare_subdevices; + } + + /* Start any subdevices for the remote processor */ + ret = rproc_start_subdevices(rproc); + if (ret) { + dev_err(dev, "failed to probe subdevices for %s: %d\n", + rproc->name, ret); + goto stop_rproc; + } + + rproc->state = RPROC_RUNNING; + + dev_info(dev, "remote processor %s is now attached\n", rproc->name); + + return 0; + +stop_rproc: + rproc->ops->stop(rproc); +unprepare_subdevices: + rproc_unprepare_subdevices(rproc); +out: + return ret; +} + /* * take a firmware and boot a remote processor with it. */ @@ -1383,12 +1448,6 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) if (ret) return ret; - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync failed: %d\n", ret); - return ret; - } - dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size); /* @@ -1398,7 +1457,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) ret = rproc_enable_iommu(rproc); if (ret) { dev_err(dev, "can't enable iommu: %d\n", ret); - goto put_pm_runtime; + return ret; } /* Prepare rproc for firmware loading if needed */ @@ -1452,8 +1511,63 @@ unprepare_rproc: rproc_unprepare_device(rproc); disable_iommu: rproc_disable_iommu(rproc); -put_pm_runtime: - pm_runtime_put(dev); + return ret; +} + +/* + * Attach to remote processor - similar to rproc_fw_boot() but without + * the steps that deal with the firmware image. + */ +static int rproc_actuate(struct rproc *rproc) +{ + struct device *dev = &rproc->dev; + int ret; + + /* + * if enabling an IOMMU isn't relevant for this rproc, this is + * just a nop + */ + ret = rproc_enable_iommu(rproc); + if (ret) { + dev_err(dev, "can't enable iommu: %d\n", ret); + return ret; + } + + /* reset max_notifyid */ + rproc->max_notifyid = -1; + + /* reset handled vdev */ + rproc->nb_vdev = 0; + + /* + * Handle firmware resources required to attach to a remote processor. 
+ * Because we are attaching rather than booting the remote processor, + * we expect the platform driver to properly set rproc->table_ptr. + */ + ret = rproc_handle_resources(rproc, rproc_loading_handlers); + if (ret) { + dev_err(dev, "Failed to process resources: %d\n", ret); + goto disable_iommu; + } + + /* Allocate carveout resources associated to rproc */ + ret = rproc_alloc_registered_carveouts(rproc); + if (ret) { + dev_err(dev, "Failed to allocate associated carveouts: %d\n", + ret); + goto clean_up_resources; + } + + ret = rproc_attach(rproc); + if (ret) + goto clean_up_resources; + + return 0; + +clean_up_resources: + rproc_resource_cleanup(rproc); +disable_iommu: + rproc_disable_iommu(rproc); return ret; } @@ -1479,6 +1593,15 @@ static int rproc_trigger_auto_boot(struct rproc *rproc) int ret; /* + * Since the remote processor is in a detached state, it has already + * been booted by another entity. As such there is no point in waiting + * for a firmware image to be loaded, we can simply initiate the process + * of attaching to it immediately. + */ + if (rproc->state == RPROC_DETACHED) + return rproc_boot(rproc); + + /* * We're initiating an asynchronous firmware loading, so we can * be built-in kernel code, without hanging the boot process. */ @@ -1513,187 +1636,19 @@ static int rproc_stop(struct rproc *rproc, bool crashed) rproc->state = RPROC_OFFLINE; - dev_info(dev, "stopped remote processor %s\n", rproc->name); - - return 0; -} - -/** - * rproc_coredump_add_segment() - add segment of device memory to coredump - * @rproc: handle of a remote processor - * @da: device address - * @size: size of segment - * - * Add device memory to the list of segments to be included in a coredump for - * the remoteproc. - * - * Return: 0 on success, negative errno on error. - */ -int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size) -{ - struct rproc_dump_segment *segment; - - segment = kzalloc(sizeof(*segment), GFP_KERNEL); - if (!segment) - return -ENOMEM; - - segment->da = da; - segment->size = size; - - list_add_tail(&segment->node, &rproc->dump_segments); - - return 0; -} -EXPORT_SYMBOL(rproc_coredump_add_segment); - -/** - * rproc_coredump_add_custom_segment() - add custom coredump segment - * @rproc: handle of a remote processor - * @da: device address - * @size: size of segment - * @dumpfn: custom dump function called for each segment during coredump - * @priv: private data - * - * Add device memory to the list of segments to be included in the coredump - * and associate the segment with the given custom dump function and private - * data. - * - * Return: 0 on success, negative errno on error. - */ -int rproc_coredump_add_custom_segment(struct rproc *rproc, - dma_addr_t da, size_t size, - void (*dumpfn)(struct rproc *rproc, - struct rproc_dump_segment *segment, - void *dest), - void *priv) -{ - struct rproc_dump_segment *segment; - - segment = kzalloc(sizeof(*segment), GFP_KERNEL); - if (!segment) - return -ENOMEM; - - segment->da = da; - segment->size = size; - segment->priv = priv; - segment->dump = dumpfn; + /* + * The remote processor has been stopped and is now offline, which means + * that the next time it is brought back online the remoteproc core will + * be responsible to load its firmware. As such it is no longer + * autonomous. 
+ */ + rproc->autonomous = false; - list_add_tail(&segment->node, &rproc->dump_segments); + dev_info(dev, "stopped remote processor %s\n", rproc->name); return 0; } -EXPORT_SYMBOL(rproc_coredump_add_custom_segment); -/** - * rproc_coredump_set_elf_info() - set coredump elf information - * @rproc: handle of a remote processor - * @class: elf class for coredump elf file - * @machine: elf machine for coredump elf file - * - * Set elf information which will be used for coredump elf file. - * - * Return: 0 on success, negative errno on error. - */ -int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine) -{ - if (class != ELFCLASS64 && class != ELFCLASS32) - return -EINVAL; - - rproc->elf_class = class; - rproc->elf_machine = machine; - - return 0; -} -EXPORT_SYMBOL(rproc_coredump_set_elf_info); - -/** - * rproc_coredump() - perform coredump - * @rproc: rproc handle - * - * This function will generate an ELF header for the registered segments - * and create a devcoredump device associated with rproc. - */ -static void rproc_coredump(struct rproc *rproc) -{ - struct rproc_dump_segment *segment; - void *phdr; - void *ehdr; - size_t data_size; - size_t offset; - void *data; - void *ptr; - u8 class = rproc->elf_class; - int phnum = 0; - - if (list_empty(&rproc->dump_segments)) - return; - - if (class == ELFCLASSNONE) { - dev_err(&rproc->dev, "Elf class is not set\n"); - return; - } - - data_size = elf_size_of_hdr(class); - list_for_each_entry(segment, &rproc->dump_segments, node) { - data_size += elf_size_of_phdr(class) + segment->size; - - phnum++; - } - - data = vmalloc(data_size); - if (!data) - return; - - ehdr = data; - - memset(ehdr, 0, elf_size_of_hdr(class)); - /* e_ident field is common for both elf32 and elf64 */ - elf_hdr_init_ident(ehdr, class); - - elf_hdr_set_e_type(class, ehdr, ET_CORE); - elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine); - elf_hdr_set_e_version(class, ehdr, EV_CURRENT); - elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr); - elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class)); - elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class)); - elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class)); - elf_hdr_set_e_phnum(class, ehdr, phnum); - - phdr = data + elf_hdr_get_e_phoff(class, ehdr); - offset = elf_hdr_get_e_phoff(class, ehdr); - offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr); - - list_for_each_entry(segment, &rproc->dump_segments, node) { - memset(phdr, 0, elf_size_of_phdr(class)); - elf_phdr_set_p_type(class, phdr, PT_LOAD); - elf_phdr_set_p_offset(class, phdr, offset); - elf_phdr_set_p_vaddr(class, phdr, segment->da); - elf_phdr_set_p_paddr(class, phdr, segment->da); - elf_phdr_set_p_filesz(class, phdr, segment->size); - elf_phdr_set_p_memsz(class, phdr, segment->size); - elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); - elf_phdr_set_p_align(class, phdr, 0); - - if (segment->dump) { - segment->dump(rproc, segment, data + offset); - } else { - ptr = rproc_da_to_va(rproc, segment->da, segment->size); - if (!ptr) { - dev_err(&rproc->dev, - "invalid coredump segment (%pad, %zu)\n", - &segment->da, segment->size); - memset(data + offset, 0xff, segment->size); - } else { - memcpy(data + offset, ptr, segment->size); - } - } - - offset += elf_phdr_get_p_filesz(class, phdr); - phdr += elf_size_of_phdr(class); - } - - dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); -} /** * rproc_trigger_recovery() - recover a remoteproc @@ -1815,24 +1770,30 @@ int rproc_boot(struct rproc *rproc) goto 
unlock_mutex; } - /* skip the boot process if rproc is already powered up */ + /* skip the boot or attach process if rproc is already powered up */ if (atomic_inc_return(&rproc->power) > 1) { ret = 0; goto unlock_mutex; } - dev_info(dev, "powering up %s\n", rproc->name); + if (rproc->state == RPROC_DETACHED) { + dev_info(dev, "attaching to %s\n", rproc->name); - /* load firmware */ - ret = request_firmware(&firmware_p, rproc->firmware, dev); - if (ret < 0) { - dev_err(dev, "request_firmware failed: %d\n", ret); - goto downref_rproc; - } + ret = rproc_actuate(rproc); + } else { + dev_info(dev, "powering up %s\n", rproc->name); - ret = rproc_fw_boot(rproc, firmware_p); + /* load firmware */ + ret = request_firmware(&firmware_p, rproc->firmware, dev); + if (ret < 0) { + dev_err(dev, "request_firmware failed: %d\n", ret); + goto downref_rproc; + } - release_firmware(firmware_p); + ret = rproc_fw_boot(rproc, firmware_p); + + release_firmware(firmware_p); + } downref_rproc: if (ret) @@ -1891,8 +1852,6 @@ void rproc_shutdown(struct rproc *rproc) rproc_disable_iommu(rproc); - pm_runtime_put(dev); - /* Free the copy of the resource table */ kfree(rproc->cached_table); rproc->cached_table = NULL; @@ -1952,6 +1911,43 @@ struct rproc *rproc_get_by_phandle(phandle phandle) #endif EXPORT_SYMBOL(rproc_get_by_phandle); +static int rproc_validate(struct rproc *rproc) +{ + switch (rproc->state) { + case RPROC_OFFLINE: + /* + * An offline processor without a start() + * function makes no sense. + */ + if (!rproc->ops->start) + return -EINVAL; + break; + case RPROC_DETACHED: + /* + * A remote processor in a detached state without an + * attach() function makes no sense. + */ + if (!rproc->ops->attach) + return -EINVAL; + /* + * When attaching to a remote processor the device memory + * is already available and as such there is no need to have a + * cached table. + */ + if (rproc->cached_table) + return -EINVAL; + break; + default: + /* + * When adding a remote processor, the state of the device + * can be offline or detached, nothing else. + */ + return -EINVAL; + } + + return 0; +} + /** * rproc_add() - register a remote processor * @rproc: the remote processor handle to register @@ -1981,11 +1977,30 @@ int rproc_add(struct rproc *rproc) if (ret < 0) return ret; + ret = rproc_validate(rproc); + if (ret < 0) + return ret; + dev_info(dev, "%s is available\n", rproc->name); /* create debugfs entries */ rproc_create_debug_dir(rproc); + /* add char device for this remoteproc */ + ret = rproc_char_device_add(rproc); + if (ret < 0) + return ret; + + /* + * Remind ourselves the remote processor has been attached to rather + * than booted by the remoteproc core. This is important because the + * RPROC_DETACHED state will be lost as soon as the remote processor + * has been attached to. Used in firmware_show() and reset in + * rproc_stop(). 
+ */ + if (rproc->state == RPROC_DETACHED) + rproc->autonomous = true; + /* if rproc is marked always-on, request it to boot */ if (rproc->auto_boot) { ret = rproc_trigger_auto_boot(rproc); @@ -2183,9 +2198,6 @@ struct rproc *rproc_alloc(struct device *dev, const char *name, rproc->state = RPROC_OFFLINE; - pm_runtime_no_callbacks(&rproc->dev); - pm_runtime_enable(&rproc->dev); - return rproc; put_device: @@ -2205,7 +2217,6 @@ EXPORT_SYMBOL(rproc_alloc); */ void rproc_free(struct rproc *rproc) { - pm_runtime_disable(&rproc->dev); put_device(&rproc->dev); } EXPORT_SYMBOL(rproc_free); @@ -2256,6 +2267,7 @@ int rproc_del(struct rproc *rproc) mutex_unlock(&rproc->lock); rproc_delete_debug_dir(rproc); + rproc_char_device_remove(rproc); /* the rproc is downref'ed as soon as it's removed from the klist */ mutex_lock(&rproc_list_mutex); @@ -2424,6 +2436,7 @@ static int __init remoteproc_init(void) { rproc_init_sysfs(); rproc_init_debugfs(); + rproc_init_cdev(); rproc_init_panic(); return 0; diff --git a/drivers/remoteproc/remoteproc_coredump.c b/drivers/remoteproc/remoteproc_coredump.c new file mode 100644 index 000000000000..bb15a29038e8 --- /dev/null +++ b/drivers/remoteproc/remoteproc_coredump.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Coredump functionality for Remoteproc framework. + * + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/completion.h> +#include <linux/devcoredump.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/remoteproc.h> +#include "remoteproc_internal.h" +#include "remoteproc_elf_helpers.h" + +struct rproc_coredump_state { + struct rproc *rproc; + void *header; + struct completion dump_done; +}; + +/** + * rproc_coredump_cleanup() - clean up dump_segments list + * @rproc: the remote processor handle + */ +void rproc_coredump_cleanup(struct rproc *rproc) +{ + struct rproc_dump_segment *entry, *tmp; + + list_for_each_entry_safe(entry, tmp, &rproc->dump_segments, node) { + list_del(&entry->node); + kfree(entry); + } +} + +/** + * rproc_coredump_add_segment() - add segment of device memory to coredump + * @rproc: handle of a remote processor + * @da: device address + * @size: size of segment + * + * Add device memory to the list of segments to be included in a coredump for + * the remoteproc. + * + * Return: 0 on success, negative errno on error. + */ +int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size) +{ + struct rproc_dump_segment *segment; + + segment = kzalloc(sizeof(*segment), GFP_KERNEL); + if (!segment) + return -ENOMEM; + + segment->da = da; + segment->size = size; + + list_add_tail(&segment->node, &rproc->dump_segments); + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_add_segment); + +/** + * rproc_coredump_add_custom_segment() - add custom coredump segment + * @rproc: handle of a remote processor + * @da: device address + * @size: size of segment + * @dumpfn: custom dump function called for each segment during coredump + * @priv: private data + * + * Add device memory to the list of segments to be included in the coredump + * and associate the segment with the given custom dump function and private + * data. + * + * Return: 0 on success, negative errno on error. 
+ */ +int rproc_coredump_add_custom_segment(struct rproc *rproc, + dma_addr_t da, size_t size, + void (*dumpfn)(struct rproc *rproc, + struct rproc_dump_segment *segment, + void *dest, size_t offset, + size_t size), + void *priv) +{ + struct rproc_dump_segment *segment; + + segment = kzalloc(sizeof(*segment), GFP_KERNEL); + if (!segment) + return -ENOMEM; + + segment->da = da; + segment->size = size; + segment->priv = priv; + segment->dump = dumpfn; + + list_add_tail(&segment->node, &rproc->dump_segments); + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_add_custom_segment); + +/** + * rproc_coredump_set_elf_info() - set coredump elf information + * @rproc: handle of a remote processor + * @class: elf class for coredump elf file + * @machine: elf machine for coredump elf file + * + * Set elf information which will be used for coredump elf file. + * + * Return: 0 on success, negative errno on error. + */ +int rproc_coredump_set_elf_info(struct rproc *rproc, u8 class, u16 machine) +{ + if (class != ELFCLASS64 && class != ELFCLASS32) + return -EINVAL; + + rproc->elf_class = class; + rproc->elf_machine = machine; + + return 0; +} +EXPORT_SYMBOL(rproc_coredump_set_elf_info); + +static void rproc_coredump_free(void *data) +{ + struct rproc_coredump_state *dump_state = data; + + vfree(dump_state->header); + complete(&dump_state->dump_done); +} + +static void *rproc_coredump_find_segment(loff_t user_offset, + struct list_head *segments, + size_t *data_left) +{ + struct rproc_dump_segment *segment; + + list_for_each_entry(segment, segments, node) { + if (user_offset < segment->size) { + *data_left = segment->size - user_offset; + return segment; + } + user_offset -= segment->size; + } + + *data_left = 0; + return NULL; +} + +static void rproc_copy_segment(struct rproc *rproc, void *dest, + struct rproc_dump_segment *segment, + size_t offset, size_t size) +{ + void *ptr; + + if (segment->dump) { + segment->dump(rproc, segment, dest, offset, size); + } else { + ptr = rproc_da_to_va(rproc, segment->da + offset, size); + if (!ptr) { + dev_err(&rproc->dev, + "invalid copy request for segment %pad with offset %zu and size %zu)\n", + &segment->da, offset, size); + memset(dest, 0xff, size); + } else { + memcpy(dest, ptr, size); + } + } +} + +static ssize_t rproc_coredump_read(char *buffer, loff_t offset, size_t count, + void *data, size_t header_sz) +{ + size_t seg_data, bytes_left = count; + ssize_t copy_sz; + struct rproc_dump_segment *seg; + struct rproc_coredump_state *dump_state = data; + struct rproc *rproc = dump_state->rproc; + void *elfcore = dump_state->header; + + /* Copy the vmalloc'ed header first. */ + if (offset < header_sz) { + copy_sz = memory_read_from_buffer(buffer, count, &offset, + elfcore, header_sz); + + return copy_sz; + } + + /* + * Find out the segment memory chunk to be copied based on offset. + * Keep copying data until count bytes are read. 
+ */ + while (bytes_left) { + seg = rproc_coredump_find_segment(offset - header_sz, + &rproc->dump_segments, + &seg_data); + /* EOF check */ + if (!seg) { + dev_info(&rproc->dev, "Ramdump done, %lld bytes read", + offset); + break; + } + + copy_sz = min_t(size_t, bytes_left, seg_data); + + rproc_copy_segment(rproc, buffer, seg, seg->size - seg_data, + copy_sz); + + offset += copy_sz; + buffer += copy_sz; + bytes_left -= copy_sz; + } + + return count - bytes_left; +} + +/** + * rproc_coredump() - perform coredump + * @rproc: rproc handle + * + * This function will generate an ELF header for the registered segments + * and create a devcoredump device associated with rproc. Based on the + * coredump configuration this function will directly copy the segments + * from device memory to userspace or copy segments from device memory to + * a separate buffer, which can then be read by userspace. + * The first approach avoids using extra vmalloc memory. But it will stall + * recovery flow until dump is read by userspace. + */ +void rproc_coredump(struct rproc *rproc) +{ + struct rproc_dump_segment *segment; + void *phdr; + void *ehdr; + size_t data_size; + size_t offset; + void *data; + u8 class = rproc->elf_class; + int phnum = 0; + struct rproc_coredump_state dump_state; + enum rproc_dump_mechanism dump_conf = rproc->dump_conf; + + if (list_empty(&rproc->dump_segments) || + dump_conf == RPROC_COREDUMP_DISABLED) + return; + + if (class == ELFCLASSNONE) { + dev_err(&rproc->dev, "Elf class is not set\n"); + return; + } + + data_size = elf_size_of_hdr(class); + list_for_each_entry(segment, &rproc->dump_segments, node) { + /* + * For default configuration buffer includes headers & segments. + * For inline dump buffer just includes headers as segments are + * directly read from device memory. 
+ */ + data_size += elf_size_of_phdr(class); + if (dump_conf == RPROC_COREDUMP_DEFAULT) + data_size += segment->size; + + phnum++; + } + + data = vmalloc(data_size); + if (!data) + return; + + ehdr = data; + + memset(ehdr, 0, elf_size_of_hdr(class)); + /* e_ident field is common for both elf32 and elf64 */ + elf_hdr_init_ident(ehdr, class); + + elf_hdr_set_e_type(class, ehdr, ET_CORE); + elf_hdr_set_e_machine(class, ehdr, rproc->elf_machine); + elf_hdr_set_e_version(class, ehdr, EV_CURRENT); + elf_hdr_set_e_entry(class, ehdr, rproc->bootaddr); + elf_hdr_set_e_phoff(class, ehdr, elf_size_of_hdr(class)); + elf_hdr_set_e_ehsize(class, ehdr, elf_size_of_hdr(class)); + elf_hdr_set_e_phentsize(class, ehdr, elf_size_of_phdr(class)); + elf_hdr_set_e_phnum(class, ehdr, phnum); + + phdr = data + elf_hdr_get_e_phoff(class, ehdr); + offset = elf_hdr_get_e_phoff(class, ehdr); + offset += elf_size_of_phdr(class) * elf_hdr_get_e_phnum(class, ehdr); + + list_for_each_entry(segment, &rproc->dump_segments, node) { + memset(phdr, 0, elf_size_of_phdr(class)); + elf_phdr_set_p_type(class, phdr, PT_LOAD); + elf_phdr_set_p_offset(class, phdr, offset); + elf_phdr_set_p_vaddr(class, phdr, segment->da); + elf_phdr_set_p_paddr(class, phdr, segment->da); + elf_phdr_set_p_filesz(class, phdr, segment->size); + elf_phdr_set_p_memsz(class, phdr, segment->size); + elf_phdr_set_p_flags(class, phdr, PF_R | PF_W | PF_X); + elf_phdr_set_p_align(class, phdr, 0); + + if (dump_conf == RPROC_COREDUMP_DEFAULT) + rproc_copy_segment(rproc, data + offset, segment, 0, + segment->size); + + offset += elf_phdr_get_p_filesz(class, phdr); + phdr += elf_size_of_phdr(class); + } + if (dump_conf == RPROC_COREDUMP_DEFAULT) { + dev_coredumpv(&rproc->dev, data, data_size, GFP_KERNEL); + return; + } + + /* Initialize the dump state struct to be used by rproc_coredump_read */ + dump_state.rproc = rproc; + dump_state.header = data; + init_completion(&dump_state.dump_done); + + dev_coredumpm(&rproc->dev, NULL, &dump_state, data_size, GFP_KERNEL, + rproc_coredump_read, rproc_coredump_free); + + /* + * Wait until the dump is read and free is called. Data is freed + * by devcoredump framework automatically after 5 minutes. + */ + wait_for_completion(&dump_state.dump_done); +} diff --git a/drivers/remoteproc/remoteproc_debugfs.c b/drivers/remoteproc/remoteproc_debugfs.c index 732770e92b99..2e3b3e22e1d0 100644 --- a/drivers/remoteproc/remoteproc_debugfs.c +++ b/drivers/remoteproc/remoteproc_debugfs.c @@ -28,6 +28,94 @@ static struct dentry *rproc_dbg; /* + * A coredump-configuration-to-string lookup table, for exposing a + * human readable configuration via debugfs. Always keep in sync with + * enum rproc_coredump_mechanism + */ +static const char * const rproc_coredump_str[] = { + [RPROC_COREDUMP_DEFAULT] = "default", + [RPROC_COREDUMP_INLINE] = "inline", + [RPROC_COREDUMP_DISABLED] = "disabled", +}; + +/* Expose the current coredump configuration via debugfs */ +static ssize_t rproc_coredump_read(struct file *filp, char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct rproc *rproc = filp->private_data; + char buf[20]; + int len; + + len = scnprintf(buf, sizeof(buf), "%s\n", + rproc_coredump_str[rproc->dump_conf]); + + return simple_read_from_buffer(userbuf, count, ppos, buf, len); +} + +/* + * By writing to the 'coredump' debugfs entry, we control the behavior of the + * coredump mechanism dynamically. The default value of this entry is "default". 
+ * + * The 'coredump' debugfs entry supports these commands: + * + * default: This is the default coredump mechanism. When the remoteproc + * crashes the entire coredump will be copied to a separate buffer + * and exposed to userspace. + * + * inline: The coredump will not be copied to a separate buffer and the + * recovery process will have to wait until data is read by + * userspace. But this avoids the use of extra memory. + * + * disabled: This will disable coredump. Recovery will proceed without + * collecting any dump. + */ +static ssize_t rproc_coredump_write(struct file *filp, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct rproc *rproc = filp->private_data; + int ret, err = 0; + char buf[20]; + + if (count > sizeof(buf)) + return -EINVAL; + + ret = copy_from_user(buf, user_buf, count); + if (ret) + return -EFAULT; + + /* remove end of line */ + if (buf[count - 1] == '\n') + buf[count - 1] = '\0'; + + if (rproc->state == RPROC_CRASHED) { + dev_err(&rproc->dev, "can't change coredump configuration\n"); + err = -EBUSY; + goto out; + } + + if (!strncmp(buf, "disable", count)) { + rproc->dump_conf = RPROC_COREDUMP_DISABLED; + } else if (!strncmp(buf, "inline", count)) { + rproc->dump_conf = RPROC_COREDUMP_INLINE; + } else if (!strncmp(buf, "default", count)) { + rproc->dump_conf = RPROC_COREDUMP_DEFAULT; + } else { + dev_err(&rproc->dev, "Invalid coredump configuration\n"); + err = -EINVAL; + } +out: + return err ? err : count; +} + +static const struct file_operations rproc_coredump_fops = { + .read = rproc_coredump_read, + .write = rproc_coredump_write, + .open = simple_open, + .llseek = generic_file_llseek, +}; + +/* * Some remote processors may support dumping trace logs into a shared * memory buffer. We expose this trace buffer using debugfs, so users * can easily tell what's going on remotely. @@ -337,6 +425,8 @@ void rproc_create_debug_dir(struct rproc *rproc) rproc, &rproc_rsc_table_fops); debugfs_create_file("carveout_memories", 0400, rproc->dbg_dir, rproc, &rproc_carveouts_fops); + debugfs_create_file("coredump", 0600, rproc->dbg_dir, + rproc, &rproc_coredump_fops); } void __init rproc_init_debugfs(void) diff --git a/drivers/remoteproc/remoteproc_internal.h b/drivers/remoteproc/remoteproc_internal.h index 4ba7cb59d3e8..c34002888d2c 100644 --- a/drivers/remoteproc/remoteproc_internal.h +++ b/drivers/remoteproc/remoteproc_internal.h @@ -28,6 +28,8 @@ struct rproc_debug_trace { void rproc_release(struct kref *kref); irqreturn_t rproc_vq_interrupt(struct rproc *rproc, int vq_id); void rproc_vdev_release(struct kref *ref); +int rproc_of_parse_firmware(struct device *dev, int index, + const char **fw_name); /* from remoteproc_virtio.c */ int rproc_add_virtio_dev(struct rproc_vdev *rvdev, int id); @@ -47,6 +49,38 @@ extern struct class rproc_class; int rproc_init_sysfs(void); void rproc_exit_sysfs(void); +/* from remoteproc_coredump.c */ +void rproc_coredump_cleanup(struct rproc *rproc); +void rproc_coredump(struct rproc *rproc); + +#ifdef CONFIG_REMOTEPROC_CDEV +void rproc_init_cdev(void); +void rproc_exit_cdev(void); +int rproc_char_device_add(struct rproc *rproc); +void rproc_char_device_remove(struct rproc *rproc); +#else +static inline void rproc_init_cdev(void) +{ +} + +static inline void rproc_exit_cdev(void) +{ +} + +/* + * The character device interface is an optional feature; if it is not enabled, + * the function should not return an error. 
+ */ +static inline int rproc_char_device_add(struct rproc *rproc) +{ + return 0; +} + +static inline void rproc_char_device_remove(struct rproc *rproc) +{ +} +#endif + void rproc_free_vring(struct rproc_vring *rvring); int rproc_alloc_vring(struct rproc_vdev *rvdev, int i); @@ -79,6 +113,14 @@ static inline int rproc_unprepare_device(struct rproc *rproc) return 0; } +static inline int rproc_attach_device(struct rproc *rproc) +{ + if (rproc->ops->attach) + return rproc->ops->attach(rproc); + + return 0; +} + static inline int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) { diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c index 52b871327b55..eea514cec50e 100644 --- a/drivers/remoteproc/remoteproc_sysfs.c +++ b/drivers/remoteproc/remoteproc_sysfs.c @@ -15,8 +15,20 @@ static ssize_t firmware_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rproc *rproc = to_rproc(dev); - - return sprintf(buf, "%s\n", rproc->firmware); + const char *firmware = rproc->firmware; + + /* + * If the remote processor has been started by an external + * entity we have no idea of what image it is running. As such + * simply display a generic string rather then rproc->firmware. + * + * Here we rely on the autonomous flag because a remote processor + * may have been attached to and currently in a running state. + */ + if (rproc->autonomous) + firmware = "unknown"; + + return sprintf(buf, "%s\n", firmware); } /* Change firmware name via sysfs */ @@ -72,6 +84,7 @@ static const char * const rproc_state_string[] = { [RPROC_RUNNING] = "running", [RPROC_CRASHED] = "crashed", [RPROC_DELETED] = "deleted", + [RPROC_DETACHED] = "detached", [RPROC_LAST] = "invalid", }; diff --git a/drivers/remoteproc/stm32_rproc.c b/drivers/remoteproc/stm32_rproc.c index 062797a447c6..f4da42fc0eeb 100644 --- a/drivers/remoteproc/stm32_rproc.c +++ b/drivers/remoteproc/stm32_rproc.c @@ -39,6 +39,15 @@ #define STM32_MBX_VQ1_ID 1 #define STM32_MBX_SHUTDOWN "shutdown" +#define RSC_TBL_SIZE 1024 + +#define M4_STATE_OFF 0 +#define M4_STATE_INI 1 +#define M4_STATE_CRUN 2 +#define M4_STATE_CSTOP 3 +#define M4_STATE_STANDBY 4 +#define M4_STATE_CRASH 5 + struct stm32_syscon { struct regmap *map; u32 reg; @@ -71,12 +80,15 @@ struct stm32_rproc { struct reset_control *rst; struct stm32_syscon hold_boot; struct stm32_syscon pdds; + struct stm32_syscon m4_state; + struct stm32_syscon rsctbl; int wdg_irq; u32 nb_rmems; struct stm32_rproc_mem *rmems; struct stm32_mbox mb[MBOX_NB_MBX]; struct workqueue_struct *workqueue; bool secured_soc; + void __iomem *rsc_va; }; static int stm32_rproc_pa_to_da(struct rproc *rproc, phys_addr_t pa, u64 *da) @@ -128,10 +140,10 @@ static int stm32_rproc_mem_release(struct rproc *rproc, return 0; } -static int stm32_rproc_of_memory_translations(struct rproc *rproc) +static int stm32_rproc_of_memory_translations(struct platform_device *pdev, + struct stm32_rproc *ddata) { - struct device *parent, *dev = rproc->dev.parent; - struct stm32_rproc *ddata = rproc->priv; + struct device *parent, *dev = &pdev->dev; struct device_node *np; struct stm32_rproc_mem *p_mems; struct stm32_rproc_mem_ranges *mem_range; @@ -204,7 +216,7 @@ static int stm32_rproc_elf_load_rsc_table(struct rproc *rproc, return 0; } -static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +static int stm32_rproc_parse_memory_regions(struct rproc *rproc) { struct device *dev = rproc->dev.parent; struct device_node *np = dev->of_node; @@ -257,12 +269,23 @@ 
static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) index++; } + return 0; +} + +static int stm32_rproc_parse_fw(struct rproc *rproc, const struct firmware *fw) +{ + int ret = stm32_rproc_parse_memory_regions(rproc); + + if (ret) + return ret; + return stm32_rproc_elf_load_rsc_table(rproc, fw); } static irqreturn_t stm32_rproc_wdg(int irq, void *data) { - struct rproc *rproc = data; + struct platform_device *pdev = data; + struct rproc *rproc = platform_get_drvdata(pdev); rproc_report_crash(rproc, RPROC_WATCHDOG); @@ -437,6 +460,13 @@ static int stm32_rproc_start(struct rproc *rproc) return stm32_rproc_set_hold_boot(rproc, true); } +static int stm32_rproc_attach(struct rproc *rproc) +{ + stm32_rproc_add_coredump_trace(rproc); + + return stm32_rproc_set_hold_boot(rproc, true); +} + static int stm32_rproc_stop(struct rproc *rproc) { struct stm32_rproc *ddata = rproc->priv; @@ -474,6 +504,18 @@ static int stm32_rproc_stop(struct rproc *rproc) } } + /* update coprocessor state to OFF if available */ + if (ddata->m4_state.map) { + err = regmap_update_bits(ddata->m4_state.map, + ddata->m4_state.reg, + ddata->m4_state.mask, + M4_STATE_OFF); + if (err) { + dev_err(&rproc->dev, "failed to set copro state\n"); + return err; + } + } + return 0; } @@ -502,6 +544,7 @@ static void stm32_rproc_kick(struct rproc *rproc, int vqid) static struct rproc_ops st_rproc_ops = { .start = stm32_rproc_start, .stop = stm32_rproc_stop, + .attach = stm32_rproc_attach, .kick = stm32_rproc_kick, .load = rproc_elf_load_segments, .parse_fw = stm32_rproc_parse_fw, @@ -538,12 +581,11 @@ out: return err; } -static int stm32_rproc_parse_dt(struct platform_device *pdev) +static int stm32_rproc_parse_dt(struct platform_device *pdev, + struct stm32_rproc *ddata, bool *auto_boot) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - struct rproc *rproc = platform_get_drvdata(pdev); - struct stm32_rproc *ddata = rproc->priv; struct stm32_syscon tz; unsigned int tzen; int err, irq; @@ -554,7 +596,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev) if (irq > 0) { err = devm_request_irq(dev, irq, stm32_rproc_wdg, 0, - dev_name(dev), rproc); + dev_name(dev), pdev); if (err) { dev_err(dev, "failed to request wdg irq\n"); return err; @@ -589,7 +631,7 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev) err = regmap_read(tz.map, tz.reg, &tzen); if (err) { - dev_err(&rproc->dev, "failed to read tzen\n"); + dev_err(dev, "failed to read tzen\n"); return err; } ddata->secured_soc = tzen & tz.mask; @@ -605,9 +647,118 @@ static int stm32_rproc_parse_dt(struct platform_device *pdev) if (err) dev_info(dev, "failed to get pdds\n"); - rproc->auto_boot = of_property_read_bool(np, "st,auto-boot"); + *auto_boot = of_property_read_bool(np, "st,auto-boot"); - return stm32_rproc_of_memory_translations(rproc); + /* + * See if we can check the M4 status, i.e if it was started + * from the boot loader or not. 
+ */ + err = stm32_rproc_get_syscon(np, "st,syscfg-m4-state", + &ddata->m4_state); + if (err) { + /* remember this */ + ddata->m4_state.map = NULL; + /* no coprocessor state syscon (optional) */ + dev_warn(dev, "m4 state not supported\n"); + + /* no need to go further */ + return 0; + } + + /* See if we can get the resource table */ + err = stm32_rproc_get_syscon(np, "st,syscfg-rsc-tbl", + &ddata->rsctbl); + if (err) { + /* no rsc table syscon (optional) */ + dev_warn(dev, "rsc tbl syscon not supported\n"); + } + + return 0; +} + +static int stm32_rproc_get_m4_status(struct stm32_rproc *ddata, + unsigned int *state) +{ + /* See stm32_rproc_parse_dt() */ + if (!ddata->m4_state.map) { + /* + * We couldn't get the coprocessor's state, assume + * it is not running. + */ + *state = M4_STATE_OFF; + return 0; + } + + return regmap_read(ddata->m4_state.map, ddata->m4_state.reg, state); +} + +static int stm32_rproc_da_to_pa(struct platform_device *pdev, + struct stm32_rproc *ddata, + u64 da, phys_addr_t *pa) +{ + struct device *dev = &pdev->dev; + struct stm32_rproc_mem *p_mem; + unsigned int i; + + for (i = 0; i < ddata->nb_rmems; i++) { + p_mem = &ddata->rmems[i]; + + if (da < p_mem->dev_addr || + da >= p_mem->dev_addr + p_mem->size) + continue; + + *pa = da - p_mem->dev_addr + p_mem->bus_addr; + dev_dbg(dev, "da %llx to pa %#x\n", da, *pa); + + return 0; + } + + dev_err(dev, "can't translate da %llx\n", da); + + return -EINVAL; +} + +static int stm32_rproc_get_loaded_rsc_table(struct platform_device *pdev, + struct rproc *rproc, + struct stm32_rproc *ddata) +{ + struct device *dev = &pdev->dev; + phys_addr_t rsc_pa; + u32 rsc_da; + int err; + + err = regmap_read(ddata->rsctbl.map, ddata->rsctbl.reg, &rsc_da); + if (err) { + dev_err(dev, "failed to read rsc tbl addr\n"); + return err; + } + + if (!rsc_da) + /* no rsc table */ + return 0; + + err = stm32_rproc_da_to_pa(pdev, ddata, rsc_da, &rsc_pa); + if (err) + return err; + + ddata->rsc_va = devm_ioremap_wc(dev, rsc_pa, RSC_TBL_SIZE); + if (IS_ERR_OR_NULL(ddata->rsc_va)) { + dev_err(dev, "Unable to map memory region: %pa+%zx\n", + &rsc_pa, RSC_TBL_SIZE); + ddata->rsc_va = NULL; + return -ENOMEM; + } + + /* + * The resource table is already loaded in device memory, no need + * to work with a cached table. 
+ */ + rproc->cached_table = NULL; + /* Assuming the resource table fits in 1kB is fair */ + rproc->table_sz = RSC_TBL_SIZE; + rproc->table_ptr = (struct resource_table *)ddata->rsc_va; + + return 0; } static int stm32_rproc_probe(struct platform_device *pdev) @@ -616,6 +767,7 @@ static int stm32_rproc_probe(struct platform_device *pdev) struct stm32_rproc *ddata; struct device_node *np = dev->of_node; struct rproc *rproc; + unsigned int state; int ret; ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32)); @@ -626,25 +778,47 @@ static int stm32_rproc_probe(struct platform_device *pdev) if (!rproc) return -ENOMEM; + ddata = rproc->priv; + rproc_coredump_set_elf_info(rproc, ELFCLASS32, EM_NONE); + + ret = stm32_rproc_parse_dt(pdev, ddata, &rproc->auto_boot); + if (ret) + goto free_rproc; + + ret = stm32_rproc_of_memory_translations(pdev, ddata); + if (ret) + goto free_rproc; + + ret = stm32_rproc_get_m4_status(ddata, &state); + if (ret) + goto free_rproc; + + if (state == M4_STATE_CRUN) { + rproc->state = RPROC_DETACHED; + + ret = stm32_rproc_parse_memory_regions(rproc); + if (ret) + goto free_resources; + + ret = stm32_rproc_get_loaded_rsc_table(pdev, rproc, ddata); + if (ret) + goto free_resources; + } + rproc->has_iommu = false; - ddata = rproc->priv; ddata->workqueue = create_workqueue(dev_name(dev)); if (!ddata->workqueue) { dev_err(dev, "cannot create workqueue\n"); ret = -ENOMEM; - goto free_rproc; + goto free_resources; } platform_set_drvdata(pdev, rproc); - ret = stm32_rproc_parse_dt(pdev); - if (ret) - goto free_wkq; - ret = stm32_rproc_request_mbox(rproc); if (ret) - goto free_rproc; + goto free_wkq; ret = rproc_add(rproc); if (ret) @@ -656,6 +830,8 @@ free_mb: stm32_rproc_free_mbox(rproc); free_wkq: destroy_workqueue(ddata->workqueue); +free_resources: + rproc_resource_cleanup(rproc); free_rproc: if (device_may_wakeup(dev)) { dev_pm_clear_wake_irq(dev); diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c new file mode 100644 index 000000000000..9011e477290c --- /dev/null +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * TI K3 DSP Remote Processor(s) driver + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + */ + +#include <linux/io.h> +#include <linux/mailbox_client.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of_reserved_mem.h> +#include <linux/omap-mailbox.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/reset.h> +#include <linux/slab.h> + +#include "omap_remoteproc.h" +#include "remoteproc_internal.h" +#include "ti_sci_proc.h" + +#define KEYSTONE_RPROC_LOCAL_ADDRESS_MASK (SZ_16M - 1) + +/** + * struct k3_dsp_mem - internal memory structure + * @cpu_addr: MPU virtual address of the memory region + * @bus_addr: Bus address used to access the memory region + * @dev_addr: Device address of the memory region from DSP view + * @size: Size of the memory region + */ +struct k3_dsp_mem { + void __iomem *cpu_addr; + phys_addr_t bus_addr; + u32 dev_addr; + size_t size; +}; + +/** + * struct k3_dsp_mem_data - memory definitions for a DSP + * @name: name for this memory entry + * @dev_addr: device address for the memory entry + */ +struct k3_dsp_mem_data { + const char *name; + const u32 dev_addr; +}; + +/** + * struct k3_dsp_dev_data - device data structure for a DSP + * @mems: pointer to memory definitions for a DSP + * @num_mems: number 
of memory regions in @mems + * @boot_align_addr: boot vector address alignment granularity + * @uses_lreset: flag to denote the need for local reset management + */ +struct k3_dsp_dev_data { + const struct k3_dsp_mem_data *mems; + u32 num_mems; + u32 boot_align_addr; + bool uses_lreset; +}; + +/** + * struct k3_dsp_rproc - k3 DSP remote processor driver structure + * @dev: cached device pointer + * @rproc: remoteproc device handle + * @mem: internal memory regions data + * @num_mems: number of internal memory regions + * @rmem: reserved memory regions data + * @num_rmems: number of reserved memory regions + * @reset: reset control handle + * @data: pointer to DSP-specific device data + * @tsp: TI-SCI processor control handle + * @ti_sci: TI-SCI handle + * @ti_sci_id: TI-SCI device identifier + * @mbox: mailbox channel handle + * @client: mailbox client to request the mailbox channel + */ +struct k3_dsp_rproc { + struct device *dev; + struct rproc *rproc; + struct k3_dsp_mem *mem; + int num_mems; + struct k3_dsp_mem *rmem; + int num_rmems; + struct reset_control *reset; + const struct k3_dsp_dev_data *data; + struct ti_sci_proc *tsp; + const struct ti_sci_handle *ti_sci; + u32 ti_sci_id; + struct mbox_chan *mbox; + struct mbox_client client; +}; + +/** + * k3_dsp_rproc_mbox_callback() - inbound mailbox message handler + * @client: mailbox client pointer used for requesting the mailbox channel + * @data: mailbox payload + * + * This handler is invoked by the OMAP mailbox driver whenever a mailbox + * message is received. Usually, the mailbox payload simply contains + * the index of the virtqueue that is kicked by the remote processor, + * and we let remoteproc core handle it. + * + * In addition to virtqueue indices, we also have some out-of-band values + * that indicate different events. Those values are deliberately very + * large so they don't coincide with virtqueue indices. + */ +static void k3_dsp_rproc_mbox_callback(struct mbox_client *client, void *data) +{ + struct k3_dsp_rproc *kproc = container_of(client, struct k3_dsp_rproc, + client); + struct device *dev = kproc->rproc->dev.parent; + const char *name = kproc->rproc->name; + u32 msg = omap_mbox_message(data); + + dev_dbg(dev, "mbox msg: 0x%x\n", msg); + + switch (msg) { + case RP_MBOX_CRASH: + /* + * remoteproc detected an exception, but error recovery is not + * supported. So, just log this for now + */ + dev_err(dev, "K3 DSP rproc %s crashed\n", name); + break; + case RP_MBOX_ECHO_REPLY: + dev_info(dev, "received echo reply from %s\n", name); + break; + default: + /* silently handle all other valid messages */ + if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG) + return; + if (msg > kproc->rproc->max_notifyid) { + dev_dbg(dev, "dropping unknown message 0x%x", msg); + return; + } + /* msg contains the index of the triggered vring */ + if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE) + dev_dbg(dev, "no message was found in vqid %d\n", msg); + } +} + +/* + * Kick the remote processor to notify about pending unprocessed messages. + * The vqid usage is not used and is inconsequential, as the kick is performed + * through a simulated GPIO (a bit in an IPC interrupt-triggering register), + * the remote processor is expected to process both its Tx and Rx virtqueues. 
+ */ +static void k3_dsp_rproc_kick(struct rproc *rproc, int vqid) +{ + struct k3_dsp_rproc *kproc = rproc->priv; + struct device *dev = rproc->dev.parent; + mbox_msg_t msg = (mbox_msg_t)vqid; + int ret; + + /* send the index of the triggered virtqueue in the mailbox payload */ + ret = mbox_send_message(kproc->mbox, (void *)msg); + if (ret < 0) + dev_err(dev, "failed to send mailbox message, status = %d\n", + ret); +} + +/* Put the DSP processor into reset */ +static int k3_dsp_rproc_reset(struct k3_dsp_rproc *kproc) +{ + struct device *dev = kproc->dev; + int ret; + + ret = reset_control_assert(kproc->reset); + if (ret) { + dev_err(dev, "local-reset assert failed, ret = %d\n", ret); + return ret; + } + + if (kproc->data->uses_lreset) + return ret; + + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) { + dev_err(dev, "module-reset assert failed, ret = %d\n", ret); + if (reset_control_deassert(kproc->reset)) + dev_warn(dev, "local-reset deassert back failed\n"); + } + + return ret; +} + +/* Release the DSP processor from reset */ +static int k3_dsp_rproc_release(struct k3_dsp_rproc *kproc) +{ + struct device *dev = kproc->dev; + int ret; + + if (kproc->data->uses_lreset) + goto lreset; + + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) { + dev_err(dev, "module-reset deassert failed, ret = %d\n", ret); + return ret; + } + +lreset: + ret = reset_control_deassert(kproc->reset); + if (ret) { + dev_err(dev, "local-reset deassert failed, ret = %d\n", ret); + if (kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id)) + dev_warn(dev, "module-reset assert back failed\n"); + } + + return ret; +} + +/* + * The C66x DSP cores have a local reset that affects only the CPU, and a + * generic module reset that powers on the device and allows the DSP internal + * memories to be accessed while the local reset is asserted. This function is + * used to release the global reset on C66x DSPs to allow loading into the DSP + * internal RAMs. The .prepare() ops is invoked by remoteproc core before any + * firmware loading, and is followed by the .start() ops after loading to + * actually let the C66x DSP cores run. + */ +static int k3_dsp_rproc_prepare(struct rproc *rproc) +{ + struct k3_dsp_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + ret = kproc->ti_sci->ops.dev_ops.get_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) + dev_err(dev, "module-reset deassert failed, cannot enable internal RAM loading, ret = %d\n", + ret); + + return ret; +} + +/* + * This function implements the .unprepare() ops and performs the complementary + * operations to that of the .prepare() ops. The function is used to assert the + * global reset on applicable C66x cores. This completes the second portion of + * powering down the C66x DSP cores. The cores themselves are only halted in the + * .stop() callback through the local reset, and the .unprepare() ops is invoked + * by the remoteproc core after the remoteproc is stopped to balance the global + * reset. + */ +static int k3_dsp_rproc_unprepare(struct rproc *rproc) +{ + struct k3_dsp_rproc *kproc = rproc->priv; + struct device *dev = kproc->dev; + int ret; + + ret = kproc->ti_sci->ops.dev_ops.put_device(kproc->ti_sci, + kproc->ti_sci_id); + if (ret) + dev_err(dev, "module-reset assert failed, ret = %d\n", ret); + + return ret; +} + +/* + * Power up the DSP remote processor. 
+ * + * This function will be invoked only after the firmware for this rproc + * was loaded, parsed successfully, and all of its resource requirements + * were met. + */ +static int k3_dsp_rproc_start(struct rproc *rproc) +{ + struct k3_dsp_rproc *kproc = rproc->priv; + struct mbox_client *client = &kproc->client; + struct device *dev = kproc->dev; + u32 boot_addr; + int ret; + + client->dev = dev; + client->tx_done = NULL; + client->rx_callback = k3_dsp_rproc_mbox_callback; + client->tx_block = false; + client->knows_txdone = false; + + kproc->mbox = mbox_request_channel(client, 0); + if (IS_ERR(kproc->mbox)) { + ret = -EBUSY; + dev_err(dev, "mbox_request_channel failed: %ld\n", + PTR_ERR(kproc->mbox)); + return ret; + } + + /* + * Ping the remote processor, this is only for sanity-sake for now; + * there is no functional effect whatsoever. + * + * Note that the reply will _not_ arrive immediately: this message + * will wait in the mailbox fifo until the remote processor is booted. + */ + ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST); + if (ret < 0) { + dev_err(dev, "mbox_send_message failed: %d\n", ret); + goto put_mbox; + } + + boot_addr = rproc->bootaddr; + if (boot_addr & (kproc->data->boot_align_addr - 1)) { + dev_err(dev, "invalid boot address 0x%x, must be aligned on a 0x%x boundary\n", + boot_addr, kproc->data->boot_align_addr); + ret = -EINVAL; + goto put_mbox; + } + + dev_err(dev, "booting DSP core using boot addr = 0x%x\n", boot_addr); + ret = ti_sci_proc_set_config(kproc->tsp, boot_addr, 0, 0); + if (ret) + goto put_mbox; + + ret = k3_dsp_rproc_release(kproc); + if (ret) + goto put_mbox; + + return 0; + +put_mbox: + mbox_free_channel(kproc->mbox); + return ret; +} + +/* + * Stop the DSP remote processor. + * + * This function puts the DSP processor into reset, and finishes processing + * of any pending messages. + */ +static int k3_dsp_rproc_stop(struct rproc *rproc) +{ + struct k3_dsp_rproc *kproc = rproc->priv; + + mbox_free_channel(kproc->mbox); + + k3_dsp_rproc_reset(kproc); + + return 0; +} + +/* + * Custom function to translate a DSP device address (internal RAMs only) to a + * kernel virtual address. The DSPs can access their RAMs at either an internal + * address visible only from a DSP, or at the SoC-level bus address. Both these + * addresses need to be looked through for translation. The translated addresses + * can be used either by the remoteproc core for loading (when using kernel + * remoteproc loader), or by any rpmsg bus drivers. 
+ */ +static void *k3_dsp_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len) +{ + struct k3_dsp_rproc *kproc = rproc->priv; + void __iomem *va = NULL; + phys_addr_t bus_addr; + u32 dev_addr, offset; + size_t size; + int i; + + if (len == 0) + return NULL; + + for (i = 0; i < kproc->num_mems; i++) { + bus_addr = kproc->mem[i].bus_addr; + dev_addr = kproc->mem[i].dev_addr; + size = kproc->mem[i].size; + + if (da < KEYSTONE_RPROC_LOCAL_ADDRESS_MASK) { + /* handle DSP-view addresses */ + if (da >= dev_addr && + ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = kproc->mem[i].cpu_addr + offset; + return (__force void *)va; + } + } else { + /* handle SoC-view addresses */ + if (da >= bus_addr && + (da + len) <= (bus_addr + size)) { + offset = da - bus_addr; + va = kproc->mem[i].cpu_addr + offset; + return (__force void *)va; + } + } + } + + /* handle static DDR reserved memory regions */ + for (i = 0; i < kproc->num_rmems; i++) { + dev_addr = kproc->rmem[i].dev_addr; + size = kproc->rmem[i].size; + + if (da >= dev_addr && ((da + len) <= (dev_addr + size))) { + offset = da - dev_addr; + va = kproc->rmem[i].cpu_addr + offset; + return (__force void *)va; + } + } + + return NULL; +} + +static const struct rproc_ops k3_dsp_rproc_ops = { + .start = k3_dsp_rproc_start, + .stop = k3_dsp_rproc_stop, + .kick = k3_dsp_rproc_kick, + .da_to_va = k3_dsp_rproc_da_to_va, +}; + +static int k3_dsp_rproc_of_get_memories(struct platform_device *pdev, + struct k3_dsp_rproc *kproc) +{ + const struct k3_dsp_dev_data *data = kproc->data; + struct device *dev = &pdev->dev; + struct resource *res; + int num_mems = 0; + int i; + + num_mems = kproc->data->num_mems; + kproc->mem = devm_kcalloc(kproc->dev, num_mems, + sizeof(*kproc->mem), GFP_KERNEL); + if (!kproc->mem) + return -ENOMEM; + + for (i = 0; i < num_mems; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + data->mems[i].name); + if (!res) { + dev_err(dev, "found no memory resource for %s\n", + data->mems[i].name); + return -EINVAL; + } + if (!devm_request_mem_region(dev, res->start, + resource_size(res), + dev_name(dev))) { + dev_err(dev, "could not request %s region for resource\n", + data->mems[i].name); + return -EBUSY; + } + + kproc->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start, + resource_size(res)); + if (IS_ERR(kproc->mem[i].cpu_addr)) { + dev_err(dev, "failed to map %s memory\n", + data->mems[i].name); + return PTR_ERR(kproc->mem[i].cpu_addr); + } + kproc->mem[i].bus_addr = res->start; + kproc->mem[i].dev_addr = data->mems[i].dev_addr; + kproc->mem[i].size = resource_size(res); + + dev_dbg(dev, "memory %8s: bus addr %pa size 0x%zx va %pK da 0x%x\n", + data->mems[i].name, &kproc->mem[i].bus_addr, + kproc->mem[i].size, kproc->mem[i].cpu_addr, + kproc->mem[i].dev_addr); + } + kproc->num_mems = num_mems; + + return 0; +} + +static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc) +{ + struct device *dev = kproc->dev; + struct device_node *np = dev->of_node; + struct device_node *rmem_np; + struct reserved_mem *rmem; + int num_rmems; + int ret, i; + + num_rmems = of_property_count_elems_of_size(np, "memory-region", + sizeof(phandle)); + if (num_rmems <= 0) { + dev_err(dev, "device does not have reserved memory regions, ret = %d\n", + num_rmems); + return -EINVAL; + } + if (num_rmems < 2) { + dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n", + num_rmems); + return -EINVAL; + } + + /* use reserved memory region 0 for vring DMA allocations */ + ret = 
of_reserved_mem_device_init_by_idx(dev, np, 0); + if (ret) { + dev_err(dev, "device cannot initialize DMA pool, ret = %d\n", + ret); + return ret; + } + + num_rmems--; + kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); + if (!kproc->rmem) { + ret = -ENOMEM; + goto release_rmem; + } + + /* use remaining reserved memory regions for static carveouts */ + for (i = 0; i < num_rmems; i++) { + rmem_np = of_parse_phandle(np, "memory-region", i + 1); + if (!rmem_np) { + ret = -EINVAL; + goto unmap_rmem; + } + + rmem = of_reserved_mem_lookup(rmem_np); + if (!rmem) { + of_node_put(rmem_np); + ret = -EINVAL; + goto unmap_rmem; + } + of_node_put(rmem_np); + + kproc->rmem[i].bus_addr = rmem->base; + /* 64-bit address regions currently not supported */ + kproc->rmem[i].dev_addr = (u32)rmem->base; + kproc->rmem[i].size = rmem->size; + kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size); + if (!kproc->rmem[i].cpu_addr) { + dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", + i + 1, &rmem->base, &rmem->size); + ret = -ENOMEM; + goto unmap_rmem; + } + + dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", + i + 1, &kproc->rmem[i].bus_addr, + kproc->rmem[i].size, kproc->rmem[i].cpu_addr, + kproc->rmem[i].dev_addr); + } + kproc->num_rmems = num_rmems; + + return 0; + +unmap_rmem: + for (i--; i >= 0; i--) + iounmap(kproc->rmem[i].cpu_addr); + kfree(kproc->rmem); +release_rmem: + of_reserved_mem_device_release(kproc->dev); + return ret; +} + +static void k3_dsp_reserved_mem_exit(struct k3_dsp_rproc *kproc) +{ + int i; + + for (i = 0; i < kproc->num_rmems; i++) + iounmap(kproc->rmem[i].cpu_addr); + kfree(kproc->rmem); + + of_reserved_mem_device_release(kproc->dev); +} + +static +struct ti_sci_proc *k3_dsp_rproc_of_get_tsp(struct device *dev, + const struct ti_sci_handle *sci) +{ + struct ti_sci_proc *tsp; + u32 temp[2]; + int ret; + + ret = of_property_read_u32_array(dev->of_node, "ti,sci-proc-ids", + temp, 2); + if (ret < 0) + return ERR_PTR(ret); + + tsp = kzalloc(sizeof(*tsp), GFP_KERNEL); + if (!tsp) + return ERR_PTR(-ENOMEM); + + tsp->dev = dev; + tsp->sci = sci; + tsp->ops = &sci->ops.proc_ops; + tsp->proc_id = temp[0]; + tsp->host_id = temp[1]; + + return tsp; +} + +static int k3_dsp_rproc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + const struct k3_dsp_dev_data *data; + struct k3_dsp_rproc *kproc; + struct rproc *rproc; + const char *fw_name; + int ret = 0; + int ret1; + + data = of_device_get_match_data(dev); + if (!data) + return -ENODEV; + + ret = rproc_of_parse_firmware(dev, 0, &fw_name); + if (ret) { + dev_err(dev, "failed to parse firmware-name property, ret = %d\n", + ret); + return ret; + } + + rproc = rproc_alloc(dev, dev_name(dev), &k3_dsp_rproc_ops, fw_name, + sizeof(*kproc)); + if (!rproc) + return -ENOMEM; + + rproc->has_iommu = false; + rproc->recovery_disabled = true; + if (data->uses_lreset) { + rproc->ops->prepare = k3_dsp_rproc_prepare; + rproc->ops->unprepare = k3_dsp_rproc_unprepare; + } + kproc = rproc->priv; + kproc->rproc = rproc; + kproc->dev = dev; + kproc->data = data; + + kproc->ti_sci = ti_sci_get_by_phandle(np, "ti,sci"); + if (IS_ERR(kproc->ti_sci)) { + ret = PTR_ERR(kproc->ti_sci); + if (ret != -EPROBE_DEFER) { + dev_err(dev, "failed to get ti-sci handle, ret = %d\n", + ret); + } + kproc->ti_sci = NULL; + goto free_rproc; + } + + ret = of_property_read_u32(np, "ti,sci-dev-id", &kproc->ti_sci_id); + if (ret) { + dev_err(dev, "missing 
'ti,sci-dev-id' property\n"); + goto put_sci; + } + + kproc->reset = devm_reset_control_get_exclusive(dev, NULL); + if (IS_ERR(kproc->reset)) { + ret = PTR_ERR(kproc->reset); + dev_err(dev, "failed to get reset, status = %d\n", ret); + goto put_sci; + } + + kproc->tsp = k3_dsp_rproc_of_get_tsp(dev, kproc->ti_sci); + if (IS_ERR(kproc->tsp)) { + ret = PTR_ERR(kproc->tsp); + dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n", + ret); + goto put_sci; + } + + ret = ti_sci_proc_request(kproc->tsp); + if (ret < 0) { + dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret); + goto free_tsp; + } + + ret = k3_dsp_rproc_of_get_memories(pdev, kproc); + if (ret) + goto release_tsp; + + ret = k3_dsp_reserved_mem_init(kproc); + if (ret) { + dev_err(dev, "reserved memory init failed, ret = %d\n", ret); + goto release_tsp; + } + + /* + * ensure the DSP local reset is asserted to ensure the DSP doesn't + * execute bogus code in .prepare() when the module reset is released. + */ + if (data->uses_lreset) { + ret = reset_control_status(kproc->reset); + if (ret < 0) { + dev_err(dev, "failed to get reset status, status = %d\n", + ret); + goto release_mem; + } else if (ret == 0) { + dev_warn(dev, "local reset is deasserted for device\n"); + k3_dsp_rproc_reset(kproc); + } + } + + ret = rproc_add(rproc); + if (ret) { + dev_err(dev, "failed to register device with remoteproc core, status = %d\n", + ret); + goto release_mem; + } + + platform_set_drvdata(pdev, kproc); + + return 0; + +release_mem: + k3_dsp_reserved_mem_exit(kproc); +release_tsp: + ret1 = ti_sci_proc_release(kproc->tsp); + if (ret1) + dev_err(dev, "failed to release proc, ret = %d\n", ret1); +free_tsp: + kfree(kproc->tsp); +put_sci: + ret1 = ti_sci_put_handle(kproc->ti_sci); + if (ret1) + dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret1); +free_rproc: + rproc_free(rproc); + return ret; +} + +static int k3_dsp_rproc_remove(struct platform_device *pdev) +{ + struct k3_dsp_rproc *kproc = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int ret; + + rproc_del(kproc->rproc); + + ret = ti_sci_proc_release(kproc->tsp); + if (ret) + dev_err(dev, "failed to release proc, ret = %d\n", ret); + + kfree(kproc->tsp); + + ret = ti_sci_put_handle(kproc->ti_sci); + if (ret) + dev_err(dev, "failed to put ti_sci handle, ret = %d\n", ret); + + k3_dsp_reserved_mem_exit(kproc); + rproc_free(kproc->rproc); + + return 0; +} + +static const struct k3_dsp_mem_data c66_mems[] = { + { .name = "l2sram", .dev_addr = 0x800000 }, + { .name = "l1pram", .dev_addr = 0xe00000 }, + { .name = "l1dram", .dev_addr = 0xf00000 }, +}; + +/* C71x cores only have a L1P Cache, there are no L1P SRAMs */ +static const struct k3_dsp_mem_data c71_mems[] = { + { .name = "l2sram", .dev_addr = 0x800000 }, + { .name = "l1dram", .dev_addr = 0xe00000 }, +}; + +static const struct k3_dsp_dev_data c66_data = { + .mems = c66_mems, + .num_mems = ARRAY_SIZE(c66_mems), + .boot_align_addr = SZ_1K, + .uses_lreset = true, +}; + +static const struct k3_dsp_dev_data c71_data = { + .mems = c71_mems, + .num_mems = ARRAY_SIZE(c71_mems), + .boot_align_addr = SZ_2M, + .uses_lreset = false, +}; + +static const struct of_device_id k3_dsp_of_match[] = { + { .compatible = "ti,j721e-c66-dsp", .data = &c66_data, }, + { .compatible = "ti,j721e-c71-dsp", .data = &c71_data, }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, k3_dsp_of_match); + +static struct platform_driver k3_dsp_rproc_driver = { + .probe = k3_dsp_rproc_probe, + .remove = k3_dsp_rproc_remove, + .driver = { + 
.name = "k3-dsp-rproc", + .of_match_table = k3_dsp_of_match, + }, +}; + +module_platform_driver(k3_dsp_rproc_driver); + +MODULE_AUTHOR("Suman Anna <s-anna@ti.com>"); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("TI K3 DSP Remoteproc driver"); diff --git a/drivers/remoteproc/ti_sci_proc.h b/drivers/remoteproc/ti_sci_proc.h new file mode 100644 index 000000000000..778558abcdcc --- /dev/null +++ b/drivers/remoteproc/ti_sci_proc.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Texas Instruments TI-SCI Processor Controller Helper Functions + * + * Copyright (C) 2018-2020 Texas Instruments Incorporated - https://www.ti.com/ + * Suman Anna <s-anna@ti.com> + */ + +#ifndef REMOTEPROC_TI_SCI_PROC_H +#define REMOTEPROC_TI_SCI_PROC_H + +#include <linux/soc/ti/ti_sci_protocol.h> + +/** + * struct ti_sci_proc - structure representing a processor control client + * @sci: cached TI-SCI protocol handle + * @ops: cached TI-SCI proc ops + * @dev: cached client device pointer + * @proc_id: processor id for the consumer remoteproc device + * @host_id: host id to pass the control over for this consumer remoteproc + * device + */ +struct ti_sci_proc { + const struct ti_sci_handle *sci; + const struct ti_sci_proc_ops *ops; + struct device *dev; + u8 proc_id; + u8 host_id; +}; + +static inline int ti_sci_proc_request(struct ti_sci_proc *tsp) +{ + int ret; + + ret = tsp->ops->request(tsp->sci, tsp->proc_id); + if (ret) + dev_err(tsp->dev, "ti-sci processor request failed: %d\n", + ret); + return ret; +} + +static inline int ti_sci_proc_release(struct ti_sci_proc *tsp) +{ + int ret; + + ret = tsp->ops->release(tsp->sci, tsp->proc_id); + if (ret) + dev_err(tsp->dev, "ti-sci processor release failed: %d\n", + ret); + return ret; +} + +static inline int ti_sci_proc_handover(struct ti_sci_proc *tsp) +{ + int ret; + + ret = tsp->ops->handover(tsp->sci, tsp->proc_id, tsp->host_id); + if (ret) + dev_err(tsp->dev, "ti-sci processor handover of %d to %d failed: %d\n", + tsp->proc_id, tsp->host_id, ret); + return ret; +} + +static inline int ti_sci_proc_set_config(struct ti_sci_proc *tsp, + u64 boot_vector, + u32 cfg_set, u32 cfg_clr) +{ + int ret; + + ret = tsp->ops->set_config(tsp->sci, tsp->proc_id, boot_vector, + cfg_set, cfg_clr); + if (ret) + dev_err(tsp->dev, "ti-sci processor set_config failed: %d\n", + ret); + return ret; +} + +static inline int ti_sci_proc_set_control(struct ti_sci_proc *tsp, + u32 ctrl_set, u32 ctrl_clr) +{ + int ret; + + ret = tsp->ops->set_control(tsp->sci, tsp->proc_id, ctrl_set, ctrl_clr); + if (ret) + dev_err(tsp->dev, "ti-sci processor set_control failed: %d\n", + ret); + return ret; +} + +static inline int ti_sci_proc_get_status(struct ti_sci_proc *tsp, + u64 *boot_vector, u32 *cfg_flags, + u32 *ctrl_flags, u32 *status_flags) +{ + int ret; + + ret = tsp->ops->get_status(tsp->sci, tsp->proc_id, boot_vector, + cfg_flags, ctrl_flags, status_flags); + if (ret) + dev_err(tsp->dev, "ti-sci processor get_status failed: %d\n", + ret); + return ret; +} + +#endif /* REMOTEPROC_TI_SCI_PROC_H */ diff --git a/drivers/rpmsg/virtio_rpmsg_bus.c b/drivers/rpmsg/virtio_rpmsg_bus.c index 07d4f3374098..9006fc7f73d0 100644 --- a/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/drivers/rpmsg/virtio_rpmsg_bus.c @@ -23,6 +23,7 @@ #include <linux/slab.h> #include <linux/sched.h> #include <linux/virtio.h> +#include <linux/virtio_byteorder.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/wait.h> @@ -84,11 +85,11 @@ struct virtproc_info { * Every message sent(/received) 
on the rpmsg bus begins with this header. */ struct rpmsg_hdr { - u32 src; - u32 dst; - u32 reserved; - u16 len; - u16 flags; + __virtio32 src; + __virtio32 dst; + __virtio32 reserved; + __virtio16 len; + __virtio16 flags; u8 data[]; } __packed; @@ -106,8 +107,8 @@ struct rpmsg_hdr { */ struct rpmsg_ns_msg { char name[RPMSG_NAME_SIZE]; - u32 addr; - u32 flags; + __virtio32 addr; + __virtio32 flags; } __packed; /** @@ -335,8 +336,8 @@ static int virtio_rpmsg_announce_create(struct rpmsg_device *rpdev) struct rpmsg_ns_msg nsm; strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE); - nsm.addr = rpdev->ept->addr; - nsm.flags = RPMSG_NS_CREATE; + nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr); + nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_CREATE); err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR); if (err) @@ -359,8 +360,8 @@ static int virtio_rpmsg_announce_destroy(struct rpmsg_device *rpdev) struct rpmsg_ns_msg nsm; strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE); - nsm.addr = rpdev->ept->addr; - nsm.flags = RPMSG_NS_DESTROY; + nsm.addr = cpu_to_virtio32(vrp->vdev, rpdev->ept->addr); + nsm.flags = cpu_to_virtio32(vrp->vdev, RPMSG_NS_DESTROY); err = rpmsg_sendto(rpdev->ept, &nsm, sizeof(nsm), RPMSG_NS_ADDR); if (err) @@ -612,18 +613,18 @@ static int rpmsg_send_offchannel_raw(struct rpmsg_device *rpdev, } } - msg->len = len; + msg->len = cpu_to_virtio16(vrp->vdev, len); msg->flags = 0; - msg->src = src; - msg->dst = dst; + msg->src = cpu_to_virtio32(vrp->vdev, src); + msg->dst = cpu_to_virtio32(vrp->vdev, dst); msg->reserved = 0; memcpy(msg->data, data, len); dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n", - msg->src, msg->dst, msg->len, msg->flags, msg->reserved); + src, dst, len, msg->flags, msg->reserved); #if defined(CONFIG_DYNAMIC_DEBUG) dynamic_hex_dump("rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1, - msg, sizeof(*msg) + msg->len, true); + msg, sizeof(*msg) + len, true); #endif rpmsg_sg_init(&sg, msg, sizeof(*msg) + len); @@ -704,13 +705,17 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, { struct rpmsg_endpoint *ept; struct scatterlist sg; + unsigned int msg_len = virtio16_to_cpu(vrp->vdev, msg->len); int err; dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n", - msg->src, msg->dst, msg->len, msg->flags, msg->reserved); + virtio32_to_cpu(vrp->vdev, msg->src), + virtio32_to_cpu(vrp->vdev, msg->dst), msg_len, + virtio16_to_cpu(vrp->vdev, msg->flags), + virtio32_to_cpu(vrp->vdev, msg->reserved)); #if defined(CONFIG_DYNAMIC_DEBUG) dynamic_hex_dump("rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1, - msg, sizeof(*msg) + msg->len, true); + msg, sizeof(*msg) + msg_len, true); #endif /* @@ -718,15 +723,15 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, * the reported payload length. 
*/ if (len > vrp->buf_size || - msg->len > (len - sizeof(struct rpmsg_hdr))) { - dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len); + msg_len > (len - sizeof(struct rpmsg_hdr))) { + dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg_len); return -EINVAL; } /* use the dst addr to fetch the callback of the appropriate user */ mutex_lock(&vrp->endpoints_lock); - ept = idr_find(&vrp->endpoints, msg->dst); + ept = idr_find(&vrp->endpoints, virtio32_to_cpu(vrp->vdev, msg->dst)); /* let's make sure no one deallocates ept while we use it */ if (ept) @@ -739,8 +744,8 @@ static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, mutex_lock(&ept->cb_lock); if (ept->cb) - ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, - msg->src); + ept->cb(ept->rpdev, msg->data, msg_len, ept->priv, + virtio32_to_cpu(vrp->vdev, msg->src)); mutex_unlock(&ept->cb_lock); @@ -846,15 +851,15 @@ static int rpmsg_ns_cb(struct rpmsg_device *rpdev, void *data, int len, /* don't trust the remote processor for null terminating the name */ msg->name[RPMSG_NAME_SIZE - 1] = '\0'; - dev_info(dev, "%sing channel %s addr 0x%x\n", - msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat", - msg->name, msg->addr); - strncpy(chinfo.name, msg->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; - chinfo.dst = msg->addr; + chinfo.dst = virtio32_to_cpu(vrp->vdev, msg->addr); + + dev_info(dev, "%sing channel %s addr 0x%x\n", + virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY ? + "destroy" : "creat", msg->name, chinfo.dst); - if (msg->flags & RPMSG_NS_DESTROY) { + if (virtio32_to_cpu(vrp->vdev, msg->flags) & RPMSG_NS_DESTROY) { ret = rpmsg_unregister_device(&vrp->vdev->dev, &chinfo); if (ret) dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index f3b8e6dcd879..48c536acd777 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -281,7 +281,8 @@ config RTC_DRV_DS1374 config RTC_DRV_DS1374_WDT bool "Dallas/Maxim DS1374 watchdog timer" - depends on RTC_DRV_DS1374 + depends on RTC_DRV_DS1374 && WATCHDOG + select WATCHDOG_CORE help If you say Y here you will get support for the watchdog timer in the Dallas Semiconductor DS1374 diff --git a/drivers/rtc/rtc-ab-b5ze-s3.c b/drivers/rtc/rtc-ab-b5ze-s3.c index 811fe2005488..2370ac0cdb5f 100644 --- a/drivers/rtc/rtc-ab-b5ze-s3.c +++ b/drivers/rtc/rtc-ab-b5ze-s3.c @@ -7,7 +7,7 @@ * * Detailed datasheet of the chip is available here: * - * http://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf + * https://www.abracon.com/realtimeclock/AB-RTCMC-32.768kHz-B5ZE-S3-Application-Manual.pdf * * This work is based on ISL12057 driver (drivers/rtc/rtc-isl12057.c). 
* diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c index 4a63f0cd2321..933e4237237d 100644 --- a/drivers/rtc/rtc-bq32k.c +++ b/drivers/rtc/rtc-bq32k.c @@ -6,7 +6,7 @@ * Copyright (C) 2014 Pavel Machek <pavel@denx.de> * * You can get hardware description at - * http://www.ti.com/lit/ds/symlink/bq32000.pdf + * https://www.ti.com/lit/ds/symlink/bq32000.pdf */ #include <linux/module.h> diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c index a603f1f21125..800667d73a6f 100644 --- a/drivers/rtc/rtc-cpcap.c +++ b/drivers/rtc/rtc-cpcap.c @@ -261,7 +261,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev) return PTR_ERR(rtc->rtc_dev); rtc->rtc_dev->ops = &cpcap_rtc_ops; - rtc->rtc_dev->range_max = (1 << 14) * SECS_PER_DAY - 1; + rtc->rtc_dev->range_max = (timeu64_t) (DAY_MASK + 1) * SECS_PER_DAY - 1; err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor); if (err) diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 49702942bb08..54c85cdd019d 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -1668,6 +1668,8 @@ static const struct watchdog_ops ds1388_wdt_ops = { static void ds1307_wdt_register(struct ds1307 *ds1307) { struct watchdog_device *wdt; + int err; + int val; if (ds1307->type != ds_1388) return; @@ -1676,6 +1678,10 @@ static void ds1307_wdt_register(struct ds1307 *ds1307) if (!wdt) return; + err = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &val); + if (!err && val & DS1388_BIT_WF) + wdt->bootstatus = WDIOF_CARDRESET; + wdt->info = &ds1388_wdt_info; wdt->ops = &ds1388_wdt_ops; wdt->timeout = 99; diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 9c51a12cf70f..177d870bda0d 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -46,6 +46,7 @@ #define DS1374_REG_WDALM2 0x06 #define DS1374_REG_CR 0x07 /* Control */ #define DS1374_REG_CR_AIE 0x01 /* Alarm Int. Enable */ +#define DS1374_REG_CR_WDSTR 0x08 /* 1=INT, 0=RST */ #define DS1374_REG_CR_WDALM 0x20 /* 1=Watchdog, 0=Alarm */ #define DS1374_REG_CR_WACE 0x40 /* WD/Alarm counter enable */ #define DS1374_REG_SR 0x08 /* Status */ @@ -71,7 +72,9 @@ struct ds1374 { struct i2c_client *client; struct rtc_device *rtc; struct work_struct work; - +#ifdef CONFIG_RTC_DRV_DS1374_WDT + struct watchdog_device wdt; +#endif /* The mutex protects alarm operations, and prevents a race * between the enable_irq() in the workqueue and the free_irq() * in the remove function. 
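Before the next rtc-ds1374.c hunk, a hedged aside on the unit conversion it introduces: the DS1374 watchdog counter runs at 4096 Hz and is 24 bits wide, which is why the rewritten ds1374_wdt_settimeout() below multiplies the timeout in seconds by 4096 and why TIMER_MARGIN_MAX is 4095. A minimal illustration, not part of the patch:

#include <linux/types.h>

/* Sketch only: the seconds-to-ticks scaling used by the new set_timeout path. */
static inline u32 ds1374_example_secs_to_ticks(unsigned int secs)
{
	return secs * 4096;	/* 4095 * 4096 = 16773120, still below 2^24 */
}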
@@ -369,238 +372,96 @@ static const struct rtc_class_ops ds1374_rtc_ops = { * ***************************************************************************** */ -static struct i2c_client *save_client; /* Default margin */ -#define WD_TIMO 131762 +#define TIMER_MARGIN_DEFAULT 32 +#define TIMER_MARGIN_MIN 1 +#define TIMER_MARGIN_MAX 4095 /* 24-bit value */ -#define DRV_NAME "DS1374 Watchdog" - -static int wdt_margin = WD_TIMO; -static unsigned long wdt_is_open; +static int wdt_margin; module_param(wdt_margin, int, 0); MODULE_PARM_DESC(wdt_margin, "Watchdog timeout in seconds (default 32s)"); +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default =" + __MODULE_STRING(WATCHDOG_NOWAYOUT)")"); + static const struct watchdog_info ds1374_wdt_info = { - .identity = "DS1374 WTD", + .identity = "DS1374 Watchdog", .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, }; -static int ds1374_wdt_settimeout(unsigned int timeout) +static int ds1374_wdt_settimeout(struct watchdog_device *wdt, unsigned int timeout) { - int ret = -ENOIOCTLCMD; - int cr; + struct ds1374 *ds1374 = watchdog_get_drvdata(wdt); + struct i2c_client *client = ds1374->client; + int ret, cr; - ret = cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR); - if (ret < 0) - goto out; + wdt->timeout = timeout; + + cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR); + if (cr < 0) + return cr; /* Disable any existing watchdog/alarm before setting the new one */ cr &= ~DS1374_REG_CR_WACE; - ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr); + ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr); if (ret < 0) - goto out; + return ret; /* Set new watchdog time */ - ret = ds1374_write_rtc(save_client, timeout, DS1374_REG_WDALM0, 3); - if (ret) { - pr_info("couldn't set new watchdog time\n"); - goto out; - } + timeout = timeout * 4096; + ret = ds1374_write_rtc(client, timeout, DS1374_REG_WDALM0, 3); + if (ret) + return ret; /* Enable watchdog timer */ cr |= DS1374_REG_CR_WACE | DS1374_REG_CR_WDALM; + cr &= ~DS1374_REG_CR_WDSTR;/* for RST PIN */ cr &= ~DS1374_REG_CR_AIE; - ret = i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr); + ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr); if (ret < 0) - goto out; + return ret; return 0; -out: - return ret; } - /* * Reload the watchdog timer. (ie, pat the watchdog) */ -static void ds1374_wdt_ping(void) +static int ds1374_wdt_start(struct watchdog_device *wdt) { + struct ds1374 *ds1374 = watchdog_get_drvdata(wdt); u32 val; - int ret = 0; - ret = ds1374_read_rtc(save_client, &val, DS1374_REG_WDALM0, 3); - if (ret) - pr_info("WD TICK FAIL!!!!!!!!!! %i\n", ret); + return ds1374_read_rtc(ds1374->client, &val, DS1374_REG_WDALM0, 3); } -static void ds1374_wdt_disable(void) +static int ds1374_wdt_stop(struct watchdog_device *wdt) { + struct ds1374 *ds1374 = watchdog_get_drvdata(wdt); + struct i2c_client *client = ds1374->client; int cr; - cr = i2c_smbus_read_byte_data(save_client, DS1374_REG_CR); + cr = i2c_smbus_read_byte_data(client, DS1374_REG_CR); + if (cr < 0) + return cr; + /* Disable watchdog timer */ cr &= ~DS1374_REG_CR_WACE; - i2c_smbus_write_byte_data(save_client, DS1374_REG_CR, cr); -} - -/* - * Watchdog device is opened, and watchdog starts running. 
- */ -static int ds1374_wdt_open(struct inode *inode, struct file *file) -{ - struct ds1374 *ds1374 = i2c_get_clientdata(save_client); - - if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) { - mutex_lock(&ds1374->mutex); - if (test_and_set_bit(0, &wdt_is_open)) { - mutex_unlock(&ds1374->mutex); - return -EBUSY; - } - /* - * Activate - */ - wdt_is_open = 1; - mutex_unlock(&ds1374->mutex); - return stream_open(inode, file); - } - return -ENODEV; -} - -/* - * Close the watchdog device. - */ -static int ds1374_wdt_release(struct inode *inode, struct file *file) -{ - if (MINOR(inode->i_rdev) == WATCHDOG_MINOR) - clear_bit(0, &wdt_is_open); - - return 0; -} - -/* - * Pat the watchdog whenever device is written to. - */ -static ssize_t ds1374_wdt_write(struct file *file, const char __user *data, - size_t len, loff_t *ppos) -{ - if (len) { - ds1374_wdt_ping(); - return 1; - } - return 0; -} - -static ssize_t ds1374_wdt_read(struct file *file, char __user *data, - size_t len, loff_t *ppos) -{ - return 0; -} - -/* - * Handle commands from user-space. - */ -static long ds1374_wdt_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - int new_margin, options; - - switch (cmd) { - case WDIOC_GETSUPPORT: - return copy_to_user((struct watchdog_info __user *)arg, - &ds1374_wdt_info, sizeof(ds1374_wdt_info)) ? -EFAULT : 0; - - case WDIOC_GETSTATUS: - case WDIOC_GETBOOTSTATUS: - return put_user(0, (int __user *)arg); - case WDIOC_KEEPALIVE: - ds1374_wdt_ping(); - return 0; - case WDIOC_SETTIMEOUT: - if (get_user(new_margin, (int __user *)arg)) - return -EFAULT; - - /* the hardware's tick rate is 4096 Hz, so - * the counter value needs to be scaled accordingly - */ - new_margin <<= 12; - if (new_margin < 1 || new_margin > 16777216) - return -EINVAL; - - wdt_margin = new_margin; - ds1374_wdt_settimeout(new_margin); - ds1374_wdt_ping(); - /* fallthrough */ - case WDIOC_GETTIMEOUT: - /* when returning ... 
inverse is true */ - return put_user((wdt_margin >> 12), (int __user *)arg); - case WDIOC_SETOPTIONS: - if (copy_from_user(&options, (int __user *)arg, sizeof(int))) - return -EFAULT; - - if (options & WDIOS_DISABLECARD) { - pr_info("disable watchdog\n"); - ds1374_wdt_disable(); - return 0; - } - - if (options & WDIOS_ENABLECARD) { - pr_info("enable watchdog\n"); - ds1374_wdt_settimeout(wdt_margin); - ds1374_wdt_ping(); - return 0; - } - return -EINVAL; - } - return -ENOTTY; + return i2c_smbus_write_byte_data(client, DS1374_REG_CR, cr); } -static long ds1374_wdt_unlocked_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - int ret; - struct ds1374 *ds1374 = i2c_get_clientdata(save_client); - - mutex_lock(&ds1374->mutex); - ret = ds1374_wdt_ioctl(file, cmd, arg); - mutex_unlock(&ds1374->mutex); - - return ret; -} - -static int ds1374_wdt_notify_sys(struct notifier_block *this, - unsigned long code, void *unused) -{ - if (code == SYS_DOWN || code == SYS_HALT) - /* Disable Watchdog */ - ds1374_wdt_disable(); - return NOTIFY_DONE; -} - -static const struct file_operations ds1374_wdt_fops = { - .owner = THIS_MODULE, - .read = ds1374_wdt_read, - .unlocked_ioctl = ds1374_wdt_unlocked_ioctl, - .compat_ioctl = compat_ptr_ioctl, - .write = ds1374_wdt_write, - .open = ds1374_wdt_open, - .release = ds1374_wdt_release, - .llseek = no_llseek, -}; - -static struct miscdevice ds1374_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &ds1374_wdt_fops, -}; - -static struct notifier_block ds1374_wdt_notifier = { - .notifier_call = ds1374_wdt_notify_sys, +static const struct watchdog_ops ds1374_wdt_ops = { + .owner = THIS_MODULE, + .start = ds1374_wdt_start, + .stop = ds1374_wdt_stop, + .set_timeout = ds1374_wdt_settimeout, }; - #endif /*CONFIG_RTC_DRV_DS1374_WDT*/ /* ***************************************************************************** @@ -652,16 +513,22 @@ static int ds1374_probe(struct i2c_client *client, return ret; #ifdef CONFIG_RTC_DRV_DS1374_WDT - save_client = client; - ret = misc_register(&ds1374_miscdev); + ds1374->wdt.info = &ds1374_wdt_info; + ds1374->wdt.ops = &ds1374_wdt_ops; + ds1374->wdt.timeout = TIMER_MARGIN_DEFAULT; + ds1374->wdt.min_timeout = TIMER_MARGIN_MIN; + ds1374->wdt.max_timeout = TIMER_MARGIN_MAX; + + watchdog_init_timeout(&ds1374->wdt, wdt_margin, &client->dev); + watchdog_set_nowayout(&ds1374->wdt, nowayout); + watchdog_stop_on_reboot(&ds1374->wdt); + watchdog_stop_on_unregister(&ds1374->wdt); + watchdog_set_drvdata(&ds1374->wdt, ds1374); + ds1374_wdt_settimeout(&ds1374->wdt, ds1374->wdt.timeout); + + ret = devm_watchdog_register_device(&client->dev, &ds1374->wdt); if (ret) return ret; - ret = register_reboot_notifier(&ds1374_wdt_notifier); - if (ret) { - misc_deregister(&ds1374_miscdev); - return ret; - } - ds1374_wdt_settimeout(131072); #endif return 0; @@ -670,11 +537,6 @@ static int ds1374_probe(struct i2c_client *client, static int ds1374_remove(struct i2c_client *client) { struct ds1374 *ds1374 = i2c_get_clientdata(client); -#ifdef CONFIG_RTC_DRV_DS1374_WDT - misc_deregister(&ds1374_miscdev); - ds1374_miscdev.parent = NULL; - unregister_reboot_notifier(&ds1374_wdt_notifier); -#endif if (client->irq > 0) { mutex_lock(&ds1374->mutex); diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c index 27797157fcb3..6349d2cd3680 100644 --- a/drivers/rtc/rtc-goldfish.c +++ b/drivers/rtc/rtc-goldfish.c @@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev, rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * 
NSEC_PER_SEC; writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH); writel(rtc_alarm64, base + TIMER_ALARM_LOW); + writel(1, base + TIMER_IRQ_ENABLED); } else { /* * if this function was called with enabled=0 diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index f21dc6b16d88..8d141d8a5490 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -95,7 +95,7 @@ /** * struct imxdi_dev - private imxdi rtc data - * @pdev: pionter to platform dev + * @pdev: pointer to platform dev * @rtc: pointer to rtc struct * @ioaddr: IO registers pointer * @clk: input reference clock @@ -350,7 +350,7 @@ static int di_handle_invalid_and_failure_state(struct imxdi_dev *imxdi, u32 dsr) * the tamper register is locked. We cannot disable the * tamper detection. The TDCHL can only be reset by a * DRYICE POR, but we cannot force a DRYICE POR in - * softwere because we are still in "FAILURE STATE". + * software because we are still in "FAILURE STATE". * We need a DRYICE POR via battery power cycling.... */ /* diff --git a/drivers/rtc/rtc-max77686.c b/drivers/rtc/rtc-max77686.c index 03ebcf1c0f3d..d51cc12114cb 100644 --- a/drivers/rtc/rtc-max77686.c +++ b/drivers/rtc/rtc-max77686.c @@ -805,17 +805,36 @@ static int max77686_rtc_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int max77686_rtc_suspend(struct device *dev) { + struct max77686_rtc_info *info = dev_get_drvdata(dev); + int ret = 0; + if (device_may_wakeup(dev)) { struct max77686_rtc_info *info = dev_get_drvdata(dev); - return enable_irq_wake(info->virq); + ret = enable_irq_wake(info->virq); } - return 0; + /* + * If the main IRQ (not virtual) is the parent IRQ, then it must be + * disabled during suspend because if it happens while suspended it + * will be handled before resuming I2C. + * + * Since Main IRQ is shared, all its users should disable it to be sure + * it won't fire while one of them is still suspended. 
+ */ + if (!info->drv_data->rtc_irq_from_platform) + disable_irq(info->rtc_irq); + + return ret; } static int max77686_rtc_resume(struct device *dev) { + struct max77686_rtc_info *info = dev_get_drvdata(dev); + + if (!info->drv_data->rtc_irq_from_platform) + enable_irq(info->rtc_irq); + if (device_may_wakeup(dev)) { struct max77686_rtc_info *info = dev_get_drvdata(dev); diff --git a/drivers/rtc/rtc-mcp795.c b/drivers/rtc/rtc-mcp795.c index 1660d5e79582..21cbf7f892e8 100644 --- a/drivers/rtc/rtc-mcp795.c +++ b/drivers/rtc/rtc-mcp795.c @@ -7,7 +7,7 @@ * based on other Linux RTC drivers * * Device datasheet: - * http://ww1.microchip.com/downloads/en/DeviceDoc/22280A.pdf + * https://ww1.microchip.com/downloads/en/DeviceDoc/22280A.pdf */ #include <linux/module.h> diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 9c5670776c68..ed6316992cbb 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -20,6 +20,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_irq.h> #include <linux/regmap.h> #include <linux/watchdog.h> @@ -28,8 +29,11 @@ #define PCF2127_BIT_CTRL1_TSF1 BIT(4) /* Control register 2 */ #define PCF2127_REG_CTRL2 0x01 +#define PCF2127_BIT_CTRL2_AIE BIT(1) #define PCF2127_BIT_CTRL2_TSIE BIT(2) +#define PCF2127_BIT_CTRL2_AF BIT(4) #define PCF2127_BIT_CTRL2_TSF2 BIT(5) +#define PCF2127_BIT_CTRL2_WDTF BIT(6) /* Control register 3 */ #define PCF2127_REG_CTRL3 0x02 #define PCF2127_BIT_CTRL3_BLIE BIT(0) @@ -46,6 +50,13 @@ #define PCF2127_REG_DW 0x07 #define PCF2127_REG_MO 0x08 #define PCF2127_REG_YR 0x09 +/* Alarm registers */ +#define PCF2127_REG_ALARM_SC 0x0A +#define PCF2127_REG_ALARM_MN 0x0B +#define PCF2127_REG_ALARM_HR 0x0C +#define PCF2127_REG_ALARM_DM 0x0D +#define PCF2127_REG_ALARM_DW 0x0E +#define PCF2127_BIT_ALARM_AE BIT(7) /* Watchdog registers */ #define PCF2127_REG_WD_CTL 0x10 #define PCF2127_BIT_WD_CTL_TF0 BIT(0) @@ -324,6 +335,112 @@ static const struct watchdog_ops pcf2127_watchdog_ops = { .set_timeout = pcf2127_wdt_set_timeout, }; +/* Alarm */ +static int pcf2127_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct pcf2127 *pcf2127 = dev_get_drvdata(dev); + unsigned int buf[5], ctrl2; + int ret; + + ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2); + if (ret) + return ret; + + ret = pcf2127_wdt_active_ping(&pcf2127->wdd); + if (ret) + return ret; + + ret = regmap_bulk_read(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf, + sizeof(buf)); + if (ret) + return ret; + + alrm->enabled = ctrl2 & PCF2127_BIT_CTRL2_AIE; + alrm->pending = ctrl2 & PCF2127_BIT_CTRL2_AF; + + alrm->time.tm_sec = bcd2bin(buf[0] & 0x7F); + alrm->time.tm_min = bcd2bin(buf[1] & 0x7F); + alrm->time.tm_hour = bcd2bin(buf[2] & 0x3F); + alrm->time.tm_mday = bcd2bin(buf[3] & 0x3F); + + return 0; +} + +static int pcf2127_rtc_alarm_irq_enable(struct device *dev, u32 enable) +{ + struct pcf2127 *pcf2127 = dev_get_drvdata(dev); + int ret; + + ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2, + PCF2127_BIT_CTRL2_AIE, + enable ? 
PCF2127_BIT_CTRL2_AIE : 0); + if (ret) + return ret; + + return pcf2127_wdt_active_ping(&pcf2127->wdd); +} + +static int pcf2127_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct pcf2127 *pcf2127 = dev_get_drvdata(dev); + uint8_t buf[5]; + int ret; + + ret = regmap_update_bits(pcf2127->regmap, PCF2127_REG_CTRL2, + PCF2127_BIT_CTRL2_AF, 0); + if (ret) + return ret; + + ret = pcf2127_wdt_active_ping(&pcf2127->wdd); + if (ret) + return ret; + + buf[0] = bin2bcd(alrm->time.tm_sec); + buf[1] = bin2bcd(alrm->time.tm_min); + buf[2] = bin2bcd(alrm->time.tm_hour); + buf[3] = bin2bcd(alrm->time.tm_mday); + buf[4] = PCF2127_BIT_ALARM_AE; /* Do not match on week day */ + + ret = regmap_bulk_write(pcf2127->regmap, PCF2127_REG_ALARM_SC, buf, + sizeof(buf)); + if (ret) + return ret; + + return pcf2127_rtc_alarm_irq_enable(dev, alrm->enabled); +} + +static irqreturn_t pcf2127_rtc_irq(int irq, void *dev) +{ + struct pcf2127 *pcf2127 = dev_get_drvdata(dev); + unsigned int ctrl2 = 0; + int ret = 0; + + ret = regmap_read(pcf2127->regmap, PCF2127_REG_CTRL2, &ctrl2); + if (ret) + return IRQ_NONE; + + if (!(ctrl2 & PCF2127_BIT_CTRL2_AF)) + return IRQ_NONE; + + regmap_write(pcf2127->regmap, PCF2127_REG_CTRL2, + ctrl2 & ~(PCF2127_BIT_CTRL2_AF | PCF2127_BIT_CTRL2_WDTF)); + + rtc_update_irq(pcf2127->rtc, 1, RTC_IRQF | RTC_AF); + + pcf2127_wdt_active_ping(&pcf2127->wdd); + + return IRQ_HANDLED; +} + +static const struct rtc_class_ops pcf2127_rtc_alrm_ops = { + .ioctl = pcf2127_rtc_ioctl, + .read_time = pcf2127_rtc_read_time, + .set_time = pcf2127_rtc_set_time, + .read_alarm = pcf2127_rtc_read_alarm, + .set_alarm = pcf2127_rtc_set_alarm, + .alarm_irq_enable = pcf2127_rtc_alarm_irq_enable, +}; + /* sysfs interface */ static ssize_t timestamp0_store(struct device *dev, @@ -416,7 +533,7 @@ static const struct attribute_group pcf2127_attr_group = { }; static int pcf2127_probe(struct device *dev, struct regmap *regmap, - const char *name, bool has_nvmem) + int alarm_irq, const char *name, bool has_nvmem) { struct pcf2127 *pcf2127; u32 wdd_timeout; @@ -440,6 +557,23 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap, pcf2127->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; pcf2127->rtc->range_max = RTC_TIMESTAMP_END_2099; pcf2127->rtc->set_start_time = true; /* Sets actual start to 1970 */ + pcf2127->rtc->uie_unsupported = 1; + + if (alarm_irq >= 0) { + ret = devm_request_threaded_irq(dev, alarm_irq, NULL, + pcf2127_rtc_irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + dev_name(dev), dev); + if (ret) { + dev_err(dev, "failed to request alarm irq\n"); + return ret; + } + } + + if (alarm_irq >= 0 || device_property_read_bool(dev, "wakeup-source")) { + device_init_wakeup(dev, true); + pcf2127->rtc->ops = &pcf2127_rtc_alrm_ops; + } pcf2127->wdd.parent = dev; pcf2127->wdd.info = &pcf2127_wdt_info; @@ -553,6 +687,7 @@ static int pcf2127_probe(struct device *dev, struct regmap *regmap, static const struct of_device_id pcf2127_of_match[] = { { .compatible = "nxp,pcf2127" }, { .compatible = "nxp,pcf2129" }, + { .compatible = "nxp,pca2129" }, {} }; MODULE_DEVICE_TABLE(of, pcf2127_of_match); @@ -657,13 +792,14 @@ static int pcf2127_i2c_probe(struct i2c_client *client, return PTR_ERR(regmap); } - return pcf2127_probe(&client->dev, regmap, + return pcf2127_probe(&client->dev, regmap, client->irq, pcf2127_i2c_driver.driver.name, id->driver_data); } static const struct i2c_device_id pcf2127_i2c_id[] = { { "pcf2127", 1 }, { "pcf2129", 0 }, + { "pca2129", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, pcf2127_i2c_id); @@ 
-722,13 +858,15 @@ static int pcf2127_spi_probe(struct spi_device *spi) return PTR_ERR(regmap); } - return pcf2127_probe(&spi->dev, regmap, pcf2127_spi_driver.driver.name, + return pcf2127_probe(&spi->dev, regmap, spi->irq, + pcf2127_spi_driver.driver.name, spi_get_device_id(spi)->driver_data); } static const struct spi_device_id pcf2127_spi_id[] = { { "pcf2127", 1 }, { "pcf2129", 0 }, + { "pca2129", 0 }, { } }; MODULE_DEVICE_TABLE(spi, pcf2127_spi_id); diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 7a87f461bec8..ca55ba975aeb 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -21,8 +21,8 @@ /* * Information for this driver was pulled from the following datasheets. * - * http://www.nxp.com/documents/data_sheet/PCF85063A.pdf - * http://www.nxp.com/documents/data_sheet/PCF85063TP.pdf + * https://www.nxp.com/documents/data_sheet/PCF85063A.pdf + * https://www.nxp.com/documents/data_sheet/PCF85063TP.pdf * * PCF85063A -- Rev. 6 — 18 November 2015 * PCF85063TP -- Rev. 4 — 6 May 2015 diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 40d7450a1ce4..c6b89273feba 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -275,6 +275,7 @@ static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) struct pl031_local *ldata = dev_get_drvdata(dev); writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR); + pl031_alarm_irq_enable(dev, alarm->enabled); return 0; } diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index d5880f52dc2b..5896e5282a4e 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -818,7 +818,7 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns, static int pkey_apqns4key(const u8 *key, size_t keylen, u32 flags, struct pkey_apqn *apqns, size_t *nr_apqns) { - int rc = EINVAL; + int rc; u32 _nr_apqns, *_apqns = NULL; struct keytoken_header *hdr = (struct keytoken_header *)key; @@ -886,7 +886,7 @@ static int pkey_apqns4keytype(enum pkey_key_type ktype, u8 cur_mkvp[32], u8 alt_mkvp[32], u32 flags, struct pkey_apqn *apqns, size_t *nr_apqns) { - int rc = -EINVAL; + int rc; u32 _nr_apqns, *_apqns = NULL; if (ktype == PKEY_TYPE_CCA_DATA || ktype == PKEY_TYPE_CCA_CIPHER) { diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index f043e378652c..8588da0a0655 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -2002,7 +2002,7 @@ static void aac_remove_one(struct pci_dev *pdev) } static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev, - enum pci_channel_state error) + pci_channel_state_t error) { struct Scsi_Host *shost = pci_get_drvdata(pdev); struct aac_dev *aac = shost_priv(shost); diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index 85c7959961cc..1409c7687853 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -256,9 +256,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new) WARN_ON(!fcf_dev); new->fcf_dev = NULL; fcoe_fcf_device_delete(fcf_dev); - kfree(new); mutex_unlock(&cdev->lock); } + kfree(new); } /** diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index 19721db23283..d8cbc9c0e766 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, if (PTR_ERR(fp) == -FC_EX_CLOSED) goto out; - if (IS_ERR(fp)) - goto redisc; + if (IS_ERR(fp)) { + 
mutex_lock(&disc->disc_mutex); + fc_disc_restart(disc); + mutex_unlock(&disc->disc_mutex); + goto out; + } cp = fc_frame_payload_get(fp, sizeof(*cp)); if (!cp) @@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp, new_rdata->disc_id = disc->disc_id; fc_rport_login(new_rdata); } - goto out; + goto free_fp; } rdata->disc_id = disc->disc_id; mutex_unlock(&rdata->rp_mutex); @@ -626,6 +630,8 @@ redisc: fc_disc_restart(disc); mutex_unlock(&disc->disc_mutex); } +free_fp: + fc_frame_free(fp); out: kref_put(&rdata->kref, fc_rport_destroy); if (!IS_ERR(fp)) diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index a62c60ca6477..ece6c250ebaf 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -6679,9 +6679,15 @@ lpfc_get_host_speed(struct Scsi_Host *shost) } } else if (lpfc_is_link_up(phba) && (phba->hba_flag & HBA_FCOE_MODE)) { switch (phba->fc_linkspeed) { + case LPFC_ASYNC_LINK_SPEED_1GBPS: + fc_host_speed(shost) = FC_PORTSPEED_1GBIT; + break; case LPFC_ASYNC_LINK_SPEED_10GBPS: fc_host_speed(shost) = FC_PORTSPEED_10GBIT; break; + case LPFC_ASYNC_LINK_SPEED_20GBPS: + fc_host_speed(shost) = FC_PORTSPEED_20GBIT; + break; case LPFC_ASYNC_LINK_SPEED_25GBPS: fc_host_speed(shost) = FC_PORTSPEED_25GBIT; break; @@ -7406,12 +7412,26 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) void lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) { - if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) + int logit = 0; + + if (phba->cfg_hdw_queue > phba->sli4_hba.num_present_cpu) { phba->cfg_hdw_queue = phba->sli4_hba.num_present_cpu; - if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) + logit = 1; + } + if (phba->cfg_irq_chann > phba->sli4_hba.num_present_cpu) { phba->cfg_irq_chann = phba->sli4_hba.num_present_cpu; - if (phba->cfg_irq_chann > phba->cfg_hdw_queue) + logit = 1; + } + if (phba->cfg_irq_chann > phba->cfg_hdw_queue) { phba->cfg_irq_chann = phba->cfg_hdw_queue; + logit = 1; + } + if (logit) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, + "2006 Reducing Queues - CPU limitation: " + "IRQ %d HDWQ %d\n", + phba->cfg_irq_chann, + phba->cfg_hdw_queue); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME && phba->nvmet_support) { diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 1d88fedaf3f0..6f9d648a9b9c 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2494,13 +2494,12 @@ lpfc_sli4_bsg_link_diag_test(struct bsg_job *job) diag_status_reply = (struct diag_status *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*diag_status_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "3012 Received Run link diag test reply " "below minimum size (%d): reply_len:%d\n", - (int)(sizeof(struct fc_bsg_request) + - sizeof(struct diag_status)), + (int)(sizeof(*bsg_reply) + + sizeof(*diag_status_reply)), job->reply_len); rc = -EINVAL; goto job_error; @@ -3418,8 +3417,7 @@ lpfc_bsg_get_dfc_rev(struct bsg_job *job) event_reply = (struct get_mgmt_rev_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2741 Received GET_DFC_REV reply below " "minimum size\n"); @@ -5202,8 +5200,8 @@ lpfc_menlo_cmd(struct bsg_job *job) goto no_dd_data; } 
- if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) { + if (job->reply_len < sizeof(*bsg_reply) + + sizeof(struct menlo_response)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2785 Received MENLO_CMD reply below " "minimum size\n"); @@ -5359,9 +5357,7 @@ lpfc_forced_link_speed(struct bsg_job *job) forced_reply = (struct forced_link_speed_support_reply *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + - sizeof(struct forced_link_speed_support_reply)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*forced_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "0049 Received FORCED_LINK_SPEED reply below " "minimum size\n"); @@ -5715,8 +5711,7 @@ lpfc_get_trunk_info(struct bsg_job *job) event_reply = (struct lpfc_trunk_info *) bsg_reply->reply_data.vendor_reply.vendor_rsp; - if (job->reply_len < - sizeof(struct fc_bsg_request) + sizeof(struct lpfc_trunk_info)) { + if (job->reply_len < sizeof(*bsg_reply) + sizeof(*event_reply)) { lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC, "2728 Received GET TRUNK _INFO reply below " "minimum size\n"); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index dd9f2bf54edd..ef2015fad2d5 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -713,7 +713,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* This is a GID_FT completing so the gidft_inp counter was * incremented before the GID_FT was issued to the wire. */ - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; /* * Skip processing the NS response @@ -741,11 +742,14 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; /* CT command is being retried */ - vport->gidft_inp--; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, type); if (rc == 0) goto out; + else { /* Unable to send NS cmd */ + if (vport->gidft_inp) + vport->gidft_inp--; + } } if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); @@ -825,7 +829,8 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -918,7 +923,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* This is a GID_PT completing so the gidft_inp counter was * incremented before the GID_PT was issued to the wire. 
*/ - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; /* * Skip processing the NS response @@ -942,11 +948,14 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->fc_ns_retry++; /* CT command is being retried */ - vport->gidft_inp--; rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, vport->fc_ns_retry, GID_PT_N_PORT); if (rc == 0) goto out; + else { /* Unable to send NS cmd */ + if (vport->gidft_inp) + vport->gidft_inp--; + } } if (vport->fc_flag & FC_RSCN_MODE) lpfc_els_flush_rscn(vport); @@ -1027,7 +1036,8 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } - vport->gidft_inp--; + if (vport->gidft_inp) + vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 85d4e4000c25..48dc63f22cca 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3937,10 +3937,14 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, case LSRJT_UNABLE_TPC: /* The driver has a VALID PLOGI but the rport has * rejected the PRLI - can't do it now. Delay - * for 1 second and try again - don't care about - * the explanation. + * for 1 second and try again. + * + * However, if explanation is REQ_UNSUPPORTED there's + * no point to retry PRLI. */ - if (cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) { + if ((cmd == ELS_CMD_PRLI || cmd == ELS_CMD_NVMEPRLI) && + stat.un.b.lsRjtRsnCodeExp != + LSEXP_REQ_UNSUPPORTED) { delay = 1000; maxretry = lpfc_max_els_tries + 1; retry = 1; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index c4a7e82d3ff2..c69725999315 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4577,6 +4577,13 @@ static void lpfc_host_supported_speeds_set(struct Scsi_Host *shost) struct lpfc_hba *phba = vport->phba; fc_host_supported_speeds(shost) = 0; + /* + * Avoid reporting supported link speed for FCoE as it can't be + * controlled via FCoE. 
+ */ + if (phba->hba_flag & HBA_FCOE_MODE) + return; + if (phba->lmt & LMT_128Gb) fc_host_supported_speeds(shost) |= FC_PORTSPEED_128GBIT; if (phba->lmt & LMT_64Gb) @@ -4910,6 +4917,9 @@ lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code, case LPFC_ASYNC_LINK_SPEED_40GBPS: port_speed = 40000; break; + case LPFC_ASYNC_LINK_SPEED_100GBPS: + port_speed = 100000; + break; default: port_speed = 0; } @@ -8589,7 +8599,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) "VPI(B:%d M:%d) " "VFI(B:%d M:%d) " "RPI(B:%d M:%d) " - "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n", + "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d lmt:x%x\n", phba->sli4_hba.extents_in_use, phba->sli4_hba.max_cfg_param.xri_base, phba->sli4_hba.max_cfg_param.max_xri, @@ -8603,7 +8613,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.max_eq, phba->sli4_hba.max_cfg_param.max_cq, phba->sli4_hba.max_cfg_param.max_wq, - phba->sli4_hba.max_cfg_param.max_rq); + phba->sli4_hba.max_cfg_param.max_rq, + phba->lmt); /* * Calculate queue resources based on how @@ -8626,7 +8637,8 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) if ((phba->cfg_irq_chann > qmin) || (phba->cfg_hdw_queue > qmin)) { lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, - "2005 Reducing Queues: " + "2005 Reducing Queues - " + "FW resource limitation: " "WQ %d CQ %d EQ %d: min %d: " "IRQ %d HDWQ %d\n", phba->sli4_hba.max_cfg_param.max_wq, @@ -14100,17 +14112,18 @@ lpfc_init(void) printk(KERN_ERR "Could not register lpfcmgmt device, " "misc_register returned with status %d", error); + error = -ENOMEM; lpfc_transport_functions.vport_create = lpfc_vport_create; lpfc_transport_functions.vport_delete = lpfc_vport_delete; lpfc_transport_template = fc_attach_transport(&lpfc_transport_functions); if (lpfc_transport_template == NULL) - return -ENOMEM; + goto unregister; lpfc_vport_transport_template = fc_attach_transport(&lpfc_vport_transport_functions); if (lpfc_vport_transport_template == NULL) { fc_release_transport(lpfc_transport_template); - return -ENOMEM; + goto unregister; } lpfc_nvme_cmd_template(); lpfc_nvmet_cmd_template(); @@ -14136,6 +14149,8 @@ unwind: cpuhp_failure: fc_release_transport(lpfc_transport_template); fc_release_transport(lpfc_vport_transport_template); +unregister: + misc_deregister(&lpfc_mgmt_dev); return error; } diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index e4c710fe0245..cad53d19cb25 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -1745,7 +1745,13 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, } } - if (ndlp->nlp_type & NLP_FCP_TARGET) { + if (ndlp->nlp_type & NLP_FCP_TARGET) + ndlp->nlp_fc4_type |= NLP_FC4_FCP; + + if (ndlp->nlp_type & NLP_NVME_TARGET) + ndlp->nlp_fc4_type |= NLP_FC4_NVME; + + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } else { diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index a4430aeeb04a..d4ade7cdb93a 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2110,7 +2110,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) } tgtp->tport_unreg_cmp = &tport_unreg_cmp; nvmet_fc_unregister_targetport(phba->targetport); - if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, + if (!wait_for_completion_timeout(&tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "6179 Unreg targetport x%px 
timeout " diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8582b51b0613..4cd7ded656b7 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -13650,7 +13650,11 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe) fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { spin_unlock_irqrestore(&phba->hbalock, iflags); /* Handle MDS Loopback frames */ - lpfc_sli4_handle_mds_loopback(phba->pport, dma_buf); + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli4_handle_mds_loopback(phba->pport, + dma_buf); + else + lpfc_in_buf_free(phba, &dma_buf->dbuf); break; } @@ -18363,7 +18367,10 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) { vport = phba->pport; /* Handle MDS Loopback frames */ - lpfc_sli4_handle_mds_loopback(vport, dmabuf); + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli4_handle_mds_loopback(vport, dmabuf); + else + lpfc_in_buf_free(phba, &dmabuf->dbuf); return; } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 1987c6666279..20adec4387f0 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.8.0.2" +#define LPFC_DRIVER_VERSION "12.8.0.3" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index e443dee43bcf..c9abed8429c9 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1526,7 +1526,7 @@ int sas_rphy_add(struct sas_rphy *rphy) list_add_tail(&rphy->list, &sas_host->rphy_list); if (identify->device_type == SAS_END_DEVICE && (identify->target_port_protocols & - (SAS_PROTOCOL_SSP|SAS_PROTOCOL_STP|SAS_PROTOCOL_SATA))) + (SAS_PROTOCOL_SSP | SAS_PROTOCOL_STP | SAS_PROTOCOL_SATA))) rphy->scsi_target_id = sas_host->next_target_id++; else if (identify->device_type == SAS_END_DEVICE) rphy->scsi_target_id = -1; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index acde0ca35769..95018e650f2d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2578,8 +2578,6 @@ sd_print_capacity(struct scsi_disk *sdkp, sd_printk(KERN_NOTICE, sdkp, "%u-byte physical blocks\n", sdkp->physical_block_size); - - sd_zbc_print_zones(sdkp); } /* called with buffer of length 512 */ @@ -3220,6 +3218,14 @@ static int sd_revalidate_disk(struct gendisk *disk) sd_config_write_same(sdkp); kfree(buffer); + /* + * For a zoned drive, revalidating the zones can be done only once + * the gendisk capacity is set. So if this fails, set back the gendisk + * capacity to 0. 
+ */ + if (sd_zbc_revalidate_zones(sdkp)) + set_capacity_revalidate_and_notify(disk, 0, false); + out: return 0; } diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 27c0f4e9b1d4..4933e7daf17d 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -75,7 +75,9 @@ struct scsi_disk { struct opal_dev *opal_dev; #ifdef CONFIG_BLK_DEV_ZONED u32 nr_zones; + u32 rev_nr_zones; u32 zone_blocks; + u32 rev_zone_blocks; u32 zones_optimal_open; u32 zones_optimal_nonseq; u32 zones_max_open; @@ -215,8 +217,8 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp) int sd_zbc_init_disk(struct scsi_disk *sdkp); void sd_zbc_release_disk(struct scsi_disk *sdkp); -extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); -extern void sd_zbc_print_zones(struct scsi_disk *sdkp); +int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); +int sd_zbc_revalidate_zones(struct scsi_disk *sdkp); blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, bool all); unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, @@ -242,7 +244,10 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, return 0; } -static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {} +static inline int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) +{ + return 0; +} static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, unsigned char op, diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 4717e79bff55..0e94ff056bff 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -634,6 +634,23 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, return 0; } +static void sd_zbc_print_zones(struct scsi_disk *sdkp) +{ + if (!sd_is_zoned(sdkp) || !sdkp->capacity) + return; + + if (sdkp->capacity & (sdkp->zone_blocks - 1)) + sd_printk(KERN_NOTICE, sdkp, + "%u zones of %u logical blocks + 1 runt zone\n", + sdkp->nr_zones - 1, + sdkp->zone_blocks); + else + sd_printk(KERN_NOTICE, sdkp, + "%u zones of %u logical blocks\n", + sdkp->nr_zones, + sdkp->zone_blocks); +} + static void sd_zbc_revalidate_zones_cb(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); @@ -641,36 +658,31 @@ static void sd_zbc_revalidate_zones_cb(struct gendisk *disk) swap(sdkp->zones_wp_offset, sdkp->rev_wp_offset); } -static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp, - u32 zone_blocks, - unsigned int nr_zones) +int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) { struct gendisk *disk = sdkp->disk; + struct request_queue *q = disk->queue; + u32 zone_blocks = sdkp->rev_zone_blocks; + unsigned int nr_zones = sdkp->rev_nr_zones; + u32 max_append; int ret = 0; + if (!sd_is_zoned(sdkp)) + return 0; + /* * Make sure revalidate zones are serialized to ensure exclusive * updates of the scsi disk data. */ mutex_lock(&sdkp->rev_mutex); - /* - * Revalidate the disk zones to update the device request queue zone - * bitmaps and the zone write pointer offset array. Do this only once - * the device capacity is set on the second revalidate execution for - * disk scan or if something changed when executing a normal revalidate. 
- */ - if (sdkp->first_scan) { - sdkp->zone_blocks = zone_blocks; - sdkp->nr_zones = nr_zones; - goto unlock; - } - if (sdkp->zone_blocks == zone_blocks && sdkp->nr_zones == nr_zones && disk->queue->nr_zones == nr_zones) goto unlock; + sdkp->zone_blocks = zone_blocks; + sdkp->nr_zones = nr_zones; sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO); if (!sdkp->rev_wp_offset) { ret = -ENOMEM; @@ -682,6 +694,21 @@ static int sd_zbc_revalidate_zones(struct scsi_disk *sdkp, kvfree(sdkp->rev_wp_offset); sdkp->rev_wp_offset = NULL; + if (ret) { + sdkp->zone_blocks = 0; + sdkp->nr_zones = 0; + sdkp->capacity = 0; + goto unlock; + } + + max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks), + q->limits.max_segments << (PAGE_SHIFT - 9)); + max_append = min_t(u32, max_append, queue_max_hw_sectors(q)); + + blk_queue_max_zone_append_sectors(q, max_append); + + sd_zbc_print_zones(sdkp); + unlock: mutex_unlock(&sdkp->rev_mutex); @@ -694,7 +721,6 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) struct request_queue *q = disk->queue; unsigned int nr_zones; u32 zone_blocks = 0; - u32 max_append; int ret; if (!sd_is_zoned(sdkp)) @@ -728,22 +754,8 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) sdkp->device->use_16_for_rw = 1; sdkp->device->use_10_for_rw = 0; - ret = sd_zbc_revalidate_zones(sdkp, zone_blocks, nr_zones); - if (ret) - goto err; - - /* - * On the first scan 'chunk_sectors' isn't setup yet, so calling - * blk_queue_max_zone_append_sectors() will result in a WARN(). Defer - * this setting to the second scan. - */ - if (sdkp->first_scan) - return 0; - - max_append = min_t(u32, logical_to_sectors(sdkp->device, zone_blocks), - q->limits.max_segments << (PAGE_SHIFT - 9)); - - blk_queue_max_zone_append_sectors(q, max_append); + sdkp->rev_nr_zones = nr_zones; + sdkp->rev_zone_blocks = zone_blocks; return 0; @@ -753,23 +765,6 @@ err: return ret; } -void sd_zbc_print_zones(struct scsi_disk *sdkp) -{ - if (!sd_is_zoned(sdkp) || !sdkp->capacity) - return; - - if (sdkp->capacity & (sdkp->zone_blocks - 1)) - sd_printk(KERN_NOTICE, sdkp, - "%u zones of %u logical blocks + 1 runt zone\n", - sdkp->nr_zones - 1, - sdkp->zone_blocks); - else - sd_printk(KERN_NOTICE, sdkp, - "%u zones of %u logical blocks\n", - sdkp->nr_zones, - sdkp->zone_blocks); -} - int sd_zbc_init_disk(struct scsi_disk *sdkp) { if (!sd_is_zoned(sdkp)) diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index cd157f11eb22..bd38c8cea56e 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -7423,8 +7423,12 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev, u16 timeout) { - return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, + int rc; + + rc = pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout); + + return pcibios_err_to_errno(rc); } static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c index af20ad963b05..28edb6e53ea2 100644 --- a/drivers/scsi/sym53c8xx_2/sym_glue.c +++ b/drivers/scsi/sym53c8xx_2/sym_glue.c @@ -1743,7 +1743,7 @@ static void sym2_remove(struct pci_dev *pdev) * @state: current state of the PCI slot */ static pci_ers_result_t sym2_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { /* If 
slot is permanently frozen, turn everything off */ if (state == pci_channel_io_perm_failure) { diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 8cc003aa4d00..ca1c39b6f631 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -754,14 +754,14 @@ static struct scsi_host_template virtscsi_host_template = { #define virtscsi_config_get(vdev, fld) \ ({ \ - typeof(((struct virtio_scsi_config *)0)->fld) __val; \ + __virtio_native_type(struct virtio_scsi_config, fld) __val; \ virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \ __val; \ }) #define virtscsi_config_set(vdev, fld, val) \ do { \ - typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \ + __virtio_native_type(struct virtio_scsi_config, fld) __val = (val); \ virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \ } while(0) diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c index eeb028b9cdb3..fd72d9088bdc 100644 --- a/drivers/sh/clk/cpg.c +++ b/drivers/sh/clk/cpg.c @@ -36,21 +36,6 @@ static void sh_clk_write(int value, struct clk *clk) iowrite32(value, clk->mapped_reg); } -static unsigned int r8(const void __iomem *addr) -{ - return ioread8(addr); -} - -static unsigned int r16(const void __iomem *addr) -{ - return ioread16(addr); -} - -static unsigned int r32(const void __iomem *addr) -{ - return ioread32(addr); -} - static int sh_clk_mstp_enable(struct clk *clk) { sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk); @@ -61,11 +46,11 @@ static int sh_clk_mstp_enable(struct clk *clk) (phys_addr_t)clk->enable_reg + clk->mapped_reg; if (clk->flags & CLK_ENABLE_REG_8BIT) - read = r8; + read = ioread8; else if (clk->flags & CLK_ENABLE_REG_16BIT) - read = r16; + read = ioread16; else - read = r32; + read = ioread32; for (i = 1000; (read(mapped_status) & (1 << clk->enable_bit)) && i; diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig index 899f8c066797..3dc3e3d61ea3 100644 --- a/drivers/soc/qcom/Kconfig +++ b/drivers/soc/qcom/Kconfig @@ -53,6 +53,10 @@ config QCOM_LLCC SDM845. This provides interfaces to clients that use the LLCC. Say yes here to enable LLCC slice driver. +config QCOM_KRYO_L2_ACCESSORS + bool + depends on ARCH_QCOM && ARM64 || COMPILE_TEST + config QCOM_MDT_LOADER tristate select QCOM_SCM diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile index 7d7e2ecbdce6..93392d9dc7f7 100644 --- a/drivers/soc/qcom/Makefile +++ b/drivers/soc/qcom/Makefile @@ -24,3 +24,4 @@ obj-$(CONFIG_QCOM_APR) += apr.o obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o obj-$(CONFIG_QCOM_RPMHPD) += rpmhpd.o obj-$(CONFIG_QCOM_RPMPD) += rpmpd.o +obj-$(CONFIG_QCOM_KRYO_L2_ACCESSORS) += kryo-l2-accessors.o diff --git a/drivers/soc/qcom/kryo-l2-accessors.c b/drivers/soc/qcom/kryo-l2-accessors.c new file mode 100644 index 000000000000..c20cb92077c0 --- /dev/null +++ b/drivers/soc/qcom/kryo-l2-accessors.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + */ + +#include <linux/spinlock.h> +#include <asm/barrier.h> +#include <asm/sysreg.h> +#include <soc/qcom/kryo-l2-accessors.h> + +#define L2CPUSRSELR_EL1 sys_reg(3, 3, 15, 0, 6) +#define L2CPUSRDR_EL1 sys_reg(3, 3, 15, 0, 7) + +static DEFINE_RAW_SPINLOCK(l2_access_lock); + +/** + * kryo_l2_set_indirect_reg() - write value to an L2 register + * @reg: Address of L2 register. + * @value: Value to be written to register. 
+ * + * Use architecturally required barriers for ordering between system register + * accesses, and system registers with respect to device memory + */ +void kryo_l2_set_indirect_reg(u64 reg, u64 val) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&l2_access_lock, flags); + write_sysreg_s(reg, L2CPUSRSELR_EL1); + isb(); + write_sysreg_s(val, L2CPUSRDR_EL1); + isb(); + raw_spin_unlock_irqrestore(&l2_access_lock, flags); +} +EXPORT_SYMBOL(kryo_l2_set_indirect_reg); + +/** + * kryo_l2_get_indirect_reg() - read an L2 register value + * @reg: Address of L2 register. + * + * Use architecturally required barriers for ordering between system register + * accesses, and system registers with respect to device memory + */ +u64 kryo_l2_get_indirect_reg(u64 reg) +{ + u64 val; + unsigned long flags; + + raw_spin_lock_irqsave(&l2_access_lock, flags); + write_sysreg_s(reg, L2CPUSRSELR_EL1); + isb(); + val = read_sysreg_s(L2CPUSRDR_EL1); + raw_spin_unlock_irqrestore(&l2_access_lock, flags); + + return val; +} +EXPORT_SYMBOL(kryo_l2_get_indirect_reg); diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c index 4c9225f15c4e..088dc99f77f3 100644 --- a/drivers/soc/qcom/pdr_interface.c +++ b/drivers/soc/qcom/pdr_interface.c @@ -5,6 +5,7 @@ #include <linux/kernel.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/string.h> #include <linux/workqueue.h> diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c index 44ef00f1f8ee..2028458bea6f 100644 --- a/drivers/staging/qlge/qlge_main.c +++ b/drivers/staging/qlge/qlge_main.c @@ -4679,7 +4679,7 @@ static void ql_eeh_close(struct net_device *ndev) * a PCI bus error is detected. */ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev, - enum pci_channel_state state) + pci_channel_state_t state) { struct net_device *ndev = pci_get_drvdata(pdev); struct ql_adapter *qdev = netdev_priv(ndev); diff --git a/drivers/target/iscsi/iscsi_target_transport.c b/drivers/target/iscsi/iscsi_target_transport.c index 036940518bfe..27c85f260459 100644 --- a/drivers/target/iscsi/iscsi_target_transport.c +++ b/drivers/target/iscsi/iscsi_target_transport.c @@ -31,7 +31,7 @@ void iscsit_put_transport(struct iscsit_transport *t) module_put(t->owner); } -int iscsit_register_transport(struct iscsit_transport *t) +void iscsit_register_transport(struct iscsit_transport *t) { INIT_LIST_HEAD(&t->t_node); @@ -40,8 +40,6 @@ int iscsit_register_transport(struct iscsit_transport *t) mutex_unlock(&transport_mutex); pr_debug("Registered iSCSI transport: %s\n", t->name); - - return 0; } EXPORT_SYMBOL(iscsit_register_transport); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index a2e710d46432..b668224f906d 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -499,4 +499,15 @@ config SPRD_THERMAL help Support for the Spreadtrum thermal sensor driver in the Linux thermal framework. + +config KHADAS_MCU_FAN_THERMAL + tristate "Khadas MCU controller FAN cooling support" + depends on OF || COMPILE_TEST + depends on MFD_KHADAS_MCU + select MFD_CORE + select REGMAP + help + If you say yes here you get support for the FAN controlled + by the Microcontroller found on the Khadas VIM boards. 
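Stepping back to the kryo-l2-accessors helpers added above: the patch only introduces the provider side, so here is a hedged caller-side sketch. The register address and bit are hypothetical; only the kryo_l2_get_indirect_reg()/kryo_l2_set_indirect_reg() signatures come from the file above.

#include <linux/bits.h>
#include <linux/types.h>
#include <soc/qcom/kryo-l2-accessors.h>

/* Hypothetical L2 register and bit, used only to show the call pattern. */
#define EXAMPLE_L2_REG		0x400
#define EXAMPLE_FEATURE_EN	BIT(0)

static void example_enable_l2_feature(void)
{
	u64 val = kryo_l2_get_indirect_reg(EXAMPLE_L2_REG);

	/*
	 * Each helper serializes its own select/data access internally, but
	 * this read-modify-write as a whole is not atomic.
	 */
	kryo_l2_set_indirect_reg(EXAMPLE_L2_REG, val | EXAMPLE_FEATURE_EN);
}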
+ endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index b8d96d26f9ec..b64dd50a6629 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -61,3 +61,4 @@ obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o +obj-$(CONFIG_KHADAS_MCU_FAN_THERMAL) += khadas_mcu_fan.o diff --git a/drivers/thermal/khadas_mcu_fan.c b/drivers/thermal/khadas_mcu_fan.c new file mode 100644 index 000000000000..9eadd2d6413e --- /dev/null +++ b/drivers/thermal/khadas_mcu_fan.c @@ -0,0 +1,162 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Khadas MCU Controlled FAN driver + * + * Copyright (C) 2020 BayLibre SAS + * Author(s): Neil Armstrong <narmstrong@baylibre.com> + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/mfd/khadas-mcu.h> +#include <linux/regmap.h> +#include <linux/sysfs.h> +#include <linux/thermal.h> + +#define MAX_LEVEL 3 + +struct khadas_mcu_fan_ctx { + struct khadas_mcu *mcu; + unsigned int level; + struct thermal_cooling_device *cdev; +}; + +static int khadas_mcu_fan_set_level(struct khadas_mcu_fan_ctx *ctx, + unsigned int level) +{ + int ret; + + ret = regmap_write(ctx->mcu->regmap, KHADAS_MCU_CMD_FAN_STATUS_CTRL_REG, + level); + if (ret) + return ret; + + ctx->level = level; + + return 0; +} + +static int khadas_mcu_fan_get_max_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + *state = MAX_LEVEL; + + return 0; +} + +static int khadas_mcu_fan_get_cur_state(struct thermal_cooling_device *cdev, + unsigned long *state) +{ + struct khadas_mcu_fan_ctx *ctx = cdev->devdata; + + *state = ctx->level; + + return 0; +} + +static int +khadas_mcu_fan_set_cur_state(struct thermal_cooling_device *cdev, + unsigned long state) +{ + struct khadas_mcu_fan_ctx *ctx = cdev->devdata; + + if (state > MAX_LEVEL) + return -EINVAL; + + if (state == ctx->level) + return 0; + + return khadas_mcu_fan_set_level(ctx, state); +} + +static const struct thermal_cooling_device_ops khadas_mcu_fan_cooling_ops = { + .get_max_state = khadas_mcu_fan_get_max_state, + .get_cur_state = khadas_mcu_fan_get_cur_state, + .set_cur_state = khadas_mcu_fan_set_cur_state, +}; + +static int khadas_mcu_fan_probe(struct platform_device *pdev) +{ + struct khadas_mcu *mcu = dev_get_drvdata(pdev->dev.parent); + struct thermal_cooling_device *cdev; + struct device *dev = &pdev->dev; + struct khadas_mcu_fan_ctx *ctx; + int ret; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + ctx->mcu = mcu; + platform_set_drvdata(pdev, ctx); + + cdev = devm_thermal_of_cooling_device_register(dev->parent, + dev->parent->of_node, "khadas-mcu-fan", ctx, + &khadas_mcu_fan_cooling_ops); + if (IS_ERR(cdev)) { + ret = PTR_ERR(cdev); + dev_err(dev, "Failed to register khadas-mcu-fan as cooling device: %d\n", + ret); + return ret; + } + ctx->cdev = cdev; + thermal_cdev_update(cdev); + + return 0; +} + +static void khadas_mcu_fan_shutdown(struct platform_device *pdev) +{ + struct khadas_mcu_fan_ctx *ctx = platform_get_drvdata(pdev); + + khadas_mcu_fan_set_level(ctx, 0); +} + +#ifdef CONFIG_PM_SLEEP +static int khadas_mcu_fan_suspend(struct device *dev) +{ + struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev); + unsigned int level_save = ctx->level; + int ret; + + ret = khadas_mcu_fan_set_level(ctx, 0); + if (ret) + return ret; + + ctx->level = level_save; + + return 0; +} + 
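A short note on the PM hooks here: khadas_mcu_fan_set_level() caches the requested level in ctx->level, so the suspend handler above saves the current level, forces the fan to 0, and then restores the saved value into ctx->level. That lets the resume handler below simply re-apply ctx->level to bring the fan back to its pre-suspend speed.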
+static int khadas_mcu_fan_resume(struct device *dev) +{ + struct khadas_mcu_fan_ctx *ctx = dev_get_drvdata(dev); + + return khadas_mcu_fan_set_level(ctx, ctx->level); +} +#endif + +static SIMPLE_DEV_PM_OPS(khadas_mcu_fan_pm, khadas_mcu_fan_suspend, + khadas_mcu_fan_resume); + +static const struct platform_device_id khadas_mcu_fan_id_table[] = { + { .name = "khadas-mcu-fan-ctrl", }, + {}, +}; +MODULE_DEVICE_TABLE(platform, khadas_mcu_fan_id_table); + +static struct platform_driver khadas_mcu_fan_driver = { + .probe = khadas_mcu_fan_probe, + .shutdown = khadas_mcu_fan_shutdown, + .driver = { + .name = "khadas-mcu-fan-ctrl", + .pm = &khadas_mcu_fan_pm, + }, + .id_table = khadas_mcu_fan_id_table, +}; + +module_platform_driver(khadas_mcu_fan_driver); + +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>"); +MODULE_DESCRIPTION("Khadas MCU FAN driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c index acb8b6256847..a4d78811f7e2 100644 --- a/drivers/thunderbolt/test.c +++ b/drivers/thunderbolt/test.c @@ -17,13 +17,13 @@ static int __ida_init(struct kunit_resource *res, void *context) struct ida *ida = context; ida_init(ida); - res->allocation = ida; + res->data = ida; return 0; } static void __ida_destroy(struct kunit_resource *res) { - struct ida *ida = res->allocation; + struct ida *ida = res->data; ida_destroy(ida); } diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig index d93a69b12f81..4271c408103e 100644 --- a/drivers/vdpa/Kconfig +++ b/drivers/vdpa/Kconfig @@ -29,4 +29,23 @@ config IFCVF To compile this driver as a module, choose M here: the module will be called ifcvf. +config MLX5_VDPA + bool "MLX5 VDPA support library for ConnectX devices" + depends on MLX5_CORE + default n + help + Support library for Mellanox VDPA drivers. Provides code that is + common for all types of VDPA drivers. The following drivers are planned: + net, block. + +config MLX5_VDPA_NET + tristate "vDPA driver for ConnectX devices" + depends on MLX5_VDPA + default n + help + VDPA network driver for ConnectX6 and newer. Provides offloading + of virtio net datapath such that descriptors put on the ring will + be executed by the hardware. It also supports a variety of stateless + offloads depending on the actual device used and firmware version. 
+ endif # VDPA diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile index 8bbb686ca7a2..d160e9b63a66 100644 --- a/drivers/vdpa/Makefile +++ b/drivers/vdpa/Makefile @@ -2,3 +2,4 @@ obj-$(CONFIG_VDPA) += vdpa.o obj-$(CONFIG_VDPA_SIM) += vdpa_sim/ obj-$(CONFIG_IFCVF) += ifcvf/ +obj-$(CONFIG_MLX5_VDPA) += mlx5/ diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c index 94bf0328b68d..f2a128e56de5 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.c +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -272,7 +272,7 @@ static int ifcvf_config_features(struct ifcvf_hw *hw) return 0; } -u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) +u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) { struct ifcvf_lm_cfg __iomem *ifcvf_lm; void __iomem *avail_idx_addr; @@ -287,7 +287,7 @@ u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) return last_avail_idx; } -int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num) +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num) { struct ifcvf_lm_cfg __iomem *ifcvf_lm; void __iomem *avail_idx_addr; diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h index f4554412e607..08f267a2aafe 100644 --- a/drivers/vdpa/ifcvf/ifcvf_base.h +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -29,7 +29,7 @@ (1ULL << VIRTIO_F_VERSION_1) | \ (1ULL << VIRTIO_NET_F_STATUS) | \ (1ULL << VIRTIO_F_ORDER_PLATFORM) | \ - (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \ + (1ULL << VIRTIO_F_ACCESS_PLATFORM) | \ (1ULL << VIRTIO_NET_F_MRG_RXBUF)) /* Only one queue pair for now. */ @@ -116,7 +116,7 @@ void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); void io_write64_twopart(u64 val, u32 *lo, u32 *hi); void ifcvf_reset(struct ifcvf_hw *hw); u64 ifcvf_get_features(struct ifcvf_hw *hw); -u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); -int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num); +u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num); struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); #endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c index f5a60c14b979..076d7ac5e723 100644 --- a/drivers/vdpa/ifcvf/ifcvf_main.c +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -50,8 +50,10 @@ static void ifcvf_free_irq(struct ifcvf_adapter *adapter, int queues) int i; - for (i = 0; i < queues; i++) + for (i = 0; i < queues; i++) { devm_free_irq(&pdev->dev, vf->vring[i].irq, &vf->vring[i]); + vf->vring[i].irq = -EINVAL; + } ifcvf_free_irq_vectors(pdev); } @@ -235,19 +237,21 @@ static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) return IFCVF_QUEUE_MAX; } -static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid) +static int ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid, + struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return ifcvf_get_vq_state(vf, qid); + state->avail_index = ifcvf_get_vq_state(vf, qid); + return 0; } static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, - u64 num) + const struct vdpa_vq_state *state) { struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); - return ifcvf_set_vq_state(vf, qid, num); + return ifcvf_set_vq_state(vf, qid, state->avail_index); } static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, @@ -352,6 +356,14 @@ static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, vf->config_cb.private = cb->private; } +static int ifcvf_vdpa_get_vq_irq(struct vdpa_device *vdpa_dev, + u16 qid) +{ + struct 
ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return vf->vring[qid].irq; +} + /* * IFCVF currently does't have on-chip IOMMU, so not * implemented set_map()/dma_map()/dma_unmap() @@ -369,6 +381,7 @@ static const struct vdpa_config_ops ifc_vdpa_ops = { .get_vq_ready = ifcvf_vdpa_get_vq_ready, .set_vq_num = ifcvf_vdpa_set_vq_num, .set_vq_address = ifcvf_vdpa_set_vq_address, + .get_vq_irq = ifcvf_vdpa_get_vq_irq, .kick_vq = ifcvf_vdpa_kick_vq, .get_generation = ifcvf_vdpa_get_generation, .get_device_id = ifcvf_vdpa_get_device_id, @@ -384,7 +397,7 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct device *dev = &pdev->dev; struct ifcvf_adapter *adapter; struct ifcvf_hw *vf; - int ret; + int ret, i; ret = pcim_enable_device(pdev); if (ret) { @@ -420,7 +433,8 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) } adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, - dev, &ifc_vdpa_ops); + dev, &ifc_vdpa_ops, + IFCVF_MAX_QUEUE_PAIRS * 2); if (adapter == NULL) { IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); return -ENOMEM; @@ -441,6 +455,9 @@ static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err; } + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) + vf->vring[i].irq = -EINVAL; + ret = vdpa_register_device(&adapter->vdpa); if (ret) { IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); diff --git a/drivers/vdpa/mlx5/Makefile b/drivers/vdpa/mlx5/Makefile new file mode 100644 index 000000000000..89a5bededc9f --- /dev/null +++ b/drivers/vdpa/mlx5/Makefile @@ -0,0 +1,4 @@ +subdir-ccflags-y += -I$(srctree)/drivers/vdpa/mlx5/core + +obj-$(CONFIG_MLX5_VDPA_NET) += mlx5_vdpa.o +mlx5_vdpa-$(CONFIG_MLX5_VDPA_NET) += net/main.o net/mlx5_vnet.o core/resources.o core/mr.o diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h new file mode 100644 index 000000000000..5c92a576edae --- /dev/null +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd. 
*/ + +#ifndef __MLX5_VDPA_H__ +#define __MLX5_VDPA_H__ + +#include <linux/vdpa.h> +#include <linux/mlx5/driver.h> + +struct mlx5_vdpa_direct_mr { + u64 start; + u64 end; + u32 perm; + struct mlx5_core_mkey mr; + struct sg_table sg_head; + int log_size; + int nsg; + struct list_head list; + u64 offset; +}; + +struct mlx5_vdpa_mr { + struct mlx5_core_mkey mkey; + + /* list of direct MRs descendants of this indirect mr */ + struct list_head head; + unsigned long num_directs; + unsigned long num_klms; + bool initialized; + + /* serialize mkey creation and destruction */ + struct mutex mkey_mtx; +}; + +struct mlx5_vdpa_resources { + u32 pdn; + struct mlx5_uars_page *uar; + void __iomem *kick_addr; + u16 uid; + u32 null_mkey; + bool valid; +}; + +struct mlx5_vdpa_dev { + struct vdpa_device vdev; + struct mlx5_core_dev *mdev; + struct mlx5_vdpa_resources res; + + u64 mlx_features; + u64 actual_features; + u8 status; + u32 max_vqs; + u32 generation; + + struct mlx5_vdpa_mr mr; +}; + +int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid); +int mlx5_vdpa_dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid); +int mlx5_vdpa_get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey); +int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn); +void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn); +int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn); +void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn); +int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn); +void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn); +int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn); +void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn); +int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev); +void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev); +int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in, + int inlen); +int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey); +int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, + bool *change_map); +int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb); +void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev); + +#define mlx5_vdpa_warn(__dev, format, ...) \ + dev_warn((__dev)->mdev->device, "%s:%d:(pid %d) warning: " format, __func__, __LINE__, \ + current->pid, ##__VA_ARGS__) + +#define mlx5_vdpa_info(__dev, format, ...) \ + dev_info((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \ + current->pid, ##__VA_ARGS__) + +#define mlx5_vdpa_dbg(__dev, format, ...) \ + dev_debug((__dev)->mdev->device, "%s:%d:(pid %d): " format, __func__, __LINE__, \ + current->pid, ##__VA_ARGS__) + +#endif /* __MLX5_VDPA_H__ */ diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h new file mode 100644 index 000000000000..f6f57a29b38e --- /dev/null +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa_ifc.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd. 
*/ + +#ifndef __MLX5_VDPA_IFC_H_ +#define __MLX5_VDPA_IFC_H_ + +#include <linux/mlx5/mlx5_ifc.h> + +enum { + MLX5_VIRTIO_Q_EVENT_MODE_NO_MSIX_MODE = 0x0, + MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE = 0x1, + MLX5_VIRTIO_Q_EVENT_MODE_MSIX_MODE = 0x2, +}; + +enum { + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT = 0x1, // do I check this caps? + MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED = 0x2, +}; + +enum { + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT = 0, + MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED = 1, +}; + +struct mlx5_ifc_virtio_q_bits { + u8 virtio_q_type[0x8]; + u8 reserved_at_8[0x5]; + u8 event_mode[0x3]; + u8 queue_index[0x10]; + + u8 full_emulation[0x1]; + u8 virtio_version_1_0[0x1]; + u8 reserved_at_22[0x2]; + u8 offload_type[0x4]; + u8 event_qpn_or_msix[0x18]; + + u8 doorbell_stride_index[0x10]; + u8 queue_size[0x10]; + + u8 device_emulation_id[0x20]; + + u8 desc_addr[0x40]; + + u8 used_addr[0x40]; + + u8 available_addr[0x40]; + + u8 virtio_q_mkey[0x20]; + + u8 max_tunnel_desc[0x10]; + u8 reserved_at_170[0x8]; + u8 error_type[0x8]; + + u8 umem_1_id[0x20]; + + u8 umem_1_size[0x20]; + + u8 umem_1_offset[0x40]; + + u8 umem_2_id[0x20]; + + u8 umem_2_size[0x20]; + + u8 umem_2_offset[0x40]; + + u8 umem_3_id[0x20]; + + u8 umem_3_size[0x20]; + + u8 umem_3_offset[0x40]; + + u8 counter_set_id[0x20]; + + u8 reserved_at_320[0x8]; + u8 pd[0x18]; + + u8 reserved_at_340[0xc0]; +}; + +struct mlx5_ifc_virtio_net_q_object_bits { + u8 modify_field_select[0x40]; + + u8 reserved_at_40[0x20]; + + u8 vhca_id[0x10]; + u8 reserved_at_70[0x10]; + + u8 queue_feature_bit_mask_12_3[0xa]; + u8 dirty_bitmap_dump_enable[0x1]; + u8 vhost_log_page[0x5]; + u8 reserved_at_90[0xc]; + u8 state[0x4]; + + u8 reserved_at_a0[0x5]; + u8 queue_feature_bit_mask_2_0[0x3]; + u8 tisn_or_qpn[0x18]; + + u8 dirty_bitmap_mkey[0x20]; + + u8 dirty_bitmap_size[0x20]; + + u8 dirty_bitmap_addr[0x40]; + + u8 hw_available_index[0x10]; + u8 hw_used_index[0x10]; + + u8 reserved_at_160[0xa0]; + + struct mlx5_ifc_virtio_q_bits virtio_q_context; +}; + +struct mlx5_ifc_create_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + + struct mlx5_ifc_virtio_net_q_object_bits obj_context; +}; + +struct mlx5_ifc_create_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +struct mlx5_ifc_destroy_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +struct mlx5_ifc_destroy_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +struct mlx5_ifc_query_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; +}; + +struct mlx5_ifc_query_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; + + struct mlx5_ifc_virtio_net_q_object_bits obj_context; +}; + +enum { + MLX5_VIRTQ_MODIFY_MASK_STATE = (u64)1 << 0, + MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_PARAMS = (u64)1 << 3, + MLX5_VIRTQ_MODIFY_MASK_DIRTY_BITMAP_DUMP_ENABLE = (u64)1 << 4, +}; + +enum { + MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT = 0x0, + MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 0x1, + MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND = 0x2, + MLX5_VIRTIO_NET_Q_OBJECT_STATE_ERR = 0x3, +}; + +enum { + MLX5_RQTC_LIST_Q_TYPE_RQ = 0x0, + MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q = 0x1, +}; + +struct mlx5_ifc_modify_virtio_net_q_in_bits { + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; + + struct mlx5_ifc_virtio_net_q_object_bits 
obj_context; +}; + +struct mlx5_ifc_modify_virtio_net_q_out_bits { + struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr; +}; + +#endif /* __MLX5_VDPA_IFC_H_ */ diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c new file mode 100644 index 000000000000..ef1c550f8266 --- /dev/null +++ b/drivers/vdpa/mlx5/core/mr.c @@ -0,0 +1,486 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd. */ + +#include <linux/vdpa.h> +#include <linux/gcd.h> +#include <linux/string.h> +#include <linux/mlx5/qp.h> +#include "mlx5_vdpa.h" + +/* DIV_ROUND_UP where the divider is a power of 2 give by its log base 2 value */ +#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \ +({ \ + u64 __s = _s; \ + u64 _res; \ + _res = (((_n) + (1 << (__s)) - 1) >> (__s)); \ + _res; \ +}) + +static int get_octo_len(u64 len, int page_shift) +{ + u64 page_size = 1ULL << page_shift; + int npages; + + npages = ALIGN(len, page_size) >> page_shift; + return (npages + 1) / 2; +} + +static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in) +{ + struct scatterlist *sg; + __be64 *pas; + int i; + + pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i) + (*pas) = cpu_to_be64(sg_dma_address(sg)); +} + +static void mlx5_set_access_mode(void *mkc, int mode) +{ + MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3); + MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2); +} + +static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) +{ + struct scatterlist *sg; + int i; + + for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i) + mtt[i] = cpu_to_be64(sg_dma_address(sg)); +} + +static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) +{ + int inlen; + void *mkc; + void *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + roundup(MLX5_ST_SZ_BYTES(mtt) * mr->nsg, 16); + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); + fill_sg(mr, in); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO)); + MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO)); + mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mvdev->res.pdn); + MLX5_SET64(mkc, mkc, start_addr, mr->offset); + MLX5_SET64(mkc, mkc, len, mr->end - mr->start); + MLX5_SET(mkc, mkc, log_page_size, mr->log_size); + MLX5_SET(mkc, mkc, translations_octword_size, + get_octo_len(mr->end - mr->start, mr->log_size)); + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, + get_octo_len(mr->end - mr->start, mr->log_size)); + populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt)); + err = mlx5_vdpa_create_mkey(mvdev, &mr->mr, in, inlen); + kvfree(in); + if (err) { + mlx5_vdpa_warn(mvdev, "Failed to create direct MR\n"); + return err; + } + + return 0; +} + +static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) +{ + mlx5_vdpa_destroy_mkey(mvdev, &mr->mr); +} + +static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) +{ + return max_t(u64, map->start, mr->start); +} + +static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) +{ + return min_t(u64, map->last + 1, mr->end); +} + +static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr) +{ + return map_end(map, mr) - map_start(map, mr); +} + +#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1) 
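/*
 * This sentinel and MLX5_VDPA_INVALID_LEN below are returned by
 * indir_start_addr() and indir_len() when the indirect mkey has no direct
 * MR children on its list; create_indirect_key() treats either value as an
 * error and bails out with -EINVAL.
 */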
+#define MLX5_VDPA_INVALID_LEN ((u64)-1) + +static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey) +{ + struct mlx5_vdpa_direct_mr *s; + + s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list); + if (!s) + return MLX5_VDPA_INVALID_START_ADDR; + + return s->start; +} + +static u64 indir_len(struct mlx5_vdpa_mr *mkey) +{ + struct mlx5_vdpa_direct_mr *s; + struct mlx5_vdpa_direct_mr *e; + + s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list); + if (!s) + return MLX5_VDPA_INVALID_LEN; + + e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list); + + return e->end - s->start; +} + +#define LOG_MAX_KLM_SIZE 30 +#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE) + +static u32 klm_bcount(u64 size) +{ + return (u32)size; +} + +static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in) +{ + struct mlx5_vdpa_direct_mr *dmr; + struct mlx5_klm *klmarr; + struct mlx5_klm *klm; + bool first = true; + u64 preve; + int i; + + klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); + i = 0; + list_for_each_entry(dmr, &mkey->head, list) { +again: + klm = &klmarr[i++]; + if (first) { + preve = dmr->start; + first = false; + } + + if (preve == dmr->start) { + klm->key = cpu_to_be32(dmr->mr.key); + klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start)); + preve = dmr->end; + } else { + klm->key = cpu_to_be32(mvdev->res.null_mkey); + klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve)); + preve = dmr->start; + goto again; + } + } +} + +static int klm_byte_size(int nklms) +{ + return 16 * ALIGN(nklms, 4); +} + +static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr) +{ + int inlen; + void *mkc; + void *in; + int err; + u64 start; + u64 len; + + start = indir_start_addr(mr); + len = indir_len(mr); + if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN) + return -EINVAL; + + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + MLX5_SET(mkc, mkc, lw, 1); + MLX5_SET(mkc, mkc, lr, 1); + mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS); + MLX5_SET(mkc, mkc, qpn, 0xffffff); + MLX5_SET(mkc, mkc, pd, mvdev->res.pdn); + MLX5_SET64(mkc, mkc, start_addr, start); + MLX5_SET64(mkc, mkc, len, len); + MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16); + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms); + fill_indir(mvdev, mr, in); + err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen); + kfree(in); + return err; +} + +static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey) +{ + mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey); +} + +static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr, + struct vhost_iotlb *iotlb) +{ + struct vhost_iotlb_map *map; + unsigned long lgcd = 0; + int log_entity_size; + unsigned long size; + u64 start = 0; + int err; + struct page *pg; + unsigned int nsg; + int sglen; + u64 pa; + u64 paend; + struct scatterlist *sg; + struct device *dma = mvdev->mdev->device; + int ret; + + for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); + map; map = vhost_iotlb_itree_next(map, start, mr->end - 1)) { + size = maplen(map, mr); + lgcd = gcd(lgcd, size); + start += size; + } + log_entity_size = ilog2(lgcd); + + sglen = 1 << log_entity_size; + 
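/*
 * lgcd is the GCD of all the map lengths intersecting this MR, and
 * log_entity_size the largest power of two not exceeding it. sglen is that
 * entity size in bytes; nsg, computed next, is how many such entities are
 * needed to cover the whole [mr->start, mr->end) range, and therefore how
 * many scatterlist entries to allocate.
 */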
nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size); + + err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL); + if (err) + return err; + + sg = mr->sg_head.sgl; + for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1); + map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) { + paend = map->addr + maplen(map, mr); + for (pa = map->addr; pa < paend; pa += sglen) { + pg = pfn_to_page(__phys_to_pfn(pa)); + if (!sg) { + mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n", + map->start, map->last + 1); + err = -ENOMEM; + goto err_map; + } + sg_set_page(sg, pg, sglen, 0); + sg = sg_next(sg); + if (!sg) + goto done; + } + } +done: + mr->log_size = log_entity_size; + mr->nsg = nsg; + ret = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); + if (!ret) + goto err_map; + + err = create_direct_mr(mvdev, mr); + if (err) + goto err_direct; + + return 0; + +err_direct: + dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); +err_map: + sg_free_table(&mr->sg_head); + return err; +} + +static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) +{ + struct device *dma = mvdev->mdev->device; + + destroy_direct_mr(mvdev, mr); + dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); + sg_free_table(&mr->sg_head); +} + +static int add_direct_chain(struct mlx5_vdpa_dev *mvdev, u64 start, u64 size, u8 perm, + struct vhost_iotlb *iotlb) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + struct mlx5_vdpa_direct_mr *dmr; + struct mlx5_vdpa_direct_mr *n; + LIST_HEAD(tmp); + u64 st; + u64 sz; + int err; + int i = 0; + + st = start; + while (size) { + sz = (u32)min_t(u64, MAX_KLM_SIZE, size); + dmr = kzalloc(sizeof(*dmr), GFP_KERNEL); + if (!dmr) { + err = -ENOMEM; + goto err_alloc; + } + + dmr->start = st; + dmr->end = st + sz; + dmr->perm = perm; + err = map_direct_mr(mvdev, dmr, iotlb); + if (err) { + kfree(dmr); + goto err_alloc; + } + + list_add_tail(&dmr->list, &tmp); + size -= sz; + mr->num_directs++; + mr->num_klms++; + st += sz; + i++; + } + list_splice_tail(&tmp, &mr->head); + return 0; + +err_alloc: + list_for_each_entry_safe(dmr, n, &mr->head, list) { + list_del_init(&dmr->list); + unmap_direct_mr(mvdev, dmr); + kfree(dmr); + } + return err; +} + +/* The iotlb pointer contains a list of maps. Go over the maps, possibly + * merging mergeable maps, and create direct memory keys that provide the + * device access to memory. The direct mkeys are then referred to by the + * indirect memory key that provides access to the enitre address space given + * by iotlb. + */ +static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + struct mlx5_vdpa_direct_mr *dmr; + struct mlx5_vdpa_direct_mr *n; + struct vhost_iotlb_map *map; + u32 pperm = U16_MAX; + u64 last = U64_MAX; + u64 ps = U64_MAX; + u64 pe = U64_MAX; + u64 start = 0; + int err = 0; + int nnuls; + + if (mr->initialized) + return 0; + + INIT_LIST_HEAD(&mr->head); + for (map = vhost_iotlb_itree_first(iotlb, start, last); map; + map = vhost_iotlb_itree_next(map, start, last)) { + start = map->start; + if (pe == map->start && pperm == map->perm) { + pe = map->last + 1; + } else { + if (ps != U64_MAX) { + if (pe < map->start) { + /* We have a hole in the map. Check how + * many null keys are required to fill it. 
+ */ + nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe, + LOG_MAX_KLM_SIZE); + mr->num_klms += nnuls; + } + err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb); + if (err) + goto err_chain; + } + ps = map->start; + pe = map->last + 1; + pperm = map->perm; + } + } + err = add_direct_chain(mvdev, ps, pe - ps, pperm, iotlb); + if (err) + goto err_chain; + + /* Create the memory key that defines the guests's address space. This + * memory key refers to the direct keys that contain the MTT + * translations + */ + err = create_indirect_key(mvdev, mr); + if (err) + goto err_chain; + + mr->initialized = true; + return 0; + +err_chain: + list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) { + list_del_init(&dmr->list); + unmap_direct_mr(mvdev, dmr); + kfree(dmr); + } + return err; +} + +int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + int err; + + mutex_lock(&mr->mkey_mtx); + err = _mlx5_vdpa_create_mr(mvdev, iotlb); + mutex_unlock(&mr->mkey_mtx); + return err; +} + +void mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + struct mlx5_vdpa_direct_mr *dmr; + struct mlx5_vdpa_direct_mr *n; + + mutex_lock(&mr->mkey_mtx); + if (!mr->initialized) + goto out; + + destroy_indirect_key(mvdev, mr); + list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) { + list_del_init(&dmr->list); + unmap_direct_mr(mvdev, dmr); + kfree(dmr); + } + memset(mr, 0, sizeof(*mr)); + mr->initialized = false; +out: + mutex_unlock(&mr->mkey_mtx); +} + +static bool map_empty(struct vhost_iotlb *iotlb) +{ + return !vhost_iotlb_itree_first(iotlb, 0, U64_MAX); +} + +int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb, + bool *change_map) +{ + struct mlx5_vdpa_mr *mr = &mvdev->mr; + int err = 0; + + *change_map = false; + if (map_empty(iotlb)) { + mlx5_vdpa_destroy_mr(mvdev); + return 0; + } + mutex_lock(&mr->mkey_mtx); + if (mr->initialized) { + mlx5_vdpa_info(mvdev, "memory map update\n"); + *change_map = true; + } + if (!*change_map) + err = _mlx5_vdpa_create_mr(mvdev, iotlb); + mutex_unlock(&mr->mkey_mtx); + + return err; +} diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c new file mode 100644 index 000000000000..96e6421c5d1c --- /dev/null +++ b/drivers/vdpa/mlx5/core/resources.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd. 
*/ + +#include <linux/mlx5/driver.h> +#include "mlx5_vdpa.h" + +static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid) +{ + struct mlx5_core_dev *mdev = dev->mdev; + + u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; + int err; + + MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); + MLX5_SET(alloc_pd_in, in, uid, uid); + + err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out); + if (!err) + *pdn = MLX5_GET(alloc_pd_out, out, pd); + + return err; +} + +static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {}; + struct mlx5_core_dev *mdev = dev->mdev; + + MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD); + MLX5_SET(dealloc_pd_in, in, pd, pdn); + MLX5_SET(dealloc_pd_in, in, uid, uid); + return mlx5_cmd_exec_in(mdev, dealloc_pd, in); +} + +static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey) +{ + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {}; + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {}; + struct mlx5_core_dev *mdev = dev->mdev; + int err; + + MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); + err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out); + if (!err) + *null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey); + return err; +} + +static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid) +{ + u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {}; + int inlen; + void *in; + int err; + + /* 0 means not supported */ + if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx)) + return -EOPNOTSUPP; + + inlen = MLX5_ST_SZ_BYTES(create_uctx_in); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX); + MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX); + + err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out)); + kfree(in); + if (!err) + *uid = MLX5_GET(create_uctx_out, out, uid); + + return err; +} + +static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid) +{ + u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {}; + u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {}; + + MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX); + MLX5_SET(destroy_uctx_in, in, uid, uid); + + mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out)); +} + +int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn) +{ + u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {}; + int err; + + MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS); + MLX5_SET(create_tis_in, in, uid, mvdev->res.uid); + err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out); + if (!err) + *tisn = MLX5_GET(create_tis_out, out, tisn); + + return err; +} + +void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {}; + + MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS); + MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid); + MLX5_SET(destroy_tis_in, in, tisn, tisn); + mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in); +} + +int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn) +{ + u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {}; + int err; + + MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT); + err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out)); + if (!err) + *rqtn = MLX5_GET(create_rqt_out, out, rqtn); + + return err; +} + +void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn) +{ + u32 
in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {}; + + MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT); + MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid); + MLX5_SET(destroy_rqt_in, in, rqtn, rqtn); + mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in); +} + +int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn) +{ + u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {}; + int err; + + MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR); + err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out); + if (!err) + *tirn = MLX5_GET(create_tir_out, out, tirn); + + return err; +} + +void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn) +{ + u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {}; + + MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR); + MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid); + MLX5_SET(destroy_tir_in, in, tirn, tirn); + mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in); +} + +int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn) +{ + u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {}; + int err; + + MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN); + MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid); + + err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out); + if (!err) + *tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain); + + return err; +} + +void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {}; + + MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN); + MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid); + MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn); + mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in); +} + +int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in, + int inlen) +{ + u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {}; + u32 mkey_index; + void *mkc; + int err; + + MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY); + MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); + + err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout)); + if (err) + return err; + + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); + mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index); + mkey->iova = MLX5_GET64(mkc, mkc, start_addr); + mkey->size = MLX5_GET64(mkc, mkc, len); + mkey->key |= mlx5_idx_to_mkey(mkey_index); + mkey->pd = MLX5_GET(mkc, mkc, pd); + return 0; +} + +int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey) +{ + u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {}; + + MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid); + MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY); + MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key)); + return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in); +} + +int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev) +{ + u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset); + struct mlx5_vdpa_resources *res = &mvdev->res; + struct mlx5_core_dev *mdev = mvdev->mdev; + u64 kick_addr; + int err; + + if (res->valid) { + mlx5_vdpa_warn(mvdev, "resources already allocated\n"); + return -EINVAL; + } + mutex_init(&mvdev->mr.mkey_mtx); + res->uar = mlx5_get_uars_page(mdev); + if (IS_ERR(res->uar)) { + err = PTR_ERR(res->uar); + goto err_uars; + 
} + + err = create_uctx(mvdev, &res->uid); + if (err) + goto err_uctx; + + err = alloc_pd(mvdev, &res->pdn, res->uid); + if (err) + goto err_pd; + + err = get_null_mkey(mvdev, &res->null_mkey); + if (err) + goto err_key; + + kick_addr = pci_resource_start(mdev->pdev, 0) + offset; + res->kick_addr = ioremap(kick_addr, PAGE_SIZE); + if (!res->kick_addr) { + err = -ENOMEM; + goto err_key; + } + res->valid = true; + + return 0; + +err_key: + dealloc_pd(mvdev, res->pdn, res->uid); +err_pd: + destroy_uctx(mvdev, res->uid); +err_uctx: + mlx5_put_uars_page(mdev, res->uar); +err_uars: + mutex_destroy(&mvdev->mr.mkey_mtx); + return err; +} + +void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev) +{ + struct mlx5_vdpa_resources *res = &mvdev->res; + + if (!res->valid) + return; + + iounmap(res->kick_addr); + res->kick_addr = NULL; + dealloc_pd(mvdev, res->pdn, res->uid); + destroy_uctx(mvdev, res->uid); + mlx5_put_uars_page(mvdev->mdev, res->uar); + mutex_destroy(&mvdev->mr.mkey_mtx); + res->valid = false; +} diff --git a/drivers/vdpa/mlx5/net/main.c b/drivers/vdpa/mlx5/net/main.c new file mode 100644 index 000000000000..838cd98386ff --- /dev/null +++ b/drivers/vdpa/mlx5/net/main.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd. */ + +#include <linux/module.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/device.h> +#include "mlx5_vdpa_ifc.h" +#include "mlx5_vnet.h" + +MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); +MODULE_DESCRIPTION("Mellanox VDPA driver"); +MODULE_LICENSE("Dual BSD/GPL"); + +static bool required_caps_supported(struct mlx5_core_dev *mdev) +{ + u8 event_mode; + u64 got; + + got = MLX5_CAP_GEN_64(mdev, general_obj_types); + + if (!(got & MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q)) + return false; + + event_mode = MLX5_CAP_DEV_VDPA_EMULATION(mdev, event_mode); + if (!(event_mode & MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE)) + return false; + + if (!MLX5_CAP_DEV_VDPA_EMULATION(mdev, eth_frame_offload_type)) + return false; + + return true; +} + +static void *mlx5_vdpa_add(struct mlx5_core_dev *mdev) +{ + struct mlx5_vdpa_dev *vdev; + + if (mlx5_core_is_pf(mdev)) + return NULL; + + if (!required_caps_supported(mdev)) { + dev_info(mdev->device, "virtio net emulation not supported\n"); + return NULL; + } + vdev = mlx5_vdpa_add_dev(mdev); + if (IS_ERR(vdev)) + return NULL; + + return vdev; +} + +static void mlx5_vdpa_remove(struct mlx5_core_dev *mdev, void *context) +{ + struct mlx5_vdpa_dev *vdev = context; + + mlx5_vdpa_remove_dev(vdev); +} + +static struct mlx5_interface mlx5_vdpa_interface = { + .add = mlx5_vdpa_add, + .remove = mlx5_vdpa_remove, + .protocol = MLX5_INTERFACE_PROTOCOL_VDPA, +}; + +static int __init mlx5_vdpa_init(void) +{ + return mlx5_register_interface(&mlx5_vdpa_interface); +} + +static void __exit mlx5_vdpa_exit(void) +{ + mlx5_unregister_interface(&mlx5_vdpa_interface); +} + +module_init(mlx5_vdpa_init); +module_exit(mlx5_vdpa_exit); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c new file mode 100644 index 000000000000..9df69d5efe8c --- /dev/null +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -0,0 +1,1974 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2020 Mellanox Technologies Ltd. 
*/ + +#include <linux/vdpa.h> +#include <uapi/linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/mlx5/qp.h> +#include <linux/mlx5/device.h> +#include <linux/mlx5/vport.h> +#include <linux/mlx5/fs.h> +#include <linux/mlx5/device.h> +#include "mlx5_vnet.h" +#include "mlx5_vdpa_ifc.h" +#include "mlx5_vdpa.h" + +#define to_mvdev(__vdev) container_of((__vdev), struct mlx5_vdpa_dev, vdev) + +#define VALID_FEATURES_MASK \ + (BIT(VIRTIO_NET_F_CSUM) | BIT(VIRTIO_NET_F_GUEST_CSUM) | \ + BIT(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) | BIT(VIRTIO_NET_F_MTU) | BIT(VIRTIO_NET_F_MAC) | \ + BIT(VIRTIO_NET_F_GUEST_TSO4) | BIT(VIRTIO_NET_F_GUEST_TSO6) | \ + BIT(VIRTIO_NET_F_GUEST_ECN) | BIT(VIRTIO_NET_F_GUEST_UFO) | BIT(VIRTIO_NET_F_HOST_TSO4) | \ + BIT(VIRTIO_NET_F_HOST_TSO6) | BIT(VIRTIO_NET_F_HOST_ECN) | BIT(VIRTIO_NET_F_HOST_UFO) | \ + BIT(VIRTIO_NET_F_MRG_RXBUF) | BIT(VIRTIO_NET_F_STATUS) | BIT(VIRTIO_NET_F_CTRL_VQ) | \ + BIT(VIRTIO_NET_F_CTRL_RX) | BIT(VIRTIO_NET_F_CTRL_VLAN) | \ + BIT(VIRTIO_NET_F_CTRL_RX_EXTRA) | BIT(VIRTIO_NET_F_GUEST_ANNOUNCE) | \ + BIT(VIRTIO_NET_F_MQ) | BIT(VIRTIO_NET_F_CTRL_MAC_ADDR) | BIT(VIRTIO_NET_F_HASH_REPORT) | \ + BIT(VIRTIO_NET_F_RSS) | BIT(VIRTIO_NET_F_RSC_EXT) | BIT(VIRTIO_NET_F_STANDBY) | \ + BIT(VIRTIO_NET_F_SPEED_DUPLEX) | BIT(VIRTIO_F_NOTIFY_ON_EMPTY) | \ + BIT(VIRTIO_F_ANY_LAYOUT) | BIT(VIRTIO_F_VERSION_1) | BIT(VIRTIO_F_ACCESS_PLATFORM) | \ + BIT(VIRTIO_F_RING_PACKED) | BIT(VIRTIO_F_ORDER_PLATFORM) | BIT(VIRTIO_F_SR_IOV)) + +#define VALID_STATUS_MASK \ + (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER | VIRTIO_CONFIG_S_DRIVER_OK | \ + VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_NEEDS_RESET | VIRTIO_CONFIG_S_FAILED) + +struct mlx5_vdpa_net_resources { + u32 tisn; + u32 tdn; + u32 tirn; + u32 rqtn; + bool valid; +}; + +struct mlx5_vdpa_cq_buf { + struct mlx5_frag_buf_ctrl fbc; + struct mlx5_frag_buf frag_buf; + int cqe_size; + int nent; +}; + +struct mlx5_vdpa_cq { + struct mlx5_core_cq mcq; + struct mlx5_vdpa_cq_buf buf; + struct mlx5_db db; + int cqe; +}; + +struct mlx5_vdpa_umem { + struct mlx5_frag_buf_ctrl fbc; + struct mlx5_frag_buf frag_buf; + int size; + u32 id; +}; + +struct mlx5_vdpa_qp { + struct mlx5_core_qp mqp; + struct mlx5_frag_buf frag_buf; + struct mlx5_db db; + u16 head; + bool fw; +}; + +struct mlx5_vq_restore_info { + u32 num_ent; + u64 desc_addr; + u64 device_addr; + u64 driver_addr; + u16 avail_index; + bool ready; + struct vdpa_callback cb; + bool restore; +}; + +struct mlx5_vdpa_virtqueue { + bool ready; + u64 desc_addr; + u64 device_addr; + u64 driver_addr; + u32 num_ent; + struct vdpa_callback event_cb; + + /* Resources for implementing the notification channel from the device + * to the driver. fwqp is the firmware end of an RC connection; the + * other end is vqqp used by the driver. cq is is where completions are + * reported. + */ + struct mlx5_vdpa_cq cq; + struct mlx5_vdpa_qp fwqp; + struct mlx5_vdpa_qp vqqp; + + /* umem resources are required for the virtqueue operation. They're use + * is internal and they must be provided by the driver. 
+ */ + struct mlx5_vdpa_umem umem1; + struct mlx5_vdpa_umem umem2; + struct mlx5_vdpa_umem umem3; + + bool initialized; + int index; + u32 virtq_id; + struct mlx5_vdpa_net *ndev; + u16 avail_idx; + int fw_state; + + /* keep last in the struct */ + struct mlx5_vq_restore_info ri; +}; + +/* We will remove this limitation once mlx5_vdpa_alloc_resources() + * provides for driver space allocation + */ +#define MLX5_MAX_SUPPORTED_VQS 16 + +struct mlx5_vdpa_net { + struct mlx5_vdpa_dev mvdev; + struct mlx5_vdpa_net_resources res; + struct virtio_net_config config; + struct mlx5_vdpa_virtqueue vqs[MLX5_MAX_SUPPORTED_VQS]; + + /* Serialize vq resources creation and destruction. This is required + * since memory map might change and we need to destroy and create + * resources while driver in operational. + */ + struct mutex reslock; + struct mlx5_flow_table *rxft; + struct mlx5_fc *rx_counter; + struct mlx5_flow_handle *rx_rule; + bool setup; + u16 mtu; +}; + +static void free_resources(struct mlx5_vdpa_net *ndev); +static void init_mvqs(struct mlx5_vdpa_net *ndev); +static int setup_driver(struct mlx5_vdpa_net *ndev); +static void teardown_driver(struct mlx5_vdpa_net *ndev); + +static bool mlx5_vdpa_debug; + +#define MLX5_LOG_VIO_FLAG(_feature) \ + do { \ + if (features & BIT(_feature)) \ + mlx5_vdpa_info(mvdev, "%s\n", #_feature); \ + } while (0) + +#define MLX5_LOG_VIO_STAT(_status) \ + do { \ + if (status & (_status)) \ + mlx5_vdpa_info(mvdev, "%s\n", #_status); \ + } while (0) + +static void print_status(struct mlx5_vdpa_dev *mvdev, u8 status, bool set) +{ + if (status & ~VALID_STATUS_MASK) + mlx5_vdpa_warn(mvdev, "Warning: there are invalid status bits 0x%x\n", + status & ~VALID_STATUS_MASK); + + if (!mlx5_vdpa_debug) + return; + + mlx5_vdpa_info(mvdev, "driver status %s", set ? "set" : "get"); + if (set && !status) { + mlx5_vdpa_info(mvdev, "driver resets the device\n"); + return; + } + + MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_ACKNOWLEDGE); + MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER); + MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_DRIVER_OK); + MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FEATURES_OK); + MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_NEEDS_RESET); + MLX5_LOG_VIO_STAT(VIRTIO_CONFIG_S_FAILED); +} + +static void print_features(struct mlx5_vdpa_dev *mvdev, u64 features, bool set) +{ + if (features & ~VALID_FEATURES_MASK) + mlx5_vdpa_warn(mvdev, "There are invalid feature bits 0x%llx\n", + features & ~VALID_FEATURES_MASK); + + if (!mlx5_vdpa_debug) + return; + + mlx5_vdpa_info(mvdev, "driver %s feature bits:\n", set ? 
"sets" : "reads"); + if (!features) + mlx5_vdpa_info(mvdev, "all feature bits are cleared\n"); + + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CSUM); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_CSUM); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MTU); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MAC); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO4); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_TSO6); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ECN); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_UFO); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO4); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_TSO6); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_ECN); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HOST_UFO); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MRG_RXBUF); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STATUS); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VQ); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_VLAN); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_RX_EXTRA); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_GUEST_ANNOUNCE); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_MQ); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_CTRL_MAC_ADDR); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_HASH_REPORT); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSS); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_RSC_EXT); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_STANDBY); + MLX5_LOG_VIO_FLAG(VIRTIO_NET_F_SPEED_DUPLEX); + MLX5_LOG_VIO_FLAG(VIRTIO_F_NOTIFY_ON_EMPTY); + MLX5_LOG_VIO_FLAG(VIRTIO_F_ANY_LAYOUT); + MLX5_LOG_VIO_FLAG(VIRTIO_F_VERSION_1); + MLX5_LOG_VIO_FLAG(VIRTIO_F_ACCESS_PLATFORM); + MLX5_LOG_VIO_FLAG(VIRTIO_F_RING_PACKED); + MLX5_LOG_VIO_FLAG(VIRTIO_F_ORDER_PLATFORM); + MLX5_LOG_VIO_FLAG(VIRTIO_F_SR_IOV); +} + +static int create_tis(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; + u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {}; + void *tisc; + int err; + + tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); + MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); + err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); + if (err) + mlx5_vdpa_warn(mvdev, "create TIS (%d)\n", err); + + return err; +} + +static void destroy_tis(struct mlx5_vdpa_net *ndev) +{ + mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); +} + +#define MLX5_VDPA_CQE_SIZE 64 +#define MLX5_VDPA_LOG_CQE_SIZE ilog2(MLX5_VDPA_CQE_SIZE) + +static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) +{ + struct mlx5_frag_buf *frag_buf = &buf->frag_buf; + u8 log_wq_stride = MLX5_VDPA_LOG_CQE_SIZE; + u8 log_wq_sz = MLX5_VDPA_LOG_CQE_SIZE; + int err; + + err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, + ndev->mvdev.mdev->priv.numa_node); + if (err) + return err; + + mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc); + + buf->cqe_size = MLX5_VDPA_CQE_SIZE; + buf->nent = nent; + + return 0; +} + +static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) +{ + struct mlx5_frag_buf *frag_buf = &umem->frag_buf; + + return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, + ndev->mvdev.mdev->priv.numa_node); +} + +static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) +{ + mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); +} + +static void *get_cqe(struct mlx5_vdpa_cq *vcq, int n) +{ + return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n); +} + +static void cq_frag_buf_init(struct mlx5_vdpa_cq *vcq, struct mlx5_vdpa_cq_buf *buf) +{ + struct mlx5_cqe64 *cqe64; + void *cqe; + int i; + + for (i = 0; i < buf->nent; i++) { + cqe = get_cqe(vcq, i); + cqe64 = cqe; + cqe64->op_own = 
MLX5_CQE_INVALID << 4; + } +} + +static void *get_sw_cqe(struct mlx5_vdpa_cq *cq, int n) +{ + struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1)); + + if (likely(get_cqe_opcode(cqe64) != MLX5_CQE_INVALID) && + !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe))) + return cqe64; + + return NULL; +} + +static void rx_post(struct mlx5_vdpa_qp *vqp, int n) +{ + vqp->head += n; + vqp->db.db[0] = cpu_to_be32(vqp->head); +} + +static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, + struct mlx5_vdpa_virtqueue *mvq, u32 num_ent) +{ + struct mlx5_vdpa_qp *vqp; + __be64 *pas; + void *qpc; + + vqp = fw ? &mvq->fwqp : &mvq->vqqp; + MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + if (vqp->fw) { + /* Firmware QP is allocated by the driver for the firmware's + * use so we can skip part of the params as they will be chosen by firmware + */ + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, rq_type, MLX5_ZERO_LEN_RQ); + MLX5_SET(qpc, qpc, no_sq, 1); + return; + } + + MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); + MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); + MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES); + MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); + MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET(qpc, qpc, no_sq, 1); + MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); + MLX5_SET(qpc, qpc, log_rq_size, ilog2(num_ent)); + MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ); + pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, in, pas); + mlx5_fill_page_frag_array(&vqp->frag_buf, pas); +} + +static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) +{ + return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, + num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf, + ndev->mvdev.mdev->priv.numa_node); +} + +static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) +{ + mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); +} + +static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, + struct mlx5_vdpa_qp *vqp) +{ + struct mlx5_core_dev *mdev = ndev->mvdev.mdev; + int inlen = MLX5_ST_SZ_BYTES(create_qp_in); + u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {}; + void *qpc; + void *in; + int err; + + if (!vqp->fw) { + vqp = &mvq->vqqp; + err = rq_buf_alloc(ndev, vqp, mvq->num_ent); + if (err) + return err; + + err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); + if (err) + goto err_db; + inlen += vqp->frag_buf.npages * sizeof(__be64); + } + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_kzalloc; + } + + qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); + qpc = MLX5_ADDR_OF(create_qp_in, in, qpc); + MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC); + MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED); + MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); + MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES); + if (!vqp->fw) + MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma); + MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP); + err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); + kfree(in); + if (err) + goto err_kzalloc; + + vqp->mqp.uid = ndev->mvdev.res.uid; + vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn); + + if (!vqp->fw) + rx_post(vqp, mvq->num_ent); + + return 0; + +err_kzalloc: + if (!vqp->fw) + mlx5_db_free(ndev->mvdev.mdev, &vqp->db); +err_db: + if (!vqp->fw) + rq_buf_free(ndev, vqp); + + return err; +} + +static void 
qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) +{ + u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {}; + + MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); + MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn); + MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); + if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) + mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); + if (!vqp->fw) { + mlx5_db_free(ndev->mvdev.mdev, &vqp->db); + rq_buf_free(ndev, vqp); + } +} + +static void *next_cqe_sw(struct mlx5_vdpa_cq *cq) +{ + return get_sw_cqe(cq, cq->mcq.cons_index); +} + +static int mlx5_vdpa_poll_one(struct mlx5_vdpa_cq *vcq) +{ + struct mlx5_cqe64 *cqe64; + + cqe64 = next_cqe_sw(vcq); + if (!cqe64) + return -EAGAIN; + + vcq->mcq.cons_index++; + return 0; +} + +static void mlx5_vdpa_handle_completions(struct mlx5_vdpa_virtqueue *mvq, int num) +{ + mlx5_cq_set_ci(&mvq->cq.mcq); + rx_post(&mvq->vqqp, num); + if (mvq->event_cb.callback) + mvq->event_cb.callback(mvq->event_cb.private); +} + +static void mlx5_vdpa_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe) +{ + struct mlx5_vdpa_virtqueue *mvq = container_of(mcq, struct mlx5_vdpa_virtqueue, cq.mcq); + struct mlx5_vdpa_net *ndev = mvq->ndev; + void __iomem *uar_page = ndev->mvdev.res.uar->map; + int num = 0; + + while (!mlx5_vdpa_poll_one(&mvq->cq)) { + num++; + if (num > mvq->num_ent / 2) { + /* If completions keep coming while we poll, we want to + * let the hardware know that we consumed them by + * updating the doorbell record. We also let vdpa core + * know about this so it passes it on the virtio driver + * on the guest. + */ + mlx5_vdpa_handle_completions(mvq, num); + num = 0; + } + } + + if (num) + mlx5_vdpa_handle_completions(mvq, num); + + mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); +} + +static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) +{ + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + struct mlx5_core_dev *mdev = ndev->mvdev.mdev; + void __iomem *uar_page = ndev->mvdev.res.uar->map; + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_vdpa_cq *vcq = &mvq->cq; + unsigned int irqn; + __be64 *pas; + int inlen; + void *cqc; + void *in; + int err; + int eqn; + + err = mlx5_db_alloc(mdev, &vcq->db); + if (err) + return err; + + vcq->mcq.set_ci_db = vcq->db.db; + vcq->mcq.arm_db = vcq->db.db + 1; + vcq->mcq.cqe_sz = 64; + + err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); + if (err) + goto err_db; + + cq_frag_buf_init(vcq, &vcq->buf); + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages; + in = kzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_vzalloc; + } + + MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); + pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas); + mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas); + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + + /* Use vector 0 by default. Consider adding code to choose least used + * vector. 
+ */ + err = mlx5_vector2eqn(mdev, 0, &eqn, &irqn); + if (err) + goto err_vec; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + MLX5_SET(cqc, cqc, log_cq_size, ilog2(num_ent)); + MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); + MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma); + + err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out)); + if (err) + goto err_vec; + + vcq->mcq.comp = mlx5_vdpa_cq_comp; + vcq->cqe = num_ent; + vcq->mcq.set_ci_db = vcq->db.db; + vcq->mcq.arm_db = vcq->db.db + 1; + mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); + kfree(in); + return 0; + +err_vec: + kfree(in); +err_vzalloc: + cq_frag_buf_free(ndev, &vcq->buf); +err_db: + mlx5_db_free(ndev->mvdev.mdev, &vcq->db); + return err; +} + +static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) +{ + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + struct mlx5_core_dev *mdev = ndev->mvdev.mdev; + struct mlx5_vdpa_cq *vcq = &mvq->cq; + + if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) { + mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); + return; + } + cq_frag_buf_free(ndev, &vcq->buf); + mlx5_db_free(ndev->mvdev.mdev, &vcq->db); +} + +static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, + struct mlx5_vdpa_umem **umemp) +{ + struct mlx5_core_dev *mdev = ndev->mvdev.mdev; + int p_a; + int p_b; + + switch (num) { + case 1: + p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_a); + p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_1_buffer_param_b); + *umemp = &mvq->umem1; + break; + case 2: + p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_a); + p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_2_buffer_param_b); + *umemp = &mvq->umem2; + break; + case 3: + p_a = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_a); + p_b = MLX5_CAP_DEV_VDPA_EMULATION(mdev, umem_3_buffer_param_b); + *umemp = &mvq->umem3; + break; + } + return p_a * mvq->num_ent + p_b; +} + +static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) +{ + mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); +} + +static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) +{ + int inlen; + u32 out[MLX5_ST_SZ_DW(create_umem_out)] = {}; + void *um; + void *in; + int err; + __be64 *pas; + int size; + struct mlx5_vdpa_umem *umem; + + size = umem_size(ndev, mvq, num, &umem); + if (size < 0) + return size; + + umem->size = size; + err = umem_frag_buf_alloc(ndev, umem, size); + if (err) + return err; + + inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_in; + } + + MLX5_SET(create_umem_in, in, opcode, MLX5_CMD_OP_CREATE_UMEM); + MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); + um = MLX5_ADDR_OF(create_umem_in, in, umem); + MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages); + + pas = (__be64 *)MLX5_ADDR_OF(umem, um, mtt[0]); + mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW); + + err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); + goto err_cmd; + } + + kfree(in); + umem->id = MLX5_GET(create_umem_out, out, umem_id); + + return 0; + +err_cmd: + kfree(in); +err_in: + umem_frag_buf_free(ndev, umem); + return 
err; +} + +static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) +{ + u32 in[MLX5_ST_SZ_DW(destroy_umem_in)] = {}; + u32 out[MLX5_ST_SZ_DW(destroy_umem_out)] = {}; + struct mlx5_vdpa_umem *umem; + + switch (num) { + case 1: + umem = &mvq->umem1; + break; + case 2: + umem = &mvq->umem2; + break; + case 3: + umem = &mvq->umem3; + break; + } + + MLX5_SET(destroy_umem_in, in, opcode, MLX5_CMD_OP_DESTROY_UMEM); + MLX5_SET(destroy_umem_in, in, umem_id, umem->id); + if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) + return; + + umem_frag_buf_free(ndev, umem); +} + +static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + int num; + int err; + + for (num = 1; num <= 3; num++) { + err = create_umem(ndev, mvq, num); + if (err) + goto err_umem; + } + return 0; + +err_umem: + for (num--; num > 0; num--) + umem_destroy(ndev, mvq, num); + + return err; +} + +static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + int num; + + for (num = 3; num > 0; num--) + umem_destroy(ndev, mvq, num); +} + +static int get_queue_type(struct mlx5_vdpa_net *ndev) +{ + u32 type_mask; + + type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); + + /* prefer split queue */ + if (type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_PACKED) + return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_PACKED; + + WARN_ON(!(type_mask & MLX5_VIRTIO_EMULATION_CAP_VIRTIO_QUEUE_TYPE_SPLIT)); + + return MLX5_VIRTIO_EMULATION_VIRTIO_QUEUE_TYPE_SPLIT; +} + +static bool vq_is_tx(u16 idx) +{ + return idx % 2; +} + +static u16 get_features_12_3(u64 features) +{ + return (!!(features & BIT(VIRTIO_NET_F_HOST_TSO4)) << 9) | + (!!(features & BIT(VIRTIO_NET_F_HOST_TSO6)) << 8) | + (!!(features & BIT(VIRTIO_NET_F_CSUM)) << 7) | + (!!(features & BIT(VIRTIO_NET_F_GUEST_CSUM)) << 6); +} + +static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + int inlen = MLX5_ST_SZ_BYTES(create_virtio_net_q_in); + u32 out[MLX5_ST_SZ_DW(create_virtio_net_q_out)] = {}; + void *obj_context; + void *cmd_hdr; + void *vq_ctx; + void *in; + int err; + + err = umems_create(ndev, mvq); + if (err) + return err; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) { + err = -ENOMEM; + goto err_alloc; + } + + cmd_hdr = MLX5_ADDR_OF(create_virtio_net_q_in, in, general_obj_in_cmd_hdr); + + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_CREATE_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); + + obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context); + MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); + MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3, + get_features_12_3(ndev->mvdev.actual_features)); + vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context); + MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); + + if (vq_is_tx(mvq->index)) + MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); + + MLX5_SET(virtio_q, vq_ctx, event_mode, MLX5_VIRTIO_Q_EVENT_MODE_QP_MODE); + MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); + MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); + MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); + MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, + !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1)); 
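+	/*
+	 * Note: in virtio terms the "driver area" is the available ring and
+	 * the "device area" is the used ring, so driver_addr is programmed
+	 * into available_addr and device_addr into used_addr below.
+	 */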
+	MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
+	MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
+	MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
+	MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
+	MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
+	MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
+	MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
+	MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size);
+	MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id);
+	MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size);
+	MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn);
+	if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type))
+		MLX5_SET(virtio_q, vq_ctx, virtio_version_1_0, 1);
+
+	err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out));
+	if (err)
+		goto err_cmd;
+
+	kfree(in);
+	mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+	return 0;
+
+err_cmd:
+	kfree(in);
+err_alloc:
+	umems_destroy(ndev, mvq);
+	return err;
+}
+
+static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq)
+{
+	u32 in[MLX5_ST_SZ_DW(destroy_virtio_net_q_in)] = {};
+	u32 out[MLX5_ST_SZ_DW(destroy_virtio_net_q_out)] = {};
+
+	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.opcode,
+		 MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id);
+	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid);
+	MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_type,
+		 MLX5_OBJ_TYPE_VIRTIO_NET_Q);
+	if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) {
+		mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id);
+		return;
+	}
+	umems_destroy(ndev, mvq);
+}
+
+static u32 get_rqpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+	return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn;
+}
+
+static u32 get_qpn(struct mlx5_vdpa_virtqueue *mvq, bool fw)
+{
+	return fw ?
mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; +} + +static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, + int *outlen, u32 qpn, u32 rqpn) +{ + void *qpc; + void *pp; + + switch (cmd) { + case MLX5_CMD_OP_2RST_QP: + *inlen = MLX5_ST_SZ_BYTES(qp_2rst_in); + *outlen = MLX5_ST_SZ_BYTES(qp_2rst_out); + *in = kzalloc(*inlen, GFP_KERNEL); + *out = kzalloc(*outlen, GFP_KERNEL); + if (!*in || !*out) + goto outerr; + + MLX5_SET(qp_2rst_in, *in, opcode, cmd); + MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); + MLX5_SET(qp_2rst_in, *in, qpn, qpn); + break; + case MLX5_CMD_OP_RST2INIT_QP: + *inlen = MLX5_ST_SZ_BYTES(rst2init_qp_in); + *outlen = MLX5_ST_SZ_BYTES(rst2init_qp_out); + *in = kzalloc(*inlen, GFP_KERNEL); + *out = kzalloc(MLX5_ST_SZ_BYTES(rst2init_qp_out), GFP_KERNEL); + if (!*in || !*out) + goto outerr; + + MLX5_SET(rst2init_qp_in, *in, opcode, cmd); + MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); + MLX5_SET(rst2init_qp_in, *in, qpn, qpn); + qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc); + MLX5_SET(qpc, qpc, remote_qpn, rqpn); + MLX5_SET(qpc, qpc, rwe, 1); + pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path); + MLX5_SET(ads, pp, vhca_port_num, 1); + break; + case MLX5_CMD_OP_INIT2RTR_QP: + *inlen = MLX5_ST_SZ_BYTES(init2rtr_qp_in); + *outlen = MLX5_ST_SZ_BYTES(init2rtr_qp_out); + *in = kzalloc(*inlen, GFP_KERNEL); + *out = kzalloc(MLX5_ST_SZ_BYTES(init2rtr_qp_out), GFP_KERNEL); + if (!*in || !*out) + goto outerr; + + MLX5_SET(init2rtr_qp_in, *in, opcode, cmd); + MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); + MLX5_SET(init2rtr_qp_in, *in, qpn, qpn); + qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc); + MLX5_SET(qpc, qpc, mtu, MLX5_QPC_MTU_256_BYTES); + MLX5_SET(qpc, qpc, log_msg_max, 30); + MLX5_SET(qpc, qpc, remote_qpn, rqpn); + pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path); + MLX5_SET(ads, pp, fl, 1); + break; + case MLX5_CMD_OP_RTR2RTS_QP: + *inlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_in); + *outlen = MLX5_ST_SZ_BYTES(rtr2rts_qp_out); + *in = kzalloc(*inlen, GFP_KERNEL); + *out = kzalloc(MLX5_ST_SZ_BYTES(rtr2rts_qp_out), GFP_KERNEL); + if (!*in || !*out) + goto outerr; + + MLX5_SET(rtr2rts_qp_in, *in, opcode, cmd); + MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); + MLX5_SET(rtr2rts_qp_in, *in, qpn, qpn); + qpc = MLX5_ADDR_OF(rst2init_qp_in, *in, qpc); + pp = MLX5_ADDR_OF(qpc, qpc, primary_address_path); + MLX5_SET(ads, pp, ack_timeout, 14); + MLX5_SET(qpc, qpc, retry_count, 7); + MLX5_SET(qpc, qpc, rnr_retry, 7); + break; + default: + goto outerr_nullify; + } + + return; + +outerr: + kfree(*in); + kfree(*out); +outerr_nullify: + *in = NULL; + *out = NULL; +} + +static void free_inout(void *in, void *out) +{ + kfree(in); + kfree(out); +} + +/* Two QPs are used by each virtqueue. One is used by the driver and one by + * firmware. The fw argument indicates whether the subjected QP is the one used + * by firmware. 
+ */ +static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) +{ + int outlen; + int inlen; + void *out; + void *in; + int err; + + alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); + if (!in || !out) + return -ENOMEM; + + err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); + free_inout(in, out); + return err; +} + +static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + int err; + + err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); + if (err) + return err; + + err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); + if (err) + return err; + + err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); + if (err) + return err; + + err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); + if (err) + return err; + + err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); + if (err) + return err; + + err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); + if (err) + return err; + + return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); +} + +struct mlx5_virtq_attr { + u8 state; + u16 available_index; +}; + +static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, + struct mlx5_virtq_attr *attr) +{ + int outlen = MLX5_ST_SZ_BYTES(query_virtio_net_q_out); + u32 in[MLX5_ST_SZ_DW(query_virtio_net_q_in)] = {}; + void *out; + void *obj_context; + void *cmd_hdr; + int err; + + out = kzalloc(outlen, GFP_KERNEL); + if (!out) + return -ENOMEM; + + cmd_hdr = MLX5_ADDR_OF(query_virtio_net_q_in, in, general_obj_in_cmd_hdr); + + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_QUERY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); + err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); + if (err) + goto err_cmd; + + obj_context = MLX5_ADDR_OF(query_virtio_net_q_out, out, obj_context); + memset(attr, 0, sizeof(*attr)); + attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); + attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); + kfree(out); + return 0; + +err_cmd: + kfree(out); + return err; +} + +static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) +{ + int inlen = MLX5_ST_SZ_BYTES(modify_virtio_net_q_in); + u32 out[MLX5_ST_SZ_DW(modify_virtio_net_q_out)] = {}; + void *obj_context; + void *cmd_hdr; + void *in; + int err; + + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + cmd_hdr = MLX5_ADDR_OF(modify_virtio_net_q_in, in, general_obj_in_cmd_hdr); + + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, opcode, MLX5_CMD_OP_MODIFY_GENERAL_OBJECT); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_type, MLX5_OBJ_TYPE_VIRTIO_NET_Q); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); + MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); + + obj_context = MLX5_ADDR_OF(modify_virtio_net_q_in, in, obj_context); + MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, + MLX5_VIRTQ_MODIFY_MASK_STATE); + MLX5_SET(virtio_net_q_object, obj_context, state, state); + err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); + kfree(in); + if (!err) + mvq->fw_state = state; + + return err; +} + +static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + 
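+	/*
+	 * Per-virtqueue bring-up order: completion queue, the firmware-owned
+	 * QP and the driver-owned QP, connect the two QPs (RST -> INIT ->
+	 * RTR -> RTS), create the VIRTIO_NET_Q object and, if the ring was
+	 * already marked ready, move it to the RDY state.
+	 */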
u16 idx = mvq->index; + int err; + + if (!mvq->num_ent) + return 0; + + if (mvq->initialized) { + mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n"); + return -EINVAL; + } + + err = cq_create(ndev, idx, mvq->num_ent); + if (err) + return err; + + err = qp_create(ndev, mvq, &mvq->fwqp); + if (err) + goto err_fwqp; + + err = qp_create(ndev, mvq, &mvq->vqqp); + if (err) + goto err_vqqp; + + err = connect_qps(ndev, mvq); + if (err) + goto err_connect; + + err = create_virtqueue(ndev, mvq); + if (err) + goto err_connect; + + if (mvq->ready) { + err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", + idx, err); + goto err_connect; + } + } + + mvq->initialized = true; + return 0; + +err_connect: + qp_destroy(ndev, &mvq->vqqp); +err_vqqp: + qp_destroy(ndev, &mvq->fwqp); +err_fwqp: + cq_destroy(ndev, idx); + return err; +} + +static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + struct mlx5_virtq_attr attr; + + if (!mvq->initialized) + return; + + if (query_virtqueue(ndev, mvq, &attr)) { + mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); + return; + } + if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) + return; + + if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) + mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); +} + +static void suspend_vqs(struct mlx5_vdpa_net *ndev) +{ + int i; + + for (i = 0; i < MLX5_MAX_SUPPORTED_VQS; i++) + suspend_vq(ndev, &ndev->vqs[i]); +} + +static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + if (!mvq->initialized) + return; + + suspend_vq(ndev, mvq); + destroy_virtqueue(ndev, mvq); + qp_destroy(ndev, &mvq->vqqp); + qp_destroy(ndev, &mvq->fwqp); + cq_destroy(ndev, mvq->index); + mvq->initialized = false; +} + +static int create_rqt(struct mlx5_vdpa_net *ndev) +{ + int log_max_rqt; + __be32 *list; + void *rqtc; + int inlen; + void *in; + int i, j; + int err; + + log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); + if (log_max_rqt < 1) + return -EOPNOTSUPP; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + (1 << log_max_rqt) * MLX5_ST_SZ_BYTES(rq_num); + in = kzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, list_q_type, MLX5_RQTC_LIST_Q_TYPE_VIRTIO_NET_Q); + MLX5_SET(rqtc, rqtc, rqt_max_size, 1 << log_max_rqt); + MLX5_SET(rqtc, rqtc, rqt_actual_size, 1); + list = MLX5_ADDR_OF(rqtc, rqtc, rq_num[0]); + for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) { + if (!ndev->vqs[j].initialized) + continue; + + if (!vq_is_tx(ndev->vqs[j].index)) { + list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); + i++; + } + } + + err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); + kfree(in); + if (err) + return err; + + return 0; +} + +static void destroy_rqt(struct mlx5_vdpa_net *ndev) +{ + mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); +} + +static int create_tir(struct mlx5_vdpa_net *ndev) +{ +#define HASH_IP_L4PORTS \ + (MLX5_HASH_FIELD_SEL_SRC_IP | MLX5_HASH_FIELD_SEL_DST_IP | MLX5_HASH_FIELD_SEL_L4_SPORT | \ + MLX5_HASH_FIELD_SEL_L4_DPORT) + static const u8 rx_hash_toeplitz_key[] = { 0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7, + 0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94, + 0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1, + 0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 
0x59, + 0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a }; + void *rss_key; + void *outer; + void *tirc; + void *in; + int err; + + in = kzalloc(MLX5_ST_SZ_BYTES(create_tir_in), GFP_KERNEL); + if (!in) + return -ENOMEM; + + MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); + tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ); + rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); + memcpy(rss_key, rx_hash_toeplitz_key, sizeof(rx_hash_toeplitz_key)); + + outer = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + MLX5_SET(rx_hash_field_select, outer, l3_prot_type, MLX5_L3_PROT_TYPE_IPV4); + MLX5_SET(rx_hash_field_select, outer, l4_prot_type, MLX5_L4_PROT_TYPE_TCP); + MLX5_SET(rx_hash_field_select, outer, selected_fields, HASH_IP_L4PORTS); + + MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); + MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); + + err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); + kfree(in); + return err; +} + +static void destroy_tir(struct mlx5_vdpa_net *ndev) +{ + mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); +} + +static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_flow_destination dest[2] = {}; + struct mlx5_flow_table_attr ft_attr = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_namespace *ns; + int err; + + /* for now, one entry, match all, forward to tir */ + ft_attr.max_fte = 1; + ft_attr.autogroup.max_num_groups = 1; + + ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); + if (!ns) { + mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n"); + return -EOPNOTSUPP; + } + + ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); + if (IS_ERR(ndev->rxft)) + return PTR_ERR(ndev->rxft); + + ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false); + if (IS_ERR(ndev->rx_counter)) { + err = PTR_ERR(ndev->rx_counter); + goto err_fc; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + dest[0].tir_num = ndev->res.tirn; + dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter_id = mlx5_fc_id(ndev->rx_counter); + ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2); + if (IS_ERR(ndev->rx_rule)) { + err = PTR_ERR(ndev->rx_rule); + ndev->rx_rule = NULL; + goto err_rule; + } + + return 0; + +err_rule: + mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); +err_fc: + mlx5_destroy_flow_table(ndev->rxft); + return err; +} + +static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev) +{ + if (!ndev->rx_rule) + return; + + mlx5_del_flow_rules(ndev->rx_rule); + mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); + mlx5_destroy_flow_table(ndev->rxft); + + ndev->rx_rule = NULL; +} + +static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + + if (unlikely(!mvq->ready)) + return; + + iowrite16(idx, ndev->mvdev.res.kick_addr); +} + +static int mlx5_vdpa_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area, + u64 driver_area, u64 device_area) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + + 
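+	/*
+	 * The three areas are I/O virtual addresses supplied by the vdpa bus
+	 * driver; they are only handed to the device later, when
+	 * create_virtqueue() programs them into the virtio_q context.
+	 */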
mvq->desc_addr = desc_area; + mvq->device_addr = device_area; + mvq->driver_addr = driver_area; + return 0; +} + +static void mlx5_vdpa_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq; + + mvq = &ndev->vqs[idx]; + mvq->num_ent = num; +} + +static void mlx5_vdpa_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx]; + + vq->event_cb = *cb; +} + +static void mlx5_vdpa_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + + if (!ready) + suspend_vq(ndev, mvq); + + mvq->ready = ready; +} + +static bool mlx5_vdpa_get_vq_ready(struct vdpa_device *vdev, u16 idx) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + + return mvq->ready; +} + +static int mlx5_vdpa_set_vq_state(struct vdpa_device *vdev, u16 idx, + const struct vdpa_vq_state *state) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + + if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { + mlx5_vdpa_warn(mvdev, "can't modify available index\n"); + return -EINVAL; + } + + mvq->avail_idx = state->avail_index; + return 0; +} + +static int mlx5_vdpa_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; + struct mlx5_virtq_attr attr; + int err; + + if (!mvq->initialized) + return -EAGAIN; + + err = query_virtqueue(ndev, mvq, &attr); + if (err) { + mlx5_vdpa_warn(mvdev, "failed to query virtqueue\n"); + return err; + } + state->avail_index = attr.available_index; + return 0; +} + +static u32 mlx5_vdpa_get_vq_align(struct vdpa_device *vdev) +{ + return PAGE_SIZE; +} + +enum { MLX5_VIRTIO_NET_F_GUEST_CSUM = 1 << 9, + MLX5_VIRTIO_NET_F_CSUM = 1 << 10, + MLX5_VIRTIO_NET_F_HOST_TSO6 = 1 << 11, + MLX5_VIRTIO_NET_F_HOST_TSO4 = 1 << 12, +}; + +static u64 mlx_to_vritio_features(u16 dev_features) +{ + u64 result = 0; + + if (dev_features & MLX5_VIRTIO_NET_F_GUEST_CSUM) + result |= BIT(VIRTIO_NET_F_GUEST_CSUM); + if (dev_features & MLX5_VIRTIO_NET_F_CSUM) + result |= BIT(VIRTIO_NET_F_CSUM); + if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO6) + result |= BIT(VIRTIO_NET_F_HOST_TSO6); + if (dev_features & MLX5_VIRTIO_NET_F_HOST_TSO4) + result |= BIT(VIRTIO_NET_F_HOST_TSO4); + + return result; +} + +static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + u16 dev_features; + + dev_features = MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, device_features_bits_mask); + ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features); + if (MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, virtio_version_1_0)) + ndev->mvdev.mlx_features |= BIT(VIRTIO_F_VERSION_1); + ndev->mvdev.mlx_features |= BIT(VIRTIO_F_ACCESS_PLATFORM); + print_features(mvdev, ndev->mvdev.mlx_features, 
false); + return ndev->mvdev.mlx_features; +} + +static int verify_min_features(struct mlx5_vdpa_dev *mvdev, u64 features) +{ + if (!(features & BIT(VIRTIO_F_ACCESS_PLATFORM))) + return -EOPNOTSUPP; + + return 0; +} + +static int setup_virtqueues(struct mlx5_vdpa_net *ndev) +{ + int err; + int i; + + for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) { + err = setup_vq(ndev, &ndev->vqs[i]); + if (err) + goto err_vq; + } + + return 0; + +err_vq: + for (--i; i >= 0; i--) + teardown_vq(ndev, &ndev->vqs[i]); + + return err; +} + +static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_vdpa_virtqueue *mvq; + int i; + + for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { + mvq = &ndev->vqs[i]; + if (!mvq->initialized) + continue; + + teardown_vq(ndev, mvq); + } +} + +/* TODO: cross-endian support */ +static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) +{ + return virtio_legacy_is_little_endian() || + (mvdev->actual_features & (1ULL << VIRTIO_F_VERSION_1)); +} + +static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + int err; + + print_features(mvdev, features, true); + + err = verify_min_features(mvdev, features); + if (err) + return err; + + ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; + ndev->config.mtu = __cpu_to_virtio16(mlx5_vdpa_is_little_endian(mvdev), + ndev->mtu); + return err; +} + +static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb) +{ + /* not implemented */ + mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n"); +} + +#define MLX5_VDPA_MAX_VQ_ENTRIES 256 +static u16 mlx5_vdpa_get_vq_num_max(struct vdpa_device *vdev) +{ + return MLX5_VDPA_MAX_VQ_ENTRIES; +} + +static u32 mlx5_vdpa_get_device_id(struct vdpa_device *vdev) +{ + return VIRTIO_ID_NET; +} + +static u32 mlx5_vdpa_get_vendor_id(struct vdpa_device *vdev) +{ + return PCI_VENDOR_ID_MELLANOX; +} + +static u8 mlx5_vdpa_get_status(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + + print_status(mvdev, ndev->mvdev.status, false); + return ndev->mvdev.status; +} + +static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) +{ + struct mlx5_vq_restore_info *ri = &mvq->ri; + struct mlx5_virtq_attr attr; + int err; + + if (!mvq->initialized) + return 0; + + err = query_virtqueue(ndev, mvq, &attr); + if (err) + return err; + + ri->avail_index = attr.available_index; + ri->ready = mvq->ready; + ri->num_ent = mvq->num_ent; + ri->desc_addr = mvq->desc_addr; + ri->device_addr = mvq->device_addr; + ri->driver_addr = mvq->driver_addr; + ri->cb = mvq->event_cb; + ri->restore = true; + return 0; +} + +static int save_channels_info(struct mlx5_vdpa_net *ndev) +{ + int i; + + for (i = 0; i < ndev->mvdev.max_vqs; i++) { + memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); + save_channel_info(ndev, &ndev->vqs[i]); + } + return 0; +} + +static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) +{ + int i; + + for (i = 0; i < ndev->mvdev.max_vqs; i++) + memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); +} + +static void restore_channels_info(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_vdpa_virtqueue *mvq; + struct mlx5_vq_restore_info *ri; + int i; + + mlx5_clear_vqs(ndev); + init_mvqs(ndev); + for (i = 0; i < ndev->mvdev.max_vqs; i++) { + mvq = &ndev->vqs[i]; + 
ri = &mvq->ri; + if (!ri->restore) + continue; + + mvq->avail_idx = ri->avail_index; + mvq->ready = ri->ready; + mvq->num_ent = ri->num_ent; + mvq->desc_addr = ri->desc_addr; + mvq->device_addr = ri->device_addr; + mvq->driver_addr = ri->driver_addr; + mvq->event_cb = ri->cb; + } +} + +static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb) +{ + int err; + + suspend_vqs(ndev); + err = save_channels_info(ndev); + if (err) + goto err_mr; + + teardown_driver(ndev); + mlx5_vdpa_destroy_mr(&ndev->mvdev); + err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb); + if (err) + goto err_mr; + + restore_channels_info(ndev); + err = setup_driver(ndev); + if (err) + goto err_setup; + + return 0; + +err_setup: + mlx5_vdpa_destroy_mr(&ndev->mvdev); +err_mr: + return err; +} + +static int setup_driver(struct mlx5_vdpa_net *ndev) +{ + int err; + + mutex_lock(&ndev->reslock); + if (ndev->setup) { + mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n"); + err = 0; + goto out; + } + err = setup_virtqueues(ndev); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n"); + goto out; + } + + err = create_rqt(ndev); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n"); + goto err_rqt; + } + + err = create_tir(ndev); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n"); + goto err_tir; + } + + err = add_fwd_to_tir(ndev); + if (err) { + mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n"); + goto err_fwd; + } + ndev->setup = true; + mutex_unlock(&ndev->reslock); + + return 0; + +err_fwd: + destroy_tir(ndev); +err_tir: + destroy_rqt(ndev); +err_rqt: + teardown_virtqueues(ndev); +out: + mutex_unlock(&ndev->reslock); + return err; +} + +static void teardown_driver(struct mlx5_vdpa_net *ndev) +{ + mutex_lock(&ndev->reslock); + if (!ndev->setup) + goto out; + + remove_fwd_to_tir(ndev); + destroy_tir(ndev); + destroy_rqt(ndev); + teardown_virtqueues(ndev); + ndev->setup = false; +out: + mutex_unlock(&ndev->reslock); +} + +static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + int err; + + print_status(mvdev, status, true); + if (!status) { + mlx5_vdpa_info(mvdev, "performing device reset\n"); + teardown_driver(ndev); + mlx5_vdpa_destroy_mr(&ndev->mvdev); + ndev->mvdev.status = 0; + ndev->mvdev.mlx_features = 0; + ++mvdev->generation; + return; + } + + if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { + if (status & VIRTIO_CONFIG_S_DRIVER_OK) { + err = setup_driver(ndev); + if (err) { + mlx5_vdpa_warn(mvdev, "failed to setup driver\n"); + goto err_setup; + } + } else { + mlx5_vdpa_warn(mvdev, "did not expect DRIVER_OK to be cleared\n"); + return; + } + } + + ndev->mvdev.status = status; + return; + +err_setup: + mlx5_vdpa_destroy_mr(&ndev->mvdev); + ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; +} + +static void mlx5_vdpa_get_config(struct vdpa_device *vdev, unsigned int offset, void *buf, + unsigned int len) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + + if (offset + len < sizeof(struct virtio_net_config)) + memcpy(buf, (u8 *)&ndev->config + offset, len); +} + +static void mlx5_vdpa_set_config(struct vdpa_device *vdev, unsigned int offset, const void *buf, + unsigned int len) +{ + /* not supported */ +} + +static u32 mlx5_vdpa_get_generation(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + + return mvdev->generation; +} + 
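Illustrative sketch (editorial, not part of the patch): the config ops defined in this file are only reached through the vdpa core by a bus driver such as virtio_vdpa or vhost_vdpa, never called directly. Assuming the vdpa and virtio_config headers and a helper name and queue index chosen here purely for illustration, a bus driver bringing up queue 0 would exercise the ops in roughly this order; writing DRIVER_OK is what finally triggers setup_driver() above.

/* Editorial illustration only -- hypothetical helper, not part of this patch. */
static int example_bring_up_vq0(struct vdpa_device *vdev,
				u64 desc_pa, u64 driver_pa, u64 device_pa)
{
	const struct vdpa_config_ops *ops = vdev->config;
	struct vdpa_callback cb = { .callback = NULL, .private = NULL };
	int err;

	/* Accept the device feature set; VIRTIO_F_ACCESS_PLATFORM is mandatory here. */
	err = ops->set_features(vdev, ops->get_features(vdev));
	if (err)
		return err;

	ops->set_vq_num(vdev, 0, ops->get_vq_num_max(vdev));
	ops->set_vq_cb(vdev, 0, &cb);
	err = ops->set_vq_address(vdev, 0, desc_pa, driver_pa, device_pa);
	if (err)
		return err;
	ops->set_vq_ready(vdev, 0, true);

	/* DRIVER_OK makes setup_driver() build the VQs, RQT, TIR and flow rule. */
	ops->set_status(vdev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
			      VIRTIO_CONFIG_S_FEATURES_OK | VIRTIO_CONFIG_S_DRIVER_OK);
	return 0;
}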
+static int mlx5_vdpa_set_map(struct vdpa_device *vdev, struct vhost_iotlb *iotlb) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); + bool change_map; + int err; + + err = mlx5_vdpa_handle_set_map(mvdev, iotlb, &change_map); + if (err) { + mlx5_vdpa_warn(mvdev, "set map failed(%d)\n", err); + return err; + } + + if (change_map) + return mlx5_vdpa_change_map(ndev, iotlb); + + return 0; +} + +static void mlx5_vdpa_free(struct vdpa_device *vdev) +{ + struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev); + struct mlx5_vdpa_net *ndev; + + ndev = to_mlx5_vdpa_ndev(mvdev); + + free_resources(ndev); + mlx5_vdpa_free_resources(&ndev->mvdev); + mutex_destroy(&ndev->reslock); +} + +static struct vdpa_notification_area mlx5_get_vq_notification(struct vdpa_device *vdev, u16 idx) +{ + struct vdpa_notification_area ret = {}; + + return ret; +} + +static int mlx5_get_vq_irq(struct vdpa_device *vdv, u16 idx) +{ + return -EOPNOTSUPP; +} + +static const struct vdpa_config_ops mlx5_vdpa_ops = { + .set_vq_address = mlx5_vdpa_set_vq_address, + .set_vq_num = mlx5_vdpa_set_vq_num, + .kick_vq = mlx5_vdpa_kick_vq, + .set_vq_cb = mlx5_vdpa_set_vq_cb, + .set_vq_ready = mlx5_vdpa_set_vq_ready, + .get_vq_ready = mlx5_vdpa_get_vq_ready, + .set_vq_state = mlx5_vdpa_set_vq_state, + .get_vq_state = mlx5_vdpa_get_vq_state, + .get_vq_notification = mlx5_get_vq_notification, + .get_vq_irq = mlx5_get_vq_irq, + .get_vq_align = mlx5_vdpa_get_vq_align, + .get_features = mlx5_vdpa_get_features, + .set_features = mlx5_vdpa_set_features, + .set_config_cb = mlx5_vdpa_set_config_cb, + .get_vq_num_max = mlx5_vdpa_get_vq_num_max, + .get_device_id = mlx5_vdpa_get_device_id, + .get_vendor_id = mlx5_vdpa_get_vendor_id, + .get_status = mlx5_vdpa_get_status, + .set_status = mlx5_vdpa_set_status, + .get_config = mlx5_vdpa_get_config, + .set_config = mlx5_vdpa_set_config, + .get_generation = mlx5_vdpa_get_generation, + .set_map = mlx5_vdpa_set_map, + .free = mlx5_vdpa_free, +}; + +static int alloc_resources(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_vdpa_net_resources *res = &ndev->res; + int err; + + if (res->valid) { + mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); + return -EEXIST; + } + + err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); + if (err) + return err; + + err = create_tis(ndev); + if (err) + goto err_tis; + + res->valid = true; + + return 0; + +err_tis: + mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); + return err; +} + +static void free_resources(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_vdpa_net_resources *res = &ndev->res; + + if (!res->valid) + return; + + destroy_tis(ndev); + mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); + res->valid = false; +} + +static void init_mvqs(struct mlx5_vdpa_net *ndev) +{ + struct mlx5_vdpa_virtqueue *mvq; + int i; + + for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) { + mvq = &ndev->vqs[i]; + memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); + mvq->index = i; + mvq->ndev = ndev; + mvq->fwqp.fw = true; + } + for (; i < ndev->mvdev.max_vqs; i++) { + mvq = &ndev->vqs[i]; + memset(mvq, 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); + mvq->index = i; + mvq->ndev = ndev; + } +} + +void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev) +{ + struct virtio_net_config *config; + struct mlx5_vdpa_dev *mvdev; + struct mlx5_vdpa_net *ndev; + u32 max_vqs; + int err; + + /* we save one virtqueue for control virtqueue should we require it */ + max_vqs = 
MLX5_CAP_DEV_VDPA_EMULATION(mdev, max_num_virtio_queues); + max_vqs = min_t(u32, max_vqs, MLX5_MAX_SUPPORTED_VQS); + + ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, + 2 * mlx5_vdpa_max_qps(max_vqs)); + if (IS_ERR(ndev)) + return ndev; + + ndev->mvdev.max_vqs = max_vqs; + mvdev = &ndev->mvdev; + mvdev->mdev = mdev; + init_mvqs(ndev); + mutex_init(&ndev->reslock); + config = &ndev->config; + err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu); + if (err) + goto err_mtu; + + err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac); + if (err) + goto err_mtu; + + mvdev->vdev.dma_dev = mdev->device; + err = mlx5_vdpa_alloc_resources(&ndev->mvdev); + if (err) + goto err_mtu; + + err = alloc_resources(ndev); + if (err) + goto err_res; + + err = vdpa_register_device(&mvdev->vdev); + if (err) + goto err_reg; + + return ndev; + +err_reg: + free_resources(ndev); +err_res: + mlx5_vdpa_free_resources(&ndev->mvdev); +err_mtu: + mutex_destroy(&ndev->reslock); + put_device(&mvdev->vdev.dev); + return ERR_PTR(err); +} + +void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev) +{ + vdpa_unregister_device(&mvdev->vdev); +} diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.h b/drivers/vdpa/mlx5/net/mlx5_vnet.h new file mode 100644 index 000000000000..f2d6d68b020e --- /dev/null +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies Ltd. */ + +#ifndef __MLX5_VNET_H_ +#define __MLX5_VNET_H_ + +#include <linux/vdpa.h> +#include <linux/virtio_net.h> +#include <linux/vringh.h> +#include <linux/mlx5/driver.h> +#include <linux/mlx5/cq.h> +#include <linux/mlx5/qp.h> +#include "mlx5_vdpa.h" + +static inline u32 mlx5_vdpa_max_qps(int max_vqs) +{ + return max_vqs / 2; +} + +#define to_mlx5_vdpa_ndev(__mvdev) container_of(__mvdev, struct mlx5_vdpa_net, mvdev) +void *mlx5_vdpa_add_dev(struct mlx5_core_dev *mdev); +void mlx5_vdpa_remove_dev(struct mlx5_vdpa_dev *mvdev); + +#endif /* __MLX5_VNET_H_ */ diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c index de211ef3738c..a69ffc991e13 100644 --- a/drivers/vdpa/vdpa.c +++ b/drivers/vdpa/vdpa.c @@ -61,6 +61,7 @@ static void vdpa_release_dev(struct device *d) * initialized but before registered. 
* @parent: the parent device * @config: the bus operations that is supported by this device + * @nvqs: number of virtqueues supported by this device * @size: size of the parent structure that contains private data * * Driver should use vdpa_alloc_device() wrapper macro instead of @@ -71,6 +72,7 @@ static void vdpa_release_dev(struct device *d) */ struct vdpa_device *__vdpa_alloc_device(struct device *parent, const struct vdpa_config_ops *config, + int nvqs, size_t size) { struct vdpa_device *vdev; @@ -96,6 +98,8 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent, vdev->dev.release = vdpa_release_dev; vdev->index = err; vdev->config = config; + vdev->features_valid = false; + vdev->nvqs = nvqs; err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); if (err) diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index c7334cc65bb2..62d640327145 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -24,6 +24,7 @@ #include <linux/etherdevice.h> #include <linux/vringh.h> #include <linux/vdpa.h> +#include <linux/virtio_byteorder.h> #include <linux/vhost_iotlb.h> #include <uapi/linux/virtio_config.h> #include <uapi/linux/virtio_net.h> @@ -33,6 +34,10 @@ #define DRV_DESC "vDPA Device Simulator" #define DRV_LICENSE "GPL v2" +static int batch_mapping = 1; +module_param(batch_mapping, int, 0444); +MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable"); + struct vdpasim_virtqueue { struct vringh vring; struct vringh_kiov iov; @@ -55,12 +60,12 @@ struct vdpasim_virtqueue { static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) | (1ULL << VIRTIO_F_VERSION_1) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM); + (1ULL << VIRTIO_F_ACCESS_PLATFORM); /* State of each vdpasim device */ struct vdpasim { struct vdpa_device vdpa; - struct vdpasim_virtqueue vqs[2]; + struct vdpasim_virtqueue vqs[VDPASIM_VQ_NUM]; struct work_struct work; /* spinlock to synchronize virtqueue state */ spinlock_t lock; @@ -70,8 +75,27 @@ struct vdpasim { u32 status; u32 generation; u64 features; + /* spinlock to synchronize iommu table */ + spinlock_t iommu_lock; }; +/* TODO: cross-endian support */ +static inline bool vdpasim_is_little_endian(struct vdpasim *vdpasim) +{ + return virtio_legacy_is_little_endian() || + (vdpasim->features & (1ULL << VIRTIO_F_VERSION_1)); +} + +static inline u16 vdpasim16_to_cpu(struct vdpasim *vdpasim, __virtio16 val) +{ + return __virtio16_to_cpu(vdpasim_is_little_endian(vdpasim), val); +} + +static inline __virtio16 cpu_to_vdpasim16(struct vdpasim *vdpasim, u16 val) +{ + return __cpu_to_virtio16(vdpasim_is_little_endian(vdpasim), val); +} + static struct vdpasim *vdpasim_dev; static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa) @@ -118,7 +142,9 @@ static void vdpasim_reset(struct vdpasim *vdpasim) for (i = 0; i < VDPASIM_VQ_NUM; i++) vdpasim_vq_reset(&vdpasim->vqs[i]); + spin_lock(&vdpasim->iommu_lock); vhost_iotlb_reset(vdpasim->iommu); + spin_unlock(&vdpasim->iommu_lock); vdpasim->features = 0; vdpasim->status = 0; @@ -236,8 +262,10 @@ static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page, /* For simplicity, use identical mapping to avoid e.g iova * allocator. 
*/ + spin_lock(&vdpasim->iommu_lock); ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, pa, dir_to_perm(dir)); + spin_unlock(&vdpasim->iommu_lock); if (ret) return DMA_MAPPING_ERROR; @@ -251,8 +279,10 @@ static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr, struct vdpasim *vdpasim = dev_to_sim(dev); struct vhost_iotlb *iommu = vdpasim->iommu; + spin_lock(&vdpasim->iommu_lock); vhost_iotlb_del_range(iommu, (u64)dma_addr, (u64)dma_addr + size - 1); + spin_unlock(&vdpasim->iommu_lock); } static void *vdpasim_alloc_coherent(struct device *dev, size_t size, @@ -264,9 +294,10 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size, void *addr = kmalloc(size, flag); int ret; - if (!addr) + spin_lock(&vdpasim->iommu_lock); + if (!addr) { *dma_addr = DMA_MAPPING_ERROR; - else { + } else { u64 pa = virt_to_phys(addr); ret = vhost_iotlb_add_range(iommu, (u64)pa, @@ -279,6 +310,7 @@ static void *vdpasim_alloc_coherent(struct device *dev, size_t size, } else *dma_addr = (dma_addr_t)pa; } + spin_unlock(&vdpasim->iommu_lock); return addr; } @@ -290,8 +322,11 @@ static void vdpasim_free_coherent(struct device *dev, size_t size, struct vdpasim *vdpasim = dev_to_sim(dev); struct vhost_iotlb *iommu = vdpasim->iommu; + spin_lock(&vdpasim->iommu_lock); vhost_iotlb_del_range(iommu, (u64)dma_addr, (u64)dma_addr + size - 1); + spin_unlock(&vdpasim->iommu_lock); + kfree(phys_to_virt((uintptr_t)dma_addr)); } @@ -303,21 +338,27 @@ static const struct dma_map_ops vdpasim_dma_ops = { }; static const struct vdpa_config_ops vdpasim_net_config_ops; +static const struct vdpa_config_ops vdpasim_net_batch_config_ops; static struct vdpasim *vdpasim_create(void) { - struct virtio_net_config *config; + const struct vdpa_config_ops *ops; struct vdpasim *vdpasim; struct device *dev; int ret = -ENOMEM; - vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, - &vdpasim_net_config_ops); + if (batch_mapping) + ops = &vdpasim_net_batch_config_ops; + else + ops = &vdpasim_net_config_ops; + + vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops, VDPASIM_VQ_NUM); if (!vdpasim) goto err_alloc; INIT_WORK(&vdpasim->work, vdpasim_work); spin_lock_init(&vdpasim->lock); + spin_lock_init(&vdpasim->iommu_lock); dev = &vdpasim->vdpa.dev; dev->coherent_dma_mask = DMA_BIT_MASK(64); @@ -331,10 +372,7 @@ static struct vdpasim *vdpasim_create(void) if (!vdpasim->buffer) goto err_iommu; - config = &vdpasim->config; - config->mtu = 1500; - config->status = VIRTIO_NET_S_LINK_UP; - eth_random_addr(config->mac); + eth_random_addr(vdpasim->config.mac); vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu); vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu); @@ -413,26 +451,29 @@ static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx) return vq->ready; } -static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state) +static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, + const struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = &vq->vring; spin_lock(&vdpasim->lock); - vrh->last_avail_idx = state; + vrh->last_avail_idx = state->avail_index; spin_unlock(&vdpasim->lock); return 0; } -static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx) +static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx, + struct vdpa_vq_state *state) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; struct vringh *vrh = 
&vq->vring; - return vrh->last_avail_idx; + state->avail_index = vrh->last_avail_idx; + return 0; } static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa) @@ -448,13 +489,22 @@ static u64 vdpasim_get_features(struct vdpa_device *vdpa) static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct virtio_net_config *config = &vdpasim->config; /* DMA mapping must be done by driver */ - if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) + if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) return -EINVAL; vdpasim->features = features & vdpasim_features; + /* We generally only know whether guest is using the legacy interface + * here, so generally that's the earliest we can set config fields. + * Note: We actually require VIRTIO_F_ACCESS_PLATFORM above which + * implies VIRTIO_F_VERSION_1, but let's not try to be clever here. + */ + + config->mtu = cpu_to_vdpasim16(vdpasim, 1500); + config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP); return 0; } @@ -508,7 +558,7 @@ static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset, struct vdpasim *vdpasim = vdpa_to_sim(vdpa); if (offset + len < sizeof(struct virtio_net_config)) - memcpy(buf, &vdpasim->config + offset, len); + memcpy(buf, (u8 *)&vdpasim->config + offset, len); } static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset, @@ -532,6 +582,7 @@ static int vdpasim_set_map(struct vdpa_device *vdpa, u64 start = 0ULL, last = 0ULL - 1; int ret; + spin_lock(&vdpasim->iommu_lock); vhost_iotlb_reset(vdpasim->iommu); for (map = vhost_iotlb_itree_first(iotlb, start, last); map; @@ -541,10 +592,12 @@ static int vdpasim_set_map(struct vdpa_device *vdpa, if (ret) goto err; } + spin_unlock(&vdpasim->iommu_lock); return 0; err: vhost_iotlb_reset(vdpasim->iommu); + spin_unlock(&vdpasim->iommu_lock); return ret; } @@ -552,16 +605,23 @@ static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size, u64 pa, u32 perm) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + int ret; + + spin_lock(&vdpasim->iommu_lock); + ret = vhost_iotlb_add_range(vdpasim->iommu, iova, iova + size - 1, pa, + perm); + spin_unlock(&vdpasim->iommu_lock); - return vhost_iotlb_add_range(vdpasim->iommu, iova, - iova + size - 1, pa, perm); + return ret; } static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size) { struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + spin_lock(&vdpasim->iommu_lock); vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1); + spin_unlock(&vdpasim->iommu_lock); return 0; } @@ -597,12 +657,36 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = { .get_config = vdpasim_get_config, .set_config = vdpasim_set_config, .get_generation = vdpasim_get_generation, - .set_map = vdpasim_set_map, .dma_map = vdpasim_dma_map, .dma_unmap = vdpasim_dma_unmap, .free = vdpasim_free, }; +static const struct vdpa_config_ops vdpasim_net_batch_config_ops = { + .set_vq_address = vdpasim_set_vq_address, + .set_vq_num = vdpasim_set_vq_num, + .kick_vq = vdpasim_kick_vq, + .set_vq_cb = vdpasim_set_vq_cb, + .set_vq_ready = vdpasim_set_vq_ready, + .get_vq_ready = vdpasim_get_vq_ready, + .set_vq_state = vdpasim_set_vq_state, + .get_vq_state = vdpasim_get_vq_state, + .get_vq_align = vdpasim_get_vq_align, + .get_features = vdpasim_get_features, + .set_features = vdpasim_set_features, + .set_config_cb = vdpasim_set_config_cb, + .get_vq_num_max = vdpasim_get_vq_num_max, + .get_device_id = vdpasim_get_device_id, + .get_vendor_id = 
vdpasim_get_vendor_id, + .get_status = vdpasim_get_status, + .set_status = vdpasim_set_status, + .get_config = vdpasim_get_config, + .set_config = vdpasim_set_config, + .get_generation = vdpasim_get_generation, + .set_map = vdpasim_set_map, + .free = vdpasim_free, +}; + static int __init vdpasim_dev_init(void) { vdpasim_dev = vdpasim_create(); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index de881a6cff35..620465c2a1da 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -60,6 +60,10 @@ module_param(enable_sriov, bool, 0644); MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF."); #endif +static bool disable_denylist; +module_param(disable_denylist, bool, 0444); +MODULE_PARM_DESC(disable_denylist, "Disable use of device denylist. Disabling the denylist allows binding to devices with known errata that may lead to exploitable stability or security issues when accessed by untrusted users."); + static inline bool vfio_vga_disabled(void) { #ifdef CONFIG_VFIO_PCI_VGA @@ -69,6 +73,44 @@ static inline bool vfio_vga_disabled(void) #endif } +static bool vfio_pci_dev_in_denylist(struct pci_dev *pdev) +{ + switch (pdev->vendor) { + case PCI_VENDOR_ID_INTEL: + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_QAT_C3XXX: + case PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF: + case PCI_DEVICE_ID_INTEL_QAT_C62X: + case PCI_DEVICE_ID_INTEL_QAT_C62X_VF: + case PCI_DEVICE_ID_INTEL_QAT_DH895XCC: + case PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF: + return true; + default: + return false; + } + } + + return false; +} + +static bool vfio_pci_is_denylisted(struct pci_dev *pdev) +{ + if (!vfio_pci_dev_in_denylist(pdev)) + return false; + + if (disable_denylist) { + pci_warn(pdev, + "device denylist disabled - allowing device %04x:%04x.\n", + pdev->vendor, pdev->device); + return false; + } + + pci_warn(pdev, "%04x:%04x exists in vfio-pci device denylist, driver probing disallowed.\n", + pdev->vendor, pdev->device); + + return true; +} + /* * Our VGA arbiter participation is limited since we don't know anything * about the device itself. However, if the device is the only VGA device @@ -207,6 +249,8 @@ static bool vfio_pci_nointx(struct pci_dev *pdev) case 0x1580 ... 0x1581: case 0x1583 ... 0x158b: case 0x37d0 ... 
0x37d2: + /* X550 */ + case 0x1563: return true; default: return false; @@ -521,14 +565,12 @@ static void vfio_pci_release(void *device_data) vfio_pci_vf_token_user_add(vdev, -1); vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); + mutex_lock(&vdev->igate); if (vdev->err_trigger) { eventfd_ctx_put(vdev->err_trigger); vdev->err_trigger = NULL; } - mutex_unlock(&vdev->igate); - - mutex_lock(&vdev->igate); if (vdev->req_trigger) { eventfd_ctx_put(vdev->req_trigger); vdev->req_trigger = NULL; @@ -1856,6 +1898,9 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) struct iommu_group *group; int ret; + if (vfio_pci_is_denylisted(pdev)) + return -EINVAL; + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) return -EINVAL; @@ -2345,6 +2390,9 @@ static int __init vfio_pci_init(void) vfio_pci_fill_ids(); + if (disable_denylist) + pr_warn("device denylist disabled.\n"); + return 0; out_driver: diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 580099afeaff..262ab0efd06c 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -627,9 +627,10 @@ static struct vfio_device *vfio_group_get_device(struct vfio_group *group, * that error notification via MSI can be affected for platforms that handle * MSI within the same IOVA space as DMA. */ -static const char * const vfio_driver_whitelist[] = { "pci-stub" }; +static const char * const vfio_driver_allowed[] = { "pci-stub" }; -static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv) +static bool vfio_dev_driver_allowed(struct device *dev, + struct device_driver *drv) { if (dev_is_pci(dev)) { struct pci_dev *pdev = to_pci_dev(dev); @@ -638,8 +639,8 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv) return true; } - return match_string(vfio_driver_whitelist, - ARRAY_SIZE(vfio_driver_whitelist), + return match_string(vfio_driver_allowed, + ARRAY_SIZE(vfio_driver_allowed), drv->name) >= 0; } @@ -648,7 +649,7 @@ static bool vfio_dev_whitelisted(struct device *dev, struct device_driver *drv) * one of the following states: * - driver-less * - bound to a vfio driver - * - bound to a whitelisted driver + * - bound to an otherwise allowed driver * - a PCI interconnect device * * We use two methods to determine whether a device is bound to a vfio @@ -674,7 +675,7 @@ static int vfio_dev_viable(struct device *dev, void *data) } mutex_unlock(&group->unbound_lock); - if (!ret || !drv || vfio_dev_whitelisted(dev, drv)) + if (!ret || !drv || vfio_dev_driver_allowed(dev, drv)) return 0; device = vfio_group_get_device(group, dev); diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c index 16b3adc508db..fe888b5dcc00 100644 --- a/drivers/vfio/vfio_iommu_spapr_tce.c +++ b/drivers/vfio/vfio_iommu_spapr_tce.c @@ -383,7 +383,7 @@ static void tce_iommu_unuse_page(struct tce_container *container, struct page *page; page = pfn_to_page(hpa >> PAGE_SHIFT); - put_page(page); + unpin_user_page(page); } static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container, @@ -486,7 +486,7 @@ static int tce_iommu_use_page(unsigned long tce, unsigned long *hpa) struct page *page = NULL; enum dma_data_direction direction = iommu_tce_direction(tce); - if (get_user_pages_fast(tce & PAGE_MASK, 1, + if (pin_user_pages_fast(tce & PAGE_MASK, 1, direction != DMA_TO_DEVICE ? 
FOLL_WRITE : 0, &page) != 1) return -EFAULT; diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 5e556ac9102a..6990fc711a80 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -425,7 +425,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, if (ret) { bool unlocked = false; - ret = fixup_user_fault(NULL, mm, vaddr, + ret = fixup_user_fault(mm, vaddr, FAULT_FLAG_REMOTE | (write_fault ? FAULT_FLAG_WRITE : 0), &unlocked); @@ -453,7 +453,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr, flags |= FOLL_WRITE; mmap_read_lock(mm); - ret = pin_user_pages_remote(NULL, mm, vaddr, 1, flags | FOLL_LONGTERM, + ret = pin_user_pages_remote(mm, vaddr, 1, flags | FOLL_LONGTERM, page, NULL, NULL); if (ret == 1) { *pfn = page_to_pfn(page[0]); @@ -1225,8 +1225,10 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, return 0; unwind: - list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) + list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) { iommu_unmap(d->domain, iova, npage << PAGE_SHIFT); + cond_resched(); + } return ret; } @@ -2453,6 +2455,23 @@ static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu) return ret; } +static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu, + unsigned long arg) +{ + switch (arg) { + case VFIO_TYPE1_IOMMU: + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_NESTING_IOMMU: + return 1; + case VFIO_DMA_CC_IOMMU: + if (!iommu) + return 0; + return vfio_domains_have_iommu_cache(iommu); + default: + return 0; + } +} + static int vfio_iommu_iova_add_cap(struct vfio_info_cap *caps, struct vfio_iommu_type1_info_cap_iova_range *cap_iovas, size_t size) @@ -2529,241 +2548,256 @@ static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu, return vfio_info_add_capability(caps, &cap_mig.header, sizeof(cap_mig)); } -static long vfio_iommu_type1_ioctl(void *iommu_data, - unsigned int cmd, unsigned long arg) +static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu, + unsigned long arg) { - struct vfio_iommu *iommu = iommu_data; + struct vfio_iommu_type1_info info; unsigned long minsz; + struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; + unsigned long capsz; + int ret; - if (cmd == VFIO_CHECK_EXTENSION) { - switch (arg) { - case VFIO_TYPE1_IOMMU: - case VFIO_TYPE1v2_IOMMU: - case VFIO_TYPE1_NESTING_IOMMU: - return 1; - case VFIO_DMA_CC_IOMMU: - if (!iommu) - return 0; - return vfio_domains_have_iommu_cache(iommu); - default: - return 0; - } - } else if (cmd == VFIO_IOMMU_GET_INFO) { - struct vfio_iommu_type1_info info; - struct vfio_info_cap caps = { .buf = NULL, .size = 0 }; - unsigned long capsz; - int ret; - - minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); + minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes); - /* For backward compatibility, cannot require this */ - capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); + /* For backward compatibility, cannot require this */ + capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset); - if (copy_from_user(&info, (void __user *)arg, minsz)) - return -EFAULT; + if (copy_from_user(&info, (void __user *)arg, minsz)) + return -EFAULT; - if (info.argsz < minsz) - return -EINVAL; + if (info.argsz < minsz) + return -EINVAL; - if (info.argsz >= capsz) { - minsz = capsz; - info.cap_offset = 0; /* output, no-recopy necessary */ - } + if (info.argsz >= capsz) { + minsz = capsz; + info.cap_offset = 0; /* 
output, no-recopy necessary */ + } - mutex_lock(&iommu->lock); - info.flags = VFIO_IOMMU_INFO_PGSIZES; + mutex_lock(&iommu->lock); + info.flags = VFIO_IOMMU_INFO_PGSIZES; - info.iova_pgsizes = iommu->pgsize_bitmap; + info.iova_pgsizes = iommu->pgsize_bitmap; - ret = vfio_iommu_migration_build_caps(iommu, &caps); + ret = vfio_iommu_migration_build_caps(iommu, &caps); - if (!ret) - ret = vfio_iommu_iova_build_caps(iommu, &caps); + if (!ret) + ret = vfio_iommu_iova_build_caps(iommu, &caps); - mutex_unlock(&iommu->lock); + mutex_unlock(&iommu->lock); - if (ret) - return ret; + if (ret) + return ret; - if (caps.size) { - info.flags |= VFIO_IOMMU_INFO_CAPS; + if (caps.size) { + info.flags |= VFIO_IOMMU_INFO_CAPS; - if (info.argsz < sizeof(info) + caps.size) { - info.argsz = sizeof(info) + caps.size; - } else { - vfio_info_cap_shift(&caps, sizeof(info)); - if (copy_to_user((void __user *)arg + - sizeof(info), caps.buf, - caps.size)) { - kfree(caps.buf); - return -EFAULT; - } - info.cap_offset = sizeof(info); + if (info.argsz < sizeof(info) + caps.size) { + info.argsz = sizeof(info) + caps.size; + } else { + vfio_info_cap_shift(&caps, sizeof(info)); + if (copy_to_user((void __user *)arg + + sizeof(info), caps.buf, + caps.size)) { + kfree(caps.buf); + return -EFAULT; } - - kfree(caps.buf); + info.cap_offset = sizeof(info); } - return copy_to_user((void __user *)arg, &info, minsz) ? - -EFAULT : 0; + kfree(caps.buf); + } - } else if (cmd == VFIO_IOMMU_MAP_DMA) { - struct vfio_iommu_type1_dma_map map; - uint32_t mask = VFIO_DMA_MAP_FLAG_READ | - VFIO_DMA_MAP_FLAG_WRITE; + return copy_to_user((void __user *)arg, &info, minsz) ? + -EFAULT : 0; +} - minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); +static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu, + unsigned long arg) +{ + struct vfio_iommu_type1_dma_map map; + unsigned long minsz; + uint32_t mask = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE; - if (copy_from_user(&map, (void __user *)arg, minsz)) - return -EFAULT; + minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); - if (map.argsz < minsz || map.flags & ~mask) - return -EINVAL; + if (copy_from_user(&map, (void __user *)arg, minsz)) + return -EFAULT; - return vfio_dma_do_map(iommu, &map); + if (map.argsz < minsz || map.flags & ~mask) + return -EINVAL; - } else if (cmd == VFIO_IOMMU_UNMAP_DMA) { - struct vfio_iommu_type1_dma_unmap unmap; - struct vfio_bitmap bitmap = { 0 }; - int ret; + return vfio_dma_do_map(iommu, &map); +} - minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); +static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu, + unsigned long arg) +{ + struct vfio_iommu_type1_dma_unmap unmap; + struct vfio_bitmap bitmap = { 0 }; + unsigned long minsz; + int ret; - if (copy_from_user(&unmap, (void __user *)arg, minsz)) - return -EFAULT; + minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size); - if (unmap.argsz < minsz || - unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) - return -EINVAL; + if (copy_from_user(&unmap, (void __user *)arg, minsz)) + return -EFAULT; - if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { - unsigned long pgshift; + if (unmap.argsz < minsz || + unmap.flags & ~VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) + return -EINVAL; - if (unmap.argsz < (minsz + sizeof(bitmap))) - return -EINVAL; + if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) { + unsigned long pgshift; - if (copy_from_user(&bitmap, - (void __user *)(arg + minsz), - sizeof(bitmap))) - return -EFAULT; + if (unmap.argsz < (minsz + 
sizeof(bitmap))) + return -EINVAL; - if (!access_ok((void __user *)bitmap.data, bitmap.size)) - return -EINVAL; + if (copy_from_user(&bitmap, + (void __user *)(arg + minsz), + sizeof(bitmap))) + return -EFAULT; - pgshift = __ffs(bitmap.pgsize); - ret = verify_bitmap_size(unmap.size >> pgshift, - bitmap.size); - if (ret) - return ret; - } + if (!access_ok((void __user *)bitmap.data, bitmap.size)) + return -EINVAL; - ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap); + pgshift = __ffs(bitmap.pgsize); + ret = verify_bitmap_size(unmap.size >> pgshift, + bitmap.size); if (ret) return ret; + } + + ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap); + if (ret) + return ret; - return copy_to_user((void __user *)arg, &unmap, minsz) ? + return copy_to_user((void __user *)arg, &unmap, minsz) ? -EFAULT : 0; - } else if (cmd == VFIO_IOMMU_DIRTY_PAGES) { - struct vfio_iommu_type1_dirty_bitmap dirty; - uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START | - VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP | - VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; - int ret = 0; +} - if (!iommu->v2) - return -EACCES; +static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu, + unsigned long arg) +{ + struct vfio_iommu_type1_dirty_bitmap dirty; + uint32_t mask = VFIO_IOMMU_DIRTY_PAGES_FLAG_START | + VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP | + VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; + unsigned long minsz; + int ret = 0; - minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, - flags); + if (!iommu->v2) + return -EACCES; - if (copy_from_user(&dirty, (void __user *)arg, minsz)) - return -EFAULT; + minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags); - if (dirty.argsz < minsz || dirty.flags & ~mask) - return -EINVAL; + if (copy_from_user(&dirty, (void __user *)arg, minsz)) + return -EFAULT; - /* only one flag should be set at a time */ - if (__ffs(dirty.flags) != __fls(dirty.flags)) - return -EINVAL; + if (dirty.argsz < minsz || dirty.flags & ~mask) + return -EINVAL; - if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) { - size_t pgsize; + /* only one flag should be set at a time */ + if (__ffs(dirty.flags) != __fls(dirty.flags)) + return -EINVAL; - mutex_lock(&iommu->lock); - pgsize = 1 << __ffs(iommu->pgsize_bitmap); - if (!iommu->dirty_page_tracking) { - ret = vfio_dma_bitmap_alloc_all(iommu, pgsize); - if (!ret) - iommu->dirty_page_tracking = true; - } - mutex_unlock(&iommu->lock); - return ret; - } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) { - mutex_lock(&iommu->lock); - if (iommu->dirty_page_tracking) { - iommu->dirty_page_tracking = false; - vfio_dma_bitmap_free_all(iommu); - } - mutex_unlock(&iommu->lock); - return 0; - } else if (dirty.flags & - VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) { - struct vfio_iommu_type1_dirty_bitmap_get range; - unsigned long pgshift; - size_t data_size = dirty.argsz - minsz; - size_t iommu_pgsize; - - if (!data_size || data_size < sizeof(range)) - return -EINVAL; - - if (copy_from_user(&range, (void __user *)(arg + minsz), - sizeof(range))) - return -EFAULT; + if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) { + size_t pgsize; - if (range.iova + range.size < range.iova) - return -EINVAL; - if (!access_ok((void __user *)range.bitmap.data, - range.bitmap.size)) - return -EINVAL; + mutex_lock(&iommu->lock); + pgsize = 1 << __ffs(iommu->pgsize_bitmap); + if (!iommu->dirty_page_tracking) { + ret = vfio_dma_bitmap_alloc_all(iommu, pgsize); + if (!ret) + iommu->dirty_page_tracking = true; + } + mutex_unlock(&iommu->lock); + return ret; + } else if (dirty.flags & 
VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) { + mutex_lock(&iommu->lock); + if (iommu->dirty_page_tracking) { + iommu->dirty_page_tracking = false; + vfio_dma_bitmap_free_all(iommu); + } + mutex_unlock(&iommu->lock); + return 0; + } else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) { + struct vfio_iommu_type1_dirty_bitmap_get range; + unsigned long pgshift; + size_t data_size = dirty.argsz - minsz; + size_t iommu_pgsize; - pgshift = __ffs(range.bitmap.pgsize); - ret = verify_bitmap_size(range.size >> pgshift, - range.bitmap.size); - if (ret) - return ret; + if (!data_size || data_size < sizeof(range)) + return -EINVAL; - mutex_lock(&iommu->lock); + if (copy_from_user(&range, (void __user *)(arg + minsz), + sizeof(range))) + return -EFAULT; - iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); + if (range.iova + range.size < range.iova) + return -EINVAL; + if (!access_ok((void __user *)range.bitmap.data, + range.bitmap.size)) + return -EINVAL; - /* allow only smallest supported pgsize */ - if (range.bitmap.pgsize != iommu_pgsize) { - ret = -EINVAL; - goto out_unlock; - } - if (range.iova & (iommu_pgsize - 1)) { - ret = -EINVAL; - goto out_unlock; - } - if (!range.size || range.size & (iommu_pgsize - 1)) { - ret = -EINVAL; - goto out_unlock; - } + pgshift = __ffs(range.bitmap.pgsize); + ret = verify_bitmap_size(range.size >> pgshift, + range.bitmap.size); + if (ret) + return ret; - if (iommu->dirty_page_tracking) - ret = vfio_iova_dirty_bitmap(range.bitmap.data, - iommu, range.iova, range.size, - range.bitmap.pgsize); - else - ret = -EINVAL; -out_unlock: - mutex_unlock(&iommu->lock); + mutex_lock(&iommu->lock); - return ret; + iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap); + + /* allow only smallest supported pgsize */ + if (range.bitmap.pgsize != iommu_pgsize) { + ret = -EINVAL; + goto out_unlock; + } + if (range.iova & (iommu_pgsize - 1)) { + ret = -EINVAL; + goto out_unlock; + } + if (!range.size || range.size & (iommu_pgsize - 1)) { + ret = -EINVAL; + goto out_unlock; } + + if (iommu->dirty_page_tracking) + ret = vfio_iova_dirty_bitmap(range.bitmap.data, + iommu, range.iova, + range.size, + range.bitmap.pgsize); + else + ret = -EINVAL; +out_unlock: + mutex_unlock(&iommu->lock); + + return ret; } - return -ENOTTY; + return -EINVAL; +} + +static long vfio_iommu_type1_ioctl(void *iommu_data, + unsigned int cmd, unsigned long arg) +{ + struct vfio_iommu *iommu = iommu_data; + + switch (cmd) { + case VFIO_CHECK_EXTENSION: + return vfio_iommu_type1_check_extension(iommu, arg); + case VFIO_IOMMU_GET_INFO: + return vfio_iommu_type1_get_info(iommu, arg); + case VFIO_IOMMU_MAP_DMA: + return vfio_iommu_type1_map_dma(iommu, arg); + case VFIO_IOMMU_UNMAP_DMA: + return vfio_iommu_type1_unmap_dma(iommu, arg); + case VFIO_IOMMU_DIRTY_PAGES: + return vfio_iommu_type1_dirty_pages(iommu, arg); + default: + return -ENOTTY; + } } static int vfio_iommu_type1_register_notifier(void *iommu_data, diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index d3688c6afb87..587fbae06182 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -65,6 +65,7 @@ config VHOST_VDPA tristate "Vhost driver for vDPA-based backend" depends on EVENTFD select VHOST + select IRQ_BYPASS_MANAGER depends on VDPA help This kernel module can be loaded in host kernel to accelerate diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index eea902b83afe..531a00d703cd 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -73,7 +73,7 @@ enum { VHOST_NET_FEATURES = VHOST_FEATURES | (1ULL << 
VHOST_NET_F_VIRTIO_NET_HDR) | (1ULL << VIRTIO_NET_F_MRG_RXBUF) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM) + (1ULL << VIRTIO_F_ACCESS_PLATFORM) }; enum { @@ -1615,21 +1615,6 @@ done: return err; } -static int vhost_net_set_backend_features(struct vhost_net *n, u64 features) -{ - int i; - - mutex_lock(&n->dev.mutex); - for (i = 0; i < VHOST_NET_VQ_MAX; ++i) { - mutex_lock(&n->vqs[i].vq.mutex); - n->vqs[i].vq.acked_backend_features = features; - mutex_unlock(&n->vqs[i].vq.mutex); - } - mutex_unlock(&n->dev.mutex); - - return 0; -} - static int vhost_net_set_features(struct vhost_net *n, u64 features) { size_t vhost_hlen, sock_hlen, hdr_len; @@ -1653,7 +1638,7 @@ static int vhost_net_set_features(struct vhost_net *n, u64 features) !vhost_log_access_ok(&n->dev)) goto out_unlock; - if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) { + if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) { if (vhost_init_device_iotlb(&n->dev, true)) goto out_unlock; } @@ -1730,7 +1715,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl, return -EFAULT; if (features & ~VHOST_NET_BACKEND_FEATURES) return -EOPNOTSUPP; - return vhost_net_set_backend_features(n, features); + vhost_set_backend_features(&n->dev, features); + return 0; case VHOST_RESET_OWNER: return vhost_net_reset_owner(n); case VHOST_SET_OWNER: diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index a54b60d6623f..3fab94f88894 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -27,37 +27,11 @@ #include "vhost.h" enum { - VHOST_VDPA_FEATURES = - (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | - (1ULL << VIRTIO_F_ANY_LAYOUT) | - (1ULL << VIRTIO_F_VERSION_1) | - (1ULL << VIRTIO_F_IOMMU_PLATFORM) | - (1ULL << VIRTIO_F_RING_PACKED) | - (1ULL << VIRTIO_F_ORDER_PLATFORM) | - (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | - (1ULL << VIRTIO_RING_F_EVENT_IDX), - - VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES | - (1ULL << VIRTIO_NET_F_CSUM) | - (1ULL << VIRTIO_NET_F_GUEST_CSUM) | - (1ULL << VIRTIO_NET_F_MTU) | - (1ULL << VIRTIO_NET_F_MAC) | - (1ULL << VIRTIO_NET_F_GUEST_TSO4) | - (1ULL << VIRTIO_NET_F_GUEST_TSO6) | - (1ULL << VIRTIO_NET_F_GUEST_ECN) | - (1ULL << VIRTIO_NET_F_GUEST_UFO) | - (1ULL << VIRTIO_NET_F_HOST_TSO4) | - (1ULL << VIRTIO_NET_F_HOST_TSO6) | - (1ULL << VIRTIO_NET_F_HOST_ECN) | - (1ULL << VIRTIO_NET_F_HOST_UFO) | - (1ULL << VIRTIO_NET_F_MRG_RXBUF) | - (1ULL << VIRTIO_NET_F_STATUS) | - (1ULL << VIRTIO_NET_F_SPEED_DUPLEX), + VHOST_VDPA_BACKEND_FEATURES = + (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) | + (1ULL << VHOST_BACKEND_F_IOTLB_BATCH), }; -/* Currently, only network backend w/o multiqueue is supported. 
*/ -#define VHOST_VDPA_VQ_MAX 2 - #define VHOST_VDPA_DEV_MAX (1U << MINORBITS) struct vhost_vdpa { @@ -73,16 +47,13 @@ struct vhost_vdpa { int virtio_id; int minor; struct eventfd_ctx *config_ctx; + int in_batch; }; static DEFINE_IDA(vhost_vdpa_ida); static dev_t vhost_vdpa_major; -static const u64 vhost_vdpa_features[] = { - [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES, -}; - static void handle_vq_kick(struct vhost_work *work) { struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, @@ -96,7 +67,7 @@ static void handle_vq_kick(struct vhost_work *work) static irqreturn_t vhost_vdpa_virtqueue_cb(void *private) { struct vhost_virtqueue *vq = private; - struct eventfd_ctx *call_ctx = vq->call_ctx; + struct eventfd_ctx *call_ctx = vq->call_ctx.ctx; if (call_ctx) eventfd_signal(call_ctx, 1); @@ -115,12 +86,45 @@ static irqreturn_t vhost_vdpa_config_cb(void *private) return IRQ_HANDLED; } +static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid) +{ + struct vhost_virtqueue *vq = &v->vqs[qid]; + const struct vdpa_config_ops *ops = v->vdpa->config; + struct vdpa_device *vdpa = v->vdpa; + int ret, irq; + + if (!ops->get_vq_irq) + return; + + irq = ops->get_vq_irq(vdpa, qid); + spin_lock(&vq->call_ctx.ctx_lock); + irq_bypass_unregister_producer(&vq->call_ctx.producer); + if (!vq->call_ctx.ctx || irq < 0) { + spin_unlock(&vq->call_ctx.ctx_lock); + return; + } + + vq->call_ctx.producer.token = vq->call_ctx.ctx; + vq->call_ctx.producer.irq = irq; + ret = irq_bypass_register_producer(&vq->call_ctx.producer); + spin_unlock(&vq->call_ctx.ctx_lock); +} + +static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid) +{ + struct vhost_virtqueue *vq = &v->vqs[qid]; + + spin_lock(&vq->call_ctx.ctx_lock); + irq_bypass_unregister_producer(&vq->call_ctx.producer); + spin_unlock(&vq->call_ctx.ctx_lock); +} + static void vhost_vdpa_reset(struct vhost_vdpa *v) { struct vdpa_device *vdpa = v->vdpa; - const struct vdpa_config_ops *ops = vdpa->config; - ops->set_status(vdpa, 0); + vdpa_reset(vdpa); + v->in_batch = 0; } static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) @@ -155,11 +159,15 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; - u8 status; + u8 status, status_old; + int nvqs = v->nvqs; + u16 i; if (copy_from_user(&status, statusp, sizeof(status))) return -EFAULT; + status_old = ops->get_status(vdpa); + /* * Userspace shouldn't remove status bits unless reset the * status to 0. 
@@ -169,6 +177,14 @@ static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) ops->set_status(vdpa, status); + if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK)) + for (i = 0; i < nvqs; i++) + vhost_vdpa_setup_vq_irq(v, i); + + if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK)) + for (i = 0; i < nvqs; i++) + vhost_vdpa_unsetup_vq_irq(v, i); + return 0; } @@ -196,7 +212,6 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v, struct vhost_vdpa_config __user *c) { struct vdpa_device *vdpa = v->vdpa; - const struct vdpa_config_ops *ops = vdpa->config; struct vhost_vdpa_config config; unsigned long size = offsetof(struct vhost_vdpa_config, buf); u8 *buf; @@ -209,7 +224,7 @@ static long vhost_vdpa_get_config(struct vhost_vdpa *v, if (!buf) return -ENOMEM; - ops->get_config(vdpa, config.off, buf, config.len); + vdpa_get_config(vdpa, config.off, buf, config.len); if (copy_to_user(c->buf, buf, config.len)) { kvfree(buf); @@ -255,7 +270,6 @@ static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep) u64 features; features = ops->get_features(vdpa); - features &= vhost_vdpa_features[v->virtio_id]; if (copy_to_user(featurep, &features, sizeof(features))) return -EFAULT; @@ -279,10 +293,7 @@ static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) if (copy_from_user(&features, featurep, sizeof(features))) return -EFAULT; - if (features & ~vhost_vdpa_features[v->virtio_id]) - return -EINVAL; - - if (ops->set_features(vdpa, features)) + if (vdpa_set_features(vdpa, features)) return -EINVAL; return 0; @@ -332,14 +343,18 @@ static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp) return 0; } + static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, void __user *argp) { struct vdpa_device *vdpa = v->vdpa; const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_vq_state vq_state; struct vdpa_callback cb; struct vhost_virtqueue *vq; struct vhost_vring_state s; + u64 __user *featurep = argp; + u64 features; u32 idx; long r; @@ -353,15 +368,32 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, idx = array_index_nospec(idx, v->nvqs); vq = &v->vqs[idx]; - if (cmd == VHOST_VDPA_SET_VRING_ENABLE) { + switch (cmd) { + case VHOST_VDPA_SET_VRING_ENABLE: if (copy_from_user(&s, argp, sizeof(s))) return -EFAULT; ops->set_vq_ready(vdpa, idx, s.num); return 0; - } + case VHOST_GET_VRING_BASE: + r = ops->get_vq_state(v->vdpa, idx, &vq_state); + if (r) + return r; - if (cmd == VHOST_GET_VRING_BASE) - vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx); + vq->last_avail_idx = vq_state.avail_index; + break; + case VHOST_GET_BACKEND_FEATURES: + features = VHOST_VDPA_BACKEND_FEATURES; + if (copy_to_user(featurep, &features, sizeof(features))) + return -EFAULT; + return 0; + case VHOST_SET_BACKEND_FEATURES: + if (copy_from_user(&features, featurep, sizeof(features))) + return -EFAULT; + if (features & ~VHOST_VDPA_BACKEND_FEATURES) + return -EOPNOTSUPP; + vhost_set_backend_features(&v->vdev, features); + return 0; + } r = vhost_vring_ioctl(&v->vdev, cmd, argp); if (r) @@ -377,12 +409,13 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, break; case VHOST_SET_VRING_BASE: - if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx)) + vq_state.avail_index = vq->last_avail_idx; + if (ops->set_vq_state(vdpa, idx, &vq_state)) r = -EINVAL; break; case VHOST_SET_VRING_CALL: - if (vq->call_ctx) { + if 
(vq->call_ctx.ctx) { cb.callback = vhost_vdpa_virtqueue_cb; cb.private = vq; } else { @@ -390,6 +423,7 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, cb.private = NULL; } ops->set_vq_cb(vdpa, idx, &cb); + vhost_vdpa_setup_vq_irq(v, idx); break; case VHOST_SET_VRING_NUM: @@ -519,13 +553,15 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, if (r) return r; - if (ops->dma_map) + if (ops->dma_map) { r = ops->dma_map(vdpa, iova, size, pa, perm); - else if (ops->set_map) - r = ops->set_map(vdpa, dev->iotlb); - else + } else if (ops->set_map) { + if (!v->in_batch) + r = ops->set_map(vdpa, dev->iotlb); + } else { r = iommu_map(v->domain, iova, pa, size, perm_to_iommu_flags(perm)); + } return r; } @@ -538,12 +574,14 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size) vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1); - if (ops->dma_map) + if (ops->dma_map) { ops->dma_unmap(vdpa, iova, size); - else if (ops->set_map) - ops->set_map(vdpa, dev->iotlb); - else + } else if (ops->set_map) { + if (!v->in_batch) + ops->set_map(vdpa, dev->iotlb); + } else { iommu_unmap(v->domain, iova, size); + } } static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, @@ -636,6 +674,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb_msg *msg) { struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev); + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; int r = 0; r = vhost_dev_check_owner(dev); @@ -649,6 +689,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, case VHOST_IOTLB_INVALIDATE: vhost_vdpa_unmap(v, msg->iova, msg->size); break; + case VHOST_IOTLB_BATCH_BEGIN: + v->in_batch = true; + break; + case VHOST_IOTLB_BATCH_END: + if (v->in_batch && ops->set_map) + ops->set_map(vdpa, dev->iotlb); + v->in_batch = false; + break; default: r = -EINVAL; break; @@ -765,6 +813,18 @@ err: return r; } +static void vhost_vdpa_clean_irq(struct vhost_vdpa *v) +{ + struct vhost_virtqueue *vq; + int i; + + for (i = 0; i < v->nvqs; i++) { + vq = &v->vqs[i]; + if (vq->call_ctx.producer.irq) + irq_bypass_unregister_producer(&vq->call_ctx.producer); + } +} + static int vhost_vdpa_release(struct inode *inode, struct file *filep) { struct vhost_vdpa *v = filep->private_data; @@ -777,6 +837,7 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) vhost_vdpa_iotlb_free(v); vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); + vhost_vdpa_clean_irq(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); mutex_unlock(&d->mutex); @@ -872,7 +933,7 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) { const struct vdpa_config_ops *ops = vdpa->config; struct vhost_vdpa *v; - int minor, nvqs = VHOST_VDPA_VQ_MAX; + int minor; int r; /* Currently, we only accept the network devices. 
*/ @@ -893,14 +954,14 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa) atomic_set(&v->opened, 0); v->minor = minor; v->vdpa = vdpa; - v->nvqs = nvqs; + v->nvqs = vdpa->nvqs; v->virtio_id = ops->get_device_id(vdpa); device_initialize(&v->dev); v->dev.release = vhost_vdpa_release_dev; v->dev.parent = &vdpa->dev; v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor); - v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), + v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue), GFP_KERNEL); if (!v->vqs) { r = -ENOMEM; diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 74d135ee7e26..5857d4eec9d7 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -298,6 +298,13 @@ static void vhost_vq_meta_reset(struct vhost_dev *d) __vhost_vq_meta_reset(d->vqs[i]); } +static void vhost_vring_call_reset(struct vhost_vring_call *call_ctx) +{ + call_ctx->ctx = NULL; + memset(&call_ctx->producer, 0x0, sizeof(struct irq_bypass_producer)); + spin_lock_init(&call_ctx->ctx_lock); +} + static void vhost_vq_reset(struct vhost_dev *dev, struct vhost_virtqueue *vq) { @@ -319,13 +326,13 @@ static void vhost_vq_reset(struct vhost_dev *dev, vq->log_base = NULL; vq->error_ctx = NULL; vq->kick = NULL; - vq->call_ctx = NULL; vq->log_ctx = NULL; vhost_reset_is_le(vq); vhost_disable_cross_endian(vq); vq->busyloop_timeout = 0; vq->umem = NULL; vq->iotlb = NULL; + vhost_vring_call_reset(&vq->call_ctx); __vhost_vq_meta_reset(vq); } @@ -685,8 +692,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev) eventfd_ctx_put(dev->vqs[i]->error_ctx); if (dev->vqs[i]->kick) fput(dev->vqs[i]->kick); - if (dev->vqs[i]->call_ctx) - eventfd_ctx_put(dev->vqs[i]->call_ctx); + if (dev->vqs[i]->call_ctx.ctx) + eventfd_ctx_put(dev->vqs[i]->call_ctx.ctx); vhost_vq_reset(dev, dev->vqs[i]); } vhost_dev_free_iovecs(dev); @@ -1405,7 +1412,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) memcpy(newmem, &mem, size); if (copy_from_user(newmem->regions, m->regions, - mem.nregions * sizeof *m->regions)) { + flex_array_size(newmem, regions, mem.nregions))) { kvfree(newmem); return -EFAULT; } @@ -1629,7 +1636,10 @@ long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *arg r = PTR_ERR(ctx); break; } - swap(ctx, vq->call_ctx); + + spin_lock(&vq->call_ctx.ctx_lock); + swap(ctx, vq->call_ctx.ctx); + spin_unlock(&vq->call_ctx.ctx_lock); break; case VHOST_SET_VRING_ERR: if (copy_from_user(&f, argp, sizeof f)) { @@ -2435,8 +2445,8 @@ static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq) void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq) { /* Signal the Guest tell them we used something up. 
*/ - if (vq->call_ctx && vhost_notify(dev, vq)) - eventfd_signal(vq->call_ctx, 1); + if (vq->call_ctx.ctx && vhost_notify(dev, vq)) + eventfd_signal(vq->call_ctx.ctx, 1); } EXPORT_SYMBOL_GPL(vhost_signal); @@ -2576,6 +2586,21 @@ struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, } EXPORT_SYMBOL_GPL(vhost_dequeue_msg); +void vhost_set_backend_features(struct vhost_dev *dev, u64 features) +{ + struct vhost_virtqueue *vq; + int i; + + mutex_lock(&dev->mutex); + for (i = 0; i < dev->nvqs; ++i) { + vq = dev->vqs[i]; + mutex_lock(&vq->mutex); + vq->acked_backend_features = features; + mutex_unlock(&vq->mutex); + } + mutex_unlock(&dev->mutex); +} +EXPORT_SYMBOL_GPL(vhost_set_backend_features); static int __init vhost_init(void) { diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index c8e96a095d3b..9032d3c2a9f4 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -13,6 +13,7 @@ #include <linux/virtio_ring.h> #include <linux/atomic.h> #include <linux/vhost_iotlb.h> +#include <linux/irqbypass.h> struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *work); @@ -60,6 +61,12 @@ enum vhost_uaddr_type { VHOST_NUM_ADDRS = 3, }; +struct vhost_vring_call { + struct eventfd_ctx *ctx; + struct irq_bypass_producer producer; + spinlock_t ctx_lock; +}; + /* The virtqueue structure describes a queue attached to a device. */ struct vhost_virtqueue { struct vhost_dev *dev; @@ -72,7 +79,7 @@ struct vhost_virtqueue { vring_used_t __user *used; const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; - struct eventfd_ctx *call_ctx; + struct vhost_vring_call call_ctx; struct eventfd_ctx *error_ctx; struct eventfd_ctx *log_ctx; @@ -207,6 +214,8 @@ void vhost_enqueue_msg(struct vhost_dev *dev, struct vhost_msg_node *node); struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev, struct list_head *head); +void vhost_set_backend_features(struct vhost_dev *dev, u64 features); + __poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev, poll_table *wait); ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to, diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index 20d96a5ac384..25e409bbb1a2 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c @@ -121,18 +121,7 @@ out: static int pm860x_backlight_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.state & BL_CORE_SUSPENDED) - brightness = 0; - - return pm860x_backlight_set(bl, brightness); + return pm860x_backlight_set(bl, backlight_get_brightness(bl)); } static int pm860x_backlight_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig index 7d22d7377606..87f9fc238d28 100644 --- a/drivers/video/backlight/Kconfig +++ b/drivers/video/backlight/Kconfig @@ -173,14 +173,6 @@ config BACKLIGHT_EP93XX To compile this driver as a module, choose M here: the module will be called ep93xx_bl. -config BACKLIGHT_GENERIC - tristate "Generic (aka Sharp Corgi) Backlight Driver" - default y - help - Say y to enable the generic platform backlight driver previously - known as the Corgi backlight driver. If you have a Sharp Zaurus - SL-C7xx, SL-Cxx00 or SL-6000x say y. 
- config BACKLIGHT_IPAQ_MICRO tristate "iPAQ microcontroller backlight driver" depends on MFD_IPAQ_MICRO @@ -386,13 +378,6 @@ config BACKLIGHT_LP8788 help This supports TI LP8788 backlight driver. -config BACKLIGHT_OT200 - tristate "Backlight driver for ot200 visualisation device" - depends on CS5535_MFGPT && GPIO_CS5535 - help - To compile this driver as a module, choose M here: the module will be - called ot200_bl. - config BACKLIGHT_PANDORA tristate "Backlight driver for Pandora console" depends on TWL4030_CORE diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile index 0c1a1524627a..13463b99f1f9 100644 --- a/drivers/video/backlight/Makefile +++ b/drivers/video/backlight/Makefile @@ -31,7 +31,6 @@ obj-$(CONFIG_BACKLIGHT_CLASS_DEVICE) += backlight.o obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o obj-$(CONFIG_BACKLIGHT_DA9052) += da9052_bl.o obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o -obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o obj-$(CONFIG_BACKLIGHT_GPIO) += gpio_backlight.o obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o @@ -45,7 +44,6 @@ obj-$(CONFIG_BACKLIGHT_LP8788) += lp8788_bl.o obj-$(CONFIG_BACKLIGHT_LV5207LP) += lv5207lp.o obj-$(CONFIG_BACKLIGHT_MAX8925) += max8925_bl.o obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o -obj-$(CONFIG_BACKLIGHT_OT200) += ot200_bl.o obj-$(CONFIG_BACKLIGHT_PANDORA) += pandora_bl.o obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o obj-$(CONFIG_BACKLIGHT_PWM) += pwm_bl.o diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c index 0f63f76723a5..686988c3df3a 100644 --- a/drivers/video/backlight/adp5520_bl.c +++ b/drivers/video/backlight/adp5520_bl.c @@ -65,15 +65,7 @@ static int adp5520_bl_set(struct backlight_device *bl, int brightness) static int adp5520_bl_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - return adp5520_bl_set(bl, brightness); + return adp5520_bl_set(bl, backlight_get_brightness(bl)); } static int adp5520_bl_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c index 19968104fc47..ddc7f5f0401f 100644 --- a/drivers/video/backlight/adp8860_bl.c +++ b/drivers/video/backlight/adp8860_bl.c @@ -361,15 +361,7 @@ static int adp8860_bl_set(struct backlight_device *bl, int brightness) static int adp8860_bl_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - return adp8860_bl_set(bl, brightness); + return adp8860_bl_set(bl, backlight_get_brightness(bl)); } static int adp8860_bl_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c index 4c0032010cfe..8b5213a39527 100644 --- a/drivers/video/backlight/adp8870_bl.c +++ b/drivers/video/backlight/adp8870_bl.c @@ -399,15 +399,7 @@ static int adp8870_bl_set(struct backlight_device *bl, int brightness) static int adp8870_bl_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - return adp8870_bl_set(bl, brightness); + return adp8870_bl_set(bl, 
backlight_get_brightness(bl)); } static int adp8870_bl_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/as3711_bl.c b/drivers/video/backlight/as3711_bl.c index 33f0f0f2e8b3..3b60019cdc2b 100644 --- a/drivers/video/backlight/as3711_bl.c +++ b/drivers/video/backlight/as3711_bl.c @@ -104,17 +104,10 @@ static int as3711_bl_update_status(struct backlight_device *bl) struct as3711_bl_data *data = bl_get_data(bl); struct as3711_bl_supply *supply = to_supply(data); struct as3711 *as3711 = supply->as3711; - int brightness = bl->props.brightness; + int brightness; int ret = 0; - dev_dbg(&bl->dev, "%s(): brightness %u, pwr %x, blank %x, state %x\n", - __func__, bl->props.brightness, bl->props.power, - bl->props.fb_blank, bl->props.state); - - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK || - bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) - brightness = 0; + brightness = backlight_get_brightness(bl); if (data->type == AS3711_BL_SU1) { ret = as3711_set_brightness_v(as3711, brightness, diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 92d80aa0c0ef..537fe1b376ad 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c @@ -22,6 +22,47 @@ #include <asm/backlight.h> #endif +/** + * DOC: overview + * + * The backlight core supports implementing backlight drivers. + * + * A backlight driver registers a driver using + * devm_backlight_device_register(). The properties of the backlight + * driver such as type and max_brightness must be specified. + * When the core detect changes in for example brightness or power state + * the update_status() operation is called. The backlight driver shall + * implement this operation and use it to adjust backlight. + * + * Several sysfs attributes are provided by the backlight core:: + * + * - brightness R/W, set the requested brightness level + * - actual_brightness RO, the brightness level used by the HW + * - max_brightness RO, the maximum brightness level supported + * + * See Documentation/ABI/stable/sysfs-class-backlight for the full list. + * + * The backlight can be adjusted using the sysfs interface, and + * the backlight driver may also support adjusting backlight using + * a hot-key or some other platform or firmware specific way. + * + * The driver must implement the get_brightness() operation if + * the HW do not support all the levels that can be specified in + * brightness, thus providing user-space access to the actual level + * via the actual_brightness attribute. + * + * When the backlight changes this is reported to user-space using + * an uevent connected to the actual_brightness attribute. + * When brightness is set by platform specific means, for example + * a hot-key to adjust backlight, the driver must notify the backlight + * core that brightness has changed using backlight_force_update(). + * + * The backlight driver core receives notifications from fbdev and + * if the event is FB_EVENT_BLANK and if the value of blank, from the + * FBIOBLANK ioctrl, results in a change in the backlight state the + * update_status() operation is called. 
+ */ + static struct list_head backlight_dev_list; static struct mutex backlight_dev_list_mutex; static struct blocking_notifier_head backlight_notifier; @@ -40,9 +81,17 @@ static const char *const backlight_scale_types[] = { #if defined(CONFIG_FB) || (defined(CONFIG_FB_MODULE) && \ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)) -/* This callback gets called when something important happens inside a - * framebuffer driver. We're looking if that important event is blanking, - * and if it is and necessary, we're switching backlight power as well ... +/* + * fb_notifier_callback + * + * This callback gets called when something important happens inside a + * framebuffer driver. The backlight core only cares about FB_BLANK_UNBLANK + * which is reported to the driver using backlight_update_status() + * as a state change. + * + * There may be several fbdev's connected to the backlight device, + * in which case they are kept track of. A state change is only reported + * if there is a change in backlight for the specified fbdev. */ static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data) @@ -58,28 +107,29 @@ static int fb_notifier_callback(struct notifier_block *self, bd = container_of(self, struct backlight_device, fb_notif); mutex_lock(&bd->ops_lock); - if (bd->ops) - if (!bd->ops->check_fb || - bd->ops->check_fb(bd, evdata->info)) { - fb_blank = *(int *)evdata->data; - if (fb_blank == FB_BLANK_UNBLANK && - !bd->fb_bl_on[node]) { - bd->fb_bl_on[node] = true; - if (!bd->use_count++) { - bd->props.state &= ~BL_CORE_FBBLANK; - bd->props.fb_blank = FB_BLANK_UNBLANK; - backlight_update_status(bd); - } - } else if (fb_blank != FB_BLANK_UNBLANK && - bd->fb_bl_on[node]) { - bd->fb_bl_on[node] = false; - if (!(--bd->use_count)) { - bd->props.state |= BL_CORE_FBBLANK; - bd->props.fb_blank = fb_blank; - backlight_update_status(bd); - } - } + + if (!bd->ops) + goto out; + if (bd->ops->check_fb && !bd->ops->check_fb(bd, evdata->info)) + goto out; + + fb_blank = *(int *)evdata->data; + if (fb_blank == FB_BLANK_UNBLANK && !bd->fb_bl_on[node]) { + bd->fb_bl_on[node] = true; + if (!bd->use_count++) { + bd->props.state &= ~BL_CORE_FBBLANK; + bd->props.fb_blank = FB_BLANK_UNBLANK; + backlight_update_status(bd); } + } else if (fb_blank != FB_BLANK_UNBLANK && bd->fb_bl_on[node]) { + bd->fb_bl_on[node] = false; + if (!(--bd->use_count)) { + bd->props.state |= BL_CORE_FBBLANK; + bd->props.fb_blank = fb_blank; + backlight_update_status(bd); + } + } +out: mutex_unlock(&bd->ops_lock); return 0; } @@ -320,9 +370,13 @@ ATTRIBUTE_GROUPS(bl_device); * backlight_force_update - tell the backlight subsystem that hardware state * has changed * @bd: the backlight device to update + * @reason: reason for update * * Updates the internal state of the backlight in response to a hardware event, - * and generate a uevent to notify userspace + * and generates an uevent to notify userspace. A backlight driver shall call + * backlight_force_update() when the backlight is changed using, for example, + * a hot-key. The updated brightness is read using get_brightness() and the + * brightness value is reported using an uevent. */ void backlight_force_update(struct backlight_device *bd, enum backlight_update_reason reason) @@ -335,19 +389,7 @@ void backlight_force_update(struct backlight_device *bd, } EXPORT_SYMBOL(backlight_force_update); -/** - * backlight_device_register - create and register a new object of - * backlight_device class. 
- * @name: the name of the new object(must be the same as the name of the - * respective framebuffer device). - * @parent: a pointer to the parent device - * @devdata: an optional pointer to be stored for private driver use. The - * methods may retrieve it by using bl_get_data(bd). - * @ops: the backlight operations structure. - * - * Creates and registers new backlight device. Returns either an - * ERR_PTR() or a pointer to the newly allocated device. - */ +/* deprecated - use devm_backlight_device_register() */ struct backlight_device *backlight_device_register(const char *name, struct device *parent, void *devdata, const struct backlight_ops *ops, const struct backlight_properties *props) @@ -414,6 +456,15 @@ struct backlight_device *backlight_device_register(const char *name, } EXPORT_SYMBOL(backlight_device_register); +/** backlight_device_get_by_type - find first backlight device of a type + * @type: the type of backlight device + * + * Look up the first backlight device of the specified type + * + * RETURNS: + * + * Pointer to backlight device if any was found. Otherwise NULL. + */ struct backlight_device *backlight_device_get_by_type(enum backlight_type type) { bool found = false; @@ -453,12 +504,7 @@ struct backlight_device *backlight_device_get_by_name(const char *name) } EXPORT_SYMBOL(backlight_device_get_by_name); -/** - * backlight_device_unregister - unregisters a backlight device object. - * @bd: the backlight device object to be unregistered and freed. - * - * Unregisters a previously registered via backlight_device_register object. - */ +/* deprecated - use devm_backlight_device_unregister() */ void backlight_device_unregister(struct backlight_device *bd) { if (!bd) @@ -506,10 +552,12 @@ static int devm_backlight_device_match(struct device *dev, void *res, * backlight_register_notifier - get notified of backlight (un)registration * @nb: notifier block with the notifier to call on backlight (un)registration * - * @return 0 on success, otherwise a negative error code - * * Register a notifier to get notified when backlight devices get registered * or unregistered. + * + * RETURNS: + * + * 0 on success, otherwise a negative error code */ int backlight_register_notifier(struct notifier_block *nb) { @@ -521,10 +569,12 @@ EXPORT_SYMBOL(backlight_register_notifier); * backlight_unregister_notifier - unregister a backlight notifier * @nb: notifier block to unregister * - * @return 0 on success, otherwise a negative error code - * * Register a notifier to get notified when backlight devices get registered * or unregistered. + * + * RETURNS: + * + * 0 on success, otherwise a negative error code */ int backlight_unregister_notifier(struct notifier_block *nb) { @@ -533,19 +583,21 @@ int backlight_unregister_notifier(struct notifier_block *nb) EXPORT_SYMBOL(backlight_unregister_notifier); /** - * devm_backlight_device_register - resource managed backlight_device_register() + * devm_backlight_device_register - register a new backlight device * @dev: the device to register * @name: the name of the device - * @parent: a pointer to the parent device + * @parent: a pointer to the parent device (often the same as @dev) * @devdata: an optional pointer to be stored for private driver use * @ops: the backlight operations structure * @props: the backlight properties * - * @return a struct backlight on success, or an ERR_PTR on error + * Creates and registers new backlight device. When a backlight device + * is registered the configuration must be specified in the @props + * parameter. 
See description of &backlight_properties. + * + * RETURNS: * - * Managed backlight_device_register(). The backlight_device returned - * from this function are automatically freed on driver detach. - * See backlight_device_register() for more information. + * struct backlight on success, or an ERR_PTR on error */ struct backlight_device *devm_backlight_device_register(struct device *dev, const char *name, struct device *parent, void *devdata, @@ -573,13 +625,13 @@ struct backlight_device *devm_backlight_device_register(struct device *dev, EXPORT_SYMBOL(devm_backlight_device_register); /** - * devm_backlight_device_unregister - resource managed backlight_device_unregister() + * devm_backlight_device_unregister - unregister backlight device * @dev: the device to unregister * @bd: the backlight device to unregister * - * Deallocated a backlight allocated with devm_backlight_device_register(). + * Deallocates a backlight allocated with devm_backlight_device_register(). * Normally this function will not need to be called and the resource management - * code will ensure that the resource is freed. + * code will ensure that the resources are freed. */ void devm_backlight_device_unregister(struct device *dev, struct backlight_device *bd) @@ -621,22 +673,7 @@ struct backlight_device *of_find_backlight_by_node(struct device_node *node) EXPORT_SYMBOL(of_find_backlight_by_node); #endif -/** - * of_find_backlight - Get backlight device - * @dev: Device - * - * This function looks for a property named 'backlight' on the DT node - * connected to @dev and looks up the backlight device. - * - * Call backlight_put() to drop the reference on the backlight device. - * - * Returns: - * A pointer to the backlight device if found. - * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight - * device is found. - * NULL if there's no backlight property. - */ -struct backlight_device *of_find_backlight(struct device *dev) +static struct backlight_device *of_find_backlight(struct device *dev) { struct backlight_device *bd = NULL; struct device_node *np; @@ -662,20 +699,29 @@ struct backlight_device *of_find_backlight(struct device *dev) return bd; } -EXPORT_SYMBOL(of_find_backlight); static void devm_backlight_release(void *data) { - backlight_put(data); + struct backlight_device *bd = data; + + if (bd) + put_device(&bd->dev); } /** - * devm_of_find_backlight - Resource-managed of_find_backlight() - * @dev: Device + * devm_of_find_backlight - find backlight for a device + * @dev: the device * - * Device managed version of of_find_backlight(). - * The reference on the backlight device is automatically + * This function looks for a property named 'backlight' on the DT node + * connected to @dev and looks up the backlight device. The lookup is + * device managed so the reference to the backlight device is automatically * dropped on driver detach. + * + * RETURNS: + * + * A pointer to the backlight device if found. + * Error pointer -EPROBE_DEFER if the DT property is set, but no backlight + * device is found. NULL if there's no backlight property. 
*/ struct backlight_device *devm_of_find_backlight(struct device *dev) { @@ -687,7 +733,7 @@ struct backlight_device *devm_of_find_backlight(struct device *dev) return bd; ret = devm_add_action(dev, devm_backlight_release, bd); if (ret) { - backlight_put(bd); + put_device(&bd->dev); return ERR_PTR(ret); } return bd; diff --git a/drivers/video/backlight/bd6107.c b/drivers/video/backlight/bd6107.c index d5d5fb457e78..515184fbe33a 100644 --- a/drivers/video/backlight/bd6107.c +++ b/drivers/video/backlight/bd6107.c @@ -82,12 +82,7 @@ static int bd6107_write(struct bd6107 *bd, u8 reg, u8 data) static int bd6107_backlight_update_status(struct backlight_device *backlight) { struct bd6107 *bd = bl_get_data(backlight); - int brightness = backlight->props.brightness; - - if (backlight->props.power != FB_BLANK_UNBLANK || - backlight->props.fb_blank != FB_BLANK_UNBLANK || - backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) - brightness = 0; + int brightness = backlight_get_brightness(backlight); if (brightness) { bd6107_write(bd, BD6107_PORTSEL, BD6107_PORTSEL_LEDM(2) | diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index 25ef0cbd7583..33f5d80495e6 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -420,13 +420,7 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity) static int corgi_bl_update_status(struct backlight_device *bd) { struct corgi_lcd *lcd = bl_get_data(bd); - int intensity = bd->props.brightness; - - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - - if (bd->props.fb_blank != FB_BLANK_UNBLANK) - intensity = 0; + int intensity = backlight_get_brightness(bd); if (corgibl_flags & CORGIBL_SUSPENDED) intensity = 0; diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c index 4624b7b7c6a6..4ad0a72531fe 100644 --- a/drivers/video/backlight/cr_bllcd.c +++ b/drivers/video/backlight/cr_bllcd.c @@ -59,26 +59,18 @@ struct cr_panel { static int cr_backlight_set_intensity(struct backlight_device *bd) { - int intensity = bd->props.brightness; u32 addr = gpio_bar + CRVML_PANEL_PORT; u32 cur = inl(addr); - if (bd->props.power == FB_BLANK_UNBLANK) - intensity = FB_BLANK_UNBLANK; - if (bd->props.fb_blank == FB_BLANK_UNBLANK) - intensity = FB_BLANK_UNBLANK; - if (bd->props.power == FB_BLANK_POWERDOWN) - intensity = FB_BLANK_POWERDOWN; - if (bd->props.fb_blank == FB_BLANK_POWERDOWN) - intensity = FB_BLANK_POWERDOWN; - - if (intensity == FB_BLANK_UNBLANK) { /* FULL ON */ - cur &= ~CRVML_BACKLIGHT_OFF; - outl(cur, addr); - } else if (intensity == FB_BLANK_POWERDOWN) { /* OFF */ + if (backlight_get_brightness(bd) == 0) { + /* OFF */ cur |= CRVML_BACKLIGHT_OFF; outl(cur, addr); - } /* anything else, don't bother */ + } else { + /* FULL ON */ + cur &= ~CRVML_BACKLIGHT_OFF; + outl(cur, addr); + } return 0; } @@ -90,9 +82,9 @@ static int cr_backlight_get_intensity(struct backlight_device *bd) u8 intensity; if (cur & CRVML_BACKLIGHT_OFF) - intensity = FB_BLANK_POWERDOWN; + intensity = 0; else - intensity = FB_BLANK_UNBLANK; + intensity = 1; return intensity; } diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c index 62540e4bdedb..71f21bbc7a9f 100644 --- a/drivers/video/backlight/da903x_bl.c +++ b/drivers/video/backlight/da903x_bl.c @@ -77,18 +77,7 @@ static int da903x_backlight_set(struct backlight_device *bl, int brightness) static int da903x_backlight_update_status(struct backlight_device *bl) { - int brightness = 
bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.state & BL_CORE_SUSPENDED) - brightness = 0; - - return da903x_backlight_set(bl, brightness); + return da903x_backlight_set(bl, backlight_get_brightness(bl)); } static int da903x_backlight_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c index 4149e0b2f83c..2387009d452d 100644 --- a/drivers/video/backlight/ep93xx_bl.c +++ b/drivers/video/backlight/ep93xx_bl.c @@ -36,13 +36,7 @@ static int ep93xxbl_set(struct backlight_device *bl, int brightness) static int ep93xxbl_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - return ep93xxbl_set(bl, brightness); + return ep93xxbl_set(bl, backlight_get_brightness(bl)); } static int ep93xxbl_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c deleted file mode 100644 index 8fe63dbc8590..000000000000 --- a/drivers/video/backlight/generic_bl.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic Backlight Driver - * - * Copyright (c) 2004-2008 Richard Purdie - */ - -#include <linux/module.h> -#include <linux/kernel.h> -#include <linux/init.h> -#include <linux/platform_device.h> -#include <linux/mutex.h> -#include <linux/fb.h> -#include <linux/backlight.h> - -static int genericbl_intensity; -static struct backlight_device *generic_backlight_device; -static struct generic_bl_info *bl_machinfo; - -static int genericbl_send_intensity(struct backlight_device *bd) -{ - int intensity = bd->props.brightness; - - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.state & BL_CORE_FBBLANK) - intensity = 0; - if (bd->props.state & BL_CORE_SUSPENDED) - intensity = 0; - - bl_machinfo->set_bl_intensity(intensity); - - genericbl_intensity = intensity; - - if (bl_machinfo->kick_battery) - bl_machinfo->kick_battery(); - - return 0; -} - -static int genericbl_get_intensity(struct backlight_device *bd) -{ - return genericbl_intensity; -} - -static const struct backlight_ops genericbl_ops = { - .options = BL_CORE_SUSPENDRESUME, - .get_brightness = genericbl_get_intensity, - .update_status = genericbl_send_intensity, -}; - -static int genericbl_probe(struct platform_device *pdev) -{ - struct backlight_properties props; - struct generic_bl_info *machinfo = dev_get_platdata(&pdev->dev); - const char *name = "generic-bl"; - struct backlight_device *bd; - - bl_machinfo = machinfo; - if (!machinfo->limit_mask) - machinfo->limit_mask = -1; - - if (machinfo->name) - name = machinfo->name; - - memset(&props, 0, sizeof(struct backlight_properties)); - props.type = BACKLIGHT_RAW; - props.max_brightness = machinfo->max_intensity; - bd = devm_backlight_device_register(&pdev->dev, name, &pdev->dev, - NULL, &genericbl_ops, &props); - if (IS_ERR(bd)) - return PTR_ERR(bd); - - platform_set_drvdata(pdev, bd); - - bd->props.power = FB_BLANK_UNBLANK; - bd->props.brightness = machinfo->default_intensity; - backlight_update_status(bd); - - generic_backlight_device = bd; - - dev_info(&pdev->dev, "Generic Backlight Driver Initialized.\n"); - return 0; -} - -static int genericbl_remove(struct platform_device *pdev) -{ - struct backlight_device *bd = 
platform_get_drvdata(pdev); - - bd->props.power = 0; - bd->props.brightness = 0; - backlight_update_status(bd); - - dev_info(&pdev->dev, "Generic Backlight Driver Unloaded\n"); - return 0; -} - -static struct platform_driver genericbl_driver = { - .probe = genericbl_probe, - .remove = genericbl_remove, - .driver = { - .name = "generic-bl", - }, -}; - -module_platform_driver(genericbl_driver); - -MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>"); -MODULE_DESCRIPTION("Generic Backlight Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/video/backlight/gpio_backlight.c b/drivers/video/backlight/gpio_backlight.c index 75409ddfba3e..6f78d928f054 100644 --- a/drivers/video/backlight/gpio_backlight.c +++ b/drivers/video/backlight/gpio_backlight.c @@ -21,24 +21,11 @@ struct gpio_backlight { struct gpio_desc *gpiod; }; -static int gpio_backlight_get_next_brightness(struct backlight_device *bl) -{ - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK || - bl->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) - brightness = 0; - - return brightness; -} - static int gpio_backlight_update_status(struct backlight_device *bl) { struct gpio_backlight *gbl = bl_get_data(bl); - int brightness = gpio_backlight_get_next_brightness(bl); - gpiod_set_value_cansleep(gbl->gpiod, brightness); + gpiod_set_value_cansleep(gbl->gpiod, backlight_get_brightness(bl)); return 0; } @@ -108,7 +95,7 @@ static int gpio_backlight_probe(struct platform_device *pdev) bl->props.brightness = 1; - init_brightness = gpio_backlight_get_next_brightness(bl); + init_brightness = backlight_get_brightness(bl); ret = gpiod_direction_output(gbl->gpiod, init_brightness); if (ret) { dev_err(dev, "failed to set initial brightness\n"); diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c index 8ea42b8d9bc8..9123c33def05 100644 --- a/drivers/video/backlight/hp680_bl.c +++ b/drivers/video/backlight/hp680_bl.c @@ -33,12 +33,8 @@ static void hp680bl_send_intensity(struct backlight_device *bd) { unsigned long flags; u16 v; - int intensity = bd->props.brightness; + int intensity = backlight_get_brightness(bd); - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.fb_blank != FB_BLANK_UNBLANK) - intensity = 0; if (hp680bl_suspended) intensity = 0; diff --git a/drivers/video/backlight/ili922x.c b/drivers/video/backlight/ili922x.c index 9c5aa3fbb284..328aba9cddad 100644 --- a/drivers/video/backlight/ili922x.c +++ b/drivers/video/backlight/ili922x.c @@ -107,6 +107,8 @@ * lower frequency when the registers are read/written. * The macro sets the frequency in the spi_transfer structure if * the frequency exceeds the maximum value. + * @s: pointer to an SPI device + * @x: pointer to the read/write buffer pair */ #define CHECK_FREQ_REG(s, x) \ do { \ @@ -121,7 +123,7 @@ #define set_tx_byte(b) (tx_invert ? 
~(b) : b) -/** +/* * ili922x_id - id as set by manufacturer */ static int ili922x_id = 1; @@ -130,7 +132,7 @@ module_param(ili922x_id, int, 0); static int tx_invert; module_param(tx_invert, int, 0); -/** +/* * driver's private structure */ struct ili922x { @@ -293,6 +295,8 @@ static int ili922x_write(struct spi_device *spi, u8 reg, u16 value) #ifdef DEBUG /** * ili922x_reg_dump - dump all registers + * + * @spi: pointer to an SPI device */ static void ili922x_reg_dump(struct spi_device *spi) { diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c index f0385f9cf9da..996f7ba3b373 100644 --- a/drivers/video/backlight/jornada720_bl.c +++ b/drivers/video/backlight/jornada720_bl.c @@ -54,7 +54,7 @@ static int jornada_bl_update_status(struct backlight_device *bd) jornada_ssp_start(); /* If backlight is off then really turn it off */ - if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) { + if (backlight_is_blank(bd)) { ret = jornada_ssp_byte(BRIGHTNESSOFF); if (ret != TXDUMMY) { dev_info(&bd->dev, "brightness off timeout\n"); diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c index 1dfe13c18925..55794b239cff 100644 --- a/drivers/video/backlight/kb3886_bl.c +++ b/drivers/video/backlight/kb3886_bl.c @@ -87,12 +87,8 @@ static const struct dmi_system_id kb3886bl_device_table[] __initconst = { static int kb3886bl_send_intensity(struct backlight_device *bd) { - int intensity = bd->props.brightness; + int intensity = backlight_get_brightness(bd); - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.fb_blank != FB_BLANK_UNBLANK) - intensity = 0; if (kb3886bl_flags & KB3886BL_SUSPENDED) intensity = 0; diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 78b033358625..db56e465aaff 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c @@ -179,6 +179,7 @@ ATTRIBUTE_GROUPS(lcd_device); * lcd_device_register - register a new object of lcd_device class. * @name: the name of the new object(must be the same as the name of the * respective framebuffer device). + * @parent: pointer to the parent's struct device . * @devdata: an optional pointer to be stored in the device. The * methods may retrieve it by using lcd_get_data(ld). * @ops: the lcd operations structure. 
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c index 3f66549997c8..f54d256e2d54 100644 --- a/drivers/video/backlight/led_bl.c +++ b/drivers/video/backlight/led_bl.c @@ -54,12 +54,7 @@ static void led_bl_power_off(struct led_bl_data *priv) static int led_bl_update_status(struct backlight_device *bl) { struct led_bl_data *priv = bl_get_data(bl); - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK || - bl->props.state & BL_CORE_FBBLANK) - brightness = 0; + int brightness = backlight_get_brightness(bl); if (brightness > 0) led_bl_set_brightness(priv, brightness); diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c index ee09d1bd02b9..1df1b6643c0b 100644 --- a/drivers/video/backlight/lm3533_bl.c +++ b/drivers/video/backlight/lm3533_bl.c @@ -39,14 +39,8 @@ static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl) static int lm3533_bl_update_status(struct backlight_device *bd) { struct lm3533_bl *bl = bl_get_data(bd); - int brightness = bd->props.brightness; - if (bd->props.power != FB_BLANK_UNBLANK) - brightness = 0; - if (bd->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness); + return lm3533_ctrlbank_set_brightness(&bl->cb, backlight_get_brightness(bd)); } static int lm3533_bl_get_brightness(struct backlight_device *bd) @@ -235,7 +229,7 @@ static struct attribute *lm3533_bl_attributes[] = { static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct lm3533_bl *bl = dev_get_drvdata(dev); umode_t mode = attr->mode; diff --git a/drivers/video/backlight/lm3630a_bl.c b/drivers/video/backlight/lm3630a_bl.c index ee320883b710..e88a2b0e5904 100644 --- a/drivers/video/backlight/lm3630a_bl.c +++ b/drivers/video/backlight/lm3630a_bl.c @@ -391,7 +391,7 @@ static int lm3630a_parse_led_sources(struct fwnode_handle *node, return ret; for (i = 0; i < num_sources; i++) { - if (sources[i] < LM3630A_SINK_0 || sources[i] > LM3630A_SINK_1) + if (sources[i] != LM3630A_SINK_0 && sources[i] != LM3630A_SINK_1) return -EINVAL; ret |= BIT(sources[i]); @@ -412,7 +412,7 @@ static int lm3630a_parse_bank(struct lm3630a_platform_data *pdata, if (ret) return ret; - if (bank < LM3630A_BANK_0 || bank > LM3630A_BANK_1) + if (bank != LM3630A_BANK_0 && bank != LM3630A_BANK_1) return -EINVAL; led_sources = lm3630a_parse_led_sources(node, BIT(bank)); diff --git a/drivers/video/backlight/lms501kf03.c b/drivers/video/backlight/lms501kf03.c index 8ae32e3573c1..f949b66dce1b 100644 --- a/drivers/video/backlight/lms501kf03.c +++ b/drivers/video/backlight/lms501kf03.c @@ -9,7 +9,6 @@ #include <linux/backlight.h> #include <linux/delay.h> #include <linux/fb.h> -#include <linux/gpio.h> #include <linux/lcd.h> #include <linux/module.h> #include <linux/spi/spi.h> @@ -89,14 +88,6 @@ static const unsigned char seq_rgb_gamma[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; -static const unsigned char seq_up_dn[] = { - 0x36, 0x10, -}; - -static const unsigned char seq_sleep_in[] = { - 0x10, -}; - static const unsigned char seq_sleep_out[] = { 0x11, }; diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c index cdc02e04f89d..297ee2e1ab0b 100644 --- a/drivers/video/backlight/locomolcd.c +++ b/drivers/video/backlight/locomolcd.c @@ -111,12 +111,8 
@@ static int current_intensity; static int locomolcd_set_intensity(struct backlight_device *bd) { - int intensity = bd->props.brightness; + int intensity = backlight_get_brightness(bd); - if (bd->props.power != FB_BLANK_UNBLANK) - intensity = 0; - if (bd->props.fb_blank != FB_BLANK_UNBLANK) - intensity = 0; if (locomolcd_flags & LOCOMOLCD_SUSPENDED) intensity = 0; diff --git a/drivers/video/backlight/lv5207lp.c b/drivers/video/backlight/lv5207lp.c index c6ad73a784e2..1842ae9a55f8 100644 --- a/drivers/video/backlight/lv5207lp.c +++ b/drivers/video/backlight/lv5207lp.c @@ -46,12 +46,7 @@ static int lv5207lp_write(struct lv5207lp *lv, u8 reg, u8 data) static int lv5207lp_backlight_update_status(struct backlight_device *backlight) { struct lv5207lp *lv = bl_get_data(backlight); - int brightness = backlight->props.brightness; - - if (backlight->props.power != FB_BLANK_UNBLANK || - backlight->props.fb_blank != FB_BLANK_UNBLANK || - backlight->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK)) - brightness = 0; + int brightness = backlight_get_brightness(backlight); if (brightness) { lv5207lp_write(lv, LV5207LP_CTRL1, diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c index 97cc260ff9d1..e607ec6fd4bf 100644 --- a/drivers/video/backlight/max8925_bl.c +++ b/drivers/video/backlight/max8925_bl.c @@ -64,18 +64,7 @@ out: static int max8925_backlight_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.state & BL_CORE_SUSPENDED) - brightness = 0; - - return max8925_backlight_set(bl, brightness); + return max8925_backlight_set(bl, backlight_get_brightness(bl)); } static int max8925_backlight_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/backlight/ot200_bl.c b/drivers/video/backlight/ot200_bl.c deleted file mode 100644 index 23ee7106c72a..000000000000 --- a/drivers/video/backlight/ot200_bl.c +++ /dev/null @@ -1,162 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2012 Bachmann electronic GmbH - * Christian Gmeiner <christian.gmeiner@gmail.com> - * - * Backlight driver for ot200 visualisation device from - * Bachmann electronic GmbH. 
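
The backlight hunks above and below all replace the same open-coded blank/suspend test with the backlight_get_brightness() helper from <linux/backlight.h>. For reference, a minimal sketch of what that helper is expected to compute, roughly the union of the checks being deleted here (the function name is illustrative, not the kernel's implementation):

#include <linux/backlight.h>
#include <linux/fb.h>

/* Sketch: effective brightness is 0 whenever the display is blanked or suspended. */
static int example_effective_brightness(const struct backlight_device *bd)
{
	if (bd->props.power != FB_BLANK_UNBLANK ||
	    bd->props.fb_blank != FB_BLANK_UNBLANK ||
	    bd->props.state & (BL_CORE_SUSPENDED | BL_CORE_FBBLANK))
		return 0;

	return bd->props.brightness;
}

The converted update_status() callbacks then pass the returned value straight to their hardware programming path, as the hunks above show.
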
- */ - -#include <linux/module.h> -#include <linux/fb.h> -#include <linux/backlight.h> -#include <linux/gpio.h> -#include <linux/platform_device.h> -#include <linux/cs5535.h> - -static struct cs5535_mfgpt_timer *pwm_timer; - -/* this array defines the mapping of brightness in % to pwm frequency */ -static const u8 dim_table[101] = {0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, - 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, - 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9, - 10, 10, 11, 11, 12, 12, 13, 14, 15, 15, 16, - 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, - 30, 31, 33, 35, 37, 39, 41, 43, 45, 47, 50, - 53, 55, 58, 61, 65, 68, 72, 75, 79, 84, 88, - 93, 97, 103, 108, 114, 120, 126, 133, 140, - 147, 155, 163}; - -struct ot200_backlight_data { - int current_brightness; -}; - -#define GPIO_DIMM 27 -#define SCALE 1 -#define CMP1MODE 0x2 /* compare on GE; output high on compare - * greater than or equal */ -#define PWM_SETUP (SCALE | CMP1MODE << 6 | MFGPT_SETUP_CNTEN) -#define MAX_COMP2 163 - -static int ot200_backlight_update_status(struct backlight_device *bl) -{ - struct ot200_backlight_data *data = bl_get_data(bl); - int brightness = bl->props.brightness; - - if (bl->props.state & BL_CORE_FBBLANK) - brightness = 0; - - /* enable or disable PWM timer */ - if (brightness == 0) - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, 0); - else if (data->current_brightness == 0) { - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0); - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, - MFGPT_SETUP_CNTEN); - } - - /* apply new brightness value */ - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1, - MAX_COMP2 - dim_table[brightness]); - data->current_brightness = brightness; - - return 0; -} - -static int ot200_backlight_get_brightness(struct backlight_device *bl) -{ - struct ot200_backlight_data *data = bl_get_data(bl); - return data->current_brightness; -} - -static const struct backlight_ops ot200_backlight_ops = { - .update_status = ot200_backlight_update_status, - .get_brightness = ot200_backlight_get_brightness, -}; - -static int ot200_backlight_probe(struct platform_device *pdev) -{ - struct backlight_device *bl; - struct ot200_backlight_data *data; - struct backlight_properties props; - int retval = 0; - - /* request gpio */ - if (devm_gpio_request(&pdev->dev, GPIO_DIMM, - "ot200 backlight dimmer") < 0) { - dev_err(&pdev->dev, "failed to request GPIO %d\n", GPIO_DIMM); - return -ENODEV; - } - - /* request timer */ - pwm_timer = cs5535_mfgpt_alloc_timer(7, MFGPT_DOMAIN_ANY); - if (!pwm_timer) { - dev_err(&pdev->dev, "MFGPT 7 not available\n"); - return -ENODEV; - } - - data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); - if (!data) { - retval = -ENOMEM; - goto error_devm_kzalloc; - } - - /* setup gpio */ - cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_ENABLE); - cs5535_gpio_set(GPIO_DIMM, GPIO_OUTPUT_AUX1); - - /* setup timer */ - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1, 0); - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP2, MAX_COMP2); - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, PWM_SETUP); - - data->current_brightness = 100; - props.max_brightness = 100; - props.brightness = 100; - props.type = BACKLIGHT_RAW; - - bl = devm_backlight_device_register(&pdev->dev, dev_name(&pdev->dev), - &pdev->dev, data, &ot200_backlight_ops, - &props); - if (IS_ERR(bl)) { - dev_err(&pdev->dev, "failed to register backlight\n"); - retval = PTR_ERR(bl); - goto error_devm_kzalloc; - } - - platform_set_drvdata(pdev, bl); - - return 0; - -error_devm_kzalloc: - cs5535_mfgpt_free_timer(pwm_timer); - return retval; 
-} - -static int ot200_backlight_remove(struct platform_device *pdev) -{ - /* on module unload set brightness to 100% */ - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_COUNTER, 0); - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN); - cs5535_mfgpt_write(pwm_timer, MFGPT_REG_CMP1, - MAX_COMP2 - dim_table[100]); - - cs5535_mfgpt_free_timer(pwm_timer); - - return 0; -} - -static struct platform_driver ot200_backlight_driver = { - .driver = { - .name = "ot200-backlight", - }, - .probe = ot200_backlight_probe, - .remove = ot200_backlight_remove, -}; - -module_platform_driver(ot200_backlight_driver); - -MODULE_DESCRIPTION("backlight driver for ot200 visualisation device"); -MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:ot200-backlight"); diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index 82b8d7594701..dfc760830eb9 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -108,14 +108,9 @@ static int compute_duty_cycle(struct pwm_bl_data *pb, int brightness) static int pwm_backlight_update_status(struct backlight_device *bl) { struct pwm_bl_data *pb = bl_get_data(bl); - int brightness = bl->props.brightness; + int brightness = backlight_get_brightness(bl); struct pwm_state state; - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK || - bl->props.state & BL_CORE_FBBLANK) - brightness = 0; - if (pb->notify) brightness = pb->notify(pb->dev, brightness); @@ -606,7 +601,8 @@ static int pwm_backlight_probe(struct platform_device *pdev) pb->scale = data->max_brightness; } - pb->lth_brightness = data->lth_brightness * (state.period / pb->scale); + pb->lth_brightness = data->lth_brightness * (div_u64(state.period, + pb->scale)); props.type = BACKLIGHT_RAW; props.max_brightness = data->max_brightness; diff --git a/drivers/video/backlight/qcom-wled.c b/drivers/video/backlight/qcom-wled.c index 4c8c34b99441..3bc7800eb0a9 100644 --- a/drivers/video/backlight/qcom-wled.c +++ b/drivers/video/backlight/qcom-wled.c @@ -433,14 +433,9 @@ static int wled5_ovp_delay(struct wled *wled) static int wled_update_status(struct backlight_device *bl) { struct wled *wled = bl_get_data(bl); - u16 brightness = bl->props.brightness; + u16 brightness = backlight_get_brightness(bl); int rc = 0; - if (bl->props.power != FB_BLANK_UNBLANK || - bl->props.fb_blank != FB_BLANK_UNBLANK || - bl->props.state & BL_CORE_FBBLANK) - brightness = 0; - mutex_lock(&wled->lock); if (brightness) { rc = wled->wled_set_brightness(wled, brightness); @@ -1287,14 +1282,6 @@ static const struct wled_var_cfg wled4_string_i_limit_cfg = { .size = ARRAY_SIZE(wled4_string_i_limit_values), }; -static const struct wled_var_cfg wled3_string_cfg = { - .size = 8, -}; - -static const struct wled_var_cfg wled4_string_cfg = { - .size = 16, -}; - static const struct wled_var_cfg wled5_mod_sel_cfg = { .size = 2, }; diff --git a/drivers/video/backlight/sky81452-backlight.c b/drivers/video/backlight/sky81452-backlight.c index 2355f00f5773..0ce181585008 100644 --- a/drivers/video/backlight/sky81452-backlight.c +++ b/drivers/video/backlight/sky81452-backlight.c @@ -8,15 +8,13 @@ #include <linux/backlight.h> #include <linux/err.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/regmap.h> -#include 
<linux/platform_data/sky81452-backlight.h> #include <linux/slab.h> /* registers */ @@ -42,6 +40,29 @@ #define SKY81452_DEFAULT_NAME "lcd-backlight" #define SKY81452_MAX_BRIGHTNESS (SKY81452_CS + 1) +/** + * struct sky81452_platform_data + * @name: backlight driver name. + * If it is not defined, default name is lcd-backlight. + * @gpiod_enable:GPIO descriptor which control EN pin + * @enable: Enable mask for current sink channel 1, 2, 3, 4, 5 and 6. + * @ignore_pwm: true if DPWMI should be ignored. + * @dpwm_mode: true is DPWM dimming mode, otherwise Analog dimming mode. + * @phase_shift:true is phase shift mode. + * @short_detection_threshold: It should be one of 4, 5, 6 and 7V. + * @boost_current_limit: It should be one of 2300, 2750mA. + */ +struct sky81452_bl_platform_data { + const char *name; + struct gpio_desc *gpiod_enable; + unsigned int enable; + bool ignore_pwm; + bool dpwm_mode; + bool phase_shift; + unsigned int short_detection_threshold; + unsigned int boost_current_limit; +}; + #define CTZ(b) __builtin_ctz(b) static int sky81452_bl_update_status(struct backlight_device *bd) @@ -182,7 +203,7 @@ static struct sky81452_bl_platform_data *sky81452_bl_parse_dt( pdata->ignore_pwm = of_property_read_bool(np, "skyworks,ignore-pwm"); pdata->dpwm_mode = of_property_read_bool(np, "skyworks,dpwm-mode"); pdata->phase_shift = of_property_read_bool(np, "skyworks,phase-shift"); - pdata->gpio_enable = of_get_gpio(np, 0); + pdata->gpiod_enable = devm_gpiod_get_optional(dev, NULL, GPIOD_OUT_HIGH); ret = of_property_count_u32_elems(np, "led-sources"); if (ret < 0) { @@ -252,26 +273,15 @@ static int sky81452_bl_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct regmap *regmap = dev_get_drvdata(dev->parent); - struct sky81452_bl_platform_data *pdata = dev_get_platdata(dev); + struct sky81452_bl_platform_data *pdata; struct backlight_device *bd; struct backlight_properties props; const char *name; int ret; - if (!pdata) { - pdata = sky81452_bl_parse_dt(dev); - if (IS_ERR(pdata)) - return PTR_ERR(pdata); - } - - if (gpio_is_valid(pdata->gpio_enable)) { - ret = devm_gpio_request_one(dev, pdata->gpio_enable, - GPIOF_OUT_INIT_HIGH, "sky81452-en"); - if (ret < 0) { - dev_err(dev, "failed to request GPIO. 
err=%d\n", ret); - return ret; - } - } + pdata = sky81452_bl_parse_dt(dev); + if (IS_ERR(pdata)) + return PTR_ERR(pdata); ret = sky81452_bl_init_device(regmap, pdata); if (ret < 0) { @@ -312,8 +322,8 @@ static int sky81452_bl_remove(struct platform_device *pdev) bd->props.brightness = 0; backlight_update_status(bd); - if (gpio_is_valid(pdata->gpio_enable)) - gpio_set_value_cansleep(pdata->gpio_enable, 0); + if (pdata->gpiod_enable) + gpiod_set_value_cansleep(pdata->gpiod_enable, 0); return 0; } diff --git a/drivers/video/backlight/tps65217_bl.c b/drivers/video/backlight/tps65217_bl.c index 762e3feed097..8457166f357f 100644 --- a/drivers/video/backlight/tps65217_bl.c +++ b/drivers/video/backlight/tps65217_bl.c @@ -77,15 +77,7 @@ static int tps65217_bl_update_status(struct backlight_device *bl) { struct tps65217_bl *tps65217_bl = bl_get_data(bl); int rc; - int brightness = bl->props.brightness; - - if (bl->props.state & BL_CORE_SUSPENDED) - brightness = 0; - - if ((bl->props.power != FB_BLANK_UNBLANK) || - (bl->props.fb_blank != FB_BLANK_UNBLANK)) - /* framebuffer in low power mode or blanking active */ - brightness = 0; + int brightness = backlight_get_brightness(bl); if (brightness > 0) { rc = tps65217_reg_write(tps65217_bl->tps, diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c index e55977d54c15..c5aaee205bdf 100644 --- a/drivers/video/backlight/wm831x_bl.c +++ b/drivers/video/backlight/wm831x_bl.c @@ -91,18 +91,7 @@ err: static int wm831x_backlight_update_status(struct backlight_device *bl) { - int brightness = bl->props.brightness; - - if (bl->props.power != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.fb_blank != FB_BLANK_UNBLANK) - brightness = 0; - - if (bl->props.state & BL_CORE_SUSPENDED) - brightness = 0; - - return wm831x_backlight_set(bl, brightness); + return wm831x_backlight_set(bl, backlight_get_brightness(bl)); } static int wm831x_backlight_get_brightness(struct backlight_device *bl) diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 30e73ec4ad5c..da7c88ffaa6a 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -957,7 +957,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var, int fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) { - int flags = info->flags; int ret = 0; u32 activate; struct fb_var_screeninfo old_var; @@ -1052,9 +1051,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var) event.data = &mode; fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event); - if (flags & FBINFO_MISC_USEREVENT) - fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL); - return 0; } EXPORT_SYMBOL(fb_set_var); @@ -1105,9 +1101,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, return -EFAULT; console_lock(); lock_fb_info(info); - info->flags |= FBINFO_MISC_USEREVENT; ret = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!ret) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); unlock_fb_info(info); console_unlock(); if (!ret && copy_to_user(argp, &var, sizeof(var))) diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c index d54c88f88991..65dae05fff8e 100644 --- a/drivers/video/fbdev/core/fbsysfs.c +++ b/drivers/video/fbdev/core/fbsysfs.c @@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var) var->activate |= FB_ACTIVATE_FORCE; console_lock(); - fb_info->flags |= FBINFO_MISC_USEREVENT; err = 
fb_set_var(fb_info, var); - fb_info->flags &= ~FBINFO_MISC_USEREVENT; + if (!err) + fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL); console_unlock(); if (err) return err; diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c index 9df78fb77267..203c254f8f6c 100644 --- a/drivers/video/fbdev/ps3fb.c +++ b/drivers/video/fbdev/ps3fb.c @@ -29,6 +29,7 @@ #include <linux/freezer.h> #include <linux/uaccess.h> #include <linux/fb.h> +#include <linux/fbcon.h> #include <linux/init.h> #include <asm/cell-regs.h> @@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd, var = info->var; fb_videomode_to_var(&var, vmode); console_lock(); - info->flags |= FBINFO_MISC_USEREVENT; /* Force, in case only special bits changed */ var.activate |= FB_ACTIVATE_FORCE; par->new_mode_id = val; retval = fb_set_var(info, &var); - info->flags &= ~FBINFO_MISC_USEREVENT; + if (!retval) + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL); console_unlock(); } break; diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c index 8e06ba912d60..09425ec317ba 100644 --- a/drivers/video/fbdev/ssd1307fb.c +++ b/drivers/video/fbdev/ssd1307fb.c @@ -312,7 +312,7 @@ static int ssd1307fb_init(struct ssd1307fb_par *par) /* Enable the PWM */ pwm_enable(par->pwm); - dev_dbg(&par->client->dev, "Using PWM%d with a %dns period.\n", + dev_dbg(&par->client->dev, "Using PWM%d with a %lluns period.\n", par->pwm->pwm, pwm_get_period(par->pwm)); } diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 8be02f333b7a..31cc97f2f515 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -398,12 +398,9 @@ static inline s64 towards_target(struct virtio_balloon *vb) s64 target; u32 num_pages; - virtio_cread(vb->vdev, struct virtio_balloon_config, num_pages, - &num_pages); - /* Legacy balloon config space is LE, unlike all other devices. */ - if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) - num_pages = le32_to_cpu((__force __le32)num_pages); + virtio_cread_le(vb->vdev, struct virtio_balloon_config, num_pages, + &num_pages); target = num_pages; return target - vb->num_pages; @@ -462,11 +459,8 @@ static void update_balloon_size(struct virtio_balloon *vb) u32 actual = vb->num_pages; /* Legacy balloon config space is LE, unlike all other devices. */ - if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) - actual = (__force u32)cpu_to_le32(actual); - - virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual, - &actual); + virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, actual, + &actual); } static void update_balloon_stats_func(struct work_struct *work) @@ -579,12 +573,10 @@ static u32 virtio_balloon_cmd_id_received(struct virtio_balloon *vb) { if (test_and_clear_bit(VIRTIO_BALLOON_CONFIG_READ_CMD_ID, &vb->config_read_bitmap)) { - virtio_cread(vb->vdev, struct virtio_balloon_config, - free_page_hint_cmd_id, - &vb->cmd_id_received_cache); /* Legacy balloon config space is LE, unlike all other devices. 
*/ - if (!virtio_has_feature(vb->vdev, VIRTIO_F_VERSION_1)) - vb->cmd_id_received_cache = le32_to_cpu((__force __le32)vb->cmd_id_received_cache); + virtio_cread_le(vb->vdev, struct virtio_balloon_config, + free_page_hint_cmd_id, + &vb->cmd_id_received_cache); } return vb->cmd_id_received_cache; @@ -600,7 +592,7 @@ static int send_cmd_id_start(struct virtio_balloon *vb) while (virtqueue_get_buf(vq, &unused)) ; - vb->cmd_id_active = virtio32_to_cpu(vb->vdev, + vb->cmd_id_active = cpu_to_virtio32(vb->vdev, virtio_balloon_cmd_id_received(vb)); sg_init_one(&sg, &vb->cmd_id_active, sizeof(vb->cmd_id_active)); err = virtqueue_add_outbuf(vq, &sg, 1, &vb->cmd_id_active, GFP_KERNEL); @@ -987,8 +979,8 @@ static int virtballoon_probe(struct virtio_device *vdev) if (!want_init_on_free()) memset(&poison_val, PAGE_POISON, sizeof(poison_val)); - virtio_cwrite(vb->vdev, struct virtio_balloon_config, - poison_val, &poison_val); + virtio_cwrite_le(vb->vdev, struct virtio_balloon_config, + poison_val, &poison_val); } vb->pr_dev_info.report = virtballoon_free_page_report; @@ -1129,7 +1121,7 @@ static int virtballoon_validate(struct virtio_device *vdev) else if (!virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_REPORTING); - __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); + __virtio_clear_bit(vdev, VIRTIO_F_ACCESS_PLATFORM); return 0; } diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c index efaf65b0f42d..877b2ea3ed05 100644 --- a/drivers/virtio/virtio_input.c +++ b/drivers/virtio/virtio_input.c @@ -113,9 +113,9 @@ static u8 virtinput_cfg_select(struct virtio_input *vi, { u8 size; - virtio_cwrite(vi->vdev, struct virtio_input_config, select, &select); - virtio_cwrite(vi->vdev, struct virtio_input_config, subsel, &subsel); - virtio_cread(vi->vdev, struct virtio_input_config, size, &size); + virtio_cwrite_le(vi->vdev, struct virtio_input_config, select, &select); + virtio_cwrite_le(vi->vdev, struct virtio_input_config, subsel, &subsel); + virtio_cread_le(vi->vdev, struct virtio_input_config, size, &size); return size; } @@ -158,11 +158,11 @@ static void virtinput_cfg_abs(struct virtio_input *vi, int abs) u32 mi, ma, re, fu, fl; virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ABS_INFO, abs); - virtio_cread(vi->vdev, struct virtio_input_config, u.abs.min, &mi); - virtio_cread(vi->vdev, struct virtio_input_config, u.abs.max, &ma); - virtio_cread(vi->vdev, struct virtio_input_config, u.abs.res, &re); - virtio_cread(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu); - virtio_cread(vi->vdev, struct virtio_input_config, u.abs.flat, &fl); + virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.min, &mi); + virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.max, &ma); + virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.res, &re); + virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.fuzz, &fu); + virtio_cread_le(vi->vdev, struct virtio_input_config, u.abs.flat, &fl); input_set_abs_params(vi->idev, abs, mi, ma, fu, fl); input_abs_set_res(vi->idev, abs, re); } @@ -244,14 +244,14 @@ static int virtinput_probe(struct virtio_device *vdev) size = virtinput_cfg_select(vi, VIRTIO_INPUT_CFG_ID_DEVIDS, 0); if (size >= sizeof(struct virtio_input_devids)) { - virtio_cread(vi->vdev, struct virtio_input_config, - u.ids.bustype, &vi->idev->id.bustype); - virtio_cread(vi->vdev, struct virtio_input_config, - u.ids.vendor, &vi->idev->id.vendor); - virtio_cread(vi->vdev, struct virtio_input_config, - u.ids.product, 
&vi->idev->id.product); - virtio_cread(vi->vdev, struct virtio_input_config, - u.ids.version, &vi->idev->id.version); + virtio_cread_le(vi->vdev, struct virtio_input_config, + u.ids.bustype, &vi->idev->id.bustype); + virtio_cread_le(vi->vdev, struct virtio_input_config, + u.ids.vendor, &vi->idev->id.vendor); + virtio_cread_le(vi->vdev, struct virtio_input_config, + u.ids.product, &vi->idev->id.product); + virtio_cread_le(vi->vdev, struct virtio_input_config, + u.ids.version, &vi->idev->id.version); } else { vi->idev->id.bustype = BUS_VIRTUAL; } diff --git a/drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c index f26f5f64ae82..c08512fcea90 100644 --- a/drivers/virtio/virtio_mem.c +++ b/drivers/virtio/virtio_mem.c @@ -1530,21 +1530,21 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm) uint64_t new_plugged_size, usable_region_size, end_addr; /* the plugged_size is just a reflection of what _we_ did previously */ - virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size, - &new_plugged_size); + virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, + &new_plugged_size); if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size)) vm->plugged_size = new_plugged_size; /* calculate the last usable memory block id */ - virtio_cread(vm->vdev, struct virtio_mem_config, - usable_region_size, &usable_region_size); + virtio_cread_le(vm->vdev, struct virtio_mem_config, + usable_region_size, &usable_region_size); end_addr = vm->addr + usable_region_size; end_addr = min(end_addr, phys_limit); vm->last_usable_mb_id = virtio_mem_phys_to_mb_id(end_addr) - 1; /* see if there is a request to change the size */ - virtio_cread(vm->vdev, struct virtio_mem_config, requested_size, - &vm->requested_size); + virtio_cread_le(vm->vdev, struct virtio_mem_config, requested_size, + &vm->requested_size); dev_info(&vm->vdev->dev, "plugged size: 0x%llx", vm->plugged_size); dev_info(&vm->vdev->dev, "requested size: 0x%llx", vm->requested_size); @@ -1677,16 +1677,16 @@ static int virtio_mem_init(struct virtio_mem *vm) } /* Fetch all properties that can't change. */ - virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size, - &vm->plugged_size); - virtio_cread(vm->vdev, struct virtio_mem_config, block_size, - &vm->device_block_size); - virtio_cread(vm->vdev, struct virtio_mem_config, node_id, - &node_id); + virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size, + &vm->plugged_size); + virtio_cread_le(vm->vdev, struct virtio_mem_config, block_size, + &vm->device_block_size); + virtio_cread_le(vm->vdev, struct virtio_mem_config, node_id, + &node_id); vm->nid = virtio_mem_translate_node_id(vm, node_id); - virtio_cread(vm->vdev, struct virtio_mem_config, addr, &vm->addr); - virtio_cread(vm->vdev, struct virtio_mem_config, region_size, - &vm->region_size); + virtio_cread_le(vm->vdev, struct virtio_mem_config, addr, &vm->addr); + virtio_cread_le(vm->vdev, struct virtio_mem_config, region_size, + &vm->region_size); /* * We always hotplug memory in memory block granularity. This way, diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index db93cedd262f..3e14e700b231 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -27,16 +27,16 @@ * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses * for 16-bit fields and 8-bit accesses for 8-bit fields. 
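
The virtio_balloon, virtio_input and virtio_mem hunks above move config-space accesses to virtio_cread_le()/virtio_cwrite_le(), so each driver no longer open-codes the legacy little-endian conversion behind a VIRTIO_F_VERSION_1 check. A sketch of the calling pattern; struct my_cfg and my_field are made-up names, not part of the patch:

#include <linux/virtio.h>
#include <linux/virtio_config.h>

struct my_cfg {
	__le32 my_field;	/* hypothetical device config field */
};

static u32 my_read_field(struct virtio_device *vdev)
{
	u32 val;

	/* The field is read as little-endian regardless of VIRTIO_F_VERSION_1. */
	virtio_cread_le(vdev, struct my_cfg, my_field, &val);
	return val;
}

static void my_write_field(struct virtio_device *vdev, u32 val)
{
	virtio_cwrite_le(vdev, struct my_cfg, my_field, &val);
}
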
*/ -static inline u8 vp_ioread8(u8 __iomem *addr) +static inline u8 vp_ioread8(const u8 __iomem *addr) { return ioread8(addr); } -static inline u16 vp_ioread16 (__le16 __iomem *addr) +static inline u16 vp_ioread16 (const __le16 __iomem *addr) { return ioread16(addr); } -static inline u32 vp_ioread32(__le32 __iomem *addr) +static inline u32 vp_ioread32(const __le32 __iomem *addr) { return ioread32(addr); } @@ -481,6 +481,7 @@ static const struct virtio_config_ops virtio_pci_config_ops = { * @dev: the pci device * @cfg_type: the VIRTIO_PCI_CAP_* value we seek * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. + * @bars: the bitmask of BARs * * Returns offset of the capability, or 0. */ diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index a2de775801af..becc77697960 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -240,7 +240,7 @@ static inline bool virtqueue_use_indirect(struct virtqueue *_vq, static bool vring_use_dma_api(struct virtio_device *vdev) { - if (!virtio_has_iommu_quirk(vdev)) + if (!virtio_has_dma_quirk(vdev)) return true; /* Otherwise, we are left to guess. */ @@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) { struct vring_virtqueue *vq = to_vvq(_vq); + if (unlikely(vq->broken)) + return false; + virtio_mb(vq->weak_barriers); return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) : virtqueue_poll_split(_vq, last_used_idx); @@ -2225,7 +2228,7 @@ void vring_transport_features(struct virtio_device *vdev) break; case VIRTIO_F_VERSION_1: break; - case VIRTIO_F_IOMMU_PLATFORM: + case VIRTIO_F_ACCESS_PLATFORM: break; case VIRTIO_F_RING_PACKED: break; diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c index c30eb55030be..4a9ddb44b2a7 100644 --- a/drivers/virtio/virtio_vdpa.c +++ b/drivers/virtio/virtio_vdpa.c @@ -57,9 +57,8 @@ static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset, void *buf, unsigned len) { struct vdpa_device *vdpa = vd_get_vdpa(vdev); - const struct vdpa_config_ops *ops = vdpa->config; - ops->get_config(vdpa, offset, buf, len); + vdpa_get_config(vdpa, offset, buf, len); } static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset, @@ -101,9 +100,8 @@ static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status) static void virtio_vdpa_reset(struct virtio_device *vdev) { struct vdpa_device *vdpa = vd_get_vdpa(vdev); - const struct vdpa_config_ops *ops = vdpa->config; - return ops->set_status(vdpa, 0); + vdpa_reset(vdpa); } static bool virtio_vdpa_notify(struct virtqueue *vq) @@ -294,12 +292,11 @@ static u64 virtio_vdpa_get_features(struct virtio_device *vdev) static int virtio_vdpa_finalize_features(struct virtio_device *vdev) { struct vdpa_device *vdpa = vd_get_vdpa(vdev); - const struct vdpa_config_ops *ops = vdpa->config; /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - return ops->set_features(vdpa, vdev->features); + return vdpa_set_features(vdpa, vdev->features); } static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4f4687c46d38..ab7aad5a1e69 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1027,7 +1027,7 @@ config ADVANTECH_WDT If you are configuring a Linux kernel for the Advantech single-board computer, say `Y' here to support its built-in watchdog timer feature. 
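
Most of the watchdog hunks that follow only convert "/* fall through */" comments to the fallthrough pseudo-keyword, which lets the compiler verify that the fall-through is intentional. A self-contained sketch of the pattern in a WDIOC_SETTIMEOUT/WDIOC_GETTIMEOUT switch; all example_* names and the 1..255 range are made up:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>

static int example_timeout = 30;

static int example_set_timeout(int t)
{
	if (t < 1 || t > 255)
		return -EINVAL;
	example_timeout = t;
	return 0;
}

static long example_wdt_ioctl(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	int __user *p = (int __user *)arg;
	int new_timeout;

	switch (cmd) {
	case WDIOC_SETTIMEOUT:
		if (get_user(new_timeout, p))
			return -EFAULT;
		if (example_set_timeout(new_timeout))
			return -EINVAL;
		/* Deliberate: report the new timeout just like WDIOC_GETTIMEOUT. */
		fallthrough;
	case WDIOC_GETTIMEOUT:
		return put_user(example_timeout, p);
	default:
		return -ENOTTY;
	}
}
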
More information can be found at - <http://www.advantech.com.tw/products/> + <https://www.advantech.com.tw/products/> config ALIM1535_WDT tristate "ALi M1535 PMU Watchdog Timer" diff --git a/drivers/watchdog/advantechwdt.c b/drivers/watchdog/advantechwdt.c index 0e4c18a2aa42..554fe85da50e 100644 --- a/drivers/watchdog/advantechwdt.c +++ b/drivers/watchdog/advantechwdt.c @@ -177,7 +177,7 @@ static long advwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (advwdt_set_heartbeat(new_timeout)) return -EINVAL; advwdt_ping(); - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/alim1535_wdt.c b/drivers/watchdog/alim1535_wdt.c index 42338c7d4540..bfb9a91ca1df 100644 --- a/drivers/watchdog/alim1535_wdt.c +++ b/drivers/watchdog/alim1535_wdt.c @@ -220,7 +220,7 @@ static long ali_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; ali_keepalive(); } - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index 5af0358f4390..4ff7f5afb7aa 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -279,7 +279,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) timeout = new_timeout; wdt_keepalive(); } - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c index c087027ffd5d..ff37dc91057d 100644 --- a/drivers/watchdog/ar7_wdt.c +++ b/drivers/watchdog/ar7_wdt.c @@ -235,8 +235,7 @@ static long ar7_wdt_ioctl(struct file *file, ar7_wdt_update_margin(new_margin); ar7_wdt_kick(1); spin_unlock(&wdt_lock); - /* Fall through */ - + fallthrough; case WDIOC_GETTIMEOUT: if (put_user(margin, (int *)arg)) return -EFAULT; diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c index d6dff97c280b..0f18f06a21b6 100644 --- a/drivers/watchdog/ath79_wdt.c +++ b/drivers/watchdog/ath79_wdt.c @@ -215,8 +215,8 @@ static long ath79_wdt_ioctl(struct file *file, unsigned int cmd, err = ath79_wdt_set_timeout(t); if (err) break; + fallthrough; - /* fallthrough */ case WDIOC_GETTIMEOUT: err = put_user(timeout, p); break; diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c index eb850a8d19df..8237c4e9c2a0 100644 --- a/drivers/watchdog/bcm_kona_wdt.c +++ b/drivers/watchdog/bcm_kona_wdt.c @@ -279,7 +279,7 @@ static int bcm_kona_wdt_probe(struct platform_device *pdev) wdt->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(wdt->base)) - return -ENODEV; + return PTR_ERR(wdt->base); wdt->resolution = SECWDOG_DEFAULT_RESOLUTION; ret = bcm_kona_wdt_set_resolution_reg(wdt); diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c index 9d09bbfdef20..7817fb976f9c 100644 --- a/drivers/watchdog/booke_wdt.c +++ b/drivers/watchdog/booke_wdt.c @@ -39,6 +39,11 @@ static bool booke_wdt_enabled; module_param(booke_wdt_enabled, bool, 0); static int booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT; module_param(booke_wdt_period, int, 0); +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, + "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #ifdef CONFIG_PPC_FSL_BOOK3E @@ -215,7 +220,6 @@ static void __exit booke_wdt_exit(void) static int __init booke_wdt_init(void) { int ret = 0; - bool 
nowayout = WATCHDOG_NOWAYOUT; pr_info("powerpc book-e watchdog driver loaded\n"); booke_wdt_info.firmware_version = cur_cpu_spec->pvr_value; diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index fba21de2bbad..32d0e1781e63 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2010-2011 Picochip Ltd., Jamie Iles - * http://www.picochip.com + * https://www.picochip.com * * This file implements a driver for the Synopsys DesignWare watchdog device * in the many subsystems. The watchdog has 16 different timeout periods @@ -13,6 +13,8 @@ */ #include <linux/bitops.h> +#include <linux/limits.h> +#include <linux/kernel.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> @@ -20,11 +22,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> +#include <linux/interrupt.h> #include <linux/of.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/watchdog.h> +#include <linux/debugfs.h> #define WDOG_CONTROL_REG_OFFSET 0x00 #define WDOG_CONTROL_REG_WDT_EN_MASK 0x01 @@ -34,26 +38,64 @@ #define WDOG_CURRENT_COUNT_REG_OFFSET 0x08 #define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c #define WDOG_COUNTER_RESTART_KICK_VALUE 0x76 - -/* The maximum TOP (timeout period) value that can be set in the watchdog. */ -#define DW_WDT_MAX_TOP 15 +#define WDOG_INTERRUPT_STATUS_REG_OFFSET 0x10 +#define WDOG_INTERRUPT_CLEAR_REG_OFFSET 0x14 +#define WDOG_COMP_PARAMS_5_REG_OFFSET 0xe4 +#define WDOG_COMP_PARAMS_4_REG_OFFSET 0xe8 +#define WDOG_COMP_PARAMS_3_REG_OFFSET 0xec +#define WDOG_COMP_PARAMS_2_REG_OFFSET 0xf0 +#define WDOG_COMP_PARAMS_1_REG_OFFSET 0xf4 +#define WDOG_COMP_PARAMS_1_USE_FIX_TOP BIT(6) +#define WDOG_COMP_VERSION_REG_OFFSET 0xf8 +#define WDOG_COMP_TYPE_REG_OFFSET 0xfc + +/* There are sixteen TOPs (timeout periods) that can be set in the watchdog. 
*/ +#define DW_WDT_NUM_TOPS 16 +#define DW_WDT_FIX_TOP(_idx) (1U << (16 + _idx)) #define DW_WDT_DEFAULT_SECONDS 30 +static const u32 dw_wdt_fix_tops[DW_WDT_NUM_TOPS] = { + DW_WDT_FIX_TOP(0), DW_WDT_FIX_TOP(1), DW_WDT_FIX_TOP(2), + DW_WDT_FIX_TOP(3), DW_WDT_FIX_TOP(4), DW_WDT_FIX_TOP(5), + DW_WDT_FIX_TOP(6), DW_WDT_FIX_TOP(7), DW_WDT_FIX_TOP(8), + DW_WDT_FIX_TOP(9), DW_WDT_FIX_TOP(10), DW_WDT_FIX_TOP(11), + DW_WDT_FIX_TOP(12), DW_WDT_FIX_TOP(13), DW_WDT_FIX_TOP(14), + DW_WDT_FIX_TOP(15) +}; + static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); +enum dw_wdt_rmod { + DW_WDT_RMOD_RESET = 1, + DW_WDT_RMOD_IRQ = 2 +}; + +struct dw_wdt_timeout { + u32 top_val; + unsigned int sec; + unsigned int msec; +}; + struct dw_wdt { void __iomem *regs; struct clk *clk; + struct clk *pclk; unsigned long rate; + enum dw_wdt_rmod rmod; + struct dw_wdt_timeout timeouts[DW_WDT_NUM_TOPS]; struct watchdog_device wdd; struct reset_control *rst; /* Save/restore */ u32 control; u32 timeout; + +#ifdef CONFIG_DEBUG_FS + struct dentry *dbgfs_dir; +#endif }; #define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd) @@ -64,20 +106,84 @@ static inline int dw_wdt_is_enabled(struct dw_wdt *dw_wdt) WDOG_CONTROL_REG_WDT_EN_MASK; } -static inline int dw_wdt_top_in_seconds(struct dw_wdt *dw_wdt, unsigned top) +static void dw_wdt_update_mode(struct dw_wdt *dw_wdt, enum dw_wdt_rmod rmod) { + u32 val; + + val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); + if (rmod == DW_WDT_RMOD_IRQ) + val |= WDOG_CONTROL_REG_RESP_MODE_MASK; + else + val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK; + writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); + + dw_wdt->rmod = rmod; +} + +static unsigned int dw_wdt_find_best_top(struct dw_wdt *dw_wdt, + unsigned int timeout, u32 *top_val) +{ + int idx; + /* - * There are 16 possible timeout values in 0..15 where the number of - * cycles is 2 ^ (16 + i) and the watchdog counts down. + * Find a TOP with timeout greater or equal to the requested number. + * Note we'll select a TOP with maximum timeout if the requested + * timeout couldn't be reached. */ - return (1U << (16 + top)) / dw_wdt->rate; + for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) { + if (dw_wdt->timeouts[idx].sec >= timeout) + break; + } + + if (idx == DW_WDT_NUM_TOPS) + --idx; + + *top_val = dw_wdt->timeouts[idx].top_val; + + return dw_wdt->timeouts[idx].sec; +} + +static unsigned int dw_wdt_get_min_timeout(struct dw_wdt *dw_wdt) +{ + int idx; + + /* + * We'll find a timeout greater or equal to one second anyway because + * the driver probe would have failed if there was none. + */ + for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) { + if (dw_wdt->timeouts[idx].sec) + break; + } + + return dw_wdt->timeouts[idx].sec; } -static int dw_wdt_get_top(struct dw_wdt *dw_wdt) +static unsigned int dw_wdt_get_max_timeout_ms(struct dw_wdt *dw_wdt) { - int top = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF; + struct dw_wdt_timeout *timeout = &dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1]; + u64 msec; + + msec = (u64)timeout->sec * MSEC_PER_SEC + timeout->msec; - return dw_wdt_top_in_seconds(dw_wdt, top); + return msec < UINT_MAX ? 
msec : UINT_MAX; +} + +static unsigned int dw_wdt_get_timeout(struct dw_wdt *dw_wdt) +{ + int top_val = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET) & 0xF; + int idx; + + for (idx = 0; idx < DW_WDT_NUM_TOPS; ++idx) { + if (dw_wdt->timeouts[idx].top_val == top_val) + break; + } + + /* + * In IRQ mode due to the two stages counter, the actual timeout is + * twice greater than the TOP setting. + */ + return dw_wdt->timeouts[idx].sec * dw_wdt->rmod; } static int dw_wdt_ping(struct watchdog_device *wdd) @@ -93,17 +199,23 @@ static int dw_wdt_ping(struct watchdog_device *wdd) static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s) { struct dw_wdt *dw_wdt = to_dw_wdt(wdd); - int i, top_val = DW_WDT_MAX_TOP; + unsigned int timeout; + u32 top_val; /* - * Iterate over the timeout values until we find the closest match. We - * always look for >=. + * Note IRQ mode being enabled means having a non-zero pre-timeout + * setup. In this case we try to find a TOP as close to the half of the + * requested timeout as possible since DW Watchdog IRQ mode is designed + * in two stages way - first timeout rises the pre-timeout interrupt, + * second timeout performs the system reset. So basically the effective + * watchdog-caused reset happens after two watchdog TOPs elapsed. */ - for (i = 0; i <= DW_WDT_MAX_TOP; ++i) - if (dw_wdt_top_in_seconds(dw_wdt, i) >= top_s) { - top_val = i; - break; - } + timeout = dw_wdt_find_best_top(dw_wdt, DIV_ROUND_UP(top_s, dw_wdt->rmod), + &top_val); + if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) + wdd->pretimeout = timeout; + else + wdd->pretimeout = 0; /* * Set the new value in the watchdog. Some versions of dw_wdt @@ -114,25 +226,47 @@ static int dw_wdt_set_timeout(struct watchdog_device *wdd, unsigned int top_s) writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); + /* Kick new TOP value into the watchdog counter if activated. */ + if (watchdog_active(wdd)) + dw_wdt_ping(wdd); + /* * In case users set bigger timeout value than HW can support, * kernel(watchdog_dev.c) helps to feed watchdog before * wdd->max_hw_heartbeat_ms */ if (top_s * 1000 <= wdd->max_hw_heartbeat_ms) - wdd->timeout = dw_wdt_top_in_seconds(dw_wdt, top_val); + wdd->timeout = timeout * dw_wdt->rmod; else wdd->timeout = top_s; return 0; } +static int dw_wdt_set_pretimeout(struct watchdog_device *wdd, unsigned int req) +{ + struct dw_wdt *dw_wdt = to_dw_wdt(wdd); + + /* + * We ignore actual value of the timeout passed from user-space + * using it as a flag whether the pretimeout functionality is intended + * to be activated. + */ + dw_wdt_update_mode(dw_wdt, req ? DW_WDT_RMOD_IRQ : DW_WDT_RMOD_RESET); + dw_wdt_set_timeout(wdd, wdd->timeout); + + return 0; +} + static void dw_wdt_arm_system_reset(struct dw_wdt *dw_wdt) { u32 val = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); - /* Disable interrupt mode; always perform system reset. */ - val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK; + /* Disable/enable interrupt mode depending on the RMOD flag. */ + if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) + val |= WDOG_CONTROL_REG_RESP_MODE_MASK; + else + val &= ~WDOG_CONTROL_REG_RESP_MODE_MASK; /* Enable watchdog. 
*/ val |= WDOG_CONTROL_REG_WDT_EN_MASK; writel(val, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); @@ -170,6 +304,7 @@ static int dw_wdt_restart(struct watchdog_device *wdd, struct dw_wdt *dw_wdt = to_dw_wdt(wdd); writel(0, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); + dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET); if (dw_wdt_is_enabled(dw_wdt)) writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt->regs + WDOG_COUNTER_RESTART_REG_OFFSET); @@ -185,9 +320,19 @@ static int dw_wdt_restart(struct watchdog_device *wdd, static unsigned int dw_wdt_get_timeleft(struct watchdog_device *wdd) { struct dw_wdt *dw_wdt = to_dw_wdt(wdd); + unsigned int sec; + u32 val; + + val = readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET); + sec = val / dw_wdt->rate; + + if (dw_wdt->rmod == DW_WDT_RMOD_IRQ) { + val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET); + if (!val) + sec += wdd->pretimeout; + } - return readl(dw_wdt->regs + WDOG_CURRENT_COUNT_REG_OFFSET) / - dw_wdt->rate; + return sec; } static const struct watchdog_info dw_wdt_ident = { @@ -196,16 +341,41 @@ static const struct watchdog_info dw_wdt_ident = { .identity = "Synopsys DesignWare Watchdog", }; +static const struct watchdog_info dw_wdt_pt_ident = { + .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | + WDIOF_PRETIMEOUT | WDIOF_MAGICCLOSE, + .identity = "Synopsys DesignWare Watchdog", +}; + static const struct watchdog_ops dw_wdt_ops = { .owner = THIS_MODULE, .start = dw_wdt_start, .stop = dw_wdt_stop, .ping = dw_wdt_ping, .set_timeout = dw_wdt_set_timeout, + .set_pretimeout = dw_wdt_set_pretimeout, .get_timeleft = dw_wdt_get_timeleft, .restart = dw_wdt_restart, }; +static irqreturn_t dw_wdt_irq(int irq, void *devid) +{ + struct dw_wdt *dw_wdt = devid; + u32 val; + + /* + * We don't clear the IRQ status. It's supposed to be done by the + * following ping operations. + */ + val = readl(dw_wdt->regs + WDOG_INTERRUPT_STATUS_REG_OFFSET); + if (!val) + return IRQ_NONE; + + watchdog_notify_pretimeout(&dw_wdt->wdd); + + return IRQ_HANDLED; +} + #ifdef CONFIG_PM_SLEEP static int dw_wdt_suspend(struct device *dev) { @@ -214,6 +384,7 @@ static int dw_wdt_suspend(struct device *dev) dw_wdt->control = readl(dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); dw_wdt->timeout = readl(dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); + clk_disable_unprepare(dw_wdt->pclk); clk_disable_unprepare(dw_wdt->clk); return 0; @@ -227,6 +398,12 @@ static int dw_wdt_resume(struct device *dev) if (err) return err; + err = clk_prepare_enable(dw_wdt->pclk); + if (err) { + clk_disable_unprepare(dw_wdt->clk); + return err; + } + writel(dw_wdt->timeout, dw_wdt->regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); writel(dw_wdt->control, dw_wdt->regs + WDOG_CONTROL_REG_OFFSET); @@ -238,6 +415,139 @@ static int dw_wdt_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(dw_wdt_pm_ops, dw_wdt_suspend, dw_wdt_resume); +/* + * In case if DW WDT IP core is synthesized with fixed TOP feature disabled the + * TOPs array can be arbitrary ordered with nearly any sixteen uint numbers + * depending on the system engineer imagination. The next method handles the + * passed TOPs array to pre-calculate the effective timeouts and to sort the + * TOP items out in the ascending order with respect to the timeouts. + */ + +static void dw_wdt_handle_tops(struct dw_wdt *dw_wdt, const u32 *tops) +{ + struct dw_wdt_timeout tout, *dst; + int val, tidx; + u64 msec; + + /* + * We walk over the passed TOPs array and calculate corresponding + * timeouts in seconds and milliseconds. 
The milliseconds granularity + * is needed to distinguish the TOPs with very close timeouts and to + * set the watchdog max heartbeat setting further. + */ + for (val = 0; val < DW_WDT_NUM_TOPS; ++val) { + tout.top_val = val; + tout.sec = tops[val] / dw_wdt->rate; + msec = (u64)tops[val] * MSEC_PER_SEC; + do_div(msec, dw_wdt->rate); + tout.msec = msec - ((u64)tout.sec * MSEC_PER_SEC); + + /* + * Find a suitable place for the current TOP in the timeouts + * array so that the list is remained in the ascending order. + */ + for (tidx = 0; tidx < val; ++tidx) { + dst = &dw_wdt->timeouts[tidx]; + if (tout.sec > dst->sec || (tout.sec == dst->sec && + tout.msec >= dst->msec)) + continue; + else + swap(*dst, tout); + } + + dw_wdt->timeouts[val] = tout; + } +} + +static int dw_wdt_init_timeouts(struct dw_wdt *dw_wdt, struct device *dev) +{ + u32 data, of_tops[DW_WDT_NUM_TOPS]; + const u32 *tops; + int ret; + + /* + * Retrieve custom or fixed counter values depending on the + * WDT_USE_FIX_TOP flag found in the component specific parameters + * #1 register. + */ + data = readl(dw_wdt->regs + WDOG_COMP_PARAMS_1_REG_OFFSET); + if (data & WDOG_COMP_PARAMS_1_USE_FIX_TOP) { + tops = dw_wdt_fix_tops; + } else { + ret = of_property_read_variable_u32_array(dev_of_node(dev), + "snps,watchdog-tops", of_tops, DW_WDT_NUM_TOPS, + DW_WDT_NUM_TOPS); + if (ret < 0) { + dev_warn(dev, "No valid TOPs array specified\n"); + tops = dw_wdt_fix_tops; + } else { + tops = of_tops; + } + } + + /* Convert the specified TOPs into an array of watchdog timeouts. */ + dw_wdt_handle_tops(dw_wdt, tops); + if (!dw_wdt->timeouts[DW_WDT_NUM_TOPS - 1].sec) { + dev_err(dev, "No any valid TOP detected\n"); + return -EINVAL; + } + + return 0; +} + +#ifdef CONFIG_DEBUG_FS + +#define DW_WDT_DBGFS_REG(_name, _off) \ +{ \ + .name = _name, \ + .offset = _off \ +} + +static const struct debugfs_reg32 dw_wdt_dbgfs_regs[] = { + DW_WDT_DBGFS_REG("cr", WDOG_CONTROL_REG_OFFSET), + DW_WDT_DBGFS_REG("torr", WDOG_TIMEOUT_RANGE_REG_OFFSET), + DW_WDT_DBGFS_REG("ccvr", WDOG_CURRENT_COUNT_REG_OFFSET), + DW_WDT_DBGFS_REG("crr", WDOG_COUNTER_RESTART_REG_OFFSET), + DW_WDT_DBGFS_REG("stat", WDOG_INTERRUPT_STATUS_REG_OFFSET), + DW_WDT_DBGFS_REG("param5", WDOG_COMP_PARAMS_5_REG_OFFSET), + DW_WDT_DBGFS_REG("param4", WDOG_COMP_PARAMS_4_REG_OFFSET), + DW_WDT_DBGFS_REG("param3", WDOG_COMP_PARAMS_3_REG_OFFSET), + DW_WDT_DBGFS_REG("param2", WDOG_COMP_PARAMS_2_REG_OFFSET), + DW_WDT_DBGFS_REG("param1", WDOG_COMP_PARAMS_1_REG_OFFSET), + DW_WDT_DBGFS_REG("version", WDOG_COMP_VERSION_REG_OFFSET), + DW_WDT_DBGFS_REG("type", WDOG_COMP_TYPE_REG_OFFSET) +}; + +static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) +{ + struct device *dev = dw_wdt->wdd.parent; + struct debugfs_regset32 *regset; + + regset = devm_kzalloc(dev, sizeof(*regset), GFP_KERNEL); + if (!regset) + return; + + regset->regs = dw_wdt_dbgfs_regs; + regset->nregs = ARRAY_SIZE(dw_wdt_dbgfs_regs); + regset->base = dw_wdt->regs; + + dw_wdt->dbgfs_dir = debugfs_create_dir(dev_name(dev), NULL); + + debugfs_create_regset32("registers", 0444, dw_wdt->dbgfs_dir, regset); +} + +static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) +{ + debugfs_remove_recursive(dw_wdt->dbgfs_dir); +} + +#else /* !CONFIG_DEBUG_FS */ + +static void dw_wdt_dbgfs_init(struct dw_wdt *dw_wdt) {} +static void dw_wdt_dbgfs_clear(struct dw_wdt *dw_wdt) {} + +#endif /* !CONFIG_DEBUG_FS */ + static int dw_wdt_drv_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -253,9 +563,18 @@ static int dw_wdt_drv_probe(struct 
platform_device *pdev) if (IS_ERR(dw_wdt->regs)) return PTR_ERR(dw_wdt->regs); - dw_wdt->clk = devm_clk_get(dev, NULL); - if (IS_ERR(dw_wdt->clk)) - return PTR_ERR(dw_wdt->clk); + /* + * Try to request the watchdog dedicated timer clock source. It must + * be supplied if asynchronous mode is enabled. Otherwise fallback + * to the common timer/bus clocks configuration, in which the very + * first found clock supply both timer and APB signals. + */ + dw_wdt->clk = devm_clk_get(dev, "tclk"); + if (IS_ERR(dw_wdt->clk)) { + dw_wdt->clk = devm_clk_get(dev, NULL); + if (IS_ERR(dw_wdt->clk)) + return PTR_ERR(dw_wdt->clk); + } ret = clk_prepare_enable(dw_wdt->clk); if (ret) @@ -267,20 +586,64 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) goto out_disable_clk; } + /* + * Request APB clock if device is configured with async clocks mode. + * In this case both tclk and pclk clocks are supposed to be specified. + * Alas we can't know for sure whether async mode was really activated, + * so the pclk phandle reference is left optional. If it couldn't be + * found we consider the device configured in synchronous clocks mode. + */ + dw_wdt->pclk = devm_clk_get_optional(dev, "pclk"); + if (IS_ERR(dw_wdt->pclk)) { + ret = PTR_ERR(dw_wdt->pclk); + goto out_disable_clk; + } + + ret = clk_prepare_enable(dw_wdt->pclk); + if (ret) + goto out_disable_clk; + dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); if (IS_ERR(dw_wdt->rst)) { ret = PTR_ERR(dw_wdt->rst); - goto out_disable_clk; + goto out_disable_pclk; + } + + /* Enable normal reset without pre-timeout by default. */ + dw_wdt_update_mode(dw_wdt, DW_WDT_RMOD_RESET); + + /* + * Pre-timeout IRQ is optional, since some hardware may lack support + * of it. Note we must request rising-edge IRQ, since the lane is left + * pending either until the next watchdog kick event or up to the + * system reset. + */ + ret = platform_get_irq_optional(pdev, 0); + if (ret > 0) { + ret = devm_request_irq(dev, ret, dw_wdt_irq, + IRQF_SHARED | IRQF_TRIGGER_RISING, + pdev->name, dw_wdt); + if (ret) + goto out_disable_pclk; + + dw_wdt->wdd.info = &dw_wdt_pt_ident; + } else { + if (ret == -EPROBE_DEFER) + goto out_disable_pclk; + + dw_wdt->wdd.info = &dw_wdt_ident; } reset_control_deassert(dw_wdt->rst); + ret = dw_wdt_init_timeouts(dw_wdt, dev); + if (ret) + goto out_disable_clk; + wdd = &dw_wdt->wdd; - wdd->info = &dw_wdt_ident; wdd->ops = &dw_wdt_ops; - wdd->min_timeout = 1; - wdd->max_hw_heartbeat_ms = - dw_wdt_top_in_seconds(dw_wdt, DW_WDT_MAX_TOP) * 1000; + wdd->min_timeout = dw_wdt_get_min_timeout(dw_wdt); + wdd->max_hw_heartbeat_ms = dw_wdt_get_max_timeout_ms(dw_wdt); wdd->parent = dev; watchdog_set_drvdata(wdd, dw_wdt); @@ -293,7 +656,7 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) * devicetree. 
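
In the dw_wdt rework above, each TOP index selects a counter of 2^(16 + idx) clock cycles when the IP is synthesized with fixed TOP values, and the driver now pre-computes the resulting timeouts so it can pick the smallest TOP that covers a request. A worked sketch of that arithmetic; the 32768 Hz rate is only an example figure:

#include <linux/math64.h>
#include <linux/time64.h>
#include <linux/types.h>

/* Fixed-TOP counter value for a TOP index in [0, 15]. */
static u32 example_fix_top(unsigned int idx)
{
	return 1U << (16 + idx);
}

/* Convert a TOP index into milliseconds for a given timer clock rate. */
static u64 example_top_to_msec(unsigned int idx, u32 rate)
{
	return div_u64((u64)example_fix_top(idx) * MSEC_PER_SEC, rate);
}

/*
 * With rate = 32768 Hz:
 *   idx  0: 2^16 cycles ->     2 s
 *   idx  4: 2^20 cycles ->    32 s
 *   idx 15: 2^31 cycles -> 65536 s
 * In pre-timeout (IRQ) mode the effective reset period is twice the TOP,
 * since the counter must expire a second time before the reset fires.
 */
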
*/ if (dw_wdt_is_enabled(dw_wdt)) { - wdd->timeout = dw_wdt_get_top(dw_wdt); + wdd->timeout = dw_wdt_get_timeout(dw_wdt); set_bit(WDOG_HW_RUNNING, &wdd->status); } else { wdd->timeout = DW_WDT_DEFAULT_SECONDS; @@ -306,10 +669,15 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) ret = watchdog_register_device(wdd); if (ret) - goto out_disable_clk; + goto out_disable_pclk; + + dw_wdt_dbgfs_init(dw_wdt); return 0; +out_disable_pclk: + clk_disable_unprepare(dw_wdt->pclk); + out_disable_clk: clk_disable_unprepare(dw_wdt->clk); return ret; @@ -319,8 +687,11 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) { struct dw_wdt *dw_wdt = platform_get_drvdata(pdev); + dw_wdt_dbgfs_clear(dw_wdt); + watchdog_unregister_device(&dw_wdt->wdd); reset_control_assert(dw_wdt->rst); + clk_disable_unprepare(dw_wdt->pclk); clk_disable_unprepare(dw_wdt->clk); return 0; diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c index f5ffa7be066e..2418ebb707bd 100644 --- a/drivers/watchdog/eurotechwdt.c +++ b/drivers/watchdog/eurotechwdt.c @@ -286,7 +286,7 @@ static long eurwdt_ioctl(struct file *file, eurwdt_timeout = time; eurwdt_set_timeout(time); spin_unlock(&eurwdt_lock); - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(eurwdt_timeout, p); diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index a3c44d75d80e..f60beec1bbae 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -306,27 +306,6 @@ exit_unlock: return err; } -static int f71862fg_pin_configure(unsigned short ioaddr) -{ - /* When ioaddr is non-zero the calling function has to take care of - mutex handling and superio preparation! */ - - if (f71862fg_pin == 63) { - if (ioaddr) { - /* SPI must be disabled first to use this pin! */ - superio_clear_bit(ioaddr, SIO_REG_ROM_ADDR_SEL, 6); - superio_set_bit(ioaddr, SIO_REG_MFUNCT3, 4); - } - } else if (f71862fg_pin == 56) { - if (ioaddr) - superio_set_bit(ioaddr, SIO_REG_MFUNCT1, 1); - } else { - pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin); - return -EINVAL; - } - return 0; -} - static int watchdog_start(void) { int err; @@ -352,9 +331,13 @@ static int watchdog_start(void) break; case f71862fg: - err = f71862fg_pin_configure(watchdog.sioaddr); - if (err) - goto exit_superio; + if (f71862fg_pin == 63) { + /* SPI must be disabled first to use this pin! */ + superio_clear_bit(watchdog.sioaddr, SIO_REG_ROM_ADDR_SEL, 6); + superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT3, 4); + } else if (f71862fg_pin == 56) { + superio_set_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 1); + } break; case f71868: @@ -629,7 +612,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, if (new_options & WDIOS_ENABLECARD) return watchdog_start(); - /* fall through */ + fallthrough; case WDIOC_KEEPALIVE: watchdog_keepalive(); @@ -643,7 +626,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, return -EINVAL; watchdog_keepalive(); - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(watchdog.timeout, uarg.i); @@ -690,9 +673,9 @@ static int __init watchdog_init(int sioaddr) * into the module have been registered yet. 
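
The f71808e hunk that continues below reads the watchdog-timeout status bit to report the reboot cause and then writes the bit back as 1 so the sticky flag does not survive into the next boot. A minimal sketch of that write-one-to-clear pattern; the register offset, bit position and MMIO accessors here are placeholders standing in for the driver's Super-I/O helpers:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/types.h>

#define EXAMPLE_WDT_CONF	0x05		/* made-up register offset */
#define EXAMPLE_WDTMOUT_STS	BIT(5)		/* made-up sticky status bit */

static bool example_read_and_clear_timeout_status(void __iomem *base)
{
	u8 conf = readb(base + EXAMPLE_WDT_CONF);
	bool caused_reboot = conf & EXAMPLE_WDTMOUT_STS;

	/* Writing the status bit back as 1 clears it; other bits stay put. */
	writeb(conf | EXAMPLE_WDTMOUT_STS, base + EXAMPLE_WDT_CONF);

	return caused_reboot;
}
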
*/ watchdog.sioaddr = sioaddr; - watchdog.ident.options = WDIOC_SETTIMEOUT - | WDIOF_MAGICCLOSE - | WDIOF_KEEPALIVEPING; + watchdog.ident.options = WDIOF_MAGICCLOSE + | WDIOF_KEEPALIVEPING + | WDIOF_CARDRESET; snprintf(watchdog.ident.identity, sizeof(watchdog.ident.identity), "%s watchdog", @@ -706,6 +689,13 @@ static int __init watchdog_init(int sioaddr) wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF); watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS); + /* + * We don't want WDTMOUT_STS to stick around till regular reboot. + * Write 1 to the bit to clear it to zero. + */ + superio_outb(sioaddr, F71808FG_REG_WDT_CONF, + wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS)); + superio_exit(sioaddr); err = watchdog_set_timeout(timeout); @@ -803,7 +793,6 @@ static int __init f71808e_find(int sioaddr) break; case SIO_F71862_ID: watchdog.type = f71862fg; - err = f71862fg_pin_configure(0); /* validate module parameter */ break; case SIO_F71868_ID: watchdog.type = f71868; @@ -852,6 +841,11 @@ static int __init f71808e_init(void) int err = -ENODEV; int i; + if (f71862fg_pin != 63 && f71862fg_pin != 56) { + pr_err("Invalid argument f71862fg_pin=%d\n", f71862fg_pin); + return -EINVAL; + } + for (i = 0; i < ARRAY_SIZE(addrs); i++) { err = f71808e_find(addrs[i]); if (err == 0) diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c index f6541d1b65e3..df5406aa7d25 100644 --- a/drivers/watchdog/gef_wdt.c +++ b/drivers/watchdog/gef_wdt.c @@ -201,7 +201,7 @@ static long gef_wdt_ioctl(struct file *file, unsigned int cmd, if (get_user(timeout, (int __user *)argp)) return -EFAULT; gef_wdt_set_timeout(timeout); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: if (put_user(gef_wdt_timeout, (int __user *)argp)) diff --git a/drivers/watchdog/geodewdt.c b/drivers/watchdog/geodewdt.c index 9914a4283cb2..83418924e30a 100644 --- a/drivers/watchdog/geodewdt.c +++ b/drivers/watchdog/geodewdt.c @@ -185,7 +185,7 @@ static long geodewdt_ioctl(struct file *file, unsigned int cmd, if (geodewdt_set_heartbeat(interval)) return -EINVAL; - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); diff --git a/drivers/watchdog/ib700wdt.c b/drivers/watchdog/ib700wdt.c index 2b65ea9451d1..a0ddedc362fc 100644 --- a/drivers/watchdog/ib700wdt.c +++ b/drivers/watchdog/ib700wdt.c @@ -214,7 +214,7 @@ static long ibwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (ibwdt_set_heartbeat(new_margin)) return -EINVAL; ibwdt_ping(); - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); diff --git a/drivers/watchdog/it8712f_wdt.c b/drivers/watchdog/it8712f_wdt.c index 2fed40d14007..9b89d2f09568 100644 --- a/drivers/watchdog/it8712f_wdt.c +++ b/drivers/watchdog/it8712f_wdt.c @@ -303,7 +303,7 @@ static long it8712f_wdt_ioctl(struct file *file, unsigned int cmd, superio_exit(); it8712f_wdt_ping(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: if (put_user(margin, p)) return -EFAULT; diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c index 09886616fd21..aae29dcfaf11 100644 --- a/drivers/watchdog/ixp4xx_wdt.c +++ b/drivers/watchdog/ixp4xx_wdt.c @@ -136,7 +136,7 @@ static long ixp4xx_wdt_ioctl(struct file *file, unsigned int cmd, heartbeat = time; wdt_enable(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: ret = put_user(heartbeat, (int *)arg); diff --git a/drivers/watchdog/m54xx_wdt.c b/drivers/watchdog/m54xx_wdt.c index 60ed6252e5f4..f388a769dbd3 100644 --- 
a/drivers/watchdog/m54xx_wdt.c +++ b/drivers/watchdog/m54xx_wdt.c @@ -155,7 +155,7 @@ static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd, heartbeat = time; wdt_enable(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: ret = put_user(heartbeat, (int *)arg); diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index 80ff94688487..743377c5b173 100644 --- a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -171,7 +171,7 @@ static inline void zf_set_timer(unsigned short new, unsigned char n) switch (n) { case WD1: zf_writew(COUNTER_1, new); - /* fall through */ + fallthrough; case WD2: zf_writeb(COUNTER_2, new > 0xff ? 0xff : new); default: diff --git a/drivers/watchdog/mlx_wdt.c b/drivers/watchdog/mlx_wdt.c index 03b9ac4b99af..54193369e85c 100644 --- a/drivers/watchdog/mlx_wdt.c +++ b/drivers/watchdog/mlx_wdt.c @@ -21,6 +21,7 @@ #define MLXREG_WDT_CLOCK_SCALE 1000 #define MLXREG_WDT_MAX_TIMEOUT_TYPE1 32 #define MLXREG_WDT_MAX_TIMEOUT_TYPE2 255 +#define MLXREG_WDT_MAX_TIMEOUT_TYPE3 65535 #define MLXREG_WDT_MIN_TIMEOUT 1 #define MLXREG_WDT_OPTIONS_BASE (WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE | \ WDIOF_SETTIMEOUT) @@ -49,6 +50,7 @@ struct mlxreg_wdt { int tleft_idx; int ping_idx; int reset_idx; + int regmap_val_sz; enum mlxreg_wdt_type wdt_type; }; @@ -111,7 +113,8 @@ static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd, u32 regval, set_time, hw_timeout; int rc; - if (wdt->wdt_type == MLX_WDT_TYPE1) { + switch (wdt->wdt_type) { + case MLX_WDT_TYPE1: rc = regmap_read(wdt->regmap, reg_data->reg, ®val); if (rc) return rc; @@ -120,14 +123,32 @@ static int mlxreg_wdt_set_timeout(struct watchdog_device *wdd, regval = (regval & reg_data->mask) | hw_timeout; /* Rowndown to actual closest number of sec. */ set_time = BIT(hw_timeout) / MLXREG_WDT_CLOCK_SCALE; - } else { + rc = regmap_write(wdt->regmap, reg_data->reg, regval); + break; + case MLX_WDT_TYPE2: + set_time = timeout; + rc = regmap_write(wdt->regmap, reg_data->reg, timeout); + break; + case MLX_WDT_TYPE3: + /* WD_TYPE3 has 2B set time register */ set_time = timeout; - regval = timeout; + if (wdt->regmap_val_sz == 1) { + regval = timeout & 0xff; + rc = regmap_write(wdt->regmap, reg_data->reg, regval); + if (!rc) { + regval = (timeout & 0xff00) >> 8; + rc = regmap_write(wdt->regmap, + reg_data->reg + 1, regval); + } + } else { + rc = regmap_write(wdt->regmap, reg_data->reg, timeout); + } + break; + default: + return -EINVAL; } wdd->timeout = set_time; - rc = regmap_write(wdt->regmap, reg_data->reg, regval); - if (!rc) { /* * Restart watchdog with new timeout period @@ -147,10 +168,25 @@ static unsigned int mlxreg_wdt_get_timeleft(struct watchdog_device *wdd) { struct mlxreg_wdt *wdt = watchdog_get_drvdata(wdd); struct mlxreg_core_data *reg_data = &wdt->pdata->data[wdt->tleft_idx]; - u32 regval; + u32 regval, msb, lsb; int rc; - rc = regmap_read(wdt->regmap, reg_data->reg, ®val); + if (wdt->wdt_type == MLX_WDT_TYPE2) { + rc = regmap_read(wdt->regmap, reg_data->reg, ®val); + } else { + /* WD_TYPE3 has 2 byte timeleft register */ + if (wdt->regmap_val_sz == 1) { + rc = regmap_read(wdt->regmap, reg_data->reg, &lsb); + if (!rc) { + rc = regmap_read(wdt->regmap, + reg_data->reg + 1, &msb); + regval = (msb & 0xff) << 8 | (lsb & 0xff); + } + } else { + rc = regmap_read(wdt->regmap, reg_data->reg, ®val); + } + } + /* Return 0 timeleft in case of failure register read. */ return rc == 0 ? 
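
The mlx_wdt changes below add a third watchdog type whose set-timeout and time-left registers are two bytes wide; when the regmap transfers only one byte per access, the driver writes the low byte to the base register and the high byte to the following one, and recombines them on read. A standalone sketch of that split-and-recombine arithmetic, with a two-element array standing in for the register pair.

#include <stdio.h>
#include <stdint.h>

/* Two adjacent byte-wide "registers" standing in for reg and reg + 1. */
static uint8_t regs[2];

static void write_timeout16(uint16_t timeout)
{
        regs[0] = timeout & 0xff;           /* low byte to the base register */
        regs[1] = (timeout & 0xff00) >> 8;  /* high byte to the next register */
}

static unsigned int read_timeleft16(void)
{
        unsigned int lsb = regs[0];
        unsigned int msb = regs[1];

        return (msb & 0xff) << 8 | (lsb & 0xff);
}

int main(void)
{
        write_timeout16(600);  /* 10 minutes, beyond the old one-byte 255 s limit */
        printf("stored: lsb=0x%02x msb=0x%02x, read back %u s\n",
               (unsigned int)regs[0], (unsigned int)regs[1], read_timeleft16());
        return 0;
}
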
regval : 0; } @@ -212,13 +248,23 @@ static void mlxreg_wdt_config(struct mlxreg_wdt *wdt, wdt->wdd.info = &mlxreg_wdt_aux_info; wdt->wdt_type = pdata->version; - if (wdt->wdt_type == MLX_WDT_TYPE2) { - wdt->wdd.ops = &mlxreg_wdt_ops_type2; - wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2; - } else { + switch (wdt->wdt_type) { + case MLX_WDT_TYPE1: wdt->wdd.ops = &mlxreg_wdt_ops_type1; wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE1; + break; + case MLX_WDT_TYPE2: + wdt->wdd.ops = &mlxreg_wdt_ops_type2; + wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE2; + break; + case MLX_WDT_TYPE3: + wdt->wdd.ops = &mlxreg_wdt_ops_type2; + wdt->wdd.max_timeout = MLXREG_WDT_MAX_TIMEOUT_TYPE3; + break; + default: + break; } + wdt->wdd.min_timeout = MLXREG_WDT_MIN_TIMEOUT; } @@ -249,6 +295,11 @@ static int mlxreg_wdt_probe(struct platform_device *pdev) wdt->wdd.parent = dev; wdt->regmap = pdata->regmap; + rc = regmap_get_val_bytes(wdt->regmap); + if (rc < 0) + return -EINVAL; + + wdt->regmap_val_sz = rc; mlxreg_wdt_config(wdt, pdata); if ((pdata->features & MLXREG_CORE_WD_FEATURE_NOWAYOUT)) diff --git a/drivers/watchdog/mv64x60_wdt.c b/drivers/watchdog/mv64x60_wdt.c index 0bc72dd69b70..894aa63488d3 100644 --- a/drivers/watchdog/mv64x60_wdt.c +++ b/drivers/watchdog/mv64x60_wdt.c @@ -222,7 +222,7 @@ static long mv64x60_wdt_ioctl(struct file *file, if (get_user(timeout, (int __user *)argp)) return -EFAULT; mv64x60_wdt_set_timeout(timeout); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: if (put_user(mv64x60_wdt_timeout, (int __user *)argp)) diff --git a/drivers/watchdog/nv_tco.c b/drivers/watchdog/nv_tco.c index d7a560e348d5..f6902a337422 100644 --- a/drivers/watchdog/nv_tco.c +++ b/drivers/watchdog/nv_tco.c @@ -7,7 +7,7 @@ * Based off i8xx_tco.c: * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights * Reserved. - * http://www.kernelconcepts.de + * https://www.kernelconcepts.de * * TCO timer driver for NV chipsets * based on softdog.c by Alan Cox <alan@redhat.com> @@ -250,7 +250,7 @@ static long nv_tco_ioctl(struct file *file, unsigned int cmd, if (tco_timer_set_heartbeat(new_heartbeat)) return -EINVAL; tco_timer_keepalive(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: diff --git a/drivers/watchdog/nv_tco.h b/drivers/watchdog/nv_tco.h index d325e528010f..c65f82588386 100644 --- a/drivers/watchdog/nv_tco.h +++ b/drivers/watchdog/nv_tco.h @@ -9,7 +9,7 @@ * * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights * Reserved. - * http://www.kernelconcepts.de + * https://www.kernelconcepts.de * * Neither kernel concepts nor Nils Faerber admit liability nor provide * warranty for any of this software. This material is provided diff --git a/drivers/watchdog/pc87413_wdt.c b/drivers/watchdog/pc87413_wdt.c index 73fbfc99083b..2d4504302c9e 100644 --- a/drivers/watchdog/pc87413_wdt.c +++ b/drivers/watchdog/pc87413_wdt.c @@ -433,7 +433,7 @@ static long pc87413_ioctl(struct file *file, unsigned int cmd, return -EINVAL; timeout = new_timeout; pc87413_refresh(); - /* fall through - and return the new timeout... 
*/ + fallthrough; /* and return the new timeout */ case WDIOC_GETTIMEOUT: new_timeout = timeout * 60; return put_user(new_timeout, uarg.i); diff --git a/drivers/watchdog/pcwd.c b/drivers/watchdog/pcwd.c index 7a0587fdc52c..e86fa7f8351d 100644 --- a/drivers/watchdog/pcwd.c +++ b/drivers/watchdog/pcwd.c @@ -651,7 +651,7 @@ static long pcwd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; pcwd_keepalive(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, argp); diff --git a/drivers/watchdog/pcwd_pci.c b/drivers/watchdog/pcwd_pci.c index 81508a42a90c..54d86fcb1837 100644 --- a/drivers/watchdog/pcwd_pci.c +++ b/drivers/watchdog/pcwd_pci.c @@ -542,7 +542,7 @@ static long pcipcwd_ioctl(struct file *file, unsigned int cmd, pcipcwd_keepalive(); } - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c index 2f44af1831d0..41a928eb91ed 100644 --- a/drivers/watchdog/pcwd_usb.c +++ b/drivers/watchdog/pcwd_usb.c @@ -452,7 +452,7 @@ static long usb_pcwd_ioctl(struct file *file, unsigned int cmd, usb_pcwd_keepalive(usb_pcwd_device); } - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); @@ -585,9 +585,8 @@ static struct notifier_block usb_pcwd_notifier = { static inline void usb_pcwd_delete(struct usb_pcwd_private *usb_pcwd) { usb_free_urb(usb_pcwd->intr_urb); - if (usb_pcwd->intr_buffer != NULL) - usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size, - usb_pcwd->intr_buffer, usb_pcwd->intr_dma); + usb_free_coherent(usb_pcwd->udev, usb_pcwd->intr_size, + usb_pcwd->intr_buffer, usb_pcwd->intr_dma); kfree(usb_pcwd); } diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c index aee3c2efd565..e74802f3a32e 100644 --- a/drivers/watchdog/rc32434_wdt.c +++ b/drivers/watchdog/rc32434_wdt.c @@ -230,7 +230,7 @@ static long rc32434_wdt_ioctl(struct file *file, unsigned int cmd, return -EFAULT; if (rc32434_wdt_set(new_timeout)) return -EINVAL; - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return copy_to_user(argp, &timeout, sizeof(int)) ? -EFAULT : 0; default: diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c index 1b9a6dc8f982..7008596a575f 100644 --- a/drivers/watchdog/riowd.c +++ b/drivers/watchdog/riowd.c @@ -134,7 +134,7 @@ static long riowd_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -EINVAL; riowd_timeout = (new_margin + 59) / 60; riowd_writereg(p, riowd_timeout, WDTO_INDEX); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(riowd_timeout * 60, (int __user *)argp); diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c index d456dd72d99a..705e8f7523e8 100644 --- a/drivers/watchdog/rti_wdt.c +++ b/drivers/watchdog/rti_wdt.c @@ -35,7 +35,11 @@ #define RTIWWDRX_NMI 0xa -#define RTIWWDSIZE_50P 0x50 +#define RTIWWDSIZE_50P 0x50 +#define RTIWWDSIZE_25P 0x500 +#define RTIWWDSIZE_12P5 0x5000 +#define RTIWWDSIZE_6P25 0x50000 +#define RTIWWDSIZE_3P125 0x500000 #define WDENABLE_KEY 0xa98559da @@ -48,7 +52,7 @@ #define DWDST BIT(1) -static int heartbeat; +static int heartbeat = DEFAULT_HEARTBEAT; /* * struct to hold data for each WDT device @@ -79,11 +83,9 @@ static int rti_wdt_start(struct watchdog_device *wdd) * be petted during the open window; not too early or not too late. 
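
The RTI watchdog is windowed: pings are only accepted inside an open window at the end of the period, so the driver derives min_hw_heartbeat_ms from the configured window size (the RTIWWDSIZE_* values above), with the 50% window giving an earliest valid ping at half the period. A standalone sketch of that relation follows; the per-second factors mirror the ones the driver uses, and the 60 s timeout is just an example.

#include <stdio.h>

struct window_cfg {
        const char *open_window;  /* fraction of the period in which pings are accepted */
        unsigned int ms_per_sec;  /* min_hw_heartbeat_ms per second of timeout */
};

/* Smaller open window => the earliest allowed ping moves later in the period. */
static const struct window_cfg cfgs[] = {
        { "50%",    500 },
        { "25%",    750 },
        { "12.5%",  875 },
        { "6.25%",  935 },
        { "3.125%", 969 },
};

int main(void)
{
        unsigned int timeout_s = 60;

        for (unsigned int i = 0; i < sizeof(cfgs) / sizeof(cfgs[0]); i++)
                printf("%-6s open window: earliest ping after %u ms of a %u s period\n",
                       cfgs[i].open_window, cfgs[i].ms_per_sec * timeout_s, timeout_s);
        return 0;
}
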
* The HW configuration options only allow for the open window size * to be 50% or less than that; we obviouly want to configure the open - * window as large as possible so we select the 50% option. To avoid - * any glitches, we accommodate 5% safety margin also, so we setup - * the min_hw_hearbeat at 55% of the timeout period. + * window as large as possible so we select the 50% option. */ - wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20; + wdd->min_hw_heartbeat_ms = 500 * wdd->timeout; /* Generate NMI when wdt expires */ writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL); @@ -110,7 +112,48 @@ static int rti_wdt_ping(struct watchdog_device *wdd) return 0; } -static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd) +static int rti_wdt_setup_hw_hb(struct watchdog_device *wdd, u32 wsize) +{ + /* + * RTI only supports a windowed mode, where the watchdog can only + * be petted during the open window; not too early or not too late. + * The HW configuration options only allow for the open window size + * to be 50% or less than that. + */ + switch (wsize) { + case RTIWWDSIZE_50P: + /* 50% open window => 50% min heartbeat */ + wdd->min_hw_heartbeat_ms = 500 * heartbeat; + break; + + case RTIWWDSIZE_25P: + /* 25% open window => 75% min heartbeat */ + wdd->min_hw_heartbeat_ms = 750 * heartbeat; + break; + + case RTIWWDSIZE_12P5: + /* 12.5% open window => 87.5% min heartbeat */ + wdd->min_hw_heartbeat_ms = 875 * heartbeat; + break; + + case RTIWWDSIZE_6P25: + /* 6.5% open window => 93.5% min heartbeat */ + wdd->min_hw_heartbeat_ms = 935 * heartbeat; + break; + + case RTIWWDSIZE_3P125: + /* 3.125% open window => 96.9% min heartbeat */ + wdd->min_hw_heartbeat_ms = 969 * heartbeat; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static unsigned int rti_wdt_get_timeleft_ms(struct watchdog_device *wdd) { u64 timer_counter; u32 val; @@ -123,11 +166,18 @@ static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd) timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR); + timer_counter *= 1000; + do_div(timer_counter, wdt->freq); return timer_counter; } +static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd) +{ + return rti_wdt_get_timeleft_ms(wdd) / 1000; +} + static const struct watchdog_info rti_wdt_info = { .options = WDIOF_KEEPALIVEPING, .identity = "K3 RTI Watchdog", @@ -148,6 +198,7 @@ static int rti_wdt_probe(struct platform_device *pdev) struct watchdog_device *wdd; struct rti_wdt_device *wdt; struct clk *clk; + u32 last_ping = 0; wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); if (!wdt) @@ -169,6 +220,14 @@ static int rti_wdt_probe(struct platform_device *pdev) return -EINVAL; } + /* + * If watchdog is running at 32k clock, it is not accurate. + * Adjust frequency down in this case so that we don't pet + * the watchdog too often. 
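
rti_wdt_get_timeleft_ms() above scales the raw down-counter by 1000 before dividing by the timer clock so sub-second precision is preserved, and does the division on a 64-bit value (do_div in the kernel) because the product can exceed 32 bits. A standalone sketch of that conversion; the 32768 Hz clock is only an example rate.

#include <stdio.h>
#include <stdint.h>

/* Convert a raw down-counter value into milliseconds remaining. */
static uint32_t counter_to_ms(uint32_t counter, uint32_t freq_hz)
{
        uint64_t ms = (uint64_t)counter * 1000;  /* widen first: counter * 1000 can overflow 32 bits */

        ms /= freq_hz;                           /* the driver does this with do_div() */
        return (uint32_t)ms;
}

int main(void)
{
        uint32_t freq_hz = 32768;                      /* example watchdog clock */
        uint32_t counter = 5 * freq_hz + freq_hz / 2;  /* 5.5 s worth of ticks */

        printf("time left: %u ms\n", (unsigned int)counter_to_ms(counter, freq_hz));
        printf("whole seconds: %u s\n", (unsigned int)(counter_to_ms(counter, freq_hz) / 1000));
        return 0;
}
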
+ */ + if (wdt->freq < 32768) + wdt->freq = wdt->freq * 9 / 10; + pm_runtime_enable(dev); ret = pm_runtime_get_sync(dev); if (ret) { @@ -185,11 +244,8 @@ static int rti_wdt_probe(struct platform_device *pdev) wdd->min_timeout = 1; wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) / wdt->freq * 1000; - wdd->timeout = DEFAULT_HEARTBEAT; wdd->parent = dev; - watchdog_init_timeout(wdd, heartbeat, dev); - watchdog_set_drvdata(wdd, wdt); watchdog_set_nowayout(wdd, 1); watchdog_set_restart_priority(wdd, 128); @@ -201,16 +257,53 @@ static int rti_wdt_probe(struct platform_device *pdev) goto err_iomap; } + if (readl(wdt->base + RTIDWDCTRL) == WDENABLE_KEY) { + u32 time_left_ms; + u64 heartbeat_ms; + u32 wsize; + + set_bit(WDOG_HW_RUNNING, &wdd->status); + time_left_ms = rti_wdt_get_timeleft_ms(wdd); + heartbeat_ms = readl(wdt->base + RTIDWDPRLD); + heartbeat_ms <<= WDT_PRELOAD_SHIFT; + heartbeat_ms *= 1000; + do_div(heartbeat_ms, wdt->freq); + if (heartbeat_ms != heartbeat * 1000) + dev_warn(dev, "watchdog already running, ignoring heartbeat config!\n"); + + heartbeat = heartbeat_ms; + heartbeat /= 1000; + + wsize = readl(wdt->base + RTIWWDSIZECTRL); + ret = rti_wdt_setup_hw_hb(wdd, wsize); + if (ret) { + dev_err(dev, "bad window size.\n"); + goto err_iomap; + } + + last_ping = heartbeat_ms - time_left_ms; + if (time_left_ms > heartbeat_ms) { + dev_warn(dev, "time_left > heartbeat? Assuming last ping just before now.\n"); + last_ping = 0; + } + } + + watchdog_init_timeout(wdd, heartbeat, dev); + ret = watchdog_register_device(wdd); if (ret) { dev_err(dev, "cannot register watchdog device\n"); goto err_iomap; } + if (last_ping) + watchdog_set_last_hw_keepalive(wdd, last_ping); + return 0; err_iomap: pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); return ret; } @@ -221,6 +314,7 @@ static int rti_wdt_remove(struct platform_device *pdev) watchdog_unregister_device(&wdt->wdd); pm_runtime_put(&pdev->dev); + pm_runtime_disable(&pdev->dev); return 0; } diff --git a/drivers/watchdog/sa1100_wdt.c b/drivers/watchdog/sa1100_wdt.c index 9b93be00109f..27846c6bdfb0 100644 --- a/drivers/watchdog/sa1100_wdt.c +++ b/drivers/watchdog/sa1100_wdt.c @@ -127,7 +127,7 @@ static long sa1100dog_ioctl(struct file *file, unsigned int cmd, pre_margin = oscr_freq * time; writel_relaxed(readl_relaxed(OSCR) + pre_margin, OSMR3); - /*fall through*/ + fallthrough; case WDIOC_GETTIMEOUT: ret = put_user(pre_margin / oscr_freq, p); diff --git a/drivers/watchdog/sb_wdog.c b/drivers/watchdog/sb_wdog.c index da2dad00d473..504be461f992 100644 --- a/drivers/watchdog/sb_wdog.c +++ b/drivers/watchdog/sb_wdog.c @@ -202,7 +202,7 @@ static long sbwdog_ioctl(struct file *file, unsigned int cmd, timeout = time; sbwdog_set(user_dog, timeout); sbwdog_pet(user_dog); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: /* diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c index f2cbe6d880a8..a947a63fb44a 100644 --- a/drivers/watchdog/sbc60xxwdt.c +++ b/drivers/watchdog/sbc60xxwdt.c @@ -265,7 +265,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) timeout = new_timeout; wdt_keepalive(); } - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/sbc7240_wdt.c b/drivers/watchdog/sbc7240_wdt.c index 520b8dd77ed4..d640b26e18a6 100644 --- a/drivers/watchdog/sbc7240_wdt.c +++ b/drivers/watchdog/sbc7240_wdt.c @@ -195,7 +195,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, 
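
When the probe above finds the watchdog already running, it reconstructs the programmed period from the preload register and estimates the time since the last hardware ping as heartbeat_ms - time_left_ms, falling back to "just pinged" if the readings are inconsistent; the result is later passed to watchdog_set_last_hw_keepalive(). A standalone sketch of that bookkeeping with example numbers.

#include <stdio.h>
#include <stdint.h>

/* Estimate how many ms ago an already-running watchdog was last pinged. */
static uint32_t last_ping_ms(uint64_t heartbeat_ms, uint64_t time_left_ms)
{
        if (time_left_ms > heartbeat_ms) {
                /* Inconsistent readings: assume the last ping happened just now. */
                fprintf(stderr, "time_left > heartbeat? Assuming last ping just before now.\n");
                return 0;
        }
        return (uint32_t)(heartbeat_ms - time_left_ms);
}

int main(void)
{
        /* Example: the bootloader programmed a 60 s period and 42.5 s remain. */
        uint64_t heartbeat_ms = 60000;
        uint64_t time_left_ms = 42500;

        printf("last hardware ping was about %u ms ago\n",
               (unsigned int)last_ping_ms(heartbeat_ms, time_left_ms));
        return 0;
}
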
unsigned long arg) if (wdt_set_timeout(new_timeout)) return -EINVAL; } - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, (int __user *)arg); default: diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index 1b20b33879c4..04483d6453d6 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c @@ -154,7 +154,7 @@ static long fitpc2_wdt_ioctl(struct file *file, unsigned int cmd, margin = time; wdt_enable(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: ret = put_user(margin, (int *)arg); diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c index fbe79bcc9297..e66e6b905964 100644 --- a/drivers/watchdog/sc520_wdt.c +++ b/drivers/watchdog/sc520_wdt.c @@ -321,7 +321,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) wdt_keepalive(); } - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 83949a385f62..d8b77fe10eba 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -295,7 +295,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, if (sch311x_wdt_set_heartbeat(new_timeout)) return -EINVAL; sch311x_wdt_keepalive(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/scx200_wdt.c b/drivers/watchdog/scx200_wdt.c index c94098acb78f..7b5e18323f3f 100644 --- a/drivers/watchdog/scx200_wdt.c +++ b/drivers/watchdog/scx200_wdt.c @@ -186,7 +186,7 @@ static long scx200_wdt_ioctl(struct file *file, unsigned int cmd, margin = new_margin; scx200_wdt_update_margin(); scx200_wdt_ping(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: if (put_user(margin, p)) return -EFAULT; diff --git a/drivers/watchdog/smsc37b787_wdt.c b/drivers/watchdog/smsc37b787_wdt.c index 43de56acd767..7463df479d11 100644 --- a/drivers/watchdog/smsc37b787_wdt.c +++ b/drivers/watchdog/smsc37b787_wdt.c @@ -474,7 +474,7 @@ static long wb_smsc_wdt_ioctl(struct file *file, return -EINVAL; timeout = new_timeout; wb_smsc_wdt_set_timeout(timeout); - /* fall through - and return the new timeout... */ + fallthrough; /* and return the new timeout */ case WDIOC_GETTIMEOUT: new_timeout = timeout; if (unit == UNIT_MINUTE) diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c index 3e4885c1545e..7a1096265f18 100644 --- a/drivers/watchdog/softdog.c +++ b/drivers/watchdog/softdog.c @@ -20,11 +20,13 @@ #include <linux/hrtimer.h> #include <linux/init.h> #include <linux/kernel.h> +#include <linux/kthread.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/reboot.h> #include <linux/types.h> #include <linux/watchdog.h> +#include <linux/workqueue.h> #define TIMER_MARGIN 60 /* Default is 60 seconds */ static unsigned int soft_margin = TIMER_MARGIN; /* in seconds */ @@ -49,11 +51,34 @@ module_param(soft_panic, int, 0); MODULE_PARM_DESC(soft_panic, "Softdog action, set to 1 to panic, 0 to reboot (default=0)"); +static char *soft_reboot_cmd; +module_param(soft_reboot_cmd, charp, 0000); +MODULE_PARM_DESC(soft_reboot_cmd, + "Set reboot command. 
Emergency reboot takes place if unset"); + +static bool soft_active_on_boot; +module_param(soft_active_on_boot, bool, 0000); +MODULE_PARM_DESC(soft_active_on_boot, + "Set to true to active Softdog on boot (default=false)"); + static struct hrtimer softdog_ticktock; static struct hrtimer softdog_preticktock; +static int reboot_kthread_fn(void *data) +{ + kernel_restart(soft_reboot_cmd); + return -EPERM; /* Should not reach here */ +} + +static void reboot_work_fn(struct work_struct *unused) +{ + kthread_run(reboot_kthread_fn, NULL, "softdog_reboot"); +} + static enum hrtimer_restart softdog_fire(struct hrtimer *timer) { + static bool soft_reboot_fired; + module_put(THIS_MODULE); if (soft_noboot) { pr_crit("Triggered - Reboot ignored\n"); @@ -62,6 +87,33 @@ static enum hrtimer_restart softdog_fire(struct hrtimer *timer) panic("Software Watchdog Timer expired"); } else { pr_crit("Initiating system reboot\n"); + if (!soft_reboot_fired && soft_reboot_cmd != NULL) { + static DECLARE_WORK(reboot_work, reboot_work_fn); + /* + * The 'kernel_restart' is a 'might-sleep' operation. + * Also, executing it in system-wide workqueues blocks + * any driver from using the same workqueue in its + * shutdown callback function. Thus, we should execute + * the 'kernel_restart' in a standalone kernel thread. + * But since starting a kernel thread is also a + * 'might-sleep' operation, so the 'reboot_work' is + * required as a launcher of the kernel thread. + * + * After request the reboot, restart the timer to + * schedule an 'emergency_restart' reboot after + * 'TIMER_MARGIN' seconds. It's because if the softdog + * hangs, it might be because of scheduling issues. And + * if that is the case, both 'schedule_work' and + * 'kernel_restart' may possibly be malfunctional at the + * same time. + */ + soft_reboot_fired = true; + schedule_work(&reboot_work); + hrtimer_add_expires_ns(timer, + (u64)TIMER_MARGIN * NSEC_PER_SEC); + + return HRTIMER_RESTART; + } emergency_restart(); pr_crit("Reboot didn't ?????\n"); } @@ -145,12 +197,17 @@ static int __init softdog_init(void) softdog_preticktock.function = softdog_pretimeout; } + if (soft_active_on_boot) + softdog_ping(&softdog_dev); + ret = watchdog_register_device(&softdog_dev); if (ret) return ret; pr_info("initialized. soft_noboot=%d soft_margin=%d sec soft_panic=%d (nowayout=%d)\n", soft_noboot, softdog_dev.timeout, soft_panic, nowayout); + pr_info(" soft_reboot_cmd=%s soft_active_on_boot=%d\n", + soft_reboot_cmd ?: "<not set>", soft_active_on_boot); return 0; } diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 93bd302ae7c5..85e9664318c9 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -7,7 +7,7 @@ * Based on i8xx_tco.c: * (c) Copyright 2000 kernel concepts <nils@kernelconcepts.de>, All Rights * Reserved. 
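
The softdog change here makes the first expiry request an orderly reboot through a work item that spawns a kthread running kernel_restart() (both steps may sleep, which the hrtimer callback must not), and re-arms the timer so a second expiry after TIMER_MARGIN seconds still forces emergency_restart() if the graceful path stalls. A standalone sketch of that "graceful once, hard fallback stays armed" decision; the function names are stand-ins for the kernel calls, and no real timers or threads are involved.

#include <stdio.h>
#include <stdbool.h>

enum expiry_action { RESTART_TIMER, STOP_TIMER };

static bool graceful_requested;       /* mirrors the one-shot soft_reboot_fired flag */
static bool have_reboot_cmd = true;   /* stands in for a non-NULL soft_reboot_cmd */

/* Called on every expiry of the software watchdog. */
static enum expiry_action on_expiry(void)
{
        if (!graceful_requested && have_reboot_cmd) {
                graceful_requested = true;
                printf("expiry 1: requesting orderly reboot, keeping the hard fallback armed\n");
                /* Kernel path: schedule_work() -> kthread_run() -> kernel_restart(). */
                return RESTART_TIMER;  /* fire again after the margin period */
        }

        printf("expiry 2: orderly reboot did not finish, forcing emergency restart\n");
        /* Kernel path: emergency_restart(). */
        return STOP_TIMER;
}

int main(void)
{
        /* Simulate two consecutive expiries of the fallback timer. */
        if (on_expiry() == RESTART_TIMER)
                on_expiry();
        return 0;
}
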
- * http://www.kernelconcepts.de + * https://www.kernelconcepts.de * * See AMD Publication 43009 "AMD SB700/710/750 Register Reference Guide", * AMD Publication 45482 "AMD SB800-Series Southbridges Register diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c index 5f05a45ac187..b50757882a98 100644 --- a/drivers/watchdog/sunxi_wdt.c +++ b/drivers/watchdog/sunxi_wdt.c @@ -235,7 +235,7 @@ static int sunxi_wdt_probe(struct platform_device *pdev) sunxi_wdt = devm_kzalloc(dev, sizeof(*sunxi_wdt), GFP_KERNEL); if (!sunxi_wdt) - return -EINVAL; + return -ENOMEM; sunxi_wdt->wdt_regs = of_device_get_match_data(dev); if (!sunxi_wdt->wdt_regs) diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c index 6b3b667e6f23..5772cc5d3780 100644 --- a/drivers/watchdog/w83877f_wdt.c +++ b/drivers/watchdog/w83877f_wdt.c @@ -289,7 +289,7 @@ static long fop_ioctl(struct file *file, unsigned int cmd, unsigned long arg) timeout = new_timeout; wdt_keepalive(); } - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 5212e68c6b01..fd64ae77780a 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -422,7 +422,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; wdt_keepalive(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index a6925847f76f..a8a1ed215e1e 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -174,7 +174,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, timeout = new_timeout; wafwdt_stop(); wafwdt_start(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, p); diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index b9dc2c352151..6798addabd5a 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -275,15 +275,18 @@ static int watchdog_start(struct watchdog_device *wdd) set_bit(_WDOG_KEEPALIVE, &wd_data->status); started_at = ktime_get(); - if (watchdog_hw_running(wdd) && wdd->ops->ping) - err = wdd->ops->ping(wdd); - else + if (watchdog_hw_running(wdd) && wdd->ops->ping) { + err = __watchdog_ping(wdd); + if (err == 0) + set_bit(WDOG_ACTIVE, &wdd->status); + } else { err = wdd->ops->start(wdd); - if (err == 0) { - set_bit(WDOG_ACTIVE, &wdd->status); - wd_data->last_keepalive = started_at; - wd_data->last_hw_keepalive = started_at; - watchdog_update_worker(wdd); + if (err == 0) { + set_bit(WDOG_ACTIVE, &wdd->status); + wd_data->last_keepalive = started_at; + wd_data->last_hw_keepalive = started_at; + watchdog_update_worker(wdd); + } } return err; @@ -587,7 +590,7 @@ static DEVICE_ATTR_RW(pretimeout_governor); static umode_t wdt_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct watchdog_device *wdd = dev_get_drvdata(dev); umode_t mode = attr->mode; @@ -776,7 +779,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, err = watchdog_ping(wdd); if (err < 0) break; - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: /* timeout == 0 means that we don't know the timeout */ if (wdd->timeout == 0) { @@ -916,7 +919,7 @@ static int watchdog_release(struct inode 
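
The watchdog_dev.c hunk above restructures watchdog_start() so a device whose hardware timer is already running, and which implements ping, is kept alive with __watchdog_ping() rather than started a second time; the keepalive timestamps are refreshed only on the real start path. A standalone sketch of that branch; the two flags stand in for watchdog_hw_running() and the presence of a ping operation.

#include <stdio.h>
#include <stdbool.h>

struct demo_wdd {
        bool hw_running;  /* stands in for watchdog_hw_running(wdd) */
        bool has_ping;    /* stands in for wdd->ops->ping != NULL */
        bool active;
};

static int demo_ping(struct demo_wdd *w)  { (void)w; printf("ping\n");  return 0; }
static int demo_start(struct demo_wdd *w) { (void)w; printf("start\n"); return 0; }

static int demo_watchdog_start(struct demo_wdd *w)
{
        int err;

        if (w->hw_running && w->has_ping) {
                /* The hardware is already ticking: feed it instead of restarting it. */
                err = demo_ping(w);
                if (err == 0)
                        w->active = true;
        } else {
                err = demo_start(w);
                if (err == 0) {
                        w->active = true;
                        /* The real code also records the keepalive timestamps here. */
                }
        }
        return err;
}

int main(void)
{
        struct demo_wdd running = { .hw_running = true, .has_ping = true, .active = false };
        struct demo_wdd idle = { .hw_running = false, .has_ping = true, .active = false };

        demo_watchdog_start(&running);  /* prints "ping"  */
        demo_watchdog_start(&idle);     /* prints "start" */
        return 0;
}
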
*inode, struct file *file) * or if WDIOF_MAGICCLOSE is not set. If nowayout was set then * watchdog_stop will fail. */ - if (!test_bit(WDOG_ACTIVE, &wdd->status)) + if (!watchdog_active(wdd)) err = 0; else if (test_and_clear_bit(_WDOG_ALLOW_RELEASE, &wd_data->status) || !(wdd->info->options & WDIOF_MAGICCLOSE)) @@ -994,6 +997,15 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) if (IS_ERR_OR_NULL(watchdog_kworker)) return -ENODEV; + device_initialize(&wd_data->dev); + wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); + wd_data->dev.class = &watchdog_class; + wd_data->dev.parent = wdd->parent; + wd_data->dev.groups = wdd->groups; + wd_data->dev.release = watchdog_core_data_release; + dev_set_drvdata(&wd_data->dev, wdd); + dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); + kthread_init_work(&wd_data->work, watchdog_ping_work); hrtimer_init(&wd_data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); wd_data->timer.function = watchdog_timer_expired; @@ -1014,15 +1026,6 @@ static int watchdog_cdev_register(struct watchdog_device *wdd) } } - device_initialize(&wd_data->dev); - wd_data->dev.devt = MKDEV(MAJOR(watchdog_devt), wdd->id); - wd_data->dev.class = &watchdog_class; - wd_data->dev.parent = wdd->parent; - wd_data->dev.groups = wdd->groups; - wd_data->dev.release = watchdog_core_data_release; - dev_set_drvdata(&wd_data->dev, wdd); - dev_set_name(&wd_data->dev, "watchdog%d", wdd->id); - /* Fill in the data structures */ cdev_init(&wd_data->cdev, &watchdog_fops); @@ -1136,6 +1139,36 @@ void watchdog_dev_unregister(struct watchdog_device *wdd) } /* + * watchdog_set_last_hw_keepalive: set last HW keepalive time for watchdog + * @wdd: watchdog device + * @last_ping_ms: time since last HW heartbeat + * + * Adjusts the last known HW keepalive time for a watchdog timer. + * This is needed if the watchdog is already running when the probe + * function is called, and it can't be pinged immediately. This + * function must be called immediately after watchdog registration, + * and min_hw_heartbeat_ms must be set for this to be useful. 
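
The new watchdog_set_last_hw_keepalive() back-dates the core's record of the last hardware keepalive by last_ping_ms, so that together with min_hw_heartbeat_ms the framework knows how long it must still wait before the next hardware ping of a watchdog that was already running at probe. A standalone sketch of that scheduling arithmetic, using plain millisecond counters instead of ktime_t; the numbers are arbitrary examples.

#include <stdio.h>
#include <stdint.h>

/*
 * Given the current time, how long ago the hardware was last pinged and the
 * minimum hardware heartbeat, return how many ms must still pass before the
 * next hardware ping is allowed (0 means it may be pinged immediately).
 */
static uint64_t ping_delay_ms(uint64_t now_ms, uint64_t last_ping_ms,
                              uint64_t min_hw_heartbeat_ms)
{
        uint64_t last_hw_keepalive = now_ms - last_ping_ms;  /* back-dated timestamp */
        uint64_t earliest_ping = last_hw_keepalive + min_hw_heartbeat_ms;

        return earliest_ping > now_ms ? earliest_ping - now_ms : 0;
}

int main(void)
{
        /* Example: 30 s minimum heartbeat (50% window of a 60 s period). */
        uint64_t now_ms = 100000;

        printf("pinged 12 s ago: wait another %llu ms\n",
               (unsigned long long)ping_delay_ms(now_ms, 12000, 30000));
        printf("pinged 35 s ago: wait another %llu ms\n",
               (unsigned long long)ping_delay_ms(now_ms, 35000, 30000));
        return 0;
}
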
+ */ +int watchdog_set_last_hw_keepalive(struct watchdog_device *wdd, + unsigned int last_ping_ms) +{ + struct watchdog_core_data *wd_data; + ktime_t now; + + if (!wdd) + return -EINVAL; + + wd_data = wdd->wd_data; + + now = ktime_get(); + + wd_data->last_hw_keepalive = ktime_sub(now, ms_to_ktime(last_ping_ms)); + + return __watchdog_ping(wdd); +} +EXPORT_SYMBOL_GPL(watchdog_set_last_hw_keepalive); + +/* * watchdog_dev_init: init dev part of watchdog core * * Allocate a range of chardev nodes to use for watchdog devices diff --git a/drivers/watchdog/wdt.c b/drivers/watchdog/wdt.c index f9054cb0f8e2..a9e40b5c633e 100644 --- a/drivers/watchdog/wdt.c +++ b/drivers/watchdog/wdt.c @@ -389,7 +389,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) if (wdt_set_heartbeat(new_heartbeat)) return -EINVAL; wdt_ping(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: diff --git a/drivers/watchdog/wdt285.c b/drivers/watchdog/wdt285.c index e60993d0767e..110249e5f642 100644 --- a/drivers/watchdog/wdt285.c +++ b/drivers/watchdog/wdt285.c @@ -168,7 +168,7 @@ static long watchdog_ioctl(struct file *file, unsigned int cmd, soft_margin = new_margin; reload = soft_margin * (mem_fclk_21285 / 256); watchdog_ping(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: ret = put_user(soft_margin, int_arg); break; diff --git a/drivers/watchdog/wdt977.c b/drivers/watchdog/wdt977.c index 066a4fb4d75b..c9b8e863f70f 100644 --- a/drivers/watchdog/wdt977.c +++ b/drivers/watchdog/wdt977.c @@ -398,7 +398,7 @@ static long wdt977_ioctl(struct file *file, unsigned int cmd, return -EINVAL; wdt977_keepalive(); - /* Fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); diff --git a/drivers/watchdog/wdt_pci.c b/drivers/watchdog/wdt_pci.c index e528024faa41..c3254ba5ace6 100644 --- a/drivers/watchdog/wdt_pci.c +++ b/drivers/watchdog/wdt_pci.c @@ -426,7 +426,7 @@ static long wdtpci_ioctl(struct file *file, unsigned int cmd, if (wdtpci_set_heartbeat(new_heartbeat)) return -EINVAL; wdtpci_ping(); - /* fall through */ + fallthrough; case WDIOC_GETTIMEOUT: return put_user(heartbeat, p); default: diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 1d339ef92422..ea6c1e7e3e42 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -52,9 +52,7 @@ config XEN_BALLOON_MEMORY_HOTPLUG config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT int "Hotplugged memory limit (in GiB) for a PV guest" - default 512 if X86_64 - default 4 if X86_32 - range 0 64 if X86_32 + default 512 depends on XEN_HAVE_PVMMU depends on XEN_BALLOON_MEMORY_HOTPLUG help diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 0d322f3d90cd..c25c9a699b48 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -5,8 +5,7 @@ obj-y += mem-reservation.o obj-y += events/ obj-y += xenbus/ -nostackp := $(call cc-option, -fno-stack-protector) -CFLAGS_features.o := $(nostackp) +CFLAGS_features.o := -fno-stack-protector dom0-$(CONFIG_ARM64) += arm-device.o dom0-$(CONFIG_PCI) += pci.o diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c index 75d3bb948bf3..b1b6eebafd5d 100644 --- a/drivers/xen/gntdev-dmabuf.c +++ b/drivers/xen/gntdev-dmabuf.c @@ -613,6 +613,14 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev, goto fail_detach; } + /* Check that we have zero offset. 
*/ if (sgt->sgl->offset) { + ret = ERR_PTR(-EINVAL); + pr_debug("DMA buffer has %d bytes offset, user-space expects 0\n", + sgt->sgl->offset); + goto fail_unmap; + } + /* Check number of pages that imported buffer has. */ if (attach->dmabuf->size != gntdev_dmabuf->nr_pages << PAGE_SHIFT) { ret = ERR_PTR(-EINVAL);
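
The gntdev-dmabuf hunk above rejects imported buffers whose first scatterlist entry starts at a non-zero offset, alongside the existing check that the dma-buf size matches the expected number of pages, since the importing user space expects the data to begin at offset zero. A standalone sketch of the two validations; the struct, the PAGE_SHIFT value and the field names are simplified stand-ins for the kernel types.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1u << DEMO_PAGE_SHIFT)

/* Simplified stand-in for the imported buffer's properties. */
struct demo_import {
        uint32_t sgl_offset;  /* offset of the first scatterlist entry */
        uint64_t size;        /* total dma-buf size in bytes */
        uint32_t nr_pages;    /* number of pages the importer expects */
};

static int validate_import(const struct demo_import *imp)
{
        if (imp->sgl_offset) {
                fprintf(stderr, "DMA buffer has %u bytes offset, user-space expects 0\n",
                        imp->sgl_offset);
                return -EINVAL;
        }
        if (imp->size != (uint64_t)imp->nr_pages << DEMO_PAGE_SHIFT) {
                fprintf(stderr, "size %llu does not match %u pages\n",
                        (unsigned long long)imp->size, imp->nr_pages);
                return -EINVAL;
        }
        return 0;
}

int main(void)
{
        struct demo_import ok  = { 0, 4 * DEMO_PAGE_SIZE, 4 };
        struct demo_import bad = { 128, 4 * DEMO_PAGE_SIZE, 4 };

        printf("page-aligned buffer: %d\n", validate_import(&ok));   /* 0 */
        printf("offset buffer:       %d\n", validate_import(&bad));  /* -EINVAL, i.e. -22 */
        return 0;
}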