Diffstat (limited to 'drivers'): 516 files changed, 27545 insertions, 4500 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig index c7396659fdbe..dcecc9f6e33f 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -138,6 +138,10 @@ source "drivers/virt/Kconfig" source "drivers/virtio/Kconfig" +source "drivers/vdpa/Kconfig" + +source "drivers/vhost/Kconfig" + source "drivers/hv/Kconfig" source "drivers/xen/Kconfig" @@ -200,6 +204,8 @@ source "drivers/thunderbolt/Kconfig" source "drivers/android/Kconfig" +source "drivers/gpu/trace/Kconfig" + source "drivers/nvdimm/Kconfig" source "drivers/dax/Kconfig" diff --git a/drivers/Makefile b/drivers/Makefile index 7646549a1e93..c0cd1b9075e3 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_DMADEVICES) += dma/ obj-y += soc/ obj-$(CONFIG_VIRTIO) += virtio/ +obj-$(CONFIG_VDPA) += vdpa/ obj-$(CONFIG_XEN) += xen/ # regulators early, since some subsystems rely on them to initialize diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h index e618ddfab2fd..40f6a3c33a15 100644 --- a/drivers/acpi/acpica/acnamesp.h +++ b/drivers/acpi/acpica/acnamesp.h @@ -256,6 +256,8 @@ u32 acpi_ns_build_normalized_path(struct acpi_namespace_node *node, char *full_path, u32 path_size, u8 no_trailing); +void acpi_ns_normalize_pathname(char *original_path); + char *acpi_ns_get_normalized_pathname(struct acpi_namespace_node *node, u8 no_trailing); diff --git a/drivers/acpi/acpica/dbinput.c b/drivers/acpi/acpica/dbinput.c index aa71f65395d2..ee6a1b77af3f 100644 --- a/drivers/acpi/acpica/dbinput.c +++ b/drivers/acpi/acpica/dbinput.c @@ -468,16 +468,14 @@ char *acpi_db_get_next_token(char *string, return (NULL); } - /* Remove any spaces at the beginning */ + /* Remove any spaces at the beginning, ignore blank lines */ - if (*string == ' ') { - while (*string && (*string == ' ')) { - string++; - } + while (*string && isspace(*string)) { + string++; + } - if (!(*string)) { - return (NULL); - } + if (!(*string)) { + return (NULL); } switch (*string) { @@ -570,7 +568,7 @@ char *acpi_db_get_next_token(char *string, /* Find end of token */ - while (*string && (*string != ' ')) { + while (*string && !isspace(*string)) { string++; } break; diff --git a/drivers/acpi/acpica/dbxface.c b/drivers/acpi/acpica/dbxface.c index 3eb45ea93e5e..9dfd693cda3e 100644 --- a/drivers/acpi/acpica/dbxface.c +++ b/drivers/acpi/acpica/dbxface.c @@ -409,6 +409,7 @@ acpi_status acpi_initialize_debugger(void) acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT; acpi_gbl_db_opt_no_ini_methods = FALSE; + acpi_gbl_db_opt_no_region_support = FALSE; acpi_gbl_db_buffer = acpi_os_allocate(ACPI_DEBUG_BUFFER_SIZE); if (!acpi_gbl_db_buffer) { diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c index 5e81a1ae44cf..1d4f8c81028c 100644 --- a/drivers/acpi/acpica/dswexec.c +++ b/drivers/acpi/acpica/dswexec.c @@ -16,6 +16,9 @@ #include "acinterp.h" #include "acnamesp.h" #include "acdebug.h" +#ifdef ACPI_EXEC_APP +#include "aecommon.h" +#endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswexec") @@ -329,6 +332,10 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) u32 op_class; union acpi_parse_object *next_op; union acpi_parse_object *first_arg; +#ifdef ACPI_EXEC_APP + char *namepath; + union acpi_operand_object *obj_desc; +#endif ACPI_FUNCTION_TRACE_PTR(ds_exec_end_op, walk_state); @@ -537,6 +544,32 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) status = acpi_ds_eval_buffer_field_operands(walk_state, op); + if (ACPI_FAILURE(status)) { + break; + } +#ifdef ACPI_EXEC_APP + /* + * 
acpi_exec support for namespace initialization file (initialize + * buffer_fields in this code.) + */ + namepath = + acpi_ns_get_external_pathname(op->common.node); + status = ae_lookup_init_file_entry(namepath, &obj_desc); + if (ACPI_SUCCESS(status)) { + status = + acpi_ex_write_data_to_field(obj_desc, + op->common. + node->object, + NULL); + if ACPI_FAILURE + (status) { + ACPI_EXCEPTION((AE_INFO, status, + "While writing to buffer field")); + } + } + ACPI_FREE(namepath); + status = AE_OK; +#endif break; case AML_TYPE_CREATE_OBJECT: diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c index 697974e37edf..27069325b6de 100644 --- a/drivers/acpi/acpica/dswload.c +++ b/drivers/acpi/acpica/dswload.c @@ -14,7 +14,6 @@ #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" - #ifdef ACPI_ASL_COMPILER #include "acdisasm.h" #endif @@ -399,7 +398,6 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) union acpi_parse_object *op; acpi_object_type object_type; acpi_status status = AE_OK; - #ifdef ACPI_ASL_COMPILER u8 param_count; #endif diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c index b31457ca926c..edadbe146506 100644 --- a/drivers/acpi/acpica/dswload2.c +++ b/drivers/acpi/acpica/dswload2.c @@ -15,6 +15,9 @@ #include "acinterp.h" #include "acnamesp.h" #include "acevents.h" +#ifdef ACPI_EXEC_APP +#include "aecommon.h" +#endif #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dswload2") @@ -373,6 +376,10 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) struct acpi_namespace_node *new_node; u32 i; u8 region_space; +#ifdef ACPI_EXEC_APP + union acpi_operand_object *obj_desc; + char *namepath; +#endif ACPI_FUNCTION_TRACE(ds_load2_end_op); @@ -466,6 +473,11 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) * be evaluated later during the execution phase */ status = acpi_ds_create_buffer_field(op, walk_state); + if (ACPI_FAILURE(status)) { + ACPI_EXCEPTION((AE_INFO, status, + "CreateBufferField failure")); + goto cleanup; + } break; case AML_TYPE_NAMED_FIELD: @@ -604,6 +616,29 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) case AML_NAME_OP: status = acpi_ds_create_node(walk_state, node, op); + if (ACPI_FAILURE(status)) { + goto cleanup; + } +#ifdef ACPI_EXEC_APP + /* + * acpi_exec support for namespace initialization file (initialize + * Name opcodes in this code.) + */ + namepath = acpi_ns_get_external_pathname(node); + status = ae_lookup_init_file_entry(namepath, &obj_desc); + if (ACPI_SUCCESS(status)) { + + /* Detach any existing object, attach new object */ + + if (node->object) { + acpi_ns_detach_object(node); + } + acpi_ns_attach_object(node, obj_desc, + obj_desc->common.type); + } + ACPI_FREE(namepath); + status = AE_OK; +#endif break; case AML_METHOD_OP: diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c index d4d26147610e..d91153f65700 100644 --- a/drivers/acpi/acpica/nsnames.c +++ b/drivers/acpi/acpica/nsnames.c @@ -13,9 +13,6 @@ #define _COMPONENT ACPI_NAMESPACE ACPI_MODULE_NAME("nsnames") -/* Local Prototypes */ -static void acpi_ns_normalize_pathname(char *original_path); - /******************************************************************************* * * FUNCTION: acpi_ns_get_external_pathname @@ -30,7 +27,6 @@ static void acpi_ns_normalize_pathname(char *original_path); * for error and debug statements. 
* ******************************************************************************/ - char *acpi_ns_get_external_pathname(struct acpi_namespace_node *node) { char *name_buffer; @@ -411,7 +407,7 @@ cleanup: * ******************************************************************************/ -static void acpi_ns_normalize_pathname(char *original_path) +void acpi_ns_normalize_pathname(char *original_path) { char *input_path = original_path; char *new_path_buffer; diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c index befdd13b403b..177ab88d95de 100644 --- a/drivers/acpi/acpica/utdecode.c +++ b/drivers/acpi/acpica/utdecode.c @@ -78,7 +78,7 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = { "IPMI", /* 0x07 */ "GeneralPurposeIo", /* 0x08 */ "GenericSerialBus", /* 0x09 */ - "PlatformCommChannel" /* 0x0A */ + "PCC" /* 0x0A */ }; const char *acpi_ut_get_region_name(u8 space_id) diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c index eee263cb7beb..c365faf4e6cd 100644 --- a/drivers/acpi/acpica/utdelete.c +++ b/drivers/acpi/acpica/utdelete.c @@ -452,13 +452,13 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action) * * FUNCTION: acpi_ut_update_object_reference * - * PARAMETERS: object - Increment ref count for this object - * and all sub-objects + * PARAMETERS: object - Increment or decrement the ref count for + * this object and all sub-objects * action - Either REF_INCREMENT or REF_DECREMENT * * RETURN: Status * - * DESCRIPTION: Increment the object reference count + * DESCRIPTION: Increment or decrement the object reference count * * Object references are incremented when: * 1) An object is attached to a Node (namespace object) @@ -492,7 +492,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) } /* - * All sub-objects must have their reference count incremented + * All sub-objects must have their reference count updated * also. Different object types have different subobjects. 
*/ switch (object->common.type) { @@ -559,6 +559,7 @@ acpi_ut_update_object_reference(union acpi_operand_object *object, u16 action) break; } } + next_object = NULL; break; diff --git a/drivers/acpi/acpica/utprint.c b/drivers/acpi/acpica/utprint.c index a874dac7db5c..681c11f4af4e 100644 --- a/drivers/acpi/acpica/utprint.c +++ b/drivers/acpi/acpica/utprint.c @@ -332,7 +332,12 @@ int vsnprintf(char *string, acpi_size size, const char *format, va_list args) int i; pos = string; - end = string + size; + + if (size != ACPI_UINT32_MAX) { + end = string + size; + } else { + end = ACPI_CAST_PTR(char, ACPI_UINT32_MAX); + } for (; *format; ++format) { if (*format != '%') { diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index ed3d2d1a7ae9..7d04424189df 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1015,6 +1015,7 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) return ops; if (dev_is_pci(dev)) { + struct iommu_fwspec *fwspec; struct pci_bus *bus = to_pci_dev(dev)->bus; struct iort_pci_alias_info info = { .dev = dev }; @@ -1027,8 +1028,9 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev) err = pci_for_each_dma_alias(to_pci_dev(dev), iort_pci_iommu_init, &info); - if (!err && iort_pci_rc_supports_ats(node)) - dev->iommu_fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; + fwspec = dev_iommu_fwspec_get(dev); + if (fwspec && iort_pci_rc_supports_ats(node)) + fwspec->flags |= IOMMU_FWSPEC_PCI_RC_ATS; } else { int i = 0; diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index a1a858ad4d18..8b2e89c20c11 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -438,13 +438,10 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) * domain info. */ for_each_possible_cpu(i) { - pr = all_cpu_data[i]; - if (!pr) - continue; - if (cpumask_test_cpu(i, covered_cpus)) continue; + pr = all_cpu_data[i]; cpc_ptr = per_cpu(cpc_desc_ptr, i); if (!cpc_ptr) { retval = -EFAULT; @@ -495,44 +492,28 @@ int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data) cpumask_set_cpu(j, pr->shared_cpu_map); } - for_each_possible_cpu(j) { + for_each_cpu(j, pr->shared_cpu_map) { if (i == j) continue; match_pr = all_cpu_data[j]; - if (!match_pr) - continue; - - match_cpc_ptr = per_cpu(cpc_desc_ptr, j); - if (!match_cpc_ptr) { - retval = -EFAULT; - goto err_ret; - } - - match_pdomain = &(match_cpc_ptr->domain_info); - if (match_pdomain->domain != pdomain->domain) - continue; - match_pr->shared_type = pr->shared_type; cpumask_copy(match_pr->shared_cpu_map, pr->shared_cpu_map); } } + goto out; err_ret: for_each_possible_cpu(i) { pr = all_cpu_data[i]; - if (!pr) - continue; /* Assume no coordination on any error parsing domain info */ - if (retval) { - cpumask_clear(pr->shared_cpu_map); - cpumask_set_cpu(i, pr->shared_cpu_map); - pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; - } + cpumask_clear(pr->shared_cpu_map); + cpumask_set_cpu(i, pr->shared_cpu_map); + pr->shared_type = CPUFREQ_SHARED_TYPE_ALL; } - +out: free_cpumask_var(covered_cpus); return retval; } diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index b64c62bfcea5..b2263ec67b43 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -1321,8 +1321,8 @@ int acpi_dev_pm_attach(struct device *dev, bool power_on) */ static const struct acpi_device_id special_pm_ids[] = { {"PNP0C0B", }, /* Generic ACPI fan */ - {"INT1044", }, /* Fan for Tiger Lake generation */ {"INT3404", }, /* Fan */ + {"INTC1044", }, /* Fan for Tiger Lake generation */ {} }; struct 
acpi_device *adev = ACPI_COMPANION(dev); diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index 387f27ef3368..e4e8b75d39f0 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -97,8 +97,8 @@ static int dptf_power_remove(struct platform_device *pdev) } static const struct acpi_device_id int3407_device_ids[] = { - {"INT1047", 0}, {"INT3407", 0}, + {"INTC1047", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3407_device_ids); diff --git a/drivers/acpi/dptf/int340x_thermal.c b/drivers/acpi/dptf/int340x_thermal.c index 1ec7b6900662..bc71a6a60334 100644 --- a/drivers/acpi/dptf/int340x_thermal.c +++ b/drivers/acpi/dptf/int340x_thermal.c @@ -13,10 +13,6 @@ #define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { - {"INT1040"}, - {"INT1043"}, - {"INT1044"}, - {"INT1047"}, {"INT3400"}, {"INT3401", INT3401_DEVICE}, {"INT3402"}, @@ -28,6 +24,10 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = { {"INT3409"}, {"INT340A"}, {"INT340B"}, + {"INTC1040"}, + {"INTC1043"}, + {"INTC1044"}, + {"INTC1047"}, {""}, }; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index a3320f93616d..fa4500f9cfd1 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -360,7 +360,7 @@ static union acpi_object *acpi_label_info(acpi_handle handle) static u8 nfit_dsm_revid(unsigned family, unsigned func) { - static const u8 revid_table[NVDIMM_FAMILY_MAX+1][32] = { + static const u8 revid_table[NVDIMM_FAMILY_MAX+1][NVDIMM_CMD_MAX+1] = { [NVDIMM_FAMILY_INTEL] = { [NVDIMM_INTEL_GET_MODES] = 2, [NVDIMM_INTEL_GET_FWINFO] = 2, @@ -386,7 +386,7 @@ static u8 nfit_dsm_revid(unsigned family, unsigned func) if (family > NVDIMM_FAMILY_MAX) return 0; - if (func > 31) + if (func > NVDIMM_CMD_MAX) return 0; id = revid_table[family][func]; if (id == 0) @@ -492,7 +492,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, * Check for a valid command. For ND_CMD_CALL, we also have to * make sure that the DSM function is supported. 
*/ - if (cmd == ND_CMD_CALL && !test_bit(func, &dsm_mask)) + if (cmd == ND_CMD_CALL && + (func > NVDIMM_CMD_MAX || !test_bit(func, &dsm_mask))) return -ENOTTY; else if (!test_bit(cmd, &cmd_mask)) return -ENOTTY; @@ -2026,8 +2027,10 @@ static int acpi_nfit_register_dimms(struct acpi_nfit_desc *acpi_desc) continue; } - if (nfit_mem->bdw && nfit_mem->memdev_pmem) + if (nfit_mem->bdw && nfit_mem->memdev_pmem) { set_bit(NDD_ALIASING, &flags); + set_bit(NDD_LABELING, &flags); + } /* collate flags across all memdevs for this dimm */ list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) { @@ -3492,7 +3495,8 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc, if (nvdimm && cmd == ND_CMD_CALL && call_pkg->nd_family == NVDIMM_FAMILY_INTEL) { func = call_pkg->nd_command; - if ((1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) + if (func > NVDIMM_CMD_MAX || + (1 << func) & NVDIMM_INTEL_SECURITY_CMDMASK) return -EOPNOTSUPP; } diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h index 24241941181c..f5525f8bb770 100644 --- a/drivers/acpi/nfit/nfit.h +++ b/drivers/acpi/nfit/nfit.h @@ -34,6 +34,7 @@ | ACPI_NFIT_MEM_NOT_ARMED | ACPI_NFIT_MEM_MAP_FAILED) #define NVDIMM_FAMILY_MAX NVDIMM_FAMILY_HYPERV +#define NVDIMM_CMD_MAX 31 #define NVDIMM_STANDARD_CMDMASK \ (1 << ND_CMD_SMART | 1 << ND_CMD_SMART_THRESHOLD | 1 << ND_CMD_DIMM_FLAGS \ @@ -144,32 +145,32 @@ struct nfit_spa { unsigned long ars_state; u32 clear_err_unit; u32 max_ars; - struct acpi_nfit_system_address spa[0]; + struct acpi_nfit_system_address spa[]; }; struct nfit_dcr { struct list_head list; - struct acpi_nfit_control_region dcr[0]; + struct acpi_nfit_control_region dcr[]; }; struct nfit_bdw { struct list_head list; - struct acpi_nfit_data_region bdw[0]; + struct acpi_nfit_data_region bdw[]; }; struct nfit_idt { struct list_head list; - struct acpi_nfit_interleave idt[0]; + struct acpi_nfit_interleave idt[]; }; struct nfit_flush { struct list_head list; - struct acpi_nfit_flush_address flush[0]; + struct acpi_nfit_flush_address flush[]; }; struct nfit_memdev { struct list_head list; - struct acpi_nfit_memory_map memdev[0]; + struct acpi_nfit_memory_map memdev[]; }; enum nfit_mem_flags { diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index eadbf90e65d1..47b4969d9b93 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -72,47 +72,6 @@ int acpi_map_pxm_to_node(int pxm) } EXPORT_SYMBOL(acpi_map_pxm_to_node); -/** - * acpi_map_pxm_to_online_node - Map proximity ID to online node - * @pxm: ACPI proximity ID - * - * This is similar to acpi_map_pxm_to_node(), but always returns an online - * node. When the mapped node from a given proximity ID is offline, it - * looks up the node distance table and returns the nearest online node. - * - * ACPI device drivers, which are called after the NUMA initialization has - * completed in the kernel, can call this interface to obtain their device - * NUMA topology from ACPI tables. Such drivers do not have to deal with - * offline nodes. A node may be offline when a device proximity ID is - * unique, SRAT memory entry does not exist, or NUMA is disabled, ex. - * "numa=off" on x86. 
- */ -int acpi_map_pxm_to_online_node(int pxm) -{ - int node, min_node; - - node = acpi_map_pxm_to_node(pxm); - - if (node == NUMA_NO_NODE) - node = 0; - - min_node = node; - if (!node_online(node)) { - int min_dist = INT_MAX, dist, n; - - for_each_online_node(n) { - dist = node_distance(node, n); - if (dist < min_dist) { - min_dist = dist; - min_node = n; - } - } - } - - return min_node; -} -EXPORT_SYMBOL(acpi_map_pxm_to_online_node); - static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 532a1ae3595a..a0bd56ece3ff 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c @@ -897,13 +897,6 @@ static long __acpi_processor_get_throttling(void *data) return pr->throttling.acpi_processor_get_throttling(pr); } -static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct) -{ - if (direct || (is_percpu_thread() && cpu == smp_processor_id())) - return fn(arg); - return work_on_cpu(cpu, fn, arg); -} - static int acpi_processor_get_throttling(struct acpi_processor *pr) { if (!pr) diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index bb1ae400ec1f..4edc8a3ce40f 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -1009,6 +1009,10 @@ static bool acpi_s2idle_wake(void) if (acpi_any_fixed_event_status_set()) return true; + /* Check wakeups from drivers sharing the SCI. */ + if (acpi_check_wakeup_handlers()) + return true; + /* * If the status bit is set for any enabled GPE other than the * EC one, the wakeup is regarded as a genuine one. diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h index 41675d24a9bc..3d90480ce1b1 100644 --- a/drivers/acpi/sleep.h +++ b/drivers/acpi/sleep.h @@ -2,6 +2,7 @@ extern void acpi_enable_wakeup_devices(u8 sleep_state); extern void acpi_disable_wakeup_devices(u8 sleep_state); +extern bool acpi_check_wakeup_handlers(void); extern struct list_head acpi_wakeup_device_list; extern struct mutex acpi_device_lock; diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 180ac4329763..0e905c3d1645 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c @@ -501,7 +501,7 @@ static const char * const table_sigs[] = { ACPI_SIG_WDDT, ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT, - NULL }; + ACPI_SIG_NHLT, NULL }; #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header) diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 419f814d596a..b4994e50608d 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -352,6 +352,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision 7510"), }, }, + { + .callback = video_detect_force_native, + .ident = "Acer Aspire 5738z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + DMI_MATCH(DMI_BOARD_NAME, "JV50"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c index c28244df56a5..0b2e42530adf 100644 --- a/drivers/acpi/wakeup.c +++ b/drivers/acpi/wakeup.c @@ -12,6 +12,15 @@ #include "internal.h" #include "sleep.h" +struct acpi_wakeup_handler { + struct list_head list_node; + bool (*wakeup)(void *context); + void *context; +}; + +static LIST_HEAD(acpi_wakeup_handler_head); +static 
DEFINE_MUTEX(acpi_wakeup_handler_mutex); + /* * We didn't lock acpi_device_lock in the file, because it invokes oops in * suspend/resume and isn't really required as this is called in S-state. At @@ -90,3 +99,75 @@ int __init acpi_wakeup_device_init(void) mutex_unlock(&acpi_device_lock); return 0; } + +/** + * acpi_register_wakeup_handler - Register wakeup handler + * @wake_irq: The IRQ through which the device may receive wakeups + * @wakeup: Wakeup-handler to call when the SCI has triggered a wakeup + * @context: Context to pass to the handler when calling it + * + * Drivers which may share an IRQ with the SCI can use this to register + * a handler which returns true when the device they are managing wants + * to trigger a wakeup. + */ +int acpi_register_wakeup_handler(int wake_irq, bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + /* + * If the device is not sharing its IRQ with the SCI, there is no + * need to register the handler. + */ + if (!acpi_sci_irq_valid() || wake_irq != acpi_sci_irq) + return 0; + + handler = kmalloc(sizeof(*handler), GFP_KERNEL); + if (!handler) + return -ENOMEM; + + handler->wakeup = wakeup; + handler->context = context; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_add(&handler->list_node, &acpi_wakeup_handler_head); + mutex_unlock(&acpi_wakeup_handler_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_register_wakeup_handler); + +/** + * acpi_unregister_wakeup_handler - Unregister wakeup handler + * @wakeup: Wakeup-handler passed to acpi_register_wakeup_handler() + * @context: Context passed to acpi_register_wakeup_handler() + */ +void acpi_unregister_wakeup_handler(bool (*wakeup)(void *context), + void *context) +{ + struct acpi_wakeup_handler *handler; + + mutex_lock(&acpi_wakeup_handler_mutex); + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup == wakeup && handler->context == context) { + list_del(&handler->list_node); + kfree(handler); + break; + } + } + mutex_unlock(&acpi_wakeup_handler_mutex); +} +EXPORT_SYMBOL_GPL(acpi_unregister_wakeup_handler); + +bool acpi_check_wakeup_handlers(void) +{ + struct acpi_wakeup_handler *handler; + + /* No need to lock, nothing else is running when we're called. */ + list_for_each_entry(handler, &acpi_wakeup_handler_head, list_node) { + if (handler->wakeup(handler->context)) + return true; + } + + return false; +} diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 4086718f6876..dbec3a05590a 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -27,6 +27,24 @@ #define MEMORY_CLASS_NAME "memory" +static const char *const online_type_to_str[] = { + [MMOP_OFFLINE] = "offline", + [MMOP_ONLINE] = "online", + [MMOP_ONLINE_KERNEL] = "online_kernel", + [MMOP_ONLINE_MOVABLE] = "online_movable", +}; + +int memhp_online_type_from_str(const char *str) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) { + if (sysfs_streq(str, online_type_to_str[i])) + return i; + } + return -EINVAL; +} + #define to_memory_block(dev) container_of(dev, struct memory_block, dev) static int sections_per_block; @@ -145,45 +163,6 @@ int memory_notify(unsigned long val, void *v) } /* - * The probe routines leave the pages uninitialized, just as the bootmem code - * does. Make sure we do not access them, but instead use only information from - * within sections. 
- */ -static bool pages_correctly_probed(unsigned long start_pfn) -{ - unsigned long section_nr = pfn_to_section_nr(start_pfn); - unsigned long section_nr_end = section_nr + sections_per_block; - unsigned long pfn = start_pfn; - - /* - * memmap between sections is not contiguous except with - * SPARSEMEM_VMEMMAP. We lookup the page once per section - * and assume memmap is contiguous within each section - */ - for (; section_nr < section_nr_end; section_nr++) { - if (WARN_ON_ONCE(!pfn_valid(pfn))) - return false; - - if (!present_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) not present\n", - section_nr, pfn, pfn + PAGES_PER_SECTION); - return false; - } else if (!valid_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) no valid memmap\n", - section_nr, pfn, pfn + PAGES_PER_SECTION); - return false; - } else if (online_section_nr(section_nr)) { - pr_warn("section %ld pfn[%lx, %lx) is already online\n", - section_nr, pfn, pfn + PAGES_PER_SECTION); - return false; - } - pfn += PAGES_PER_SECTION; - } - - return true; -} - -/* * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is * OK to have direct references to sparsemem variables in here. */ @@ -199,9 +178,6 @@ memory_block_action(unsigned long start_section_nr, unsigned long action, switch (action) { case MEM_ONLINE: - if (!pages_correctly_probed(start_pfn)) - return -EBUSY; - ret = online_pages(start_pfn, nr_pages, online_type, nid); break; case MEM_OFFLINE: @@ -245,17 +221,14 @@ static int memory_subsys_online(struct device *dev) return 0; /* - * If we are called from state_store(), online_type will be - * set >= 0 Otherwise we were called from the device online - * attribute and need to set the online_type. + * When called via device_online() without configuring the online_type, + * we want to default to MMOP_ONLINE. 
*/ - if (mem->online_type < 0) - mem->online_type = MMOP_ONLINE_KEEP; + if (mem->online_type == MMOP_OFFLINE) + mem->online_type = MMOP_ONLINE; ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); - - /* clear online_type */ - mem->online_type = -1; + mem->online_type = MMOP_OFFLINE; return ret; } @@ -267,40 +240,27 @@ static int memory_subsys_offline(struct device *dev) if (mem->state == MEM_OFFLINE) return 0; - /* Can't offline block with non-present sections */ - if (mem->section_count != sections_per_block) - return -EINVAL; - return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + const int online_type = memhp_online_type_from_str(buf); struct memory_block *mem = to_memory_block(dev); - int ret, online_type; + int ret; + + if (online_type < 0) + return -EINVAL; ret = lock_device_hotplug_sysfs(); if (ret) return ret; - if (sysfs_streq(buf, "online_kernel")) - online_type = MMOP_ONLINE_KERNEL; - else if (sysfs_streq(buf, "online_movable")) - online_type = MMOP_ONLINE_MOVABLE; - else if (sysfs_streq(buf, "online")) - online_type = MMOP_ONLINE_KEEP; - else if (sysfs_streq(buf, "offline")) - online_type = MMOP_OFFLINE; - else { - ret = -EINVAL; - goto err; - } - switch (online_type) { case MMOP_ONLINE_KERNEL: case MMOP_ONLINE_MOVABLE: - case MMOP_ONLINE_KEEP: + case MMOP_ONLINE: /* mem->online_type is protected by device_hotplug_lock */ mem->online_type = online_type; ret = device_online(&mem->dev); @@ -312,7 +272,6 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, ret = -EINVAL; /* should never happen */ } -err: unlock_device_hotplug(); if (ret < 0) @@ -380,7 +339,8 @@ static ssize_t valid_zones_show(struct device *dev, } nid = mem->nid; - default_zone = zone_for_pfn_range(MMOP_ONLINE_KEEP, nid, start_pfn, nr_pages); + default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn, + nr_pages); strcat(buf, default_zone->name); print_allowed_zone(buf, nid, start_pfn, nr_pages, MMOP_ONLINE_KERNEL, @@ -418,23 +378,20 @@ static DEVICE_ATTR_RO(block_size_bytes); static ssize_t auto_online_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { - if (memhp_auto_online) - return sprintf(buf, "online\n"); - else - return sprintf(buf, "offline\n"); + return sprintf(buf, "%s\n", + online_type_to_str[memhp_default_online_type]); } static ssize_t auto_online_blocks_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { - if (sysfs_streq(buf, "online")) - memhp_auto_online = true; - else if (sysfs_streq(buf, "offline")) - memhp_auto_online = false; - else + const int online_type = memhp_online_type_from_str(buf); + + if (online_type < 0) return -EINVAL; + memhp_default_online_type = online_type; return count; } @@ -627,7 +584,7 @@ static int init_memory_block(struct memory_block **memory, static int add_memory_block(unsigned long base_section_nr) { - int ret, section_count = 0; + int section_count = 0; struct memory_block *mem; unsigned long nr; @@ -638,12 +595,8 @@ static int add_memory_block(unsigned long base_section_nr) if (section_count == 0) return 0; - ret = init_memory_block(&mem, base_memory_block_id(base_section_nr), - MEM_ONLINE); - if (ret) - return ret; - mem->section_count = section_count; - return 0; + return init_memory_block(&mem, base_memory_block_id(base_section_nr), + MEM_ONLINE); } static void unregister_memory(struct memory_block *memory) @@ -679,7 +632,6 @@ int 
create_memory_block_devices(unsigned long start, unsigned long size) ret = init_memory_block(&mem, block_id, MEM_OFFLINE); if (ret) break; - mem->section_count = sections_per_block; } if (ret) { end_block_id = block_id; @@ -688,7 +640,6 @@ int create_memory_block_devices(unsigned long start, unsigned long size) mem = find_memory_block_by_id(block_id); if (WARN_ON_ONCE(!mem)) continue; - mem->section_count = 0; unregister_memory(mem); } } @@ -717,7 +668,6 @@ void remove_memory_block_devices(unsigned long start, unsigned long size) mem = find_memory_block_by_id(block_id); if (WARN_ON_ONCE(!mem)) continue; - mem->section_count = 0; unregister_memory_block_under_nodes(mem); unregister_memory(mem); } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 6d1dee7051eb..fdd508a78ffd 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1922,10 +1922,6 @@ static int device_prepare(struct device *dev, pm_message_t state) if (dev->power.syscore) return 0; - WARN_ON(!pm_runtime_enabled(dev) && - dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND | - DPM_FLAG_LEAVE_SUSPENDED)); - /* * If a device's parent goes into runtime suspend at the wrong time, * it won't be possible to resume the device. To prevent this we @@ -1973,8 +1969,7 @@ unlock: */ spin_lock_irq(&dev->power.lock); dev->power.direct_complete = state.event == PM_EVENT_SUSPEND && - ((pm_runtime_suspended(dev) && ret > 0) || - dev->power.no_pm_callbacks) && + (ret > 0 || dev->power.no_pm_callbacks) && !dev_pm_test_driver_flags(dev, DPM_FLAG_NEVER_SKIP); spin_unlock_irq(&dev->power.lock); return 0; diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6343402c09e6..1e0a6b19ae0d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -337,10 +337,7 @@ struct rbd_img_request { u64 snap_id; /* for reads */ struct ceph_snap_context *snapc; /* for writes */ }; - union { - struct request *rq; /* block request */ - struct rbd_obj_request *obj_request; /* obj req initiator */ - }; + struct rbd_obj_request *obj_request; /* obj req initiator */ struct list_head lock_item; struct list_head object_extents; /* obj_req.ex structs */ @@ -349,7 +346,6 @@ struct rbd_img_request { struct pending_result pending; struct work_struct work; int work_result; - struct kref kref; }; #define for_each_obj_request(ireq, oreq) \ @@ -1320,15 +1316,6 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request) kref_put(&obj_request->kref, rbd_obj_request_destroy); } -static void rbd_img_request_destroy(struct kref *kref); -static void rbd_img_request_put(struct rbd_img_request *img_request) -{ - rbd_assert(img_request != NULL); - dout("%s: img %p (was %d)\n", __func__, img_request, - kref_read(&img_request->kref)); - kref_put(&img_request->kref, rbd_img_request_destroy); -} - static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, struct rbd_obj_request *obj_request) { @@ -1366,18 +1353,10 @@ static void rbd_osd_submit(struct ceph_osd_request *osd_req) static void img_request_layered_set(struct rbd_img_request *img_request) { set_bit(IMG_REQ_LAYERED, &img_request->flags); - smp_mb(); -} - -static void img_request_layered_clear(struct rbd_img_request *img_request) -{ - clear_bit(IMG_REQ_LAYERED, &img_request->flags); - smp_mb(); } static bool img_request_layered_test(struct rbd_img_request *img_request) { - smp_mb(); return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0; } @@ -1619,10 +1598,8 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) if (!rbd_dev->parent_spec) 
return false; - down_read(&rbd_dev->header_rwsem); if (rbd_dev->parent_overlap) counter = atomic_inc_return_safe(&rbd_dev->parent_ref); - up_read(&rbd_dev->header_rwsem); if (counter < 0) rbd_warn(rbd_dev, "parent reference overflow"); @@ -1630,63 +1607,54 @@ static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) return counter > 0; } -/* - * Caller is responsible for filling in the list of object requests - * that comprises the image request, and the Linux request pointer - * (if there is one). - */ -static struct rbd_img_request *rbd_img_request_create( - struct rbd_device *rbd_dev, - enum obj_operation_type op_type, - struct ceph_snap_context *snapc) +static void rbd_img_request_init(struct rbd_img_request *img_request, + struct rbd_device *rbd_dev, + enum obj_operation_type op_type) { - struct rbd_img_request *img_request; - - img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO); - if (!img_request) - return NULL; + memset(img_request, 0, sizeof(*img_request)); img_request->rbd_dev = rbd_dev; img_request->op_type = op_type; - if (!rbd_img_is_write(img_request)) - img_request->snap_id = rbd_dev->spec->snap_id; - else - img_request->snapc = snapc; - - if (rbd_dev_parent_get(rbd_dev)) - img_request_layered_set(img_request); INIT_LIST_HEAD(&img_request->lock_item); INIT_LIST_HEAD(&img_request->object_extents); mutex_init(&img_request->state_mutex); - kref_init(&img_request->kref); +} + +static void rbd_img_capture_header(struct rbd_img_request *img_req) +{ + struct rbd_device *rbd_dev = img_req->rbd_dev; + + lockdep_assert_held(&rbd_dev->header_rwsem); - return img_request; + if (rbd_img_is_write(img_req)) + img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); + else + img_req->snap_id = rbd_dev->spec->snap_id; + + if (rbd_dev_parent_get(rbd_dev)) + img_request_layered_set(img_req); } -static void rbd_img_request_destroy(struct kref *kref) +static void rbd_img_request_destroy(struct rbd_img_request *img_request) { - struct rbd_img_request *img_request; struct rbd_obj_request *obj_request; struct rbd_obj_request *next_obj_request; - img_request = container_of(kref, struct rbd_img_request, kref); - dout("%s: img %p\n", __func__, img_request); WARN_ON(!list_empty(&img_request->lock_item)); for_each_obj_request_safe(img_request, obj_request, next_obj_request) rbd_img_obj_request_del(img_request, obj_request); - if (img_request_layered_test(img_request)) { - img_request_layered_clear(img_request); + if (img_request_layered_test(img_request)) rbd_dev_parent_put(img_request->rbd_dev); - } if (rbd_img_is_write(img_request)) ceph_put_snap_context(img_request->snapc); - kmem_cache_free(rbd_img_request_cache, img_request); + if (test_bit(IMG_REQ_CHILD, &img_request->flags)) + kmem_cache_free(rbd_img_request_cache, img_request); } #define BITS_PER_OBJ 2 @@ -2849,17 +2817,22 @@ static int rbd_obj_read_object(struct rbd_obj_request *obj_req) static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req) { struct rbd_img_request *img_req = obj_req->img_request; + struct rbd_device *parent = img_req->rbd_dev->parent; struct rbd_img_request *child_img_req; int ret; - child_img_req = rbd_img_request_create(img_req->rbd_dev->parent, - OBJ_OP_READ, NULL); + child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO); if (!child_img_req) return -ENOMEM; + rbd_img_request_init(child_img_req, parent, OBJ_OP_READ); __set_bit(IMG_REQ_CHILD, &child_img_req->flags); child_img_req->obj_request = obj_req; + down_read(&parent->header_rwsem); + rbd_img_capture_header(child_img_req); 
+ up_read(&parent->header_rwsem); + dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req, obj_req); @@ -2888,7 +2861,7 @@ static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req) obj_req->copyup_bvecs); } if (ret) { - rbd_img_request_put(child_img_req); + rbd_img_request_destroy(child_img_req); return ret; } @@ -3647,15 +3620,15 @@ again: if (test_bit(IMG_REQ_CHILD, &img_req->flags)) { struct rbd_obj_request *obj_req = img_req->obj_request; - rbd_img_request_put(img_req); + rbd_img_request_destroy(img_req); if (__rbd_obj_handle_request(obj_req, &result)) { img_req = obj_req->img_request; goto again; } } else { - struct request *rq = img_req->rq; + struct request *rq = blk_mq_rq_from_pdu(img_req); - rbd_img_request_put(img_req); + rbd_img_request_destroy(img_req); blk_mq_end_request(rq, errno_to_blk_status(result)); } } @@ -4707,84 +4680,36 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, static void rbd_queue_workfn(struct work_struct *work) { - struct request *rq = blk_mq_rq_from_pdu(work); - struct rbd_device *rbd_dev = rq->q->queuedata; - struct rbd_img_request *img_request; - struct ceph_snap_context *snapc = NULL; + struct rbd_img_request *img_request = + container_of(work, struct rbd_img_request, work); + struct rbd_device *rbd_dev = img_request->rbd_dev; + enum obj_operation_type op_type = img_request->op_type; + struct request *rq = blk_mq_rq_from_pdu(img_request); u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; u64 length = blk_rq_bytes(rq); - enum obj_operation_type op_type; u64 mapping_size; int result; - switch (req_op(rq)) { - case REQ_OP_DISCARD: - op_type = OBJ_OP_DISCARD; - break; - case REQ_OP_WRITE_ZEROES: - op_type = OBJ_OP_ZEROOUT; - break; - case REQ_OP_WRITE: - op_type = OBJ_OP_WRITE; - break; - case REQ_OP_READ: - op_type = OBJ_OP_READ; - break; - default: - dout("%s: non-fs request type %d\n", __func__, req_op(rq)); - result = -EIO; - goto err; - } - /* Ignore/skip any zero-length requests */ - if (!length) { dout("%s: zero-length request\n", __func__); result = 0; - goto err_rq; - } - - if (op_type != OBJ_OP_READ) { - if (rbd_is_ro(rbd_dev)) { - rbd_warn(rbd_dev, "%s on read-only mapping", - obj_op_name(op_type)); - result = -EIO; - goto err; - } - rbd_assert(!rbd_is_snap(rbd_dev)); - } - - if (offset && length > U64_MAX - offset + 1) { - rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset, - length); - result = -EINVAL; - goto err_rq; /* Shouldn't happen */ + goto err_img_request; } blk_mq_start_request(rq); down_read(&rbd_dev->header_rwsem); mapping_size = rbd_dev->mapping.size; - if (op_type != OBJ_OP_READ) { - snapc = rbd_dev->header.snapc; - ceph_get_snap_context(snapc); - } + rbd_img_capture_header(img_request); up_read(&rbd_dev->header_rwsem); if (offset + length > mapping_size) { rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, length, mapping_size); result = -EIO; - goto err_rq; - } - - img_request = rbd_img_request_create(rbd_dev, op_type, snapc); - if (!img_request) { - result = -ENOMEM; - goto err_rq; + goto err_img_request; } - img_request->rq = rq; - snapc = NULL; /* img_request consumes a ref */ dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev, img_request, obj_op_name(op_type), offset, length); @@ -4801,23 +4726,51 @@ static void rbd_queue_workfn(struct work_struct *work) return; err_img_request: - rbd_img_request_put(img_request); -err_rq: + rbd_img_request_destroy(img_request); if (result) rbd_warn(rbd_dev, "%s %llx at %llx result %d", obj_op_name(op_type), length, offset, 
result); - ceph_put_snap_context(snapc); -err: blk_mq_end_request(rq, errno_to_blk_status(result)); } static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { - struct request *rq = bd->rq; - struct work_struct *work = blk_mq_rq_to_pdu(rq); + struct rbd_device *rbd_dev = hctx->queue->queuedata; + struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq); + enum obj_operation_type op_type; - queue_work(rbd_wq, work); + switch (req_op(bd->rq)) { + case REQ_OP_DISCARD: + op_type = OBJ_OP_DISCARD; + break; + case REQ_OP_WRITE_ZEROES: + op_type = OBJ_OP_ZEROOUT; + break; + case REQ_OP_WRITE: + op_type = OBJ_OP_WRITE; + break; + case REQ_OP_READ: + op_type = OBJ_OP_READ; + break; + default: + rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq)); + return BLK_STS_IOERR; + } + + rbd_img_request_init(img_req, rbd_dev, op_type); + + if (rbd_img_is_write(img_req)) { + if (rbd_is_ro(rbd_dev)) { + rbd_warn(rbd_dev, "%s on read-only mapping", + obj_op_name(img_req->op_type)); + return BLK_STS_IOERR; + } + rbd_assert(!rbd_is_snap(rbd_dev)); + } + + INIT_WORK(&img_req->work, rbd_queue_workfn); + queue_work(rbd_wq, &img_req->work); return BLK_STS_OK; } @@ -4984,18 +4937,8 @@ out: return ret; } -static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq, - unsigned int hctx_idx, unsigned int numa_node) -{ - struct work_struct *work = blk_mq_rq_to_pdu(rq); - - INIT_WORK(work, rbd_queue_workfn); - return 0; -} - static const struct blk_mq_ops rbd_mq_ops = { .queue_rq = rbd_queue_rq, - .init_request = rbd_init_request, }; static int rbd_init_disk(struct rbd_device *rbd_dev) @@ -5027,8 +4970,8 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth; rbd_dev->tag_set.numa_node = NUMA_NO_NODE; rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; - rbd_dev->tag_set.nr_hw_queues = 1; - rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); + rbd_dev->tag_set.nr_hw_queues = num_present_cpus(); + rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request); err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); if (err) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index fea084e0909b..d4665fe9ccd2 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -474,7 +474,7 @@ endmenu config RANDOM_TRUST_CPU bool "Trust the CPU manufacturer to initialize Linux's CRNG" - depends on X86 || S390 || PPC + depends on ARCH_RANDOM default n help Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or diff --git a/drivers/char/random.c b/drivers/char/random.c index c7f9584de2c8..0d10e31fd342 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -781,27 +781,55 @@ static int __init parse_trust_cpu(char *arg) } early_param("random.trust_cpu", parse_trust_cpu); -static void crng_initialize(struct crng_state *crng) +static bool crng_init_try_arch(struct crng_state *crng) { int i; - int arch_init = 1; + bool arch_init = true; unsigned long rv; - memcpy(&crng->state[0], "expand 32-byte k", 16); - if (crng == &primary_crng) - _extract_entropy(&input_pool, &crng->state[4], - sizeof(__u32) * 12, 0); - else - _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); for (i = 4; i < 16; i++) { if (!arch_get_random_seed_long(&rv) && !arch_get_random_long(&rv)) { rv = random_get_entropy(); - arch_init = 0; + arch_init = false; + } + crng->state[i] ^= rv; + } + + return arch_init; +} + +static bool __init crng_init_try_arch_early(struct crng_state *crng) +{ + int i; + bool arch_init = true; + unsigned long 
rv; + + for (i = 4; i < 16; i++) { + if (!arch_get_random_seed_long_early(&rv) && + !arch_get_random_long_early(&rv)) { + rv = random_get_entropy(); + arch_init = false; } crng->state[i] ^= rv; } - if (trust_cpu && arch_init && crng == &primary_crng) { + + return arch_init; +} + +static void __maybe_unused crng_initialize_secondary(struct crng_state *crng) +{ + memcpy(&crng->state[0], "expand 32-byte k", 16); + _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); + crng_init_try_arch(crng); + crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; +} + +static void __init crng_initialize_primary(struct crng_state *crng) +{ + memcpy(&crng->state[0], "expand 32-byte k", 16); + _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); + if (crng_init_try_arch_early(crng) && trust_cpu) { invalidate_batched_entropy(); numa_crng_init(); crng_init = 2; @@ -822,7 +850,7 @@ static void do_numa_crng_init(struct work_struct *work) crng = kmalloc_node(sizeof(struct crng_state), GFP_KERNEL | __GFP_NOFAIL, i); spin_lock_init(&crng->lock); - crng_initialize(crng); + crng_initialize_secondary(crng); pool[i] = crng; } mb(); @@ -1142,14 +1170,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) * We take into account the first, second and third-order deltas * in order to make our estimate. */ - delta = sample.jiffies - state->last_time; - state->last_time = sample.jiffies; + delta = sample.jiffies - READ_ONCE(state->last_time); + WRITE_ONCE(state->last_time, sample.jiffies); - delta2 = delta - state->last_delta; - state->last_delta = delta; + delta2 = delta - READ_ONCE(state->last_delta); + WRITE_ONCE(state->last_delta, delta); - delta3 = delta2 - state->last_delta2; - state->last_delta2 = delta2; + delta3 = delta2 - READ_ONCE(state->last_delta2); + WRITE_ONCE(state->last_delta2, delta2); if (delta < 0) delta = -delta; @@ -1771,7 +1799,7 @@ static void __init init_std_data(struct entropy_store *r) int __init rand_initialize(void) { init_std_data(&input_pool); - crng_initialize(&primary_crng); + crng_initialize_primary(&primary_crng); crng_global_init_time = jiffies; if (ratelimit_disable) { urandom_warning.interval = 0; @@ -2149,11 +2177,11 @@ struct batched_entropy { /* * Get a random word for internal kernel use only. The quality of the random - * number is either as good as RDRAND or as good as /dev/urandom, with the - * goal of being quite fast and not depleting entropy. In order to ensure + * number is good as /dev/urandom, but there is no backtrack protection, with + * the goal of being quite fast and not depleting entropy. In order to ensure * that the randomness provided by this function is okay, the function - * wait_for_random_bytes() should be called and return 0 at least once - * at any point prior. + * wait_for_random_bytes() should be called and return 0 at least once at any + * point prior. 
*/ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = { .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock), @@ -2166,15 +2194,6 @@ u64 get_random_u64(void) struct batched_entropy *batch; static void *previous; -#if BITS_PER_LONG == 64 - if (arch_get_random_long((unsigned long *)&ret)) - return ret; -#else - if (arch_get_random_long((unsigned long *)&ret) && - arch_get_random_long((unsigned long *)&ret + 1)) - return ret; -#endif - warn_unseeded_randomness(&previous); batch = raw_cpu_ptr(&batched_entropy_u64); @@ -2199,9 +2218,6 @@ u32 get_random_u32(void) struct batched_entropy *batch; static void *previous; - if (arch_get_random_int(&ret)) - return ret; - warn_unseeded_randomness(&previous); batch = raw_cpu_ptr(&batched_entropy_u32); diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 58073836b555..8c77e88012e9 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -514,15 +514,15 @@ static int tpm_add_legacy_sysfs(struct tpm_chip *chip) if (chip->flags & (TPM_CHIP_FLAG_TPM2 | TPM_CHIP_FLAG_VIRTUAL)) return 0; - rc = __compat_only_sysfs_link_entry_to_kobj( - &chip->dev.parent->kobj, &chip->dev.kobj, "ppi"); + rc = compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, "ppi", NULL); if (rc && rc != -ENOENT) return rc; /* All the names from tpm-sysfs */ for (i = chip->groups[0]->attrs; *i != NULL; ++i) { - rc = __compat_only_sysfs_link_entry_to_kobj( - &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name); + rc = compat_only_sysfs_link_entry_to_kobj( + &chip->dev.parent->kobj, &chip->dev.kobj, (*i)->name, NULL); if (rc) { tpm_del_legacy_sysfs(chip); return rc; diff --git a/drivers/clk/at91/Makefile b/drivers/clk/at91/Makefile index 3732241352ce..8b90357f2a93 100644 --- a/drivers/clk/at91/Makefile +++ b/drivers/clk/at91/Makefile @@ -15,7 +15,11 @@ obj-$(CONFIG_HAVE_AT91_H32MX) += clk-h32mx.o obj-$(CONFIG_HAVE_AT91_GENERATED_CLK) += clk-generated.o obj-$(CONFIG_HAVE_AT91_I2S_MUX_CLK) += clk-i2s-mux.o obj-$(CONFIG_HAVE_AT91_SAM9X60_PLL) += clk-sam9x60-pll.o +obj-$(CONFIG_SOC_AT91RM9200) += at91rm9200.o obj-$(CONFIG_SOC_AT91SAM9) += at91sam9260.o at91sam9rl.o at91sam9x5.o +obj-$(CONFIG_SOC_AT91SAM9) += at91sam9g45.o +obj-$(CONFIG_SOC_AT91SAM9) += at91sam9n12.o at91sam9x5.o obj-$(CONFIG_SOC_SAM9X60) += sam9x60.o +obj-$(CONFIG_SOC_SAMA5D3) += sama5d3.o obj-$(CONFIG_SOC_SAMA5D4) += sama5d4.o obj-$(CONFIG_SOC_SAMA5D2) += sama5d2.o diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c new file mode 100644 index 000000000000..c44a431b6c97 --- /dev/null +++ b/drivers/clk/at91/at91rm9200.c @@ -0,0 +1,199 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +struct sck { + char *n; + char *p; + u8 id; +}; + +struct pck { + char *n; + u8 id; +}; + +static const struct clk_master_characteristics rm9200_mck_characteristics = { + .output = { .min = 0, .max = 80000000 }, + .divisors = { 1, 2, 3, 4 }, +}; + +static u8 rm9200_pll_out[] = { 0, 2 }; + +static const struct clk_range rm9200_pll_outputs[] = { + { .min = 80000000, .max = 160000000 }, + { .min = 150000000, .max = 180000000 }, +}; + +static const struct clk_pll_characteristics rm9200_pll_characteristics = { + .input = { .min = 1000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(rm9200_pll_outputs), + .output = rm9200_pll_outputs, + .out = rm9200_pll_out, +}; + +static const struct sck 
at91rm9200_systemck[] = { + { .n = "udpck", .p = "usbck", .id = 2 }, + { .n = "uhpck", .p = "usbck", .id = 4 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, + { .n = "pck2", .p = "prog2", .id = 10 }, + { .n = "pck3", .p = "prog3", .id = 11 }, +}; + +static const struct pck at91rm9200_periphck[] = { + { .n = "pioA_clk", .id = 2 }, + { .n = "pioB_clk", .id = 3 }, + { .n = "pioC_clk", .id = 4 }, + { .n = "pioD_clk", .id = 5 }, + { .n = "usart0_clk", .id = 6 }, + { .n = "usart1_clk", .id = 7 }, + { .n = "usart2_clk", .id = 8 }, + { .n = "usart3_clk", .id = 9 }, + { .n = "mci0_clk", .id = 10 }, + { .n = "udc_clk", .id = 11 }, + { .n = "twi0_clk", .id = 12 }, + { .n = "spi0_clk", .id = 13 }, + { .n = "ssc0_clk", .id = 14 }, + { .n = "ssc1_clk", .id = 15 }, + { .n = "ssc2_clk", .id = 16 }, + { .n = "tc0_clk", .id = 17 }, + { .n = "tc1_clk", .id = 18 }, + { .n = "tc2_clk", .id = 19 }, + { .n = "tc3_clk", .id = 20 }, + { .n = "tc4_clk", .id = 21 }, + { .n = "tc5_clk", .id = 22 }, + { .n = "ohci_clk", .id = 23 }, + { .n = "macb0_clk", .id = 24 }, +}; + +static void __init at91rm9200_pmc_setup(struct device_node *np) +{ + const char *slowxtal_name, *mainxtal_name; + struct pmc_data *at91rm9200_pmc; + u32 usb_div[] = { 1, 2, 0, 0 }; + const char *parent_names[6]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_xtal"); + if (i < 0) + return; + + slowxtal_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = device_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + at91rm9200_pmc = pmc_data_allocate(PMC_MAIN + 1, + nck(at91rm9200_systemck), + nck(at91rm9200_periphck), 0); + if (!at91rm9200_pmc) + return; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc"); + if (IS_ERR(hw)) + goto err_free; + + at91rm9200_pmc->chws[PMC_MAIN] = hw; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &at91rm9200_pll_layout, + &rm9200_pll_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1, + &at91rm9200_pll_layout, + &rm9200_pll_characteristics); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slowxtal_name; + parent_names[1] = "mainck"; + parent_names[2] = "pllack"; + parent_names[3] = "pllbck"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91rm9200_master_layout, + &rm9200_mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + at91rm9200_pmc->chws[PMC_MCK] = hw; + + hw = at91rm9200_clk_register_usb(regmap, "usbck", "pllbck", usb_div); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slowxtal_name; + parent_names[1] = "mainck"; + parent_names[2] = "pllack"; + parent_names[3] = "pllbck"; + for (i = 0; i < 4; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 4, i, + &at91rm9200_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(at91rm9200_systemck); i++) { + hw = at91_clk_register_system(regmap, at91rm9200_systemck[i].n, + at91rm9200_systemck[i].p, + at91rm9200_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + 
at91rm9200_pmc->shws[at91rm9200_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(at91rm9200_periphck); i++) { + hw = at91_clk_register_peripheral(regmap, + at91rm9200_periphck[i].n, + "masterck", + at91rm9200_periphck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91rm9200_pmc->phws[at91rm9200_periphck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91rm9200_pmc); + + return; + +err_free: + pmc_data_free(at91rm9200_pmc); +} +/* + * While the TCB can be used as the clocksource, the system timer is most likely + * to be used instead. However, the pinctrl driver doesn't support probe + * deferring properly. Once this is fixed, this can be switched to a platform + * driver. + */ +CLK_OF_DECLARE_DRIVER(at91rm9200_pmc, "atmel,at91rm9200-pmc", + at91rm9200_pmc_setup); diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c new file mode 100644 index 000000000000..38a7d2d2df0c --- /dev/null +++ b/drivers/clk/at91/at91sam9g45.c @@ -0,0 +1,220 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 0, .max = 133333333 }, + .divisors = { 1, 2, 4, 3 }, +}; + +static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 }; + +static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 }; + +static const struct clk_range plla_outputs[] = { + { .min = 745000000, .max = 800000000 }, + { .min = 695000000, .max = 750000000 }, + { .min = 645000000, .max = 700000000 }, + { .min = 595000000, .max = 650000000 }, + { .min = 545000000, .max = 600000000 }, + { .min = 495000000, .max = 555000000 }, + { .min = 445000000, .max = 500000000 }, + { .min = 400000000, .max = 450000000 }, +}; + +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 2000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .icpll = plla_icpll, + .out = plla_out, +}; + +static const struct { + char *n; + char *p; + u8 id; +} at91sam9g45_systemck[] = { + { .n = "ddrck", .p = "masterck", .id = 2 }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, +}; + +static const struct clk_pcr_layout at91sam9g45_pcr_layout = { + .offset = 0x10c, + .cmd = BIT(12), + .pid_mask = GENMASK(5, 0), + .div_mask = GENMASK(17, 16), +}; + +struct pck { + char *n; + u8 id; +}; + +static const struct pck at91sam9g45_periphck[] = { + { .n = "pioA_clk", .id = 2, }, + { .n = "pioB_clk", .id = 3, }, + { .n = "pioC_clk", .id = 4, }, + { .n = "pioDE_clk", .id = 5, }, + { .n = "trng_clk", .id = 6, }, + { .n = "usart0_clk", .id = 7, }, + { .n = "usart1_clk", .id = 8, }, + { .n = "usart2_clk", .id = 9, }, + { .n = "usart3_clk", .id = 10, }, + { .n = "mci0_clk", .id = 11, }, + { .n = "twi0_clk", .id = 12, }, + { .n = "twi1_clk", .id = 13, }, + { .n = "spi0_clk", .id = 14, }, + { .n = "spi1_clk", .id = 15, }, + { .n = "ssc0_clk", .id = 16, }, + { .n = "ssc1_clk", .id = 17, }, + { .n = "tcb0_clk", .id = 18, }, + { .n = "pwm_clk", .id = 19, }, + { .n = "adc_clk", .id = 20, }, + { .n = "dma0_clk", .id = 21, }, + { .n = "uhphs_clk", .id = 22, }, + { .n = "lcd_clk", .id = 23, }, + { .n = "ac97_clk", .id = 24, }, + { .n = "macb0_clk", .id = 25, }, + { .n = "isi_clk", .id = 26, }, + { .n = "udphs_clk", .id = 27, }, + { .n = "aestdessha_clk", .id = 28, }, + { .n = "mci1_clk", .id = 29, 
}, + { .n = "vdec_clk", .id = 30, }, +}; + +static void __init at91sam9g45_pmc_setup(struct device_node *np) +{ + const char *slck_name, *mainxtal_name; + struct pmc_data *at91sam9g45_pmc; + const char *parent_names[6]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_clk"); + if (i < 0) + return; + + slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + at91sam9g45_pmc = pmc_data_allocate(PMC_MAIN + 1, + nck(at91sam9g45_systemck), + nck(at91sam9g45_periphck), 0); + if (!at91sam9g45_pmc) + return; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_rm9200_main(regmap, "mainck", "main_osc"); + if (IS_ERR(hw)) + goto err_free; + + at91sam9g45_pmc->chws[PMC_MAIN] = hw; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &at91rm9200_pll_layout, &plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); + if (IS_ERR(hw)) + goto err_free; + + at91sam9g45_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91rm9200_master_layout, + &mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + at91sam9g45_pmc->chws[PMC_MCK] = hw; + + parent_names[0] = "plladivck"; + parent_names[1] = "utmick"; + hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + parent_names[4] = "masterck"; + for (i = 0; i < 2; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91sam9g45_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9g45_systemck); i++) { + hw = at91_clk_register_system(regmap, at91sam9g45_systemck[i].n, + at91sam9g45_systemck[i].p, + at91sam9g45_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9g45_pmc->shws[at91sam9g45_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9g45_periphck); i++) { + hw = at91_clk_register_peripheral(regmap, + at91sam9g45_periphck[i].n, + "masterck", + at91sam9g45_periphck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9g45_pmc->phws[at91sam9g45_periphck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9g45_pmc); + + return; + +err_free: + pmc_data_free(at91sam9g45_pmc); +} +/* + * The TCB is used as the clocksource so its clock is needed early. This means + * this can't be a platform driver. 
+ */ +CLK_OF_DECLARE_DRIVER(at91sam9g45_pmc, "atmel,at91sam9g45-pmc", + at91sam9g45_pmc_setup); diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c new file mode 100644 index 000000000000..8bb39d2ba84b --- /dev/null +++ b/drivers/clk/at91/at91sam9n12.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 0, .max = 133333333 }, + .divisors = { 1, 2, 4, 3 }, + .have_div3_pres = 1, +}; + +static u8 plla_out[] = { 0, 1, 2, 3, 0, 1, 2, 3 }; + +static u16 plla_icpll[] = { 0, 0, 0, 0, 1, 1, 1, 1 }; + +static const struct clk_range plla_outputs[] = { + { .min = 745000000, .max = 800000000 }, + { .min = 695000000, .max = 750000000 }, + { .min = 645000000, .max = 700000000 }, + { .min = 595000000, .max = 650000000 }, + { .min = 545000000, .max = 600000000 }, + { .min = 495000000, .max = 555000000 }, + { .min = 445000000, .max = 500000000 }, + { .min = 400000000, .max = 450000000 }, +}; + +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 2000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .icpll = plla_icpll, + .out = plla_out, +}; + +static u8 pllb_out[] = { 0 }; + +static const struct clk_range pllb_outputs[] = { + { .min = 30000000, .max = 100000000 }, +}; + +static const struct clk_pll_characteristics pllb_characteristics = { + .input = { .min = 2000000, .max = 32000000 }, + .num_output = ARRAY_SIZE(pllb_outputs), + .output = pllb_outputs, + .out = pllb_out, +}; + +static const struct { + char *n; + char *p; + u8 id; +} at91sam9n12_systemck[] = { + { .n = "ddrck", .p = "masterck", .id = 2 }, + { .n = "lcdck", .p = "masterck", .id = 3 }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, +}; + +static const struct clk_pcr_layout at91sam9n12_pcr_layout = { + .offset = 0x10c, + .cmd = BIT(12), + .pid_mask = GENMASK(5, 0), + .div_mask = GENMASK(17, 16), +}; + +struct pck { + char *n; + u8 id; +}; + +static const struct pck at91sam9n12_periphck[] = { + { .n = "pioAB_clk", .id = 2, }, + { .n = "pioCD_clk", .id = 3, }, + { .n = "fuse_clk", .id = 4, }, + { .n = "usart0_clk", .id = 5, }, + { .n = "usart1_clk", .id = 6, }, + { .n = "usart2_clk", .id = 7, }, + { .n = "usart3_clk", .id = 8, }, + { .n = "twi0_clk", .id = 9, }, + { .n = "twi1_clk", .id = 10, }, + { .n = "mci0_clk", .id = 12, }, + { .n = "spi0_clk", .id = 13, }, + { .n = "spi1_clk", .id = 14, }, + { .n = "uart0_clk", .id = 15, }, + { .n = "uart1_clk", .id = 16, }, + { .n = "tcb_clk", .id = 17, }, + { .n = "pwm_clk", .id = 18, }, + { .n = "adc_clk", .id = 19, }, + { .n = "dma0_clk", .id = 20, }, + { .n = "uhphs_clk", .id = 22, }, + { .n = "udphs_clk", .id = 23, }, + { .n = "lcdc_clk", .id = 25, }, + { .n = "sha_clk", .id = 27, }, + { .n = "ssc0_clk", .id = 28, }, + { .n = "aes_clk", .id = 29, }, + { .n = "trng_clk", .id = 30, }, +}; + +static void __init at91sam9n12_pmc_setup(struct device_node *np) +{ + struct clk_range range = CLK_RANGE(0, 0); + const char *slck_name, *mainxtal_name; + struct pmc_data *at91sam9n12_pmc; + const char *parent_names[6]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", 
"slow_clk"); + if (i < 0) + return; + + slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + at91sam9n12_pmc = pmc_data_allocate(PMC_MAIN + 1, + nck(at91sam9n12_systemck), 31, 0); + if (!at91sam9n12_pmc) + return; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 50000000); + if (IS_ERR(hw)) + goto err_free; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + at91sam9n12_pmc->chws[PMC_MAIN] = hw; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &at91rm9200_pll_layout, &plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_pll(regmap, "pllbck", "mainck", 1, + &at91rm9200_pll_layout, &pllb_characteristics); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "pllbck"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91sam9x5_master_layout, + &mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + at91sam9n12_pmc->chws[PMC_MCK] = hw; + + hw = at91sam9n12_clk_register_usb(regmap, "usbck", "pllbck"); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "pllbck"; + parent_names[4] = "masterck"; + for (i = 0; i < 2; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91sam9x5_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9n12_systemck); i++) { + hw = at91_clk_register_system(regmap, at91sam9n12_systemck[i].n, + at91sam9n12_systemck[i].p, + at91sam9n12_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + at91sam9n12_pmc->shws[at91sam9n12_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(at91sam9n12_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + &at91sam9n12_pcr_layout, + at91sam9n12_periphck[i].n, + "masterck", + at91sam9n12_periphck[i].id, + &range); + if (IS_ERR(hw)) + goto err_free; + + at91sam9n12_pmc->phws[at91sam9n12_periphck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, at91sam9n12_pmc); + + return; + +err_free: + pmc_data_free(at91sam9n12_pmc); +} +/* + * The TCB is used as the clocksource so its clock is needed early. This means + * this can't be a platform driver. 
+ */ +CLK_OF_DECLARE_DRIVER(at91sam9n12_pmc, "atmel,at91sam9n12-pmc", + at91sam9n12_pmc_setup); diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c index 22aede42a336..31d5c45e30d7 100644 --- a/drivers/clk/at91/clk-usb.c +++ b/drivers/clk/at91/clk-usb.c @@ -25,6 +25,7 @@ struct at91sam9x5_clk_usb { struct clk_hw hw; struct regmap *regmap; u32 usbs_mask; + u8 num_parents; }; #define to_at91sam9x5_clk_usb(hw) \ @@ -75,6 +76,9 @@ static int at91sam9x5_clk_usb_determine_rate(struct clk_hw *hw, tmp_parent_rate = req->rate * div; tmp_parent_rate = clk_hw_round_rate(parent, tmp_parent_rate); + if (!tmp_parent_rate) + continue; + tmp_rate = DIV_ROUND_CLOSEST(tmp_parent_rate, div); if (tmp_rate < req->rate) tmp_diff = req->rate - tmp_rate; @@ -107,7 +111,7 @@ static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) { struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); - if (index > 1) + if (index >= usb->num_parents) return -EINVAL; regmap_update_bits(usb->regmap, AT91_PMC_USB, usb->usbs_mask, index); @@ -211,7 +215,8 @@ _at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, usb->hw.init = &init; usb->regmap = regmap; - usb->usbs_mask = SAM9X5_USBS_MASK; + usb->usbs_mask = usbs_mask; + usb->num_parents = num_parents; hw = &usb->hw; ret = clk_hw_register(NULL, &usb->hw); diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c index 77398aefeb6d..cc19e8fb83be 100644 --- a/drivers/clk/at91/sam9x60.c +++ b/drivers/clk/at91/sam9x60.c @@ -124,7 +124,6 @@ static const struct { char *n; u8 id; struct clk_range r; - bool pll; } sam9x60_gck[] = { { .n = "flex0_gclk", .id = 5, }, { .n = "flex1_gclk", .id = 6, }, @@ -144,11 +143,9 @@ static const struct { { .n = "sdmmc1_gclk", .id = 26, .r = { .min = 0, .max = 105000000 }, }, { .n = "flex11_gclk", .id = 32, }, { .n = "flex12_gclk", .id = 33, }, - { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, - .pll = true, }, + { .n = "i2s_gclk", .id = 34, .r = { .min = 0, .max = 105000000 }, }, { .n = "pit64b_gclk", .id = 37, }, - { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, - .pll = true, }, + { .n = "classd_gclk", .id = 42, .r = { .min = 0, .max = 100000000 }, }, { .n = "tcb1_gclk", .id = 45, }, { .n = "dbgu_gclk", .id = 47, }, }; @@ -237,9 +234,8 @@ static void __init sam9x60_pmc_setup(struct device_node *np) parent_names[0] = "pllack"; parent_names[1] = "upllck"; - parent_names[2] = "mainck"; - parent_names[3] = "mainck"; - hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 4); + parent_names[2] = "main_osc"; + hw = sam9x60_clk_register_usb(regmap, "usbck", parent_names, 3); if (IS_ERR(hw)) goto err_free; @@ -290,7 +286,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np) sam9x60_gck[i].n, parent_names, 6, sam9x60_gck[i].id, - sam9x60_gck[i].pll, + false, &sam9x60_gck[i].r); if (IS_ERR(hw)) goto err_free; diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c new file mode 100644 index 000000000000..88506f909c08 --- /dev/null +++ b/drivers/clk/at91/sama5d3.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/at91.h> + +#include "pmc.h" + +static const struct clk_master_characteristics mck_characteristics = { + .output = { .min = 0, .max = 166000000 }, + .divisors = { 1, 2, 4, 3 }, +}; + +static u8 plla_out[] = { 0 }; + +static u16 plla_icpll[] = { 0 }; + +static const struct clk_range plla_outputs[] = { + 
{ .min = 400000000, .max = 1000000000 }, +}; + +static const struct clk_pll_characteristics plla_characteristics = { + .input = { .min = 8000000, .max = 50000000 }, + .num_output = ARRAY_SIZE(plla_outputs), + .output = plla_outputs, + .icpll = plla_icpll, + .out = plla_out, +}; + +static const struct clk_pcr_layout sama5d3_pcr_layout = { + .offset = 0x10c, + .cmd = BIT(12), + .pid_mask = GENMASK(6, 0), + .div_mask = GENMASK(17, 16), +}; + +static const struct { + char *n; + char *p; + u8 id; +} sama5d3_systemck[] = { + { .n = "ddrck", .p = "masterck", .id = 2 }, + { .n = "lcdck", .p = "masterck", .id = 3 }, + { .n = "smdck", .p = "smdclk", .id = 4 }, + { .n = "uhpck", .p = "usbck", .id = 6 }, + { .n = "udpck", .p = "usbck", .id = 7 }, + { .n = "pck0", .p = "prog0", .id = 8 }, + { .n = "pck1", .p = "prog1", .id = 9 }, + { .n = "pck2", .p = "prog2", .id = 10 }, +}; + +static const struct { + char *n; + u8 id; + struct clk_range r; +} sama5d3_periphck[] = { + { .n = "dbgu_clk", .id = 2, }, + { .n = "hsmc_clk", .id = 5, }, + { .n = "pioA_clk", .id = 6, }, + { .n = "pioB_clk", .id = 7, }, + { .n = "pioC_clk", .id = 8, }, + { .n = "pioD_clk", .id = 9, }, + { .n = "pioE_clk", .id = 10, }, + { .n = "usart0_clk", .id = 12, .r = { .min = 0, .max = 83000000 }, }, + { .n = "usart1_clk", .id = 13, .r = { .min = 0, .max = 83000000 }, }, + { .n = "usart2_clk", .id = 14, .r = { .min = 0, .max = 83000000 }, }, + { .n = "usart3_clk", .id = 15, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart0_clk", .id = 16, .r = { .min = 0, .max = 83000000 }, }, + { .n = "uart1_clk", .id = 17, .r = { .min = 0, .max = 83000000 }, }, + { .n = "twi0_clk", .id = 18, .r = { .min = 0, .max = 41500000 }, }, + { .n = "twi1_clk", .id = 19, .r = { .min = 0, .max = 41500000 }, }, + { .n = "twi2_clk", .id = 20, .r = { .min = 0, .max = 41500000 }, }, + { .n = "mci0_clk", .id = 21, }, + { .n = "mci1_clk", .id = 22, }, + { .n = "mci2_clk", .id = 23, }, + { .n = "spi0_clk", .id = 24, .r = { .min = 0, .max = 166000000 }, }, + { .n = "spi1_clk", .id = 25, .r = { .min = 0, .max = 166000000 }, }, + { .n = "tcb0_clk", .id = 26, .r = { .min = 0, .max = 166000000 }, }, + { .n = "tcb1_clk", .id = 27, .r = { .min = 0, .max = 166000000 }, }, + { .n = "pwm_clk", .id = 28, }, + { .n = "adc_clk", .id = 29, .r = { .min = 0, .max = 83000000 }, }, + { .n = "dma0_clk", .id = 30, }, + { .n = "dma1_clk", .id = 31, }, + { .n = "uhphs_clk", .id = 32, }, + { .n = "udphs_clk", .id = 33, }, + { .n = "macb0_clk", .id = 34, }, + { .n = "macb1_clk", .id = 35, }, + { .n = "lcdc_clk", .id = 36, }, + { .n = "isi_clk", .id = 37, }, + { .n = "ssc0_clk", .id = 38, .r = { .min = 0, .max = 83000000 }, }, + { .n = "ssc1_clk", .id = 39, .r = { .min = 0, .max = 83000000 }, }, + { .n = "can0_clk", .id = 40, .r = { .min = 0, .max = 83000000 }, }, + { .n = "can1_clk", .id = 41, .r = { .min = 0, .max = 83000000 }, }, + { .n = "sha_clk", .id = 42, }, + { .n = "aes_clk", .id = 43, }, + { .n = "tdes_clk", .id = 44, }, + { .n = "trng_clk", .id = 45, }, + { .n = "fuse_clk", .id = 48, }, + { .n = "mpddr_clk", .id = 49, }, +}; + +static void __init sama5d3_pmc_setup(struct device_node *np) +{ + const char *slck_name, *mainxtal_name; + struct pmc_data *sama5d3_pmc; + const char *parent_names[5]; + struct regmap *regmap; + struct clk_hw *hw; + int i; + bool bypass; + + i = of_property_match_string(np, "clock-names", "slow_clk"); + if (i < 0) + return; + + slck_name = of_clk_get_parent_name(np, i); + + i = of_property_match_string(np, "clock-names", "main_xtal"); + if (i < 0) + 
return; + mainxtal_name = of_clk_get_parent_name(np, i); + + regmap = syscon_node_to_regmap(np); + if (IS_ERR(regmap)) + return; + + sama5d3_pmc = pmc_data_allocate(PMC_MAIN + 1, + nck(sama5d3_systemck), + nck(sama5d3_periphck), 0); + if (!sama5d3_pmc) + return; + + hw = at91_clk_register_main_rc_osc(regmap, "main_rc_osc", 12000000, + 50000000); + if (IS_ERR(hw)) + goto err_free; + + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + + hw = at91_clk_register_main_osc(regmap, "main_osc", mainxtal_name, + bypass); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = "main_rc_osc"; + parent_names[1] = "main_osc"; + hw = at91_clk_register_sam9x5_main(regmap, "mainck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_pll(regmap, "pllack", "mainck", 0, + &sama5d3_pll_layout, &plla_characteristics); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_plldiv(regmap, "plladivck", "pllack"); + if (IS_ERR(hw)) + goto err_free; + + hw = at91_clk_register_utmi(regmap, NULL, "utmick", "mainck"); + if (IS_ERR(hw)) + goto err_free; + + sama5d3_pmc->chws[PMC_UTMI] = hw; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + hw = at91_clk_register_master(regmap, "masterck", 4, parent_names, + &at91sam9x5_master_layout, + &mck_characteristics); + if (IS_ERR(hw)) + goto err_free; + + sama5d3_pmc->chws[PMC_MCK] = hw; + + parent_names[0] = "plladivck"; + parent_names[1] = "utmick"; + hw = at91sam9x5_clk_register_usb(regmap, "usbck", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + hw = at91sam9x5_clk_register_smd(regmap, "smdclk", parent_names, 2); + if (IS_ERR(hw)) + goto err_free; + + parent_names[0] = slck_name; + parent_names[1] = "mainck"; + parent_names[2] = "plladivck"; + parent_names[3] = "utmick"; + parent_names[4] = "masterck"; + for (i = 0; i < 3; i++) { + char name[6]; + + snprintf(name, sizeof(name), "prog%d", i); + + hw = at91_clk_register_programmable(regmap, name, + parent_names, 5, i, + &at91sam9x5_programmable_layout); + if (IS_ERR(hw)) + goto err_free; + } + + for (i = 0; i < ARRAY_SIZE(sama5d3_systemck); i++) { + hw = at91_clk_register_system(regmap, sama5d3_systemck[i].n, + sama5d3_systemck[i].p, + sama5d3_systemck[i].id); + if (IS_ERR(hw)) + goto err_free; + + sama5d3_pmc->shws[sama5d3_systemck[i].id] = hw; + } + + for (i = 0; i < ARRAY_SIZE(sama5d3_periphck); i++) { + hw = at91_clk_register_sam9x5_peripheral(regmap, &pmc_pcr_lock, + &sama5d3_pcr_layout, + sama5d3_periphck[i].n, + "masterck", + sama5d3_periphck[i].id, + &sama5d3_periphck[i].r); + if (IS_ERR(hw)) + goto err_free; + + sama5d3_pmc->phws[sama5d3_periphck[i].id] = hw; + } + + of_clk_add_hw_provider(np, of_clk_hw_pmc_get, sama5d3_pmc); + + return; + +err_free: + pmc_data_free(sama5d3_pmc); +} +/* + * The TCB is used as the clocksource so its clock is needed early. This means + * this can't be a platform driver. 
+ */ +CLK_OF_DECLARE_DRIVER(sama5d3_pmc, "atmel,sama5d3-pmc", sama5d3_pmc_setup); diff --git a/drivers/clk/clk-si5341.c b/drivers/clk/clk-si5341.c index 6e780c2a9e6b..3c228b018116 100644 --- a/drivers/clk/clk-si5341.c +++ b/drivers/clk/clk-si5341.c @@ -16,6 +16,8 @@ #include <linux/slab.h> #include <asm/unaligned.h> +#define SI5341_NUM_INPUTS 4 + #define SI5341_MAX_NUM_OUTPUTS 10 #define SI5340_MAX_NUM_OUTPUTS 4 @@ -56,8 +58,8 @@ struct clk_si5341 { struct i2c_client *i2c_client; struct clk_si5341_synth synth[SI5341_NUM_SYNTH]; struct clk_si5341_output clk[SI5341_MAX_NUM_OUTPUTS]; - struct clk *pxtal; - const char *pxtal_name; + struct clk *input_clk[SI5341_NUM_INPUTS]; + const char *input_clk_name[SI5341_NUM_INPUTS]; const u16 *reg_output_offset; const u16 *reg_rdiv_offset; u64 freq_vco; /* 13500–14256 MHz */ @@ -78,10 +80,25 @@ struct clk_si5341_output_config { #define SI5341_DEVICE_REV 0x0005 #define SI5341_STATUS 0x000C #define SI5341_SOFT_RST 0x001C +#define SI5341_IN_SEL 0x0021 +#define SI5341_XAXB_CFG 0x090E +#define SI5341_IN_EN 0x0949 +#define SI5341_INX_TO_PFD_EN 0x094A + +/* Input selection */ +#define SI5341_IN_SEL_MASK 0x06 +#define SI5341_IN_SEL_SHIFT 1 +#define SI5341_IN_SEL_REGCTRL 0x01 +#define SI5341_INX_TO_PFD_SHIFT 4 + +/* XTAL config bits */ +#define SI5341_XAXB_CFG_EXTCLK_EN BIT(0) +#define SI5341_XAXB_CFG_PDNB BIT(1) /* Input dividers (48-bit) */ #define SI5341_IN_PDIV(x) (0x0208 + ((x) * 10)) #define SI5341_IN_PSET(x) (0x020E + ((x) * 10)) +#define SI5341_PX_UPD 0x0230 /* PLL configuration */ #define SI5341_PLL_M_NUM 0x0235 @@ -120,6 +137,10 @@ struct si5341_reg_default { u8 value; }; +static const char * const si5341_input_clock_names[] = { + "in0", "in1", "in2", "xtal" +}; + /* Output configuration registers 0..9 are not quite logically organized */ static const u16 si5341_reg_output_offset[] = { 0x0108, @@ -390,7 +411,112 @@ static unsigned long si5341_clk_recalc_rate(struct clk_hw *hw, return (unsigned long)res; } +static int si5341_clk_get_selected_input(struct clk_si5341 *data) +{ + int err; + u32 val; + + err = regmap_read(data->regmap, SI5341_IN_SEL, &val); + if (err < 0) + return err; + + return (val & SI5341_IN_SEL_MASK) >> SI5341_IN_SEL_SHIFT; +} + +static u8 si5341_clk_get_parent(struct clk_hw *hw) +{ + struct clk_si5341 *data = to_clk_si5341(hw); + int res = si5341_clk_get_selected_input(data); + + if (res < 0) + return 0; /* Apparently we cannot report errors */ + + return res; +} + +static int si5341_clk_reparent(struct clk_si5341 *data, u8 index) +{ + int err; + u8 val; + + val = (index << SI5341_IN_SEL_SHIFT) & SI5341_IN_SEL_MASK; + /* Enable register-based input selection */ + val |= SI5341_IN_SEL_REGCTRL; + + err = regmap_update_bits(data->regmap, + SI5341_IN_SEL, SI5341_IN_SEL_REGCTRL | SI5341_IN_SEL_MASK, val); + if (err < 0) + return err; + + if (index < 3) { + /* Enable input buffer for selected input */ + err = regmap_update_bits(data->regmap, + SI5341_IN_EN, 0x07, BIT(index)); + if (err < 0) + return err; + + /* Enables the input to phase detector */ + err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN, + 0x7 << SI5341_INX_TO_PFD_SHIFT, + BIT(index + SI5341_INX_TO_PFD_SHIFT)); + if (err < 0) + return err; + + /* Power down XTAL oscillator and buffer */ + err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG, + SI5341_XAXB_CFG_PDNB, 0); + if (err < 0) + return err; + + /* + * Set the P divider to "1". 
There's no explanation in the + * datasheet of these registers, but the clockbuilder software + * programs a "1" when the input is being used. + */ + err = regmap_write(data->regmap, SI5341_IN_PDIV(index), 1); + if (err < 0) + return err; + + err = regmap_write(data->regmap, SI5341_IN_PSET(index), 1); + if (err < 0) + return err; + + /* Set update PDIV bit */ + err = regmap_write(data->regmap, SI5341_PX_UPD, BIT(index)); + if (err < 0) + return err; + } else { + /* Disable all input buffers */ + err = regmap_update_bits(data->regmap, SI5341_IN_EN, 0x07, 0); + if (err < 0) + return err; + + /* Disable input to phase detector */ + err = regmap_update_bits(data->regmap, SI5341_INX_TO_PFD_EN, + 0x7 << SI5341_INX_TO_PFD_SHIFT, 0); + if (err < 0) + return err; + + /* Power up XTAL oscillator and buffer */ + err = regmap_update_bits(data->regmap, SI5341_XAXB_CFG, + SI5341_XAXB_CFG_PDNB, SI5341_XAXB_CFG_PDNB); + if (err < 0) + return err; + } + + return 0; +} + +static int si5341_clk_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_si5341 *data = to_clk_si5341(hw); + + return si5341_clk_reparent(data, index); +} + static const struct clk_ops si5341_clk_ops = { + .set_parent = si5341_clk_set_parent, + .get_parent = si5341_clk_get_parent, .recalc_rate = si5341_clk_recalc_rate, }; @@ -985,7 +1111,8 @@ static const struct regmap_range si5341_regmap_volatile_range[] = { regmap_reg_range(0x000C, 0x0012), /* Status */ regmap_reg_range(0x001C, 0x001E), /* reset, finc/fdec */ regmap_reg_range(0x00E2, 0x00FE), /* NVM, interrupts, device ready */ - /* Update bits for synth config */ + /* Update bits for P divider and synth config */ + regmap_reg_range(SI5341_PX_UPD, SI5341_PX_UPD), regmap_reg_range(SI5341_SYNTH_N_UPD(0), SI5341_SYNTH_N_UPD(0)), regmap_reg_range(SI5341_SYNTH_N_UPD(1), SI5341_SYNTH_N_UPD(1)), regmap_reg_range(SI5341_SYNTH_N_UPD(2), SI5341_SYNTH_N_UPD(2)), @@ -1122,6 +1249,7 @@ static int si5341_initialize_pll(struct clk_si5341 *data) struct device_node *np = data->i2c_client->dev.of_node; u32 m_num = 0; u32 m_den = 0; + int sel; if (of_property_read_u32(np, "silabs,pll-m-num", &m_num)) { dev_err(&data->i2c_client->dev, @@ -1135,7 +1263,11 @@ static int si5341_initialize_pll(struct clk_si5341 *data) if (!m_num || !m_den) { dev_err(&data->i2c_client->dev, "PLL configuration invalid, assume 14GHz\n"); - m_den = clk_get_rate(data->pxtal) / 10; + sel = si5341_clk_get_selected_input(data); + if (sel < 0) + return sel; + + m_den = clk_get_rate(data->input_clk[sel]) / 10; m_num = 1400000000; } @@ -1143,11 +1275,52 @@ static int si5341_initialize_pll(struct clk_si5341 *data) SI5341_PLL_M_NUM, m_num, m_den); } +static int si5341_clk_select_active_input(struct clk_si5341 *data) +{ + int res; + int err; + int i; + + res = si5341_clk_get_selected_input(data); + if (res < 0) + return res; + + /* If the current register setting is invalid, pick the first input */ + if (!data->input_clk[res]) { + dev_dbg(&data->i2c_client->dev, + "Input %d not connected, rerouting\n", res); + res = -ENODEV; + for (i = 0; i < SI5341_NUM_INPUTS; ++i) { + if (data->input_clk[i]) { + res = i; + break; + } + } + if (res < 0) { + dev_err(&data->i2c_client->dev, + "No clock input available\n"); + return res; + } + } + + /* Make sure the selected clock is also enabled and routed */ + err = si5341_clk_reparent(data, res); + if (err < 0) + return err; + + err = clk_prepare_enable(data->input_clk[res]); + if (err < 0) + return err; + + return res; +} + static int si5341_probe(struct i2c_client *client, const struct i2c_device_id 
*id) { struct clk_si5341 *data; struct clk_init_data init; + struct clk *input; const char *root_clock_name; const char *synth_clock_names[SI5341_NUM_SYNTH]; int err; @@ -1161,12 +1334,16 @@ static int si5341_probe(struct i2c_client *client, data->i2c_client = client; - data->pxtal = devm_clk_get(&client->dev, "xtal"); - if (IS_ERR(data->pxtal)) { - if (PTR_ERR(data->pxtal) == -EPROBE_DEFER) - return -EPROBE_DEFER; - - dev_err(&client->dev, "Missing xtal clock input\n"); + for (i = 0; i < SI5341_NUM_INPUTS; ++i) { + input = devm_clk_get(&client->dev, si5341_input_clock_names[i]); + if (IS_ERR(input)) { + if (PTR_ERR(input) == -EPROBE_DEFER) + return -EPROBE_DEFER; + data->input_clk_name[i] = si5341_input_clock_names[i]; + } else { + data->input_clk[i] = input; + data->input_clk_name[i] = __clk_get_name(input); + } } err = si5341_dt_parse_dt(client, config); @@ -1188,9 +1365,6 @@ static int si5341_probe(struct i2c_client *client, if (err < 0) return err; - /* "Activate" the xtal (usually a fixed clock) */ - clk_prepare_enable(data->pxtal); - if (of_property_read_bool(client->dev.of_node, "silabs,reprogram")) { initialization_required = true; } else { @@ -1223,7 +1397,14 @@ static int si5341_probe(struct i2c_client *client, ARRAY_SIZE(si5341_reg_defaults)); if (err < 0) return err; + } + + /* Input must be up and running at this point */ + err = si5341_clk_select_active_input(data); + if (err < 0) + return err; + if (initialization_required) { /* PLL configuration is required */ err = si5341_initialize_pll(data); if (err < 0) @@ -1231,9 +1412,8 @@ static int si5341_probe(struct i2c_client *client, } /* Register the PLL */ - data->pxtal_name = __clk_get_name(data->pxtal); - init.parent_names = &data->pxtal_name; - init.num_parents = 1; /* For now, only XTAL input supported */ + init.parent_names = data->input_clk_name; + init.num_parents = SI5341_NUM_INPUTS; init.ops = &si5341_clk_ops; init.flags = 0; data->hw.init = &init; diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 95adf6c6db3d..39c59f063aa0 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -488,7 +488,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw) } EXPORT_SYMBOL_GPL(clk_hw_get_rate); -static unsigned long __clk_get_accuracy(struct clk_core *core) +static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core) { if (!core) return 0; @@ -774,7 +774,7 @@ static void clk_core_rate_restore_protect(struct clk_core *core, int count) * clk_rate_exclusive_get - get exclusivity over the clk rate control * @clk: the clk over which the exclusity of rate control is requested * - * clk_rate_exlusive_get() begins a critical section during which a clock + * clk_rate_exclusive_get() begins a critical section during which a clock * consumer cannot tolerate any other consumer making any operation on the * clock which could result in a rate change or rate glitch. 
Exclusive clocks * cannot have their rate changed, either directly or indirectly due to changes @@ -1517,18 +1517,12 @@ static void __clk_recalc_accuracies(struct clk_core *core) __clk_recalc_accuracies(child); } -static long clk_core_get_accuracy(struct clk_core *core) +static long clk_core_get_accuracy_recalc(struct clk_core *core) { - unsigned long accuracy; - - clk_prepare_lock(); if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE)) __clk_recalc_accuracies(core); - accuracy = __clk_get_accuracy(core); - clk_prepare_unlock(); - - return accuracy; + return clk_core_get_accuracy_no_lock(core); } /** @@ -1542,10 +1536,16 @@ static long clk_core_get_accuracy(struct clk_core *core) */ long clk_get_accuracy(struct clk *clk) { + long accuracy; + if (!clk) return 0; - return clk_core_get_accuracy(clk->core); + clk_prepare_lock(); + accuracy = clk_core_get_accuracy_recalc(clk->core); + clk_prepare_unlock(); + + return accuracy; } EXPORT_SYMBOL_GPL(clk_get_accuracy); @@ -1599,19 +1599,12 @@ static void __clk_recalc_rates(struct clk_core *core, unsigned long msg) __clk_recalc_rates(child, msg); } -static unsigned long clk_core_get_rate(struct clk_core *core) +static unsigned long clk_core_get_rate_recalc(struct clk_core *core) { - unsigned long rate; - - clk_prepare_lock(); - if (core && (core->flags & CLK_GET_RATE_NOCACHE)) __clk_recalc_rates(core, 0); - rate = clk_core_get_rate_nolock(core); - clk_prepare_unlock(); - - return rate; + return clk_core_get_rate_nolock(core); } /** @@ -1624,10 +1617,16 @@ static unsigned long clk_core_get_rate(struct clk_core *core) */ unsigned long clk_get_rate(struct clk *clk) { + unsigned long rate; + if (!clk) return 0; - return clk_core_get_rate(clk->core); + clk_prepare_lock(); + rate = clk_core_get_rate_recalc(clk->core); + clk_prepare_unlock(); + + return rate; } EXPORT_SYMBOL_GPL(clk_get_rate); @@ -2660,12 +2659,14 @@ static int clk_core_get_phase(struct clk_core *core) { int ret; - clk_prepare_lock(); + lockdep_assert_held(&prepare_lock); + if (!core->ops->get_phase) + return 0; + /* Always try to update cached phase if possible */ - if (core->ops->get_phase) - core->phase = core->ops->get_phase(core->hw); - ret = core->phase; - clk_prepare_unlock(); + ret = core->ops->get_phase(core->hw); + if (ret >= 0) + core->phase = ret; return ret; } @@ -2679,10 +2680,16 @@ static int clk_core_get_phase(struct clk_core *core) */ int clk_get_phase(struct clk *clk) { + int ret; + if (!clk) return 0; - return clk_core_get_phase(clk->core); + clk_prepare_lock(); + ret = clk_core_get_phase(clk->core); + clk_prepare_unlock(); + + return ret; } EXPORT_SYMBOL_GPL(clk_get_phase); @@ -2896,13 +2903,22 @@ static struct hlist_head *orphan_list[] = { static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, int level) { - seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu %5d %6d\n", + int phase; + + seq_printf(s, "%*s%-*s %7d %8d %8d %11lu %10lu ", level * 3 + 1, "", 30 - level * 3, c->name, c->enable_count, c->prepare_count, c->protect_count, - clk_core_get_rate(c), clk_core_get_accuracy(c), - clk_core_get_phase(c), - clk_core_get_scaled_duty_cycle(c, 100000)); + clk_core_get_rate_recalc(c), + clk_core_get_accuracy_recalc(c)); + + phase = clk_core_get_phase(c); + if (phase >= 0) + seq_printf(s, "%5d", phase); + else + seq_puts(s, "-----"); + + seq_printf(s, " %6d\n", clk_core_get_scaled_duty_cycle(c, 100000)); } static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, @@ -2939,6 +2955,7 @@ DEFINE_SHOW_ATTRIBUTE(clk_summary); static void 
clk_dump_one(struct seq_file *s, struct clk_core *c, int level) { + int phase; unsigned long min_rate, max_rate; clk_core_get_boundaries(c, &min_rate, &max_rate); @@ -2948,11 +2965,13 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) seq_printf(s, "\"enable_count\": %d,", c->enable_count); seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); seq_printf(s, "\"protect_count\": %d,", c->protect_count); - seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c)); + seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c)); seq_printf(s, "\"min_rate\": %lu,", min_rate); seq_printf(s, "\"max_rate\": %lu,", max_rate); - seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c)); - seq_printf(s, "\"phase\": %d,", clk_core_get_phase(c)); + seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c)); + phase = clk_core_get_phase(c); + if (phase >= 0) + seq_printf(s, "\"phase\": %d,", phase); seq_printf(s, "\"duty_cycle\": %u", clk_core_get_scaled_duty_cycle(c, 100000)); } @@ -3323,7 +3342,9 @@ static void clk_core_reparent_orphans_nolock(void) static int __clk_core_init(struct clk_core *core) { int ret; + struct clk_core *parent; unsigned long rate; + int phase; if (!core) return -EINVAL; @@ -3394,7 +3415,7 @@ static int __clk_core_init(struct clk_core *core) goto out; } - core->parent = __clk_init_parent(core); + parent = core->parent = __clk_init_parent(core); /* * Populate core->parent if parent has already been clk_core_init'd. If @@ -3406,10 +3427,9 @@ static int __clk_core_init(struct clk_core *core) * clocks and re-parent any that are children of the clock currently * being clk_init'd. */ - if (core->parent) { - hlist_add_head(&core->child_node, - &core->parent->children); - core->orphan = core->parent->orphan; + if (parent) { + hlist_add_head(&core->child_node, &parent->children); + core->orphan = parent->orphan; } else if (!core->num_parents) { hlist_add_head(&core->child_node, &clk_root_list); core->orphan = false; @@ -3427,21 +3447,24 @@ static int __clk_core_init(struct clk_core *core) */ if (core->ops->recalc_accuracy) core->accuracy = core->ops->recalc_accuracy(core->hw, - __clk_get_accuracy(core->parent)); - else if (core->parent) - core->accuracy = core->parent->accuracy; + clk_core_get_accuracy_no_lock(parent)); + else if (parent) + core->accuracy = parent->accuracy; else core->accuracy = 0; /* - * Set clk's phase. + * Set clk's phase by clk_core_get_phase() caching the phase. * Since a phase is by definition relative to its parent, just * query the current clock phase, or just assume it's in phase. */ - if (core->ops->get_phase) - core->phase = core->ops->get_phase(core->hw); - else - core->phase = 0; + phase = clk_core_get_phase(core); + if (phase < 0) { + ret = phase; + pr_warn("%s: Failed to get phase for clk '%s'\n", __func__, + core->name); + goto out; + } /* * Set clk's duty cycle. 
@@ -3456,9 +3479,9 @@ static int __clk_core_init(struct clk_core *core) */ if (core->ops->recalc_rate) rate = core->ops->recalc_rate(core->hw, - clk_core_get_rate_nolock(core->parent)); - else if (core->parent) - rate = core->parent->rate; + clk_core_get_rate_nolock(parent)); + else if (parent) + rate = parent->rate; else rate = 0; core->rate = core->req_rate = rate; @@ -4865,8 +4888,8 @@ static int parent_ready(struct device_node *np) * * Return: error code or zero on success */ -int of_clk_detect_critical(struct device_node *np, - int index, unsigned long *flags) +int of_clk_detect_critical(struct device_node *np, int index, + unsigned long *flags) { struct property *prop; const __be32 *cur; diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c index 20f7c91c03d2..99773519b5a5 100644 --- a/drivers/clk/imx/clk-composite-8m.c +++ b/drivers/clk/imx/clk-composite-8m.c @@ -15,6 +15,7 @@ #define PCG_PREDIV_MAX 8 #define PCG_DIV_SHIFT 0 +#define PCG_CORE_DIV_WIDTH 3 #define PCG_DIV_WIDTH 6 #define PCG_DIV_MAX 64 @@ -91,7 +92,7 @@ static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_divider *divider = to_clk_divider(hw); - unsigned long flags = 0; + unsigned long flags; int prediv_value; int div_value; int ret; @@ -126,6 +127,7 @@ static const struct clk_ops imx8m_clk_composite_divider_ops = { struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, const char * const *parent_names, int num_parents, void __iomem *reg, + u32 composite_flags, unsigned long flags) { struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw; @@ -133,6 +135,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, struct clk_divider *div = NULL; struct clk_gate *gate = NULL; struct clk_mux *mux = NULL; + const struct clk_ops *divider_ops; mux = kzalloc(sizeof(*mux), GFP_KERNEL); if (!mux) @@ -150,8 +153,16 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, div_hw = &div->hw; div->reg = reg; - div->shift = PCG_PREDIV_SHIFT; - div->width = PCG_PREDIV_WIDTH; + if (composite_flags & IMX_COMPOSITE_CORE) { + div->shift = PCG_DIV_SHIFT; + div->width = PCG_CORE_DIV_WIDTH; + divider_ops = &clk_divider_ops; + } else { + div->shift = PCG_PREDIV_SHIFT; + div->width = PCG_PREDIV_WIDTH; + divider_ops = &imx8m_clk_composite_divider_ops; + } + div->lock = &imx_ccm_lock; div->flags = CLK_DIVIDER_ROUND_CLOSEST; @@ -166,8 +177,7 @@ struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, hw = clk_hw_register_composite(NULL, name, parent_names, num_parents, mux_hw, &clk_mux_ops, div_hw, - &imx8m_clk_composite_divider_ops, - gate_hw, &clk_gate_ops, flags); + divider_ops, gate_hw, &clk_gate_ops, flags); if (IS_ERR(hw)) goto fail; diff --git a/drivers/clk/imx/clk-fixup-div.c b/drivers/clk/imx/clk-fixup-div.c index 4b17b91504ed..100ca828b052 100644 --- a/drivers/clk/imx/clk-fixup-div.c +++ b/drivers/clk/imx/clk-fixup-div.c @@ -55,7 +55,7 @@ static int clk_fixup_div_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_fixup_div *fixup_div = to_clk_fixup_div(hw); struct clk_divider *div = to_clk_divider(hw); unsigned int divider, value; - unsigned long flags = 0; + unsigned long flags; u32 val; divider = parent_rate / rate; diff --git a/drivers/clk/imx/clk-fixup-mux.c b/drivers/clk/imx/clk-fixup-mux.c index b569d919c645..58a67630bb6a 100644 --- a/drivers/clk/imx/clk-fixup-mux.c +++ b/drivers/clk/imx/clk-fixup-mux.c @@ -42,7 +42,7 @@ static int clk_fixup_mux_set_parent(struct clk_hw *hw, u8 index) { struct clk_fixup_mux *fixup_mux = 
to_clk_fixup_mux(hw); struct clk_mux *mux = to_clk_mux(hw); - unsigned long flags = 0; + unsigned long flags; u32 val; spin_lock_irqsave(mux->lock, flags); diff --git a/drivers/clk/imx/clk-gate2.c b/drivers/clk/imx/clk-gate2.c index 7d44ce814806..ce0060e8873e 100644 --- a/drivers/clk/imx/clk-gate2.c +++ b/drivers/clk/imx/clk-gate2.c @@ -40,7 +40,7 @@ static int clk_gate2_enable(struct clk_hw *hw) { struct clk_gate2 *gate = to_clk_gate2(hw); u32 reg; - unsigned long flags = 0; + unsigned long flags; spin_lock_irqsave(gate->lock, flags); @@ -62,7 +62,7 @@ static void clk_gate2_disable(struct clk_hw *hw) { struct clk_gate2 *gate = to_clk_gate2(hw); u32 reg; - unsigned long flags = 0; + unsigned long flags; spin_lock_irqsave(gate->lock, flags); @@ -101,7 +101,7 @@ static int clk_gate2_is_enabled(struct clk_hw *hw) static void clk_gate2_disable_unused(struct clk_hw *hw) { struct clk_gate2 *gate = to_clk_gate2(hw); - unsigned long flags = 0; + unsigned long flags; u32 reg; spin_lock_irqsave(gate->lock, flags); @@ -154,7 +154,7 @@ struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name, gate->hw.init = &init; hw = &gate->hw; - ret = clk_hw_register(NULL, hw); + ret = clk_hw_register(dev, hw); if (ret) { kfree(gate); return ERR_PTR(ret); diff --git a/drivers/clk/imx/clk-imx6sl.c b/drivers/clk/imx/clk-imx6sl.c index 4bd44d89eaaa..0f647d148abf 100644 --- a/drivers/clk/imx/clk-imx6sl.c +++ b/drivers/clk/imx/clk-imx6sl.c @@ -208,6 +208,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node) np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-anatop"); base = of_iomap(np, 0); WARN_ON(!base); + of_node_put(np); anatop_base = base; hws[IMX6SL_PLL1_BYPASS_SRC] = imx_clk_hw_mux("pll1_bypass_src", base + 0x00, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels)); diff --git a/drivers/clk/imx/clk-imx7d.c b/drivers/clk/imx/clk-imx7d.c index 0c9f7adb41ae..b2057bd42e25 100644 --- a/drivers/clk/imx/clk-imx7d.c +++ b/drivers/clk/imx/clk-imx7d.c @@ -802,6 +802,7 @@ static void __init imx7d_clocks_init(struct device_node *ccm_node) hws[IMX7D_PCIE_PHY_ROOT_CLK] = imx_clk_hw_gate4("pcie_phy_root_clk", "pcie_phy_post_div", base + 0x4600, 0); hws[IMX7D_EPDC_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("epdc_pixel_root_clk", "epdc_pixel_post_div", base + 0x44a0, 0); hws[IMX7D_LCDIF_PIXEL_ROOT_CLK] = imx_clk_hw_gate4("lcdif_pixel_root_clk", "lcdif_pixel_post_div", base + 0x44b0, 0); + hws[IMX7D_PXP_CLK] = imx_clk_hw_gate4("pxp_clk", "main_axi_root_clk", base + 0x44c0, 0); hws[IMX7D_MIPI_DSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_dsi_root_clk", "mipi_dsi_post_div", base + 0x4650, 0); hws[IMX7D_MIPI_CSI_ROOT_CLK] = imx_clk_hw_gate4("mipi_csi_root_clk", "mipi_csi_post_div", base + 0x4640, 0); hws[IMX7D_MIPI_DPHY_ROOT_CLK] = imx_clk_hw_gate4("mipi_dphy_root_clk", "mipi_dphy_post_div", base + 0x4660, 0); diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c index 0620d6c8c072..3710aa0dee9b 100644 --- a/drivers/clk/imx/clk-imx7ulp.c +++ b/drivers/clk/imx/clk-imx7ulp.c @@ -8,7 +8,7 @@ */ #include <dt-bindings/clock/imx7ulp-clock.h> -#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/init.h> #include <linux/io.h> diff --git a/drivers/clk/imx/clk-imx8mm.c b/drivers/clk/imx/clk-imx8mm.c index 2ed93fc25087..925670438f23 100644 --- a/drivers/clk/imx/clk-imx8mm.c +++ b/drivers/clk/imx/clk-imx8mm.c @@ -4,12 +4,10 @@ */ #include <dt-bindings/clock/imx8mm-clock.h> -#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/err.h> 
-#include <linux/init.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -41,6 +39,8 @@ static const char *sys_pll3_bypass_sels[] = {"sys_pll3", "sys_pll3_ref_sel", }; static const char *imx8mm_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pll2_500m", "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", }; +static const char * const imx8mm_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; + static const char *imx8mm_m4_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "sys_pll1_266m", "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; @@ -283,8 +283,10 @@ static const char *imx8mm_vpu_h1_sels[] = {"osc_24m", "vpu_pll_out", "sys_pll1_8 static const char *imx8mm_dram_core_sels[] = {"dram_pll_out", "dram_alt_root", }; -static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "osc_27m", "sys_pll1_200m", "audio_pll2_out", - "vpu_pll", "sys_pll1_80m", }; +static const char *imx8mm_clko1_sels[] = {"osc_24m", "sys_pll1_800m", "dummy", "sys_pll1_200m", + "audio_pll2_out", "sys_pll2_500m", "vpu_pll", "sys_pll1_80m", }; +static const char *imx8mm_clko2_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll1_400m", "sys_pll2_166m", + "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "osc_32k", }; static struct clk_hw_onecell_data *clk_hw_data; static struct clk_hw **hws; @@ -322,6 +324,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) np = of_find_compatible_node(NULL, NULL, "fsl,imx8mm-anatop"); base = of_iomap(np, 0); + of_node_put(np); if (WARN_ON(!base)) return -ENOMEM; @@ -414,20 +417,30 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) /* Core Slice */ hws[IMX8MM_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mm_a53_sels, ARRAY_SIZE(imx8mm_a53_sels)); - hws[IMX8MM_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mm_m4_sels, ARRAY_SIZE(imx8mm_m4_sels)); - hws[IMX8MM_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mm_vpu_sels, ARRAY_SIZE(imx8mm_vpu_sels)); - hws[IMX8MM_CLK_GPU3D_SRC] = imx_clk_hw_mux2("gpu3d_src", base + 0x8180, 24, 3, imx8mm_gpu3d_sels, ARRAY_SIZE(imx8mm_gpu3d_sels)); - hws[IMX8MM_CLK_GPU2D_SRC] = imx_clk_hw_mux2("gpu2d_src", base + 0x8200, 24, 3, imx8mm_gpu2d_sels, ARRAY_SIZE(imx8mm_gpu2d_sels)); hws[IMX8MM_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28); - hws[IMX8MM_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); - hws[IMX8MM_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28); - hws[IMX8MM_CLK_GPU3D_CG] = imx_clk_hw_gate3("gpu3d_cg", "gpu3d_src", base + 0x8180, 28); - hws[IMX8MM_CLK_GPU2D_CG] = imx_clk_hw_gate3("gpu2d_cg", "gpu2d_src", base + 0x8200, 28); hws[IMX8MM_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); - hws[IMX8MM_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); - hws[IMX8MM_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3); - hws[IMX8MM_CLK_GPU3D_DIV] = imx_clk_hw_divider2("gpu3d_div", "gpu3d_cg", base + 0x8180, 0, 3); - hws[IMX8MM_CLK_GPU2D_DIV] = imx_clk_hw_divider2("gpu2d_div", "gpu2d_cg", base + 0x8200, 0, 3); + + hws[IMX8MM_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mm_m4_sels, base + 0x8080); + hws[IMX8MM_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mm_vpu_sels, base + 
0x8100); + hws[IMX8MM_CLK_GPU3D_CORE] = imx8m_clk_hw_composite_core("gpu3d_core", imx8mm_gpu3d_sels, base + 0x8180); + hws[IMX8MM_CLK_GPU2D_CORE] = imx8m_clk_hw_composite_core("gpu2d_core", imx8mm_gpu2d_sels, base + 0x8200); + + /* For backwards compatibility */ + hws[IMX8MM_CLK_M4_SRC] = hws[IMX8MM_CLK_M4_CORE]; + hws[IMX8MM_CLK_M4_CG] = hws[IMX8MM_CLK_M4_CORE]; + hws[IMX8MM_CLK_M4_DIV] = hws[IMX8MM_CLK_M4_CORE]; + hws[IMX8MM_CLK_VPU_SRC] = hws[IMX8MM_CLK_VPU_CORE]; + hws[IMX8MM_CLK_VPU_CG] = hws[IMX8MM_CLK_VPU_CORE]; + hws[IMX8MM_CLK_VPU_DIV] = hws[IMX8MM_CLK_VPU_CORE]; + hws[IMX8MM_CLK_GPU3D_SRC] = hws[IMX8MM_CLK_GPU3D_CORE]; + hws[IMX8MM_CLK_GPU3D_CG] = hws[IMX8MM_CLK_GPU3D_CORE]; + hws[IMX8MM_CLK_GPU3D_DIV] = hws[IMX8MM_CLK_GPU3D_CORE]; + hws[IMX8MM_CLK_GPU2D_SRC] = hws[IMX8MM_CLK_GPU2D_CORE]; + hws[IMX8MM_CLK_GPU2D_CG] = hws[IMX8MM_CLK_GPU2D_CORE]; + hws[IMX8MM_CLK_GPU2D_DIV] = hws[IMX8MM_CLK_GPU2D_CORE]; + + /* CORE SEL */ + hws[IMX8MM_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mm_a53_core_sels, ARRAY_SIZE(imx8mm_a53_core_sels)); /* BUS */ hws[IMX8MM_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mm_main_axi_sels, base + 0x8800); @@ -504,6 +517,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) hws[IMX8MM_CLK_WDOG] = imx8m_clk_hw_composite("wdog", imx8mm_wdog_sels, base + 0xb900); hws[IMX8MM_CLK_WRCLK] = imx8m_clk_hw_composite("wrclk", imx8mm_wrclk_sels, base + 0xb980); hws[IMX8MM_CLK_CLKO1] = imx8m_clk_hw_composite("clko1", imx8mm_clko1_sels, base + 0xba00); + hws[IMX8MM_CLK_CLKO2] = imx8m_clk_hw_composite("clko2", imx8mm_clko2_sels, base + 0xba80); hws[IMX8MM_CLK_DSI_CORE] = imx8m_clk_hw_composite("dsi_core", imx8mm_dsi_core_sels, base + 0xbb00); hws[IMX8MM_CLK_DSI_PHY_REF] = imx8m_clk_hw_composite("dsi_phy_ref", imx8mm_dsi_phy_sels, base + 0xbb80); hws[IMX8MM_CLK_DSI_DBI] = imx8m_clk_hw_composite("dsi_dbi", imx8mm_dsi_dbi_sels, base + 0xbc00); @@ -564,7 +578,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) hws[IMX8MM_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); hws[IMX8MM_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0); hws[IMX8MM_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0); - hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_div", base + 0x44f0, 0); + hws[IMX8MM_CLK_GPU3D_ROOT] = imx_clk_hw_gate4("gpu3d_root_clk", "gpu3d_core", base + 0x44f0, 0); hws[IMX8MM_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0); hws[IMX8MM_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0); hws[IMX8MM_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0); @@ -586,7 +600,7 @@ static int imx8mm_clocks_probe(struct platform_device *pdev) hws[IMX8MM_CLK_SDMA1_ROOT] = imx_clk_hw_gate4("sdma1_clk", "ipg_root", base + 0x43a0, 0); hws[IMX8MM_CLK_SDMA2_ROOT] = imx_clk_hw_gate4("sdma2_clk", "ipg_audio_root", base + 0x43b0, 0); hws[IMX8MM_CLK_SDMA3_ROOT] = imx_clk_hw_gate4("sdma3_clk", "ipg_audio_root", base + 0x45f0, 0); - hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_div", base + 0x4660, 0); + hws[IMX8MM_CLK_GPU2D_ROOT] = imx_clk_hw_gate4("gpu2d_root_clk", "gpu2d_core", base + 0x4660, 0); hws[IMX8MM_CLK_CSI1_ROOT] = imx_clk_hw_gate4("csi1_root_clk", "csi1_core", base + 0x4650, 0); hws[IMX8MM_CLK_GPT_3M] = imx_clk_hw_fixed_factor("gpt_3m", "osc_24m", 1, 8); @@ -594,11 +608,14 @@ static 
int imx8mm_clocks_probe(struct platform_device *pdev) hws[IMX8MM_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4); hws[IMX8MM_CLK_DRAM_CORE] = imx_clk_hw_mux2_flags("dram_core_clk", base + 0x9800, 24, 1, imx8mm_dram_core_sels, ARRAY_SIZE(imx8mm_dram_core_sels), CLK_IS_CRITICAL); - hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div", - hws[IMX8MM_CLK_A53_DIV]->clk, - hws[IMX8MM_CLK_A53_SRC]->clk, + hws[IMX8MM_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core", + hws[IMX8MM_CLK_A53_CORE]->clk, + hws[IMX8MM_CLK_A53_CORE]->clk, hws[IMX8MM_ARM_PLL_OUT]->clk, - hws[IMX8MM_SYS_PLL1_800M]->clk); + hws[IMX8MM_CLK_A53_DIV]->clk); + + clk_hw_set_parent(hws[IMX8MM_CLK_A53_SRC], hws[IMX8MM_SYS_PLL1_800M]); + clk_hw_set_parent(hws[IMX8MM_CLK_A53_CORE], hws[IMX8MM_ARM_PLL_OUT]); imx_check_clk_hws(hws, IMX8MM_CLK_END); diff --git a/drivers/clk/imx/clk-imx8mn.c b/drivers/clk/imx/clk-imx8mn.c index a181eb2df876..0bc7070235bd 100644 --- a/drivers/clk/imx/clk-imx8mn.c +++ b/drivers/clk/imx/clk-imx8mn.c @@ -4,12 +4,10 @@ */ #include <dt-bindings/clock/imx8mn-clock.h> -#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/err.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/module.h> -#include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/slab.h> @@ -40,6 +38,8 @@ static const char * const imx8mn_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", }; +static const char * const imx8mn_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; + static const char * const imx8mn_gpu_core_sels[] = {"osc_24m", "gpu_pll_out", "sys_pll1_800m", "sys_pll3_out", "sys_pll2_1000m", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", }; @@ -317,6 +317,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) np = of_find_compatible_node(NULL, NULL, "fsl,imx8mn-anatop"); base = of_iomap(np, 0); + of_node_put(np); if (WARN_ON(!base)) { ret = -ENOMEM; goto unregister_hws; @@ -413,15 +414,21 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) /* CORE */ hws[IMX8MN_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mn_a53_sels, ARRAY_SIZE(imx8mn_a53_sels)); - hws[IMX8MN_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mn_gpu_core_sels, ARRAY_SIZE(imx8mn_gpu_core_sels)); - hws[IMX8MN_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mn_gpu_shader_sels, ARRAY_SIZE(imx8mn_gpu_shader_sels)); hws[IMX8MN_CLK_A53_CG] = imx_clk_hw_gate3("arm_a53_cg", "arm_a53_src", base + 0x8000, 28); - hws[IMX8MN_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28); - hws[IMX8MN_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28); - hws[IMX8MN_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); - hws[IMX8MN_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3); - hws[IMX8MN_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3); + + hws[IMX8MN_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mn_gpu_core_sels, base + 0x8180); + hws[IMX8MN_CLK_GPU_SHADER] = imx8m_clk_hw_composite_core("gpu_shader", imx8mn_gpu_shader_sels, base + 0x8200); + + hws[IMX8MN_CLK_GPU_CORE_SRC] = hws[IMX8MN_CLK_GPU_CORE]; + hws[IMX8MN_CLK_GPU_CORE_CG] = hws[IMX8MN_CLK_GPU_CORE]; + 
hws[IMX8MN_CLK_GPU_CORE_DIV] = hws[IMX8MN_CLK_GPU_CORE]; + hws[IMX8MN_CLK_GPU_SHADER_SRC] = hws[IMX8MN_CLK_GPU_SHADER]; + hws[IMX8MN_CLK_GPU_SHADER_CG] = hws[IMX8MN_CLK_GPU_SHADER]; + hws[IMX8MN_CLK_GPU_SHADER_DIV] = hws[IMX8MN_CLK_GPU_SHADER]; + + /* CORE SEL */ + hws[IMX8MN_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mn_a53_core_sels, ARRAY_SIZE(imx8mn_a53_core_sels)); /* BUS */ hws[IMX8MN_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mn_main_axi_sels, base + 0x8800); @@ -529,7 +536,7 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_UART3_ROOT] = imx_clk_hw_gate4("uart3_root_clk", "uart3", base + 0x44b0, 0); hws[IMX8MN_CLK_UART4_ROOT] = imx_clk_hw_gate4("uart4_root_clk", "uart4", base + 0x44c0, 0); hws[IMX8MN_CLK_USB1_CTRL_ROOT] = imx_clk_hw_gate4("usb1_ctrl_root_clk", "usb_bus", base + 0x44d0, 0); - hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core_div", base + 0x44f0, 0); + hws[IMX8MN_CLK_GPU_CORE_ROOT] = imx_clk_hw_gate4("gpu_core_root_clk", "gpu_core", base + 0x44f0, 0); hws[IMX8MN_CLK_USDHC1_ROOT] = imx_clk_hw_gate4("usdhc1_root_clk", "usdhc1", base + 0x4510, 0); hws[IMX8MN_CLK_USDHC2_ROOT] = imx_clk_hw_gate4("usdhc2_root_clk", "usdhc2", base + 0x4520, 0); hws[IMX8MN_CLK_WDOG1_ROOT] = imx_clk_hw_gate4("wdog1_root_clk", "wdog", base + 0x4530, 0); @@ -552,11 +559,14 @@ static int imx8mn_clocks_probe(struct platform_device *pdev) hws[IMX8MN_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4); - hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div", - hws[IMX8MN_CLK_A53_DIV]->clk, - hws[IMX8MN_CLK_A53_SRC]->clk, + hws[IMX8MN_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core", + hws[IMX8MN_CLK_A53_CORE]->clk, + hws[IMX8MN_CLK_A53_CORE]->clk, hws[IMX8MN_ARM_PLL_OUT]->clk, - hws[IMX8MN_SYS_PLL1_800M]->clk); + hws[IMX8MN_CLK_A53_DIV]->clk); + + clk_hw_set_parent(hws[IMX8MN_CLK_A53_SRC], hws[IMX8MN_SYS_PLL1_800M]); + clk_hw_set_parent(hws[IMX8MN_CLK_A53_CORE], hws[IMX8MN_ARM_PLL_OUT]); imx_check_clk_hws(hws, IMX8MN_CLK_END); diff --git a/drivers/clk/imx/clk-imx8mp.c b/drivers/clk/imx/clk-imx8mp.c index cf192907b7dc..41469e2cc3de 100644 --- a/drivers/clk/imx/clk-imx8mp.c +++ b/drivers/clk/imx/clk-imx8mp.c @@ -4,13 +4,13 @@ */ #include <dt-bindings/clock/imx8mp-clock.h> -#include <linux/clkdev.h> #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/types.h> #include "clk.h" @@ -34,6 +34,8 @@ static const char * const imx8mp_a53_sels[] = {"osc_24m", "arm_pll_out", "sys_pl "sys_pll2_1000m", "sys_pll1_800m", "sys_pll1_400m", "audio_pll1_out", "sys_pll3_out", }; +static const char * const imx8mp_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; + static const char * const imx8mp_m7_sels[] = {"osc_24m", "sys_pll2_200m", "sys_pll2_250m", "vpu_pll_out", "sys_pll1_800m", "audio_pll1_out", "video_pll1_out", "sys_pll3_out", }; @@ -342,7 +344,7 @@ static const char * const imx8mp_hdmi_fdcc_tst_sels[] = {"osc_24m", "sys_pll1_26 "sys_pll1_800m", "sys_pll2_1000m", "sys_pll3_out", "audio_pll2_out", "video_pll1_out", }; -static const char * const imx8mp_hdmi_27m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", +static const char * const imx8mp_hdmi_24m_sels[] = {"osc_24m", "sys_pll1_160m", "sys_pll2_50m", "sys_pll3_out", "audio_pll1_out", "video_pll1_out", "audio_pll2_out", "sys_pll1_133m", }; @@ -434,6 +436,7 @@ 
static int imx8mp_clocks_probe(struct platform_device *pdev) np = of_find_compatible_node(NULL, NULL, "fsl,imx8mp-anatop"); anatop_base = of_iomap(np, 0); + of_node_put(np); if (WARN_ON(!anatop_base)) return -ENOMEM; @@ -553,6 +556,9 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_HSIO_AXI_DIV] = imx_clk_hw_divider2("hsio_axi_div", "hsio_axi_cg", ccm_base + 0x8380, 0, 3); hws[IMX8MP_CLK_MEDIA_ISP_DIV] = imx_clk_hw_divider2("media_isp_div", "media_isp_cg", ccm_base + 0x8400, 0, 3); + /* CORE SEL */ + hws[IMX8MP_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", ccm_base + 0x9880, 24, 1, imx8mp_a53_core_sels, ARRAY_SIZE(imx8mp_a53_core_sels)); + hws[IMX8MP_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mp_main_axi_sels, ccm_base + 0x8800); hws[IMX8MP_CLK_ENET_AXI] = imx8m_clk_hw_composite("enet_axi", imx8mp_enet_axi_sels, ccm_base + 0x8880); hws[IMX8MP_CLK_NAND_USDHC_BUS] = imx8m_clk_hw_composite_critical("nand_usdhc_bus", imx8mp_nand_usdhc_sels, ccm_base + 0x8900); @@ -631,7 +637,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_IPP_DO_CLKO1] = imx8m_clk_hw_composite("ipp_do_clko1", imx8mp_ipp_do_clko1_sels, ccm_base + 0xba00); hws[IMX8MP_CLK_IPP_DO_CLKO2] = imx8m_clk_hw_composite("ipp_do_clko2", imx8mp_ipp_do_clko2_sels, ccm_base + 0xba80); hws[IMX8MP_CLK_HDMI_FDCC_TST] = imx8m_clk_hw_composite("hdmi_fdcc_tst", imx8mp_hdmi_fdcc_tst_sels, ccm_base + 0xbb00); - hws[IMX8MP_CLK_HDMI_27M] = imx8m_clk_hw_composite("hdmi_27m", imx8mp_hdmi_27m_sels, ccm_base + 0xbb80); + hws[IMX8MP_CLK_HDMI_24M] = imx8m_clk_hw_composite("hdmi_24m", imx8mp_hdmi_24m_sels, ccm_base + 0xbb80); hws[IMX8MP_CLK_HDMI_REF_266M] = imx8m_clk_hw_composite("hdmi_ref_266m", imx8mp_hdmi_ref_266m_sels, ccm_base + 0xbc00); hws[IMX8MP_CLK_USDHC3] = imx8m_clk_hw_composite("usdhc3", imx8mp_usdhc3_sels, ccm_base + 0xbc80); hws[IMX8MP_CLK_MEDIA_CAM1_PIX] = imx8m_clk_hw_composite("media_cam1_pix", imx8mp_media_cam1_pix_sels, ccm_base + 0xbd00); @@ -671,6 +677,7 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_I2C2_ROOT] = imx_clk_hw_gate4("i2c2_root_clk", "i2c2", ccm_base + 0x4180, 0); hws[IMX8MP_CLK_I2C3_ROOT] = imx_clk_hw_gate4("i2c3_root_clk", "i2c3", ccm_base + 0x4190, 0); hws[IMX8MP_CLK_I2C4_ROOT] = imx_clk_hw_gate4("i2c4_root_clk", "i2c4", ccm_base + 0x41a0, 0); + hws[IMX8MP_CLK_OCOTP_ROOT] = imx_clk_hw_gate4("ocotp_root_clk", "ipg_root", ccm_base + 0x4220, 0); hws[IMX8MP_CLK_PCIE_ROOT] = imx_clk_hw_gate4("pcie_root_clk", "pcie_aux", ccm_base + 0x4250, 0); hws[IMX8MP_CLK_PWM1_ROOT] = imx_clk_hw_gate4("pwm1_root_clk", "pwm1", ccm_base + 0x4280, 0); hws[IMX8MP_CLK_PWM2_ROOT] = imx_clk_hw_gate4("pwm2_root_clk", "pwm2", ccm_base + 0x4290, 0); @@ -722,11 +729,14 @@ static int imx8mp_clocks_probe(struct platform_device *pdev) hws[IMX8MP_CLK_VPU_ROOT] = imx_clk_hw_gate4("vpu_root_clk", "vpu_bus", ccm_base + 0x4630, 0); hws[IMX8MP_CLK_AUDIO_ROOT] = imx_clk_hw_gate4("audio_root_clk", "ipg_root", ccm_base + 0x4650, 0); - hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div", - hws[IMX8MP_CLK_A53_DIV]->clk, - hws[IMX8MP_CLK_A53_SRC]->clk, + hws[IMX8MP_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core", + hws[IMX8MP_CLK_A53_CORE]->clk, + hws[IMX8MP_CLK_A53_CORE]->clk, hws[IMX8MP_ARM_PLL_OUT]->clk, - hws[IMX8MP_SYS_PLL1_800M]->clk); + hws[IMX8MP_CLK_A53_DIV]->clk); + + clk_hw_set_parent(hws[IMX8MP_CLK_A53_SRC], hws[IMX8MP_SYS_PLL1_800M]); + clk_hw_set_parent(hws[IMX8MP_CLK_A53_CORE], hws[IMX8MP_ARM_PLL_OUT]); imx_check_clk_hws(hws, 
IMX8MP_CLK_END); diff --git a/drivers/clk/imx/clk-imx8mq.c b/drivers/clk/imx/clk-imx8mq.c index 4c0edca1a6d0..fdc68db68de5 100644 --- a/drivers/clk/imx/clk-imx8mq.c +++ b/drivers/clk/imx/clk-imx8mq.c @@ -5,7 +5,7 @@ */ #include <dt-bindings/clock/imx8mq-clock.h> -#include <linux/clk.h> +#include <linux/clk-provider.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> @@ -41,6 +41,8 @@ static const char * const video2_pll_out_sels[] = {"video2_pll1_ref_sel", }; static const char * const imx8mq_a53_sels[] = {"osc_25m", "arm_pll_out", "sys2_pll_500m", "sys2_pll_1000m", "sys1_pll_800m", "sys1_pll_400m", "audio_pll1_out", "sys3_pll_out", }; +static const char * const imx8mq_a53_core_sels[] = {"arm_a53_div", "arm_pll_out", }; + static const char * const imx8mq_arm_m4_sels[] = {"osc_25m", "sys2_pll_200m", "sys2_pll_250m", "sys1_pll_266m", "sys1_pll_800m", "audio_pll1_out", "video_pll1_out", "sys3_pll_out", }; @@ -305,6 +307,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-anatop"); base = of_iomap(np, 0); + of_node_put(np); if (WARN_ON(!base)) return -ENOMEM; @@ -403,22 +406,29 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) /* CORE */ hws[IMX8MQ_CLK_A53_SRC] = imx_clk_hw_mux2("arm_a53_src", base + 0x8000, 24, 3, imx8mq_a53_sels, ARRAY_SIZE(imx8mq_a53_sels)); - hws[IMX8MQ_CLK_M4_SRC] = imx_clk_hw_mux2("arm_m4_src", base + 0x8080, 24, 3, imx8mq_arm_m4_sels, ARRAY_SIZE(imx8mq_arm_m4_sels)); - hws[IMX8MQ_CLK_VPU_SRC] = imx_clk_hw_mux2("vpu_src", base + 0x8100, 24, 3, imx8mq_vpu_sels, ARRAY_SIZE(imx8mq_vpu_sels)); - hws[IMX8MQ_CLK_GPU_CORE_SRC] = imx_clk_hw_mux2("gpu_core_src", base + 0x8180, 24, 3, imx8mq_gpu_core_sels, ARRAY_SIZE(imx8mq_gpu_core_sels)); - hws[IMX8MQ_CLK_GPU_SHADER_SRC] = imx_clk_hw_mux2("gpu_shader_src", base + 0x8200, 24, 3, imx8mq_gpu_shader_sels, ARRAY_SIZE(imx8mq_gpu_shader_sels)); - hws[IMX8MQ_CLK_A53_CG] = imx_clk_hw_gate3_flags("arm_a53_cg", "arm_a53_src", base + 0x8000, 28, CLK_IS_CRITICAL); - hws[IMX8MQ_CLK_M4_CG] = imx_clk_hw_gate3("arm_m4_cg", "arm_m4_src", base + 0x8080, 28); - hws[IMX8MQ_CLK_VPU_CG] = imx_clk_hw_gate3("vpu_cg", "vpu_src", base + 0x8100, 28); - hws[IMX8MQ_CLK_GPU_CORE_CG] = imx_clk_hw_gate3("gpu_core_cg", "gpu_core_src", base + 0x8180, 28); - hws[IMX8MQ_CLK_GPU_SHADER_CG] = imx_clk_hw_gate3("gpu_shader_cg", "gpu_shader_src", base + 0x8200, 28); - hws[IMX8MQ_CLK_A53_DIV] = imx_clk_hw_divider2("arm_a53_div", "arm_a53_cg", base + 0x8000, 0, 3); - hws[IMX8MQ_CLK_M4_DIV] = imx_clk_hw_divider2("arm_m4_div", "arm_m4_cg", base + 0x8080, 0, 3); - hws[IMX8MQ_CLK_VPU_DIV] = imx_clk_hw_divider2("vpu_div", "vpu_cg", base + 0x8100, 0, 3); - hws[IMX8MQ_CLK_GPU_CORE_DIV] = imx_clk_hw_divider2("gpu_core_div", "gpu_core_cg", base + 0x8180, 0, 3); - hws[IMX8MQ_CLK_GPU_SHADER_DIV] = imx_clk_hw_divider2("gpu_shader_div", "gpu_shader_cg", base + 0x8200, 0, 3); + + hws[IMX8MQ_CLK_M4_CORE] = imx8m_clk_hw_composite_core("arm_m4_core", imx8mq_arm_m4_sels, base + 0x8080); + hws[IMX8MQ_CLK_VPU_CORE] = imx8m_clk_hw_composite_core("vpu_core", imx8mq_vpu_sels, base + 0x8100); + hws[IMX8MQ_CLK_GPU_CORE] = imx8m_clk_hw_composite_core("gpu_core", imx8mq_gpu_core_sels, base + 0x8180); + hws[IMX8MQ_CLK_GPU_SHADER] = imx8m_clk_hw_composite("gpu_shader", imx8mq_gpu_shader_sels, base + 0x8200); + /* For backwards compatibility */ + hws[IMX8MQ_CLK_M4_SRC] = hws[IMX8MQ_CLK_M4_CORE]; + hws[IMX8MQ_CLK_M4_CG] = hws[IMX8MQ_CLK_M4_CORE]; + hws[IMX8MQ_CLK_M4_DIV] = hws[IMX8MQ_CLK_M4_CORE]; + 
hws[IMX8MQ_CLK_VPU_SRC] = hws[IMX8MQ_CLK_VPU_CORE]; + hws[IMX8MQ_CLK_VPU_CG] = hws[IMX8MQ_CLK_VPU_CORE]; + hws[IMX8MQ_CLK_VPU_DIV] = hws[IMX8MQ_CLK_VPU_CORE]; + hws[IMX8MQ_CLK_GPU_CORE_SRC] = hws[IMX8MQ_CLK_GPU_CORE]; + hws[IMX8MQ_CLK_GPU_CORE_CG] = hws[IMX8MQ_CLK_GPU_CORE]; + hws[IMX8MQ_CLK_GPU_CORE_DIV] = hws[IMX8MQ_CLK_GPU_CORE]; + hws[IMX8MQ_CLK_GPU_SHADER_SRC] = hws[IMX8MQ_CLK_GPU_SHADER]; + hws[IMX8MQ_CLK_GPU_SHADER_CG] = hws[IMX8MQ_CLK_GPU_SHADER]; + hws[IMX8MQ_CLK_GPU_SHADER_DIV] = hws[IMX8MQ_CLK_GPU_SHADER]; + + /* CORE SEL */ + hws[IMX8MQ_CLK_A53_CORE] = imx_clk_hw_mux2("arm_a53_core", base + 0x9880, 24, 1, imx8mq_a53_core_sels, ARRAY_SIZE(imx8mq_a53_core_sels)); /* BUS */ hws[IMX8MQ_CLK_MAIN_AXI] = imx8m_clk_hw_composite_critical("main_axi", imx8mq_main_axi_sels, base + 0x8800); @@ -567,7 +577,7 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) hws[IMX8MQ_CLK_WDOG2_ROOT] = imx_clk_hw_gate4("wdog2_root_clk", "wdog", base + 0x4540, 0); hws[IMX8MQ_CLK_WDOG3_ROOT] = imx_clk_hw_gate4("wdog3_root_clk", "wdog", base + 0x4550, 0); hws[IMX8MQ_CLK_VPU_G1_ROOT] = imx_clk_hw_gate2_flags("vpu_g1_root_clk", "vpu_g1", base + 0x4560, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE); - hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core_div", base + 0x4570, 0); + hws[IMX8MQ_CLK_GPU_ROOT] = imx_clk_hw_gate4("gpu_root_clk", "gpu_core", base + 0x4570, 0); hws[IMX8MQ_CLK_VPU_G2_ROOT] = imx_clk_hw_gate2_flags("vpu_g2_root_clk", "vpu_g2", base + 0x45a0, 0, CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE); hws[IMX8MQ_CLK_DISP_ROOT] = imx_clk_hw_gate2_shared2("disp_root_clk", "disp_dc8000", base + 0x45d0, 0, &share_count_dcss); hws[IMX8MQ_CLK_DISP_AXI_ROOT] = imx_clk_hw_gate2_shared2("disp_axi_root_clk", "disp_axi", base + 0x45d0, 0, &share_count_dcss); @@ -583,11 +593,14 @@ static int imx8mq_clocks_probe(struct platform_device *pdev) hws[IMX8MQ_GPT_3M_CLK] = imx_clk_hw_fixed_factor("gpt_3m", "osc_25m", 1, 8); hws[IMX8MQ_CLK_DRAM_ALT_ROOT] = imx_clk_hw_fixed_factor("dram_alt_root", "dram_alt", 1, 4); - hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_div", - hws[IMX8MQ_CLK_A53_DIV]->clk, - hws[IMX8MQ_CLK_A53_SRC]->clk, + hws[IMX8MQ_CLK_ARM] = imx_clk_hw_cpu("arm", "arm_a53_core", + hws[IMX8MQ_CLK_A53_CORE]->clk, + hws[IMX8MQ_CLK_A53_CORE]->clk, hws[IMX8MQ_ARM_PLL_OUT]->clk, - hws[IMX8MQ_SYS1_PLL_800M]->clk); + hws[IMX8MQ_CLK_A53_DIV]->clk); + + clk_hw_set_parent(hws[IMX8MQ_CLK_A53_SRC], hws[IMX8MQ_SYS1_PLL_800M]); + clk_hw_set_parent(hws[IMX8MQ_CLK_A53_CORE], hws[IMX8MQ_ARM_PLL_OUT]); imx_check_clk_hws(hws, IMX8MQ_CLK_END); diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c index de93ce73101b..78e1f7641aaa 100644 --- a/drivers/clk/imx/clk-pfdv2.c +++ b/drivers/clk/imx/clk-pfdv2.c @@ -98,26 +98,45 @@ static unsigned long clk_pfdv2_recalc_rate(struct clk_hw *hw, return tmp; } -static long clk_pfdv2_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +static int clk_pfdv2_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - u64 tmp = *prate; + unsigned long parent_rates[] = { + 480000000, + 528000000, + req->best_parent_rate + }; + unsigned long best_rate = -1UL, rate = req->rate; + unsigned long best_parent_rate = req->best_parent_rate; + u64 tmp; u8 frac; + int i; + + for (i = 0; i < ARRAY_SIZE(parent_rates); i++) { + tmp = parent_rates[i]; + tmp = tmp * 18 + rate / 2; + do_div(tmp, rate); + frac = tmp; + + if (frac < 12) + frac = 12; + else if (frac > 35) + frac = 35; + + tmp = parent_rates[i]; + tmp *= 18; + 
do_div(tmp, frac); + + if (abs(tmp - req->rate) < abs(best_rate - req->rate)) { + best_rate = tmp; + best_parent_rate = parent_rates[i]; + } + } - tmp = tmp * 18 + rate / 2; - do_div(tmp, rate); - frac = tmp; - - if (frac < 12) - frac = 12; - else if (frac > 35) - frac = 35; - - tmp = *prate; - tmp *= 18; - do_div(tmp, frac); + req->best_parent_rate = best_parent_rate; + req->rate = best_rate; - return tmp; + return 0; } static int clk_pfdv2_is_enabled(struct clk_hw *hw) @@ -139,6 +158,12 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate, u32 val; u8 frac; + if (!rate) + return -EINVAL; + + /* PFD can NOT change rate without gating */ + WARN_ON(clk_pfdv2_is_enabled(hw)); + tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = tmp; @@ -161,7 +186,7 @@ static const struct clk_ops clk_pfdv2_ops = { .enable = clk_pfdv2_enable, .disable = clk_pfdv2_disable, .recalc_rate = clk_pfdv2_recalc_rate, - .round_rate = clk_pfdv2_round_rate, + .determine_rate = clk_pfdv2_determine_rate, .set_rate = clk_pfdv2_set_rate, .is_enabled = clk_pfdv2_is_enabled, }; @@ -189,7 +214,7 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name, init.ops = &clk_pfdv2_ops; init.parent_names = &parent_name; init.num_parents = 1; - init.flags = CLK_SET_RATE_GATE; + init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT; pfd->hw.init = &init; diff --git a/drivers/clk/imx/clk-pll14xx.c b/drivers/clk/imx/clk-pll14xx.c index 5b0519a81a7a..a83bbbee77d9 100644 --- a/drivers/clk/imx/clk-pll14xx.c +++ b/drivers/clk/imx/clk-pll14xx.c @@ -55,8 +55,10 @@ static const struct imx_pll14xx_rate_table imx_pll1416x_tbl[] = { }; static const struct imx_pll14xx_rate_table imx_pll1443x_tbl[] = { + PLL_1443X_RATE(1039500000U, 173, 2, 1, 16384), PLL_1443X_RATE(650000000U, 325, 3, 2, 0), PLL_1443X_RATE(594000000U, 198, 2, 2, 0), + PLL_1443X_RATE(519750000U, 173, 2, 2, 16384), PLL_1443X_RATE(393216000U, 262, 2, 3, 9437), PLL_1443X_RATE(361267200U, 361, 3, 3, 17511), }; @@ -408,6 +410,8 @@ struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name, default: pr_err("%s: Unknown pll type for pll clk %s\n", __func__, name); + kfree(pll); + return ERR_PTR(-EINVAL); }; pll->base = base; diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c index f51a800c268c..a49450431855 100644 --- a/drivers/clk/imx/clk-pllv4.c +++ b/drivers/clk/imx/clk-pllv4.c @@ -54,7 +54,7 @@ static inline int clk_pllv4_wait_lock(struct clk_pllv4 *pll) csr, csr & PLL_VLD, 0, LOCK_TIMEOUT_US); } -static int clk_pllv4_is_enabled(struct clk_hw *hw) +static int clk_pllv4_is_prepared(struct clk_hw *hw) { struct clk_pllv4 *pll = to_clk_pllv4(hw); @@ -175,7 +175,7 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static int clk_pllv4_enable(struct clk_hw *hw) +static int clk_pllv4_prepare(struct clk_hw *hw) { u32 val; struct clk_pllv4 *pll = to_clk_pllv4(hw); @@ -187,7 +187,7 @@ static int clk_pllv4_enable(struct clk_hw *hw) return clk_pllv4_wait_lock(pll); } -static void clk_pllv4_disable(struct clk_hw *hw) +static void clk_pllv4_unprepare(struct clk_hw *hw) { u32 val; struct clk_pllv4 *pll = to_clk_pllv4(hw); @@ -201,9 +201,9 @@ static const struct clk_ops clk_pllv4_ops = { .recalc_rate = clk_pllv4_recalc_rate, .round_rate = clk_pllv4_round_rate, .set_rate = clk_pllv4_set_rate, - .enable = clk_pllv4_enable, - .disable = clk_pllv4_disable, - .is_enabled = clk_pllv4_is_enabled, + .prepare = clk_pllv4_prepare, + .unprepare = clk_pllv4_unprepare, + .is_prepared = clk_pllv4_is_prepared, 
}; struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name, diff --git a/drivers/clk/imx/clk-sscg-pll.c b/drivers/clk/imx/clk-sscg-pll.c index acd1b9002be6..d4a2be16d132 100644 --- a/drivers/clk/imx/clk-sscg-pll.c +++ b/drivers/clk/imx/clk-sscg-pll.c @@ -195,10 +195,10 @@ static int clk_sscg_pll2_find_setup(struct clk_sscg_pll_setup *setup, uint64_t ref) { - int ret = -EINVAL; + int ret; if (ref < PLL_STAGE1_MIN_FREQ || ref > PLL_STAGE1_MAX_FREQ) - return ret; + return -EINVAL; temp_setup->vco1 = ref; @@ -254,10 +254,10 @@ static int clk_sscg_pll1_find_setup(struct clk_sscg_pll_setup *setup, uint64_t ref) { - int ret = -EINVAL; + int ret; if (ref < PLL_REF_MIN_FREQ || ref > PLL_REF_MAX_FREQ) - return ret; + return -EINVAL; temp_setup->ref = ref; @@ -428,7 +428,7 @@ static int __clk_sscg_pll_determine_rate(struct clk_hw *hw, struct clk_sscg_pll_setup *setup = &pll->setup; struct clk_hw *parent_hw = NULL; int bypass_parent_index; - int ret = -EINVAL; + int ret; req->max_rate = max; req->min_rate = min; @@ -467,10 +467,10 @@ static int clk_sscg_pll_determine_rate(struct clk_hw *hw, uint64_t rate = req->rate; uint64_t min = req->min_rate; uint64_t max = req->max_rate; - int ret = -EINVAL; + int ret; if (rate < PLL_OUT_MIN_FREQ || rate > PLL_OUT_MAX_FREQ) - return ret; + return -EINVAL; ret = __clk_sscg_pll_determine_rate(hw, req, req->rate, req->rate, rate, PLL_BYPASS2); diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h index b05213b91dcf..f074dd8ec42e 100644 --- a/drivers/clk/imx/clk.h +++ b/drivers/clk/imx/clk.h @@ -477,20 +477,29 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name, struct clk *div, struct clk *mux, struct clk *pll, struct clk *step); +#define IMX_COMPOSITE_CORE BIT(0) + struct clk_hw *imx8m_clk_hw_composite_flags(const char *name, const char * const *parent_names, int num_parents, void __iomem *reg, + u32 composite_flags, unsigned long flags); +#define imx8m_clk_hw_composite_core(name, parent_names, reg) \ + imx8m_clk_hw_composite_flags(name, parent_names, \ + ARRAY_SIZE(parent_names), reg, \ + IMX_COMPOSITE_CORE, \ + CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE) + #define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \ flags) \ to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \ - num_parents, reg, flags)) + num_parents, reg, 0, flags)) #define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \ imx8m_clk_hw_composite_flags(name, parent_names, \ - ARRAY_SIZE(parent_names), reg, \ + ARRAY_SIZE(parent_names), reg, 0, \ flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE) #define __imx8m_clk_composite(name, parent_names, reg, flags) \ diff --git a/drivers/clk/ingenic/jz4770-cgu.c b/drivers/clk/ingenic/jz4770-cgu.c index 956dd653a43d..c051ecba5cf8 100644 --- a/drivers/clk/ingenic/jz4770-cgu.c +++ b/drivers/clk/ingenic/jz4770-cgu.c @@ -432,8 +432,10 @@ static void __init jz4770_cgu_init(struct device_node *np) cgu = ingenic_cgu_new(jz4770_cgu_clocks, ARRAY_SIZE(jz4770_cgu_clocks), np); - if (!cgu) + if (!cgu) { pr_err("%s: failed to initialise CGU\n", __func__); + return; + } retval = ingenic_cgu_register_clocks(cgu); if (retval) diff --git a/drivers/clk/ingenic/jz4780-cgu.c b/drivers/clk/ingenic/jz4780-cgu.c index ea905ff72bf0..c758f1643067 100644 --- a/drivers/clk/ingenic/jz4780-cgu.c +++ b/drivers/clk/ingenic/jz4780-cgu.c @@ -9,14 +9,16 @@ #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/of.h> + #include 
<dt-bindings/clock/jz4780-cgu.h> #include "cgu.h" #include "pm.h" /* CGU register offsets */ #define CGU_REG_CLOCKCONTROL 0x00 -#define CGU_REG_PLLCONTROL 0x0c +#define CGU_REG_LCR 0x04 #define CGU_REG_APLL 0x10 #define CGU_REG_MPLL 0x14 #define CGU_REG_EPLL 0x18 @@ -46,8 +48,8 @@ #define CGU_REG_CLOCKSTATUS 0xd4 /* bits within the OPCR register */ -#define OPCR_SPENDN0 (1 << 7) -#define OPCR_SPENDN1 (1 << 6) +#define OPCR_SPENDN0 BIT(7) +#define OPCR_SPENDN1 BIT(6) /* bits within the USBPCR register */ #define USBPCR_USB_MODE BIT(31) @@ -88,6 +90,13 @@ #define USBVBFIL_IDDIGFIL_MASK (0xffff << USBVBFIL_IDDIGFIL_SHIFT) #define USBVBFIL_USBVBFIL_MASK (0xffff) +/* bits within the LCR register */ +#define LCR_PD_SCPU BIT(31) +#define LCR_SCPUS BIT(27) + +/* bits within the CLKGR1 register */ +#define CLKGR1_CORE1 BIT(15) + static struct ingenic_cgu *cgu; static u8 jz4780_otg_phy_get_parent(struct clk_hw *hw) @@ -205,6 +214,42 @@ static const struct clk_ops jz4780_otg_phy_ops = { .set_rate = jz4780_otg_phy_set_rate, }; +static int jz4780_core1_enable(struct clk_hw *hw) +{ + struct ingenic_clk *ingenic_clk = to_ingenic_clk(hw); + struct ingenic_cgu *cgu = ingenic_clk->cgu; + const unsigned int timeout = 5000; + unsigned long flags; + int retval; + u32 lcr, clkgr1; + + spin_lock_irqsave(&cgu->lock, flags); + + lcr = readl(cgu->base + CGU_REG_LCR); + lcr &= ~LCR_PD_SCPU; + writel(lcr, cgu->base + CGU_REG_LCR); + + clkgr1 = readl(cgu->base + CGU_REG_CLKGR1); + clkgr1 &= ~CLKGR1_CORE1; + writel(clkgr1, cgu->base + CGU_REG_CLKGR1); + + spin_unlock_irqrestore(&cgu->lock, flags); + + /* wait for the CPU to be powered up */ + retval = readl_poll_timeout(cgu->base + CGU_REG_LCR, lcr, + !(lcr & LCR_SCPUS), 10, timeout); + if (retval == -ETIMEDOUT) { + pr_err("%s: Wait for power up core1 timeout\n", __func__); + return retval; + } + + return 0; +} + +static const struct clk_ops jz4780_core1_ops = { + .enable = jz4780_core1_enable, +}; + static const s8 pll_od_encoding[16] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, @@ -699,9 +744,9 @@ static const struct ingenic_cgu_clk_info jz4780_cgu_clocks[] = { }, [JZ4780_CLK_CORE1] = { - "core1", CGU_CLK_GATE, + "core1", CGU_CLK_CUSTOM, .parents = { JZ4780_CLK_CPU, -1, -1, -1 }, - .gate = { CGU_REG_CLKGR1, 15 }, + .custom = { &jz4780_core1_ops }, }, }; diff --git a/drivers/clk/ingenic/tcu.c b/drivers/clk/ingenic/tcu.c index ad7daa494fd4..153a954b0d2f 100644 --- a/drivers/clk/ingenic/tcu.c +++ b/drivers/clk/ingenic/tcu.c @@ -189,7 +189,7 @@ static long ingenic_tcu_round_rate(struct clk_hw *hw, unsigned long req_rate, u8 prescale; if (req_rate > rate) - return -EINVAL; + return rate; prescale = ingenic_tcu_get_prescale(rate, req_rate); @@ -317,10 +317,17 @@ static const struct ingenic_soc_info jz4770_soc_info = { .has_tcu_clk = false, }; +static const struct ingenic_soc_info x1000_soc_info = { + .num_channels = 8, + .has_ost = false, /* X1000 has OST, but it not belong TCU */ + .has_tcu_clk = false, +}; + static const struct of_device_id ingenic_tcu_of_match[] __initconst = { { .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, }, { .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, }, { .compatible = "ingenic,jz4770-tcu", .data = &jz4770_soc_info, }, + { .compatible = "ingenic,x1000-tcu", .data = &x1000_soc_info, }, { /* sentinel */ } }; @@ -471,3 +478,4 @@ static void __init ingenic_tcu_init(struct device_node *np) CLK_OF_DECLARE_DRIVER(jz4740_cgu, "ingenic,jz4740-tcu", ingenic_tcu_init); 
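Note on the jz4780 change above: core1 is converted from a plain gate clock into a CGU_CLK_CUSTOM clock whose enable op clears LCR_PD_SCPU, ungates CLKGR1_CORE1 and then polls LCR until LCR_SCPUS drops (5 ms timeout), so SMP bringup code no longer has to poke those registers directly. A minimal consumer sketch, not part of this series; the "core1" con_id and the dev pointer are placeholder assumptions for however the caller looks the clock up:

    #include <linux/clk.h>
    #include <linux/err.h>

    /* Power up and ungate the JZ4780 second core through the common clk
     * API; the custom enable op performs the LCR/CLKGR1 sequence and
     * waits for LCR_SCPUS to clear before returning. */
    static int bring_up_core1(struct device *dev)
    {
            struct clk *core1 = clk_get(dev, "core1");

            if (IS_ERR(core1))
                    return PTR_ERR(core1);

            return clk_prepare_enable(core1);
    }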
CLK_OF_DECLARE_DRIVER(jz4725b_cgu, "ingenic,jz4725b-tcu", ingenic_tcu_init); CLK_OF_DECLARE_DRIVER(jz4770_cgu, "ingenic,jz4770-tcu", ingenic_tcu_init); +CLK_OF_DECLARE_DRIVER(x1000_cgu, "ingenic,x1000-tcu", ingenic_tcu_init); diff --git a/drivers/clk/keystone/Kconfig b/drivers/clk/keystone/Kconfig index 38aeefb1e808..ab613f28b502 100644 --- a/drivers/clk/keystone/Kconfig +++ b/drivers/clk/keystone/Kconfig @@ -26,3 +26,11 @@ config TI_SCI_CLK_PROBE_FROM_FW This is mostly only useful for debugging purposes, and will increase the boot time of the device. If you want the clocks probed from firmware, say Y. Otherwise, say N. + +config TI_SYSCON_CLK + tristate "Syscon based clock driver for K2/K3 SoCs" + depends on ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST + default ARCH_KEYSTONE || ARCH_K3 + help + This adds clock driver support for syscon based gate + clocks on TI's K2 and K3 SoCs. diff --git a/drivers/clk/keystone/Makefile b/drivers/clk/keystone/Makefile index d044de6f965c..0e426e648f7c 100644 --- a/drivers/clk/keystone/Makefile +++ b/drivers/clk/keystone/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_COMMON_CLK_KEYSTONE) += pll.o gate.o obj-$(CONFIG_TI_SCI_CLK) += sci-clk.o +obj-$(CONFIG_TI_SYSCON_CLK) += syscon-clk.o diff --git a/drivers/clk/keystone/syscon-clk.c b/drivers/clk/keystone/syscon-clk.c new file mode 100644 index 000000000000..8d7dbea3bd30 --- /dev/null +++ b/drivers/clk/keystone/syscon-clk.c @@ -0,0 +1,172 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/ + */ + +#include <linux/clk-provider.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +struct ti_syscon_gate_clk_priv { + struct clk_hw hw; + struct regmap *regmap; + u32 reg; + u32 idx; +}; + +struct ti_syscon_gate_clk_data { + char *name; + u32 offset; + u32 bit_idx; +}; + +static struct +ti_syscon_gate_clk_priv *to_ti_syscon_gate_clk_priv(struct clk_hw *hw) +{ + return container_of(hw, struct ti_syscon_gate_clk_priv, hw); +} + +static int ti_syscon_gate_clk_enable(struct clk_hw *hw) +{ + struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw); + + return regmap_write_bits(priv->regmap, priv->reg, priv->idx, + priv->idx); +} + +static void ti_syscon_gate_clk_disable(struct clk_hw *hw) +{ + struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw); + + regmap_write_bits(priv->regmap, priv->reg, priv->idx, 0); +} + +static int ti_syscon_gate_clk_is_enabled(struct clk_hw *hw) +{ + unsigned int val; + struct ti_syscon_gate_clk_priv *priv = to_ti_syscon_gate_clk_priv(hw); + + regmap_read(priv->regmap, priv->reg, &val); + + return !!(val & priv->idx); +} + +static const struct clk_ops ti_syscon_gate_clk_ops = { + .enable = ti_syscon_gate_clk_enable, + .disable = ti_syscon_gate_clk_disable, + .is_enabled = ti_syscon_gate_clk_is_enabled, +}; + +static struct clk_hw +*ti_syscon_gate_clk_register(struct device *dev, struct regmap *regmap, + const struct ti_syscon_gate_clk_data *data) +{ + struct ti_syscon_gate_clk_priv *priv; + struct clk_init_data init; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return ERR_PTR(-ENOMEM); + + init.name = data->name; + init.ops = &ti_syscon_gate_clk_ops; + init.parent_names = NULL; + init.num_parents = 0; + init.flags = 0; + + priv->regmap = regmap; + priv->reg = data->offset; + priv->idx = BIT(data->bit_idx); + priv->hw.init = &init; + + ret = 
devm_clk_hw_register(dev, &priv->hw); + if (ret) + return ERR_PTR(ret); + + return &priv->hw; +} + +static int ti_syscon_gate_clk_probe(struct platform_device *pdev) +{ + const struct ti_syscon_gate_clk_data *data, *p; + struct clk_hw_onecell_data *hw_data; + struct device *dev = &pdev->dev; + struct regmap *regmap; + int num_clks, i; + + data = device_get_match_data(dev); + if (!data) + return -EINVAL; + + regmap = syscon_node_to_regmap(dev->of_node); + if (IS_ERR(regmap)) { + if (PTR_ERR(regmap) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_err(dev, "failed to find parent regmap\n"); + return PTR_ERR(regmap); + } + + num_clks = 0; + for (p = data; p->name; p++) + num_clks++; + + hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, num_clks), + GFP_KERNEL); + if (!hw_data) + return -ENOMEM; + + hw_data->num = num_clks; + + for (i = 0; i < num_clks; i++) { + hw_data->hws[i] = ti_syscon_gate_clk_register(dev, regmap, + &data[i]); + if (IS_ERR(hw_data->hws[i])) + dev_warn(dev, "failed to register %s\n", + data[i].name); + } + + return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, + hw_data); +} + +#define TI_SYSCON_CLK_GATE(_name, _offset, _bit_idx) \ + { \ + .name = _name, \ + .offset = (_offset), \ + .bit_idx = (_bit_idx), \ + } + +static const struct ti_syscon_gate_clk_data am654_clk_data[] = { + TI_SYSCON_CLK_GATE("ehrpwm_tbclk0", 0x0, 0), + TI_SYSCON_CLK_GATE("ehrpwm_tbclk1", 0x4, 0), + TI_SYSCON_CLK_GATE("ehrpwm_tbclk2", 0x8, 0), + TI_SYSCON_CLK_GATE("ehrpwm_tbclk3", 0xc, 0), + TI_SYSCON_CLK_GATE("ehrpwm_tbclk4", 0x10, 0), + TI_SYSCON_CLK_GATE("ehrpwm_tbclk5", 0x14, 0), + { /* Sentinel */ }, +}; + +static const struct of_device_id ti_syscon_gate_clk_ids[] = { + { + .compatible = "ti,am654-ehrpwm-tbclk", + .data = &am654_clk_data, + }, + { } +}; +MODULE_DEVICE_TABLE(of, ti_syscon_gate_clk_ids); + +static struct platform_driver ti_syscon_gate_clk_driver = { + .probe = ti_syscon_gate_clk_probe, + .driver = { + .name = "ti-syscon-gate-clk", + .of_match_table = ti_syscon_gate_clk_ids, + }, +}; +module_platform_driver(ti_syscon_gate_clk_driver); + +MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); +MODULE_DESCRIPTION("Syscon backed gate-clock driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/clk/meson/g12a.c b/drivers/clk/meson/g12a.c index d2760a021301..fad616cac01e 100644 --- a/drivers/clk/meson/g12a.c +++ b/drivers/clk/meson/g12a.c @@ -3862,6 +3862,111 @@ static struct clk_regmap g12a_ts = { }, }; +/* SPICC SCLK source clock */ + +static const struct clk_parent_data spicc_sclk_parent_data[] = { + { .fw_name = "xtal", }, + { .hw = &g12a_clk81.hw }, + { .hw = &g12a_fclk_div4.hw }, + { .hw = &g12a_fclk_div3.hw }, + { .hw = &g12a_fclk_div5.hw }, + { .hw = &g12a_fclk_div7.hw }, +}; + +static struct clk_regmap g12a_spicc0_sclk_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SPICC_CLK_CNTL, + .mask = 7, + .shift = 7, + }, + .hw.init = &(struct clk_init_data){ + .name = "spicc0_sclk_sel", + .ops = &clk_regmap_mux_ops, + .parent_data = spicc_sclk_parent_data, + .num_parents = ARRAY_SIZE(spicc_sclk_parent_data), + }, +}; + +static struct clk_regmap g12a_spicc0_sclk_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SPICC_CLK_CNTL, + .shift = 0, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "spicc0_sclk_div", + .ops = &clk_regmap_divider_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_spicc0_sclk_sel.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_spicc0_sclk = { 
+ .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SPICC_CLK_CNTL, + .bit_idx = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "spicc0_sclk", + .ops = &clk_regmap_gate_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_spicc0_sclk_div.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_spicc1_sclk_sel = { + .data = &(struct clk_regmap_mux_data){ + .offset = HHI_SPICC_CLK_CNTL, + .mask = 7, + .shift = 23, + }, + .hw.init = &(struct clk_init_data){ + .name = "spicc1_sclk_sel", + .ops = &clk_regmap_mux_ops, + .parent_data = spicc_sclk_parent_data, + .num_parents = ARRAY_SIZE(spicc_sclk_parent_data), + }, +}; + +static struct clk_regmap g12a_spicc1_sclk_div = { + .data = &(struct clk_regmap_div_data){ + .offset = HHI_SPICC_CLK_CNTL, + .shift = 16, + .width = 6, + }, + .hw.init = &(struct clk_init_data){ + .name = "spicc1_sclk_div", + .ops = &clk_regmap_divider_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_spicc1_sclk_sel.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + +static struct clk_regmap g12a_spicc1_sclk = { + .data = &(struct clk_regmap_gate_data){ + .offset = HHI_SPICC_CLK_CNTL, + .bit_idx = 22, + }, + .hw.init = &(struct clk_init_data){ + .name = "spicc1_sclk", + .ops = &clk_regmap_gate_ops, + .parent_hws = (const struct clk_hw *[]) { + &g12a_spicc1_sclk_div.hw + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }, +}; + #define MESON_GATE(_name, _reg, _bit) \ MESON_PCLK(_name, _reg, _bit, &g12a_clk81.hw) @@ -4159,6 +4264,12 @@ static struct clk_hw_onecell_data g12a_hw_onecell_data = { [CLKID_VDEC_HEVCF] = &g12a_vdec_hevcf.hw, [CLKID_TS_DIV] = &g12a_ts_div.hw, [CLKID_TS] = &g12a_ts.hw, + [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw, + [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw, + [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw, + [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw, + [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw, + [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -4408,6 +4519,12 @@ static struct clk_hw_onecell_data g12b_hw_onecell_data = { [CLKID_CPUB_CLK_AXI] = &g12b_cpub_clk_axi.hw, [CLKID_CPUB_CLK_TRACE_SEL] = &g12b_cpub_clk_trace_sel.hw, [CLKID_CPUB_CLK_TRACE] = &g12b_cpub_clk_trace.hw, + [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw, + [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw, + [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw, + [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw, + [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw, + [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -4642,6 +4759,12 @@ static struct clk_hw_onecell_data sm1_hw_onecell_data = { [CLKID_CPU1_CLK] = &sm1_cpu1_clk.hw, [CLKID_CPU2_CLK] = &sm1_cpu2_clk.hw, [CLKID_CPU3_CLK] = &sm1_cpu3_clk.hw, + [CLKID_SPICC0_SCLK_SEL] = &g12a_spicc0_sclk_sel.hw, + [CLKID_SPICC0_SCLK_DIV] = &g12a_spicc0_sclk_div.hw, + [CLKID_SPICC0_SCLK] = &g12a_spicc0_sclk.hw, + [CLKID_SPICC1_SCLK_SEL] = &g12a_spicc1_sclk_sel.hw, + [CLKID_SPICC1_SCLK_DIV] = &g12a_spicc1_sclk_div.hw, + [CLKID_SPICC1_SCLK] = &g12a_spicc1_sclk.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -4877,6 +5000,12 @@ static struct clk_regmap *const g12a_clk_regmaps[] = { &sm1_cpu1_clk, &sm1_cpu2_clk, &sm1_cpu3_clk, + &g12a_spicc0_sclk_sel, + &g12a_spicc0_sclk_div, + &g12a_spicc0_sclk, + &g12a_spicc1_sclk_sel, + &g12a_spicc1_sclk_div, + &g12a_spicc1_sclk, }; static const struct reg_sequence g12a_init_regs[] = { diff --git a/drivers/clk/meson/g12a.h 
b/drivers/clk/meson/g12a.h index 9df4068aced1..a8852556836e 100644 --- a/drivers/clk/meson/g12a.h +++ b/drivers/clk/meson/g12a.h @@ -255,8 +255,12 @@ #define CLKID_DSU_CLK_DYN1 249 #define CLKID_DSU_CLK_DYN 250 #define CLKID_DSU_CLK_FINAL 251 +#define CLKID_SPICC0_SCLK_SEL 256 +#define CLKID_SPICC0_SCLK_DIV 257 +#define CLKID_SPICC1_SCLK_SEL 259 +#define CLKID_SPICC1_SCLK_DIV 260 -#define NR_CLKS 256 +#define NR_CLKS 262 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/g12a-clkc.h> diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c index 1f9c056e684c..5fd6a574f8c3 100644 --- a/drivers/clk/meson/gxbb.c +++ b/drivers/clk/meson/gxbb.c @@ -2613,19 +2613,12 @@ static MESON_GATE(gxbb_assist_misc, HHI_GCLK_MPEG0, 23); static MESON_GATE(gxbb_emmc_a, HHI_GCLK_MPEG0, 24); static MESON_GATE(gxbb_emmc_b, HHI_GCLK_MPEG0, 25); static MESON_GATE(gxbb_emmc_c, HHI_GCLK_MPEG0, 26); +static MESON_GATE(gxl_acodec, HHI_GCLK_MPEG0, 28); static MESON_GATE(gxbb_spi, HHI_GCLK_MPEG0, 30); static MESON_GATE(gxbb_i2s_spdif, HHI_GCLK_MPEG1, 2); static MESON_GATE(gxbb_eth, HHI_GCLK_MPEG1, 3); static MESON_GATE(gxbb_demux, HHI_GCLK_MPEG1, 4); -static MESON_GATE(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6); -static MESON_GATE(gxbb_iec958, HHI_GCLK_MPEG1, 7); -static MESON_GATE(gxbb_i2s_out, HHI_GCLK_MPEG1, 8); -static MESON_GATE(gxbb_amclk, HHI_GCLK_MPEG1, 9); -static MESON_GATE(gxbb_aififo2, HHI_GCLK_MPEG1, 10); -static MESON_GATE(gxbb_mixer, HHI_GCLK_MPEG1, 11); -static MESON_GATE(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12); -static MESON_GATE(gxbb_adc, HHI_GCLK_MPEG1, 13); static MESON_GATE(gxbb_blkmv, HHI_GCLK_MPEG1, 14); static MESON_GATE(gxbb_aiu, HHI_GCLK_MPEG1, 15); static MESON_GATE(gxbb_uart1, HHI_GCLK_MPEG1, 16); @@ -2680,6 +2673,16 @@ static MESON_GATE(gxbb_ao_ahb_bus, HHI_GCLK_AO, 2); static MESON_GATE(gxbb_ao_iface, HHI_GCLK_AO, 3); static MESON_GATE(gxbb_ao_i2c, HHI_GCLK_AO, 4); +/* AIU gates */ +static MESON_PCLK(gxbb_aiu_glue, HHI_GCLK_MPEG1, 6, &gxbb_aiu.hw); +static MESON_PCLK(gxbb_iec958, HHI_GCLK_MPEG1, 7, &gxbb_aiu_glue.hw); +static MESON_PCLK(gxbb_i2s_out, HHI_GCLK_MPEG1, 8, &gxbb_aiu_glue.hw); +static MESON_PCLK(gxbb_amclk, HHI_GCLK_MPEG1, 9, &gxbb_aiu_glue.hw); +static MESON_PCLK(gxbb_aififo2, HHI_GCLK_MPEG1, 10, &gxbb_aiu_glue.hw); +static MESON_PCLK(gxbb_mixer, HHI_GCLK_MPEG1, 11, &gxbb_aiu_glue.hw); +static MESON_PCLK(gxbb_mixer_iface, HHI_GCLK_MPEG1, 12, &gxbb_aiu_glue.hw); +static MESON_PCLK(gxbb_adc, HHI_GCLK_MPEG1, 13, &gxbb_aiu_glue.hw); + /* Array of all clocks provided by this provider */ static struct clk_hw_onecell_data gxbb_hw_onecell_data = { @@ -3100,6 +3103,7 @@ static struct clk_hw_onecell_data gxl_hw_onecell_data = { [CLKID_HDMI_SEL] = &gxbb_hdmi_sel.hw, [CLKID_HDMI_DIV] = &gxbb_hdmi_div.hw, [CLKID_HDMI] = &gxbb_hdmi.hw, + [CLKID_ACODEC] = &gxl_acodec.hw, [NR_CLKS] = NULL, }, .num = NR_CLKS, @@ -3491,6 +3495,7 @@ static struct clk_regmap *const gxl_clk_regmaps[] = { &gxl_hdmi_pll_od, &gxl_hdmi_pll_od2, &gxl_hdmi_pll_dco, + &gxl_acodec, }; static const struct meson_eeclkc_data gxbb_clkc_data = { diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index b53584fe66cf..1ee8cb7e2f5a 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -188,7 +188,7 @@ #define CLKID_HDMI_SEL 203 #define CLKID_HDMI_DIV 204 -#define NR_CLKS 206 +#define NR_CLKS 207 /* include the CLKIDs that have been made part of the DT binding */ #include <dt-bindings/clock/gxbb-clkc.h> diff --git a/drivers/clk/meson/meson8b.c 
b/drivers/clk/meson/meson8b.c index 9fd31f23b2a9..34a70c4b4899 100644 --- a/drivers/clk/meson/meson8b.c +++ b/drivers/clk/meson/meson8b.c @@ -2605,14 +2605,6 @@ static MESON_GATE(meson8b_spi, HHI_GCLK_MPEG0, 30); static MESON_GATE(meson8b_i2s_spdif, HHI_GCLK_MPEG1, 2); static MESON_GATE(meson8b_eth, HHI_GCLK_MPEG1, 3); static MESON_GATE(meson8b_demux, HHI_GCLK_MPEG1, 4); -static MESON_GATE(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6); -static MESON_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7); -static MESON_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8); -static MESON_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9); -static MESON_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10); -static MESON_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11); -static MESON_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12); -static MESON_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13); static MESON_GATE(meson8b_blkmv, HHI_GCLK_MPEG1, 14); static MESON_GATE(meson8b_aiu, HHI_GCLK_MPEG1, 15); static MESON_GATE(meson8b_uart1, HHI_GCLK_MPEG1, 16); @@ -2659,6 +2651,19 @@ static MESON_GATE(meson8b_vclk2_vencl, HHI_GCLK_OTHER, 25); static MESON_GATE(meson8b_vclk2_other, HHI_GCLK_OTHER, 26); static MESON_GATE(meson8b_edp, HHI_GCLK_OTHER, 31); +/* AIU gates */ +#define MESON_AIU_GLUE_GATE(_name, _reg, _bit) \ + MESON_PCLK(_name, _reg, _bit, &meson8b_aiu_glue.hw) + +static MESON_PCLK(meson8b_aiu_glue, HHI_GCLK_MPEG1, 6, &meson8b_aiu.hw); +static MESON_AIU_GLUE_GATE(meson8b_iec958, HHI_GCLK_MPEG1, 7); +static MESON_AIU_GLUE_GATE(meson8b_i2s_out, HHI_GCLK_MPEG1, 8); +static MESON_AIU_GLUE_GATE(meson8b_amclk, HHI_GCLK_MPEG1, 9); +static MESON_AIU_GLUE_GATE(meson8b_aififo2, HHI_GCLK_MPEG1, 10); +static MESON_AIU_GLUE_GATE(meson8b_mixer, HHI_GCLK_MPEG1, 11); +static MESON_AIU_GLUE_GATE(meson8b_mixer_iface, HHI_GCLK_MPEG1, 12); +static MESON_AIU_GLUE_GATE(meson8b_adc, HHI_GCLK_MPEG1, 13); + /* Always On (AO) domain gates */ static MESON_GATE(meson8b_ao_media_cpu, HHI_GCLK_AO, 0); diff --git a/drivers/clk/mmp/Makefile b/drivers/clk/mmp/Makefile index acc141adf087..14dc8a8a9d08 100644 --- a/drivers/clk/mmp/Makefile +++ b/drivers/clk/mmp/Makefile @@ -8,7 +8,7 @@ obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o obj-$(CONFIG_RESET_CONTROLLER) += reset.o obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o -obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o +obj-$(CONFIG_COMMON_CLK_MMP2) += clk-of-mmp2.o clk-pll.o obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c index d2cd36c54474..7a351ec65564 100644 --- a/drivers/clk/mmp/clk-mix.c +++ b/drivers/clk/mmp/clk-mix.c @@ -441,7 +441,7 @@ const struct clk_ops mmp_clk_mix_ops = { struct clk *mmp_clk_register_mix(struct device *dev, const char *name, - const char **parent_names, + const char * const *parent_names, u8 num_parents, unsigned long flags, struct mmp_clk_mix_config *config, diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 6e71591e63a0..52dc8b43acd9 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c @@ -3,6 +3,7 @@ * * Copyright (C) 2012 Marvell * Chao Xie <xiechao.mail@gmail.com> + * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk> * * This file is licensed under the terms of the GNU General Public * License version 2. 
This program is licensed "as is" without any @@ -48,17 +49,39 @@ #define APMU_SDH1 0x58 #define APMU_SDH2 0xe8 #define APMU_SDH3 0xec +#define APMU_SDH4 0x15c #define APMU_USB 0x5c #define APMU_DISP0 0x4c #define APMU_DISP1 0x110 #define APMU_CCIC0 0x50 #define APMU_CCIC1 0xf4 +#define APBC_THERMAL0 0x90 +#define APBC_THERMAL1 0x98 +#define APBC_THERMAL2 0x9c +#define APBC_THERMAL3 0xa0 #define APMU_USBHSIC0 0xf8 #define APMU_USBHSIC1 0xfc -#define MPMU_UART_PLL 0x14 +#define APMU_GPU 0xcc + +#define MPMU_FCCR 0x8 +#define MPMU_POSR 0x10 +#define MPMU_UART_PLL 0x14 +#define MPMU_PLL2_CR 0x34 +/* MMP3 specific below */ +#define MPMU_PLL3_CR 0x50 +#define MPMU_PLL3_CTRL1 0x58 +#define MPMU_PLL1_CTRL 0x5c +#define MPMU_PLL_DIFF_CTRL 0x68 +#define MPMU_PLL2_CTRL1 0x414 + +enum mmp2_clk_model { + CLK_MODEL_MMP2, + CLK_MODEL_MMP3, +}; struct mmp2_clk_unit { struct mmp_clk_unit unit; + enum mmp2_clk_model model; void __iomem *mpmu_base; void __iomem *apmu_base; void __iomem *apbc_base; @@ -67,11 +90,22 @@ struct mmp2_clk_unit { static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = { {MMP2_CLK_CLK32, "clk32", NULL, 0, 32768}, {MMP2_CLK_VCTCXO, "vctcxo", NULL, 0, 26000000}, - {MMP2_CLK_PLL1, "pll1", NULL, 0, 800000000}, - {MMP2_CLK_PLL2, "pll2", NULL, 0, 960000000}, {MMP2_CLK_USB_PLL, "usb_pll", NULL, 0, 480000000}, }; +static struct mmp_param_pll_clk pll_clks[] = { + {MMP2_CLK_PLL1, "pll1", 797330000, MPMU_FCCR, 0x4000, MPMU_POSR, 0}, + {MMP2_CLK_PLL2, "pll2", 0, MPMU_PLL2_CR, 0x0300, MPMU_PLL2_CR, 10}, +}; + +static struct mmp_param_pll_clk mmp3_pll_clks[] = { + {MMP2_CLK_PLL2, "pll1", 797330000, MPMU_FCCR, 0x4000, MPMU_POSR, 0, 26000000, MPMU_PLL1_CTRL, 25}, + {MMP2_CLK_PLL2, "pll2", 0, MPMU_PLL2_CR, 0x0300, MPMU_PLL2_CR, 10, 26000000, MPMU_PLL2_CTRL1, 25}, + {MMP3_CLK_PLL1_P, "pll1_p", 0, MPMU_PLL_DIFF_CTRL, 0x0010, 0, 0, 797330000, MPMU_PLL_DIFF_CTRL, 0}, + {MMP3_CLK_PLL2_P, "pll2_p", 0, MPMU_PLL_DIFF_CTRL, 0x0100, MPMU_PLL2_CR, 10, 26000000, MPMU_PLL_DIFF_CTRL, 5}, + {MMP3_CLK_PLL3, "pll3", 0, MPMU_PLL3_CR, 0x0300, MPMU_PLL3_CR, 10, 26000000, MPMU_PLL3_CTRL1, 25}, +}; + static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = { {MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0}, {MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0}, @@ -113,6 +147,16 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit) mmp_register_fixed_rate_clks(unit, fixed_rate_clks, ARRAY_SIZE(fixed_rate_clks)); + if (pxa_unit->model == CLK_MODEL_MMP3) { + mmp_register_pll_clks(unit, mmp3_pll_clks, + pxa_unit->mpmu_base, + ARRAY_SIZE(mmp3_pll_clks)); + } else { + mmp_register_pll_clks(unit, pll_clks, + pxa_unit->mpmu_base, + ARRAY_SIZE(pll_clks)); + } + mmp_register_fixed_factor_clks(unit, fixed_factor_clks, ARRAY_SIZE(fixed_factor_clks)); @@ -127,16 +171,16 @@ static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit) static DEFINE_SPINLOCK(uart0_lock); static DEFINE_SPINLOCK(uart1_lock); static DEFINE_SPINLOCK(uart2_lock); -static const char *uart_parent_names[] = {"uart_pll", "vctcxo"}; +static const char * const uart_parent_names[] = {"uart_pll", "vctcxo"}; static DEFINE_SPINLOCK(ssp0_lock); static DEFINE_SPINLOCK(ssp1_lock); static DEFINE_SPINLOCK(ssp2_lock); static DEFINE_SPINLOCK(ssp3_lock); -static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; +static const char * const ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"}; static DEFINE_SPINLOCK(timer_lock); -static const char *timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; +static const 
char * const timer_parent_names[] = {"clk32", "vctcxo_4", "vctcxo_2", "vctcxo"}; static DEFINE_SPINLOCK(reset_lock); @@ -176,6 +220,13 @@ static struct mmp_param_gate_clk apbc_gate_clks[] = { {MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock}, {MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock}, {MMP2_CLK_TIMER, "timer_clk", "timer_mux", CLK_SET_RATE_PARENT, APBC_TIMER, 0x7, 0x3, 0x0, 0, &timer_lock}, + {MMP2_CLK_THERMAL0, "thermal0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL0, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock}, +}; + +static struct mmp_param_gate_clk mmp3_apbc_gate_clks[] = { + {MMP3_CLK_THERMAL1, "thermal1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL1, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock}, + {MMP3_CLK_THERMAL2, "thermal2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL2, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock}, + {MMP3_CLK_THERMAL3, "thermal3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_THERMAL3, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock}, }; static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit) @@ -187,10 +238,15 @@ static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit) mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base, ARRAY_SIZE(apbc_gate_clks)); + + if (pxa_unit->model == CLK_MODEL_MMP3) { + mmp_register_gate_clks(unit, mmp3_apbc_gate_clks, pxa_unit->apbc_base, + ARRAY_SIZE(mmp3_apbc_gate_clks)); + } } static DEFINE_SPINLOCK(sdh_lock); -static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"}; +static const char * const sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"}; static struct mmp_clk_mix_config sdh_mix_config = { .reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32), }; @@ -201,11 +257,20 @@ static DEFINE_SPINLOCK(usbhsic1_lock); static DEFINE_SPINLOCK(disp0_lock); static DEFINE_SPINLOCK(disp1_lock); -static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"}; +static const char * const disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"}; static DEFINE_SPINLOCK(ccic0_lock); static DEFINE_SPINLOCK(ccic1_lock); -static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"}; +static const char * const ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"}; + +static DEFINE_SPINLOCK(gpu_lock); +static const char * const mmp2_gpu_gc_parent_names[] = {"pll1_2", "pll1_3", "pll2_2", "pll2_3", "pll2", "usb_pll"}; +static u32 mmp2_gpu_gc_parent_table[] = { 0x0000, 0x0040, 0x0080, 0x00c0, 0x1000, 0x1040 }; +static const char * const mmp2_gpu_bus_parent_names[] = {"pll1_4", "pll2", "pll2_2", "usb_pll"}; +static u32 mmp2_gpu_bus_parent_table[] = { 0x0000, 0x0020, 0x0030, 0x4020 }; +static const char * const mmp3_gpu_bus_parent_names[] = {"pll1_4", "pll1_6", "pll1_2", "pll2_2"}; +static const char * const mmp3_gpu_gc_parent_names[] = {"pll1", "pll2", "pll1_p", "pll2_p"}; + static struct mmp_clk_mix_config ccic0_mix_config = { .reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32), }; @@ -218,6 +283,15 @@ static struct mmp_param_mux_clk apmu_mux_clks[] = { {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, }; +static struct mmp_param_mux_clk mmp3_apmu_mux_clks[] = { + {0, "gpu_bus_mux", mmp3_gpu_bus_parent_names, ARRAY_SIZE(mmp3_gpu_bus_parent_names), + CLK_SET_RATE_PARENT, APMU_GPU, 4, 2, 0, &gpu_lock}, + {0, "gpu_3d_mux", 
mmp3_gpu_gc_parent_names, ARRAY_SIZE(mmp3_gpu_gc_parent_names), + CLK_SET_RATE_PARENT, APMU_GPU, 6, 2, 0, &gpu_lock}, + {0, "gpu_2d_mux", mmp3_gpu_gc_parent_names, ARRAY_SIZE(mmp3_gpu_gc_parent_names), + CLK_SET_RATE_PARENT, APMU_GPU, 12, 2, 0, &gpu_lock}, +}; + static struct mmp_param_div_clk apmu_div_clks[] = { {0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock}, {0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock}, @@ -226,6 +300,11 @@ static struct mmp_param_div_clk apmu_div_clks[] = { {0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock}, }; +static struct mmp_param_div_clk mmp3_apmu_div_clks[] = { + {0, "gpu_3d_div", "gpu_3d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 24, 4, 0, &gpu_lock}, + {0, "gpu_2d_div", "gpu_2d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 28, 4, 0, &gpu_lock}, +}; + static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock}, {MMP2_CLK_USBHSIC0, "usbhsic0_clk", "usb_pll", 0, APMU_USBHSIC0, 0x1b, 0x1b, 0x0, 0, &usbhsic0_lock}, @@ -235,8 +314,8 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, - {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock}, - {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock}, + {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x12, 0x12, 0x0, 0, &disp0_lock}, + {MMP2_CLK_DISP0_LCDC, "disp0_lcdc_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x09, 0x09, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock}, {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x09, 0x09, 0x0, 0, &disp1_lock}, {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock}, @@ -246,6 +325,17 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, + {MMP2_CLK_GPU_BUS, "gpu_bus_clk", "gpu_bus_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0xa, 0xa, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock}, +}; + +static struct mmp_param_gate_clk mmp2_apmu_gate_clks[] = { + {MMP2_CLK_GPU_3D, "gpu_3d_clk", "gpu_3d_mux", CLK_SET_RATE_PARENT, APMU_GPU, 0x5, 0x5, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock}, +}; + +static struct mmp_param_gate_clk mmp3_apmu_gate_clks[] = { + {MMP3_CLK_SDH4, "sdh4_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH4, 0x1b, 0x1b, 0x0, 0, &sdh_lock}, + {MMP3_CLK_GPU_3D, "gpu_3d_clk", "gpu_3d_div", CLK_SET_RATE_PARENT, APMU_GPU, 0x5, 0x5, 0x0, MMP_CLK_GATE_NEED_DELAY, &gpu_lock}, + {MMP3_CLK_GPU_2D, "gpu_2d_clk", "gpu_2d_div", CLK_SET_RATE_PARENT, APMU_GPU, 0x1c0000, 0x1c0000, 0x0, 
MMP_CLK_GATE_NEED_DELAY, &gpu_lock}, }; static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) @@ -281,6 +371,34 @@ static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base, ARRAY_SIZE(apmu_gate_clks)); + + if (pxa_unit->model == CLK_MODEL_MMP3) { + mmp_register_mux_clks(unit, mmp3_apmu_mux_clks, pxa_unit->apmu_base, + ARRAY_SIZE(mmp3_apmu_mux_clks)); + + mmp_register_div_clks(unit, mmp3_apmu_div_clks, pxa_unit->apmu_base, + ARRAY_SIZE(mmp3_apmu_div_clks)); + + mmp_register_gate_clks(unit, mmp3_apmu_gate_clks, pxa_unit->apmu_base, + ARRAY_SIZE(mmp3_apmu_gate_clks)); + } else { + clk_register_mux_table(NULL, "gpu_3d_mux", mmp2_gpu_gc_parent_names, + ARRAY_SIZE(mmp2_gpu_gc_parent_names), + CLK_SET_RATE_PARENT, + pxa_unit->apmu_base + APMU_GPU, + 0, 0x10c0, 0, + mmp2_gpu_gc_parent_table, &gpu_lock); + + clk_register_mux_table(NULL, "gpu_bus_mux", mmp2_gpu_bus_parent_names, + ARRAY_SIZE(mmp2_gpu_bus_parent_names), + CLK_SET_RATE_PARENT, + pxa_unit->apmu_base + APMU_GPU, + 0, 0x4030, 0, + mmp2_gpu_bus_parent_table, &gpu_lock); + + mmp_register_gate_clks(unit, mmp2_apmu_gate_clks, pxa_unit->apmu_base, + ARRAY_SIZE(mmp2_apmu_gate_clks)); + } } static void mmp2_clk_reset_init(struct device_node *np, @@ -313,6 +431,11 @@ static void __init mmp2_clk_init(struct device_node *np) if (!pxa_unit) return; + if (of_device_is_compatible(np, "marvell,mmp3-clock")) + pxa_unit->model = CLK_MODEL_MMP3; + else + pxa_unit->model = CLK_MODEL_MMP2; + pxa_unit->mpmu_base = of_iomap(np, 0); if (!pxa_unit->mpmu_base) { pr_err("failed to map mpmu registers\n"); @@ -352,3 +475,4 @@ free_memory: } CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init); +CLK_OF_DECLARE(mmp3_clk, "marvell,mmp3-clock", mmp2_clk_init); diff --git a/drivers/clk/mmp/clk-pll.c b/drivers/clk/mmp/clk-pll.c new file mode 100644 index 000000000000..7077be293871 --- /dev/null +++ b/drivers/clk/mmp/clk-pll.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * MMP PLL clock rate calculation + * + * Copyright (C) 2020 Lubomir Rintel <lkundrak@v3.sk> + */ + +#include <linux/clk-provider.h> +#include <linux/slab.h> +#include <linux/io.h> + +#include "clk.h" + +#define to_clk_mmp_pll(hw) container_of(hw, struct mmp_clk_pll, hw) + +struct mmp_clk_pll { + struct clk_hw hw; + unsigned long default_rate; + void __iomem *enable_reg; + u32 enable; + void __iomem *reg; + u8 shift; + + unsigned long input_rate; + void __iomem *postdiv_reg; + u8 postdiv_shift; +}; + +static int mmp_clk_pll_is_enabled(struct clk_hw *hw) +{ + struct mmp_clk_pll *pll = to_clk_mmp_pll(hw); + u32 val; + + val = readl_relaxed(pll->enable_reg); + if ((val & pll->enable) == pll->enable) + return 1; + + /* Some PLLs, if not software controlled, output default clock. 
*/ + if (pll->default_rate > 0) + return 1; + + return 0; +} + +static unsigned long mmp_clk_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct mmp_clk_pll *pll = to_clk_mmp_pll(hw); + u32 fbdiv, refdiv, postdiv; + u64 rate; + u32 val; + + val = readl_relaxed(pll->enable_reg); + if ((val & pll->enable) != pll->enable) + return pll->default_rate; + + if (pll->reg) { + val = readl_relaxed(pll->reg); + fbdiv = (val >> pll->shift) & 0x1ff; + refdiv = (val >> (pll->shift + 9)) & 0x1f; + } else { + fbdiv = 2; + refdiv = 1; + } + + if (pll->postdiv_reg) { + /* MMP3 clock rate calculation */ + static const u8 postdivs[] = {2, 3, 4, 5, 6, 8, 10, 12, 16}; + + val = readl_relaxed(pll->postdiv_reg); + postdiv = (val >> pll->postdiv_shift) & 0x7; + + rate = pll->input_rate; + rate *= 2 * fbdiv; + do_div(rate, refdiv); + do_div(rate, postdivs[postdiv]); + } else { + /* MMP2 clock rate calculation */ + if (refdiv == 3) { + rate = 19200000; + } else if (refdiv == 4) { + rate = 26000000; + } else { + pr_err("bad refdiv: %d (0x%08x)\n", refdiv, val); + return 0; + } + + rate *= fbdiv + 2; + do_div(rate, refdiv + 2); + } + + return (unsigned long)rate; +} + +static const struct clk_ops mmp_clk_pll_ops = { + .is_enabled = mmp_clk_pll_is_enabled, + .recalc_rate = mmp_clk_pll_recalc_rate, +}; + +struct clk *mmp_clk_register_pll(char *name, + unsigned long default_rate, + void __iomem *enable_reg, u32 enable, + void __iomem *reg, u8 shift, + unsigned long input_rate, + void __iomem *postdiv_reg, u8 postdiv_shift) +{ + struct mmp_clk_pll *pll; + struct clk *clk; + struct clk_init_data init; + + pll = kzalloc(sizeof(*pll), GFP_KERNEL); + if (!pll) + return ERR_PTR(-ENOMEM); + + init.name = name; + init.ops = &mmp_clk_pll_ops; + init.flags = 0; + init.parent_names = NULL; + init.num_parents = 0; + + pll->default_rate = default_rate; + pll->enable_reg = enable_reg; + pll->enable = enable; + pll->reg = reg; + pll->shift = shift; + + pll->input_rate = input_rate; + pll->postdiv_reg = postdiv_reg; + pll->postdiv_shift = postdiv_shift; + + pll->hw.init = &init; + + clk = clk_register(NULL, &pll->hw); + + if (IS_ERR(clk)) + kfree(pll); + + return clk; +} diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c index ca7d37e2c7be..317123641d1e 100644 --- a/drivers/clk/mmp/clk.c +++ b/drivers/clk/mmp/clk.c @@ -176,6 +176,37 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit, } } +void mmp_register_pll_clks(struct mmp_clk_unit *unit, + struct mmp_param_pll_clk *clks, + void __iomem *base, int size) +{ + struct clk *clk; + int i; + + for (i = 0; i < size; i++) { + void __iomem *reg = NULL; + + if (clks[i].offset) + reg = base + clks[i].offset; + + clk = mmp_clk_register_pll(clks[i].name, + clks[i].default_rate, + base + clks[i].enable_offset, + clks[i].enable, + reg, clks[i].shift, + clks[i].input_rate, + base + clks[i].postdiv_offset, + clks[i].postdiv_shift); + if (IS_ERR(clk)) { + pr_err("%s: failed to register clock %s\n", + __func__, clks[i].name); + continue; + } + if (clks[i].id) + unit->clk_table[clks[i].id] = clk; + } +} + void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id, struct clk *clk) { diff --git a/drivers/clk/mmp/clk.h b/drivers/clk/mmp/clk.h index 70bb73257647..971b4d6d992f 100644 --- a/drivers/clk/mmp/clk.h +++ b/drivers/clk/mmp/clk.h @@ -97,7 +97,7 @@ struct mmp_clk_mix { extern const struct clk_ops mmp_clk_mix_ops; extern struct clk *mmp_clk_register_mix(struct device *dev, const char *name, - const char **parent_names, + const char * const *parent_names, u8 
num_parents, unsigned long flags, struct mmp_clk_mix_config *config, @@ -124,9 +124,6 @@ extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name, u32 val_disable, unsigned int gate_flags, spinlock_t *lock); - -extern struct clk *mmp_clk_register_pll2(const char *name, - const char *parent_name, unsigned long flags); extern struct clk *mmp_clk_register_apbc(const char *name, const char *parent_name, void __iomem *base, unsigned int delay, unsigned int apbc_flags, spinlock_t *lock); @@ -196,7 +193,7 @@ void mmp_register_gate_clks(struct mmp_clk_unit *unit, struct mmp_param_mux_clk { unsigned int id; char *name; - const char **parent_name; + const char * const *parent_name; u8 num_parents; unsigned long flags; unsigned long offset; @@ -224,6 +221,30 @@ void mmp_register_div_clks(struct mmp_clk_unit *unit, struct mmp_param_div_clk *clks, void __iomem *base, int size); +struct mmp_param_pll_clk { + unsigned int id; + char *name; + unsigned long default_rate; + unsigned long enable_offset; + u32 enable; + unsigned long offset; + u8 shift; + /* MMP3 specific: */ + unsigned long input_rate; + unsigned long postdiv_offset; + unsigned long postdiv_shift; +}; +void mmp_register_pll_clks(struct mmp_clk_unit *unit, + struct mmp_param_pll_clk *clks, + void __iomem *base, int size); + +extern struct clk *mmp_clk_register_pll(char *name, + unsigned long default_rate, + void __iomem *enable_reg, u32 enable, + void __iomem *reg, u8 shift, + unsigned long input_rate, + void __iomem *postdiv_reg, u8 postdiv_shift); + #define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc) \ { \ .width_div = (w_d), \ diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 15cdcdc9b3b8..11ec6f466467 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -280,6 +280,15 @@ config SC_GPUCC_7180 Say Y if you want to support graphics controller devices and functionality such as 3D graphics. +config SC_MSS_7180 + tristate "SC7180 Modem Clock Controller" + select SC_GCC_7180 + help + Support for the Modem Subsystem clock controller on Qualcomm + Technologies, Inc on SC7180 devices. + Say Y if you want to use the Modem branch clocks of the Modem + subsystem clock controller to reset the MSS subsystem. + config SC_VIDEOCC_7180 tristate "SC7180 Video Clock Controller" select SC_GCC_7180 @@ -366,6 +375,13 @@ config SM_GCC_8150 Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, SD/UFS, PCIe etc. +config SM_GCC_8250 + tristate "SM8250 Global Clock Controller" + help + Support for the global clock controller on SM8250 devices. + Say Y if you want to use peripheral devices such as UART, + SPI, I2C, USB, SD/UFS, PCIe etc. 
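Both new Qualcomm clock-controller options above are ordinary tristate symbols, so wiring them into a build is a plain defconfig addition; a hypothetical fragment (symbol names come from the hunk, the =m/=y split is only illustrative):

    CONFIG_SC_MSS_7180=m
    CONFIG_SM_GCC_8250=y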
+ config SPMI_PMIC_CLKDIV tristate "SPMI PMIC clkdiv Support" depends on SPMI || COMPILE_TEST diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 656a87e629d4..691efbf7e81f 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -50,6 +50,7 @@ obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o +obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o obj-$(CONFIG_SDM_CAMCC_845) += camcc-sdm845.o obj-$(CONFIG_SDM_DISPCC_845) += dispcc-sdm845.o @@ -59,6 +60,7 @@ obj-$(CONFIG_SDM_GPUCC_845) += gpucc-sdm845.o obj-$(CONFIG_SDM_LPASSCC_845) += lpasscc-sdm845.o obj-$(CONFIG_SDM_VIDEOCC_845) += videocc-sdm845.o obj-$(CONFIG_SM_GCC_8150) += gcc-sm8150.o +obj-$(CONFIG_SM_GCC_8250) += gcc-sm8250.o obj-$(CONFIG_SPMI_PMIC_CLKDIV) += clk-spmi-pmic-div.o obj-$(CONFIG_KPSS_XCC) += kpss-xcc.o obj-$(CONFIG_QCOM_HFPLL) += hfpll.o diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c index 7c2936da9b14..9b2dfa08acb2 100644 --- a/drivers/clk/qcom/clk-alpha-pll.c +++ b/drivers/clk/qcom/clk-alpha-pll.c @@ -52,6 +52,7 @@ #define PLL_CONFIG_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_CONFIG_CTL_U1]) #define PLL_TEST_CTL(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL]) #define PLL_TEST_CTL_U(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U]) +#define PLL_TEST_CTL_U1(p) ((p)->offset + (p)->regs[PLL_OFF_TEST_CTL_U1]) #define PLL_STATUS(p) ((p)->offset + (p)->regs[PLL_OFF_STATUS]) #define PLL_OPMODE(p) ((p)->offset + (p)->regs[PLL_OFF_OPMODE]) #define PLL_FRAC(p) ((p)->offset + (p)->regs[PLL_OFF_FRAC]) @@ -116,6 +117,22 @@ const u8 clk_alpha_pll_regs[][PLL_OFF_MAX_REGS] = { [PLL_OFF_ALPHA_VAL] = 0x40, [PLL_OFF_CAL_VAL] = 0x44, }, + [CLK_ALPHA_PLL_TYPE_LUCID] = { + [PLL_OFF_L_VAL] = 0x04, + [PLL_OFF_CAL_L_VAL] = 0x08, + [PLL_OFF_USER_CTL] = 0x0c, + [PLL_OFF_USER_CTL_U] = 0x10, + [PLL_OFF_USER_CTL_U1] = 0x14, + [PLL_OFF_CONFIG_CTL] = 0x18, + [PLL_OFF_CONFIG_CTL_U] = 0x1c, + [PLL_OFF_CONFIG_CTL_U1] = 0x20, + [PLL_OFF_TEST_CTL] = 0x24, + [PLL_OFF_TEST_CTL_U] = 0x28, + [PLL_OFF_TEST_CTL_U1] = 0x2c, + [PLL_OFF_STATUS] = 0x30, + [PLL_OFF_OPMODE] = 0x38, + [PLL_OFF_ALPHA_VAL] = 0x40, + }, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); @@ -134,15 +151,14 @@ EXPORT_SYMBOL_GPL(clk_alpha_pll_regs); #define PLL_HUAYRA_N_MASK 0xff #define PLL_HUAYRA_ALPHA_WIDTH 16 -#define FABIA_OPMODE_STANDBY 0x0 -#define FABIA_OPMODE_RUN 0x1 - -#define FABIA_PLL_OUT_MASK 0x7 -#define FABIA_PLL_RATE_MARGIN 500 +#define PLL_STANDBY 0x0 +#define PLL_RUN 0x1 +#define PLL_OUT_MASK 0x7 +#define PLL_RATE_MARGIN 500 -#define TRION_PLL_STANDBY 0x0 -#define TRION_PLL_RUN 0x1 -#define TRION_PLL_OUT_MASK 0x7 +/* LUCID PLL specific settings and offsets */ +#define LUCID_PLL_CAL_VAL 0x44 +#define LUCID_PCAL_DONE BIT(26) #define pll_alpha_width(p) \ ((PLL_ALPHA_VAL_U(p) - PLL_ALPHA_VAL(p) == 4) ? 
\ @@ -544,7 +560,8 @@ static int __clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate, rate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); vco = alpha_pll_find_vco(pll, rate); if (pll->vco_table && !vco) { - pr_err("alpha pll not in a valid vco range\n"); + pr_err("%s: alpha pll not in a valid vco range\n", + clk_hw_get_name(hw)); return -EINVAL; } @@ -722,7 +739,7 @@ static int alpha_pll_huayra_set_rate(struct clk_hw *hw, unsigned long rate, */ if (clk_alpha_pll_is_enabled(hw)) { if (cur_alpha != a) { - pr_err("clock needs to be gated %s\n", + pr_err("%s: clock needs to be gated\n", clk_hw_get_name(hw)); return -EBUSY; } @@ -765,7 +782,7 @@ static int trion_pll_is_enabled(struct clk_alpha_pll *pll, if (ret) return 0; - return ((opmode_regval & TRION_PLL_RUN) && (mode_regval & PLL_OUTCTRL)); + return ((opmode_regval & PLL_RUN) && (mode_regval & PLL_OUTCTRL)); } static int clk_trion_pll_is_enabled(struct clk_hw *hw) @@ -795,7 +812,7 @@ static int clk_trion_pll_enable(struct clk_hw *hw) } /* Set operation mode to RUN */ - regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_RUN); + regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN); ret = wait_for_pll_enable_lock(pll); if (ret) @@ -803,7 +820,7 @@ static int clk_trion_pll_enable(struct clk_hw *hw) /* Enable the PLL outputs */ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), - TRION_PLL_OUT_MASK, TRION_PLL_OUT_MASK); + PLL_OUT_MASK, PLL_OUT_MASK); if (ret) return ret; @@ -836,12 +853,12 @@ static void clk_trion_pll_disable(struct clk_hw *hw) /* Disable the PLL outputs */ ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), - TRION_PLL_OUT_MASK, 0); + PLL_OUT_MASK, 0); if (ret) return; /* Place the PLL mode in STANDBY */ - regmap_write(regmap, PLL_OPMODE(pll), TRION_PLL_STANDBY); + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); } @@ -849,33 +866,12 @@ static unsigned long clk_trion_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); - struct regmap *regmap = pll->clkr.regmap; - u32 l, frac; - u64 prate = parent_rate; - - regmap_read(regmap, PLL_L_VAL(pll), &l); - regmap_read(regmap, PLL_ALPHA_VAL(pll), &frac); - - return alpha_pll_calc_rate(prate, l, frac, ALPHA_REG_16BIT_WIDTH); -} - -static long clk_trion_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); - unsigned long min_freq, max_freq; - u32 l; - u64 a; - - rate = alpha_pll_round_rate(rate, *prate, - &l, &a, ALPHA_REG_16BIT_WIDTH); - if (!pll->vco_table || alpha_pll_find_vco(pll, rate)) - return rate; + u32 l, frac, alpha_width = pll_alpha_width(pll); - min_freq = pll->vco_table[0].min_freq; - max_freq = pll->vco_table[pll->num_vco - 1].max_freq; + regmap_read(pll->clkr.regmap, PLL_L_VAL(pll), &l); + regmap_read(pll->clkr.regmap, PLL_ALPHA_VAL(pll), &frac); - return clamp(rate, min_freq, max_freq); + return alpha_pll_calc_rate(parent_rate, l, frac, alpha_width); } const struct clk_ops clk_alpha_pll_fixed_ops = { @@ -921,7 +917,7 @@ const struct clk_ops clk_trion_fixed_pll_ops = { .disable = clk_trion_pll_disable, .is_enabled = clk_trion_pll_is_enabled, .recalc_rate = clk_trion_pll_recalc_rate, - .round_rate = clk_trion_pll_round_rate, + .round_rate = clk_alpha_pll_round_rate, }; EXPORT_SYMBOL_GPL(clk_trion_fixed_pll_ops); @@ -1088,14 +1084,14 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw) return ret; /* Skip If PLL is already running */ - if 
((opmode_val & FABIA_OPMODE_RUN) && (val & PLL_OUTCTRL)) + if ((opmode_val & PLL_RUN) && (val & PLL_OUTCTRL)) return 0; ret = regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); if (ret) return ret; - ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY); + ret = regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); if (ret) return ret; @@ -1104,7 +1100,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw) if (ret) return ret; - ret = regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_RUN); + ret = regmap_write(regmap, PLL_OPMODE(pll), PLL_RUN); if (ret) return ret; @@ -1113,7 +1109,7 @@ static int alpha_pll_fabia_enable(struct clk_hw *hw) return ret; ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), - FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK); + PLL_OUT_MASK, PLL_OUT_MASK); if (ret) return ret; @@ -1143,13 +1139,12 @@ static void alpha_pll_fabia_disable(struct clk_hw *hw) return; /* Disable main outputs */ - ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), FABIA_PLL_OUT_MASK, - 0); + ret = regmap_update_bits(regmap, PLL_USER_CTL(pll), PLL_OUT_MASK, 0); if (ret) return; /* Place the PLL in STANDBY */ - regmap_write(regmap, PLL_OPMODE(pll), FABIA_OPMODE_STANDBY); + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); } static unsigned long alpha_pll_fabia_recalc_rate(struct clk_hw *hw, @@ -1170,7 +1165,7 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); u32 l, alpha_width = pll_alpha_width(pll); u64 a; - unsigned long rrate; + unsigned long rrate, max = rate + PLL_RATE_MARGIN; rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); @@ -1178,8 +1173,9 @@ static int alpha_pll_fabia_set_rate(struct clk_hw *hw, unsigned long rate, * Due to limited number of bits for fractional rate programming, the * rounded up rate could be marginally higher than the requested rate. */ - if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) { - pr_err("Call set rate on the PLL with rounded rates!\n"); + if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) { + pr_err("%s: Rounded rate %lu not within range [%lu, %lu)\n", + clk_hw_get_name(hw), rrate, rate, max); return -EINVAL; } @@ -1196,6 +1192,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw) struct clk_hw *parent_hw; unsigned long cal_freq, rrate; u32 cal_l, val, alpha_width = pll_alpha_width(pll); + const char *name = clk_hw_get_name(hw); u64 a; int ret; @@ -1210,7 +1207,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw) vco = alpha_pll_find_vco(pll, clk_hw_get_rate(hw)); if (!vco) { - pr_err("alpha pll: not in a valid vco range\n"); + pr_err("%s: alpha pll not in a valid vco range\n", name); return -EINVAL; } @@ -1227,7 +1224,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw) * Due to a limited number of bits for fractional rate programming, the * rounded up rate could be marginally higher than the requested rate. 
*/ - if (rrate > (cal_freq + FABIA_PLL_RATE_MARGIN) || rrate < cal_freq) + if (rrate > (cal_freq + PLL_RATE_MARGIN) || rrate < cal_freq) return -EINVAL; /* Setup PLL for calibration frequency */ @@ -1236,7 +1233,7 @@ static int alpha_pll_fabia_prepare(struct clk_hw *hw) /* Bringup the PLL at calibration frequency */ ret = clk_alpha_pll_enable(hw); if (ret) { - pr_err("alpha pll calibration failed\n"); + pr_err("%s: alpha pll calibration failed\n", name); return ret; } @@ -1394,3 +1391,175 @@ const struct clk_ops clk_alpha_pll_postdiv_fabia_ops = { .set_rate = clk_alpha_pll_postdiv_fabia_set_rate, }; EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_fabia_ops); + +/** + * clk_lucid_pll_configure - configure the lucid pll + * + * @pll: clk alpha pll + * @regmap: register map + * @config: configuration to apply for pll + */ +void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config) +{ + if (config->l) + regmap_write(regmap, PLL_L_VAL(pll), config->l); + + regmap_write(regmap, PLL_CAL_L_VAL(pll), LUCID_PLL_CAL_VAL); + + if (config->alpha) + regmap_write(regmap, PLL_ALPHA_VAL(pll), config->alpha); + + if (config->config_ctl_val) + regmap_write(regmap, PLL_CONFIG_CTL(pll), + config->config_ctl_val); + + if (config->config_ctl_hi_val) + regmap_write(regmap, PLL_CONFIG_CTL_U(pll), + config->config_ctl_hi_val); + + if (config->config_ctl_hi1_val) + regmap_write(regmap, PLL_CONFIG_CTL_U1(pll), + config->config_ctl_hi1_val); + + if (config->user_ctl_val) + regmap_write(regmap, PLL_USER_CTL(pll), + config->user_ctl_val); + + if (config->user_ctl_hi_val) + regmap_write(regmap, PLL_USER_CTL_U(pll), + config->user_ctl_hi_val); + + if (config->user_ctl_hi1_val) + regmap_write(regmap, PLL_USER_CTL_U1(pll), + config->user_ctl_hi1_val); + + if (config->test_ctl_val) + regmap_write(regmap, PLL_TEST_CTL(pll), + config->test_ctl_val); + + if (config->test_ctl_hi_val) + regmap_write(regmap, PLL_TEST_CTL_U(pll), + config->test_ctl_hi_val); + + if (config->test_ctl_hi1_val) + regmap_write(regmap, PLL_TEST_CTL_U1(pll), + config->test_ctl_hi1_val); + + regmap_update_bits(regmap, PLL_MODE(pll), PLL_UPDATE_BYPASS, + PLL_UPDATE_BYPASS); + + /* Disable PLL output */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_OUTCTRL, 0); + + /* Set operation mode to OFF */ + regmap_write(regmap, PLL_OPMODE(pll), PLL_STANDBY); + + /* Place the PLL in STANDBY mode */ + regmap_update_bits(regmap, PLL_MODE(pll), PLL_RESET_N, PLL_RESET_N); +} +EXPORT_SYMBOL_GPL(clk_lucid_pll_configure); + +/* + * The Lucid PLL requires a power-on self-calibration which happens when the + * PLL comes out of reset. Calibrate in case it is not completed. + */ +static int alpha_pll_lucid_prepare(struct clk_hw *hw) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + u32 regval; + int ret; + + /* Return early if calibration is not needed. 
*/ + regmap_read(pll->clkr.regmap, PLL_STATUS(pll), ®val); + if (regval & LUCID_PCAL_DONE) + return 0; + + /* On/off to calibrate */ + ret = clk_trion_pll_enable(hw); + if (!ret) + clk_trion_pll_disable(hw); + + return ret; +} + +static int alpha_pll_lucid_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct clk_alpha_pll *pll = to_clk_alpha_pll(hw); + unsigned long rrate; + u32 regval, l, alpha_width = pll_alpha_width(pll); + u64 a; + int ret; + + rrate = alpha_pll_round_rate(rate, prate, &l, &a, alpha_width); + + /* + * Due to a limited number of bits for fractional rate programming, the + * rounded up rate could be marginally higher than the requested rate. + */ + if (rrate > (rate + PLL_RATE_MARGIN) || rrate < rate) { + pr_err("Call set rate on the PLL with rounded rates!\n"); + return -EINVAL; + } + + regmap_write(pll->clkr.regmap, PLL_L_VAL(pll), l); + regmap_write(pll->clkr.regmap, PLL_ALPHA_VAL(pll), a); + + /* Latch the PLL input */ + ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), + PLL_UPDATE, PLL_UPDATE); + if (ret) + return ret; + + /* Wait for 2 reference cycles before checking the ACK bit. */ + udelay(1); + regmap_read(pll->clkr.regmap, PLL_MODE(pll), ®val); + if (!(regval & ALPHA_PLL_ACK_LATCH)) { + pr_err("Lucid PLL latch failed. Output may be unstable!\n"); + return -EINVAL; + } + + /* Return the latch input to 0 */ + ret = regmap_update_bits(pll->clkr.regmap, PLL_MODE(pll), + PLL_UPDATE, 0); + if (ret) + return ret; + + if (clk_hw_is_enabled(hw)) { + ret = wait_for_pll_enable_lock(pll); + if (ret) + return ret; + } + + /* Wait for PLL output to stabilize */ + udelay(100); + return 0; +} + +const struct clk_ops clk_alpha_pll_lucid_ops = { + .prepare = alpha_pll_lucid_prepare, + .enable = clk_trion_pll_enable, + .disable = clk_trion_pll_disable, + .is_enabled = clk_trion_pll_is_enabled, + .recalc_rate = clk_trion_pll_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, + .set_rate = alpha_pll_lucid_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_alpha_pll_lucid_ops); + +const struct clk_ops clk_alpha_pll_fixed_lucid_ops = { + .enable = clk_trion_pll_enable, + .disable = clk_trion_pll_disable, + .is_enabled = clk_trion_pll_is_enabled, + .recalc_rate = clk_trion_pll_recalc_rate, + .round_rate = clk_alpha_pll_round_rate, +}; +EXPORT_SYMBOL_GPL(clk_alpha_pll_fixed_lucid_ops); + +const struct clk_ops clk_alpha_pll_postdiv_lucid_ops = { + .recalc_rate = clk_alpha_pll_postdiv_fabia_recalc_rate, + .round_rate = clk_alpha_pll_postdiv_fabia_round_rate, + .set_rate = clk_alpha_pll_postdiv_fabia_set_rate, +}; +EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_lucid_ops); diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h index fbc1f67c7a26..704674a153b6 100644 --- a/drivers/clk/qcom/clk-alpha-pll.h +++ b/drivers/clk/qcom/clk-alpha-pll.h @@ -14,6 +14,7 @@ enum { CLK_ALPHA_PLL_TYPE_BRAMMO, CLK_ALPHA_PLL_TYPE_FABIA, CLK_ALPHA_PLL_TYPE_TRION, + CLK_ALPHA_PLL_TYPE_LUCID, CLK_ALPHA_PLL_TYPE_MAX, }; @@ -30,6 +31,7 @@ enum { PLL_OFF_CONFIG_CTL_U1, PLL_OFF_TEST_CTL, PLL_OFF_TEST_CTL_U, + PLL_OFF_TEST_CTL_U1, PLL_OFF_STATUS, PLL_OFF_OPMODE, PLL_OFF_FRAC, @@ -94,10 +96,13 @@ struct alpha_pll_config { u32 alpha_hi; u32 config_ctl_val; u32 config_ctl_hi_val; + u32 config_ctl_hi1_val; u32 user_ctl_val; u32 user_ctl_hi_val; + u32 user_ctl_hi1_val; u32 test_ctl_val; u32 test_ctl_hi_val; + u32 test_ctl_hi1_val; u32 main_output_mask; u32 aux_output_mask; u32 aux2_output_mask; @@ -123,10 +128,17 @@ extern const struct clk_ops clk_alpha_pll_fabia_ops; 
extern const struct clk_ops clk_alpha_pll_fixed_fabia_ops; extern const struct clk_ops clk_alpha_pll_postdiv_fabia_ops; +extern const struct clk_ops clk_alpha_pll_lucid_ops; +extern const struct clk_ops clk_alpha_pll_fixed_lucid_ops; +extern const struct clk_ops clk_alpha_pll_postdiv_lucid_ops; + void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, const struct alpha_pll_config *config); +void clk_lucid_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap, + const struct alpha_pll_config *config); + extern const struct clk_ops clk_trion_fixed_pll_ops; extern const struct clk_ops clk_trion_pll_postdiv_ops; diff --git a/drivers/clk/qcom/clk-rpm.c b/drivers/clk/qcom/clk-rpm.c index 9e3110a71f12..f71d228fd6bd 100644 --- a/drivers/clk/qcom/clk-rpm.c +++ b/drivers/clk/qcom/clk-rpm.c @@ -543,10 +543,45 @@ static const struct rpm_clk_desc rpm_clk_apq8064 = { .num_clks = ARRAY_SIZE(apq8064_clks), }; +/* ipq806x */ +DEFINE_CLK_RPM(ipq806x, afab_clk, afab_a_clk, QCOM_RPM_APPS_FABRIC_CLK); +DEFINE_CLK_RPM(ipq806x, cfpb_clk, cfpb_a_clk, QCOM_RPM_CFPB_CLK); +DEFINE_CLK_RPM(ipq806x, daytona_clk, daytona_a_clk, QCOM_RPM_DAYTONA_FABRIC_CLK); +DEFINE_CLK_RPM(ipq806x, ebi1_clk, ebi1_a_clk, QCOM_RPM_EBI1_CLK); +DEFINE_CLK_RPM(ipq806x, sfab_clk, sfab_a_clk, QCOM_RPM_SYS_FABRIC_CLK); +DEFINE_CLK_RPM(ipq806x, sfpb_clk, sfpb_a_clk, QCOM_RPM_SFPB_CLK); +DEFINE_CLK_RPM(ipq806x, nss_fabric_0_clk, nss_fabric_0_a_clk, QCOM_RPM_NSS_FABRIC_0_CLK); +DEFINE_CLK_RPM(ipq806x, nss_fabric_1_clk, nss_fabric_1_a_clk, QCOM_RPM_NSS_FABRIC_1_CLK); + +static struct clk_rpm *ipq806x_clks[] = { + [RPM_APPS_FABRIC_CLK] = &ipq806x_afab_clk, + [RPM_APPS_FABRIC_A_CLK] = &ipq806x_afab_a_clk, + [RPM_CFPB_CLK] = &ipq806x_cfpb_clk, + [RPM_CFPB_A_CLK] = &ipq806x_cfpb_a_clk, + [RPM_DAYTONA_FABRIC_CLK] = &ipq806x_daytona_clk, + [RPM_DAYTONA_FABRIC_A_CLK] = &ipq806x_daytona_a_clk, + [RPM_EBI1_CLK] = &ipq806x_ebi1_clk, + [RPM_EBI1_A_CLK] = &ipq806x_ebi1_a_clk, + [RPM_SYS_FABRIC_CLK] = &ipq806x_sfab_clk, + [RPM_SYS_FABRIC_A_CLK] = &ipq806x_sfab_a_clk, + [RPM_SFPB_CLK] = &ipq806x_sfpb_clk, + [RPM_SFPB_A_CLK] = &ipq806x_sfpb_a_clk, + [RPM_NSS_FABRIC_0_CLK] = &ipq806x_nss_fabric_0_clk, + [RPM_NSS_FABRIC_0_A_CLK] = &ipq806x_nss_fabric_0_a_clk, + [RPM_NSS_FABRIC_1_CLK] = &ipq806x_nss_fabric_1_clk, + [RPM_NSS_FABRIC_1_A_CLK] = &ipq806x_nss_fabric_1_a_clk, +}; + +static const struct rpm_clk_desc rpm_clk_ipq806x = { + .clks = ipq806x_clks, + .num_clks = ARRAY_SIZE(ipq806x_clks), +}; + static const struct of_device_id rpm_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8660", .data = &rpm_clk_msm8660 }, { .compatible = "qcom,rpmcc-apq8060", .data = &rpm_clk_msm8660 }, { .compatible = "qcom,rpmcc-apq8064", .data = &rpm_clk_apq8064 }, + { .compatible = "qcom,rpmcc-ipq806x", .data = &rpm_clk_ipq806x }, { } }; MODULE_DEVICE_TABLE(of, rpm_clk_match_table); diff --git a/drivers/clk/qcom/clk-rpmh.c b/drivers/clk/qcom/clk-rpmh.c index 98a118c1e244..e2c669b08aff 100644 --- a/drivers/clk/qcom/clk-rpmh.c +++ b/drivers/clk/qcom/clk-rpmh.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 
*/ #include <linux/clk-provider.h> @@ -143,12 +143,22 @@ static inline bool has_state_changed(struct clk_rpmh *c, u32 state) != (c->aggr_state & BIT(state)); } +static int clk_rpmh_send(struct clk_rpmh *c, enum rpmh_state state, + struct tcs_cmd *cmd, bool wait) +{ + if (wait) + return rpmh_write(c->dev, state, cmd, 1); + + return rpmh_write_async(c->dev, state, cmd, 1); +} + static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) { struct tcs_cmd cmd = { 0 }; u32 cmd_state, on_val; enum rpmh_state state = RPMH_SLEEP_STATE; int ret; + bool wait; cmd.addr = c->res_addr; cmd_state = c->aggr_state; @@ -159,7 +169,8 @@ static int clk_rpmh_send_aggregate_command(struct clk_rpmh *c) if (cmd_state & BIT(state)) cmd.data = on_val; - ret = rpmh_write_async(c->dev, state, &cmd, 1); + wait = cmd_state && state == RPMH_ACTIVE_ONLY_STATE; + ret = clk_rpmh_send(c, state, &cmd, wait); if (ret) { dev_err(c->dev, "set %s state of %s failed: (%d)\n", !state ? "sleep" : @@ -216,7 +227,7 @@ static int clk_rpmh_prepare(struct clk_hw *hw) mutex_unlock(&rpmh_clk_lock); return ret; -}; +} static void clk_rpmh_unprepare(struct clk_hw *hw) { @@ -248,38 +259,33 @@ static int clk_rpmh_bcm_send_cmd(struct clk_rpmh *c, bool enable) { struct tcs_cmd cmd = { 0 }; u32 cmd_state; - int ret; + int ret = 0; mutex_lock(&rpmh_clk_lock); - - cmd_state = 0; if (enable) { cmd_state = 1; if (c->aggr_state) cmd_state = c->aggr_state; + } else { + cmd_state = 0; } - if (c->last_sent_aggr_state == cmd_state) { - mutex_unlock(&rpmh_clk_lock); - return 0; - } + if (c->last_sent_aggr_state != cmd_state) { + cmd.addr = c->res_addr; + cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); - cmd.addr = c->res_addr; - cmd.data = BCM_TCS_CMD(1, enable, 0, cmd_state); - - ret = rpmh_write_async(c->dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1); - if (ret) { - dev_err(c->dev, "set active state of %s failed: (%d)\n", - c->res_name, ret); - mutex_unlock(&rpmh_clk_lock); - return ret; + ret = clk_rpmh_send(c, RPMH_ACTIVE_ONLY_STATE, &cmd, enable); + if (ret) { + dev_err(c->dev, "set active state of %s failed: (%d)\n", + c->res_name, ret); + } else { + c->last_sent_aggr_state = cmd_state; + } } - c->last_sent_aggr_state = cmd_state; - mutex_unlock(&rpmh_clk_lock); - return 0; + return ret; } static int clk_rpmh_bcm_prepare(struct clk_hw *hw) @@ -287,14 +293,14 @@ static int clk_rpmh_bcm_prepare(struct clk_hw *hw) struct clk_rpmh *c = to_clk_rpmh(hw); return clk_rpmh_bcm_send_cmd(c, true); -}; +} static void clk_rpmh_bcm_unprepare(struct clk_hw *hw) { struct clk_rpmh *c = to_clk_rpmh(hw); clk_rpmh_bcm_send_cmd(c, false); -}; +} static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) @@ -310,7 +316,7 @@ static int clk_rpmh_bcm_set_rate(struct clk_hw *hw, unsigned long rate, clk_rpmh_bcm_send_cmd(c, true); return 0; -}; +} static long clk_rpmh_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) @@ -404,6 +410,28 @@ static const struct clk_rpmh_desc clk_rpmh_sc7180 = { .num_clks = ARRAY_SIZE(sc7180_rpmh_clocks), }; +DEFINE_CLK_RPMH_VRM(sm8250, ln_bb_clk1, ln_bb_clk1_ao, "lnbclka1", 2); + +static struct clk_hw *sm8250_rpmh_clocks[] = { + [RPMH_CXO_CLK] = &sdm845_bi_tcxo.hw, + [RPMH_CXO_CLK_A] = &sdm845_bi_tcxo_ao.hw, + [RPMH_LN_BB_CLK1] = &sm8250_ln_bb_clk1.hw, + [RPMH_LN_BB_CLK1_A] = &sm8250_ln_bb_clk1_ao.hw, + [RPMH_LN_BB_CLK2] = &sdm845_ln_bb_clk2.hw, + [RPMH_LN_BB_CLK2_A] = &sdm845_ln_bb_clk2_ao.hw, + [RPMH_LN_BB_CLK3] = &sdm845_ln_bb_clk3.hw, + [RPMH_LN_BB_CLK3_A] = 
&sdm845_ln_bb_clk3_ao.hw, + [RPMH_RF_CLK1] = &sdm845_rf_clk1.hw, + [RPMH_RF_CLK1_A] = &sdm845_rf_clk1_ao.hw, + [RPMH_RF_CLK3] = &sdm845_rf_clk3.hw, + [RPMH_RF_CLK3_A] = &sdm845_rf_clk3_ao.hw, +}; + +static const struct clk_rpmh_desc clk_rpmh_sm8250 = { + .clks = sm8250_rpmh_clocks, + .num_clks = ARRAY_SIZE(sm8250_rpmh_clocks), +}; + static struct clk_hw *of_clk_rpmh_hw_get(struct of_phandle_args *clkspec, void *data) { @@ -490,6 +518,7 @@ static const struct of_device_id clk_rpmh_match_table[] = { { .compatible = "qcom,sc7180-rpmh-clk", .data = &clk_rpmh_sc7180}, { .compatible = "qcom,sdm845-rpmh-clk", .data = &clk_rpmh_sdm845}, { .compatible = "qcom,sm8150-rpmh-clk", .data = &clk_rpmh_sm8150}, + { .compatible = "qcom,sm8250-rpmh-clk", .data = &clk_rpmh_sm8250}, { } }; MODULE_DEVICE_TABLE(of, clk_rpmh_match_table); diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c index 0bbfef9fa6de..52f63ad787ba 100644 --- a/drivers/clk/qcom/clk-smd-rpm.c +++ b/drivers/clk/qcom/clk-smd-rpm.c @@ -525,6 +525,55 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8974 = { .num_clks = ARRAY_SIZE(msm8974_clks), }; + +/* msm8976 */ +DEFINE_CLK_SMD_RPM(msm8976, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8976, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); +DEFINE_CLK_SMD_RPM(msm8976, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, + QCOM_SMD_RPM_BUS_CLK, 2); +DEFINE_CLK_SMD_RPM(msm8976, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0); +DEFINE_CLK_SMD_RPM(msm8976, ipa_clk, ipa_a_clk, QCOM_SMD_RPM_IPA_CLK, 0); +DEFINE_CLK_SMD_RPM_QDSS(msm8976, qdss_clk, qdss_a_clk, + QCOM_SMD_RPM_MISC_CLK, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, bb_clk1, bb_clk1_a, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, bb_clk2, bb_clk2_a, 2); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, rf_clk2, rf_clk2_a, 5); +DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8976, div_clk2, div_clk2_a, 12); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8976, bb_clk1_pin, bb_clk1_a_pin, 1); +DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8976, bb_clk2_pin, bb_clk2_a_pin, 2); + +static struct clk_smd_rpm *msm8976_clks[] = { + [RPM_SMD_PCNOC_CLK] = &msm8976_pcnoc_clk, + [RPM_SMD_PCNOC_A_CLK] = &msm8976_pcnoc_a_clk, + [RPM_SMD_SNOC_CLK] = &msm8976_snoc_clk, + [RPM_SMD_SNOC_A_CLK] = &msm8976_snoc_a_clk, + [RPM_SMD_BIMC_CLK] = &msm8976_bimc_clk, + [RPM_SMD_BIMC_A_CLK] = &msm8976_bimc_a_clk, + [RPM_SMD_QDSS_CLK] = &msm8976_qdss_clk, + [RPM_SMD_QDSS_A_CLK] = &msm8976_qdss_a_clk, + [RPM_SMD_BB_CLK1] = &msm8976_bb_clk1, + [RPM_SMD_BB_CLK1_A] = &msm8976_bb_clk1_a, + [RPM_SMD_BB_CLK2] = &msm8976_bb_clk2, + [RPM_SMD_BB_CLK2_A] = &msm8976_bb_clk2_a, + [RPM_SMD_RF_CLK2] = &msm8976_rf_clk2, + [RPM_SMD_RF_CLK2_A] = &msm8976_rf_clk2_a, + [RPM_SMD_BB_CLK1_PIN] = &msm8976_bb_clk1_pin, + [RPM_SMD_BB_CLK1_A_PIN] = &msm8976_bb_clk1_a_pin, + [RPM_SMD_BB_CLK2_PIN] = &msm8976_bb_clk2_pin, + [RPM_SMD_BB_CLK2_A_PIN] = &msm8976_bb_clk2_a_pin, + [RPM_SMD_MMSSNOC_AHB_CLK] = &msm8976_mmssnoc_ahb_clk, + [RPM_SMD_MMSSNOC_AHB_A_CLK] = &msm8976_mmssnoc_ahb_a_clk, + [RPM_SMD_DIV_CLK2] = &msm8976_div_clk2, + [RPM_SMD_DIV_A_CLK2] = &msm8976_div_clk2_a, + [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk, + [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk, +}; + +static const struct rpm_smd_clk_desc rpm_clk_msm8976 = { + .clks = msm8976_clks, + .num_clks = ARRAY_SIZE(msm8976_clks), +}; + /* msm8996 */ DEFINE_CLK_SMD_RPM(msm8996, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0); DEFINE_CLK_SMD_RPM(msm8996, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1); @@ -720,6 +769,7 @@ static const struct 
rpm_smd_clk_desc rpm_clk_msm8998 = { static const struct of_device_id rpm_smd_clk_match_table[] = { { .compatible = "qcom,rpmcc-msm8916", .data = &rpm_clk_msm8916 }, { .compatible = "qcom,rpmcc-msm8974", .data = &rpm_clk_msm8974 }, + { .compatible = "qcom,rpmcc-msm8976", .data = &rpm_clk_msm8976 }, { .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 }, { .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 }, { .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 }, diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index b0eee0903807..a8456e09c44d 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -1224,6 +1224,8 @@ static struct clk_rcg prng_src = { .parent_map = gcc_pxo_pll8_map, }, .clkr = { + .enable_reg = 0x2e80, + .enable_mask = BIT(11), .hw.init = &(struct clk_init_data){ .name = "prng_src", .parent_names = gcc_pxo_pll8, diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index 7f59fb8da033..6a51b5b5fc19 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * Copyright (c) 2019, The Linux Foundation. All rights reserved. + * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. */ #include <linux/clk-provider.h> @@ -2165,6 +2165,71 @@ static struct clk_branch gcc_video_xo_clk = { }, }; +static struct clk_branch gcc_mss_cfg_ahb_clk = { + .halt_reg = 0x8a000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8a000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_mfab_axis_clk = { + .halt_reg = 0x8a004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8a004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_mfab_axis_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_nav_axi_clk = { + .halt_reg = 0x8a00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8a00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_nav_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_snoc_axi_clk = { + .halt_reg = 0x8a150, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8a150, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_snoc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_mss_q6_memnoc_axi_clk = { + .halt_reg = 0x8a154, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8a154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_mss_q6_memnoc_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc ufs_phy_gdsc = { .gdscr = 0x77004, .pd = { @@ -2336,6 +2401,11 @@ static struct clk_regmap *gcc_sc7180_clocks[] = { [GPLL7] = &gpll7.clkr, [GPLL4] = &gpll4.clkr, [GPLL1] = &gpll1.clkr, + [GCC_MSS_CFG_AHB_CLK] = &gcc_mss_cfg_ahb_clk.clkr, + [GCC_MSS_MFAB_AXIS_CLK] = &gcc_mss_mfab_axis_clk.clkr, + [GCC_MSS_NAV_AXI_CLK] = &gcc_mss_nav_axi_clk.clkr, + [GCC_MSS_Q6_MEMNOC_AXI_CLK] = &gcc_mss_q6_memnoc_axi_clk.clkr, + [GCC_MSS_SNOC_AXI_CLK] = &gcc_mss_snoc_axi_clk.clkr, }; static const struct qcom_reset_map gcc_sc7180_resets[] = { diff --git a/drivers/clk/qcom/gcc-sm8150.c b/drivers/clk/qcom/gcc-sm8150.c index 20877214acff..ef98fdc51755 100644 --- a/drivers/clk/qcom/gcc-sm8150.c +++ 
b/drivers/clk/qcom/gcc-sm8150.c @@ -21,6 +21,7 @@ #include "clk-rcg.h" #include "clk-regmap.h" #include "reset.h" +#include "gdsc.h" enum { P_BI_TCXO, @@ -3171,6 +3172,18 @@ static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { }, }; +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0xf058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + static struct clk_branch gcc_usb3_sec_clkref_clk = { .halt_reg = 0x8c028, .halt_check = BRANCH_HALT, @@ -3218,6 +3231,18 @@ static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = { }, }; +static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x10058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + /* * Clock ON depends on external parent 'config noc', so cant poll * delay and also mark as crtitical for video boot @@ -3292,6 +3317,24 @@ static struct clk_branch gcc_video_xo_clk = { }, }; +static struct gdsc usb30_prim_gdsc = { + .gdscr = 0xf004, + .pd = { + .name = "usb30_prim_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR, +}; + +static struct gdsc usb30_sec_gdsc = { + .gdscr = 0x10004, + .pd = { + .name = "usb30_sec_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = POLL_CFG_GDSCR, +}; + static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr, [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr, @@ -3480,10 +3523,12 @@ static struct clk_regmap *gcc_sm8150_clocks[] = { [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, [GCC_USB3_SEC_CLKREF_CLK] = &gcc_usb3_sec_clkref_clk.clkr, [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr, [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr, [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr, + [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr, [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr, [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, @@ -3527,6 +3572,11 @@ static const struct qcom_reset_map gcc_sm8150_resets[] = { [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, }; +static struct gdsc *gcc_sm8150_gdscs[] = { + [USB30_PRIM_GDSC] = &usb30_prim_gdsc, + [USB30_SEC_GDSC] = &usb30_sec_gdsc, +}; + static const struct regmap_config gcc_sm8150_regmap_config = { .reg_bits = 32, .reg_stride = 4, @@ -3541,6 +3591,8 @@ static const struct qcom_cc_desc gcc_sm8150_desc = { .num_clks = ARRAY_SIZE(gcc_sm8150_clocks), .resets = gcc_sm8150_resets, .num_resets = ARRAY_SIZE(gcc_sm8150_resets), + .gdscs = gcc_sm8150_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_sm8150_gdscs), }; static const struct of_device_id gcc_sm8150_match_table[] = { diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c new file mode 100644 index 000000000000..6cb6617b8d88 --- /dev/null +++ b/drivers/clk/qcom/gcc-sm8250.c @@ -0,0 +1,3690 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. 
+ */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,gcc-sm8250.h> + +#include "clk-alpha-pll.h" +#include "clk-branch.h" +#include "clk-rcg.h" +#include "clk-regmap.h" +#include "clk-regmap-divider.h" +#include "common.h" +#include "gdsc.h" +#include "reset.h" + +enum { + P_BI_TCXO, + P_AUD_REF_CLK, + P_CORE_BI_PLL_TEST_SE, + P_GPLL0_OUT_EVEN, + P_GPLL0_OUT_MAIN, + P_GPLL4_OUT_MAIN, + P_GPLL9_OUT_MAIN, + P_SLEEP_CLK, +}; + +static struct clk_alpha_pll gpll0 = { + .offset = 0x0, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gpll0", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + }, + }, +}; + +static const struct clk_div_table post_div_table_gpll0_out_even[] = { + { 0x1, 2 }, + { } +}; + +static struct clk_alpha_pll_postdiv gpll0_out_even = { + .offset = 0x0, + .post_div_shift = 8, + .post_div_table = post_div_table_gpll0_out_even, + .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_even), + .width = 4, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr.hw.init = &(struct clk_init_data){ + .name = "gpll0_out_even", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpll0.clkr.hw, + }, + .num_parents = 1, + .ops = &clk_alpha_pll_postdiv_lucid_ops, + }, +}; + +static struct clk_alpha_pll gpll4 = { + .offset = 0x76000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gpll4", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + }, + }, +}; + +static struct clk_alpha_pll gpll9 = { + .offset = 0x1c000, + .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID], + .clkr = { + .enable_reg = 0x52018, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gpll9", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "bi_tcxo", + }, + .num_parents = 1, + .ops = &clk_alpha_pll_fixed_lucid_ops, + }, + }, +}; + +static const struct parent_map gcc_parent_map_0[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_0[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, +}; + +static const struct clk_parent_data gcc_parent_data_0_ao[] = { + { .fw_name = "bi_tcxo_ao" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_1[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_SLEEP_CLK, 5 }, + { P_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_1[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .fw_name = "sleep_clk" }, + { .hw = &gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_2[] = { + { P_BI_TCXO, 0 }, + { P_SLEEP_CLK, 5 }, +}; + +static const struct clk_parent_data gcc_parent_data_2[] = { + { .fw_name = "bi_tcxo" }, + { .fw_name = "sleep_clk" }, +}; + +static const struct parent_map gcc_parent_map_3[] = { + { P_BI_TCXO, 0 }, +}; + +static const struct clk_parent_data gcc_parent_data_3[] = { 
+ { .fw_name = "bi_tcxo" }, +}; + +static const struct parent_map gcc_parent_map_4[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_GPLL9_OUT_MAIN, 2 }, + { P_GPLL4_OUT_MAIN, 5 }, + { P_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_4[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .hw = &gpll9.clkr.hw }, + { .hw = &gpll4.clkr.hw }, + { .hw = &gpll0_out_even.clkr.hw }, +}; + +static const struct parent_map gcc_parent_map_5[] = { + { P_BI_TCXO, 0 }, + { P_GPLL0_OUT_MAIN, 1 }, + { P_AUD_REF_CLK, 2 }, + { P_GPLL0_OUT_EVEN, 6 }, +}; + +static const struct clk_parent_data gcc_parent_data_5[] = { + { .fw_name = "bi_tcxo" }, + { .hw = &gpll0.clkr.hw }, + { .fw_name = "aud_ref_clk" }, + { .hw = &gpll0_out_even.clkr.hw }, +}; + +static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { + .cmd_rcgr = 0x48010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk_src", + .parent_data = gcc_parent_data_0_ao, + .num_parents = 3, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_gp1_clk_src = { + .cmd_rcgr = 0x64004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_gp2_clk_src = { + .cmd_rcgr = 0x65004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_gp3_clk_src = { + .cmd_rcgr = 0x66004, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_1, + .freq_tbl = ftbl_gcc_gp1_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk_src", + .parent_data = gcc_parent_data_1, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_0_aux_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_0_aux_clk_src = { + .cmd_rcgr = 0x6b038, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_1_aux_clk_src = { + .cmd_rcgr = 0x8d038, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_pcie_2_aux_clk_src = { + .cmd_rcgr = 0x6038, + .mnd_width = 16, + .hid_width = 5, + 
.parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_2_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pcie_phy_refgen_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pcie_phy_refgen_clk_src = { + .cmd_rcgr = 0x6f014, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pcie_phy_refgen_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_phy_refgen_clk_src", + .parent_data = gcc_parent_data_0_ao, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = { + F(9600000, P_BI_TCXO, 2, 0, 0), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(60000000, P_GPLL0_OUT_MAIN, 10, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_pdm2_clk_src = { + .cmd_rcgr = 0x33010, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_pdm2_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = { + F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(102400000, P_GPLL0_OUT_EVEN, 1, 128, 375), + F(112000000, P_GPLL0_OUT_EVEN, 1, 28, 75), + F(117964800, P_GPLL0_OUT_EVEN, 1, 6144, 15625), + F(120000000, P_GPLL0_OUT_EVEN, 2.5, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = { + .name = "gcc_qupv3_wrap0_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = { + .cmd_rcgr = 0x17010, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = { + .name = "gcc_qupv3_wrap0_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = { + .cmd_rcgr = 0x17140, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s2_clk_src[] = { + F(7372800, P_GPLL0_OUT_EVEN, 1, 384, 15625), + F(14745600, P_GPLL0_OUT_EVEN, 1, 768, 15625), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(29491200, P_GPLL0_OUT_EVEN, 1, 1536, 15625), + F(32000000, P_GPLL0_OUT_EVEN, 1, 8, 75), + F(48000000, P_GPLL0_OUT_EVEN, 1, 4, 25), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(64000000, P_GPLL0_OUT_EVEN, 1, 16, 75), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(80000000, P_GPLL0_OUT_EVEN, 1, 4, 15), + F(96000000, P_GPLL0_OUT_EVEN, 1, 8, 25), + F(100000000, 
P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = { + .name = "gcc_qupv3_wrap0_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = { + .cmd_rcgr = 0x17270, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = { + .name = "gcc_qupv3_wrap0_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = { + .cmd_rcgr = 0x173a0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = { + .name = "gcc_qupv3_wrap0_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = { + .cmd_rcgr = 0x174d0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = { + .name = "gcc_qupv3_wrap0_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = { + .cmd_rcgr = 0x17600, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s6_clk_src_init = { + .name = "gcc_qupv3_wrap0_s6_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s6_clk_src = { + .cmd_rcgr = 0x17730, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s6_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap0_s7_clk_src_init = { + .name = "gcc_qupv3_wrap0_s7_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap0_s7_clk_src = { + .cmd_rcgr = 0x17860, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap0_s7_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s0_clk_src_init = { + .name = "gcc_qupv3_wrap1_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s0_clk_src = { + .cmd_rcgr = 0x18010, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s1_clk_src_init = { + .name = "gcc_qupv3_wrap1_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s1_clk_src = { + .cmd_rcgr = 0x18140, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + 
.clkr.hw.init = &gcc_qupv3_wrap1_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s2_clk_src_init = { + .name = "gcc_qupv3_wrap1_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s2_clk_src = { + .cmd_rcgr = 0x18270, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s3_clk_src_init = { + .name = "gcc_qupv3_wrap1_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s3_clk_src = { + .cmd_rcgr = 0x183a0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s4_clk_src_init = { + .name = "gcc_qupv3_wrap1_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s4_clk_src = { + .cmd_rcgr = 0x184d0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap1_s5_clk_src_init = { + .name = "gcc_qupv3_wrap1_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap1_s5_clk_src = { + .cmd_rcgr = 0x18600, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap1_s5_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s0_clk_src_init = { + .name = "gcc_qupv3_wrap2_s0_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s0_clk_src = { + .cmd_rcgr = 0x1e010, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s0_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s1_clk_src_init = { + .name = "gcc_qupv3_wrap2_s1_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s1_clk_src = { + .cmd_rcgr = 0x1e140, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s1_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s2_clk_src_init = { + .name = "gcc_qupv3_wrap2_s2_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s2_clk_src = { + .cmd_rcgr = 0x1e270, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s2_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s3_clk_src_init = { + .name = "gcc_qupv3_wrap2_s3_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s3_clk_src = { + .cmd_rcgr = 0x1e3a0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = 
ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s3_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s4_clk_src_init = { + .name = "gcc_qupv3_wrap2_s4_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s4_clk_src = { + .cmd_rcgr = 0x1e4d0, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s4_clk_src_init, +}; + +static struct clk_init_data gcc_qupv3_wrap2_s5_clk_src_init = { + .name = "gcc_qupv3_wrap2_s5_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, +}; + +static struct clk_rcg2 gcc_qupv3_wrap2_s5_clk_src = { + .cmd_rcgr = 0x1e600, + .mnd_width = 16, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_qupv3_wrap0_s2_clk_src, + .clkr.hw.init = &gcc_qupv3_wrap2_s5_clk_src_init, +}; + +static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(202000000, P_GPLL9_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { + .cmd_rcgr = 0x1400c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_4, + .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk_src", + .parent_data = gcc_parent_data_4, + .num_parents = 5, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_sdcc4_apps_clk_src[] = { + F(400000, P_BI_TCXO, 12, 1, 4), + F(19200000, P_BI_TCXO, 1, 0, 0), + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { + .cmd_rcgr = 0x1600c, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_sdcc4_apps_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_apps_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_tsif_ref_clk_src[] = { + F(105495, P_BI_TCXO, 2, 1, 91), + { } +}; + +static struct clk_rcg2 gcc_tsif_ref_clk_src = { + .cmd_rcgr = 0x36010, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_5, + .freq_tbl = ftbl_gcc_tsif_ref_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ref_clk_src", + .parent_data = gcc_parent_data_5, + .num_parents = 4, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_axi_clk_src[] = { + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(50000000, P_GPLL0_OUT_EVEN, 6, 0, 0), + F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_axi_clk_src = { + .cmd_rcgr = 0x75024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_axi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_axi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_ice_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + 
F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_ice_core_clk_src = { + .cmd_rcgr = 0x7506c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ice_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_phy_aux_clk_src[] = { + F(19200000, P_BI_TCXO, 1, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_phy_aux_clk_src = { + .cmd_rcgr = 0x750a0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_phy_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_card_unipro_core_clk_src[] = { + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_card_unipro_core_clk_src = { + .cmd_rcgr = 0x75084, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_unipro_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_unipro_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_ufs_phy_axi_clk_src[] = { + F(25000000, P_GPLL0_OUT_EVEN, 12, 0, 0), + F(37500000, P_GPLL0_OUT_EVEN, 8, 0, 0), + F(75000000, P_GPLL0_OUT_EVEN, 4, 0, 0), + F(150000000, P_GPLL0_OUT_MAIN, 4, 0, 0), + F(300000000, P_GPLL0_OUT_MAIN, 2, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_ufs_phy_axi_clk_src = { + .cmd_rcgr = 0x77024, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_phy_axi_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_ice_core_clk_src = { + .cmd_rcgr = 0x7706c, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_phy_aux_clk_src = { + .cmd_rcgr = 0x770a0, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_3, + .freq_tbl = ftbl_gcc_pcie_0_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk_src", + .parent_data = gcc_parent_data_3, + .num_parents = 1, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_ufs_phy_unipro_core_clk_src = { + .cmd_rcgr = 0x77084, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_ice_core_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = { + F(33333333, P_GPLL0_OUT_EVEN, 9, 0, 0), + F(66666667, P_GPLL0_OUT_EVEN, 4.5, 0, 0), + F(133333333, P_GPLL0_OUT_MAIN, 4.5, 0, 0), + F(200000000, P_GPLL0_OUT_MAIN, 3, 0, 0), + 
F(240000000, P_GPLL0_OUT_MAIN, 2.5, 0, 0), + { } +}; + +static struct clk_rcg2 gcc_usb30_prim_master_clk_src = { + .cmd_rcgr = 0xf020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = { + .cmd_rcgr = 0xf038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_sec_master_clk_src = { + .cmd_rcgr = 0x10020, + .mnd_width = 8, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_master_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb30_sec_mock_utmi_clk_src = { + .cmd_rcgr = 0x10038, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_0, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_mock_utmi_clk_src", + .parent_data = gcc_parent_data_0, + .num_parents = 3, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = { + .cmd_rcgr = 0xf064, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_rcg2 gcc_usb3_sec_phy_aux_clk_src = { + .cmd_rcgr = 0x10064, + .mnd_width = 0, + .hid_width = 5, + .parent_map = gcc_parent_map_2, + .freq_tbl = ftbl_gcc_ufs_card_phy_aux_clk_src, + .clkr.hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_aux_clk_src", + .parent_data = gcc_parent_data_2, + .num_parents = 2, + .ops = &clk_rcg2_ops, + }, +}; + +static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = { + .reg = 0x48028, + .shift = 0, + .width = 4, + .clkr.hw.init = &(struct clk_init_data) { + .name = "gcc_cpuss_ahb_postdiv_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_cpuss_ahb_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = { + .reg = 0xf050, + .shift = 0, + .width = 2, + .clkr.hw.init = &(struct clk_init_data) { + .name = "gcc_usb30_prim_mock_utmi_postdiv_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + +static struct clk_regmap_div gcc_usb30_sec_mock_utmi_postdiv_clk_src = { + .reg = 0x10050, + .shift = 0, + .width = 2, + .clkr.hw.init = &(struct clk_init_data) { + .name = "gcc_usb30_sec_mock_utmi_postdiv_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_sec_mock_utmi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_regmap_div_ro_ops, + }, +}; + 
+static struct clk_branch gcc_aggre_noc_pcie_tbu_clk = { + .halt_reg = 0x9000c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x9000c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_noc_pcie_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_card_axi_clk = { + .halt_reg = 0x750cc, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x750cc, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x750cc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_card_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_card_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_ufs_phy_axi_clk = { + .halt_reg = 0x770cc, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x770cc, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x770cc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_ufs_phy_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_prim_axi_clk = { + .halt_reg = 0xf080, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xf080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb3_prim_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_aggre_usb3_sec_axi_clk = { + .halt_reg = 0x10080, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x10080, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_aggre_usb3_sec_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_sec_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_boot_rom_ahb_clk = { + .halt_reg = 0x38004, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x38004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_boot_rom_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_hf_axi_clk = { + .halt_reg = 0xb02c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_sf_axi_clk = { + .halt_reg = 0xb030, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_camera_xo_clk = { + .halt_reg = 0xb040, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb040, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_camera_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = { + .halt_reg = 0xf07c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xf07c, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_cfg_noc_usb3_prim_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = { + .halt_reg = 0x1007c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x1007c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cfg_noc_usb3_sec_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_sec_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_ahb_clk = { + .halt_reg = 0x48000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_ahb_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_cpuss_rbcpr_clk = { + .halt_reg = 0x48004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x48004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_cpuss_rbcpr_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_gpu_axi_clk = { + .halt_reg = 0x71154, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x71154, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_gpu_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ddrss_pcie_sf_tbu_clk = { + .halt_reg = 0x8d058, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x8d058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ddrss_pcie_sf_tbu_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_hf_axi_clk = { + .halt_reg = 0xb034, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_hf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_sf_axi_clk = { + .halt_reg = 0xb038, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb038, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_sf_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_disp_xo_clk = { + .halt_reg = 0xb044, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb044, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_disp_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp1_clk = { + .halt_reg = 0x64000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x64000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_gp1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp2_clk = { + .halt_reg = 0x65000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x65000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = 
&gcc_gp2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gp3_clk = { + .halt_reg = 0x66000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x66000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gp3_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_gp3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_gpll0_div_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpll0_out_even.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_iref_en = { + .halt_reg = 0x8c014, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_iref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_memnoc_gfx_clk = { + .halt_reg = 0x7100c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x7100c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_memnoc_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = { + .halt_reg = 0x71018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x71018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_gpu_snoc_dvm_gfx_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_axi_clk = { + .halt_reg = 0x4d008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4d008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_bwmon_axi_clk = { + .halt_reg = 0x73008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x73008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_bwmon_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_bwmon_cfg_ahb_clk = { + .halt_reg = 0x73004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x73004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_bwmon_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_cfg_ahb_clk = { + .halt_reg = 0x4d004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x4d004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x4d004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_dma_clk = { + .halt_reg = 0x4d00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x4d00c, + .enable_mask = BIT(0), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_npu_dma_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_gpll0_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpll0.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_npu_gpll0_div_clk_src = { + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_npu_gpll0_div_clk_src", + .parent_data = &(const struct clk_parent_data){ + .hw = &gpll0_out_even.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie0_phy_refgen_clk = { + .halt_reg = 0x6f02c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f02c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie0_phy_refgen_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie1_phy_refgen_clk = { + .halt_reg = 0x6f030, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f030, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie1_phy_refgen_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie2_phy_refgen_clk = { + .halt_reg = 0x6f034, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f034, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie2_phy_refgen_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_phy_refgen_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_aux_clk = { + .halt_reg = 0x6b028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_cfg_ahb_clk = { + .halt_reg = 0x6b024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_mstr_axi_clk = { + .halt_reg = 0x6b01c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_pipe_clk = { + .halt_reg = 0x6b02c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; 
+ +static struct clk_branch gcc_pcie_0_slv_axi_clk = { + .halt_reg = 0x6b014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6b014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_0_slv_q2a_axi_clk = { + .halt_reg = 0x6b010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_0_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_aux_clk = { + .halt_reg = 0x8d028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(29), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_1_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_cfg_ahb_clk = { + .halt_reg = 0x8d024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(28), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_mstr_axi_clk = { + .halt_reg = 0x8d01c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_pipe_clk = { + .halt_reg = 0x8d02c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(30), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_axi_clk = { + .halt_reg = 0x8d014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x8d014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_1_slv_q2a_axi_clk = { + .halt_reg = 0x8d010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_1_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2_aux_clk = { + .halt_reg = 0x6028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_2_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_2_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2_cfg_ahb_clk = { + .halt_reg = 0x6024, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6024, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_2_cfg_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2_mstr_axi_clk = { + .halt_reg = 0x601c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(12), + .hw.init = &(struct 
clk_init_data){ + .name = "gcc_pcie_2_mstr_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2_pipe_clk = { + .halt_reg = 0x602c, + .halt_check = BRANCH_HALT_SKIP, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_2_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2_slv_axi_clk = { + .halt_reg = 0x6014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x6014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_2_slv_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_2_slv_q2a_axi_clk = { + .halt_reg = 0x6010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_2_slv_q2a_axi_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_mdm_clkref_en = { + .halt_reg = 0x8c00c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c00c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_mdm_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_phy_aux_clk = { + .halt_reg = 0x6f004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x6f004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_phy_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pcie_0_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_wifi_clkref_en = { + .halt_reg = 0x8c004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_wifi_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pcie_wigig_clkref_en = { + .halt_reg = 0x8c008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pcie_wigig_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm2_clk = { + .halt_reg = 0x3300c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3300c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_pdm2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_ahb_clk = { + .halt_reg = 0x33004, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x33004, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x33004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_pdm_xo4_clk = { + .halt_reg = 0x33008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x33008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_pdm_xo4_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_prng_ahb_clk = { + .halt_reg = 0x34004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52000, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_prng_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; 
+ +static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = { + .halt_reg = 0xb018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_nrt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_camera_rt_ahb_clk = { + .halt_reg = 0xb01c, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb01c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_camera_rt_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_disp_ahb_clk = { + .halt_reg = 0xb020, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb020, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_disp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_cvp_ahb_clk = { + .halt_reg = 0xb010, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_cvp_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = { + .halt_reg = 0xb014, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0xb014, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0xb014, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qmip_video_vcodec_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = { + .halt_reg = 0x23008, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_core_clk = { + .halt_reg = 0x23000, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s0_clk = { + .halt_reg = 0x1700c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s1_clk = { + .halt_reg = 0x1713c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s2_clk = { + .halt_reg = 0x1726c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, 
+ .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s3_clk = { + .halt_reg = 0x1739c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(13), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s3_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s4_clk = { + .halt_reg = 0x174cc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(14), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s4_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s5_clk = { + .halt_reg = 0x175fc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s5_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s6_clk = { + .halt_reg = 0x1772c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(16), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s6_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s6_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap0_s7_clk = { + .halt_reg = 0x1785c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap0_s7_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap0_s7_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = { + .halt_reg = 0x23140, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(18), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_core_clk = { + .halt_reg = 0x23138, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(19), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s0_clk = { + .halt_reg = 0x1800c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(22), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap1_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s1_clk = { + .halt_reg = 0x1813c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(23), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s1_clk", + 
.parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap1_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s2_clk = { + .halt_reg = 0x1826c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(24), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap1_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s3_clk = { + .halt_reg = 0x1839c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(25), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s3_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap1_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s4_clk = { + .halt_reg = 0x184cc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(26), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s4_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap1_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap1_s5_clk = { + .halt_reg = 0x185fc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(27), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap1_s5_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap1_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_core_2x_clk = { + .halt_reg = 0x23278, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(3), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_core_2x_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_core_clk = { + .halt_reg = 0x23270, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_core_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s0_clk = { + .halt_reg = 0x1e00c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s0_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap2_s0_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s1_clk = { + .halt_reg = 0x1e13c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(5), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s1_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap2_s1_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s2_clk = { + .halt_reg = 0x1e26c, + .halt_check = BRANCH_HALT_VOTED, 
+ .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s2_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap2_s2_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s3_clk = { + .halt_reg = 0x1e39c, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s3_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap2_s3_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s4_clk = { + .halt_reg = 0x1e4cc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(8), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s4_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap2_s4_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap2_s5_clk = { + .halt_reg = 0x1e5fc, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap2_s5_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_qupv3_wrap2_s5_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = { + .halt_reg = 0x17004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(6), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = { + .halt_reg = 0x17008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x17008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(7), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_0_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_m_ahb_clk = { + .halt_reg = 0x18004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(20), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_1_s_ahb_clk = { + .halt_reg = 0x18008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x18008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52008, + .enable_mask = BIT(21), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_1_s_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_2_m_ahb_clk = { + .halt_reg = 0x1e004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(2), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_2_m_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_qupv3_wrap_2_s_ahb_clk = { + .halt_reg = 0x1e008, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x1e008, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x52010, + .enable_mask = BIT(1), + .hw.init = &(struct clk_init_data){ + .name = "gcc_qupv3_wrap_2_s_ahb_clk", + .ops = &clk_branch2_ops, + 
}, + }, +}; + +static struct clk_branch gcc_sdcc2_ahb_clk = { + .halt_reg = 0x14008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc2_apps_clk = { + .halt_reg = 0x14004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x14004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc2_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_sdcc2_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_ahb_clk = { + .halt_reg = 0x16008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_sdcc4_apps_clk = { + .halt_reg = 0x16004, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x16004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_sdcc4_apps_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_sdcc4_apps_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_ahb_clk = { + .halt_reg = 0x36004, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x36004, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_inactivity_timers_clk = { + .halt_reg = 0x3600c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x3600c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_inactivity_timers_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_tsif_ref_clk = { + .halt_reg = 0x36008, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x36008, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_tsif_ref_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_tsif_ref_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_1x_clkref_en = { + .halt_reg = 0x8c000, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c000, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_1x_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ahb_clk = { + .halt_reg = 0x75018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_axi_clk = { + .halt_reg = 0x75010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x75010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_card_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_ice_core_clk = { + .halt_reg = 
0x75064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x75064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x75064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_ice_core_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_card_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_phy_aux_clk = { + .halt_reg = 0x7509c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7509c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7509c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_phy_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_card_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_rx_symbol_0_clk = { + .halt_reg = 0x75020, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x75020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_rx_symbol_1_clk = { + .halt_reg = 0x750b8, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x750b8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_tx_symbol_0_clk = { + .halt_reg = 0x7501c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x7501c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_card_unipro_core_clk = { + .halt_reg = 0x7505c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7505c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7505c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_card_unipro_core_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_card_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ahb_clk = { + .halt_reg = 0x77018, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77018, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ahb_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_axi_clk = { + .halt_reg = 0x77010, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x77010, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_axi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_phy_axi_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_ice_core_clk = { + .halt_reg = 0x77064, + .halt_check = BRANCH_HALT_VOTED, + .hwcg_reg = 0x77064, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x77064, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_ice_core_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_phy_ice_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = 
&clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_phy_aux_clk = { + .halt_reg = 0x7709c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7709c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7709c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_phy_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_phy_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_0_clk = { + .halt_reg = 0x77020, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x77020, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_rx_symbol_1_clk = { + .halt_reg = 0x770b8, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x770b8, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_rx_symbol_1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_tx_symbol_0_clk = { + .halt_reg = 0x7701c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x7701c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_tx_symbol_0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_ufs_phy_unipro_core_clk = { + .halt_reg = 0x7705c, + .halt_check = BRANCH_HALT, + .hwcg_reg = 0x7705c, + .hwcg_bit = 1, + .clkr = { + .enable_reg = 0x7705c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_ufs_phy_unipro_core_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_ufs_phy_unipro_core_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_master_clk = { + .halt_reg = 0xf010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xf010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_master_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_prim_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_mock_utmi_clk = { + .halt_reg = 0xf01c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf01c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_mock_utmi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_prim_sleep_clk = { + .halt_reg = 0xf018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_prim_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_master_clk = { + .halt_reg = 0x10010, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x10010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_master_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb30_sec_master_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + 
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = { + .halt_reg = 0x1001c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x1001c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_mock_utmi_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = + &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb30_sec_sleep_clk = { + .halt_reg = 0x10018, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10018, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb30_sec_sleep_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_aux_clk = { + .halt_reg = 0xf054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = { + .halt_reg = 0xf058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xf058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_com_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb3_prim_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_prim_phy_pipe_clk = { + .halt_reg = 0xf05c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0xf05c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_prim_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_clkref_en = { + .halt_reg = 0x8c010, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x8c010, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_clkref_en", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_aux_clk = { + .halt_reg = 0x10054, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10054, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_com_aux_clk = { + .halt_reg = 0x10058, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x10058, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_com_aux_clk", + .parent_data = &(const struct clk_parent_data){ + .hw = &gcc_usb3_sec_phy_aux_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_usb3_sec_phy_pipe_clk = { + .halt_reg = 0x1005c, + .halt_check = BRANCH_HALT_DELAY, + .clkr = { + .enable_reg = 0x1005c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_usb3_sec_phy_pipe_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi0_clk = { + .halt_reg = 0xb024, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb024, + 
.enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi0_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_axi1_clk = { + .halt_reg = 0xb028, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0xb028, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_axi1_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch gcc_video_xo_clk = { + .halt_reg = 0xb03c, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0xb03c, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "gcc_video_xo_clk", + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct gdsc pcie_0_gdsc = { + .gdscr = 0x6b004, + .pd = { + .name = "pcie_0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_1_gdsc = { + .gdscr = 0x8d004, + .pd = { + .name = "pcie_1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc pcie_2_gdsc = { + .gdscr = 0x6004, + .pd = { + .name = "pcie_2_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ufs_card_gdsc = { + .gdscr = 0x75004, + .pd = { + .name = "ufs_card_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc ufs_phy_gdsc = { + .gdscr = 0x77004, + .pd = { + .name = "ufs_phy_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc usb30_prim_gdsc = { + .gdscr = 0xf004, + .pd = { + .name = "usb30_prim_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc usb30_sec_gdsc = { + .gdscr = 0x10004, + .pd = { + .name = "usb30_sec_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, +}; + +static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc = { + .gdscr = 0x7d050, + .pd = { + .name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc = { + .gdscr = 0x7d058, + .pd = { + .name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc = { + .gdscr = 0x7d054, + .pd = { + .name = "hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct gdsc hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc = { + .gdscr = 0x7d06c, + .pd = { + .name = "hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc", + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = VOTABLE, +}; + +static struct clk_regmap *gcc_sm8250_clocks[] = { + [GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr, + [GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr, + [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr, + [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, + [GCC_AGGRE_USB3_SEC_AXI_CLK] = &gcc_aggre_usb3_sec_axi_clk.clkr, + [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, + [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, + [GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr, + [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr, + [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr, + [GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr, + [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr, + [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr, + [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr, + [GCC_CPUSS_RBCPR_CLK] = &gcc_cpuss_rbcpr_clk.clkr, + [GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr, + [GCC_DDRSS_PCIE_SF_TBU_CLK] = &gcc_ddrss_pcie_sf_tbu_clk.clkr, + [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr, + 
[GCC_DISP_SF_AXI_CLK] = &gcc_disp_sf_axi_clk.clkr, + [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr, + [GCC_GP1_CLK] = &gcc_gp1_clk.clkr, + [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr, + [GCC_GP2_CLK] = &gcc_gp2_clk.clkr, + [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr, + [GCC_GP3_CLK] = &gcc_gp3_clk.clkr, + [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr, + [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr, + [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr, + [GCC_GPU_IREF_EN] = &gcc_gpu_iref_en.clkr, + [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr, + [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr, + [GCC_NPU_AXI_CLK] = &gcc_npu_axi_clk.clkr, + [GCC_NPU_BWMON_AXI_CLK] = &gcc_npu_bwmon_axi_clk.clkr, + [GCC_NPU_BWMON_CFG_AHB_CLK] = &gcc_npu_bwmon_cfg_ahb_clk.clkr, + [GCC_NPU_CFG_AHB_CLK] = &gcc_npu_cfg_ahb_clk.clkr, + [GCC_NPU_DMA_CLK] = &gcc_npu_dma_clk.clkr, + [GCC_NPU_GPLL0_CLK_SRC] = &gcc_npu_gpll0_clk_src.clkr, + [GCC_NPU_GPLL0_DIV_CLK_SRC] = &gcc_npu_gpll0_div_clk_src.clkr, + [GCC_PCIE0_PHY_REFGEN_CLK] = &gcc_pcie0_phy_refgen_clk.clkr, + [GCC_PCIE1_PHY_REFGEN_CLK] = &gcc_pcie1_phy_refgen_clk.clkr, + [GCC_PCIE2_PHY_REFGEN_CLK] = &gcc_pcie2_phy_refgen_clk.clkr, + [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr, + [GCC_PCIE_0_AUX_CLK_SRC] = &gcc_pcie_0_aux_clk_src.clkr, + [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr, + [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr, + [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr, + [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr, + [GCC_PCIE_0_SLV_Q2A_AXI_CLK] = &gcc_pcie_0_slv_q2a_axi_clk.clkr, + [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr, + [GCC_PCIE_1_AUX_CLK_SRC] = &gcc_pcie_1_aux_clk_src.clkr, + [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr, + [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr, + [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr, + [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr, + [GCC_PCIE_1_SLV_Q2A_AXI_CLK] = &gcc_pcie_1_slv_q2a_axi_clk.clkr, + [GCC_PCIE_2_AUX_CLK] = &gcc_pcie_2_aux_clk.clkr, + [GCC_PCIE_2_AUX_CLK_SRC] = &gcc_pcie_2_aux_clk_src.clkr, + [GCC_PCIE_2_CFG_AHB_CLK] = &gcc_pcie_2_cfg_ahb_clk.clkr, + [GCC_PCIE_2_MSTR_AXI_CLK] = &gcc_pcie_2_mstr_axi_clk.clkr, + [GCC_PCIE_2_PIPE_CLK] = &gcc_pcie_2_pipe_clk.clkr, + [GCC_PCIE_2_SLV_AXI_CLK] = &gcc_pcie_2_slv_axi_clk.clkr, + [GCC_PCIE_2_SLV_Q2A_AXI_CLK] = &gcc_pcie_2_slv_q2a_axi_clk.clkr, + [GCC_PCIE_MDM_CLKREF_EN] = &gcc_pcie_mdm_clkref_en.clkr, + [GCC_PCIE_PHY_AUX_CLK] = &gcc_pcie_phy_aux_clk.clkr, + [GCC_PCIE_PHY_REFGEN_CLK_SRC] = &gcc_pcie_phy_refgen_clk_src.clkr, + [GCC_PCIE_WIFI_CLKREF_EN] = &gcc_pcie_wifi_clkref_en.clkr, + [GCC_PCIE_WIGIG_CLKREF_EN] = &gcc_pcie_wigig_clkref_en.clkr, + [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr, + [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr, + [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr, + [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr, + [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr, + [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr, + [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr, + [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_CVP_AHB_CLK] = &gcc_qmip_video_cvp_ahb_clk.clkr, + [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr, + [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr, + [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr, 
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr, + [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr, + [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr, + [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr, + [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr, + [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr, + [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr, + [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr, + [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr, + [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr, + [GCC_QUPV3_WRAP0_S6_CLK] = &gcc_qupv3_wrap0_s6_clk.clkr, + [GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr, + [GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr, + [GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr, + [GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr, + [GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr, + [GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr, + [GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr, + [GCC_QUPV3_WRAP1_S1_CLK_SRC] = &gcc_qupv3_wrap1_s1_clk_src.clkr, + [GCC_QUPV3_WRAP1_S2_CLK] = &gcc_qupv3_wrap1_s2_clk.clkr, + [GCC_QUPV3_WRAP1_S2_CLK_SRC] = &gcc_qupv3_wrap1_s2_clk_src.clkr, + [GCC_QUPV3_WRAP1_S3_CLK] = &gcc_qupv3_wrap1_s3_clk.clkr, + [GCC_QUPV3_WRAP1_S3_CLK_SRC] = &gcc_qupv3_wrap1_s3_clk_src.clkr, + [GCC_QUPV3_WRAP1_S4_CLK] = &gcc_qupv3_wrap1_s4_clk.clkr, + [GCC_QUPV3_WRAP1_S4_CLK_SRC] = &gcc_qupv3_wrap1_s4_clk_src.clkr, + [GCC_QUPV3_WRAP1_S5_CLK] = &gcc_qupv3_wrap1_s5_clk.clkr, + [GCC_QUPV3_WRAP1_S5_CLK_SRC] = &gcc_qupv3_wrap1_s5_clk_src.clkr, + [GCC_QUPV3_WRAP2_CORE_2X_CLK] = &gcc_qupv3_wrap2_core_2x_clk.clkr, + [GCC_QUPV3_WRAP2_CORE_CLK] = &gcc_qupv3_wrap2_core_clk.clkr, + [GCC_QUPV3_WRAP2_S0_CLK] = &gcc_qupv3_wrap2_s0_clk.clkr, + [GCC_QUPV3_WRAP2_S0_CLK_SRC] = &gcc_qupv3_wrap2_s0_clk_src.clkr, + [GCC_QUPV3_WRAP2_S1_CLK] = &gcc_qupv3_wrap2_s1_clk.clkr, + [GCC_QUPV3_WRAP2_S1_CLK_SRC] = &gcc_qupv3_wrap2_s1_clk_src.clkr, + [GCC_QUPV3_WRAP2_S2_CLK] = &gcc_qupv3_wrap2_s2_clk.clkr, + [GCC_QUPV3_WRAP2_S2_CLK_SRC] = &gcc_qupv3_wrap2_s2_clk_src.clkr, + [GCC_QUPV3_WRAP2_S3_CLK] = &gcc_qupv3_wrap2_s3_clk.clkr, + [GCC_QUPV3_WRAP2_S3_CLK_SRC] = &gcc_qupv3_wrap2_s3_clk_src.clkr, + [GCC_QUPV3_WRAP2_S4_CLK] = &gcc_qupv3_wrap2_s4_clk.clkr, + [GCC_QUPV3_WRAP2_S4_CLK_SRC] = &gcc_qupv3_wrap2_s4_clk_src.clkr, + [GCC_QUPV3_WRAP2_S5_CLK] = &gcc_qupv3_wrap2_s5_clk.clkr, + [GCC_QUPV3_WRAP2_S5_CLK_SRC] = &gcc_qupv3_wrap2_s5_clk_src.clkr, + [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_M_AHB_CLK] = &gcc_qupv3_wrap_1_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_1_S_AHB_CLK] = &gcc_qupv3_wrap_1_s_ahb_clk.clkr, + [GCC_QUPV3_WRAP_2_M_AHB_CLK] = &gcc_qupv3_wrap_2_m_ahb_clk.clkr, + [GCC_QUPV3_WRAP_2_S_AHB_CLK] = &gcc_qupv3_wrap_2_s_ahb_clk.clkr, + [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr, + [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr, + [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr, + [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr, + [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr, + [GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr, + [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr, + [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr, + [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr, + 
[GCC_TSIF_REF_CLK_SRC] = &gcc_tsif_ref_clk_src.clkr, + [GCC_UFS_1X_CLKREF_EN] = &gcc_ufs_1x_clkref_en.clkr, + [GCC_UFS_CARD_AHB_CLK] = &gcc_ufs_card_ahb_clk.clkr, + [GCC_UFS_CARD_AXI_CLK] = &gcc_ufs_card_axi_clk.clkr, + [GCC_UFS_CARD_AXI_CLK_SRC] = &gcc_ufs_card_axi_clk_src.clkr, + [GCC_UFS_CARD_ICE_CORE_CLK] = &gcc_ufs_card_ice_core_clk.clkr, + [GCC_UFS_CARD_ICE_CORE_CLK_SRC] = &gcc_ufs_card_ice_core_clk_src.clkr, + [GCC_UFS_CARD_PHY_AUX_CLK] = &gcc_ufs_card_phy_aux_clk.clkr, + [GCC_UFS_CARD_PHY_AUX_CLK_SRC] = &gcc_ufs_card_phy_aux_clk_src.clkr, + [GCC_UFS_CARD_RX_SYMBOL_0_CLK] = &gcc_ufs_card_rx_symbol_0_clk.clkr, + [GCC_UFS_CARD_RX_SYMBOL_1_CLK] = &gcc_ufs_card_rx_symbol_1_clk.clkr, + [GCC_UFS_CARD_TX_SYMBOL_0_CLK] = &gcc_ufs_card_tx_symbol_0_clk.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_CLK] = &gcc_ufs_card_unipro_core_clk.clkr, + [GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC] = + &gcc_ufs_card_unipro_core_clk_src.clkr, + [GCC_UFS_PHY_AHB_CLK] = &gcc_ufs_phy_ahb_clk.clkr, + [GCC_UFS_PHY_AXI_CLK] = &gcc_ufs_phy_axi_clk.clkr, + [GCC_UFS_PHY_AXI_CLK_SRC] = &gcc_ufs_phy_axi_clk_src.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK] = &gcc_ufs_phy_ice_core_clk.clkr, + [GCC_UFS_PHY_ICE_CORE_CLK_SRC] = &gcc_ufs_phy_ice_core_clk_src.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK] = &gcc_ufs_phy_phy_aux_clk.clkr, + [GCC_UFS_PHY_PHY_AUX_CLK_SRC] = &gcc_ufs_phy_phy_aux_clk_src.clkr, + [GCC_UFS_PHY_RX_SYMBOL_0_CLK] = &gcc_ufs_phy_rx_symbol_0_clk.clkr, + [GCC_UFS_PHY_RX_SYMBOL_1_CLK] = &gcc_ufs_phy_rx_symbol_1_clk.clkr, + [GCC_UFS_PHY_TX_SYMBOL_0_CLK] = &gcc_ufs_phy_tx_symbol_0_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK] = &gcc_ufs_phy_unipro_core_clk.clkr, + [GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC] = + &gcc_ufs_phy_unipro_core_clk_src.clkr, + [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr, + [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] = + &gcc_usb30_prim_mock_utmi_clk_src.clkr, + [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC] = + &gcc_usb30_prim_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr, + [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr, + [GCC_USB30_SEC_MASTER_CLK_SRC] = &gcc_usb30_sec_master_clk_src.clkr, + [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr, + [GCC_USB30_SEC_MOCK_UTMI_CLK_SRC] = + &gcc_usb30_sec_mock_utmi_clk_src.clkr, + [GCC_USB30_SEC_MOCK_UTMI_POSTDIV_CLK_SRC] = + &gcc_usb30_sec_mock_utmi_postdiv_clk_src.clkr, + [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK] = &gcc_usb3_prim_phy_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr, + [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr, + [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr, + [GCC_USB3_SEC_CLKREF_EN] = &gcc_usb3_sec_clkref_en.clkr, + [GCC_USB3_SEC_PHY_AUX_CLK] = &gcc_usb3_sec_phy_aux_clk.clkr, + [GCC_USB3_SEC_PHY_AUX_CLK_SRC] = &gcc_usb3_sec_phy_aux_clk_src.clkr, + [GCC_USB3_SEC_PHY_COM_AUX_CLK] = &gcc_usb3_sec_phy_com_aux_clk.clkr, + [GCC_USB3_SEC_PHY_PIPE_CLK] = &gcc_usb3_sec_phy_pipe_clk.clkr, + [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr, + [GCC_VIDEO_AXI1_CLK] = &gcc_video_axi1_clk.clkr, + [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr, + [GPLL0] = &gpll0.clkr, + [GPLL0_OUT_EVEN] = &gpll0_out_even.clkr, + [GPLL4] = &gpll4.clkr, + [GPLL9] = &gpll9.clkr, +}; + +static struct gdsc *gcc_sm8250_gdscs[] = { + [PCIE_0_GDSC] = &pcie_0_gdsc, + 
[PCIE_1_GDSC] = &pcie_1_gdsc, + [PCIE_2_GDSC] = &pcie_2_gdsc, + [UFS_CARD_GDSC] = &ufs_card_gdsc, + [UFS_PHY_GDSC] = &ufs_phy_gdsc, + [USB30_PRIM_GDSC] = &usb30_prim_gdsc, + [USB30_SEC_GDSC] = &usb30_sec_gdsc, + [HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC] = + &hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc, + [HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC] = + &hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc, + [HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC] = + &hlos1_vote_mmnoc_mmu_tbu_sf0_gdsc, + [HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC] = + &hlos1_vote_mmnoc_mmu_tbu_sf1_gdsc, +}; + +static const struct qcom_reset_map gcc_sm8250_resets[] = { + [GCC_GPU_BCR] = { 0x71000 }, + [GCC_MMSS_BCR] = { 0xb000 }, + [GCC_NPU_BWMON_BCR] = { 0x73000 }, + [GCC_NPU_BCR] = { 0x4d000 }, + [GCC_PCIE_0_BCR] = { 0x6b000 }, + [GCC_PCIE_0_LINK_DOWN_BCR] = { 0x6c014 }, + [GCC_PCIE_0_NOCSR_COM_PHY_BCR] = { 0x6c020 }, + [GCC_PCIE_0_PHY_BCR] = { 0x6c01c }, + [GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR] = { 0x6c028 }, + [GCC_PCIE_1_BCR] = { 0x8d000 }, + [GCC_PCIE_1_LINK_DOWN_BCR] = { 0x8e014 }, + [GCC_PCIE_1_NOCSR_COM_PHY_BCR] = { 0x8e020 }, + [GCC_PCIE_1_PHY_BCR] = { 0x8e01c }, + [GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR] = { 0x8e000 }, + [GCC_PCIE_2_BCR] = { 0x6000 }, + [GCC_PCIE_2_LINK_DOWN_BCR] = { 0x1f014 }, + [GCC_PCIE_2_NOCSR_COM_PHY_BCR] = { 0x1f020 }, + [GCC_PCIE_2_PHY_BCR] = { 0x1f01c }, + [GCC_PCIE_2_PHY_NOCSR_COM_PHY_BCR] = { 0x1f028 }, + [GCC_PCIE_PHY_BCR] = { 0x6f000 }, + [GCC_PCIE_PHY_CFG_AHB_BCR] = { 0x6f00c }, + [GCC_PCIE_PHY_COM_BCR] = { 0x6f010 }, + [GCC_PDM_BCR] = { 0x33000 }, + [GCC_PRNG_BCR] = { 0x34000 }, + [GCC_QUPV3_WRAPPER_0_BCR] = { 0x17000 }, + [GCC_QUPV3_WRAPPER_1_BCR] = { 0x18000 }, + [GCC_QUPV3_WRAPPER_2_BCR] = { 0x1e000 }, + [GCC_QUSB2PHY_PRIM_BCR] = { 0x12000 }, + [GCC_QUSB2PHY_SEC_BCR] = { 0x12004 }, + [GCC_SDCC2_BCR] = { 0x14000 }, + [GCC_SDCC4_BCR] = { 0x16000 }, + [GCC_TSIF_BCR] = { 0x36000 }, + [GCC_UFS_CARD_BCR] = { 0x75000 }, + [GCC_UFS_PHY_BCR] = { 0x77000 }, + [GCC_USB30_PRIM_BCR] = { 0xf000 }, + [GCC_USB30_SEC_BCR] = { 0x10000 }, + [GCC_USB3_DP_PHY_PRIM_BCR] = { 0x50008 }, + [GCC_USB3_DP_PHY_SEC_BCR] = { 0x50014 }, + [GCC_USB3_PHY_PRIM_BCR] = { 0x50000 }, + [GCC_USB3_PHY_SEC_BCR] = { 0x5000c }, + [GCC_USB3PHY_PHY_PRIM_BCR] = { 0x50004 }, + [GCC_USB3PHY_PHY_SEC_BCR] = { 0x50010 }, + [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x6a000 }, + [GCC_VIDEO_AXI0_CLK_ARES] = { 0xb024, 2 }, + [GCC_VIDEO_AXI1_CLK_ARES] = { 0xb028, 2 }, +}; + +static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = { + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s6_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap0_s7_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap1_s5_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s0_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s1_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s2_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s3_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s4_clk_src), + DEFINE_RCG_DFS(gcc_qupv3_wrap2_s5_clk_src), +}; + +static const struct regmap_config gcc_sm8250_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x9c100, + .fast_io = true, +}; + 
+static const struct qcom_cc_desc gcc_sm8250_desc = { + .config = &gcc_sm8250_regmap_config, + .clks = gcc_sm8250_clocks, + .num_clks = ARRAY_SIZE(gcc_sm8250_clocks), + .resets = gcc_sm8250_resets, + .num_resets = ARRAY_SIZE(gcc_sm8250_resets), + .gdscs = gcc_sm8250_gdscs, + .num_gdscs = ARRAY_SIZE(gcc_sm8250_gdscs), +}; + +static const struct of_device_id gcc_sm8250_match_table[] = { + { .compatible = "qcom,gcc-sm8250" }, + { } +}; +MODULE_DEVICE_TABLE(of, gcc_sm8250_match_table); + +static int gcc_sm8250_probe(struct platform_device *pdev) +{ + struct regmap *regmap; + int ret; + + regmap = qcom_cc_map(pdev, &gcc_sm8250_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* + * Disable the GPLL0 active input to NPU and GPU + * via MISC registers. + */ + regmap_update_bits(regmap, 0x4d110, 0x3, 0x3); + regmap_update_bits(regmap, 0x71028, 0x3, 0x3); + + /* + * Keep the clocks always-ON + * GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, GCC_DISP_AHB_CLK, + * GCC_CPUSS_DVM_BUS_CLK, GCC_GPU_CFG_AHB_CLK, + * GCC_SYS_NOC_CPUSS_AHB_CLK + */ + regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x4818c, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x52000, BIT(0), BIT(0)); + + ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, + ARRAY_SIZE(gcc_dfs_clocks)); + if (ret) + return ret; + + return qcom_cc_really_probe(pdev, &gcc_sm8250_desc, regmap); +} + +static struct platform_driver gcc_sm8250_driver = { + .probe = gcc_sm8250_probe, + .driver = { + .name = "gcc-sm8250", + .of_match_table = gcc_sm8250_match_table, + }, +}; + +static int __init gcc_sm8250_init(void) +{ + return platform_driver_register(&gcc_sm8250_driver); +} +subsys_initcall(gcc_sm8250_init); + +static void __exit gcc_sm8250_exit(void) +{ + platform_driver_unregister(&gcc_sm8250_driver); +} +module_exit(gcc_sm8250_exit); + +MODULE_DESCRIPTION("QTI GCC SM8250 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/qcom/gpucc-sc7180.c b/drivers/clk/qcom/gpucc-sc7180.c index a96c0b945de2..7b656b6aeced 100644 --- a/drivers/clk/qcom/gpucc-sc7180.c +++ b/drivers/clk/qcom/gpucc-sc7180.c @@ -170,8 +170,45 @@ static struct gdsc cx_gdsc = { .flags = VOTABLE, }; +/* + * On SC7180 the GPU GX domain is *almost* entirely controlled by the GMU + * running in the CX domain so the CPU doesn't need to know anything about the + * GX domain EXCEPT.... + * + * Hardware constraints dictate that the GX be powered down before the CX. If + * the GMU crashes it could leave the GX on. In order to successfully bring back + * the device the CPU needs to disable the GX headswitch. There being no sane + * way to reach in and touch that register from deep inside the GPU driver we + * need to set up the infrastructure to be able to ensure that the GPU can + * ensure that the GX is off during this super special case. We do this by + * defining a GX gdsc with a dummy enable function and a "default" disable + * function. + * + * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU + * driver. During power up, nothing will happen from the CPU (and the GMU will + * power up normally but during power down this will ensure that the GX domain + * is *really* off - this gives us a semi standard way of doing what we need. 
+ */ +static int gx_gdsc_enable(struct generic_pm_domain *domain) +{ + /* Do nothing but give genpd the impression that we were successful */ + return 0; +} + +static struct gdsc gx_gdsc = { + .gdscr = 0x100c, + .clamp_io_ctrl = 0x1508, + .pd = { + .name = "gx_gdsc", + .power_on = gx_gdsc_enable, + }, + .pwrsts = PWRSTS_OFF_ON, + .flags = CLAMP_IO, +}; + static struct gdsc *gpu_cc_sc7180_gdscs[] = { [CX_GDSC] = &cx_gdsc, + [GX_GDSC] = &gx_gdsc, }; static struct clk_regmap *gpu_cc_sc7180_clocks[] = { diff --git a/drivers/clk/qcom/mss-sc7180.c b/drivers/clk/qcom/mss-sc7180.c new file mode 100644 index 000000000000..673fa1a4f734 --- /dev/null +++ b/drivers/clk/qcom/mss-sc7180.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ + +#include <linux/clk-provider.h> +#include <linux/platform_device.h> +#include <linux/module.h> +#include <linux/pm_clock.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> + +#include <dt-bindings/clock/qcom,mss-sc7180.h> + +#include "clk-regmap.h" +#include "clk-branch.h" +#include "common.h" + +static struct clk_branch mss_axi_nav_clk = { + .halt_reg = 0x20bc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x20bc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "mss_axi_nav_clk", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "gcc_mss_nav_axi", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static struct clk_branch mss_axi_crypto_clk = { + .halt_reg = 0x20cc, + .halt_check = BRANCH_HALT, + .clkr = { + .enable_reg = 0x20cc, + .enable_mask = BIT(0), + .hw.init = &(struct clk_init_data){ + .name = "mss_axi_crypto_clk", + .parent_data = &(const struct clk_parent_data){ + .fw_name = "gcc_mss_mfab_axis", + }, + .num_parents = 1, + .ops = &clk_branch2_ops, + }, + }, +}; + +static const struct regmap_config mss_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .max_register = 0x41aa0cc, +}; + +static struct clk_regmap *mss_sc7180_clocks[] = { + [MSS_AXI_CRYPTO_CLK] = &mss_axi_crypto_clk.clkr, + [MSS_AXI_NAV_CLK] = &mss_axi_nav_clk.clkr, +}; + +static const struct qcom_cc_desc mss_sc7180_desc = { + .config = &mss_regmap_config, + .clks = mss_sc7180_clocks, + .num_clks = ARRAY_SIZE(mss_sc7180_clocks), +}; + +static int mss_sc7180_probe(struct platform_device *pdev) +{ + int ret; + + pm_runtime_enable(&pdev->dev); + ret = pm_clk_create(&pdev->dev); + if (ret) + goto disable_pm_runtime; + + ret = pm_clk_add(&pdev->dev, "cfg_ahb"); + if (ret < 0) { + dev_err(&pdev->dev, "failed to acquire iface clock\n"); + goto destroy_pm_clk; + } + + ret = qcom_cc_probe(pdev, &mss_sc7180_desc); + if (ret < 0) + goto destroy_pm_clk; + + return 0; + +destroy_pm_clk: + pm_clk_destroy(&pdev->dev); + +disable_pm_runtime: + pm_runtime_disable(&pdev->dev); + + return ret; +} + +static int mss_sc7180_remove(struct platform_device *pdev) +{ + pm_clk_destroy(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static const struct dev_pm_ops mss_sc7180_pm_ops = { + SET_RUNTIME_PM_OPS(pm_clk_suspend, pm_clk_resume, NULL) +}; + +static const struct of_device_id mss_sc7180_match_table[] = { + { .compatible = "qcom,sc7180-mss" }, + { } +}; +MODULE_DEVICE_TABLE(of, mss_sc7180_match_table); + +static struct platform_driver mss_sc7180_driver = { + .probe = mss_sc7180_probe, + .remove = mss_sc7180_remove, + .driver = { + .name = "sc7180-mss", + .of_match_table = mss_sc7180_match_table, + 
.pm = &mss_sc7180_pm_ops, + }, +}; + +static int __init mss_sc7180_init(void) +{ + return platform_driver_register(&mss_sc7180_driver); +} +subsys_initcall(mss_sc7180_init); + +static void __exit mss_sc7180_exit(void) +{ + platform_driver_unregister(&mss_sc7180_driver); +} +module_exit(mss_sc7180_exit); + +MODULE_DESCRIPTION("QTI MSS SC7180 Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/renesas/Kconfig b/drivers/clk/renesas/Kconfig index 250d8165167a..ac2dd92ce2ef 100644 --- a/drivers/clk/renesas/Kconfig +++ b/drivers/clk/renesas/Kconfig @@ -20,7 +20,7 @@ config CLK_RENESAS select CLK_R8A7791 if ARCH_R8A7791 || ARCH_R8A7793 select CLK_R8A7792 if ARCH_R8A7792 select CLK_R8A7794 if ARCH_R8A7794 - select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 || ARCH_R8A7795 + select CLK_R8A7795 if ARCH_R8A77950 || ARCH_R8A77951 select CLK_R8A77960 if ARCH_R8A77960 select CLK_R8A77961 if ARCH_R8A77961 select CLK_R8A77965 if ARCH_R8A77965 @@ -161,6 +161,7 @@ config CLK_RCAR_GEN3_CPG config CLK_RCAR_USB2_CLOCK_SEL bool "Renesas R-Car USB2 clock selector support" depends on ARCH_RENESAS || COMPILE_TEST + select RESET_CONTROLLER help This is a driver for R-Car USB2 clock selector diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c index fbc8c75f4314..ff5b3020cb03 100644 --- a/drivers/clk/renesas/r8a7795-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c @@ -44,6 +44,7 @@ enum clk_ids { CLK_S3, CLK_SDSRC, CLK_SSPSRC, + CLK_RPCSRC, CLK_RINT, /* Module Clocks */ @@ -70,6 +71,12 @@ static struct cpg_core_clk r8a7795_core_clks[] __initdata = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), + + DEF_BASE("rpc", R8A7795_CLK_RPC, CLK_TYPE_GEN3_RPC, + CLK_RPCSRC), + DEF_BASE("rpcd2", R8A7795_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, + R8A7795_CLK_RPC), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -242,6 +249,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = { DEF_MOD("can-fd", 914, R8A7795_CLK_S3D2), DEF_MOD("can-if1", 915, R8A7795_CLK_S3D4), DEF_MOD("can-if0", 916, R8A7795_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A7795_CLK_RPCD2), DEF_MOD("i2c6", 918, R8A7795_CLK_S0D6), DEF_MOD("i2c5", 919, R8A7795_CLK_S0D6), DEF_MOD("i2c-dvfs", 926, R8A7795_CLK_CP), diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c index e8420d3ada94..e8d466dbc7f9 100644 --- a/drivers/clk/renesas/r8a7796-cpg-mssr.c +++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c @@ -46,6 +46,7 @@ enum clk_ids { CLK_S3, CLK_SDSRC, CLK_SSPSRC, + CLK_RPCSRC, CLK_RINT, /* Module Clocks */ @@ -72,6 +73,12 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), + + DEF_BASE("rpc", R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC, + CLK_RPCSRC), + DEF_BASE("rpcd2", R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, + R8A7796_CLK_RPC), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -105,6 +112,7 @@ static const struct cpg_core_clk r8a7796_core_clks[] __initconst = { DEF_GEN3_SD("sd3", R8A7796_CLK_SD3, CLK_SDSRC, 0x26c), DEF_FIXED("cl", R8A7796_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cr", R8A7796_CLK_CR, CLK_PLL1_DIV4, 2, 1), DEF_FIXED("cp", R8A7796_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", 
R8A7796_CLK_CPEX, CLK_EXTAL, 2, 1), @@ -132,6 +140,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = { DEF_MOD("sys-dmac2", 217, R8A7796_CLK_S3D1), DEF_MOD("sys-dmac1", 218, R8A7796_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A7796_CLK_S0D3), + DEF_MOD("sceg-pub", 229, R8A7796_CLK_CR), DEF_MOD("cmt3", 300, R8A7796_CLK_R), DEF_MOD("cmt2", 301, R8A7796_CLK_R), DEF_MOD("cmt1", 302, R8A7796_CLK_R), @@ -215,6 +224,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = { DEF_MOD("can-fd", 914, R8A7796_CLK_S3D2), DEF_MOD("can-if1", 915, R8A7796_CLK_S3D4), DEF_MOD("can-if0", 916, R8A7796_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A7796_CLK_RPCD2), DEF_MOD("i2c6", 918, R8A7796_CLK_S0D6), DEF_MOD("i2c5", 919, R8A7796_CLK_S0D6), DEF_MOD("i2c-dvfs", 926, R8A7796_CLK_CP), diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c index b3af4da2ca74..7a05a2fc1cc6 100644 --- a/drivers/clk/renesas/r8a77965-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c @@ -43,6 +43,7 @@ enum clk_ids { CLK_S3, CLK_SDSRC, CLK_SSPSRC, + CLK_RPCSRC, CLK_RINT, /* Module Clocks */ @@ -68,6 +69,12 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_FIXED(".s2", CLK_S2, CLK_PLL1_DIV2, 4, 1), DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 6, 1), DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL1_DIV2, 2, 1), + DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1), + + DEF_BASE("rpc", R8A77965_CLK_RPC, CLK_TYPE_GEN3_RPC, + CLK_RPCSRC), + DEF_BASE("rpcd2", R8A77965_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2, + R8A77965_CLK_RPC), DEF_GEN3_OSC(".r", CLK_RINT, CLK_EXTAL, 32), @@ -99,7 +106,8 @@ static const struct cpg_core_clk r8a77965_core_clks[] __initconst = { DEF_GEN3_SD("sd2", R8A77965_CLK_SD2, CLK_SDSRC, 0x268), DEF_GEN3_SD("sd3", R8A77965_CLK_SD3, CLK_SDSRC, 0x26c), - DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cl", R8A77965_CLK_CL, CLK_PLL1_DIV2, 48, 1), + DEF_FIXED("cr", R8A77965_CLK_CR, CLK_PLL1_DIV4, 2, 1), DEF_FIXED("cp", R8A77965_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A77965_CLK_CPEX, CLK_EXTAL, 2, 1), @@ -127,6 +135,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("sys-dmac2", 217, R8A77965_CLK_S3D1), DEF_MOD("sys-dmac1", 218, R8A77965_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A77965_CLK_S0D3), + DEF_MOD("sceg-pub", 229, R8A77965_CLK_CR), DEF_MOD("cmt3", 300, R8A77965_CLK_R), DEF_MOD("cmt2", 301, R8A77965_CLK_R), @@ -215,6 +224,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = { DEF_MOD("can-fd", 914, R8A77965_CLK_S3D2), DEF_MOD("can-if1", 915, R8A77965_CLK_S3D4), DEF_MOD("can-if0", 916, R8A77965_CLK_S3D4), + DEF_MOD("rpc-if", 917, R8A77965_CLK_RPCD2), DEF_MOD("i2c6", 918, R8A77965_CLK_S0D6), DEF_MOD("i2c5", 919, R8A77965_CLK_S0D6), DEF_MOD("i2c-dvfs", 926, R8A77965_CLK_CP), diff --git a/drivers/clk/renesas/r8a77990-cpg-mssr.c b/drivers/clk/renesas/r8a77990-cpg-mssr.c index ceabf55c21c2..8eda2e3e2480 100644 --- a/drivers/clk/renesas/r8a77990-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77990-cpg-mssr.c @@ -105,6 +105,7 @@ static const struct cpg_core_clk r8a77990_core_clks[] __initconst = { DEF_GEN3_SD("sd3", R8A77990_CLK_SD3, CLK_SDSRC, 0x026c), DEF_FIXED("cl", R8A77990_CLK_CL, CLK_PLL1, 48, 1), + DEF_FIXED("cr", R8A77990_CLK_CR, CLK_PLL1D2, 2, 1), DEF_FIXED("cp", R8A77990_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A77990_CLK_CPEX, CLK_EXTAL, 4, 1), @@ -135,6 +136,7 @@ static const struct mssr_mod_clk r8a77990_mod_clks[] __initconst = { DEF_MOD("sys-dmac2", 217, 
R8A77990_CLK_S3D1), DEF_MOD("sys-dmac1", 218, R8A77990_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A77990_CLK_S3D1), + DEF_MOD("sceg-pub", 229, R8A77990_CLK_CR), DEF_MOD("cmt3", 300, R8A77990_CLK_R), DEF_MOD("cmt2", 301, R8A77990_CLK_R), diff --git a/drivers/clk/renesas/r8a77995-cpg-mssr.c b/drivers/clk/renesas/r8a77995-cpg-mssr.c index 962bb337f2e7..056ebf3e70e2 100644 --- a/drivers/clk/renesas/r8a77995-cpg-mssr.c +++ b/drivers/clk/renesas/r8a77995-cpg-mssr.c @@ -91,6 +91,7 @@ static const struct cpg_core_clk r8a77995_core_clks[] __initconst = { DEF_FIXED("s3d4", R8A77995_CLK_S3D4, CLK_S3, 4, 1), DEF_FIXED("cl", R8A77995_CLK_CL, CLK_PLL1, 48, 1), + DEF_FIXED("cr", R8A77995_CLK_CR, CLK_PLL1D2, 2, 1), DEF_FIXED("cp", R8A77995_CLK_CP, CLK_EXTAL, 2, 1), DEF_FIXED("cpex", R8A77995_CLK_CPEX, CLK_EXTAL, 4, 1), @@ -122,6 +123,7 @@ static const struct mssr_mod_clk r8a77995_mod_clks[] __initconst = { DEF_MOD("sys-dmac2", 217, R8A77995_CLK_S3D1), DEF_MOD("sys-dmac1", 218, R8A77995_CLK_S3D1), DEF_MOD("sys-dmac0", 219, R8A77995_CLK_S3D1), + DEF_MOD("sceg-pub", 229, R8A77995_CLK_CR), DEF_MOD("cmt3", 300, R8A77995_CLK_R), DEF_MOD("cmt2", 301, R8A77995_CLK_R), DEF_MOD("cmt1", 302, R8A77995_CLK_R), diff --git a/drivers/clk/renesas/rcar-usb2-clock-sel.c b/drivers/clk/renesas/rcar-usb2-clock-sel.c index b97f5f9326cf..d4c02986c34e 100644 --- a/drivers/clk/renesas/rcar-usb2-clock-sel.c +++ b/drivers/clk/renesas/rcar-usb2-clock-sel.c @@ -19,6 +19,7 @@ #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> +#include <linux/reset.h> #include <linux/slab.h> #define USB20_CLKSET0 0x00 @@ -26,9 +27,16 @@ #define CLKSET0_PRIVATE BIT(0) #define CLKSET0_EXTAL_ONLY (CLKSET0_INTCLK_EN | CLKSET0_PRIVATE) +static const struct clk_bulk_data rcar_usb2_clocks[] = { + { .id = "ehci_ohci", }, + { .id = "hs-usb-if", }, +}; + struct usb2_clock_sel_priv { void __iomem *base; struct clk_hw hw; + struct clk_bulk_data clks[ARRAY_SIZE(rcar_usb2_clocks)]; + struct reset_control *rsts; bool extal; bool xtal; }; @@ -53,14 +61,32 @@ static void usb2_clock_sel_disable_extal_only(struct usb2_clock_sel_priv *priv) static int usb2_clock_sel_enable(struct clk_hw *hw) { - usb2_clock_sel_enable_extal_only(to_priv(hw)); + struct usb2_clock_sel_priv *priv = to_priv(hw); + int ret; + + ret = reset_control_deassert(priv->rsts); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(priv->clks), priv->clks); + if (ret) { + reset_control_assert(priv->rsts); + return ret; + } + + usb2_clock_sel_enable_extal_only(priv); return 0; } static void usb2_clock_sel_disable(struct clk_hw *hw) { - usb2_clock_sel_disable_extal_only(to_priv(hw)); + struct usb2_clock_sel_priv *priv = to_priv(hw); + + usb2_clock_sel_disable_extal_only(priv); + + clk_bulk_disable_unprepare(ARRAY_SIZE(priv->clks), priv->clks); + reset_control_assert(priv->rsts); } /* @@ -119,6 +145,7 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev) struct usb2_clock_sel_priv *priv; struct clk *clk; struct clk_init_data init; + int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -128,6 +155,15 @@ static int rcar_usb2_clock_sel_probe(struct platform_device *pdev) if (IS_ERR(priv->base)) return PTR_ERR(priv->base); + memcpy(priv->clks, rcar_usb2_clocks, sizeof(priv->clks)); + ret = devm_clk_bulk_get(dev, ARRAY_SIZE(priv->clks), priv->clks); + if (ret < 0) + return ret; + + priv->rsts = devm_reset_control_array_get(dev, true, false); + if (IS_ERR(priv->rsts)) + return PTR_ERR(priv->rsts); + pm_runtime_enable(dev); 
pm_runtime_get_sync(dev); diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c index 4abe7ff31f53..975454a3dd72 100644 --- a/drivers/clk/rockchip/clk-mmc-phase.c +++ b/drivers/clk/rockchip/clk-mmc-phase.c @@ -51,9 +51,9 @@ static int rockchip_mmc_get_phase(struct clk_hw *hw) u16 degrees; u32 delay_num = 0; - /* See the comment for rockchip_mmc_set_phase below */ + /* Constant signal, no measurable phase shift */ if (!rate) - return -EINVAL; + return 0; raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift); diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index dad31308c071..1949ae7851b2 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -356,10 +356,6 @@ struct samsung_clk_provider * __init samsung_cmu_register_one( } ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); - if (!ctx) { - panic("%s: unable to allocate ctx\n", __func__); - return ctx; - } if (cmu->pll_clks) samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks, diff --git a/drivers/clk/socfpga/clk-gate-s10.c b/drivers/clk/socfpga/clk-gate-s10.c index 54a464fa63e0..8be4722f6064 100644 --- a/drivers/clk/socfpga/clk-gate-s10.c +++ b/drivers/clk/socfpga/clk-gate-s10.c @@ -65,54 +65,49 @@ static const struct clk_ops dbgclk_ops = { .get_parent = socfpga_gate_get_parent, }; -struct clk *s10_register_gate(const char *name, const char *parent_name, - const char * const *parent_names, - u8 num_parents, unsigned long flags, - void __iomem *regbase, unsigned long gate_reg, - unsigned long gate_idx, unsigned long div_reg, - unsigned long div_offset, u8 div_width, - unsigned long bypass_reg, u8 bypass_shift, - u8 fixed_div) +struct clk *s10_register_gate(const struct stratix10_gate_clock *clks, void __iomem *regbase) { struct clk *clk; struct socfpga_gate_clk *socfpga_clk; struct clk_init_data init; + const char * const *parent_names = clks->parent_names; + const char *parent_name = clks->parent_name; socfpga_clk = kzalloc(sizeof(*socfpga_clk), GFP_KERNEL); if (!socfpga_clk) return NULL; - socfpga_clk->hw.reg = regbase + gate_reg; - socfpga_clk->hw.bit_idx = gate_idx; + socfpga_clk->hw.reg = regbase + clks->gate_reg; + socfpga_clk->hw.bit_idx = clks->gate_idx; gateclk_ops.enable = clk_gate_ops.enable; gateclk_ops.disable = clk_gate_ops.disable; - socfpga_clk->fixed_div = fixed_div; + socfpga_clk->fixed_div = clks->fixed_div; - if (div_reg) - socfpga_clk->div_reg = regbase + div_reg; + if (clks->div_reg) + socfpga_clk->div_reg = regbase + clks->div_reg; else socfpga_clk->div_reg = NULL; - socfpga_clk->width = div_width; - socfpga_clk->shift = div_offset; + socfpga_clk->width = clks->div_width; + socfpga_clk->shift = clks->div_offset; - if (bypass_reg) - socfpga_clk->bypass_reg = regbase + bypass_reg; + if (clks->bypass_reg) + socfpga_clk->bypass_reg = regbase + clks->bypass_reg; else socfpga_clk->bypass_reg = NULL; - socfpga_clk->bypass_shift = bypass_shift; + socfpga_clk->bypass_shift = clks->bypass_shift; - if (streq(name, "cs_pdbg_clk")) + if (streq(clks->name, "cs_pdbg_clk")) init.ops = &dbgclk_ops; else init.ops = &gateclk_ops; - init.name = name; - init.flags = flags; + init.name = clks->name; + init.flags = clks->flags; - init.num_parents = num_parents; + init.num_parents = clks->num_parents; init.parent_names = parent_names ? 
parent_names : &parent_name; socfpga_clk->hw.hw.init = &init; @@ -121,6 +116,5 @@ struct clk *s10_register_gate(const char *name, const char *parent_name, kfree(socfpga_clk); return NULL; } - return clk; } diff --git a/drivers/clk/socfpga/clk-periph-s10.c b/drivers/clk/socfpga/clk-periph-s10.c index 1a191eeeebba..dd6d4056e9de 100644 --- a/drivers/clk/socfpga/clk-periph-s10.c +++ b/drivers/clk/socfpga/clk-periph-s10.c @@ -73,26 +73,27 @@ static const struct clk_ops peri_cnt_clk_ops = { .get_parent = clk_periclk_get_parent, }; -struct clk *s10_register_periph(const char *name, const char *parent_name, - const char * const *parent_names, - u8 num_parents, unsigned long flags, - void __iomem *reg, unsigned long offset) +struct clk *s10_register_periph(const struct stratix10_perip_c_clock *clks, + void __iomem *reg) { struct clk *clk; struct socfpga_periph_clk *periph_clk; struct clk_init_data init; + const char *name = clks->name; + const char *parent_name = clks->parent_name; + const char * const *parent_names = clks->parent_names; periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL); if (WARN_ON(!periph_clk)) return NULL; - periph_clk->hw.reg = reg + offset; + periph_clk->hw.reg = reg + clks->offset; init.name = name; init.ops = &peri_c_clk_ops; - init.flags = flags; + init.flags = clks->flags; - init.num_parents = num_parents; + init.num_parents = clks->num_parents; init.parent_names = parent_names ? parent_names : &parent_name; periph_clk->hw.hw.init = &init; @@ -105,38 +106,37 @@ struct clk *s10_register_periph(const char *name, const char *parent_name, return clk; } -struct clk *s10_register_cnt_periph(const char *name, const char *parent_name, - const char * const *parent_names, - u8 num_parents, unsigned long flags, - void __iomem *regbase, unsigned long offset, - u8 fixed_divider, unsigned long bypass_reg, - unsigned long bypass_shift) +struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *clks, + void __iomem *regbase) { struct clk *clk; struct socfpga_periph_clk *periph_clk; struct clk_init_data init; + const char *name = clks->name; + const char *parent_name = clks->parent_name; + const char * const *parent_names = clks->parent_names; periph_clk = kzalloc(sizeof(*periph_clk), GFP_KERNEL); if (WARN_ON(!periph_clk)) return NULL; - if (offset) - periph_clk->hw.reg = regbase + offset; + if (clks->offset) + periph_clk->hw.reg = regbase + clks->offset; else periph_clk->hw.reg = NULL; - if (bypass_reg) - periph_clk->bypass_reg = regbase + bypass_reg; + if (clks->bypass_reg) + periph_clk->bypass_reg = regbase + clks->bypass_reg; else periph_clk->bypass_reg = NULL; - periph_clk->bypass_shift = bypass_shift; - periph_clk->fixed_div = fixed_divider; + periph_clk->bypass_shift = clks->bypass_shift; + periph_clk->fixed_div = clks->fixed_divider; init.name = name; init.ops = &peri_cnt_clk_ops; - init.flags = flags; + init.flags = clks->flags; - init.num_parents = num_parents; + init.num_parents = clks->num_parents; init.parent_names = parent_names ? 
parent_names : &parent_name; periph_clk->hw.hw.init = &init; diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c index 4705eb544f01..a301bb22f36c 100644 --- a/drivers/clk/socfpga/clk-pll-s10.c +++ b/drivers/clk/socfpga/clk-pll-s10.c @@ -39,7 +39,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk, /* read VCO1 reg for numerator and denominator */ reg = readl(socfpgaclk->hw.reg); refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT; - vco_freq = (unsigned long long)parent_rate / refdiv; + + vco_freq = parent_rate; + do_div(vco_freq, refdiv); /* Read mdiv and fdiv from the fdbck register */ reg = readl(socfpgaclk->hw.reg + 0x4); @@ -108,19 +110,20 @@ static struct clk_ops clk_boot_ops = { .prepare = clk_pll_prepare, }; -struct clk *s10_register_pll(const char *name, const char * const *parent_names, - u8 num_parents, unsigned long flags, - void __iomem *reg, unsigned long offset) +struct clk *s10_register_pll(const struct stratix10_pll_clock *clks, + void __iomem *reg) { struct clk *clk; struct socfpga_pll *pll_clk; struct clk_init_data init; + const char *name = clks->name; + const char * const *parent_names = clks->parent_names; pll_clk = kzalloc(sizeof(*pll_clk), GFP_KERNEL); if (WARN_ON(!pll_clk)) return NULL; - pll_clk->hw.reg = reg + offset; + pll_clk->hw.reg = reg + clks->offset; if (streq(name, SOCFPGA_BOOT_CLK)) init.ops = &clk_boot_ops; @@ -128,9 +131,9 @@ struct clk *s10_register_pll(const char *name, const char * const *parent_names, init.ops = &clk_pll_ops; init.name = name; - init.flags = flags; + init.flags = clks->flags; - init.num_parents = num_parents; + init.num_parents = clks->num_parents; init.parent_names = parent_names; pll_clk->hw.hw.init = &init; diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index 993f3a73c71e..dea7c6c7d269 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -177,9 +177,7 @@ static int s10_clk_register_c_perip(const struct stratix10_perip_c_clock *clks, int i; for (i = 0; i < nums; i++) { - clk = s10_register_periph(clks[i].name, clks[i].parent_name, - clks[i].parent_names, clks[i].num_parents, - clks[i].flags, base, clks[i].offset); + clk = s10_register_periph(&clks[i], base); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); @@ -198,14 +196,7 @@ static int s10_clk_register_cnt_perip(const struct stratix10_perip_cnt_clock *cl int i; for (i = 0; i < nums; i++) { - clk = s10_register_cnt_periph(clks[i].name, clks[i].parent_name, - clks[i].parent_names, - clks[i].num_parents, - clks[i].flags, base, - clks[i].offset, - clks[i].fixed_divider, - clks[i].bypass_reg, - clks[i].bypass_shift); + clk = s10_register_cnt_periph(&clks[i], base); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); @@ -225,16 +216,7 @@ static int s10_clk_register_gate(const struct stratix10_gate_clock *clks, int i; for (i = 0; i < nums; i++) { - clk = s10_register_gate(clks[i].name, clks[i].parent_name, - clks[i].parent_names, - clks[i].num_parents, - clks[i].flags, base, - clks[i].gate_reg, - clks[i].gate_idx, clks[i].div_reg, - clks[i].div_offset, clks[i].div_width, - clks[i].bypass_reg, - clks[i].bypass_shift, - clks[i].fixed_div); + clk = s10_register_gate(&clks[i], base); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); @@ -254,10 +236,7 @@ static int s10_clk_register_pll(const struct stratix10_pll_clock *clks, int i; for (i = 0; i < nums; i++) { 
- clk = s10_register_pll(clks[i].name, clks[i].parent_names, - clks[i].num_parents, - clks[i].flags, base, - clks[i].offset); + clk = s10_register_pll(&clks[i], base); if (IS_ERR(clk)) { pr_err("%s: failed to register clock %s\n", __func__, clks[i].name); diff --git a/drivers/clk/socfpga/stratix10-clk.h b/drivers/clk/socfpga/stratix10-clk.h index e8e121907952..fcabef42249c 100644 --- a/drivers/clk/socfpga/stratix10-clk.h +++ b/drivers/clk/socfpga/stratix10-clk.h @@ -60,21 +60,12 @@ struct stratix10_gate_clock { u8 fixed_div; }; -struct clk *s10_register_pll(const char *, const char *const *, u8, - unsigned long, void __iomem *, unsigned long); - -struct clk *s10_register_periph(const char *, const char *, - const char * const *, u8, unsigned long, - void __iomem *, unsigned long); -struct clk *s10_register_cnt_periph(const char *, const char *, - const char * const *, u8, - unsigned long, void __iomem *, - unsigned long, u8, unsigned long, - unsigned long); -struct clk *s10_register_gate(const char *, const char *, - const char * const *, u8, - unsigned long, void __iomem *, - unsigned long, unsigned long, - unsigned long, unsigned long, u8, - unsigned long, u8, u8); +struct clk *s10_register_pll(const struct stratix10_pll_clock *, + void __iomem *); +struct clk *s10_register_periph(const struct stratix10_perip_c_clock *, + void __iomem *); +struct clk *s10_register_cnt_periph(const struct stratix10_perip_cnt_clock *, + void __iomem *); +struct clk *s10_register_gate(const struct stratix10_gate_clock *, + void __iomem *); #endif /* __STRATIX10_CLK_H */ diff --git a/drivers/clk/sprd/Kconfig b/drivers/clk/sprd/Kconfig index 3c219af25100..e18c80fbe804 100644 --- a/drivers/clk/sprd/Kconfig +++ b/drivers/clk/sprd/Kconfig @@ -13,4 +13,12 @@ config SPRD_SC9860_CLK tristate "Support for the Spreadtrum SC9860 clocks" depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST default ARM64 && ARCH_SPRD + +config SPRD_SC9863A_CLK + tristate "Support for the Spreadtrum SC9863A clocks" + depends on (ARM64 && ARCH_SPRD) || COMPILE_TEST + default ARM64 && ARCH_SPRD + help + Support for the global clock controller on sc9863a devices. + Say Y if you want to use peripheral devices on sc9863a SoC. 
endif diff --git a/drivers/clk/sprd/Makefile b/drivers/clk/sprd/Makefile index d4c00788d53c..41d90e0d7863 100644 --- a/drivers/clk/sprd/Makefile +++ b/drivers/clk/sprd/Makefile @@ -10,3 +10,4 @@ clk-sprd-y += pll.o ## SoC support obj-$(CONFIG_SPRD_SC9860_CLK) += sc9860-clk.o +obj-$(CONFIG_SPRD_SC9863A_CLK) += sc9863a-clk.o diff --git a/drivers/clk/sprd/common.c b/drivers/clk/sprd/common.c index c0af4779892b..d620bbbcdfc8 100644 --- a/drivers/clk/sprd/common.c +++ b/drivers/clk/sprd/common.c @@ -40,7 +40,8 @@ int sprd_clk_regmap_init(struct platform_device *pdev, const struct sprd_clk_desc *desc) { void __iomem *base; - struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; struct regmap *regmap; if (of_find_property(node, "sprd,syscon", NULL)) { @@ -49,6 +50,13 @@ int sprd_clk_regmap_init(struct platform_device *pdev, pr_err("%s: failed to get syscon regmap\n", __func__); return PTR_ERR(regmap); } + } else if (of_device_is_compatible(of_get_parent(dev->of_node), + "syscon")) { + regmap = device_node_to_regmap(of_get_parent(dev->of_node)); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to get regmap from its parent.\n"); + return PTR_ERR(regmap); + } } else { base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) diff --git a/drivers/clk/sprd/composite.h b/drivers/clk/sprd/composite.h index 04ab3f587ee2..adbabbe596b7 100644 --- a/drivers/clk/sprd/composite.h +++ b/drivers/clk/sprd/composite.h @@ -18,26 +18,43 @@ struct sprd_comp { struct sprd_clk_common common; }; -#define SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, _table, \ - _mshift, _mwidth, _dshift, _dwidth, _flags) \ +#define SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \ + _mshift, _mwidth, _dshift, _dwidth, \ + _flags, _fn) \ struct sprd_comp _struct = { \ .mux = _SPRD_MUX_CLK(_mshift, _mwidth, _table), \ .div = _SPRD_DIV_CLK(_dshift, _dwidth), \ .common = { \ .regmap = NULL, \ .reg = _reg, \ - .hw.init = CLK_HW_INIT_PARENTS(_name, \ - _parent, \ - &sprd_comp_ops, \ - _flags), \ + .hw.init = _fn(_name, _parent, \ + &sprd_comp_ops, _flags), \ } \ } -#define SPRD_COMP_CLK(_struct, _name, _parent, _reg, _mshift, \ - _mwidth, _dshift, _dwidth, _flags) \ - SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, \ - NULL, _mshift, _mwidth, \ - _dshift, _dwidth, _flags) +#define SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, _table, \ + _mshift, _mwidth, _dshift, _dwidth, _flags) \ + SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \ + _mshift, _mwidth, _dshift, _dwidth, \ + _flags, CLK_HW_INIT_PARENTS) + +#define SPRD_COMP_CLK(_struct, _name, _parent, _reg, _mshift, \ + _mwidth, _dshift, _dwidth, _flags) \ + SPRD_COMP_CLK_TABLE(_struct, _name, _parent, _reg, NULL, \ + _mshift, _mwidth, _dshift, _dwidth, _flags) + +#define SPRD_COMP_CLK_DATA_TABLE(_struct, _name, _parent, _reg, _table, \ + _mshift, _mwidth, _dshift, \ + _dwidth, _flags) \ + SPRD_COMP_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, _table, \ + _mshift, _mwidth, _dshift, _dwidth, \ + _flags, CLK_HW_INIT_PARENTS_DATA) + +#define SPRD_COMP_CLK_DATA(_struct, _name, _parent, _reg, _mshift, \ + _mwidth, _dshift, _dwidth, _flags) \ + SPRD_COMP_CLK_DATA_TABLE(_struct, _name, _parent, _reg, NULL, \ + _mshift, _mwidth, _dshift, _dwidth, \ + _flags) static inline struct sprd_comp *hw_to_sprd_comp(const struct clk_hw *hw) { diff --git a/drivers/clk/sprd/div.h b/drivers/clk/sprd/div.h index 87510e3d0e14..6acfe6b179fc 100644 --- a/drivers/clk/sprd/div.h +++ 
b/drivers/clk/sprd/div.h @@ -35,20 +35,28 @@ struct sprd_div { struct sprd_clk_common common; }; -#define SPRD_DIV_CLK(_struct, _name, _parent, _reg, \ - _shift, _width, _flags) \ +#define SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _shift, _width, _flags, _fn) \ struct sprd_div _struct = { \ .div = _SPRD_DIV_CLK(_shift, _width), \ .common = { \ .regmap = NULL, \ .reg = _reg, \ - .hw.init = CLK_HW_INIT(_name, \ - _parent, \ - &sprd_div_ops, \ - _flags), \ + .hw.init = _fn(_name, _parent, \ + &sprd_div_ops, _flags), \ } \ } +#define SPRD_DIV_CLK(_struct, _name, _parent, _reg, \ + _shift, _width, _flags) \ + SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _shift, _width, _flags, CLK_HW_INIT) + +#define SPRD_DIV_CLK_HW(_struct, _name, _parent, _reg, \ + _shift, _width, _flags) \ + SPRD_DIV_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _shift, _width, _flags, CLK_HW_INIT_HW) + static inline struct sprd_div *hw_to_sprd_div(const struct clk_hw *hw) { struct sprd_clk_common *common = hw_to_sprd_clk_common(hw); diff --git a/drivers/clk/sprd/gate.c b/drivers/clk/sprd/gate.c index f59d1936b412..574cfc116bbc 100644 --- a/drivers/clk/sprd/gate.c +++ b/drivers/clk/sprd/gate.c @@ -79,6 +79,17 @@ static int sprd_sc_gate_enable(struct clk_hw *hw) return 0; } + +static int sprd_pll_sc_gate_prepare(struct clk_hw *hw) +{ + struct sprd_gate *sg = hw_to_sprd_gate(hw); + + clk_sc_gate_toggle(sg, true); + udelay(sg->udelay); + + return 0; +} + static int sprd_gate_is_enabled(struct clk_hw *hw) { struct sprd_gate *sg = hw_to_sprd_gate(hw); @@ -109,3 +120,9 @@ const struct clk_ops sprd_sc_gate_ops = { }; EXPORT_SYMBOL_GPL(sprd_sc_gate_ops); +const struct clk_ops sprd_pll_sc_gate_ops = { + .unprepare = sprd_sc_gate_disable, + .prepare = sprd_pll_sc_gate_prepare, + .is_enabled = sprd_gate_is_enabled, +}; +EXPORT_SYMBOL_GPL(sprd_pll_sc_gate_ops); diff --git a/drivers/clk/sprd/gate.h b/drivers/clk/sprd/gate.h index dc352ea55e1f..b55817869367 100644 --- a/drivers/clk/sprd/gate.h +++ b/drivers/clk/sprd/gate.h @@ -14,37 +14,136 @@ struct sprd_gate { u32 enable_mask; u16 flags; u16 sc_offset; + u16 udelay; struct sprd_clk_common common; }; -#define SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \ - _enable_mask, _flags, _gate_flags, _ops) \ +#define SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay, _ops, _fn) \ struct sprd_gate _struct = { \ .enable_mask = _enable_mask, \ .sc_offset = _sc_offset, \ .flags = _gate_flags, \ + .udelay = _udelay, \ .common = { \ .regmap = NULL, \ .reg = _reg, \ - .hw.init = CLK_HW_INIT(_name, \ - _parent, \ - _ops, \ - _flags), \ + .hw.init = _fn(_name, _parent, \ + _ops, _flags), \ } \ } +#define SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay, _ops) \ + SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay, _ops, CLK_HW_INIT) + +#define SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \ + _enable_mask, _flags, _gate_flags, _ops) \ + SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, 0, _ops) + +#define SPRD_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \ + _enable_mask, _flags, _gate_flags) \ + SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \ + _enable_mask, _flags, _gate_flags, \ + &sprd_sc_gate_ops) + #define 
SPRD_GATE_CLK(_struct, _name, _parent, _reg, \ _enable_mask, _flags, _gate_flags) \ SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, 0, \ _enable_mask, _flags, _gate_flags, \ &sprd_gate_ops) -#define SPRD_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \ - _enable_mask, _flags, _gate_flags) \ - SPRD_SC_GATE_CLK_OPS(_struct, _name, _parent, _reg, _sc_offset, \ +#define SPRD_PLL_SC_GATE_CLK(_struct, _name, _parent, _reg, _sc_offset, \ _enable_mask, _flags, _gate_flags, \ - &sprd_sc_gate_ops) + _udelay) \ + SPRD_SC_GATE_CLK_OPS_UDELAY(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay, \ + &sprd_pll_sc_gate_ops) + + +#define SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, \ + _flags, _gate_flags, \ + _udelay, _ops) \ + SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay, _ops, \ + CLK_HW_INIT_HW) + +#define SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _ops) \ + SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, \ + _flags, _gate_flags, 0, _ops) + +#define SPRD_SC_GATE_CLK_HW(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags) \ + SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, &sprd_sc_gate_ops) + +#define SPRD_GATE_CLK_HW(_struct, _name, _parent, _reg, \ + _enable_mask, _flags, _gate_flags) \ + SPRD_SC_GATE_CLK_HW_OPS(_struct, _name, _parent, _reg, 0, \ + _enable_mask, _flags, _gate_flags, \ + &sprd_gate_ops) + +#define SPRD_PLL_SC_GATE_CLK_HW(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay) \ + SPRD_SC_GATE_CLK_HW_OPS_UDELAY(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, \ + _flags, _gate_flags, _udelay, \ + &sprd_pll_sc_gate_ops) + +#define SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \ + _reg, _sc_offset, \ + _enable_mask, _flags, \ + _gate_flags, _udelay, _ops) \ + SPRD_SC_GATE_CLK_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay, _ops, \ + CLK_HW_INIT_FW_NAME) + +#define SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _ops) \ + SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \ + _reg, _sc_offset, \ + _enable_mask, _flags, \ + _gate_flags, 0, _ops) + +#define SPRD_SC_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags) \ + SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, &sprd_sc_gate_ops) + +#define SPRD_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \ + _enable_mask, _flags, _gate_flags) \ + SPRD_SC_GATE_CLK_FW_NAME_OPS(_struct, _name, _parent, _reg, 0, \ + _enable_mask, _flags, _gate_flags, \ + &sprd_gate_ops) + +#define SPRD_PLL_SC_GATE_CLK_FW_NAME(_struct, _name, _parent, _reg, \ + _sc_offset, _enable_mask, _flags, \ + _gate_flags, _udelay) \ + SPRD_SC_GATE_CLK_FW_NAME_OPS_UDELAY(_struct, _name, _parent, \ + _reg, _sc_offset, \ + _enable_mask, _flags, \ + _gate_flags, _udelay, \ + &sprd_pll_sc_gate_ops) static inline struct sprd_gate *hw_to_sprd_gate(const struct clk_hw *hw) { @@ -55,5 +154,6 @@ static inline struct sprd_gate *hw_to_sprd_gate(const struct clk_hw *hw) extern const struct clk_ops 
sprd_gate_ops; extern const struct clk_ops sprd_sc_gate_ops; +extern const struct clk_ops sprd_pll_sc_gate_ops; #endif /* _SPRD_GATE_H_ */ diff --git a/drivers/clk/sprd/mux.h b/drivers/clk/sprd/mux.h index 892e4191cc7f..f3cc31dae06f 100644 --- a/drivers/clk/sprd/mux.h +++ b/drivers/clk/sprd/mux.h @@ -36,26 +36,40 @@ struct sprd_mux { .table = _table, \ } -#define SPRD_MUX_CLK_TABLE(_struct, _name, _parents, _table, \ - _reg, _shift, _width, \ - _flags) \ +#define SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \ + _reg, _shift, _width, _flags, _fn) \ struct sprd_mux _struct = { \ .mux = _SPRD_MUX_CLK(_shift, _width, _table), \ .common = { \ .regmap = NULL, \ .reg = _reg, \ - .hw.init = CLK_HW_INIT_PARENTS(_name, \ - _parents, \ - &sprd_mux_ops, \ - _flags), \ + .hw.init = _fn(_name, _parents, \ + &sprd_mux_ops, _flags), \ } \ } +#define SPRD_MUX_CLK_TABLE(_struct, _name, _parents, _table, \ + _reg, _shift, _width, _flags) \ + SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \ + _reg, _shift, _width, _flags, \ + CLK_HW_INIT_PARENTS) + #define SPRD_MUX_CLK(_struct, _name, _parents, _reg, \ _shift, _width, _flags) \ SPRD_MUX_CLK_TABLE(_struct, _name, _parents, NULL, \ _reg, _shift, _width, _flags) +#define SPRD_MUX_CLK_DATA_TABLE(_struct, _name, _parents, _table, \ + _reg, _shift, _width, _flags) \ + SPRD_MUX_CLK_HW_INIT_FN(_struct, _name, _parents, _table, \ + _reg, _shift, _width, _flags, \ + CLK_HW_INIT_PARENTS_DATA) + +#define SPRD_MUX_CLK_DATA(_struct, _name, _parents, _reg, \ + _shift, _width, _flags) \ + SPRD_MUX_CLK_DATA_TABLE(_struct, _name, _parents, NULL, \ + _reg, _shift, _width, _flags) + static inline struct sprd_mux *hw_to_sprd_mux(const struct clk_hw *hw) { struct sprd_clk_common *common = hw_to_sprd_clk_common(hw); diff --git a/drivers/clk/sprd/pll.c b/drivers/clk/sprd/pll.c index 640270f51aa5..15791484388f 100644 --- a/drivers/clk/sprd/pll.c +++ b/drivers/clk/sprd/pll.c @@ -87,11 +87,12 @@ static u32 pll_get_ibias(u64 rate, const u64 *table) { u32 i, num = table[0]; - for (i = 1; i < num + 1; i++) - if (rate <= table[i]) + /* table[0] indicates the number of items in this table */ + for (i = 0; i < num; i++) + if (rate <= table[i + 1]) break; - return (i == num + 1) ? num : i; + return i == num ? 
num - 1 : i; } static unsigned long _sprd_pll_recalc_rate(const struct sprd_pll *pll, diff --git a/drivers/clk/sprd/pll.h b/drivers/clk/sprd/pll.h index e95f11e91ffe..6558f50d0296 100644 --- a/drivers/clk/sprd/pll.h +++ b/drivers/clk/sprd/pll.h @@ -61,27 +61,33 @@ struct sprd_pll { struct sprd_clk_common common; }; +#define SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, \ + _regs_num, _itable, _factors, \ + _udelay, _k1, _k2, _fflag, \ + _fvco, _fn) \ + struct sprd_pll _struct = { \ + .regs_num = _regs_num, \ + .itable = _itable, \ + .factors = _factors, \ + .udelay = _udelay, \ + .k1 = _k1, \ + .k2 = _k2, \ + .fflag = _fflag, \ + .fvco = _fvco, \ + .common = { \ + .regmap = NULL, \ + .reg = _reg, \ + .hw.init = _fn(_name, _parent, \ + &sprd_pll_ops, 0),\ + }, \ + } + #define SPRD_PLL_WITH_ITABLE_K_FVCO(_struct, _name, _parent, _reg, \ _regs_num, _itable, _factors, \ _udelay, _k1, _k2, _fflag, _fvco) \ - struct sprd_pll _struct = { \ - .regs_num = _regs_num, \ - .itable = _itable, \ - .factors = _factors, \ - .udelay = _udelay, \ - .k1 = _k1, \ - .k2 = _k2, \ - .fflag = _fflag, \ - .fvco = _fvco, \ - .common = { \ - .regmap = NULL, \ - .reg = _reg, \ - .hw.init = CLK_HW_INIT(_name, \ - _parent, \ - &sprd_pll_ops, \ - 0), \ - }, \ - } + SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \ + _itable, _factors, _udelay, _k1, _k2, \ + _fflag, _fvco, CLK_HW_INIT) #define SPRD_PLL_WITH_ITABLE_K(_struct, _name, _parent, _reg, \ _regs_num, _itable, _factors, \ @@ -96,6 +102,19 @@ struct sprd_pll { _regs_num, _itable, _factors, \ _udelay, 1000, 1000, 0, 0) +#define SPRD_PLL_FW_NAME(_struct, _name, _parent, _reg, _regs_num, \ + _itable, _factors, _udelay, _k1, _k2, \ + _fflag, _fvco) \ + SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \ + _itable, _factors, _udelay, _k1, _k2, \ + _fflag, _fvco, CLK_HW_INIT_FW_NAME) + +#define SPRD_PLL_HW(_struct, _name, _parent, _reg, _regs_num, _itable, \ + _factors, _udelay, _k1, _k2, _fflag, _fvco) \ + SPRD_PLL_HW_INIT_FN(_struct, _name, _parent, _reg, _regs_num, \ + _itable, _factors, _udelay, _k1, _k2, \ + _fflag, _fvco, CLK_HW_INIT_HW) + static inline struct sprd_pll *hw_to_sprd_pll(struct clk_hw *hw) { struct sprd_clk_common *common = hw_to_sprd_clk_common(hw); diff --git a/drivers/clk/sprd/sc9863a-clk.c b/drivers/clk/sprd/sc9863a-clk.c new file mode 100644 index 000000000000..a0631f7756cf --- /dev/null +++ b/drivers/clk/sprd/sc9863a-clk.c @@ -0,0 +1,1772 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Unisoc SC9863A clock driver + * + * Copyright (C) 2019 Unisoc, Inc. 
+ * Author: Chunyan Zhang <chunyan.zhang@unisoc.com> + */ + +#include <linux/clk-provider.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +#include <dt-bindings/clock/sprd,sc9863a-clk.h> + +#include "common.h" +#include "composite.h" +#include "div.h" +#include "gate.h" +#include "mux.h" +#include "pll.h" + +/* mpll*_gate clocks control cpu cores, they were enabled by default */ +SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll0_gate, "mpll0-gate", "ext-26m", 0x94, + 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll0_gate, "dpll0-gate", "ext-26m", 0x98, + 0x1000, BIT(0), 0, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(lpll_gate, "lpll-gate", "ext-26m", 0x9c, + 0x1000, BIT(0), 0, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(gpll_gate, "gpll-gate", "ext-26m", 0xa8, + 0x1000, BIT(0), 0, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(dpll1_gate, "dpll1-gate", "ext-26m", 0x1dc, + 0x1000, BIT(0), 0, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll1_gate, "mpll1-gate", "ext-26m", 0x1e0, + 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(mpll2_gate, "mpll2-gate", "ext-26m", 0x1e4, + 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0, 240); +SPRD_PLL_SC_GATE_CLK_FW_NAME(isppll_gate, "isppll-gate", "ext-26m", 0x1e8, + 0x1000, BIT(0), 0, 0, 240); + +static struct sprd_clk_common *sc9863a_pmu_gate_clks[] = { + /* address base is 0x402b0000 */ + &mpll0_gate.common, + &dpll0_gate.common, + &lpll_gate.common, + &gpll_gate.common, + &dpll1_gate.common, + &mpll1_gate.common, + &mpll2_gate.common, + &isppll_gate.common, +}; + +static struct clk_hw_onecell_data sc9863a_pmu_gate_hws = { + .hws = { + [CLK_MPLL0_GATE] = &mpll0_gate.common.hw, + [CLK_DPLL0_GATE] = &dpll0_gate.common.hw, + [CLK_LPLL_GATE] = &lpll_gate.common.hw, + [CLK_GPLL_GATE] = &gpll_gate.common.hw, + [CLK_DPLL1_GATE] = &dpll1_gate.common.hw, + [CLK_MPLL1_GATE] = &mpll1_gate.common.hw, + [CLK_MPLL2_GATE] = &mpll2_gate.common.hw, + [CLK_ISPPLL_GATE] = &isppll_gate.common.hw, + }, + .num = CLK_PMU_APB_NUM, +}; + +static const struct sprd_clk_desc sc9863a_pmu_gate_desc = { + .clk_clks = sc9863a_pmu_gate_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_pmu_gate_clks), + .hw_clks = &sc9863a_pmu_gate_hws, +}; + +static const u64 itable[5] = {4, 1000000000, 1200000000, + 1400000000, 1600000000}; + +static const struct clk_bit_field f_twpll[PLL_FACT_MAX] = { + { .shift = 95, .width = 1 }, /* lock_done */ + { .shift = 0, .width = 1 }, /* div_s */ + { .shift = 1, .width = 1 }, /* mod_en */ + { .shift = 2, .width = 1 }, /* sdm_en */ + { .shift = 0, .width = 0 }, /* refin */ + { .shift = 3, .width = 3 }, /* ibias */ + { .shift = 8, .width = 11 }, /* n */ + { .shift = 55, .width = 7 }, /* nint */ + { .shift = 32, .width = 23}, /* kint */ + { .shift = 0, .width = 0 }, /* prediv */ + { .shift = 0, .width = 0 }, /* postdiv */ +}; +static SPRD_PLL_FW_NAME(twpll, "twpll", "ext-26m", 0x4, 3, itable, + f_twpll, 240, 1000, 1000, 0, 0); +static CLK_FIXED_FACTOR_HW(twpll_768m, "twpll-768m", &twpll.common.hw, 2, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_384m, "twpll-384m", &twpll.common.hw, 4, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_192m, "twpll-192m", &twpll.common.hw, 8, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_96m, "twpll-96m", &twpll.common.hw, 16, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_48m, "twpll-48m", &twpll.common.hw, 32, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_24m, "twpll-24m", &twpll.common.hw, 64, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_12m, "twpll-12m", 
&twpll.common.hw, 128, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_512m, "twpll-512m", &twpll.common.hw, 3, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_256m, "twpll-256m", &twpll.common.hw, 6, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_128m, "twpll-128m", &twpll.common.hw, 12, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_64m, "twpll-64m", &twpll.common.hw, 24, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_307m2, "twpll-307m2", &twpll.common.hw, 5, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_219m4, "twpll-219m4", &twpll.common.hw, 7, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_170m6, "twpll-170m6", &twpll.common.hw, 9, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_153m6, "twpll-153m6", &twpll.common.hw, 10, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_76m8, "twpll-76m8", &twpll.common.hw, 20, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_51m2, "twpll-51m2", &twpll.common.hw, 30, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_38m4, "twpll-38m4", &twpll.common.hw, 40, 1, 0); +static CLK_FIXED_FACTOR_HW(twpll_19m2, "twpll-19m2", &twpll.common.hw, 80, 1, 0); + +static const struct clk_bit_field f_lpll[PLL_FACT_MAX] = { + { .shift = 95, .width = 1 }, /* lock_done */ + { .shift = 0, .width = 1 }, /* div_s */ + { .shift = 1, .width = 1 }, /* mod_en */ + { .shift = 2, .width = 1 }, /* sdm_en */ + { .shift = 0, .width = 0 }, /* refin */ + { .shift = 6, .width = 2 }, /* ibias */ + { .shift = 8, .width = 11 }, /* n */ + { .shift = 55, .width = 7 }, /* nint */ + { .shift = 32, .width = 23}, /* kint */ + { .shift = 0, .width = 0 }, /* prediv */ + { .shift = 0, .width = 0 }, /* postdiv */ +}; +static SPRD_PLL_HW(lpll, "lpll", &lpll_gate.common.hw, 0x20, 3, itable, + f_lpll, 240, 1000, 1000, 0, 0); +static CLK_FIXED_FACTOR_HW(lpll_409m6, "lpll-409m6", &lpll.common.hw, 3, 1, 0); +static CLK_FIXED_FACTOR_HW(lpll_245m76, "lpll-245m76", &lpll.common.hw, 5, 1, 0); + +static const struct clk_bit_field f_gpll[PLL_FACT_MAX] = { + { .shift = 95, .width = 1 }, /* lock_done */ + { .shift = 0, .width = 1 }, /* div_s */ + { .shift = 1, .width = 1 }, /* mod_en */ + { .shift = 2, .width = 1 }, /* sdm_en */ + { .shift = 0, .width = 0 }, /* refin */ + { .shift = 6, .width = 2 }, /* ibias */ + { .shift = 8, .width = 11 }, /* n */ + { .shift = 55, .width = 7 }, /* nint */ + { .shift = 32, .width = 23}, /* kint */ + { .shift = 0, .width = 0 }, /* prediv */ + { .shift = 80, .width = 1 }, /* postdiv */ +}; +static SPRD_PLL_HW(gpll, "gpll", &gpll_gate.common.hw, 0x38, 3, itable, + f_gpll, 240, 1000, 1000, 1, 400000000); + +static SPRD_PLL_HW(isppll, "isppll", &isppll_gate.common.hw, 0x50, 3, itable, + f_gpll, 240, 1000, 1000, 0, 0); +static CLK_FIXED_FACTOR_HW(isppll_468m, "isppll-468m", &isppll.common.hw, 2, 1, 0); + +static struct sprd_clk_common *sc9863a_pll_clks[] = { + /* address base is 0x40353000 */ + &twpll.common, + &lpll.common, + &gpll.common, + &isppll.common, +}; + +static struct clk_hw_onecell_data sc9863a_pll_hws = { + .hws = { + [CLK_TWPLL] = &twpll.common.hw, + [CLK_TWPLL_768M] = &twpll_768m.hw, + [CLK_TWPLL_384M] = &twpll_384m.hw, + [CLK_TWPLL_192M] = &twpll_192m.hw, + [CLK_TWPLL_96M] = &twpll_96m.hw, + [CLK_TWPLL_48M] = &twpll_48m.hw, + [CLK_TWPLL_24M] = &twpll_24m.hw, + [CLK_TWPLL_12M] = &twpll_12m.hw, + [CLK_TWPLL_512M] = &twpll_512m.hw, + [CLK_TWPLL_256M] = &twpll_256m.hw, + [CLK_TWPLL_128M] = &twpll_128m.hw, + [CLK_TWPLL_64M] = &twpll_64m.hw, + [CLK_TWPLL_307M2] = &twpll_307m2.hw, + [CLK_TWPLL_219M4] = &twpll_219m4.hw, + [CLK_TWPLL_170M6] = &twpll_170m6.hw, + [CLK_TWPLL_153M6] = &twpll_153m6.hw, + [CLK_TWPLL_76M8] = &twpll_76m8.hw, + 
[CLK_TWPLL_51M2] = &twpll_51m2.hw, + [CLK_TWPLL_38M4] = &twpll_38m4.hw, + [CLK_TWPLL_19M2] = &twpll_19m2.hw, + [CLK_LPLL] = &lpll.common.hw, + [CLK_LPLL_409M6] = &lpll_409m6.hw, + [CLK_LPLL_245M76] = &lpll_245m76.hw, + [CLK_GPLL] = &gpll.common.hw, + [CLK_ISPPLL] = &isppll.common.hw, + [CLK_ISPPLL_468M] = &isppll_468m.hw, + + }, + .num = CLK_ANLG_PHY_G1_NUM, +}; + +static const struct sprd_clk_desc sc9863a_pll_desc = { + .clk_clks = sc9863a_pll_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_pll_clks), + .hw_clks = &sc9863a_pll_hws, +}; + +static const u64 itable_mpll[6] = {5, 1000000000, 1200000000, 1400000000, + 1600000000, 1800000000}; +static SPRD_PLL_HW(mpll0, "mpll0", &mpll0_gate.common.hw, 0x0, 3, itable_mpll, + f_gpll, 240, 1000, 1000, 1, 1000000000); +static SPRD_PLL_HW(mpll1, "mpll1", &mpll1_gate.common.hw, 0x18, 3, itable_mpll, + f_gpll, 240, 1000, 1000, 1, 1000000000); +static SPRD_PLL_HW(mpll2, "mpll2", &mpll2_gate.common.hw, 0x30, 3, itable_mpll, + f_gpll, 240, 1000, 1000, 1, 1000000000); +static CLK_FIXED_FACTOR_HW(mpll2_675m, "mpll2-675m", &mpll2.common.hw, 2, 1, 0); + +static struct sprd_clk_common *sc9863a_mpll_clks[] = { + /* address base is 0x40359000 */ + &mpll0.common, + &mpll1.common, + &mpll2.common, +}; + +static struct clk_hw_onecell_data sc9863a_mpll_hws = { + .hws = { + [CLK_MPLL0] = &mpll0.common.hw, + [CLK_MPLL1] = &mpll1.common.hw, + [CLK_MPLL2] = &mpll2.common.hw, + [CLK_MPLL2_675M] = &mpll2_675m.hw, + + }, + .num = CLK_ANLG_PHY_G4_NUM, +}; + +static const struct sprd_clk_desc sc9863a_mpll_desc = { + .clk_clks = sc9863a_mpll_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_mpll_clks), + .hw_clks = &sc9863a_mpll_hws, +}; + +static SPRD_SC_GATE_CLK_FW_NAME(audio_gate, "audio-gate", "ext-26m", + 0x4, 0x1000, BIT(8), 0, 0); + +static SPRD_PLL_FW_NAME(rpll, "rpll", "ext-26m", 0x10, + 3, itable, f_lpll, 240, 1000, 1000, 0, 0); + +static CLK_FIXED_FACTOR_HW(rpll_390m, "rpll-390m", &rpll.common.hw, 2, 1, 0); +static CLK_FIXED_FACTOR_HW(rpll_260m, "rpll-260m", &rpll.common.hw, 3, 1, 0); +static CLK_FIXED_FACTOR_HW(rpll_195m, "rpll-195m", &rpll.common.hw, 4, 1, 0); +static CLK_FIXED_FACTOR_HW(rpll_26m, "rpll-26m", &rpll.common.hw, 30, 1, 0); + +static struct sprd_clk_common *sc9863a_rpll_clks[] = { + /* address base is 0x4035c000 */ + &audio_gate.common, + &rpll.common, +}; + +static struct clk_hw_onecell_data sc9863a_rpll_hws = { + .hws = { + [CLK_AUDIO_GATE] = &audio_gate.common.hw, + [CLK_RPLL] = &rpll.common.hw, + [CLK_RPLL_390M] = &rpll_390m.hw, + [CLK_RPLL_260M] = &rpll_260m.hw, + [CLK_RPLL_195M] = &rpll_195m.hw, + [CLK_RPLL_26M] = &rpll_26m.hw, + }, + .num = CLK_ANLG_PHY_G5_NUM, +}; + +static const struct sprd_clk_desc sc9863a_rpll_desc = { + .clk_clks = sc9863a_rpll_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_rpll_clks), + .hw_clks = &sc9863a_rpll_hws, +}; + +static const u64 itable_dpll[5] = {4, 1211000000, 1320000000, 1570000000, + 1866000000}; +static SPRD_PLL_HW(dpll0, "dpll0", &dpll0_gate.common.hw, 0x0, 3, itable_dpll, + f_lpll, 240, 1000, 1000, 0, 0); +static SPRD_PLL_HW(dpll1, "dpll1", &dpll1_gate.common.hw, 0x18, 3, itable_dpll, + f_lpll, 240, 1000, 1000, 0, 0); + +static CLK_FIXED_FACTOR_HW(dpll0_933m, "dpll0-933m", &dpll0.common.hw, 2, 1, 0); +static CLK_FIXED_FACTOR_HW(dpll0_622m3, "dpll0-622m3", &dpll0.common.hw, 3, 1, 0); +static CLK_FIXED_FACTOR_HW(dpll1_400m, "dpll1-400m", &dpll0.common.hw, 4, 1, 0); +static CLK_FIXED_FACTOR_HW(dpll1_266m7, "dpll1-266m7", &dpll0.common.hw, 6, 1, 0); +static CLK_FIXED_FACTOR_HW(dpll1_123m1, "dpll1-123m1", &dpll0.common.hw, 
13, 1, 0); +static CLK_FIXED_FACTOR_HW(dpll1_50m, "dpll1-50m", &dpll0.common.hw, 32, 1, 0); + +static struct sprd_clk_common *sc9863a_dpll_clks[] = { + /* address base is 0x40363000 */ + &dpll0.common, + &dpll1.common, +}; + +static struct clk_hw_onecell_data sc9863a_dpll_hws = { + .hws = { + [CLK_DPLL0] = &dpll0.common.hw, + [CLK_DPLL1] = &dpll1.common.hw, + [CLK_DPLL0_933M] = &dpll0_933m.hw, + [CLK_DPLL0_622M3] = &dpll0_622m3.hw, + [CLK_DPLL0_400M] = &dpll1_400m.hw, + [CLK_DPLL0_266M7] = &dpll1_266m7.hw, + [CLK_DPLL0_123M1] = &dpll1_123m1.hw, + [CLK_DPLL0_50M] = &dpll1_50m.hw, + + }, + .num = CLK_ANLG_PHY_G7_NUM, +}; + +static const struct sprd_clk_desc sc9863a_dpll_desc = { + .clk_clks = sc9863a_dpll_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_dpll_clks), + .hw_clks = &sc9863a_dpll_hws, +}; + +static CLK_FIXED_FACTOR_FW_NAME(clk_6m5, "clk-6m5", "ext-26m", 4, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(clk_4m3, "clk-4m3", "ext-26m", 6, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(clk_2m, "clk-2m", "ext-26m", 13, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(clk_250k, "clk-250k", "ext-26m", 104, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(rco_25m, "rco-25m", "rco-100m", 4, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(rco_4m, "rco-4m", "rco-100m", 25, 1, 0); +static CLK_FIXED_FACTOR_FW_NAME(rco_2m, "rco-2m", "rco-100m", 50, 1, 0); + +#define SC9863A_MUX_FLAG \ + (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT) + +static CLK_FIXED_FACTOR_FW_NAME(clk_13m, "clk-13m", "ext-26m", 2, 1, 0); +static const struct clk_parent_data emc_clk_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_384m.hw }, + { .hw = &twpll_512m.hw }, + { .hw = &twpll_768m.hw }, + { .hw = &twpll.common.hw }, +}; +static SPRD_MUX_CLK_DATA(emc_clk, "emc-clk", emc_clk_parents, 0x220, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data aon_apb_parents[] = { + { .hw = &rco_4m.hw }, + { .hw = &rco_25m.hw }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_96m.hw }, + { .fw_name = "rco-100m" }, + { .hw = &twpll_128m.hw }, +}; +static SPRD_COMP_CLK_DATA(aon_apb, "aon-apb", aon_apb_parents, 0x224, + 0, 3, 8, 2, 0); + +static const struct clk_parent_data adi_parents[] = { + { .hw = &rco_4m.hw }, + { .hw = &rco_25m.hw }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_38m4.hw }, + { .hw = &twpll_51m2.hw }, +}; +static SPRD_MUX_CLK_DATA(adi_clk, "adi-clk", adi_parents, 0x228, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data aux_parents[] = { + { .fw_name = "ext-32k" }, + { .hw = &rpll_26m.hw }, + { .fw_name = "ext-26m" }, +}; +static SPRD_COMP_CLK_DATA(aux0_clk, "aux0-clk", aux_parents, 0x22c, + 0, 5, 8, 4, 0); +static SPRD_COMP_CLK_DATA(aux1_clk, "aux1-clk", aux_parents, 0x230, + 0, 5, 8, 4, 0); +static SPRD_COMP_CLK_DATA(aux2_clk, "aux2-clk", aux_parents, 0x234, + 0, 5, 8, 4, 0); +static SPRD_COMP_CLK_DATA(probe_clk, "probe-clk", aux_parents, 0x238, + 0, 5, 8, 4, 0); + +static const struct clk_parent_data pwm_parents[] = { + { .fw_name = "ext-32k" }, + { .hw = &rpll_26m.hw }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_48m.hw }, +}; +static SPRD_MUX_CLK_DATA(pwm0_clk, "pwm0-clk", pwm_parents, 0x23c, + 0, 2, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(pwm1_clk, "pwm1-clk", pwm_parents, 0x240, + 0, 2, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(pwm2_clk, "pwm2-clk", pwm_parents, 0x244, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data aon_thm_parents[] = { + { .fw_name = "ext-32k" }, + { .hw = &clk_250k.hw }, +}; +static SPRD_MUX_CLK_DATA(aon_thm_clk, "aon-thm-clk", aon_thm_parents, 0x25c, + 0, 1, 
SC9863A_MUX_FLAG); + +static const struct clk_parent_data audif_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_38m4.hw }, + { .hw = &twpll_51m2.hw }, +}; +static SPRD_MUX_CLK_DATA(audif_clk, "audif-clk", audif_parents, 0x264, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data cpu_dap_parents[] = { + { .hw = &rco_4m.hw }, + { .hw = &rco_25m.hw }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_76m8.hw }, + { .fw_name = "rco-100m" }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, +}; +static SPRD_MUX_CLK_DATA(cpu_dap_clk, "cpu-dap-clk", cpu_dap_parents, 0x26c, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data cpu_ts_parents[] = { + { .fw_name = "ext-32k" }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, +}; +static SPRD_MUX_CLK_DATA(cpu_ts_clk, "cpu-ts-clk", cpu_ts_parents, 0x274, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data djtag_tck_parents[] = { + { .hw = &rco_4m.hw }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(djtag_tck_clk, "djtag-tck-clk", djtag_tck_parents, 0x28c, + 0, 1, SC9863A_MUX_FLAG); + +static const struct clk_parent_data emc_ref_parents[] = { + { .hw = &clk_6m5.hw }, + { .hw = &clk_13m.hw }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(emc_ref_clk, "emc-ref-clk", emc_ref_parents, 0x29c, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data cssys_parents[] = { + { .hw = &rco_4m.hw }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_96m.hw }, + { .fw_name = "rco-100m" }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &twpll_512m.hw }, + { .hw = &mpll2_675m.hw }, +}; +static SPRD_COMP_CLK_DATA(cssys_clk, "cssys-clk", cssys_parents, 0x2a0, + 0, 4, 8, 2, 0); + +static const struct clk_parent_data aon_pmu_parents[] = { + { .fw_name = "ext-32k" }, + { .hw = &rco_4m.hw }, + { .fw_name = "ext-4m" }, +}; +static SPRD_MUX_CLK_DATA(aon_pmu_clk, "aon-pmu-clk", aon_pmu_parents, 0x2a8, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data pmu_26m_parents[] = { + { .hw = &rco_4m.hw }, + { .hw = &rco_25m.hw }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(pmu_26m_clk, "26m-pmu-clk", pmu_26m_parents, 0x2ac, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data aon_tmr_parents[] = { + { .hw = &rco_4m.hw }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(aon_tmr_clk, "aon-tmr-clk", aon_tmr_parents, 0x2b0, + 0, 1, SC9863A_MUX_FLAG); + +static const struct clk_parent_data power_cpu_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &rco_25m.hw }, + { .fw_name = "rco-100m" }, + { .hw = &twpll_128m.hw }, +}; +static SPRD_MUX_CLK_DATA(power_cpu_clk, "power-cpu-clk", power_cpu_parents, 0x2c4, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data ap_axi_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_76m8.hw }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_256m.hw }, +}; +static SPRD_MUX_CLK_DATA(ap_axi, "ap-axi", ap_axi_parents, 0x2c8, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data sdio_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &rpll_390m.hw }, + { .hw = &dpll1_400m.hw }, + { .hw = &lpll_409m6.hw }, +}; +static SPRD_MUX_CLK_DATA(sdio0_2x, "sdio0-2x", sdio_parents, 0x2cc, + 0, 3, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(sdio1_2x, "sdio1-2x", sdio_parents, 0x2d4, + 0, 3, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(sdio2_2x, "sdio2-2x", sdio_parents, 0x2dc, + 
0, 3, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(emmc_2x, "emmc-2x", sdio_parents, 0x2e4, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data dpu_parents[] = { + { .hw = &twpll_153m6.hw }, + { .hw = &twpll_192m.hw }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_384m.hw }, +}; +static SPRD_MUX_CLK_DATA(dpu_clk, "dpu", dpu_parents, 0x2f4, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data dpu_dpi_parents[] = { + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, + { .hw = &twpll_192m.hw }, +}; +static SPRD_COMP_CLK_DATA(dpu_dpi, "dpu-dpi", dpu_dpi_parents, 0x2f8, + 0, 2, 8, 4, 0); + +static const struct clk_parent_data otg_ref_parents[] = { + { .hw = &twpll_12m.hw }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(otg_ref_clk, "otg-ref-clk", otg_ref_parents, 0x308, + 0, 1, SC9863A_MUX_FLAG); + +static const struct clk_parent_data sdphy_apb_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_48m.hw }, +}; +static SPRD_MUX_CLK_DATA(sdphy_apb_clk, "sdphy-apb-clk", sdphy_apb_parents, 0x330, + 0, 1, SC9863A_MUX_FLAG); + +static const struct clk_parent_data alg_io_apb_parents[] = { + { .hw = &rco_4m.hw }, + { .fw_name = "ext-26m" }, + { .hw = &twpll_48m.hw }, + { .hw = &twpll_96m.hw }, +}; +static SPRD_MUX_CLK_DATA(alg_io_apb_clk, "alg-io-apb-clk", alg_io_apb_parents, 0x33c, + 0, 1, SC9863A_MUX_FLAG); + +static const struct clk_parent_data gpu_parents[] = { + { .hw = &twpll_153m6.hw }, + { .hw = &twpll_192m.hw }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &twpll_512m.hw }, + { .hw = &gpll.common.hw }, +}; +static SPRD_COMP_CLK_DATA(gpu_core, "gpu-core", gpu_parents, 0x344, + 0, 3, 8, 2, 0); +static SPRD_COMP_CLK_DATA(gpu_soc, "gpu-soc", gpu_parents, 0x348, + 0, 3, 8, 2, 0); + +static const struct clk_parent_data mm_emc_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_384m.hw }, + { .hw = &isppll_468m.hw }, + { .hw = &twpll_512m.hw }, +}; +static SPRD_MUX_CLK_DATA(mm_emc, "mm-emc", mm_emc_parents, 0x350, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data mm_ahb_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_96m.hw }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, +}; +static SPRD_MUX_CLK_DATA(mm_ahb, "mm-ahb", mm_ahb_parents, 0x354, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data bpc_clk_parents[] = { + { .hw = &twpll_192m.hw }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &isppll_468m.hw }, + { .hw = &dpll0_622m3.hw }, +}; +static SPRD_MUX_CLK_DATA(bpc_clk, "bpc-clk", bpc_clk_parents, 0x358, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data dcam_if_parents[] = { + { .hw = &twpll_192m.hw }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, +}; +static SPRD_MUX_CLK_DATA(dcam_if_clk, "dcam-if-clk", dcam_if_parents, 0x35c, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data isp_parents[] = { + { .hw = &twpll_128m.hw }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &isppll_468m.hw }, +}; +static SPRD_MUX_CLK_DATA(isp_clk, "isp-clk", isp_parents, 0x360, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data jpg_parents[] = { + { .hw = &twpll_76m8.hw }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_307m2.hw }, +}; +static SPRD_MUX_CLK_DATA(jpg_clk, "jpg-clk", jpg_parents, 0x364, + 0, 2, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(cpp_clk, "cpp-clk", jpg_parents, 
0x368, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data sensor_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_48m.hw }, + { .hw = &twpll_76m8.hw }, + { .hw = &twpll_96m.hw }, +}; +static SPRD_COMP_CLK_DATA(sensor0_clk, "sensor0-clk", sensor_parents, 0x36c, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(sensor1_clk, "sensor1-clk", sensor_parents, 0x370, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(sensor2_clk, "sensor2-clk", sensor_parents, 0x374, + 0, 2, 8, 3, 0); + +static const struct clk_parent_data mm_vemc_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &isppll_468m.hw }, +}; +static SPRD_MUX_CLK_DATA(mm_vemc, "mm-vemc", mm_vemc_parents, 0x378, + 0, 2, SC9863A_MUX_FLAG); + +static SPRD_MUX_CLK_DATA(mm_vahb, "mm-vahb", mm_ahb_parents, 0x37c, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data vsp_parents[] = { + { .hw = &twpll_76m8.hw }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_307m2.hw }, + { .hw = &twpll_384m.hw }, +}; +static SPRD_MUX_CLK_DATA(clk_vsp, "vsp-clk", vsp_parents, 0x380, + 0, 3, SC9863A_MUX_FLAG); + +static const struct clk_parent_data core_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_512m.hw }, + { .hw = &twpll_768m.hw }, + { .hw = &lpll.common.hw }, + { .hw = &dpll0.common.hw }, + { .hw = &mpll2.common.hw }, + { .hw = &mpll0.common.hw }, + { .hw = &mpll1.common.hw }, +}; +static SPRD_COMP_CLK_DATA(core0_clk, "core0-clk", core_parents, 0xa20, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core1_clk, "core1-clk", core_parents, 0xa24, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core2_clk, "core2-clk", core_parents, 0xa28, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core3_clk, "core3-clk", core_parents, 0xa2c, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core4_clk, "core4-clk", core_parents, 0xa30, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core5_clk, "core5-clk", core_parents, 0xa34, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core6_clk, "core6-clk", core_parents, 0xa38, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(core7_clk, "core7-clk", core_parents, 0xa3c, + 0, 3, 8, 3, 0); +static SPRD_COMP_CLK_DATA(scu_clk, "scu-clk", core_parents, 0xa40, + 0, 3, 8, 3, 0); + +static SPRD_DIV_CLK_HW(ace_clk, "ace-clk", &scu_clk.common.hw, 0xa44, + 8, 3, 0); +static SPRD_DIV_CLK_HW(axi_periph_clk, "axi-periph-clk", &scu_clk.common.hw, 0xa48, + 8, 3, 0); +static SPRD_DIV_CLK_HW(axi_acp_clk, "axi-acp-clk", &scu_clk.common.hw, 0xa4c, + 8, 3, 0); + +static const struct clk_parent_data atb_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_384m.hw }, + { .hw = &twpll_512m.hw }, + { .hw = &mpll2.common.hw }, +}; +static SPRD_COMP_CLK_DATA(atb_clk, "atb-clk", atb_parents, 0xa50, + 0, 2, 8, 3, 0); +static SPRD_DIV_CLK_HW(debug_apb_clk, "debug-apb-clk", &atb_clk.common.hw, 0xa54, + 8, 3, 0); + +static const struct clk_parent_data gic_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_153m6.hw }, + { .hw = &twpll_384m.hw }, + { .hw = &twpll_512m.hw }, +}; +static SPRD_COMP_CLK_DATA(gic_clk, "gic-clk", gic_parents, 0xa58, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(periph_clk, "periph-clk", gic_parents, 0xa5c, + 0, 2, 8, 3, 0); + +static struct sprd_clk_common *sc9863a_aon_clks[] = { + /* address base is 0x402d0000 */ + &emc_clk.common, + &aon_apb.common, + &adi_clk.common, + &aux0_clk.common, + &aux1_clk.common, + &aux2_clk.common, + &probe_clk.common, + &pwm0_clk.common, + &pwm1_clk.common, + &pwm2_clk.common, + 
&aon_thm_clk.common, + &audif_clk.common, + &cpu_dap_clk.common, + &cpu_ts_clk.common, + &djtag_tck_clk.common, + &emc_ref_clk.common, + &cssys_clk.common, + &aon_pmu_clk.common, + &pmu_26m_clk.common, + &aon_tmr_clk.common, + &power_cpu_clk.common, + &ap_axi.common, + &sdio0_2x.common, + &sdio1_2x.common, + &sdio2_2x.common, + &emmc_2x.common, + &dpu_clk.common, + &dpu_dpi.common, + &otg_ref_clk.common, + &sdphy_apb_clk.common, + &alg_io_apb_clk.common, + &gpu_core.common, + &gpu_soc.common, + &mm_emc.common, + &mm_ahb.common, + &bpc_clk.common, + &dcam_if_clk.common, + &isp_clk.common, + &jpg_clk.common, + &cpp_clk.common, + &sensor0_clk.common, + &sensor1_clk.common, + &sensor2_clk.common, + &mm_vemc.common, + &mm_vahb.common, + &clk_vsp.common, + &core0_clk.common, + &core1_clk.common, + &core2_clk.common, + &core3_clk.common, + &core4_clk.common, + &core5_clk.common, + &core6_clk.common, + &core7_clk.common, + &scu_clk.common, + &ace_clk.common, + &axi_periph_clk.common, + &axi_acp_clk.common, + &atb_clk.common, + &debug_apb_clk.common, + &gic_clk.common, + &periph_clk.common, +}; + +static struct clk_hw_onecell_data sc9863a_aon_clk_hws = { + .hws = { + [CLK_13M] = &clk_13m.hw, + [CLK_6M5] = &clk_6m5.hw, + [CLK_4M3] = &clk_4m3.hw, + [CLK_2M] = &clk_2m.hw, + [CLK_250K] = &clk_250k.hw, + [CLK_RCO_25M] = &rco_25m.hw, + [CLK_RCO_4M] = &rco_4m.hw, + [CLK_RCO_2M] = &rco_2m.hw, + [CLK_EMC] = &emc_clk.common.hw, + [CLK_AON_APB] = &aon_apb.common.hw, + [CLK_ADI] = &adi_clk.common.hw, + [CLK_AUX0] = &aux0_clk.common.hw, + [CLK_AUX1] = &aux1_clk.common.hw, + [CLK_AUX2] = &aux2_clk.common.hw, + [CLK_PROBE] = &probe_clk.common.hw, + [CLK_PWM0] = &pwm0_clk.common.hw, + [CLK_PWM1] = &pwm1_clk.common.hw, + [CLK_PWM2] = &pwm2_clk.common.hw, + [CLK_AON_THM] = &aon_thm_clk.common.hw, + [CLK_AUDIF] = &audif_clk.common.hw, + [CLK_CPU_DAP] = &cpu_dap_clk.common.hw, + [CLK_CPU_TS] = &cpu_ts_clk.common.hw, + [CLK_DJTAG_TCK] = &djtag_tck_clk.common.hw, + [CLK_EMC_REF] = &emc_ref_clk.common.hw, + [CLK_CSSYS] = &cssys_clk.common.hw, + [CLK_AON_PMU] = &aon_pmu_clk.common.hw, + [CLK_PMU_26M] = &pmu_26m_clk.common.hw, + [CLK_AON_TMR] = &aon_tmr_clk.common.hw, + [CLK_POWER_CPU] = &power_cpu_clk.common.hw, + [CLK_AP_AXI] = &ap_axi.common.hw, + [CLK_SDIO0_2X] = &sdio0_2x.common.hw, + [CLK_SDIO1_2X] = &sdio1_2x.common.hw, + [CLK_SDIO2_2X] = &sdio2_2x.common.hw, + [CLK_EMMC_2X] = &emmc_2x.common.hw, + [CLK_DPU] = &dpu_clk.common.hw, + [CLK_DPU_DPI] = &dpu_dpi.common.hw, + [CLK_OTG_REF] = &otg_ref_clk.common.hw, + [CLK_SDPHY_APB] = &sdphy_apb_clk.common.hw, + [CLK_ALG_IO_APB] = &alg_io_apb_clk.common.hw, + [CLK_GPU_CORE] = &gpu_core.common.hw, + [CLK_GPU_SOC] = &gpu_soc.common.hw, + [CLK_MM_EMC] = &mm_emc.common.hw, + [CLK_MM_AHB] = &mm_ahb.common.hw, + [CLK_BPC] = &bpc_clk.common.hw, + [CLK_DCAM_IF] = &dcam_if_clk.common.hw, + [CLK_ISP] = &isp_clk.common.hw, + [CLK_JPG] = &jpg_clk.common.hw, + [CLK_CPP] = &cpp_clk.common.hw, + [CLK_SENSOR0] = &sensor0_clk.common.hw, + [CLK_SENSOR1] = &sensor1_clk.common.hw, + [CLK_SENSOR2] = &sensor2_clk.common.hw, + [CLK_MM_VEMC] = &mm_vemc.common.hw, + [CLK_MM_VAHB] = &mm_vahb.common.hw, + [CLK_VSP] = &clk_vsp.common.hw, + [CLK_CORE0] = &core0_clk.common.hw, + [CLK_CORE1] = &core1_clk.common.hw, + [CLK_CORE2] = &core2_clk.common.hw, + [CLK_CORE3] = &core3_clk.common.hw, + [CLK_CORE4] = &core4_clk.common.hw, + [CLK_CORE5] = &core5_clk.common.hw, + [CLK_CORE6] = &core6_clk.common.hw, + [CLK_CORE7] = &core7_clk.common.hw, + [CLK_SCU] = &scu_clk.common.hw, + [CLK_ACE] = 
&ace_clk.common.hw, + [CLK_AXI_PERIPH] = &axi_periph_clk.common.hw, + [CLK_AXI_ACP] = &axi_acp_clk.common.hw, + [CLK_ATB] = &atb_clk.common.hw, + [CLK_DEBUG_APB] = &debug_apb_clk.common.hw, + [CLK_GIC] = &gic_clk.common.hw, + [CLK_PERIPH] = &periph_clk.common.hw, + }, + .num = CLK_AON_CLK_NUM, +}; + +static const struct sprd_clk_desc sc9863a_aon_clk_desc = { + .clk_clks = sc9863a_aon_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_aon_clks), + .hw_clks = &sc9863a_aon_clk_hws, +}; + +static const struct clk_parent_data ap_apb_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_64m.hw }, + { .hw = &twpll_96m.hw }, + { .hw = &twpll_128m.hw }, +}; +static SPRD_MUX_CLK_DATA(ap_apb, "ap-apb", ap_apb_parents, 0x20, + 0, 2, SC9863A_MUX_FLAG); + +static const struct clk_parent_data ap_ce_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_256m.hw }, +}; +static SPRD_COMP_CLK_DATA(ap_ce, "ap-ce", ap_ce_parents, 0x24, + 0, 1, 8, 3, 0); + +static const struct clk_parent_data nandc_ecc_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_256m.hw }, + { .hw = &twpll_307m2.hw }, +}; +static SPRD_COMP_CLK_DATA(nandc_ecc, "nandc-ecc", nandc_ecc_parents, 0x28, + 0, 2, 8, 3, 0); + +static const struct clk_parent_data nandc_26m_parents[] = { + { .fw_name = "ext-32k" }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(nandc_26m, "nandc-26m", nandc_26m_parents, 0x2c, + 0, 1, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(emmc_32k, "emmc-32k", nandc_26m_parents, 0x30, + 0, 1, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(sdio0_32k, "sdio0-32k", nandc_26m_parents, 0x34, + 0, 1, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(sdio1_32k, "sdio1-32k", nandc_26m_parents, 0x38, + 0, 1, SC9863A_MUX_FLAG); +static SPRD_MUX_CLK_DATA(sdio2_32k, "sdio2-32k", nandc_26m_parents, 0x3c, + 0, 1, SC9863A_MUX_FLAG); + +static SPRD_GATE_CLK_HW(otg_utmi, "otg-utmi", &aon_apb.common.hw, 0x40, + BIT(16), 0, 0); + +static const struct clk_parent_data ap_uart_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_48m.hw }, + { .hw = &twpll_51m2.hw }, + { .hw = &twpll_96m.hw }, +}; +static SPRD_COMP_CLK_DATA(ap_uart0, "ap-uart0", ap_uart_parents, 0x44, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_uart1, "ap-uart1", ap_uart_parents, 0x48, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_uart2, "ap-uart2", ap_uart_parents, 0x4c, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_uart3, "ap-uart3", ap_uart_parents, 0x50, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_uart4, "ap-uart4", ap_uart_parents, 0x54, + 0, 2, 8, 3, 0); + +static const struct clk_parent_data i2c_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_48m.hw }, + { .hw = &twpll_51m2.hw }, + { .hw = &twpll_153m6.hw }, +}; +static SPRD_COMP_CLK_DATA(ap_i2c0, "ap-i2c0", i2c_parents, 0x58, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_i2c1, "ap-i2c1", i2c_parents, 0x5c, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_i2c2, "ap-i2c2", i2c_parents, 0x60, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_i2c3, "ap-i2c3", i2c_parents, 0x64, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_i2c4, "ap-i2c4", i2c_parents, 0x68, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_i2c5, "ap-i2c5", i2c_parents, 0x6c, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_i2c6, "ap-i2c6", i2c_parents, 0x70, + 0, 2, 8, 3, 0); + +static const struct clk_parent_data spi_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, + { .hw = &twpll_192m.hw }, +}; +static SPRD_COMP_CLK_DATA(ap_spi0, "ap-spi0", spi_parents, 0x74, + 0, 2, 8, 
3, 0); +static SPRD_COMP_CLK_DATA(ap_spi1, "ap-spi1", spi_parents, 0x78, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_spi2, "ap-spi2", spi_parents, 0x7c, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_spi3, "ap-spi3", spi_parents, 0x80, + 0, 2, 8, 3, 0); + +static const struct clk_parent_data iis_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_128m.hw }, + { .hw = &twpll_153m6.hw }, +}; +static SPRD_COMP_CLK_DATA(ap_iis0, "ap-iis0", iis_parents, 0x84, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_iis1, "ap-iis1", iis_parents, 0x88, + 0, 2, 8, 3, 0); +static SPRD_COMP_CLK_DATA(ap_iis2, "ap-iis2", iis_parents, 0x8c, + 0, 2, 8, 3, 0); + +static const struct clk_parent_data sim0_parents[] = { + { .fw_name = "ext-26m" }, + { .hw = &twpll_51m2.hw }, + { .hw = &twpll_64m.hw }, + { .hw = &twpll_96m.hw }, + { .hw = &twpll_128m.hw }, +}; +static SPRD_COMP_CLK_DATA(sim0, "sim0", sim0_parents, 0x90, + 0, 3, 8, 3, 0); + +static const struct clk_parent_data sim0_32k_parents[] = { + { .fw_name = "ext-32k" }, + { .fw_name = "ext-26m" }, +}; +static SPRD_MUX_CLK_DATA(sim0_32k, "sim0-32k", sim0_32k_parents, 0x94, + 0, 1, SC9863A_MUX_FLAG); + +static struct sprd_clk_common *sc9863a_ap_clks[] = { + /* address base is 0x21500000 */ + &ap_apb.common, + &ap_ce.common, + &nandc_ecc.common, + &nandc_26m.common, + &emmc_32k.common, + &sdio0_32k.common, + &sdio1_32k.common, + &sdio2_32k.common, + &otg_utmi.common, + &ap_uart0.common, + &ap_uart1.common, + &ap_uart2.common, + &ap_uart3.common, + &ap_uart4.common, + &ap_i2c0.common, + &ap_i2c1.common, + &ap_i2c2.common, + &ap_i2c3.common, + &ap_i2c4.common, + &ap_i2c5.common, + &ap_i2c6.common, + &ap_spi0.common, + &ap_spi1.common, + &ap_spi2.common, + &ap_spi3.common, + &ap_iis0.common, + &ap_iis1.common, + &ap_iis2.common, + &sim0.common, + &sim0_32k.common, +}; + +static struct clk_hw_onecell_data sc9863a_ap_clk_hws = { + .hws = { + [CLK_AP_APB] = &ap_apb.common.hw, + [CLK_AP_CE] = &ap_ce.common.hw, + [CLK_NANDC_ECC] = &nandc_ecc.common.hw, + [CLK_NANDC_26M] = &nandc_26m.common.hw, + [CLK_EMMC_32K] = &emmc_32k.common.hw, + [CLK_SDIO0_32K] = &sdio0_32k.common.hw, + [CLK_SDIO1_32K] = &sdio1_32k.common.hw, + [CLK_SDIO2_32K] = &sdio2_32k.common.hw, + [CLK_OTG_UTMI] = &otg_utmi.common.hw, + [CLK_AP_UART0] = &ap_uart0.common.hw, + [CLK_AP_UART1] = &ap_uart1.common.hw, + [CLK_AP_UART2] = &ap_uart2.common.hw, + [CLK_AP_UART3] = &ap_uart3.common.hw, + [CLK_AP_UART4] = &ap_uart4.common.hw, + [CLK_AP_I2C0] = &ap_i2c0.common.hw, + [CLK_AP_I2C1] = &ap_i2c1.common.hw, + [CLK_AP_I2C2] = &ap_i2c2.common.hw, + [CLK_AP_I2C3] = &ap_i2c3.common.hw, + [CLK_AP_I2C4] = &ap_i2c4.common.hw, + [CLK_AP_I2C5] = &ap_i2c5.common.hw, + [CLK_AP_I2C6] = &ap_i2c6.common.hw, + [CLK_AP_SPI0] = &ap_spi0.common.hw, + [CLK_AP_SPI1] = &ap_spi1.common.hw, + [CLK_AP_SPI2] = &ap_spi2.common.hw, + [CLK_AP_SPI3] = &ap_spi3.common.hw, + [CLK_AP_IIS0] = &ap_iis0.common.hw, + [CLK_AP_IIS1] = &ap_iis1.common.hw, + [CLK_AP_IIS2] = &ap_iis2.common.hw, + [CLK_SIM0] = &sim0.common.hw, + [CLK_SIM0_32K] = &sim0_32k.common.hw, + }, + .num = CLK_AP_CLK_NUM, +}; + +static const struct sprd_clk_desc sc9863a_ap_clk_desc = { + .clk_clks = sc9863a_ap_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_ap_clks), + .hw_clks = &sc9863a_ap_clk_hws, +}; + +static SPRD_SC_GATE_CLK_HW(otg_eb, "otg-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(4), 0, 0); +static SPRD_SC_GATE_CLK_HW(dma_eb, "dma-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(5), 0, 0); +static SPRD_SC_GATE_CLK_HW(ce_eb, "ce-eb", &ap_axi.common.hw, 0x0, 0x1000, + 
BIT(6), 0, 0); +static SPRD_SC_GATE_CLK_HW(nandc_eb, "nandc-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(7), 0, 0); +static SPRD_SC_GATE_CLK_HW(sdio0_eb, "sdio0-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(8), 0, 0); +static SPRD_SC_GATE_CLK_HW(sdio1_eb, "sdio1-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(9), 0, 0); +static SPRD_SC_GATE_CLK_HW(sdio2_eb, "sdio2-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(10), 0, 0); +static SPRD_SC_GATE_CLK_HW(emmc_eb, "emmc-eb", &ap_axi.common.hw, 0x0, 0x1000, + BIT(11), 0, 0); +static SPRD_SC_GATE_CLK_HW(emmc_32k_eb, "emmc-32k-eb", &ap_axi.common.hw, 0x0, + 0x1000, BIT(27), 0, 0); +static SPRD_SC_GATE_CLK_HW(sdio0_32k_eb, "sdio0-32k-eb", &ap_axi.common.hw, 0x0, + 0x1000, BIT(28), 0, 0); +static SPRD_SC_GATE_CLK_HW(sdio1_32k_eb, "sdio1-32k-eb", &ap_axi.common.hw, 0x0, + 0x1000, BIT(29), 0, 0); +static SPRD_SC_GATE_CLK_HW(sdio2_32k_eb, "sdio2-32k-eb", &ap_axi.common.hw, 0x0, + 0x1000, BIT(30), 0, 0); +static SPRD_SC_GATE_CLK_HW(nandc_26m_eb, "nandc-26m-eb", &ap_axi.common.hw, 0x0, + 0x1000, BIT(31), 0, 0); +static SPRD_SC_GATE_CLK_HW(dma_eb2, "dma-eb2", &ap_axi.common.hw, 0x18, + 0x1000, BIT(0), 0, 0); +static SPRD_SC_GATE_CLK_HW(ce_eb2, "ce-eb2", &ap_axi.common.hw, 0x18, + 0x1000, BIT(1), 0, 0); + +static struct sprd_clk_common *sc9863a_apahb_gate_clks[] = { + /* address base is 0x20e00000 */ + &otg_eb.common, + &dma_eb.common, + &ce_eb.common, + &nandc_eb.common, + &sdio0_eb.common, + &sdio1_eb.common, + &sdio2_eb.common, + &emmc_eb.common, + &emmc_32k_eb.common, + &sdio0_32k_eb.common, + &sdio1_32k_eb.common, + &sdio2_32k_eb.common, + &nandc_26m_eb.common, + &dma_eb2.common, + &ce_eb2.common, +}; + +static struct clk_hw_onecell_data sc9863a_apahb_gate_hws = { + .hws = { + [CLK_OTG_EB] = &otg_eb.common.hw, + [CLK_DMA_EB] = &dma_eb.common.hw, + [CLK_CE_EB] = &ce_eb.common.hw, + [CLK_NANDC_EB] = &nandc_eb.common.hw, + [CLK_SDIO0_EB] = &sdio0_eb.common.hw, + [CLK_SDIO1_EB] = &sdio1_eb.common.hw, + [CLK_SDIO2_EB] = &sdio2_eb.common.hw, + [CLK_EMMC_EB] = &emmc_eb.common.hw, + [CLK_EMMC_32K_EB] = &emmc_32k_eb.common.hw, + [CLK_SDIO0_32K_EB] = &sdio0_32k_eb.common.hw, + [CLK_SDIO1_32K_EB] = &sdio1_32k_eb.common.hw, + [CLK_SDIO2_32K_EB] = &sdio2_32k_eb.common.hw, + [CLK_NANDC_26M_EB] = &nandc_26m_eb.common.hw, + [CLK_DMA_EB2] = &dma_eb2.common.hw, + [CLK_CE_EB2] = &ce_eb2.common.hw, + }, + .num = CLK_AP_AHB_GATE_NUM, +}; + +static const struct sprd_clk_desc sc9863a_apahb_gate_desc = { + .clk_clks = sc9863a_apahb_gate_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_apahb_gate_clks), + .hw_clks = &sc9863a_apahb_gate_hws, +}; + +/* aon gate clocks */ +static SPRD_SC_GATE_CLK_HW(gpio_eb, "gpio-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(3), 0, 0); +static SPRD_SC_GATE_CLK_HW(pwm0_eb, "pwm0-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(4), 0, 0); +static SPRD_SC_GATE_CLK_HW(pwm1_eb, "pwm1-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(pwm2_eb, "pwm2-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(6), 0, 0); +static SPRD_SC_GATE_CLK_HW(pwm3_eb, "pwm3-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(7), 0, 0); +static SPRD_SC_GATE_CLK_HW(kpd_eb, "kpd-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(8), 0, 0); +static SPRD_SC_GATE_CLK_HW(aon_syst_eb, "aon-syst-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_syst_eb, "ap-syst-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(aon_tmr_eb, "aon-tmr-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(11), 
CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(efuse_eb, "efuse-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(13), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(eic_eb, "eic-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(14), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(intc_eb, "intc-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(adi_eb, "adi-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(audif_eb, "audif-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(17), 0, 0); +static SPRD_SC_GATE_CLK_HW(aud_eb, "aud-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(18), 0, 0); +static SPRD_SC_GATE_CLK_HW(vbc_eb, "vbc-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(19), 0, 0); +static SPRD_SC_GATE_CLK_HW(pin_eb, "pin-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_wdg_eb, "ap-wdg-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(24), 0, 0); +static SPRD_SC_GATE_CLK_HW(mm_eb, "mm-eb", &aon_apb.common.hw, 0x0, + 0x1000, BIT(25), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(aon_apb_ckg_eb, "aon-apb-ckg-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(26), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ca53_ts0_eb, "ca53-ts0-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(28), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ca53_ts1_eb, "ca53-ts1-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(29), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ca53_dap_eb, "ca53-dap-eb", &aon_apb.common.hw, + 0x0, 0x1000, BIT(30), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(pmu_eb, "pmu-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(thm_eb, "thm-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(1), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(aux0_eb, "aux0-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(aux1_eb, "aux1-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(3), 0, 0); +static SPRD_SC_GATE_CLK_HW(aux2_eb, "aux2-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(probe_eb, "probe-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(5), 0, 0); +static SPRD_SC_GATE_CLK_HW(emc_ref_eb, "emc-ref-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ca53_wdg_eb, "ca53-wdg-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_tmr1_eb, "ap-tmr1-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(9), 0, 0); +static SPRD_SC_GATE_CLK_HW(ap_tmr2_eb, "ap-tmr2-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(10), 0, 0); +static SPRD_SC_GATE_CLK_HW(disp_emc_eb, "disp-emc-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(11), 0, 0); +static SPRD_SC_GATE_CLK_HW(zip_emc_eb, "zip-emc-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(12), 0, 0); +static SPRD_SC_GATE_CLK_HW(gsp_emc_eb, "gsp-emc-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(13), 0, 0); +static SPRD_SC_GATE_CLK_HW(mm_vsp_eb, "mm-vsp-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(14), 0, 0); +static SPRD_SC_GATE_CLK_HW(mdar_eb, "mdar-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(17), 0, 0); +static SPRD_SC_GATE_CLK_HW(rtc4m0_cal_eb, "rtc4m0-cal-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(18), 0, 0); +static SPRD_SC_GATE_CLK_HW(rtc4m1_cal_eb, "rtc4m1-cal-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(19), 0, 0); +static SPRD_SC_GATE_CLK_HW(djtag_eb, "djtag-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(20), 0, 0); +static 
SPRD_SC_GATE_CLK_HW(mbox_eb, "mbox-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(21), 0, 0); +static SPRD_SC_GATE_CLK_HW(aon_dma_eb, "aon-dma-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(22), 0, 0); +static SPRD_SC_GATE_CLK_HW(aon_apb_def_eb, "aon-apb-def-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(25), 0, 0); +static SPRD_SC_GATE_CLK_HW(ca5_ts0_eb, "ca5-ts0-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(26), 0, 0); +static SPRD_SC_GATE_CLK_HW(dbg_eb, "dbg-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(28), 0, 0); +static SPRD_SC_GATE_CLK_HW(dbg_emc_eb, "dbg-emc-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(29), 0, 0); +static SPRD_SC_GATE_CLK_HW(cross_trig_eb, "cross-trig-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(30), 0, 0); +static SPRD_SC_GATE_CLK_HW(serdes_dphy_eb, "serdes-dphy-eb", &aon_apb.common.hw, + 0x4, 0x1000, BIT(31), 0, 0); +static SPRD_SC_GATE_CLK_HW(arch_rtc_eb, "arch-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(0), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(kpd_rtc_eb, "kpd-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(1), 0, 0); +static SPRD_SC_GATE_CLK_HW(aon_syst_rtc_eb, "aon-syst-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(2), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_syst_rtc_eb, "ap-syst-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(3), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(aon_tmr_rtc_eb, "aon-tmr-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(4), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_tmr0_rtc_eb, "ap-tmr0-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(5), 0, 0); +static SPRD_SC_GATE_CLK_HW(eic_rtc_eb, "eic-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(6), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(eic_rtcdv5_eb, "eic-rtcdv5-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(7), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_wdg_rtc_eb, "ap-wdg-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(8), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ca53_wdg_rtc_eb, "ca53-wdg-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(9), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(thm_rtc_eb, "thm-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(10), 0, 0); +static SPRD_SC_GATE_CLK_HW(athma_rtc_eb, "athma-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(11), 0, 0); +static SPRD_SC_GATE_CLK_HW(gthma_rtc_eb, "gthma-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(12), 0, 0); +static SPRD_SC_GATE_CLK_HW(athma_rtc_a_eb, "athma-rtc-a-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(13), 0, 0); +static SPRD_SC_GATE_CLK_HW(gthma_rtc_a_eb, "gthma-rtc-a-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(14), 0, 0); +static SPRD_SC_GATE_CLK_HW(ap_tmr1_rtc_eb, "ap-tmr1-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(15), 0, 0); +static SPRD_SC_GATE_CLK_HW(ap_tmr2_rtc_eb, "ap-tmr2-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(16), 0, 0); +static SPRD_SC_GATE_CLK_HW(dxco_lc_rtc_eb, "dxco-lc-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(17), 0, 0); +static SPRD_SC_GATE_CLK_HW(bb_cal_rtc_eb, "bb-cal-rtc-eb", &aon_apb.common.hw, + 0x10, 0x1000, BIT(18), 0, 0); +static SPRD_SC_GATE_CLK_HW(gpu_eb, "gpu-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(0), 0, 0); +static SPRD_SC_GATE_CLK_HW(disp_eb, "disp-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(2), 0, 0); +static SPRD_SC_GATE_CLK_HW(mm_emc_eb, "mm-emc-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(3), 0, 0); +static SPRD_SC_GATE_CLK_HW(power_cpu_eb, "power-cpu-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(10), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(hw_i2c_eb, 
"hw-i2c-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(11), 0, 0); +static SPRD_SC_GATE_CLK_HW(mm_vsp_emc_eb, "mm-vsp-emc-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(14), 0, 0); +static SPRD_SC_GATE_CLK_HW(vsp_eb, "vsp-eb", &aon_apb.common.hw, 0x50, + 0x1000, BIT(16), 0, 0); +static SPRD_SC_GATE_CLK_HW(cssys_eb, "cssys-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(4), 0, 0); +static SPRD_SC_GATE_CLK_HW(dmc_eb, "dmc-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(5), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(rosc_eb, "rosc-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(7), 0, 0); +static SPRD_SC_GATE_CLK_HW(s_d_cfg_eb, "s-d-cfg-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(8), 0, 0); +static SPRD_SC_GATE_CLK_HW(s_d_ref_eb, "s-d-ref-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(9), 0, 0); +static SPRD_SC_GATE_CLK_HW(b_dma_eb, "b-dma-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(10), 0, 0); +static SPRD_SC_GATE_CLK_HW(anlg_eb, "anlg-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(11), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(anlg_apb_eb, "anlg-apb-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(13), 0, 0); +static SPRD_SC_GATE_CLK_HW(bsmtmr_eb, "bsmtmr-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(14), 0, 0); +static SPRD_SC_GATE_CLK_HW(ap_axi_eb, "ap-axi-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(15), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_intc0_eb, "ap-intc0-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(16), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_intc1_eb, "ap-intc1-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(17), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_intc2_eb, "ap-intc2-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(18), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_intc3_eb, "ap-intc3-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(19), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_intc4_eb, "ap-intc4-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(20), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(ap_intc5_eb, "ap-intc5-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(21), CLK_IGNORE_UNUSED, 0); +static SPRD_SC_GATE_CLK_HW(scc_eb, "scc-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(22), 0, 0); +static SPRD_SC_GATE_CLK_HW(dphy_cfg_eb, "dphy-cfg-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(23), 0, 0); +static SPRD_SC_GATE_CLK_HW(dphy_ref_eb, "dphy-ref-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(24), 0, 0); +static SPRD_SC_GATE_CLK_HW(cphy_cfg_eb, "cphy-cfg-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(25), 0, 0); +static SPRD_SC_GATE_CLK_HW(otg_ref_eb, "otg-ref-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(26), 0, 0); +static SPRD_SC_GATE_CLK_HW(serdes_eb, "serdes-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(27), 0, 0); +static SPRD_SC_GATE_CLK_HW(aon_ap_emc_eb, "aon-ap-emc-eb", &aon_apb.common.hw, 0xb0, + 0x1000, BIT(28), 0, 0); +static struct sprd_clk_common *sc9863a_aonapb_gate_clks[] = { + /* address base is 0x402e0000 */ + &gpio_eb.common, + &pwm0_eb.common, + &pwm1_eb.common, + &pwm2_eb.common, + &pwm3_eb.common, + &kpd_eb.common, + &aon_syst_eb.common, + &ap_syst_eb.common, + &aon_tmr_eb.common, + &efuse_eb.common, + &eic_eb.common, + &intc_eb.common, + &adi_eb.common, + &audif_eb.common, + &aud_eb.common, + &vbc_eb.common, + &pin_eb.common, + &ap_wdg_eb.common, + &mm_eb.common, + &aon_apb_ckg_eb.common, + &ca53_ts0_eb.common, + &ca53_ts1_eb.common, + &ca53_dap_eb.common, + &pmu_eb.common, + &thm_eb.common, + &aux0_eb.common, + &aux1_eb.common, + &aux2_eb.common, + &probe_eb.common, + &emc_ref_eb.common, + &ca53_wdg_eb.common, 
+ &ap_tmr1_eb.common, + &ap_tmr2_eb.common, + &disp_emc_eb.common, + &zip_emc_eb.common, + &gsp_emc_eb.common, + &mm_vsp_eb.common, + &mdar_eb.common, + &rtc4m0_cal_eb.common, + &rtc4m1_cal_eb.common, + &djtag_eb.common, + &mbox_eb.common, + &aon_dma_eb.common, + &aon_apb_def_eb.common, + &ca5_ts0_eb.common, + &dbg_eb.common, + &dbg_emc_eb.common, + &cross_trig_eb.common, + &serdes_dphy_eb.common, + &arch_rtc_eb.common, + &kpd_rtc_eb.common, + &aon_syst_rtc_eb.common, + &ap_syst_rtc_eb.common, + &aon_tmr_rtc_eb.common, + &ap_tmr0_rtc_eb.common, + &eic_rtc_eb.common, + &eic_rtcdv5_eb.common, + &ap_wdg_rtc_eb.common, + &ca53_wdg_rtc_eb.common, + &thm_rtc_eb.common, + &athma_rtc_eb.common, + &gthma_rtc_eb.common, + &athma_rtc_a_eb.common, + &gthma_rtc_a_eb.common, + &ap_tmr1_rtc_eb.common, + &ap_tmr2_rtc_eb.common, + &dxco_lc_rtc_eb.common, + &bb_cal_rtc_eb.common, + &gpu_eb.common, + &disp_eb.common, + &mm_emc_eb.common, + &power_cpu_eb.common, + &hw_i2c_eb.common, + &mm_vsp_emc_eb.common, + &vsp_eb.common, + &cssys_eb.common, + &dmc_eb.common, + &rosc_eb.common, + &s_d_cfg_eb.common, + &s_d_ref_eb.common, + &b_dma_eb.common, + &anlg_eb.common, + &anlg_apb_eb.common, + &bsmtmr_eb.common, + &ap_axi_eb.common, + &ap_intc0_eb.common, + &ap_intc1_eb.common, + &ap_intc2_eb.common, + &ap_intc3_eb.common, + &ap_intc4_eb.common, + &ap_intc5_eb.common, + &scc_eb.common, + &dphy_cfg_eb.common, + &dphy_ref_eb.common, + &cphy_cfg_eb.common, + &otg_ref_eb.common, + &serdes_eb.common, + &aon_ap_emc_eb.common, +}; + +static struct clk_hw_onecell_data sc9863a_aonapb_gate_hws = { + .hws = { + [CLK_GPIO_EB] = &gpio_eb.common.hw, + [CLK_PWM0_EB] = &pwm0_eb.common.hw, + [CLK_PWM1_EB] = &pwm1_eb.common.hw, + [CLK_PWM2_EB] = &pwm2_eb.common.hw, + [CLK_PWM3_EB] = &pwm3_eb.common.hw, + [CLK_KPD_EB] = &kpd_eb.common.hw, + [CLK_AON_SYST_EB] = &aon_syst_eb.common.hw, + [CLK_AP_SYST_EB] = &ap_syst_eb.common.hw, + [CLK_AON_TMR_EB] = &aon_tmr_eb.common.hw, + [CLK_EFUSE_EB] = &efuse_eb.common.hw, + [CLK_EIC_EB] = &eic_eb.common.hw, + [CLK_INTC_EB] = &intc_eb.common.hw, + [CLK_ADI_EB] = &adi_eb.common.hw, + [CLK_AUDIF_EB] = &audif_eb.common.hw, + [CLK_AUD_EB] = &aud_eb.common.hw, + [CLK_VBC_EB] = &vbc_eb.common.hw, + [CLK_PIN_EB] = &pin_eb.common.hw, + [CLK_AP_WDG_EB] = &ap_wdg_eb.common.hw, + [CLK_MM_EB] = &mm_eb.common.hw, + [CLK_AON_APB_CKG_EB] = &aon_apb_ckg_eb.common.hw, + [CLK_CA53_TS0_EB] = &ca53_ts0_eb.common.hw, + [CLK_CA53_TS1_EB] = &ca53_ts1_eb.common.hw, + [CLK_CS53_DAP_EB] = &ca53_dap_eb.common.hw, + [CLK_PMU_EB] = &pmu_eb.common.hw, + [CLK_THM_EB] = &thm_eb.common.hw, + [CLK_AUX0_EB] = &aux0_eb.common.hw, + [CLK_AUX1_EB] = &aux1_eb.common.hw, + [CLK_AUX2_EB] = &aux2_eb.common.hw, + [CLK_PROBE_EB] = &probe_eb.common.hw, + [CLK_EMC_REF_EB] = &emc_ref_eb.common.hw, + [CLK_CA53_WDG_EB] = &ca53_wdg_eb.common.hw, + [CLK_AP_TMR1_EB] = &ap_tmr1_eb.common.hw, + [CLK_AP_TMR2_EB] = &ap_tmr2_eb.common.hw, + [CLK_DISP_EMC_EB] = &disp_emc_eb.common.hw, + [CLK_ZIP_EMC_EB] = &zip_emc_eb.common.hw, + [CLK_GSP_EMC_EB] = &gsp_emc_eb.common.hw, + [CLK_MM_VSP_EB] = &mm_vsp_eb.common.hw, + [CLK_MDAR_EB] = &mdar_eb.common.hw, + [CLK_RTC4M0_CAL_EB] = &rtc4m0_cal_eb.common.hw, + [CLK_RTC4M1_CAL_EB] = &rtc4m1_cal_eb.common.hw, + [CLK_DJTAG_EB] = &djtag_eb.common.hw, + [CLK_MBOX_EB] = &mbox_eb.common.hw, + [CLK_AON_DMA_EB] = &aon_dma_eb.common.hw, + [CLK_AON_APB_DEF_EB] = &aon_apb_def_eb.common.hw, + [CLK_CA5_TS0_EB] = &ca5_ts0_eb.common.hw, + [CLK_DBG_EB] = &dbg_eb.common.hw, + [CLK_DBG_EMC_EB] = &dbg_emc_eb.common.hw, + 
[CLK_CROSS_TRIG_EB] = &cross_trig_eb.common.hw, + [CLK_SERDES_DPHY_EB] = &serdes_dphy_eb.common.hw, + [CLK_ARCH_RTC_EB] = &arch_rtc_eb.common.hw, + [CLK_KPD_RTC_EB] = &kpd_rtc_eb.common.hw, + [CLK_AON_SYST_RTC_EB] = &aon_syst_rtc_eb.common.hw, + [CLK_AP_SYST_RTC_EB] = &ap_syst_rtc_eb.common.hw, + [CLK_AON_TMR_RTC_EB] = &aon_tmr_rtc_eb.common.hw, + [CLK_AP_TMR0_RTC_EB] = &ap_tmr0_rtc_eb.common.hw, + [CLK_EIC_RTC_EB] = &eic_rtc_eb.common.hw, + [CLK_EIC_RTCDV5_EB] = &eic_rtcdv5_eb.common.hw, + [CLK_AP_WDG_RTC_EB] = &ap_wdg_rtc_eb.common.hw, + [CLK_CA53_WDG_RTC_EB] = &ca53_wdg_rtc_eb.common.hw, + [CLK_THM_RTC_EB] = &thm_rtc_eb.common.hw, + [CLK_ATHMA_RTC_EB] = &athma_rtc_eb.common.hw, + [CLK_GTHMA_RTC_EB] = &gthma_rtc_eb.common.hw, + [CLK_ATHMA_RTC_A_EB] = &athma_rtc_a_eb.common.hw, + [CLK_GTHMA_RTC_A_EB] = &gthma_rtc_a_eb.common.hw, + [CLK_AP_TMR1_RTC_EB] = &ap_tmr1_rtc_eb.common.hw, + [CLK_AP_TMR2_RTC_EB] = &ap_tmr2_rtc_eb.common.hw, + [CLK_DXCO_LC_RTC_EB] = &dxco_lc_rtc_eb.common.hw, + [CLK_BB_CAL_RTC_EB] = &bb_cal_rtc_eb.common.hw, + [CLK_GNU_EB] = &gpu_eb.common.hw, + [CLK_DISP_EB] = &disp_eb.common.hw, + [CLK_MM_EMC_EB] = &mm_emc_eb.common.hw, + [CLK_POWER_CPU_EB] = &power_cpu_eb.common.hw, + [CLK_HW_I2C_EB] = &hw_i2c_eb.common.hw, + [CLK_MM_VSP_EMC_EB] = &mm_vsp_emc_eb.common.hw, + [CLK_VSP_EB] = &vsp_eb.common.hw, + [CLK_CSSYS_EB] = &cssys_eb.common.hw, + [CLK_DMC_EB] = &dmc_eb.common.hw, + [CLK_ROSC_EB] = &rosc_eb.common.hw, + [CLK_S_D_CFG_EB] = &s_d_cfg_eb.common.hw, + [CLK_S_D_REF_EB] = &s_d_ref_eb.common.hw, + [CLK_B_DMA_EB] = &b_dma_eb.common.hw, + [CLK_ANLG_EB] = &anlg_eb.common.hw, + [CLK_ANLG_APB_EB] = &anlg_apb_eb.common.hw, + [CLK_BSMTMR_EB] = &bsmtmr_eb.common.hw, + [CLK_AP_AXI_EB] = &ap_axi_eb.common.hw, + [CLK_AP_INTC0_EB] = &ap_intc0_eb.common.hw, + [CLK_AP_INTC1_EB] = &ap_intc1_eb.common.hw, + [CLK_AP_INTC2_EB] = &ap_intc2_eb.common.hw, + [CLK_AP_INTC3_EB] = &ap_intc3_eb.common.hw, + [CLK_AP_INTC4_EB] = &ap_intc4_eb.common.hw, + [CLK_AP_INTC5_EB] = &ap_intc5_eb.common.hw, + [CLK_SCC_EB] = &scc_eb.common.hw, + [CLK_DPHY_CFG_EB] = &dphy_cfg_eb.common.hw, + [CLK_DPHY_REF_EB] = &dphy_ref_eb.common.hw, + [CLK_CPHY_CFG_EB] = &cphy_cfg_eb.common.hw, + [CLK_OTG_REF_EB] = &otg_ref_eb.common.hw, + [CLK_SERDES_EB] = &serdes_eb.common.hw, + [CLK_AON_AP_EMC_EB] = &aon_ap_emc_eb.common.hw, + }, + .num = CLK_AON_APB_GATE_NUM, +}; + +static const struct sprd_clk_desc sc9863a_aonapb_gate_desc = { + .clk_clks = sc9863a_aonapb_gate_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_aonapb_gate_clks), + .hw_clks = &sc9863a_aonapb_gate_hws, +}; + +/* mm gate clocks */ +static SPRD_SC_GATE_CLK_HW(mahb_ckg_eb, "mahb-ckg-eb", &mm_ahb.common.hw, 0x0, 0x1000, + BIT(0), 0, 0); +static SPRD_SC_GATE_CLK_HW(mdcam_eb, "mdcam-eb", &mm_ahb.common.hw, 0x0, 0x1000, + BIT(1), 0, 0); +static SPRD_SC_GATE_CLK_HW(misp_eb, "misp-eb", &mm_ahb.common.hw, 0x0, 0x1000, + BIT(2), 0, 0); +static SPRD_SC_GATE_CLK_HW(mahbcsi_eb, "mahbcsi-eb", &mm_ahb.common.hw, 0x0, 0x1000, + BIT(3), 0, 0); +static SPRD_SC_GATE_CLK_HW(mcsi_s_eb, "mcsi-s-eb", &mm_ahb.common.hw, 0x0, 0x1000, + BIT(4), 0, 0); +static SPRD_SC_GATE_CLK_HW(mcsi_t_eb, "mcsi-t-eb", &mm_ahb.common.hw, 0x0, 0x1000, + BIT(5), 0, 0); +static SPRD_GATE_CLK_HW(dcam_axi_eb, "dcam-axi-eb", &mm_ahb.common.hw, 0x8, + BIT(0), 0, 0); +static SPRD_GATE_CLK_HW(isp_axi_eb, "isp-axi-eb", &mm_ahb.common.hw, 0x8, + BIT(1), 0, 0); +static SPRD_GATE_CLK_HW(mcsi_eb, "mcsi-eb", &mm_ahb.common.hw, 0x8, + BIT(2), 0, 0); +static SPRD_GATE_CLK_HW(mcsi_s_ckg_eb, "mcsi-s-ckg-eb", &mm_ahb.common.hw, 
0x8, + BIT(3), 0, 0); +static SPRD_GATE_CLK_HW(mcsi_t_ckg_eb, "mcsi-t-ckg-eb", &mm_ahb.common.hw, 0x8, + BIT(4), 0, 0); +static SPRD_GATE_CLK_HW(sensor0_eb, "sensor0-eb", &mm_ahb.common.hw, 0x8, + BIT(5), 0, 0); +static SPRD_GATE_CLK_HW(sensor1_eb, "sensor1-eb", &mm_ahb.common.hw, 0x8, + BIT(6), 0, 0); +static SPRD_GATE_CLK_HW(sensor2_eb, "sensor2-eb", &mm_ahb.common.hw, 0x8, + BIT(7), 0, 0); +static SPRD_GATE_CLK_HW(mcphy_cfg_eb, "mcphy-cfg-eb", &mm_ahb.common.hw, 0x8, + BIT(8), 0, 0); + +static struct sprd_clk_common *sc9863a_mm_gate_clks[] = { + /* address base is 0x60800000 */ + &mahb_ckg_eb.common, + &mdcam_eb.common, + &misp_eb.common, + &mahbcsi_eb.common, + &mcsi_s_eb.common, + &mcsi_t_eb.common, + &dcam_axi_eb.common, + &isp_axi_eb.common, + &mcsi_eb.common, + &mcsi_s_ckg_eb.common, + &mcsi_t_ckg_eb.common, + &sensor0_eb.common, + &sensor1_eb.common, + &sensor2_eb.common, + &mcphy_cfg_eb.common, +}; + +static struct clk_hw_onecell_data sc9863a_mm_gate_hws = { + .hws = { + [CLK_MAHB_CKG_EB] = &mahb_ckg_eb.common.hw, + [CLK_MDCAM_EB] = &mdcam_eb.common.hw, + [CLK_MISP_EB] = &misp_eb.common.hw, + [CLK_MAHBCSI_EB] = &mahbcsi_eb.common.hw, + [CLK_MCSI_S_EB] = &mcsi_s_eb.common.hw, + [CLK_MCSI_T_EB] = &mcsi_t_eb.common.hw, + [CLK_DCAM_AXI_EB] = &dcam_axi_eb.common.hw, + [CLK_ISP_AXI_EB] = &isp_axi_eb.common.hw, + [CLK_MCSI_EB] = &mcsi_eb.common.hw, + [CLK_MCSI_S_CKG_EB] = &mcsi_s_ckg_eb.common.hw, + [CLK_MCSI_T_CKG_EB] = &mcsi_t_ckg_eb.common.hw, + [CLK_SENSOR0_EB] = &sensor0_eb.common.hw, + [CLK_SENSOR1_EB] = &sensor1_eb.common.hw, + [CLK_SENSOR2_EB] = &sensor2_eb.common.hw, + [CLK_MCPHY_CFG_EB] = &mcphy_cfg_eb.common.hw, + }, + .num = CLK_MM_GATE_NUM, +}; + +static const struct sprd_clk_desc sc9863a_mm_gate_desc = { + .clk_clks = sc9863a_mm_gate_clks, + .num_clk_clks = ARRAY_SIZE(sc9863a_mm_gate_clks), + .hw_clks = &sc9863a_mm_gate_hws, +}; + +static SPRD_SC_GATE_CLK_FW_NAME(sim0_eb, "sim0-eb", "ext-26m", 0x0, + 0x1000, BIT(0), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(iis0_eb, "iis0-eb", "ext-26m", 0x0, + 0x1000, BIT(1), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(iis1_eb, "iis1-eb", "ext-26m", 0x0, + 0x1000, BIT(2), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(iis2_eb, "iis2-eb", "ext-26m", 0x0, + 0x1000, BIT(3), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(spi0_eb, "spi0-eb", "ext-26m", 0x0, + 0x1000, BIT(5), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(spi1_eb, "spi1-eb", "ext-26m", 0x0, + 0x1000, BIT(6), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(spi2_eb, "spi2-eb", "ext-26m", 0x0, + 0x1000, BIT(7), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c0_eb, "i2c0-eb", "ext-26m", 0x0, + 0x1000, BIT(8), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c1_eb, "i2c1-eb", "ext-26m", 0x0, + 0x1000, BIT(9), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c2_eb, "i2c2-eb", "ext-26m", 0x0, + 0x1000, BIT(10), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c3_eb, "i2c3-eb", "ext-26m", 0x0, + 0x1000, BIT(11), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c4_eb, "i2c4-eb", "ext-26m", 0x0, + 0x1000, BIT(12), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(uart0_eb, "uart0-eb", "ext-26m", 0x0, + 0x1000, BIT(13), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(uart1_eb, "uart1-eb", "ext-26m", 0x0, + 0x1000, BIT(14), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(uart2_eb, "uart2-eb", "ext-26m", 0x0, + 0x1000, BIT(15), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(uart3_eb, "uart3-eb", "ext-26m", 0x0, + 0x1000, BIT(16), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(uart4_eb, "uart4-eb", "ext-26m", 0x0, + 0x1000, BIT(17), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(sim0_32k_eb, 
"sim0_32k-eb", "ext-26m", 0x0, + 0x1000, BIT(18), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(spi3_eb, "spi3-eb", "ext-26m", 0x0, + 0x1000, BIT(19), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c5_eb, "i2c5-eb", "ext-26m", 0x0, + 0x1000, BIT(20), 0, 0); +static SPRD_SC_GATE_CLK_FW_NAME(i2c6_eb, "i2c6-eb", "ext-26m", 0x0, + 0x1000, BIT(21), 0, 0); + +static struct sprd_clk_common *sc9863a_apapb_gate[] = { + /* address base is 0x71300000 */ + &sim0_eb.common, + &iis0_eb.common, + &iis1_eb.common, + &iis2_eb.common, + &spi0_eb.common, + &spi1_eb.common, + &spi2_eb.common, + &i2c0_eb.common, + &i2c1_eb.common, + &i2c2_eb.common, + &i2c3_eb.common, + &i2c4_eb.common, + &uart0_eb.common, + &uart1_eb.common, + &uart2_eb.common, + &uart3_eb.common, + &uart4_eb.common, + &sim0_32k_eb.common, + &spi3_eb.common, + &i2c5_eb.common, + &i2c6_eb.common, +}; + +static struct clk_hw_onecell_data sc9863a_apapb_gate_hws = { + .hws = { + [CLK_SIM0_EB] = &sim0_eb.common.hw, + [CLK_IIS0_EB] = &iis0_eb.common.hw, + [CLK_IIS1_EB] = &iis1_eb.common.hw, + [CLK_IIS2_EB] = &iis2_eb.common.hw, + [CLK_SPI0_EB] = &spi0_eb.common.hw, + [CLK_SPI1_EB] = &spi1_eb.common.hw, + [CLK_SPI2_EB] = &spi2_eb.common.hw, + [CLK_I2C0_EB] = &i2c0_eb.common.hw, + [CLK_I2C1_EB] = &i2c1_eb.common.hw, + [CLK_I2C2_EB] = &i2c2_eb.common.hw, + [CLK_I2C3_EB] = &i2c3_eb.common.hw, + [CLK_I2C4_EB] = &i2c4_eb.common.hw, + [CLK_UART0_EB] = &uart0_eb.common.hw, + [CLK_UART1_EB] = &uart1_eb.common.hw, + [CLK_UART2_EB] = &uart2_eb.common.hw, + [CLK_UART3_EB] = &uart3_eb.common.hw, + [CLK_UART4_EB] = &uart4_eb.common.hw, + [CLK_SIM0_32K_EB] = &sim0_32k_eb.common.hw, + [CLK_SPI3_EB] = &spi3_eb.common.hw, + [CLK_I2C5_EB] = &i2c5_eb.common.hw, + [CLK_I2C6_EB] = &i2c6_eb.common.hw, + }, + .num = CLK_AP_APB_GATE_NUM, +}; + +static const struct sprd_clk_desc sc9863a_apapb_gate_desc = { + .clk_clks = sc9863a_apapb_gate, + .num_clk_clks = ARRAY_SIZE(sc9863a_apapb_gate), + .hw_clks = &sc9863a_apapb_gate_hws, +}; + +static const struct of_device_id sprd_sc9863a_clk_ids[] = { + { .compatible = "sprd,sc9863a-ap-clk", /* 0x21500000 */ + .data = &sc9863a_ap_clk_desc }, + { .compatible = "sprd,sc9863a-pmu-gate", /* 0x402b0000 */ + .data = &sc9863a_pmu_gate_desc }, + { .compatible = "sprd,sc9863a-pll", /* 0x40353000 */ + .data = &sc9863a_pll_desc }, + { .compatible = "sprd,sc9863a-mpll", /* 0x40359000 */ + .data = &sc9863a_mpll_desc }, + { .compatible = "sprd,sc9863a-rpll", /* 0x4035c000 */ + .data = &sc9863a_rpll_desc }, + { .compatible = "sprd,sc9863a-dpll", /* 0x40363000 */ + .data = &sc9863a_dpll_desc }, + { .compatible = "sprd,sc9863a-aon-clk", /* 0x402d0000 */ + .data = &sc9863a_aon_clk_desc }, + { .compatible = "sprd,sc9863a-apahb-gate", /* 0x20e00000 */ + .data = &sc9863a_apahb_gate_desc }, + { .compatible = "sprd,sc9863a-aonapb-gate", /* 0x402e0000 */ + .data = &sc9863a_aonapb_gate_desc }, + { .compatible = "sprd,sc9863a-mm-gate", /* 0x60800000 */ + .data = &sc9863a_mm_gate_desc }, + { .compatible = "sprd,sc9863a-apapb-gate", /* 0x71300000 */ + .data = &sc9863a_apapb_gate_desc }, + { } +}; +MODULE_DEVICE_TABLE(of, sprd_sc9863a_clk_ids); + +static int sc9863a_clk_probe(struct platform_device *pdev) +{ + const struct sprd_clk_desc *desc; + int ret; + + desc = device_get_match_data(&pdev->dev); + if (!desc) + return -ENODEV; + + ret = sprd_clk_regmap_init(pdev, desc); + if (ret) + return ret; + + return sprd_clk_probe(&pdev->dev, desc->hw_clks); +} + +static struct platform_driver sc9863a_clk_driver = { + .probe = sc9863a_clk_probe, + .driver = { + .name = 
"sc9863a-clk", + .of_match_table = sprd_sc9863a_clk_ids, + }, +}; +module_platform_driver(sc9863a_clk_driver); + +MODULE_DESCRIPTION("Spreadtrum SC9863A Clock Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h index 116e6f826d04..54d1f96f4b68 100644 --- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.h +++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.h @@ -55,10 +55,6 @@ /* All the DRAM gates are exported */ -/* Some more module clocks are exported */ - -#define CLK_MBUS 112 - /* And the DSI and GPU module clock is exported */ #define CLK_NUMBER (CLK_GPU + 1) diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c index d9668493c3f9..524f33275bc7 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c @@ -50,8 +50,10 @@ static SUNXI_CCU_M(mixer1_div_a83_clk, "mixer1-div", "pll-de", 0x0c, 4, 4, CLK_SET_RATE_PARENT); static SUNXI_CCU_M(wb_div_a83_clk, "wb-div", "pll-de", 0x0c, 8, 4, CLK_SET_RATE_PARENT); +static SUNXI_CCU_M(rot_div_a83_clk, "rot-div", "pll-de", 0x0c, 0x0c, 4, + CLK_SET_RATE_PARENT); -static struct ccu_common *sun50i_h6_de3_clks[] = { +static struct ccu_common *sun8i_a83t_de2_clks[] = { &mixer0_clk.common, &mixer1_clk.common, &wb_clk.common, @@ -60,16 +62,16 @@ static struct ccu_common *sun50i_h6_de3_clks[] = { &bus_mixer1_clk.common, &bus_wb_clk.common, - &mixer0_div_clk.common, - &mixer1_div_clk.common, - &wb_div_clk.common, + &mixer0_div_a83_clk.common, + &mixer1_div_a83_clk.common, + &wb_div_a83_clk.common, &bus_rot_clk.common, &rot_clk.common, - &rot_div_clk.common, + &rot_div_a83_clk.common, }; -static struct ccu_common *sun8i_a83t_de2_clks[] = { +static struct ccu_common *sun8i_h3_de2_clks[] = { &mixer0_clk.common, &mixer1_clk.common, &wb_clk.common, @@ -78,34 +80,38 @@ static struct ccu_common *sun8i_a83t_de2_clks[] = { &bus_mixer1_clk.common, &bus_wb_clk.common, - &mixer0_div_a83_clk.common, - &mixer1_div_a83_clk.common, - &wb_div_a83_clk.common, + &mixer0_div_clk.common, + &mixer1_div_clk.common, + &wb_div_clk.common, }; -static struct ccu_common *sun8i_h3_de2_clks[] = { +static struct ccu_common *sun8i_v3s_de2_clks[] = { &mixer0_clk.common, - &mixer1_clk.common, &wb_clk.common, &bus_mixer0_clk.common, - &bus_mixer1_clk.common, &bus_wb_clk.common, &mixer0_div_clk.common, - &mixer1_div_clk.common, &wb_div_clk.common, }; -static struct ccu_common *sun8i_v3s_de2_clks[] = { +static struct ccu_common *sun50i_a64_de2_clks[] = { &mixer0_clk.common, + &mixer1_clk.common, &wb_clk.common, &bus_mixer0_clk.common, + &bus_mixer1_clk.common, &bus_wb_clk.common, &mixer0_div_clk.common, + &mixer1_div_clk.common, &wb_div_clk.common, + + &bus_rot_clk.common, + &rot_clk.common, + &rot_div_clk.common, }; static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = { @@ -113,16 +119,19 @@ static struct clk_hw_onecell_data sun8i_a83t_de2_hw_clks = { [CLK_MIXER0] = &mixer0_clk.common.hw, [CLK_MIXER1] = &mixer1_clk.common.hw, [CLK_WB] = &wb_clk.common.hw, + [CLK_ROT] = &rot_clk.common.hw, [CLK_BUS_MIXER0] = &bus_mixer0_clk.common.hw, [CLK_BUS_MIXER1] = &bus_mixer1_clk.common.hw, [CLK_BUS_WB] = &bus_wb_clk.common.hw, + [CLK_BUS_ROT] = &bus_rot_clk.common.hw, [CLK_MIXER0_DIV] = &mixer0_div_a83_clk.common.hw, [CLK_MIXER1_DIV] = &mixer1_div_a83_clk.common.hw, [CLK_WB_DIV] = &wb_div_a83_clk.common.hw, + [CLK_ROT_DIV] = &rot_div_a83_clk.common.hw, }, - .num = CLK_NUMBER_WITHOUT_ROT, + .num = CLK_NUMBER_WITH_ROT, }; static struct clk_hw_onecell_data sun8i_h3_de2_hw_clks 
= { @@ -156,7 +165,7 @@ static struct clk_hw_onecell_data sun8i_v3s_de2_hw_clks = { .num = CLK_NUMBER_WITHOUT_ROT, }; -static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = { +static struct clk_hw_onecell_data sun50i_a64_de2_hw_clks = { .hws = { [CLK_MIXER0] = &mixer0_clk.common.hw, [CLK_MIXER1] = &mixer1_clk.common.hw, @@ -179,9 +188,19 @@ static struct clk_hw_onecell_data sun50i_h6_de3_hw_clks = { static struct ccu_reset_map sun8i_a83t_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, /* - * For A83T, H3 and R40, mixer1 reset line is shared with wb, so - * only RST_WB is exported here. - * For V3s there's just no mixer1, so it also shares this struct. + * Mixer1 reset line is shared with wb, so only RST_WB is + * exported here. + */ + [RST_WB] = { 0x08, BIT(2) }, + [RST_ROT] = { 0x08, BIT(3) }, +}; + +static struct ccu_reset_map sun8i_h3_de2_resets[] = { + [RST_MIXER0] = { 0x08, BIT(0) }, + /* + * Mixer1 reset line is shared with wb, so only RST_WB is + * exported here. + * V3s doesn't have mixer1, so it also shares this struct. */ [RST_WB] = { 0x08, BIT(2) }, }; @@ -190,13 +209,13 @@ static struct ccu_reset_map sun50i_a64_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, [RST_MIXER1] = { 0x08, BIT(1) }, [RST_WB] = { 0x08, BIT(2) }, + [RST_ROT] = { 0x08, BIT(3) }, }; -static struct ccu_reset_map sun50i_h6_de3_resets[] = { +static struct ccu_reset_map sun50i_h5_de2_resets[] = { [RST_MIXER0] = { 0x08, BIT(0) }, [RST_MIXER1] = { 0x08, BIT(1) }, [RST_WB] = { 0x08, BIT(2) }, - [RST_ROT] = { 0x08, BIT(3) }, }; static const struct sunxi_ccu_desc sun8i_a83t_de2_clk_desc = { @@ -215,28 +234,18 @@ static const struct sunxi_ccu_desc sun8i_h3_de2_clk_desc = { .hw_clks = &sun8i_h3_de2_hw_clks, - .resets = sun8i_a83t_de2_resets, - .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets), + .resets = sun8i_h3_de2_resets, + .num_resets = ARRAY_SIZE(sun8i_h3_de2_resets), }; -static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = { - .ccu_clks = sun8i_h3_de2_clks, - .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks), +static const struct sunxi_ccu_desc sun8i_r40_de2_clk_desc = { + .ccu_clks = sun50i_a64_de2_clks, + .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks), - .hw_clks = &sun8i_h3_de2_hw_clks, + .hw_clks = &sun50i_a64_de2_hw_clks, - .resets = sun50i_a64_de2_resets, - .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets), -}; - -static const struct sunxi_ccu_desc sun50i_h6_de3_clk_desc = { - .ccu_clks = sun50i_h6_de3_clks, - .num_ccu_clks = ARRAY_SIZE(sun50i_h6_de3_clks), - - .hw_clks = &sun50i_h6_de3_hw_clks, - - .resets = sun50i_h6_de3_resets, - .num_resets = ARRAY_SIZE(sun50i_h6_de3_resets), + .resets = sun8i_a83t_de2_resets, + .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets), }; static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = { @@ -249,6 +258,26 @@ static const struct sunxi_ccu_desc sun8i_v3s_de2_clk_desc = { .num_resets = ARRAY_SIZE(sun8i_a83t_de2_resets), }; +static const struct sunxi_ccu_desc sun50i_a64_de2_clk_desc = { + .ccu_clks = sun50i_a64_de2_clks, + .num_ccu_clks = ARRAY_SIZE(sun50i_a64_de2_clks), + + .hw_clks = &sun50i_a64_de2_hw_clks, + + .resets = sun50i_a64_de2_resets, + .num_resets = ARRAY_SIZE(sun50i_a64_de2_resets), +}; + +static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = { + .ccu_clks = sun8i_h3_de2_clks, + .num_ccu_clks = ARRAY_SIZE(sun8i_h3_de2_clks), + + .hw_clks = &sun8i_h3_de2_hw_clks, + + .resets = sun50i_h5_de2_resets, + .num_resets = ARRAY_SIZE(sun50i_h5_de2_resets), +}; + static int sunxi_de2_clk_probe(struct platform_device *pdev) { struct resource *res; @@ 
-338,6 +367,10 @@ static const struct of_device_id sunxi_de2_clk_ids[] = { .data = &sun8i_h3_de2_clk_desc, }, { + .compatible = "allwinner,sun8i-r40-de2-clk", + .data = &sun8i_r40_de2_clk_desc, + }, + { .compatible = "allwinner,sun8i-v3s-de2-clk", .data = &sun8i_v3s_de2_clk_desc, }, @@ -347,11 +380,11 @@ static const struct of_device_id sunxi_de2_clk_ids[] = { }, { .compatible = "allwinner,sun50i-h5-de2-clk", - .data = &sun50i_a64_de2_clk_desc, + .data = &sun50i_h5_de2_clk_desc, }, { .compatible = "allwinner,sun50i-h6-de3-clk", - .data = &sun50i_h6_de3_clk_desc, + .data = &sun50i_h5_de2_clk_desc, }, { } }; diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index df966ca06788..1f7c30f87ece 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -12,7 +12,6 @@ obj-y += clk-sdmmc-mux.o obj-y += clk-super.o obj-y += clk-tegra-audio.o obj-y += clk-tegra-periph.o -obj-y += clk-tegra-pmc.o obj-y += clk-tegra-fixed.o obj-y += clk-tegra-super-gen4.o obj-$(CONFIG_TEGRA_CLK_EMC) += clk-emc.o diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index c4faebd32760..ff7da2d3e94d 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -32,7 +32,6 @@ enum clk_id { tegra_clk_audio4, tegra_clk_audio4_2x, tegra_clk_audio4_mux, - tegra_clk_blink, tegra_clk_bsea, tegra_clk_bsev, tegra_clk_cclk_g, @@ -44,14 +43,9 @@ enum clk_id { tegra_clk_clk72Mhz, tegra_clk_clk72Mhz_8, tegra_clk_clk_m, - tegra_clk_clk_m_div2, - tegra_clk_clk_m_div4, - tegra_clk_clk_out_1, - tegra_clk_clk_out_1_mux, - tegra_clk_clk_out_2, - tegra_clk_clk_out_2_mux, - tegra_clk_clk_out_3, - tegra_clk_clk_out_3_mux, + tegra_clk_osc, + tegra_clk_osc_div2, + tegra_clk_osc_div4, tegra_clk_cml0, tegra_clk_cml1, tegra_clk_csi, diff --git a/drivers/clk/tegra/clk-tegra-fixed.c b/drivers/clk/tegra/clk-tegra-fixed.c index 7c6c8abfcde6..77c22cef5014 100644 --- a/drivers/clk/tegra/clk-tegra-fixed.c +++ b/drivers/clk/tegra/clk-tegra-fixed.c @@ -46,7 +46,28 @@ int __init tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks, return -EINVAL; } + dt_clk = tegra_lookup_dt_id(tegra_clk_osc, clks); + if (!dt_clk) + return 0; + osc = clk_register_fixed_rate(NULL, "osc", NULL, 0, *osc_freq); + *dt_clk = osc; + + /* osc_div2 */ + dt_clk = tegra_lookup_dt_id(tegra_clk_osc_div2, clks); + if (dt_clk) { + clk = clk_register_fixed_factor(NULL, "osc_div2", "osc", + 0, 1, 2); + *dt_clk = clk; + } + + /* osc_div4 */ + dt_clk = tegra_lookup_dt_id(tegra_clk_osc_div4, clks); + if (dt_clk) { + clk = clk_register_fixed_factor(NULL, "osc_div4", "osc", + 0, 1, 4); + *dt_clk = clk; + } dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m, clks); if (!dt_clk) @@ -84,22 +105,6 @@ void __init tegra_fixed_clk_init(struct tegra_clk *tegra_clks) clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768); *dt_clk = clk; } - - /* clk_m_div2 */ - dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div2, tegra_clks); - if (dt_clk) { - clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m", - CLK_SET_RATE_PARENT, 1, 2); - *dt_clk = clk; - } - - /* clk_m_div4 */ - dt_clk = tegra_lookup_dt_id(tegra_clk_clk_m_div4, tegra_clks); - if (dt_clk) { - clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m", - CLK_SET_RATE_PARENT, 1, 4); - *dt_clk = clk; - } } void tegra_clk_osc_resume(void __iomem *clk_base) diff --git a/drivers/clk/tegra/clk-tegra-pmc.c b/drivers/clk/tegra/clk-tegra-pmc.c deleted file mode 100644 index bec3e008335f..000000000000 --- a/drivers/clk/tegra/clk-tegra-pmc.c +++ /dev/null @@ -1,122 +0,0 @@ -// 
SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved. - */ - -#include <linux/io.h> -#include <linux/clk-provider.h> -#include <linux/clkdev.h> -#include <linux/of.h> -#include <linux/of_address.h> -#include <linux/delay.h> -#include <linux/export.h> -#include <linux/clk/tegra.h> - -#include "clk.h" -#include "clk-id.h" - -#define PMC_CLK_OUT_CNTRL 0x1a8 -#define PMC_DPD_PADS_ORIDE 0x1c -#define PMC_DPD_PADS_ORIDE_BLINK_ENB 20 -#define PMC_CTRL 0 -#define PMC_CTRL_BLINK_ENB 7 -#define PMC_BLINK_TIMER 0x40 - -struct pmc_clk_init_data { - char *mux_name; - char *gate_name; - const char **parents; - int num_parents; - int mux_id; - int gate_id; - char *dev_name; - u8 mux_shift; - u8 gate_shift; -}; - -#define PMC_CLK(_num, _mux_shift, _gate_shift)\ - {\ - .mux_name = "clk_out_" #_num "_mux",\ - .gate_name = "clk_out_" #_num,\ - .parents = clk_out ##_num ##_parents,\ - .num_parents = ARRAY_SIZE(clk_out ##_num ##_parents),\ - .mux_id = tegra_clk_clk_out_ ##_num ##_mux,\ - .gate_id = tegra_clk_clk_out_ ##_num,\ - .dev_name = "extern" #_num,\ - .mux_shift = _mux_shift,\ - .gate_shift = _gate_shift,\ - } - -static DEFINE_SPINLOCK(clk_out_lock); - -static const char *clk_out1_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern1", -}; - -static const char *clk_out2_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern2", -}; - -static const char *clk_out3_parents[] = { "clk_m", "clk_m_div2", - "clk_m_div4", "extern3", -}; - -static struct pmc_clk_init_data pmc_clks[] = { - PMC_CLK(1, 6, 2), - PMC_CLK(2, 14, 10), - PMC_CLK(3, 22, 18), -}; - -void __init tegra_pmc_clk_init(void __iomem *pmc_base, - struct tegra_clk *tegra_clks) -{ - struct clk *clk; - struct clk **dt_clk; - int i; - - for (i = 0; i < ARRAY_SIZE(pmc_clks); i++) { - struct pmc_clk_init_data *data; - - data = pmc_clks + i; - - dt_clk = tegra_lookup_dt_id(data->mux_id, tegra_clks); - if (!dt_clk) - continue; - - clk = clk_register_mux(NULL, data->mux_name, data->parents, - data->num_parents, - CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, - pmc_base + PMC_CLK_OUT_CNTRL, data->mux_shift, - 3, 0, &clk_out_lock); - *dt_clk = clk; - - - dt_clk = tegra_lookup_dt_id(data->gate_id, tegra_clks); - if (!dt_clk) - continue; - - clk = clk_register_gate(NULL, data->gate_name, data->mux_name, - CLK_SET_RATE_PARENT, - pmc_base + PMC_CLK_OUT_CNTRL, - data->gate_shift, 0, &clk_out_lock); - *dt_clk = clk; - clk_register_clkdev(clk, data->dev_name, data->gate_name); - } - - /* blink */ - writel_relaxed(0, pmc_base + PMC_BLINK_TIMER); - clk = clk_register_gate(NULL, "blink_override", "clk_32k", 0, - pmc_base + PMC_DPD_PADS_ORIDE, - PMC_DPD_PADS_ORIDE_BLINK_ENB, 0, NULL); - - dt_clk = tegra_lookup_dt_id(tegra_clk_blink, tegra_clks); - if (!dt_clk) - return; - - clk = clk_register_gate(NULL, "blink", "blink_override", 0, - pmc_base + PMC_CTRL, - PMC_CTRL_BLINK_ENB, 0, NULL); - clk_register_clkdev(clk, "blink", NULL); - *dt_clk = clk; -} - diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 4efcaaf51b3a..bc9e47a4cb60 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -735,8 +735,9 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true }, [tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true }, [tegra_clk_clk_m] = { .dt_id = TEGRA114_CLK_CLK_M, .present = true }, - [tegra_clk_clk_m_div2] = { .dt_id = 
TEGRA114_CLK_CLK_M_DIV2, .present = true }, - [tegra_clk_clk_m_div4] = { .dt_id = TEGRA114_CLK_CLK_M_DIV4, .present = true }, + [tegra_clk_osc] = { .dt_id = TEGRA114_CLK_OSC, .present = true }, + [tegra_clk_osc_div2] = { .dt_id = TEGRA114_CLK_OSC_DIV2, .present = true }, + [tegra_clk_osc_div4] = { .dt_id = TEGRA114_CLK_OSC_DIV4, .present = true }, [tegra_clk_pll_ref] = { .dt_id = TEGRA114_CLK_PLL_REF, .present = true }, [tegra_clk_pll_c] = { .dt_id = TEGRA114_CLK_PLL_C, .present = true }, [tegra_clk_pll_c_out1] = { .dt_id = TEGRA114_CLK_PLL_C_OUT1, .present = true }, @@ -778,10 +779,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3] = { .dt_id = TEGRA114_CLK_AUDIO3, .present = true }, [tegra_clk_audio4] = { .dt_id = TEGRA114_CLK_AUDIO4, .present = true }, [tegra_clk_spdif] = { .dt_id = TEGRA114_CLK_SPDIF, .present = true }, - [tegra_clk_clk_out_1] = { .dt_id = TEGRA114_CLK_CLK_OUT_1, .present = true }, - [tegra_clk_clk_out_2] = { .dt_id = TEGRA114_CLK_CLK_OUT_2, .present = true }, - [tegra_clk_clk_out_3] = { .dt_id = TEGRA114_CLK_CLK_OUT_3, .present = true }, - [tegra_clk_blink] = { .dt_id = TEGRA114_CLK_BLINK, .present = true }, [tegra_clk_xusb_host_src] = { .dt_id = TEGRA114_CLK_XUSB_HOST_SRC, .present = true }, [tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA114_CLK_XUSB_FALCON_SRC, .present = true }, [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA114_CLK_XUSB_FS_SRC, .present = true }, @@ -803,9 +800,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3_mux] = { .dt_id = TEGRA114_CLK_AUDIO3_MUX, .present = true }, [tegra_clk_audio4_mux] = { .dt_id = TEGRA114_CLK_AUDIO4_MUX, .present = true }, [tegra_clk_spdif_mux] = { .dt_id = TEGRA114_CLK_SPDIF_MUX, .present = true }, - [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_1_MUX, .present = true }, - [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_2_MUX, .present = true }, - [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA114_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_dsia_mux] = { .dt_id = TEGRA114_CLK_DSIA_MUX, .present = true }, [tegra_clk_dsib_mux] = { .dt_id = TEGRA114_CLK_DSIB_MUX, .present = true }, [tegra_clk_cec] = { .dt_id = TEGRA114_CLK_CEC, .present = true }, @@ -815,8 +809,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "clk_m", .dt_id = TEGRA114_CLK_CLK_M }, { .con_id = "pll_ref", .dt_id = TEGRA114_CLK_PLL_REF }, { .con_id = "clk_32k", .dt_id = TEGRA114_CLK_CLK_32K }, - { .con_id = "clk_m_div2", .dt_id = TEGRA114_CLK_CLK_M_DIV2 }, - { .con_id = "clk_m_div4", .dt_id = TEGRA114_CLK_CLK_M_DIV4 }, + { .con_id = "osc", .dt_id = TEGRA114_CLK_OSC }, + { .con_id = "osc_div2", .dt_id = TEGRA114_CLK_OSC_DIV2 }, + { .con_id = "osc_div4", .dt_id = TEGRA114_CLK_OSC_DIV4 }, { .con_id = "pll_c", .dt_id = TEGRA114_CLK_PLL_C }, { .con_id = "pll_c_out1", .dt_id = TEGRA114_CLK_PLL_C_OUT1 }, { .con_id = "pll_c2", .dt_id = TEGRA114_CLK_PLL_C2 }, @@ -863,10 +858,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "audio3_2x", .dt_id = TEGRA114_CLK_AUDIO3_2X }, { .con_id = "audio4_2x", .dt_id = TEGRA114_CLK_AUDIO4_2X }, { .con_id = "spdif_2x", .dt_id = TEGRA114_CLK_SPDIF_2X }, - { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA114_CLK_EXTERN1 }, - { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA114_CLK_EXTERN2 }, - { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA114_CLK_EXTERN3 }, - { .con_id = "blink", .dt_id = TEGRA114_CLK_BLINK }, + { .con_id = "extern1", .dt_id = 
TEGRA114_CLK_EXTERN1 }, + { .con_id = "extern2", .dt_id = TEGRA114_CLK_EXTERN2 }, + { .con_id = "extern3", .dt_id = TEGRA114_CLK_EXTERN3 }, { .con_id = "cclk_g", .dt_id = TEGRA114_CLK_CCLK_G }, { .con_id = "cclk_lp", .dt_id = TEGRA114_CLK_CCLK_LP }, { .con_id = "sclk", .dt_id = TEGRA114_CLK_SCLK }, @@ -900,17 +894,6 @@ static void __init tegra114_fixed_clk_init(void __iomem *clk_base) /* clk_32k */ clk = clk_register_fixed_rate(NULL, "clk_32k", NULL, 0, 32768); clks[TEGRA114_CLK_CLK_32K] = clk; - - /* clk_m_div2 */ - clk = clk_register_fixed_factor(NULL, "clk_m_div2", "clk_m", - CLK_SET_RATE_PARENT, 1, 2); - clks[TEGRA114_CLK_CLK_M_DIV2] = clk; - - /* clk_m_div4 */ - clk = clk_register_fixed_factor(NULL, "clk_m_div4", "clk_m", - CLK_SET_RATE_PARENT, 1, 4); - clks[TEGRA114_CLK_CLK_M_DIV4] = clk; - } static void __init tegra114_pll_init(void __iomem *clk_base, @@ -1153,11 +1136,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA114_CLK_UARTB, TEGRA114_CLK_PLL_P, 408000000, 0 }, { TEGRA114_CLK_UARTC, TEGRA114_CLK_PLL_P, 408000000, 0 }, { TEGRA114_CLK_UARTD, TEGRA114_CLK_PLL_P, 408000000, 0 }, - { TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 1 }, - { TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 1 }, - { TEGRA114_CLK_EXTERN1, TEGRA114_CLK_PLL_A_OUT0, 0, 1 }, - { TEGRA114_CLK_CLK_OUT_1_MUX, TEGRA114_CLK_EXTERN1, 0, 1 }, - { TEGRA114_CLK_CLK_OUT_1, TEGRA114_CLK_CLK_MAX, 0, 1 }, + { TEGRA114_CLK_PLL_A, TEGRA114_CLK_CLK_MAX, 564480000, 0 }, + { TEGRA114_CLK_PLL_A_OUT0, TEGRA114_CLK_CLK_MAX, 11289600, 0 }, { TEGRA114_CLK_I2S0, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA114_CLK_I2S1, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA114_CLK_I2S2, TEGRA114_CLK_PLL_A_OUT0, 11289600, 0 }, @@ -1359,7 +1339,6 @@ static void __init tegra114_clock_init(struct device_node *np) tegra_audio_clk_init(clk_base, pmc_base, tegra114_clks, tegra114_audio_plls, ARRAY_SIZE(tegra114_audio_plls), 24000000); - tegra_pmc_clk_init(pmc_base, tegra114_clks); tegra_super_clk_gen4_init(clk_base, pmc_base, tegra114_clks, &pll_x_params); diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index b3110d5b5a6c..64e229ddf2a5 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -860,8 +860,9 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true }, [tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true }, [tegra_clk_clk_m] = { .dt_id = TEGRA124_CLK_CLK_M, .present = true }, - [tegra_clk_clk_m_div2] = { .dt_id = TEGRA124_CLK_CLK_M_DIV2, .present = true }, - [tegra_clk_clk_m_div4] = { .dt_id = TEGRA124_CLK_CLK_M_DIV4, .present = true }, + [tegra_clk_osc] = { .dt_id = TEGRA124_CLK_OSC, .present = true }, + [tegra_clk_osc_div2] = { .dt_id = TEGRA124_CLK_OSC_DIV2, .present = true }, + [tegra_clk_osc_div4] = { .dt_id = TEGRA124_CLK_OSC_DIV4, .present = true }, [tegra_clk_pll_ref] = { .dt_id = TEGRA124_CLK_PLL_REF, .present = true }, [tegra_clk_pll_c] = { .dt_id = TEGRA124_CLK_PLL_C, .present = true }, [tegra_clk_pll_c_out1] = { .dt_id = TEGRA124_CLK_PLL_C_OUT1, .present = true }, @@ -902,10 +903,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3] = { .dt_id = TEGRA124_CLK_AUDIO3, .present = true }, [tegra_clk_audio4] = { .dt_id = TEGRA124_CLK_AUDIO4, .present = true }, [tegra_clk_spdif] = { .dt_id = TEGRA124_CLK_SPDIF, .present = true }, - [tegra_clk_clk_out_1] = { .dt_id = 
TEGRA124_CLK_CLK_OUT_1, .present = true }, - [tegra_clk_clk_out_2] = { .dt_id = TEGRA124_CLK_CLK_OUT_2, .present = true }, - [tegra_clk_clk_out_3] = { .dt_id = TEGRA124_CLK_CLK_OUT_3, .present = true }, - [tegra_clk_blink] = { .dt_id = TEGRA124_CLK_BLINK, .present = true }, [tegra_clk_xusb_host_src] = { .dt_id = TEGRA124_CLK_XUSB_HOST_SRC, .present = true }, [tegra_clk_xusb_falcon_src] = { .dt_id = TEGRA124_CLK_XUSB_FALCON_SRC, .present = true }, [tegra_clk_xusb_fs_src] = { .dt_id = TEGRA124_CLK_XUSB_FS_SRC, .present = true }, @@ -931,9 +928,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3_mux] = { .dt_id = TEGRA124_CLK_AUDIO3_MUX, .present = true }, [tegra_clk_audio4_mux] = { .dt_id = TEGRA124_CLK_AUDIO4_MUX, .present = true }, [tegra_clk_spdif_mux] = { .dt_id = TEGRA124_CLK_SPDIF_MUX, .present = true }, - [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true }, - [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true }, - [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_cec] = { .dt_id = TEGRA124_CLK_CEC, .present = true }, }; @@ -941,8 +935,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "clk_m", .dt_id = TEGRA124_CLK_CLK_M }, { .con_id = "pll_ref", .dt_id = TEGRA124_CLK_PLL_REF }, { .con_id = "clk_32k", .dt_id = TEGRA124_CLK_CLK_32K }, - { .con_id = "clk_m_div2", .dt_id = TEGRA124_CLK_CLK_M_DIV2 }, - { .con_id = "clk_m_div4", .dt_id = TEGRA124_CLK_CLK_M_DIV4 }, + { .con_id = "osc", .dt_id = TEGRA124_CLK_OSC }, + { .con_id = "osc_div2", .dt_id = TEGRA124_CLK_OSC_DIV2 }, + { .con_id = "osc_div4", .dt_id = TEGRA124_CLK_OSC_DIV4 }, { .con_id = "pll_c", .dt_id = TEGRA124_CLK_PLL_C }, { .con_id = "pll_c_out1", .dt_id = TEGRA124_CLK_PLL_C_OUT1 }, { .con_id = "pll_c2", .dt_id = TEGRA124_CLK_PLL_C2 }, @@ -988,10 +983,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "audio3_2x", .dt_id = TEGRA124_CLK_AUDIO3_2X }, { .con_id = "audio4_2x", .dt_id = TEGRA124_CLK_AUDIO4_2X }, { .con_id = "spdif_2x", .dt_id = TEGRA124_CLK_SPDIF_2X }, - { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA124_CLK_EXTERN1 }, - { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA124_CLK_EXTERN2 }, - { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA124_CLK_EXTERN3 }, - { .con_id = "blink", .dt_id = TEGRA124_CLK_BLINK }, + { .con_id = "extern1", .dt_id = TEGRA124_CLK_EXTERN1 }, + { .con_id = "extern2", .dt_id = TEGRA124_CLK_EXTERN2 }, + { .con_id = "extern3", .dt_id = TEGRA124_CLK_EXTERN3 }, { .con_id = "cclk_g", .dt_id = TEGRA124_CLK_CCLK_G }, { .con_id = "cclk_lp", .dt_id = TEGRA124_CLK_CCLK_LP }, { .con_id = "sclk", .dt_id = TEGRA124_CLK_SCLK }, @@ -1298,11 +1292,8 @@ static struct tegra_clk_init_table common_init_table[] __initdata = { { TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0 }, { TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0 }, { TEGRA124_CLK_UARTD, TEGRA124_CLK_PLL_P, 408000000, 0 }, - { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 1 }, - { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 1 }, - { TEGRA124_CLK_EXTERN1, TEGRA124_CLK_PLL_A_OUT0, 0, 1 }, - { TEGRA124_CLK_CLK_OUT_1_MUX, TEGRA124_CLK_EXTERN1, 0, 1 }, - { TEGRA124_CLK_CLK_OUT_1, TEGRA124_CLK_CLK_MAX, 0, 1 }, + { TEGRA124_CLK_PLL_A, TEGRA124_CLK_CLK_MAX, 564480000, 0 }, + { TEGRA124_CLK_PLL_A_OUT0, TEGRA124_CLK_CLK_MAX, 11289600, 0 }, { TEGRA124_CLK_I2S0, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 }, { 
TEGRA124_CLK_I2S1, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA124_CLK_I2S2, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0 }, @@ -1457,11 +1448,9 @@ static void __init tegra132_clock_apply_init_table(void) * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132 * @np: struct device_node * of the DT node for the SoC CAR IP block * - * Register most of the clocks controlled by the CAR IP block, along - * with a few clocks controlled by the PMC IP block. Everything in - * this function should be common to Tegra124 and Tegra132. XXX The - * PMC clock initialization should probably be moved to PMC-specific - * driver code. No return value. + * Register most of the clocks controlled by the CAR IP block. + * Everything in this function should be common to Tegra124 and Tegra132. + * No return value. */ static void __init tegra124_132_clock_init_pre(struct device_node *np) { @@ -1504,7 +1493,6 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np) tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, tegra124_audio_plls, ARRAY_SIZE(tegra124_audio_plls), 24576000); - tegra_pmc_clk_init(pmc_base, tegra124_clks); /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */ plld_base = readl(clk_base + PLLD_BASE); @@ -1516,11 +1504,11 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np) * tegra124_132_clock_init_post - clock initialization postamble for T124/T132 * @np: struct device_node * of the DT node for the SoC CAR IP block * - * Register most of the along with a few clocks controlled by the PMC - * IP block. Everything in this function should be common to Tegra124 + * Register most of the clocks controlled by the CAR IP block. + * Everything in this function should be common to Tegra124 * and Tegra132. This function must be called after - * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will - * not be set. No return value. + * tegra124_132_clock_init_pre(), otherwise clk_base will not be set. + * No return value. 
*/ static void __init tegra124_132_clock_init_post(struct device_node *np) { diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c index fff5cba87637..085feb04e913 100644 --- a/drivers/clk/tegra/clk-tegra20.c +++ b/drivers/clk/tegra/clk-tegra20.c @@ -458,7 +458,6 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "cdev1", .dt_id = TEGRA20_CLK_CDEV1 }, { .con_id = "cdev2", .dt_id = TEGRA20_CLK_CDEV2 }, { .con_id = "clk_32k", .dt_id = TEGRA20_CLK_CLK_32K }, - { .con_id = "blink", .dt_id = TEGRA20_CLK_BLINK }, { .con_id = "clk_m", .dt_id = TEGRA20_CLK_CLK_M }, { .con_id = "pll_ref", .dt_id = TEGRA20_CLK_PLL_REF }, { .dev_id = "tegra20-i2s.0", .dt_id = TEGRA20_CLK_I2S1 }, @@ -537,7 +536,6 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = { [tegra_clk_csi] = { .dt_id = TEGRA20_CLK_CSI, .present = true }, [tegra_clk_isp] = { .dt_id = TEGRA20_CLK_ISP, .present = true }, [tegra_clk_clk_32k] = { .dt_id = TEGRA20_CLK_CLK_32K, .present = true }, - [tegra_clk_blink] = { .dt_id = TEGRA20_CLK_BLINK, .present = true }, [tegra_clk_hclk] = { .dt_id = TEGRA20_CLK_HCLK, .present = true }, [tegra_clk_pclk] = { .dt_id = TEGRA20_CLK_PCLK, .present = true }, [tegra_clk_pll_p_out1] = { .dt_id = TEGRA20_CLK_PLL_P_OUT1, .present = true }, @@ -1031,10 +1029,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA20_CLK_UARTC, TEGRA20_CLK_PLL_P, 0, 0 }, { TEGRA20_CLK_UARTD, TEGRA20_CLK_PLL_P, 0, 0 }, { TEGRA20_CLK_UARTE, TEGRA20_CLK_PLL_P, 0, 0 }, - { TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 1 }, - { TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 1 }, - { TEGRA20_CLK_CDEV1, TEGRA20_CLK_CLK_MAX, 0, 1 }, - { TEGRA20_CLK_BLINK, TEGRA20_CLK_CLK_MAX, 32768, 1 }, + { TEGRA20_CLK_PLL_A, TEGRA20_CLK_CLK_MAX, 56448000, 0 }, + { TEGRA20_CLK_PLL_A_OUT0, TEGRA20_CLK_CLK_MAX, 11289600, 0 }, { TEGRA20_CLK_I2S1, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA20_CLK_I2S2, TEGRA20_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA20_CLK_SDMMC1, TEGRA20_CLK_PLL_P, 48000000, 0 }, @@ -1146,7 +1142,6 @@ static void __init tegra20_clock_init(struct device_node *np) tegra_super_clk_gen4_init(clk_base, pmc_base, tegra20_clks, NULL); tegra20_periph_clk_init(); tegra20_audio_clk_init(); - tegra_pmc_clk_init(pmc_base, tegra20_clks); tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA20_CLK_CLK_MAX); diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c index 762cd186f714..defe3b7ebfa4 100644 --- a/drivers/clk/tegra/clk-tegra210.c +++ b/drivers/clk/tegra/clk-tegra210.c @@ -2371,8 +2371,9 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_fuse_burn] = { .dt_id = TEGRA210_CLK_FUSE_BURN, .present = true }, [tegra_clk_clk_32k] = { .dt_id = TEGRA210_CLK_CLK_32K, .present = true }, [tegra_clk_clk_m] = { .dt_id = TEGRA210_CLK_CLK_M, .present = true }, - [tegra_clk_clk_m_div2] = { .dt_id = TEGRA210_CLK_CLK_M_DIV2, .present = true }, - [tegra_clk_clk_m_div4] = { .dt_id = TEGRA210_CLK_CLK_M_DIV4, .present = true }, + [tegra_clk_osc] = { .dt_id = TEGRA210_CLK_OSC, .present = true }, + [tegra_clk_osc_div2] = { .dt_id = TEGRA210_CLK_OSC_DIV2, .present = true }, + [tegra_clk_osc_div4] = { .dt_id = TEGRA210_CLK_OSC_DIV4, .present = true }, [tegra_clk_pll_ref] = { .dt_id = TEGRA210_CLK_PLL_REF, .present = true }, [tegra_clk_pll_c] = { .dt_id = TEGRA210_CLK_PLL_C, .present = true }, [tegra_clk_pll_c_out1] = { .dt_id = TEGRA210_CLK_PLL_C_OUT1, .present = true }, @@ -2417,10 +2418,6 @@ static struct tegra_clk 
tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3] = { .dt_id = TEGRA210_CLK_AUDIO3, .present = true }, [tegra_clk_audio4] = { .dt_id = TEGRA210_CLK_AUDIO4, .present = true }, [tegra_clk_spdif] = { .dt_id = TEGRA210_CLK_SPDIF, .present = true }, - [tegra_clk_clk_out_1] = { .dt_id = TEGRA210_CLK_CLK_OUT_1, .present = true }, - [tegra_clk_clk_out_2] = { .dt_id = TEGRA210_CLK_CLK_OUT_2, .present = true }, - [tegra_clk_clk_out_3] = { .dt_id = TEGRA210_CLK_CLK_OUT_3, .present = true }, - [tegra_clk_blink] = { .dt_id = TEGRA210_CLK_BLINK, .present = true }, [tegra_clk_xusb_gate] = { .dt_id = TEGRA210_CLK_XUSB_GATE, .present = true }, [tegra_clk_xusb_host_src_8] = { .dt_id = TEGRA210_CLK_XUSB_HOST_SRC, .present = true }, [tegra_clk_xusb_falcon_src_8] = { .dt_id = TEGRA210_CLK_XUSB_FALCON_SRC, .present = true }, @@ -2452,9 +2449,6 @@ static struct tegra_clk tegra210_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3_mux] = { .dt_id = TEGRA210_CLK_AUDIO3_MUX, .present = true }, [tegra_clk_audio4_mux] = { .dt_id = TEGRA210_CLK_AUDIO4_MUX, .present = true }, [tegra_clk_spdif_mux] = { .dt_id = TEGRA210_CLK_SPDIF_MUX, .present = true }, - [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_1_MUX, .present = true }, - [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_2_MUX, .present = true }, - [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA210_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_maud] = { .dt_id = TEGRA210_CLK_MAUD, .present = true }, [tegra_clk_mipibif] = { .dt_id = TEGRA210_CLK_MIPIBIF, .present = true }, [tegra_clk_qspi] = { .dt_id = TEGRA210_CLK_QSPI, .present = true }, @@ -2497,8 +2491,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "clk_m", .dt_id = TEGRA210_CLK_CLK_M }, { .con_id = "pll_ref", .dt_id = TEGRA210_CLK_PLL_REF }, { .con_id = "clk_32k", .dt_id = TEGRA210_CLK_CLK_32K }, - { .con_id = "clk_m_div2", .dt_id = TEGRA210_CLK_CLK_M_DIV2 }, - { .con_id = "clk_m_div4", .dt_id = TEGRA210_CLK_CLK_M_DIV4 }, + { .con_id = "osc", .dt_id = TEGRA210_CLK_OSC }, + { .con_id = "osc_div2", .dt_id = TEGRA210_CLK_OSC_DIV2 }, + { .con_id = "osc_div4", .dt_id = TEGRA210_CLK_OSC_DIV4 }, { .con_id = "pll_c", .dt_id = TEGRA210_CLK_PLL_C }, { .con_id = "pll_c_out1", .dt_id = TEGRA210_CLK_PLL_C_OUT1 }, { .con_id = "pll_c2", .dt_id = TEGRA210_CLK_PLL_C2 }, @@ -2540,10 +2535,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "audio4", .dt_id = TEGRA210_CLK_AUDIO4 }, { .con_id = "spdif", .dt_id = TEGRA210_CLK_SPDIF }, { .con_id = "spdif_2x", .dt_id = TEGRA210_CLK_SPDIF_2X }, - { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA210_CLK_EXTERN1 }, - { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA210_CLK_EXTERN2 }, - { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA210_CLK_EXTERN3 }, - { .con_id = "blink", .dt_id = TEGRA210_CLK_BLINK }, + { .con_id = "extern1", .dt_id = TEGRA210_CLK_EXTERN1 }, + { .con_id = "extern2", .dt_id = TEGRA210_CLK_EXTERN2 }, + { .con_id = "extern3", .dt_id = TEGRA210_CLK_EXTERN3 }, { .con_id = "cclk_g", .dt_id = TEGRA210_CLK_CCLK_G }, { .con_id = "cclk_lp", .dt_id = TEGRA210_CLK_CCLK_LP }, { .con_id = "sclk", .dt_id = TEGRA210_CLK_SCLK }, @@ -2999,7 +2993,7 @@ static const char * const la_parents[] = { }; static struct tegra_clk_periph tegra210_la = - TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, 0); + TEGRA_CLK_PERIPH(29, 7, 9, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP, 76, 0, NULL, NULL); static __init void tegra210_periph_clk_init(void __iomem *clk_base, void 
__iomem *pmc_base) @@ -3448,11 +3442,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA210_CLK_UARTB, TEGRA210_CLK_PLL_P, 408000000, 0 }, { TEGRA210_CLK_UARTC, TEGRA210_CLK_PLL_P, 408000000, 0 }, { TEGRA210_CLK_UARTD, TEGRA210_CLK_PLL_P, 408000000, 0 }, - { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 1 }, - { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 1 }, - { TEGRA210_CLK_EXTERN1, TEGRA210_CLK_PLL_A_OUT0, 0, 1 }, - { TEGRA210_CLK_CLK_OUT_1_MUX, TEGRA210_CLK_EXTERN1, 0, 1 }, - { TEGRA210_CLK_CLK_OUT_1, TEGRA210_CLK_CLK_MAX, 0, 1 }, + { TEGRA210_CLK_PLL_A, TEGRA210_CLK_CLK_MAX, 564480000, 0 }, + { TEGRA210_CLK_PLL_A_OUT0, TEGRA210_CLK_CLK_MAX, 11289600, 0 }, { TEGRA210_CLK_I2S0, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_I2S1, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA210_CLK_I2S2, TEGRA210_CLK_PLL_A_OUT0, 11289600, 0 }, @@ -3693,7 +3684,6 @@ static void __init tegra210_clock_init(struct device_node *np) tegra_audio_clk_init(clk_base, pmc_base, tegra210_clks, tegra210_audio_plls, ARRAY_SIZE(tegra210_audio_plls), 24576000); - tegra_pmc_clk_init(pmc_base, tegra210_clks); /* For Tegra210, PLLD is the only source for DSIA & DSIB */ value = readl(clk_base + PLLD_BASE); diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c index b20891489e11..3255f82e61b5 100644 --- a/drivers/clk/tegra/clk-tegra30.c +++ b/drivers/clk/tegra/clk-tegra30.c @@ -569,10 +569,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "audio3_2x", .dt_id = TEGRA30_CLK_AUDIO3_2X }, { .con_id = "audio4_2x", .dt_id = TEGRA30_CLK_AUDIO4_2X }, { .con_id = "spdif_2x", .dt_id = TEGRA30_CLK_SPDIF_2X }, - { .con_id = "extern1", .dev_id = "clk_out_1", .dt_id = TEGRA30_CLK_EXTERN1 }, - { .con_id = "extern2", .dev_id = "clk_out_2", .dt_id = TEGRA30_CLK_EXTERN2 }, - { .con_id = "extern3", .dev_id = "clk_out_3", .dt_id = TEGRA30_CLK_EXTERN3 }, - { .con_id = "blink", .dt_id = TEGRA30_CLK_BLINK }, + { .con_id = "extern1", .dt_id = TEGRA30_CLK_EXTERN1 }, + { .con_id = "extern2", .dt_id = TEGRA30_CLK_EXTERN2 }, + { .con_id = "extern3", .dt_id = TEGRA30_CLK_EXTERN3 }, { .con_id = "cclk_g", .dt_id = TEGRA30_CLK_CCLK_G }, { .con_id = "cclk_lp", .dt_id = TEGRA30_CLK_CCLK_LP }, { .con_id = "sclk", .dt_id = TEGRA30_CLK_SCLK }, @@ -581,8 +580,9 @@ static struct tegra_devclk devclks[] __initdata = { { .con_id = "twd", .dt_id = TEGRA30_CLK_TWD }, { .con_id = "emc", .dt_id = TEGRA30_CLK_EMC }, { .con_id = "clk_32k", .dt_id = TEGRA30_CLK_CLK_32K }, - { .con_id = "clk_m_div2", .dt_id = TEGRA30_CLK_CLK_M_DIV2 }, - { .con_id = "clk_m_div4", .dt_id = TEGRA30_CLK_CLK_M_DIV4 }, + { .con_id = "osc", .dt_id = TEGRA30_CLK_OSC }, + { .con_id = "osc_div2", .dt_id = TEGRA30_CLK_OSC_DIV2 }, + { .con_id = "osc_div4", .dt_id = TEGRA30_CLK_OSC_DIV4 }, { .con_id = "cml0", .dt_id = TEGRA30_CLK_CML0 }, { .con_id = "cml1", .dt_id = TEGRA30_CLK_CML1 }, { .con_id = "clk_m", .dt_id = TEGRA30_CLK_CLK_M }, @@ -683,8 +683,9 @@ static struct tegra_devclk devclks[] __initdata = { static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = { [tegra_clk_clk_32k] = { .dt_id = TEGRA30_CLK_CLK_32K, .present = true }, [tegra_clk_clk_m] = { .dt_id = TEGRA30_CLK_CLK_M, .present = true }, - [tegra_clk_clk_m_div2] = { .dt_id = TEGRA30_CLK_CLK_M_DIV2, .present = true }, - [tegra_clk_clk_m_div4] = { .dt_id = TEGRA30_CLK_CLK_M_DIV4, .present = true }, + [tegra_clk_osc] = { .dt_id = TEGRA30_CLK_OSC, .present = true }, + [tegra_clk_osc_div2] = { .dt_id = TEGRA30_CLK_OSC_DIV2, .present = 
true }, + [tegra_clk_osc_div4] = { .dt_id = TEGRA30_CLK_OSC_DIV4, .present = true }, [tegra_clk_pll_ref] = { .dt_id = TEGRA30_CLK_PLL_REF, .present = true }, [tegra_clk_spdif_in_sync] = { .dt_id = TEGRA30_CLK_SPDIF_IN_SYNC, .present = true }, [tegra_clk_i2s0_sync] = { .dt_id = TEGRA30_CLK_I2S0_SYNC, .present = true }, @@ -711,13 +712,6 @@ static struct tegra_clk tegra30_clks[tegra_clk_max] __initdata = { [tegra_clk_audio3_2x] = { .dt_id = TEGRA30_CLK_AUDIO3_2X, .present = true }, [tegra_clk_audio4_2x] = { .dt_id = TEGRA30_CLK_AUDIO4_2X, .present = true }, [tegra_clk_spdif_2x] = { .dt_id = TEGRA30_CLK_SPDIF_2X, .present = true }, - [tegra_clk_clk_out_1] = { .dt_id = TEGRA30_CLK_CLK_OUT_1, .present = true }, - [tegra_clk_clk_out_2] = { .dt_id = TEGRA30_CLK_CLK_OUT_2, .present = true }, - [tegra_clk_clk_out_3] = { .dt_id = TEGRA30_CLK_CLK_OUT_3, .present = true }, - [tegra_clk_blink] = { .dt_id = TEGRA30_CLK_BLINK, .present = true }, - [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_1_MUX, .present = true }, - [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_2_MUX, .present = true }, - [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA30_CLK_CLK_OUT_3_MUX, .present = true }, [tegra_clk_hclk] = { .dt_id = TEGRA30_CLK_HCLK, .present = true }, [tegra_clk_pclk] = { .dt_id = TEGRA30_CLK_PCLK, .present = true }, [tegra_clk_i2s0] = { .dt_id = TEGRA30_CLK_I2S0, .present = true }, @@ -1227,12 +1221,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { { TEGRA30_CLK_UARTC, TEGRA30_CLK_PLL_P, 408000000, 0 }, { TEGRA30_CLK_UARTD, TEGRA30_CLK_PLL_P, 408000000, 0 }, { TEGRA30_CLK_UARTE, TEGRA30_CLK_PLL_P, 408000000, 0 }, - { TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 1 }, - { TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 1 }, - { TEGRA30_CLK_EXTERN1, TEGRA30_CLK_PLL_A_OUT0, 0, 1 }, - { TEGRA30_CLK_CLK_OUT_1_MUX, TEGRA30_CLK_EXTERN1, 0, 0 }, - { TEGRA30_CLK_CLK_OUT_1, TEGRA30_CLK_CLK_MAX, 0, 1 }, - { TEGRA30_CLK_BLINK, TEGRA30_CLK_CLK_MAX, 0, 1 }, + { TEGRA30_CLK_PLL_A, TEGRA30_CLK_CLK_MAX, 564480000, 0 }, + { TEGRA30_CLK_PLL_A_OUT0, TEGRA30_CLK_CLK_MAX, 11289600, 0 }, { TEGRA30_CLK_I2S0, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA30_CLK_I2S1, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 }, { TEGRA30_CLK_I2S2, TEGRA30_CLK_PLL_A_OUT0, 11289600, 0 }, @@ -1362,7 +1352,6 @@ static void __init tegra30_clock_init(struct device_node *np) tegra_audio_clk_init(clk_base, pmc_base, tegra30_clks, tegra30_audio_plls, ARRAY_SIZE(tegra30_audio_plls), 24000000); - tegra_pmc_clk_init(pmc_base, tegra30_clks); tegra_init_dup_clks(tegra_clk_duplicates, clks, TEGRA30_CLK_CLK_MAX); diff --git a/drivers/clk/tegra/clk.h b/drivers/clk/tegra/clk.h index 416a6b09f6a3..2c9a68302e02 100644 --- a/drivers/clk/tegra/clk.h +++ b/drivers/clk/tegra/clk.h @@ -854,7 +854,6 @@ void tegra_periph_clk_init(void __iomem *clk_base, void __iomem *pmc_base, struct tegra_clk *tegra_clks, struct tegra_clk_pll_params *pll_params); -void tegra_pmc_clk_init(void __iomem *pmc_base, struct tegra_clk *tegra_clks); void tegra_fixed_clk_init(struct tegra_clk *tegra_clks); int tegra_osc_clk_init(void __iomem *clk_base, struct tegra_clk *clks, unsigned long *input_freqs, unsigned int num, diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c index fe686f77787f..692be2fd9261 100644 --- a/drivers/clk/versatile/clk-icst.c +++ b/drivers/clk/versatile/clk-icst.c @@ -34,18 +34,6 @@ #define INTEGRATOR_AP_PCI_25_33_MHZ BIT(8) /** - * enum icst_control_type - the type of ICST control register - */ -enum 
icst_control_type { - ICST_VERSATILE, /* The standard type, all control bits available */ - ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */ - ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */ - ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */ - ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */ - ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */ -}; - -/** * struct clk_icst - ICST VCO clock wrapper * @hw: corresponding clock hardware entry * @vcoreg: VCO register address @@ -344,12 +332,12 @@ static const struct clk_ops icst_ops = { .set_rate = icst_set_rate, }; -static struct clk *icst_clk_setup(struct device *dev, - const struct clk_icst_desc *desc, - const char *name, - const char *parent_name, - struct regmap *map, - enum icst_control_type ctype) +struct clk *icst_clk_setup(struct device *dev, + const struct clk_icst_desc *desc, + const char *name, + const char *parent_name, + struct regmap *map, + enum icst_control_type ctype) { struct clk *clk; struct clk_icst *icst; @@ -386,6 +374,7 @@ static struct clk *icst_clk_setup(struct device *dev, return clk; } +EXPORT_SYMBOL_GPL(icst_clk_setup); struct clk *icst_clk_register(struct device *dev, const struct clk_icst_desc *desc, diff --git a/drivers/clk/versatile/clk-icst.h b/drivers/clk/versatile/clk-icst.h index e36ca1a20e90..1a119ef11066 100644 --- a/drivers/clk/versatile/clk-icst.h +++ b/drivers/clk/versatile/clk-icst.h @@ -1,4 +1,19 @@ /* SPDX-License-Identifier: GPL-2.0 */ +struct regmap; + +/** + * enum icst_control_type - the type of ICST control register + */ +enum icst_control_type { + ICST_VERSATILE, /* The standard type, all control bits available */ + ICST_INTEGRATOR_AP_CM, /* Only 8 bits of VDW available */ + ICST_INTEGRATOR_AP_SYS, /* Only 8 bits of VDW available */ + ICST_INTEGRATOR_AP_PCI, /* Odd bit pattern storage */ + ICST_INTEGRATOR_CP_CM_CORE, /* Only 8 bits of VDW and 3 bits of OD */ + ICST_INTEGRATOR_CP_CM_MEM, /* Only 8 bits of VDW and 3 bits of OD */ + ICST_INTEGRATOR_IM_PD1, /* Like the Versatile, all control bits */ +}; + /** * struct clk_icst_desc - descriptor for the ICST VCO * @params: ICST parameters @@ -17,3 +32,10 @@ struct clk *icst_clk_register(struct device *dev, const char *name, const char *parent_name, void __iomem *base); + +struct clk *icst_clk_setup(struct device *dev, + const struct clk_icst_desc *desc, + const char *name, + const char *parent_name, + struct regmap *map, + enum icst_control_type ctype); diff --git a/drivers/clk/versatile/clk-impd1.c b/drivers/clk/versatile/clk-impd1.c index 1991f15a5db9..b05da8516d4c 100644 --- a/drivers/clk/versatile/clk-impd1.c +++ b/drivers/clk/versatile/clk-impd1.c @@ -7,7 +7,11 @@ #include <linux/clkdev.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/platform_device.h> #include <linux/platform_data/clk-integrator.h> +#include <linux/module.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> #include "icst.h" #include "clk-icst.h" @@ -175,3 +179,78 @@ void integrator_impd1_clk_exit(unsigned int id) kfree(imc->pclkname); } EXPORT_SYMBOL_GPL(integrator_impd1_clk_exit); + +static int integrator_impd1_clk_spawn(struct device *dev, + struct device_node *parent, + struct device_node *np) +{ + struct regmap *map; + struct clk *clk = ERR_PTR(-EINVAL); + const char *name = np->name; + const char *parent_name; + const struct clk_icst_desc *desc; + int ret; + + map = syscon_node_to_regmap(parent); + if (IS_ERR(map)) { + pr_err("no regmap for syscon IM-PD1 ICST clock parent\n"); + 
return PTR_ERR(map); + } + + if (of_device_is_compatible(np, "arm,impd1-vco1")) { + desc = &impd1_icst1_desc; + } else if (of_device_is_compatible(np, "arm,impd1-vco2")) { + desc = &impd1_icst2_desc; + } else { + dev_err(dev, "not a clock node %s\n", name); + return -ENODEV; + } + + parent_name = of_clk_get_parent_name(np, 0); + clk = icst_clk_setup(NULL, desc, name, parent_name, map, + ICST_INTEGRATOR_IM_PD1); + if (!IS_ERR(clk)) { + of_clk_add_provider(np, of_clk_src_simple_get, clk); + ret = 0; + } else { + dev_err(dev, "error setting up IM-PD1 ICST clock\n"); + ret = PTR_ERR(clk); + } + + return ret; +} + +static int integrator_impd1_clk_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct device_node *child; + int ret = 0; + + for_each_available_child_of_node(np, child) { + ret = integrator_impd1_clk_spawn(dev, np, child); + if (ret) + break; + } + + return ret; +} + +static const struct of_device_id impd1_syscon_match[] = { + { .compatible = "arm,im-pd1-syscon", }, + {} +}; +MODULE_DEVICE_TABLE(of, impd1_syscon_match); + +static struct platform_driver impd1_clk_driver = { + .driver = { + .name = "impd1-clk", + .of_match_table = impd1_syscon_match, + }, + .probe = integrator_impd1_clk_probe, +}; +builtin_platform_driver(impd1_clk_driver); + +MODULE_AUTHOR("Linus Walleij <linusw@kernel.org>"); +MODULE_DESCRIPTION("Arm IM-PD1 module clock driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/clocksource/timer-vf-pit.c b/drivers/clocksource/timer-vf-pit.c index 7ad4a8b008c2..1a86a4e7e344 100644 --- a/drivers/clocksource/timer-vf-pit.c +++ b/drivers/clocksource/timer-vf-pit.c @@ -129,7 +129,7 @@ static int __init pit_clockevent_init(unsigned long rate, int irq) __raw_writel(PITTFLG_TIF, clkevt_base + PITTFLG); BUG_ON(request_irq(irq, pit_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, - "VF pit timer", &clockevent_pit); + "VF pit timer", &clockevent_pit)); clockevent_pit.cpumask = cpumask_of(0); clockevent_pit.irq = irq; diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig index bff5295016ae..c3e6bd59e920 100644 --- a/drivers/cpufreq/Kconfig +++ b/drivers/cpufreq/Kconfig @@ -37,10 +37,12 @@ config CPU_FREQ_STAT choice prompt "Default CPUFreq governor" default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ + default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if BIG_LITTLE + default CPU_FREQ_DEFAULT_GOV_SCHEDUTIL if X86_INTEL_PSTATE && SMP default CPU_FREQ_DEFAULT_GOV_PERFORMANCE help This option sets which CPUFreq governor shall be loaded at - startup. If in doubt, select 'performance'. + startup. If in doubt, use the default setting. config CPU_FREQ_DEFAULT_GOV_PERFORMANCE bool "performance" diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index 62502d0e4c33..bc58a0809d11 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -8,6 +8,8 @@ config X86_INTEL_PSTATE depends on X86 select ACPI_PROCESSOR if ACPI select ACPI_CPPC_LIB if X86_64 && ACPI && SCHED_MC_PRIO + select CPU_FREQ_GOV_PERFORMANCE + select CPU_FREQ_GOV_SCHEDUTIL if SMP help This driver provides a P state for Intel core processors. 
The driver implements an internal governor and will become diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index 56f4bc0d209e..8646eb197cd9 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -902,6 +902,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = { void powernv_cpufreq_work_fn(struct work_struct *work) { struct chip *chip = container_of(work, struct chip, throttle); + struct cpufreq_policy *policy; unsigned int cpu; cpumask_t mask; @@ -916,12 +917,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work) chip->restore = false; for_each_cpu(cpu, &mask) { int index; - struct cpufreq_policy policy; - cpufreq_get_policy(&policy, cpu); - index = cpufreq_table_find_index_c(&policy, policy.cur); - powernv_cpufreq_target_index(&policy, index); - cpumask_andnot(&mask, &mask, policy.cpus); + policy = cpufreq_cpu_get(cpu); + if (!policy) + continue; + index = cpufreq_table_find_index_c(policy, policy->cur); + powernv_cpufreq_target_index(policy, index); + cpumask_andnot(&mask, &mask, policy->cpus); + cpufreq_cpu_put(policy); } out: put_online_cpus(); @@ -1080,6 +1083,12 @@ free_and_return: static inline void clean_chip_info(void) { + int i; + + /* flush any pending work items */ + if (chips) + for (i = 0; i < nr_chips; i++) + cancel_work_sync(&chips[i].throttle); kfree(chips); } @@ -1108,9 +1117,6 @@ static int __init powernv_cpufreq_init(void) if (rc) goto out; - register_reboot_notifier(&powernv_cpufreq_reboot_nb); - opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); - if (powernv_pstate_info.wof_enabled) powernv_cpufreq_driver.boost_enabled = true; else @@ -1119,15 +1125,17 @@ static int __init powernv_cpufreq_init(void) rc = cpufreq_register_driver(&powernv_cpufreq_driver); if (rc) { pr_info("Failed to register the cpufreq driver (%d)\n", rc); - goto cleanup_notifiers; + goto cleanup; } if (powernv_pstate_info.wof_enabled) cpufreq_enable_boost_support(); + register_reboot_notifier(&powernv_cpufreq_reboot_nb); + opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb); + return 0; -cleanup_notifiers: - unregister_all_notifiers(); +cleanup: clean_chip_info(); out: pr_info("Platform driver disabled. System does not support PState control\n"); diff --git a/drivers/crypto/chelsio/chcr_ktls.c b/drivers/crypto/chelsio/chcr_ktls.c index 73658b71d4a3..cd1769ecdc1c 100644 --- a/drivers/crypto/chelsio/chcr_ktls.c +++ b/drivers/crypto/chelsio/chcr_ktls.c @@ -2,6 +2,7 @@ /* Copyright (C) 2020 Chelsio Communications. All rights reserved. */ #ifdef CONFIG_CHELSIO_TLS_DEVICE +#include <linux/highmem.h> #include "chcr_ktls.h" #include "clip_tbl.h" diff --git a/drivers/crypto/hisilicon/Kconfig b/drivers/crypto/hisilicon/Kconfig index 095850d01dcc..f09c6cf7823e 100644 --- a/drivers/crypto/hisilicon/Kconfig +++ b/drivers/crypto/hisilicon/Kconfig @@ -27,6 +27,7 @@ config CRYPTO_DEV_HISI_SEC2 select CRYPTO_SHA256 select CRYPTO_SHA512 depends on PCI && PCI_MSI + depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) help Support for HiSilicon SEC Engine of version 2 in crypto subsystem. 
@@ -58,6 +59,7 @@ config CRYPTO_DEV_HISI_ZIP config CRYPTO_DEV_HISI_HPRE tristate "Support for HISI HPRE accelerator" depends on PCI && PCI_MSI + depends on UACCE || UACCE=n depends on ARM64 || (COMPILE_TEST && 64BIT) select CRYPTO_DEV_HISI_QM select CRYPTO_DH diff --git a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c index 946fb62949b2..06202bcffb33 100644 --- a/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c +++ b/drivers/crypto/marvell/octeontx/otx_cptvf_algs.c @@ -1161,13 +1161,13 @@ static inline u32 create_aead_null_output_list(struct aead_request *req, inputlen); if (status != inputlen) { status = -EINVAL; - goto error; + goto error_free; } status = sg_copy_from_buffer(req->dst, sg_nents(req->dst), ptr, inputlen); if (status != inputlen) { status = -EINVAL; - goto error; + goto error_free; } kfree(ptr); } @@ -1209,8 +1209,10 @@ static inline u32 create_aead_null_output_list(struct aead_request *req, req_info->outcnt = argcnt; return 0; -error: + +error_free: kfree(ptr); +error: return status; } diff --git a/drivers/dax/bus.c b/drivers/dax/bus.c index 46e46047a1f7..df238c8b6ef2 100644 --- a/drivers/dax/bus.c +++ b/drivers/dax/bus.c @@ -421,8 +421,10 @@ struct dev_dax *__devm_create_dev_dax(struct dax_region *dax_region, int id, * device outside of mmap of the resulting character device. */ dax_dev = alloc_dax(dev_dax, NULL, NULL, DAXDEV_F_SYNC); - if (!dax_dev) + if (IS_ERR(dax_dev)) { + rc = PTR_ERR(dax_dev); goto err; + } /* a device_dax instance is dead while the driver is not attached */ kill_dax(dax_dev); diff --git a/drivers/dax/super.c b/drivers/dax/super.c index 0aa4b6bc5101..8e32345be0f7 100644 --- a/drivers/dax/super.c +++ b/drivers/dax/super.c @@ -344,6 +344,23 @@ size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, } EXPORT_SYMBOL_GPL(dax_copy_to_iter); +int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages) +{ + if (!dax_alive(dax_dev)) + return -ENXIO; + /* + * There are no callers that want to zero more than one page as of now. + * Once users are there, this check can be removed after the + * device mapper code has been updated to split ranges across targets. + */ + if (nr_pages != 1) + return -EIO; + + return dax_dev->ops->zero_page_range(dax_dev, pgoff, nr_pages); +} +EXPORT_SYMBOL_GPL(dax_zero_page_range); + #ifdef CONFIG_ARCH_HAS_PMEM_API void arch_wb_cache_pmem(void *addr, size_t size); void dax_flush(struct dax_device *dax_dev, void *addr, size_t size) @@ -551,9 +568,16 @@ struct dax_device *alloc_dax(void *private, const char *__host, dev_t devt; int minor; + if (ops && !ops->zero_page_range) { + pr_debug("%s: error: device does not provide dax" + " operation zero_page_range()\n", + __host ? 
__host : "Unknown"); + return ERR_PTR(-EINVAL); + } + host = kstrdup(__host, GFP_KERNEL); if (__host && !host) - return NULL; + return ERR_PTR(-ENOMEM); minor = ida_simple_get(&dax_minor_ida, 0, MINORMASK+1, GFP_KERNEL); if (minor < 0) @@ -576,7 +600,7 @@ struct dax_device *alloc_dax(void *private, const char *__host, ida_simple_remove(&dax_minor_ida, minor); err_minor: kfree(host); - return NULL; + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(alloc_dax); diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index ef73b678419c..9626673f1d83 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -43,11 +43,12 @@ config DMABUF_MOVE_NOTIFY bool "Move notify between drivers (EXPERIMENTAL)" default n help - Don''t pin buffers if the dynamic DMA-buf interface is available on both the - exporter as well as the importer. This fixes a security problem where - userspace is able to pin unrestricted amounts of memory through DMA-buf. - But marked experimental because we don''t jet have a consistent execution - context and memory management between drivers. + Don't pin buffers if the dynamic DMA-buf interface is available on + both the exporter as well as the importer. This fixes a security + problem where userspace is able to pin unrestricted amounts of memory + through DMA-buf. + This is marked experimental because we don't yet have a consistent + execution context and memory management between drivers. config DMABUF_SELFTESTS tristate "Selftests for the dma-buf interfaces" diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index db0c1a9c1699..fc9f8ab533a7 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -75,14 +75,12 @@ efi_status_t handle_kernel_image(unsigned long *image_addr, if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && phys_seed != 0) { /* - * If CONFIG_DEBUG_ALIGN_RODATA is not set, produce a - * displacement in the interval [0, MIN_KIMG_ALIGN) that - * doesn't violate this kernel's de-facto alignment + * Produce a displacement in the interval [0, MIN_KIMG_ALIGN) + * that doesn't violate this kernel's de-facto alignment * constraints. */ u32 mask = (MIN_KIMG_ALIGN - 1) & ~(EFI_KIMG_ALIGN - 1); - u32 offset = !IS_ENABLED(CONFIG_DEBUG_ALIGN_RODATA) ? - (phys_seed >> 32) & mask : TEXT_OFFSET; + u32 offset = (phys_seed >> 32) & mask; /* * With CONFIG_RANDOMIZE_TEXT_OFFSET=y, TEXT_OFFSET may not diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index b8013cf90064..1b96169d84f7 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -394,13 +394,13 @@ config GPIO_MVEBU config GPIO_MXC def_bool y - depends on ARCH_MXC + depends on ARCH_MXC || COMPILE_TEST select GPIO_GENERIC select GENERIC_IRQ_CHIP config GPIO_MXS def_bool y - depends on ARCH_MXS + depends on ARCH_MXS || COMPILE_TEST select GPIO_GENERIC select GENERIC_IRQ_CHIP @@ -1399,6 +1399,13 @@ config GPIO_MLXBF help Say Y here if you want GPIO support on Mellanox BlueField SoC. +config GPIO_MLXBF2 + tristate "Mellanox BlueField 2 SoC GPIO" + depends on (MELLANOX_PLATFORM && ARM64 && ACPI) || (64BIT && COMPILE_TEST) + select GPIO_GENERIC + help + Say Y here if you want GPIO support on Mellanox BlueField 2 SoC. 
+ config GPIO_ML_IOH tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support" depends on X86 || COMPILE_TEST diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index 0b571264ddbc..b2cfc21a97f3 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -93,6 +93,7 @@ obj-$(CONFIG_GPIO_MENZ127) += gpio-menz127.o obj-$(CONFIG_GPIO_MERRIFIELD) += gpio-merrifield.o obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o obj-$(CONFIG_GPIO_MLXBF) += gpio-mlxbf.o +obj-$(CONFIG_GPIO_MLXBF2) += gpio-mlxbf2.o obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o obj-$(CONFIG_GPIO_MOCKUP) += gpio-mockup.o obj-$(CONFIG_GPIO_MOXTET) += gpio-moxtet.o diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index 05e3f99ae59c..fcfc1a1f1a5c 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -603,6 +603,49 @@ static const struct dev_pm_ops brcmstb_gpio_pm_ops = { .resume_noirq = brcmstb_gpio_resume, }; +static void brcmstb_gpio_set_names(struct device *dev, + struct brcmstb_gpio_bank *bank) +{ + struct device_node *np = dev->of_node; + const char **names; + int nstrings, base; + unsigned int i; + + base = bank->id * MAX_GPIO_PER_BANK; + + nstrings = of_property_count_strings(np, "gpio-line-names"); + if (nstrings <= base) + /* Line names not present */ + return; + + names = devm_kcalloc(dev, MAX_GPIO_PER_BANK, sizeof(*names), + GFP_KERNEL); + if (!names) + return; + + /* + * Make sure to not index beyond the end of the number of descriptors + * of the GPIO device. + */ + for (i = 0; i < bank->width; i++) { + const char *name; + int ret; + + ret = of_property_read_string_index(np, "gpio-line-names", + base + i, &name); + if (ret) { + if (ret != -ENODATA) + dev_err(dev, "unable to name line %d: %d\n", + base + i, ret); + break; + } + if (*name) + names[i] = name; + } + + bank->gc.names = names; +} + static int brcmstb_gpio_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -726,6 +769,7 @@ static int brcmstb_gpio_probe(struct platform_device *pdev) need_wakeup_event |= !!__brcmstb_gpio_get_active_irqs(bank); gc->write_reg(reg_base + GIO_MASK(bank->id), 0); + brcmstb_gpio_set_names(dev, bank); err = gpiochip_add_data(gc, bank); if (err) { dev_err(dev, "Could not add gpiochip for bank %d\n", diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c index e0b025689625..085b874db2a9 100644 --- a/drivers/gpio/gpio-davinci.c +++ b/drivers/gpio/gpio-davinci.c @@ -259,11 +259,8 @@ static int davinci_gpio_probe(struct platform_device *pdev) chips->chip.of_gpio_n_cells = 2; chips->chip.parent = dev; chips->chip.of_node = dev->of_node; - - if (of_property_read_bool(dev->of_node, "gpio-ranges")) { - chips->chip.request = gpiochip_generic_request; - chips->chip.free = gpiochip_generic_free; - } + chips->chip.request = gpiochip_generic_request; + chips->chip.free = gpiochip_generic_free; #endif spin_lock_init(&chips->lock); diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index bb287f35cf40..8c9757774010 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c @@ -569,6 +569,7 @@ static int sprd_eic_probe(struct platform_device *pdev) const struct sprd_eic_variant_data *pdata; struct gpio_irq_chip *irq; struct sprd_eic *sprd_eic; + struct resource *res; int ret, i; pdata = of_device_get_match_data(&pdev->dev); @@ -595,9 +596,13 @@ static int sprd_eic_probe(struct platform_device *pdev) * have one bank EIC, thus base[1] and base[2] can be * optional. 
*/ - sprd_eic->base[i] = devm_platform_ioremap_resource(pdev, i); - if (IS_ERR(sprd_eic->base[i])) + res = platform_get_resource(pdev, IORESOURCE_MEM, i); + if (!res) continue; + + sprd_eic->base[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sprd_eic->base[i])) + return PTR_ERR(sprd_eic->base[i]); } sprd_eic->chip.label = sprd_eic_label_name[sprd_eic->type]; diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c new file mode 100644 index 000000000000..7b7085050219 --- /dev/null +++ b/drivers/gpio/gpio-mlxbf2.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/acpi.h> +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/gpio/driver.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/resource.h> +#include <linux/spinlock.h> +#include <linux/types.h> +#include <linux/version.h> + +/* + * There are 3 YU GPIO blocks: + * gpio[0]: HOST_GPIO0->HOST_GPIO31 + * gpio[1]: HOST_GPIO32->HOST_GPIO63 + * gpio[2]: HOST_GPIO64->HOST_GPIO69 + */ +#define MLXBF2_GPIO_MAX_PINS_PER_BLOCK 32 + +/* + * arm_gpio_lock register: + * bit[31] lock status: active if set + * bit[15:0] set lock + * The lock is enabled only if 0xd42f is written to this field + */ +#define YU_ARM_GPIO_LOCK_ADDR 0x2801088 +#define YU_ARM_GPIO_LOCK_SIZE 0x8 +#define YU_LOCK_ACTIVE_BIT(val) (val >> 31) +#define YU_ARM_GPIO_LOCK_ACQUIRE 0xd42f +#define YU_ARM_GPIO_LOCK_RELEASE 0x0 + +/* + * gpio[x] block registers and their offset + */ +#define YU_GPIO_DATAIN 0x04 +#define YU_GPIO_MODE1 0x08 +#define YU_GPIO_MODE0 0x0c +#define YU_GPIO_DATASET 0x14 +#define YU_GPIO_DATACLEAR 0x18 +#define YU_GPIO_MODE1_CLEAR 0x50 +#define YU_GPIO_MODE0_SET 0x54 +#define YU_GPIO_MODE0_CLEAR 0x58 + +#ifdef CONFIG_PM +struct mlxbf2_gpio_context_save_regs { + u32 gpio_mode0; + u32 gpio_mode1; +}; +#endif + +/* BlueField-2 gpio block context structure. */ +struct mlxbf2_gpio_context { + struct gpio_chip gc; + + /* YU GPIO blocks address */ + void __iomem *gpio_io; + +#ifdef CONFIG_PM + struct mlxbf2_gpio_context_save_regs *csave_regs; +#endif +}; + +/* BlueField-2 gpio shared structure. 
*/ +struct mlxbf2_gpio_param { + void __iomem *io; + struct resource *res; + struct mutex *lock; +}; + +static struct resource yu_arm_gpio_lock_res = { + .start = YU_ARM_GPIO_LOCK_ADDR, + .end = YU_ARM_GPIO_LOCK_ADDR + YU_ARM_GPIO_LOCK_SIZE - 1, + .name = "YU_ARM_GPIO_LOCK", +}; + +static DEFINE_MUTEX(yu_arm_gpio_lock_mutex); + +static struct mlxbf2_gpio_param yu_arm_gpio_lock_param = { + .res = &yu_arm_gpio_lock_res, + .lock = &yu_arm_gpio_lock_mutex, +}; + +/* Request memory region and map yu_arm_gpio_lock resource */ +static int mlxbf2_gpio_get_lock_res(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct resource *res; + resource_size_t size; + int ret = 0; + + mutex_lock(yu_arm_gpio_lock_param.lock); + + /* Check if the memory map already exists */ + if (yu_arm_gpio_lock_param.io) + goto exit; + + res = yu_arm_gpio_lock_param.res; + size = resource_size(res); + + if (!devm_request_mem_region(dev, res->start, size, res->name)) { + ret = -EFAULT; + goto exit; + } + + yu_arm_gpio_lock_param.io = devm_ioremap(dev, res->start, size); + if (IS_ERR(yu_arm_gpio_lock_param.io)) + ret = PTR_ERR(yu_arm_gpio_lock_param.io); + +exit: + mutex_unlock(yu_arm_gpio_lock_param.lock); + + return ret; +} + +/* + * Acquire the YU arm_gpio_lock to be able to change the direction + * mode. If the lock_active bit is already set, return an error. + */ +static int mlxbf2_gpio_lock_acquire(struct mlxbf2_gpio_context *gs) +{ + u32 arm_gpio_lock_val; + + spin_lock(&gs->gc.bgpio_lock); + mutex_lock(yu_arm_gpio_lock_param.lock); + + arm_gpio_lock_val = readl(yu_arm_gpio_lock_param.io); + + /* + * When lock active bit[31] is set, ModeX is write enabled + */ + if (YU_LOCK_ACTIVE_BIT(arm_gpio_lock_val)) { + mutex_unlock(yu_arm_gpio_lock_param.lock); + spin_unlock(&gs->gc.bgpio_lock); + return -EINVAL; + } + + writel(YU_ARM_GPIO_LOCK_ACQUIRE, yu_arm_gpio_lock_param.io); + + return 0; +} + +/* + * Release the YU arm_gpio_lock after changing the direction mode. + */ +static void mlxbf2_gpio_lock_release(struct mlxbf2_gpio_context *gs) +{ + writel(YU_ARM_GPIO_LOCK_RELEASE, yu_arm_gpio_lock_param.io); + mutex_unlock(yu_arm_gpio_lock_param.lock); + spin_unlock(&gs->gc.bgpio_lock); +} + +/* + * mode0 and mode1 are both locked by the gpio_lock field. + * + * Together, mode0 and mode1 define the gpio Mode dependeing also + * on Reg_DataOut. + * + * {mode1,mode0}:{Reg_DataOut=0,Reg_DataOut=1}->{DataOut=0,DataOut=1} + * + * {0,0}:Reg_DataOut{0,1}->{Z,Z} Input PAD + * {0,1}:Reg_DataOut{0,1}->{0,1} Full drive Output PAD + * {1,0}:Reg_DataOut{0,1}->{0,Z} 0-set PAD to low, 1-float + * {1,1}:Reg_DataOut{0,1}->{Z,1} 0-float, 1-set PAD to high + */ + +/* + * Set input direction: + * {mode1,mode0} = {0,0} + */ +static int mlxbf2_gpio_direction_input(struct gpio_chip *chip, + unsigned int offset) +{ + struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip); + int ret; + + /* + * Although the arm_gpio_lock was set in the probe function, check again + * if it is still enabled to be able to write to the ModeX registers. 
+ */ + ret = mlxbf2_gpio_lock_acquire(gs); + if (ret < 0) + return ret; + + writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_CLEAR); + writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR); + + mlxbf2_gpio_lock_release(gs); + + return ret; +} + +/* + * Set output direction: + * {mode1,mode0} = {0,1} + */ +static int mlxbf2_gpio_direction_output(struct gpio_chip *chip, + unsigned int offset, + int value) +{ + struct mlxbf2_gpio_context *gs = gpiochip_get_data(chip); + int ret = 0; + + /* + * Although the arm_gpio_lock was set in the probe function, + * check again it is still enabled to be able to write to the + * ModeX registers. + */ + ret = mlxbf2_gpio_lock_acquire(gs); + if (ret < 0) + return ret; + + writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE1_CLEAR); + writel(BIT(offset), gs->gpio_io + YU_GPIO_MODE0_SET); + + mlxbf2_gpio_lock_release(gs); + + return ret; +} + +/* BlueField-2 GPIO driver initialization routine. */ +static int +mlxbf2_gpio_probe(struct platform_device *pdev) +{ + struct mlxbf2_gpio_context *gs; + struct device *dev = &pdev->dev; + struct gpio_chip *gc; + struct resource *res; + unsigned int npins; + int ret; + + gs = devm_kzalloc(dev, sizeof(*gs), GFP_KERNEL); + if (!gs) + return -ENOMEM; + + /* YU GPIO block address */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + gs->gpio_io = devm_ioremap(dev, res->start, resource_size(res)); + if (!gs->gpio_io) + return -ENOMEM; + + ret = mlxbf2_gpio_get_lock_res(pdev); + if (ret) { + dev_err(dev, "Failed to get yu_arm_gpio_lock resource\n"); + return ret; + } + + if (device_property_read_u32(dev, "npins", &npins)) + npins = MLXBF2_GPIO_MAX_PINS_PER_BLOCK; + + gc = &gs->gc; + + ret = bgpio_init(gc, dev, 4, + gs->gpio_io + YU_GPIO_DATAIN, + gs->gpio_io + YU_GPIO_DATASET, + gs->gpio_io + YU_GPIO_DATACLEAR, + NULL, + NULL, + 0); + + gc->direction_input = mlxbf2_gpio_direction_input; + gc->direction_output = mlxbf2_gpio_direction_output; + gc->ngpio = npins; + gc->owner = THIS_MODULE; + + platform_set_drvdata(pdev, gs); + + ret = devm_gpiochip_add_data(dev, &gs->gc, gs); + if (ret) { + dev_err(dev, "Failed adding memory mapped gpiochip\n"); + return ret; + } + + return 0; +} + +#ifdef CONFIG_PM +static int mlxbf2_gpio_suspend(struct platform_device *pdev, + pm_message_t state) +{ + struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev); + + gs->csave_regs->gpio_mode0 = readl(gs->gpio_io + + YU_GPIO_MODE0); + gs->csave_regs->gpio_mode1 = readl(gs->gpio_io + + YU_GPIO_MODE1); + + return 0; +} + +static int mlxbf2_gpio_resume(struct platform_device *pdev) +{ + struct mlxbf2_gpio_context *gs = platform_get_drvdata(pdev); + + writel(gs->csave_regs->gpio_mode0, gs->gpio_io + + YU_GPIO_MODE0); + writel(gs->csave_regs->gpio_mode1, gs->gpio_io + + YU_GPIO_MODE1); + + return 0; +} +#endif + +static const struct acpi_device_id mlxbf2_gpio_acpi_match[] = { + { "MLNXBF22", 0 }, + {}, +}; +MODULE_DEVICE_TABLE(acpi, mlxbf2_gpio_acpi_match); + +static struct platform_driver mlxbf2_gpio_driver = { + .driver = { + .name = "mlxbf2_gpio", + .acpi_match_table = ACPI_PTR(mlxbf2_gpio_acpi_match), + }, + .probe = mlxbf2_gpio_probe, +#ifdef CONFIG_PM + .suspend = mlxbf2_gpio_suspend, + .resume = mlxbf2_gpio_resume, +#endif +}; + +module_platform_driver(mlxbf2_gpio_driver); + +MODULE_DESCRIPTION("Mellanox BlueField-2 GPIO Driver"); +MODULE_AUTHOR("Mellanox Technologies"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpio/gpio-mmio.c b/drivers/gpio/gpio-mmio.c index f729e3e9e983..b778f33cc6af 100644 --- 
a/drivers/gpio/gpio-mmio.c +++ b/drivers/gpio/gpio-mmio.c @@ -389,12 +389,10 @@ static int bgpio_get_dir(struct gpio_chip *gc, unsigned int gpio) return GPIO_LINE_DIRECTION_IN; } -static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) +static void bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; - gc->set(gc, gpio, val); - spin_lock_irqsave(&gc->bgpio_lock, flags); gc->bgpio_dir |= bgpio_line2mask(gc, gpio); @@ -405,7 +403,21 @@ static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) gc->write_reg(gc->reg_dir_out, gc->bgpio_dir); spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} +static int bgpio_dir_out_dir_first(struct gpio_chip *gc, unsigned int gpio, + int val) +{ + bgpio_dir_out(gc, gpio, val); + gc->set(gc, gpio, val); + return 0; +} + +static int bgpio_dir_out_val_first(struct gpio_chip *gc, unsigned int gpio, + int val) +{ + gc->set(gc, gpio, val); + bgpio_dir_out(gc, gpio, val); return 0; } @@ -538,7 +550,10 @@ static int bgpio_setup_direction(struct gpio_chip *gc, if (dirout || dirin) { gc->reg_dir_out = dirout; gc->reg_dir_in = dirin; - gc->direction_output = bgpio_dir_out; + if (flags & BGPIOF_NO_SET_ON_INPUT) + gc->direction_output = bgpio_dir_out_dir_first; + else + gc->direction_output = bgpio_dir_out_val_first; gc->direction_input = bgpio_dir_in; gc->get_direction = bgpio_get_dir; } else { diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index 7d343bea784a..3eb94f3740d1 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -171,7 +171,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip, /* Change the value unless we're actively driving the line. */ if (!test_bit(FLAG_REQUESTED, &desc->flags) || - !test_bit(FLAG_IS_OUT, &desc->flags)) + !test_bit(FLAG_IS_OUT, &desc->flags)) __gpio_mockup_set(chip, offset, value); out: diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index b992321bb852..82fb20dca53a 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c @@ -227,8 +227,8 @@ mediatek_gpio_bank_probe(struct device *dev, ctrl = mtk->base + GPIO_REG_DCLR + (rg->bank * GPIO_BANK_STRIDE); diro = mtk->base + GPIO_REG_CTRL + (rg->bank * GPIO_BANK_STRIDE); - ret = bgpio_init(&rg->chip, dev, 4, - dat, set, ctrl, diro, NULL, 0); + ret = bgpio_init(&rg->chip, dev, 4, dat, set, ctrl, diro, NULL, + BGPIOF_NO_SET_ON_INPUT); if (ret) { dev_err(dev, "bgpio_init() failed\n"); return ret; diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index d2b999c7987f..3c9f4fb3d5a2 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -1247,7 +1247,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) * pins. */ for (i = 0; i < 4; i++) { - int irq = platform_get_irq(pdev, i); + int irq = platform_get_irq_optional(pdev, i); if (irq < 0) continue; diff --git a/drivers/gpio/gpio-mxc.c b/drivers/gpio/gpio-mxc.c index c77d474185f3..64278a4756f0 100644 --- a/drivers/gpio/gpio-mxc.c +++ b/drivers/gpio/gpio-mxc.c @@ -485,11 +485,8 @@ static int mxc_gpio_probe(struct platform_device *pdev) if (err) goto out_bgio; - if (of_property_read_bool(np, "gpio-ranges")) { - port->gc.request = gpiochip_generic_request; - port->gc.free = gpiochip_generic_free; - } - + port->gc.request = gpiochip_generic_request; + port->gc.free = gpiochip_generic_free; port->gc.to_irq = mxc_gpio_to_irq; port->gc.base = (pdev->id < 0) ? 
of_alias_get_id(np, "gpio") * 32 : pdev->id * 32; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 3bd8adaeed9e..b8e2ecc3eade 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1102,23 +1102,13 @@ static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context) { struct device *dev = bank->chip.parent; void __iomem *base = bank->base; - u32 mask, nowake; + u32 nowake; bank->saved_datain = readl_relaxed(base + bank->regs->datain); if (!bank->enabled_non_wakeup_gpios) goto update_gpio_context_count; - /* Check for pending EDGE_FALLING, ignore EDGE_BOTH */ - mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect; - mask &= ~bank->context.risingdetect; - bank->saved_datain |= mask; - - /* Check for pending EDGE_RISING, ignore EDGE_BOTH */ - mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect; - mask &= ~bank->context.fallingdetect; - bank->saved_datain &= ~mask; - if (!may_lose_context) goto update_gpio_context_count; @@ -1237,26 +1227,35 @@ static int gpio_omap_cpu_notifier(struct notifier_block *nb, { struct gpio_bank *bank; unsigned long flags; + int ret = NOTIFY_OK; + u32 isr, mask; bank = container_of(nb, struct gpio_bank, nb); raw_spin_lock_irqsave(&bank->lock, flags); + if (bank->is_suspended) + goto out_unlock; + switch (cmd) { case CPU_CLUSTER_PM_ENTER: - if (bank->is_suspended) + mask = omap_get_gpio_irqbank_mask(bank); + isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask; + if (isr) { + ret = NOTIFY_BAD; break; + } omap_gpio_idle(bank, true); break; case CPU_CLUSTER_PM_ENTER_FAILED: case CPU_CLUSTER_PM_EXIT: - if (bank->is_suspended) - break; omap_gpio_unidle(bank); break; } + +out_unlock: raw_spin_unlock_irqrestore(&bank->lock, flags); - return NOTIFY_OK; + return ret; } static const struct omap_gpio_reg_offs omap2_gpio_regs = { diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c index 5df7782e348f..e241fb884c12 100644 --- a/drivers/gpio/gpio-pl061.c +++ b/drivers/gpio/gpio-pl061.c @@ -298,11 +298,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) return PTR_ERR(pl061->base); raw_spin_lock_init(&pl061->lock); - if (of_property_read_bool(dev->of_node, "gpio-ranges")) { - pl061->gc.request = gpiochip_generic_request; - pl061->gc.free = gpiochip_generic_free; - } - + pl061->gc.request = gpiochip_generic_request; + pl061->gc.free = gpiochip_generic_free; pl061->gc.base = -1; pl061->gc.get_direction = pl061_get_direction; pl061->gc.direction_input = pl061_direction_input; @@ -326,10 +323,8 @@ static int pl061_probe(struct amba_device *adev, const struct amba_id *id) writeb(0, pl061->base + GPIOIE); /* disable irqs */ irq = adev->irq[0]; - if (irq < 0) { - dev_err(&adev->dev, "invalid IRQ\n"); - return -ENODEV; - } + if (!irq) + dev_warn(&adev->dev, "IRQ support disabled\n"); pl061->parent_irq = irq; girq = &pl061->gc.irq; diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index 9888b62f37af..1361270ecf8c 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c @@ -361,11 +361,8 @@ static int pxa_init_gpio_chip(struct pxa_gpio_chip *pchip, int ngpio, pchip->chip.set = pxa_gpio_set; pchip->chip.to_irq = pxa_gpio_to_irq; pchip->chip.ngpio = ngpio; - - if (pxa_gpio_has_pinctrl()) { - pchip->chip.request = gpiochip_generic_request; - pchip->chip.free = gpiochip_generic_free; - } + pchip->chip.request = gpiochip_generic_request; + pchip->chip.free = gpiochip_generic_free; #ifdef CONFIG_OF_GPIO pchip->chip.of_node = np; @@ -652,8 +649,8 
@@ static int pxa_gpio_probe(struct platform_device *pdev) if (!pchip->irqdomain) return -ENOMEM; - irq0 = platform_get_irq_byname(pdev, "gpio0"); - irq1 = platform_get_irq_byname(pdev, "gpio1"); + irq0 = platform_get_irq_byname_optional(pdev, "gpio0"); + irq1 = platform_get_irq_byname_optional(pdev, "gpio1"); irq_mux = platform_get_irq_byname(pdev, "gpio_mux"); if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0) || (irq_mux <= 0)) diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c index f800b250971c..7284473c9fe3 100644 --- a/drivers/gpio/gpio-rcar.c +++ b/drivers/gpio/gpio-rcar.c @@ -116,7 +116,7 @@ static void gpio_rcar_config_interrupt_input_mode(struct gpio_rcar_priv *p, spin_lock_irqsave(&p->lock, flags); - /* Configure postive or negative logic in POSNEG */ + /* Configure positive or negative logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, hwirq, !active_high_rising_edge); /* Configure edge or level trigger in EDGLEVEL */ @@ -228,7 +228,7 @@ static void gpio_rcar_config_general_input_output_mode(struct gpio_chip *chip, spin_lock_irqsave(&p->lock, flags); - /* Configure postive logic in POSNEG */ + /* Configure positive logic in POSNEG */ gpio_rcar_modify_bit(p, POSNEG, gpio, false); /* Select "General Input/Output Mode" in IOINTSEL */ diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c index 311f66757b92..26e1fe092304 100644 --- a/drivers/gpio/gpio-siox.c +++ b/drivers/gpio/gpio-siox.c @@ -15,7 +15,7 @@ struct gpio_siox_ddata { u8 setdata[1]; u8 getdata[3]; - spinlock_t irqlock; + raw_spinlock_t irqlock; u32 irq_enable; u32 irq_status; u32 irq_type[20]; @@ -44,7 +44,7 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[]) mutex_lock(&ddata->lock); - spin_lock_irq(&ddata->irqlock); + raw_spin_lock_irq(&ddata->irqlock); for (offset = 0; offset < 12; ++offset) { unsigned int bitpos = 11 - offset; @@ -66,7 +66,7 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[]) trigger = ddata->irq_status & ddata->irq_enable; - spin_unlock_irq(&ddata->irqlock); + raw_spin_unlock_irq(&ddata->irqlock); ddata->getdata[0] = buf[0]; ddata->getdata[1] = buf[1]; @@ -84,9 +84,9 @@ static int gpio_siox_get_data(struct siox_device *sdevice, const u8 buf[]) * handler of the irq chip. But it doesn't, so we have * to clean the irq_status here. 
*/ - spin_lock_irq(&ddata->irqlock); + raw_spin_lock_irq(&ddata->irqlock); ddata->irq_status &= ~(1 << offset); - spin_unlock_irq(&ddata->irqlock); + raw_spin_unlock_irq(&ddata->irqlock); handle_nested_irq(irq); } @@ -101,9 +101,9 @@ static void gpio_siox_irq_ack(struct irq_data *d) struct gpio_siox_ddata *ddata = container_of(ic, struct gpio_siox_ddata, ichip); - spin_lock_irq(&ddata->irqlock); + raw_spin_lock(&ddata->irqlock); ddata->irq_status &= ~(1 << d->hwirq); - spin_unlock_irq(&ddata->irqlock); + raw_spin_unlock(&ddata->irqlock); } static void gpio_siox_irq_mask(struct irq_data *d) @@ -112,9 +112,9 @@ static void gpio_siox_irq_mask(struct irq_data *d) struct gpio_siox_ddata *ddata = container_of(ic, struct gpio_siox_ddata, ichip); - spin_lock_irq(&ddata->irqlock); + raw_spin_lock(&ddata->irqlock); ddata->irq_enable &= ~(1 << d->hwirq); - spin_unlock_irq(&ddata->irqlock); + raw_spin_unlock(&ddata->irqlock); } static void gpio_siox_irq_unmask(struct irq_data *d) @@ -123,9 +123,9 @@ static void gpio_siox_irq_unmask(struct irq_data *d) struct gpio_siox_ddata *ddata = container_of(ic, struct gpio_siox_ddata, ichip); - spin_lock_irq(&ddata->irqlock); + raw_spin_lock(&ddata->irqlock); ddata->irq_enable |= 1 << d->hwirq; - spin_unlock_irq(&ddata->irqlock); + raw_spin_unlock(&ddata->irqlock); } static int gpio_siox_irq_set_type(struct irq_data *d, u32 type) @@ -134,9 +134,9 @@ static int gpio_siox_irq_set_type(struct irq_data *d, u32 type) struct gpio_siox_ddata *ddata = container_of(ic, struct gpio_siox_ddata, ichip); - spin_lock_irq(&ddata->irqlock); + raw_spin_lock(&ddata->irqlock); ddata->irq_type[d->hwirq] = type; - spin_unlock_irq(&ddata->irqlock); + raw_spin_unlock(&ddata->irqlock); return 0; } @@ -222,7 +222,7 @@ static int gpio_siox_probe(struct siox_device *sdevice) dev_set_drvdata(dev, ddata); mutex_init(&ddata->lock); - spin_lock_init(&ddata->irqlock); + raw_spin_lock_init(&ddata->irqlock); ddata->gchip.base = -1; ddata->gchip.can_sleep = 1; diff --git a/drivers/gpio/gpio-tegra186.c b/drivers/gpio/gpio-tegra186.c index de241263d4be..79b553dc39a3 100644 --- a/drivers/gpio/gpio-tegra186.c +++ b/drivers/gpio/gpio-tegra186.c @@ -58,11 +58,20 @@ struct tegra_gpio_port { unsigned int pins; }; +struct tegra186_pin_range { + unsigned int offset; + const char *group; +}; + struct tegra_gpio_soc { const struct tegra_gpio_port *ports; unsigned int num_ports; const char *name; unsigned int instance; + + const struct tegra186_pin_range *pin_ranges; + unsigned int num_pin_ranges; + const char *pinmux; }; struct tegra_gpio { @@ -254,6 +263,50 @@ static int tegra186_gpio_set_config(struct gpio_chip *chip, return 0; } +static int tegra186_gpio_add_pin_ranges(struct gpio_chip *chip) +{ + struct tegra_gpio *gpio = gpiochip_get_data(chip); + struct pinctrl_dev *pctldev; + struct device_node *np; + unsigned int i, j; + int err; + + if (!gpio->soc->pinmux || gpio->soc->num_pin_ranges == 0) + return 0; + + np = of_find_compatible_node(NULL, NULL, gpio->soc->pinmux); + if (!np) + return -ENODEV; + + pctldev = of_pinctrl_get(np); + of_node_put(np); + if (!pctldev) + return -EPROBE_DEFER; + + for (i = 0; i < gpio->soc->num_pin_ranges; i++) { + unsigned int pin = gpio->soc->pin_ranges[i].offset, port; + const char *group = gpio->soc->pin_ranges[i].group; + + port = pin / 8; + pin = pin % 8; + + if (port >= gpio->soc->num_ports) { + dev_warn(chip->parent, "invalid port %u for %s\n", + port, group); + continue; + } + + for (j = 0; j < port; j++) + pin += gpio->soc->ports[j].pins; + + err = 
gpiochip_add_pingroup_range(chip, pctldev, pin, group); + if (err < 0) + return err; + } + + return 0; +} + static int tegra186_gpio_of_xlate(struct gpio_chip *chip, const struct of_phandle_args *spec, u32 *flags) @@ -578,12 +631,15 @@ static int tegra186_gpio_probe(struct platform_device *pdev) gpio->gpio.label = gpio->soc->name; gpio->gpio.parent = &pdev->dev; + gpio->gpio.request = gpiochip_generic_request; + gpio->gpio.free = gpiochip_generic_free; gpio->gpio.get_direction = tegra186_gpio_get_direction; gpio->gpio.direction_input = tegra186_gpio_direction_input; gpio->gpio.direction_output = tegra186_gpio_direction_output; gpio->gpio.get = tegra186_gpio_get, gpio->gpio.set = tegra186_gpio_set; gpio->gpio.set_config = tegra186_gpio_set_config; + gpio->gpio.add_pin_ranges = tegra186_gpio_add_pin_ranges; gpio->gpio.base = -1; @@ -783,11 +839,19 @@ static const struct tegra_gpio_port tegra194_main_ports[] = { TEGRA194_MAIN_GPIO_PORT(GG, 0, 0, 2) }; +static const struct tegra186_pin_range tegra194_main_pin_ranges[] = { + { TEGRA194_MAIN_GPIO(GG, 0), "pex_l5_clkreq_n_pgg0" }, + { TEGRA194_MAIN_GPIO(GG, 1), "pex_l5_rst_n_pgg1" }, +}; + static const struct tegra_gpio_soc tegra194_main_soc = { .num_ports = ARRAY_SIZE(tegra194_main_ports), .ports = tegra194_main_ports, .name = "tegra194-gpio", .instance = 0, + .num_pin_ranges = ARRAY_SIZE(tegra194_main_pin_ranges), + .pin_ranges = tegra194_main_pin_ranges, + .pinmux = "nvidia,tegra194-pinmux", }; #define TEGRA194_AON_GPIO_PORT(_name, _bank, _port, _pins) \ diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c index 7ec97499b7f7..f99f3c10bed0 100644 --- a/drivers/gpio/gpio-uniphier.c +++ b/drivers/gpio/gpio-uniphier.c @@ -30,7 +30,7 @@ struct uniphier_gpio_priv { struct irq_domain *domain; void __iomem *regs; spinlock_t lock; - u32 saved_vals[0]; + u32 saved_vals[]; }; static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank) diff --git a/drivers/gpio/gpio-wcd934x.c b/drivers/gpio/gpio-wcd934x.c index 74913f2e5697..1cbce5990855 100644 --- a/drivers/gpio/gpio-wcd934x.c +++ b/drivers/gpio/gpio-wcd934x.c @@ -57,16 +57,19 @@ static int wcd_gpio_direction_output(struct gpio_chip *chip, unsigned int pin, static int wcd_gpio_get(struct gpio_chip *chip, unsigned int pin) { struct wcd_gpio_data *data = gpiochip_get_data(chip); - int value; + unsigned int value; regmap_read(data->map, WCD_REG_VAL_CTL_OFFSET, &value); - return !!(value && WCD_PIN_MASK(pin)); + return !!(value & WCD_PIN_MASK(pin)); } static void wcd_gpio_set(struct gpio_chip *chip, unsigned int pin, int val) { - wcd_gpio_direction_output(chip, pin, val); + struct wcd_gpio_data *data = gpiochip_get_data(chip); + + regmap_update_bits(data->map, WCD_REG_VAL_CTL_OFFSET, + WCD_PIN_MASK(pin), val ? 
WCD_PIN_MASK(pin) : 0); } static int wcd_gpio_probe(struct platform_device *pdev) diff --git a/drivers/gpio/gpio-zx.c b/drivers/gpio/gpio-zx.c index 98cbaf0e415e..64bfb722756a 100644 --- a/drivers/gpio/gpio-zx.c +++ b/drivers/gpio/gpio-zx.c @@ -226,13 +226,11 @@ static int zx_gpio_probe(struct platform_device *pdev) if (IS_ERR(chip->base)) return PTR_ERR(chip->base); - raw_spin_lock_init(&chip->lock); - if (of_property_read_bool(dev->of_node, "gpio-ranges")) { - chip->gc.request = gpiochip_generic_request; - chip->gc.free = gpiochip_generic_free; - } - id = of_alias_get_id(dev->of_node, "gpio"); + + raw_spin_lock_init(&chip->lock); + chip->gc.request = gpiochip_generic_request; + chip->gc.free = gpiochip_generic_free; chip->gc.direction_input = zx_direction_input; chip->gc.direction_output = zx_direction_output; chip->gc.get = zx_get_value; diff --git a/drivers/gpio/gpiolib-devres.c b/drivers/gpio/gpiolib-devres.c index 72b6001c56ef..5c91c4365da1 100644 --- a/drivers/gpio/gpiolib-devres.c +++ b/drivers/gpio/gpiolib-devres.c @@ -478,3 +478,49 @@ void devm_gpio_free(struct device *dev, unsigned int gpio) &gpio)); } EXPORT_SYMBOL_GPL(devm_gpio_free); + +static void devm_gpio_chip_release(struct device *dev, void *res) +{ + struct gpio_chip *gc = *(struct gpio_chip **)res; + + gpiochip_remove(gc); +} + +/** + * devm_gpiochip_add_data() - Resource managed gpiochip_add_data() + * @dev: pointer to the device that gpio_chip belongs to. + * @gc: the GPIO chip to register + * @data: driver-private data associated with this chip + * + * Context: potentially before irqs will work + * + * The gpio chip automatically be released when the device is unbound. + * + * Returns: + * A negative errno if the chip can't be registered, such as because the + * gc->base is invalid or already associated with a different chip. + * Otherwise it returns zero as a success code. + */ +int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *gc, + void *data) +{ + struct gpio_chip **ptr; + int ret; + + ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr), + GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = gpiochip_add_data(gc, data); + if (ret < 0) { + devres_free(ptr); + return ret; + } + + *ptr = gc; + devres_add(dev, ptr); + + return 0; +} +EXPORT_SYMBOL_GPL(devm_gpiochip_add_data); diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index c6d30f73df07..ccc449df3792 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -605,6 +605,39 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, } /** + * of_gpiochip_add_hog - Add all hogs in a hog device node + * @chip: gpio chip to act on + * @hog: device node describing the hogs + * + * Returns error if it fails otherwise 0 on success. 
+ */ +static int of_gpiochip_add_hog(struct gpio_chip *chip, struct device_node *hog) +{ + enum gpiod_flags dflags; + struct gpio_desc *desc; + unsigned long lflags; + const char *name; + unsigned int i; + int ret; + + for (i = 0;; i++) { + desc = of_parse_own_gpio(hog, chip, i, &name, &lflags, &dflags); + if (IS_ERR(desc)) + break; + + ret = gpiod_hog(desc, name, lflags, dflags); + if (ret < 0) + return ret; + +#ifdef CONFIG_OF_DYNAMIC + desc->hog = hog; +#endif + } + + return 0; +} + +/** * of_gpiochip_scan_gpios - Scan gpio-controller for gpio definitions * @chip: gpio chip to act on * @@ -614,35 +647,109 @@ static struct gpio_desc *of_parse_own_gpio(struct device_node *np, */ static int of_gpiochip_scan_gpios(struct gpio_chip *chip) { - struct gpio_desc *desc = NULL; struct device_node *np; - const char *name; - unsigned long lflags; - enum gpiod_flags dflags; - unsigned int i; int ret; for_each_available_child_of_node(chip->of_node, np) { if (!of_property_read_bool(np, "gpio-hog")) continue; - for (i = 0;; i++) { - desc = of_parse_own_gpio(np, chip, i, &name, &lflags, - &dflags); - if (IS_ERR(desc)) - break; - - ret = gpiod_hog(desc, name, lflags, dflags); - if (ret < 0) { - of_node_put(np); - return ret; - } + ret = of_gpiochip_add_hog(chip, np); + if (ret < 0) { + of_node_put(np); + return ret; } + + of_node_set_flag(np, OF_POPULATED); } return 0; } +#ifdef CONFIG_OF_DYNAMIC +/** + * of_gpiochip_remove_hog - Remove all hogs in a hog device node + * @chip: gpio chip to act on + * @hog: device node describing the hogs + */ +static void of_gpiochip_remove_hog(struct gpio_chip *chip, + struct device_node *hog) +{ + struct gpio_desc *descs = chip->gpiodev->descs; + unsigned int i; + + for (i = 0; i < chip->ngpio; i++) { + if (test_bit(FLAG_IS_HOGGED, &descs[i].flags) && + descs[i].hog == hog) + gpiochip_free_own_desc(&descs[i]); + } +} + +static int of_gpiochip_match_node(struct gpio_chip *chip, void *data) +{ + return chip->gpiodev->dev.of_node == data; +} + +static struct gpio_chip *of_find_gpiochip_by_node(struct device_node *np) +{ + return gpiochip_find(np, of_gpiochip_match_node); +} + +static int of_gpio_notify(struct notifier_block *nb, unsigned long action, + void *arg) +{ + struct of_reconfig_data *rd = arg; + struct gpio_chip *chip; + int ret; + + /* + * This only supports adding and removing complete gpio-hog nodes. + * Modifying an existing gpio-hog node is not supported (except for + * changing its "status" property, which is treated the same as + * addition/removal). 
+ */ + switch (of_reconfig_get_state_change(action, arg)) { + case OF_RECONFIG_CHANGE_ADD: + if (!of_property_read_bool(rd->dn, "gpio-hog")) + return NOTIFY_OK; /* not for us */ + + if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; + + chip = of_find_gpiochip_by_node(rd->dn->parent); + if (chip == NULL) + return NOTIFY_OK; /* not for us */ + + ret = of_gpiochip_add_hog(chip, rd->dn); + if (ret < 0) { + pr_err("%s: failed to add hogs for %pOF\n", __func__, + rd->dn); + of_node_clear_flag(rd->dn, OF_POPULATED); + return notifier_from_errno(ret); + } + break; + + case OF_RECONFIG_CHANGE_REMOVE: + if (!of_node_check_flag(rd->dn, OF_POPULATED)) + return NOTIFY_OK; /* already depopulated */ + + chip = of_find_gpiochip_by_node(rd->dn->parent); + if (chip == NULL) + return NOTIFY_OK; /* not for us */ + + of_gpiochip_remove_hog(chip, rd->dn); + of_node_clear_flag(rd->dn, OF_POPULATED); + break; + } + + return NOTIFY_OK; +} + +struct notifier_block gpio_of_notifier = { + .notifier_call = of_gpio_notify, +}; +#endif /* CONFIG_OF_DYNAMIC */ + /** * of_gpio_simple_xlate - translate gpiospec to the GPIO number and flags * @gc: pointer to the gpio_chip structure diff --git a/drivers/gpio/gpiolib-of.h b/drivers/gpio/gpiolib-of.h index 9768831b1fe2..ed26664f1537 100644 --- a/drivers/gpio/gpiolib-of.h +++ b/drivers/gpio/gpiolib-of.h @@ -35,4 +35,6 @@ static inline bool of_gpio_need_valid_mask(const struct gpio_chip *gc) } #endif /* CONFIG_OF_GPIO */ +extern struct notifier_block gpio_of_notifier; + #endif /* GPIOLIB_OF_H */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 86bde1e30a60..40f2d7f69be2 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -81,14 +81,14 @@ LIST_HEAD(gpio_devices); static DEFINE_MUTEX(gpio_machine_hogs_mutex); static LIST_HEAD(gpio_machine_hogs); -static void gpiochip_free_hogs(struct gpio_chip *chip); -static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, +static void gpiochip_free_hogs(struct gpio_chip *gc); +static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key); -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip); -static int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip); -static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip); -static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip); +static void gpiochip_irqchip_remove(struct gpio_chip *gc); +static int gpiochip_irqchip_init_hw(struct gpio_chip *gc); +static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc); +static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc); static bool gpiolib_initialized; @@ -132,17 +132,17 @@ EXPORT_SYMBOL_GPL(gpio_to_desc); /** * gpiochip_get_desc - get the GPIO descriptor corresponding to the given * hardware number for this chip - * @chip: GPIO chip + * @gc: GPIO chip * @hwnum: hardware number of the GPIO for this chip * * Returns: - * A pointer to the GPIO descriptor or %ERR_PTR(-EINVAL) if no GPIO exists + * A pointer to the GPIO descriptor or ``ERR_PTR(-EINVAL)`` if no GPIO exists * in the given chip for the specified hardware number. 
*/ -struct gpio_desc *gpiochip_get_desc(struct gpio_chip *chip, +struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum) { - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; if (hwnum >= gdev->ngpio) return ERR_PTR(-EINVAL); @@ -214,11 +214,11 @@ static int gpiochip_find_base(int ngpio) */ int gpiod_get_direction(struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; unsigned offset; int ret; - chip = gpiod_to_chip(desc); + gc = gpiod_to_chip(desc); offset = gpio_chip_hwgpio(desc); /* @@ -229,10 +229,10 @@ int gpiod_get_direction(struct gpio_desc *desc) test_bit(FLAG_IS_OUT, &desc->flags)) return 0; - if (!chip->get_direction) + if (!gc->get_direction) return -ENOTSUPP; - ret = chip->get_direction(chip, offset); + ret = gc->get_direction(gc, offset); if (ret < 0) return ret; @@ -302,6 +302,9 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) struct gpio_device *gdev; unsigned long flags; + if (!name) + return NULL; + spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) { @@ -310,7 +313,7 @@ static struct gpio_desc *gpio_name_to_desc(const char * const name) for (i = 0; i != gdev->ngpio; ++i) { struct gpio_desc *desc = &gdev->descs[i]; - if (!desc->name || !name) + if (!desc->name) continue; if (!strcmp(desc->name, name)) { @@ -357,16 +360,16 @@ static int gpiochip_set_desc_names(struct gpio_chip *gc) return 0; } -static unsigned long *gpiochip_allocate_mask(struct gpio_chip *chip) +static unsigned long *gpiochip_allocate_mask(struct gpio_chip *gc) { unsigned long *p; - p = bitmap_alloc(chip->ngpio, GFP_KERNEL); + p = bitmap_alloc(gc->ngpio, GFP_KERNEL); if (!p) return NULL; /* Assume by default all GPIOs are valid */ - bitmap_fill(p, chip->ngpio); + bitmap_fill(p, gc->ngpio); return p; } @@ -393,10 +396,10 @@ static int gpiochip_init_valid_mask(struct gpio_chip *gc) return 0; } -static void gpiochip_free_valid_mask(struct gpio_chip *gpiochip) +static void gpiochip_free_valid_mask(struct gpio_chip *gc) { - bitmap_free(gpiochip->valid_mask); - gpiochip->valid_mask = NULL; + bitmap_free(gc->valid_mask); + gc->valid_mask = NULL; } static int gpiochip_add_pin_ranges(struct gpio_chip *gc) @@ -407,13 +410,13 @@ static int gpiochip_add_pin_ranges(struct gpio_chip *gc) return 0; } -bool gpiochip_line_is_valid(const struct gpio_chip *gpiochip, +bool gpiochip_line_is_valid(const struct gpio_chip *gc, unsigned int offset) { /* No mask means all valid */ - if (likely(!gpiochip->valid_mask)) + if (likely(!gc->valid_mask)) return true; - return test_bit(offset, gpiochip->valid_mask); + return test_bit(offset, gc->valid_mask); } EXPORT_SYMBOL_GPL(gpiochip_line_is_valid); @@ -547,6 +550,9 @@ static long linehandle_set_config(struct linehandle_state *lh, if (ret) return ret; } + + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_CONFIG, desc); } return 0; } @@ -788,8 +794,6 @@ out_free_lh: * @irq: the interrupt that trigger in response to events on this GPIO * @wait: wait queue that handles blocking reads of events * @events: KFIFO for the GPIO events - * @read_lock: mutex lock to protect reads from colliding with adding - * new events to the FIFO * @timestamp: cache for the timestamp storing it between hardirq * and IRQ thread, used to bring the timestamp close to the actual * event @@ -802,7 +806,6 @@ struct lineevent_state { int irq; wait_queue_head_t wait; DECLARE_KFIFO(events, struct gpioevent_data, 16); - struct mutex read_lock; u64 timestamp; }; @@ 
-818,7 +821,7 @@ static __poll_t lineevent_poll(struct file *filep, poll_wait(filep, &le->wait, wait); - if (!kfifo_is_empty(&le->events)) + if (!kfifo_is_empty_spinlocked_noirqsave(&le->events, &le->wait.lock)) events = EPOLLIN | EPOLLRDNORM; return events; @@ -831,43 +834,52 @@ static ssize_t lineevent_read(struct file *filep, loff_t *f_ps) { struct lineevent_state *le = filep->private_data; - unsigned int copied; + struct gpioevent_data ge; + ssize_t bytes_read = 0; int ret; - if (count < sizeof(struct gpioevent_data)) + if (count < sizeof(ge)) return -EINVAL; do { + spin_lock(&le->wait.lock); if (kfifo_is_empty(&le->events)) { - if (filep->f_flags & O_NONBLOCK) + if (bytes_read) { + spin_unlock(&le->wait.lock); + return bytes_read; + } + + if (filep->f_flags & O_NONBLOCK) { + spin_unlock(&le->wait.lock); return -EAGAIN; + } - ret = wait_event_interruptible(le->wait, + ret = wait_event_interruptible_locked(le->wait, !kfifo_is_empty(&le->events)); - if (ret) + if (ret) { + spin_unlock(&le->wait.lock); return ret; + } } - if (mutex_lock_interruptible(&le->read_lock)) - return -ERESTARTSYS; - ret = kfifo_to_user(&le->events, buf, count, &copied); - mutex_unlock(&le->read_lock); - - if (ret) - return ret; - - /* - * If we couldn't read anything from the fifo (a different - * thread might have been faster) we either return -EAGAIN if - * the file descriptor is non-blocking, otherwise we go back to - * sleep and wait for more data to arrive. - */ - if (copied == 0 && (filep->f_flags & O_NONBLOCK)) - return -EAGAIN; + ret = kfifo_out(&le->events, &ge, 1); + spin_unlock(&le->wait.lock); + if (ret != 1) { + /* + * This should never happen - we were holding the lock + * from the moment we learned the fifo is no longer + * empty until now. + */ + ret = -EIO; + break; + } - } while (copied == 0); + if (copy_to_user(buf + bytes_read, &ge, sizeof(ge))) + return -EFAULT; + bytes_read += sizeof(ge); + } while (count >= bytes_read + sizeof(ge)); - return copied; + return bytes_read; } static int lineevent_release(struct inode *inode, struct file *filep) @@ -946,7 +958,7 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) * we didn't get the timestamp from lineevent_irq_handler(). */ if (!le->timestamp) - ge.timestamp = ktime_get_real_ns(); + ge.timestamp = ktime_get_ns(); else ge.timestamp = le->timestamp; @@ -969,9 +981,12 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) return IRQ_NONE; } - ret = kfifo_put(&le->events, ge); + ret = kfifo_in_spinlocked_noirqsave(&le->events, &ge, + 1, &le->wait.lock); if (ret) wake_up_poll(&le->wait, EPOLLIN); + else + pr_debug_ratelimited("event FIFO is full - event dropped\n"); return IRQ_HANDLED; } @@ -984,7 +999,7 @@ static irqreturn_t lineevent_irq_handler(int irq, void *p) * Just store the timestamp in hardirq context so we get it as * close in time as possible to the actual event. 
*/ - le->timestamp = ktime_get_real_ns(); + le->timestamp = ktime_get_ns(); return IRQ_WAKE_THREAD; } @@ -1084,7 +1099,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip) INIT_KFIFO(le->events); init_waitqueue_head(&le->wait); - mutex_init(&le->read_lock); /* Request a thread to read the events */ ret = request_threaded_irq(le->irq, @@ -1140,17 +1154,82 @@ out_free_le: return ret; } +static void gpio_desc_to_lineinfo(struct gpio_desc *desc, + struct gpioline_info *info) +{ + struct gpio_chip *gc = desc->gdev->chip; + unsigned long flags; + + spin_lock_irqsave(&gpio_lock, flags); + + if (desc->name) { + strncpy(info->name, desc->name, sizeof(info->name)); + info->name[sizeof(info->name) - 1] = '\0'; + } else { + info->name[0] = '\0'; + } + + if (desc->label) { + strncpy(info->consumer, desc->label, sizeof(info->consumer)); + info->consumer[sizeof(info->consumer) - 1] = '\0'; + } else { + info->consumer[0] = '\0'; + } + + /* + * Userspace only need to know that the kernel is using this GPIO so + * it can't use it. + */ + info->flags = 0; + if (test_bit(FLAG_REQUESTED, &desc->flags) || + test_bit(FLAG_IS_HOGGED, &desc->flags) || + test_bit(FLAG_USED_AS_IRQ, &desc->flags) || + test_bit(FLAG_EXPORT, &desc->flags) || + test_bit(FLAG_SYSFS, &desc->flags) || + !pinctrl_gpio_can_use_line(gc->base + info->line_offset)) + info->flags |= GPIOLINE_FLAG_KERNEL; + if (test_bit(FLAG_IS_OUT, &desc->flags)) + info->flags |= GPIOLINE_FLAG_IS_OUT; + if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) + info->flags |= GPIOLINE_FLAG_ACTIVE_LOW; + if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) + info->flags |= (GPIOLINE_FLAG_OPEN_DRAIN | + GPIOLINE_FLAG_IS_OUT); + if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) + info->flags |= (GPIOLINE_FLAG_OPEN_SOURCE | + GPIOLINE_FLAG_IS_OUT); + if (test_bit(FLAG_BIAS_DISABLE, &desc->flags)) + info->flags |= GPIOLINE_FLAG_BIAS_DISABLE; + if (test_bit(FLAG_PULL_DOWN, &desc->flags)) + info->flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN; + if (test_bit(FLAG_PULL_UP, &desc->flags)) + info->flags |= GPIOLINE_FLAG_BIAS_PULL_UP; + + spin_unlock_irqrestore(&gpio_lock, flags); +} + +struct gpio_chardev_data { + struct gpio_device *gdev; + wait_queue_head_t wait; + DECLARE_KFIFO(events, struct gpioline_info_changed, 32); + struct notifier_block lineinfo_changed_nb; + unsigned long *watched_lines; +}; + /* * gpio_ioctl() - ioctl handler for the GPIO chardev */ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { - struct gpio_device *gdev = filp->private_data; - struct gpio_chip *chip = gdev->chip; + struct gpio_chardev_data *priv = filp->private_data; + struct gpio_device *gdev = priv->gdev; + struct gpio_chip *gc = gdev->chip; void __user *ip = (void __user *)arg; + struct gpio_desc *desc; + __u32 offset; /* We fail any subsequent ioctl():s when the chip is gone */ - if (!chip) + if (!gc) return -ENODEV; /* Fill in the struct and pass to userspace */ @@ -1169,68 +1248,40 @@ static long gpio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if (copy_to_user(ip, &chipinfo, sizeof(chipinfo))) return -EFAULT; return 0; - } else if (cmd == GPIO_GET_LINEINFO_IOCTL) { + } else if (cmd == GPIO_GET_LINEINFO_IOCTL || + cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { struct gpioline_info lineinfo; - struct gpio_desc *desc; if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) return -EFAULT; - desc = gpiochip_get_desc(chip, lineinfo.line_offset); + desc = gpiochip_get_desc(gc, lineinfo.line_offset); if (IS_ERR(desc)) return PTR_ERR(desc); - if (desc->name) { 
- strncpy(lineinfo.name, desc->name, - sizeof(lineinfo.name)); - lineinfo.name[sizeof(lineinfo.name)-1] = '\0'; - } else { - lineinfo.name[0] = '\0'; - } - if (desc->label) { - strncpy(lineinfo.consumer, desc->label, - sizeof(lineinfo.consumer)); - lineinfo.consumer[sizeof(lineinfo.consumer)-1] = '\0'; - } else { - lineinfo.consumer[0] = '\0'; - } - - /* - * Userspace only need to know that the kernel is using - * this GPIO so it can't use it. - */ - lineinfo.flags = 0; - if (test_bit(FLAG_REQUESTED, &desc->flags) || - test_bit(FLAG_IS_HOGGED, &desc->flags) || - test_bit(FLAG_USED_AS_IRQ, &desc->flags) || - test_bit(FLAG_EXPORT, &desc->flags) || - test_bit(FLAG_SYSFS, &desc->flags) || - !pinctrl_gpio_can_use_line(chip->base + lineinfo.line_offset)) - lineinfo.flags |= GPIOLINE_FLAG_KERNEL; - if (test_bit(FLAG_IS_OUT, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_IS_OUT; - if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_ACTIVE_LOW; - if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) - lineinfo.flags |= (GPIOLINE_FLAG_OPEN_DRAIN | - GPIOLINE_FLAG_IS_OUT); - if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) - lineinfo.flags |= (GPIOLINE_FLAG_OPEN_SOURCE | - GPIOLINE_FLAG_IS_OUT); - if (test_bit(FLAG_BIAS_DISABLE, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_BIAS_DISABLE; - if (test_bit(FLAG_PULL_DOWN, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_DOWN; - if (test_bit(FLAG_PULL_UP, &desc->flags)) - lineinfo.flags |= GPIOLINE_FLAG_BIAS_PULL_UP; + gpio_desc_to_lineinfo(desc, &lineinfo); if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) return -EFAULT; + + if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) + set_bit(gpio_chip_hwgpio(desc), priv->watched_lines); + return 0; } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { return linehandle_create(gdev, ip); } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { return lineevent_create(gdev, ip); + } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) { + if (copy_from_user(&offset, ip, sizeof(offset))) + return -EFAULT; + + desc = gpiochip_get_desc(gc, offset); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + clear_bit(gpio_chip_hwgpio(desc), priv->watched_lines); + return 0; } return -EINVAL; } @@ -1243,6 +1294,101 @@ static long gpio_ioctl_compat(struct file *filp, unsigned int cmd, } #endif +static struct gpio_chardev_data * +to_gpio_chardev_data(struct notifier_block *nb) +{ + return container_of(nb, struct gpio_chardev_data, lineinfo_changed_nb); +} + +static int lineinfo_changed_notify(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct gpio_chardev_data *priv = to_gpio_chardev_data(nb); + struct gpioline_info_changed chg; + struct gpio_desc *desc = data; + int ret; + + if (!test_bit(gpio_chip_hwgpio(desc), priv->watched_lines)) + return NOTIFY_DONE; + + memset(&chg, 0, sizeof(chg)); + chg.info.line_offset = gpio_chip_hwgpio(desc); + chg.event_type = action; + chg.timestamp = ktime_get_ns(); + gpio_desc_to_lineinfo(desc, &chg.info); + + ret = kfifo_in_spinlocked(&priv->events, &chg, 1, &priv->wait.lock); + if (ret) + wake_up_poll(&priv->wait, EPOLLIN); + else + pr_debug_ratelimited("lineinfo event FIFO is full - event dropped\n"); + + return NOTIFY_OK; +} + +static __poll_t lineinfo_watch_poll(struct file *filep, + struct poll_table_struct *pollt) +{ + struct gpio_chardev_data *priv = filep->private_data; + __poll_t events = 0; + + poll_wait(filep, &priv->wait, pollt); + + if (!kfifo_is_empty_spinlocked_noirqsave(&priv->events, + &priv->wait.lock)) + events = EPOLLIN | EPOLLRDNORM; + + return events; 
+} + +static ssize_t lineinfo_watch_read(struct file *filep, char __user *buf, + size_t count, loff_t *off) +{ + struct gpio_chardev_data *priv = filep->private_data; + struct gpioline_info_changed event; + ssize_t bytes_read = 0; + int ret; + + if (count < sizeof(event)) + return -EINVAL; + + do { + spin_lock(&priv->wait.lock); + if (kfifo_is_empty(&priv->events)) { + if (bytes_read) { + spin_unlock(&priv->wait.lock); + return bytes_read; + } + + if (filep->f_flags & O_NONBLOCK) { + spin_unlock(&priv->wait.lock); + return -EAGAIN; + } + + ret = wait_event_interruptible_locked(priv->wait, + !kfifo_is_empty(&priv->events)); + if (ret) { + spin_unlock(&priv->wait.lock); + return ret; + } + } + + ret = kfifo_out(&priv->events, &event, 1); + spin_unlock(&priv->wait.lock); + if (ret != 1) { + ret = -EIO; + break; + /* We should never get here. See lineevent_read(). */ + } + + if (copy_to_user(buf + bytes_read, &event, sizeof(event))) + return -EFAULT; + bytes_read += sizeof(event); + } while (count >= bytes_read + sizeof(event)); + + return bytes_read; +} + /** * gpio_chrdev_open() - open the chardev for ioctl operations * @inode: inode for this chardev @@ -1253,14 +1399,48 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp) { struct gpio_device *gdev = container_of(inode->i_cdev, struct gpio_device, chrdev); + struct gpio_chardev_data *priv; + int ret = -ENOMEM; /* Fail on open if the backing gpiochip is gone */ if (!gdev->chip) return -ENODEV; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->watched_lines = bitmap_zalloc(gdev->chip->ngpio, GFP_KERNEL); + if (!priv->watched_lines) + goto out_free_priv; + + init_waitqueue_head(&priv->wait); + INIT_KFIFO(priv->events); + priv->gdev = gdev; + + priv->lineinfo_changed_nb.notifier_call = lineinfo_changed_notify; + ret = atomic_notifier_chain_register(&gdev->notifier, + &priv->lineinfo_changed_nb); + if (ret) + goto out_free_bitmap; + get_device(&gdev->dev); - filp->private_data = gdev; + filp->private_data = priv; - return nonseekable_open(inode, filp); + ret = nonseekable_open(inode, filp); + if (ret) + goto out_unregister_notifier; + + return ret; + +out_unregister_notifier: + atomic_notifier_chain_unregister(&gdev->notifier, + &priv->lineinfo_changed_nb); +out_free_bitmap: + bitmap_free(priv->watched_lines); +out_free_priv: + kfree(priv); + return ret; } /** @@ -1271,17 +1451,23 @@ static int gpio_chrdev_open(struct inode *inode, struct file *filp) */ static int gpio_chrdev_release(struct inode *inode, struct file *filp) { - struct gpio_device *gdev = container_of(inode->i_cdev, - struct gpio_device, chrdev); + struct gpio_chardev_data *priv = filp->private_data; + struct gpio_device *gdev = priv->gdev; + bitmap_free(priv->watched_lines); + atomic_notifier_chain_unregister(&gdev->notifier, + &priv->lineinfo_changed_nb); put_device(&gdev->dev); + kfree(priv); + return 0; } - static const struct file_operations gpio_fileops = { .release = gpio_chrdev_release, .open = gpio_chrdev_open, + .poll = lineinfo_watch_poll, + .read = lineinfo_watch_read, .owner = THIS_MODULE, .llseek = no_llseek, .unlocked_ioctl = gpio_ioctl, @@ -1333,12 +1519,12 @@ err_remove_device: return ret; } -static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog) +static void gpiochip_machine_hog(struct gpio_chip *gc, struct gpiod_hog *hog) { struct gpio_desc *desc; int rv; - desc = gpiochip_get_desc(chip, hog->chip_hwnum); + desc = gpiochip_get_desc(gc, hog->chip_hwnum); if (IS_ERR(desc)) { 
pr_err("%s: unable to get GPIO desc: %ld\n", __func__, PTR_ERR(desc)); @@ -1351,18 +1537,18 @@ static void gpiochip_machine_hog(struct gpio_chip *chip, struct gpiod_hog *hog) rv = gpiod_hog(desc, hog->line_name, hog->lflags, hog->dflags); if (rv) pr_err("%s: unable to hog GPIO line (%s:%u): %d\n", - __func__, chip->label, hog->chip_hwnum, rv); + __func__, gc->label, hog->chip_hwnum, rv); } -static void machine_gpiochip_add(struct gpio_chip *chip) +static void machine_gpiochip_add(struct gpio_chip *gc) { struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); list_for_each_entry(hog, &gpio_machine_hogs, list) { - if (!strcmp(chip->label, hog->chip_label)) - gpiochip_machine_hog(chip, hog); + if (!strcmp(gc->label, hog->chip_label)) + gpiochip_machine_hog(gc, hog); } mutex_unlock(&gpio_machine_hogs_mutex); @@ -1381,14 +1567,14 @@ static void gpiochip_setup_devs(void) } } -int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, +int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, struct lock_class_key *lock_key, struct lock_class_key *request_key) { unsigned long flags; int ret = 0; unsigned i; - int base = chip->base; + int base = gc->base; struct gpio_device *gdev; /* @@ -1399,19 +1585,19 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, if (!gdev) return -ENOMEM; gdev->dev.bus = &gpio_bus_type; - gdev->chip = chip; - chip->gpiodev = gdev; - if (chip->parent) { - gdev->dev.parent = chip->parent; - gdev->dev.of_node = chip->parent->of_node; + gdev->chip = gc; + gc->gpiodev = gdev; + if (gc->parent) { + gdev->dev.parent = gc->parent; + gdev->dev.of_node = gc->parent->of_node; } #ifdef CONFIG_OF_GPIO /* If the gpiochip has an assigned OF node this takes precedence */ - if (chip->of_node) - gdev->dev.of_node = chip->of_node; + if (gc->of_node) + gdev->dev.of_node = gc->of_node; else - chip->of_node = gdev->dev.of_node; + gc->of_node = gdev->dev.of_node; #endif gdev->id = ida_simple_get(&gpio_ida, 0, 0, GFP_KERNEL); @@ -1422,37 +1608,37 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); device_initialize(&gdev->dev); dev_set_drvdata(&gdev->dev, gdev); - if (chip->parent && chip->parent->driver) - gdev->owner = chip->parent->driver->owner; - else if (chip->owner) + if (gc->parent && gc->parent->driver) + gdev->owner = gc->parent->driver->owner; + else if (gc->owner) /* TODO: remove chip->owner */ - gdev->owner = chip->owner; + gdev->owner = gc->owner; else gdev->owner = THIS_MODULE; - gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); + gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); if (!gdev->descs) { ret = -ENOMEM; goto err_free_ida; } - if (chip->ngpio == 0) { - chip_err(chip, "tried to insert a GPIO chip with zero lines\n"); + if (gc->ngpio == 0) { + chip_err(gc, "tried to insert a GPIO chip with zero lines\n"); ret = -EINVAL; goto err_free_descs; } - if (chip->ngpio > FASTPATH_NGPIO) - chip_warn(chip, "line cnt %u is greater than fast path cnt %u\n", - chip->ngpio, FASTPATH_NGPIO); + if (gc->ngpio > FASTPATH_NGPIO) + chip_warn(gc, "line cnt %u is greater than fast path cnt %u\n", + gc->ngpio, FASTPATH_NGPIO); - gdev->label = kstrdup_const(chip->label ?: "unknown", GFP_KERNEL); + gdev->label = kstrdup_const(gc->label ?: "unknown", GFP_KERNEL); if (!gdev->label) { ret = -ENOMEM; goto err_free_descs; } - gdev->ngpio = chip->ngpio; + gdev->ngpio = gc->ngpio; gdev->data = data; spin_lock_irqsave(&gpio_lock, flags); @@ -1465,7 
+1651,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, * of the sysfs interface anyways. */ if (base < 0) { - base = gpiochip_find_base(chip->ngpio); + base = gpiochip_find_base(gc->ngpio); if (base < 0) { ret = base; spin_unlock_irqrestore(&gpio_lock, flags); @@ -1477,7 +1663,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, * see if anyone makes use of this, else drop this and assign * a poison instead. */ - chip->base = base; + gc->base = base; } gdev->base = base; @@ -1487,60 +1673,62 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, goto err_free_label; } - for (i = 0; i < chip->ngpio; i++) + for (i = 0; i < gc->ngpio; i++) gdev->descs[i].gdev = gdev; spin_unlock_irqrestore(&gpio_lock, flags); + ATOMIC_INIT_NOTIFIER_HEAD(&gdev->notifier); + #ifdef CONFIG_PINCTRL INIT_LIST_HEAD(&gdev->pin_ranges); #endif - ret = gpiochip_set_desc_names(chip); + ret = gpiochip_set_desc_names(gc); if (ret) goto err_remove_from_list; - ret = gpiochip_alloc_valid_mask(chip); + ret = gpiochip_alloc_valid_mask(gc); if (ret) goto err_remove_from_list; - ret = of_gpiochip_add(chip); + ret = of_gpiochip_add(gc); if (ret) goto err_free_gpiochip_mask; - ret = gpiochip_init_valid_mask(chip); + ret = gpiochip_init_valid_mask(gc); if (ret) goto err_remove_of_chip; - for (i = 0; i < chip->ngpio; i++) { + for (i = 0; i < gc->ngpio; i++) { struct gpio_desc *desc = &gdev->descs[i]; - if (chip->get_direction && gpiochip_line_is_valid(chip, i)) { + if (gc->get_direction && gpiochip_line_is_valid(gc, i)) { assign_bit(FLAG_IS_OUT, - &desc->flags, !chip->get_direction(chip, i)); + &desc->flags, !gc->get_direction(gc, i)); } else { assign_bit(FLAG_IS_OUT, - &desc->flags, !chip->direction_input); + &desc->flags, !gc->direction_input); } } - ret = gpiochip_add_pin_ranges(chip); + ret = gpiochip_add_pin_ranges(gc); if (ret) goto err_remove_of_chip; - acpi_gpiochip_add(chip); + acpi_gpiochip_add(gc); - machine_gpiochip_add(chip); + machine_gpiochip_add(gc); - ret = gpiochip_irqchip_init_valid_mask(chip); + ret = gpiochip_irqchip_init_valid_mask(gc); if (ret) goto err_remove_acpi_chip; - ret = gpiochip_irqchip_init_hw(chip); + ret = gpiochip_irqchip_init_hw(gc); if (ret) goto err_remove_acpi_chip; - ret = gpiochip_add_irqchip(chip, lock_key, request_key); + ret = gpiochip_add_irqchip(gc, lock_key, request_key); if (ret) goto err_remove_irqchip_mask; @@ -1560,17 +1748,17 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data, return 0; err_remove_irqchip: - gpiochip_irqchip_remove(chip); + gpiochip_irqchip_remove(gc); err_remove_irqchip_mask: - gpiochip_irqchip_free_valid_mask(chip); + gpiochip_irqchip_free_valid_mask(gc); err_remove_acpi_chip: - acpi_gpiochip_remove(chip); + acpi_gpiochip_remove(gc); err_remove_of_chip: - gpiochip_free_hogs(chip); - of_gpiochip_remove(chip); + gpiochip_free_hogs(gc); + of_gpiochip_remove(gc); err_free_gpiochip_mask: - gpiochip_remove_pin_ranges(chip); - gpiochip_free_valid_mask(chip); + gpiochip_remove_pin_ranges(gc); + gpiochip_free_valid_mask(gc); err_remove_from_list: spin_lock_irqsave(&gpio_lock, flags); list_del(&gdev->list); @@ -1585,7 +1773,7 @@ err_free_gdev: /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__, gdev->base, gdev->base + gdev->ngpio - 1, - chip->label ? : "generic", ret); + gc->label ? 
: "generic", ret); kfree(gdev); return ret; } @@ -1593,41 +1781,39 @@ EXPORT_SYMBOL_GPL(gpiochip_add_data_with_key); /** * gpiochip_get_data() - get per-subdriver data for the chip - * @chip: GPIO chip + * @gc: GPIO chip * * Returns: * The per-subdriver data for the chip. */ -void *gpiochip_get_data(struct gpio_chip *chip) +void *gpiochip_get_data(struct gpio_chip *gc) { - return chip->gpiodev->data; + return gc->gpiodev->data; } EXPORT_SYMBOL_GPL(gpiochip_get_data); /** * gpiochip_remove() - unregister a gpio_chip - * @chip: the chip to unregister + * @gc: the chip to unregister * * A gpio_chip with any GPIOs still requested may not be removed. */ -void gpiochip_remove(struct gpio_chip *chip) +void gpiochip_remove(struct gpio_chip *gc) { - struct gpio_device *gdev = chip->gpiodev; - struct gpio_desc *desc; + struct gpio_device *gdev = gc->gpiodev; unsigned long flags; - unsigned i; - bool requested = false; + unsigned int i; /* FIXME: should the legacy sysfs handling be moved to gpio_device? */ gpiochip_sysfs_unregister(gdev); - gpiochip_free_hogs(chip); + gpiochip_free_hogs(gc); /* Numb the device, cancelling all outstanding operations */ gdev->chip = NULL; - gpiochip_irqchip_remove(chip); - acpi_gpiochip_remove(chip); - of_gpiochip_remove(chip); - gpiochip_remove_pin_ranges(chip); - gpiochip_free_valid_mask(chip); + gpiochip_irqchip_remove(gc); + acpi_gpiochip_remove(gc); + of_gpiochip_remove(gc); + gpiochip_remove_pin_ranges(gc); + gpiochip_free_valid_mask(gc); /* * We accept no more calls into the driver from this point, so * NULL the driver data pointer @@ -1636,13 +1822,12 @@ void gpiochip_remove(struct gpio_chip *chip) spin_lock_irqsave(&gpio_lock, flags); for (i = 0; i < gdev->ngpio; i++) { - desc = &gdev->descs[i]; - if (test_bit(FLAG_REQUESTED, &desc->flags)) - requested = true; + if (gpiochip_is_requested(gc, i)) + break; } spin_unlock_irqrestore(&gpio_lock, flags); - if (requested) + if (i != gdev->ngpio) dev_crit(&gdev->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); @@ -1657,52 +1842,6 @@ void gpiochip_remove(struct gpio_chip *chip) } EXPORT_SYMBOL_GPL(gpiochip_remove); -static void devm_gpio_chip_release(struct device *dev, void *res) -{ - struct gpio_chip *chip = *(struct gpio_chip **)res; - - gpiochip_remove(chip); -} - -/** - * devm_gpiochip_add_data() - Resource managed gpiochip_add_data() - * @dev: pointer to the device that gpio_chip belongs to. - * @chip: the chip to register, with chip->base initialized - * @data: driver-private data associated with this chip - * - * Context: potentially before irqs will work - * - * The gpio chip automatically be released when the device is unbound. - * - * Returns: - * A negative errno if the chip can't be registered, such as because the - * chip->base is invalid or already associated with a different chip. - * Otherwise it returns zero as a success code. - */ -int devm_gpiochip_add_data(struct device *dev, struct gpio_chip *chip, - void *data) -{ - struct gpio_chip **ptr; - int ret; - - ptr = devres_alloc(devm_gpio_chip_release, sizeof(*ptr), - GFP_KERNEL); - if (!ptr) - return -ENOMEM; - - ret = gpiochip_add_data(chip, data); - if (ret < 0) { - devres_free(ptr); - return ret; - } - - *ptr = chip; - devres_add(dev, ptr); - - return 0; -} -EXPORT_SYMBOL_GPL(devm_gpiochip_add_data); - /** * gpiochip_find() - iterator for locating a specific gpio_chip * @data: data to pass to match function @@ -1715,31 +1854,31 @@ EXPORT_SYMBOL_GPL(devm_gpiochip_add_data); * more gpio_chips. 
*/ struct gpio_chip *gpiochip_find(void *data, - int (*match)(struct gpio_chip *chip, + int (*match)(struct gpio_chip *gc, void *data)) { struct gpio_device *gdev; - struct gpio_chip *chip = NULL; + struct gpio_chip *gc = NULL; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); list_for_each_entry(gdev, &gpio_devices, list) if (gdev->chip && match(gdev->chip, data)) { - chip = gdev->chip; + gc = gdev->chip; break; } spin_unlock_irqrestore(&gpio_lock, flags); - return chip; + return gc; } EXPORT_SYMBOL_GPL(gpiochip_find); -static int gpiochip_match_name(struct gpio_chip *chip, void *data) +static int gpiochip_match_name(struct gpio_chip *gc, void *data) { const char *name = data; - return !strcmp(chip->label, name); + return !strcmp(gc->label, name); } static struct gpio_chip *find_chip_by_name(const char *name) @@ -1779,21 +1918,21 @@ static int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) return 0; } -static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip) +static void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) { - bitmap_free(gpiochip->irq.valid_mask); - gpiochip->irq.valid_mask = NULL; + bitmap_free(gc->irq.valid_mask); + gc->irq.valid_mask = NULL; } -bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, +bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gc, unsigned int offset) { - if (!gpiochip_line_is_valid(gpiochip, offset)) + if (!gpiochip_line_is_valid(gc, offset)) return false; /* No mask means all valid */ - if (likely(!gpiochip->irq.valid_mask)) + if (likely(!gc->irq.valid_mask)) return true; - return test_bit(offset, gpiochip->irq.valid_mask); + return test_bit(offset, gc->irq.valid_mask); } EXPORT_SYMBOL_GPL(gpiochip_irqchip_irq_valid); @@ -1845,16 +1984,16 @@ static void gpiochip_set_cascaded_irqchip(struct gpio_chip *gc, /** * gpiochip_set_nested_irqchip() - connects a nested irqchip to a gpiochip - * @gpiochip: the gpiochip to set the irqchip nested handler to + * @gc: the gpiochip to set the irqchip nested handler to * @irqchip: the irqchip to nest to the gpiochip * @parent_irq: the irq number corresponding to the parent IRQ for this * nested irqchip */ -void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip, +void gpiochip_set_nested_irqchip(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int parent_irq) { - gpiochip_set_cascaded_irqchip(gpiochip, parent_irq, NULL); + gpiochip_set_cascaded_irqchip(gc, parent_irq, NULL); } EXPORT_SYMBOL_GPL(gpiochip_set_nested_irqchip); @@ -2031,7 +2170,7 @@ static int gpiochip_hierarchy_irq_domain_alloc(struct irq_domain *d, return ret; } -static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *chip, +static unsigned int gpiochip_child_offset_to_irq_noop(struct gpio_chip *gc, unsigned int offset) { return offset; @@ -2091,7 +2230,7 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) return !!gc->irq.parent_domain; } -void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { @@ -2101,7 +2240,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, if (!fwspec) return NULL; - fwspec->fwnode = chip->irq.parent_domain->fwnode; + fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 2; fwspec->param[0] = parent_hwirq; fwspec->param[1] = parent_type; @@ -2110,7 +2249,7 @@ void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip, } 
EXPORT_SYMBOL_GPL(gpiochip_populate_parent_fwspec_twocell); -void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, +void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *gc, unsigned int parent_hwirq, unsigned int parent_type) { @@ -2120,7 +2259,7 @@ void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip, if (!fwspec) return NULL; - fwspec->fwnode = chip->irq.parent_domain->fwnode; + fwspec->fwnode = gc->irq.parent_domain->fwnode; fwspec->param_count = 4; fwspec->param[0] = 0; fwspec->param[1] = parent_hwirq; @@ -2158,28 +2297,28 @@ static bool gpiochip_hierarchy_is_hierarchical(struct gpio_chip *gc) int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hwirq) { - struct gpio_chip *chip = d->host_data; + struct gpio_chip *gc = d->host_data; int ret = 0; - if (!gpiochip_irqchip_irq_valid(chip, hwirq)) + if (!gpiochip_irqchip_irq_valid(gc, hwirq)) return -ENXIO; - irq_set_chip_data(irq, chip); + irq_set_chip_data(irq, gc); /* * This lock class tells lockdep that GPIO irqs are in a different * category than their parents, so it won't report false recursion. */ - irq_set_lockdep_class(irq, chip->irq.lock_key, chip->irq.request_key); - irq_set_chip_and_handler(irq, chip->irq.chip, chip->irq.handler); + irq_set_lockdep_class(irq, gc->irq.lock_key, gc->irq.request_key); + irq_set_chip_and_handler(irq, gc->irq.chip, gc->irq.handler); /* Chips that use nested thread handlers have them marked */ - if (chip->irq.threaded) + if (gc->irq.threaded) irq_set_nested_thread(irq, 1); irq_set_noprobe(irq); - if (chip->irq.num_parents == 1) - ret = irq_set_parent(irq, chip->irq.parents[0]); - else if (chip->irq.map) - ret = irq_set_parent(irq, chip->irq.map[hwirq]); + if (gc->irq.num_parents == 1) + ret = irq_set_parent(irq, gc->irq.parents[0]); + else if (gc->irq.map) + ret = irq_set_parent(irq, gc->irq.map[hwirq]); if (ret < 0) return ret; @@ -2188,8 +2327,8 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, * No set-up of the hardware will happen if IRQ_TYPE_NONE * is passed as default type. 
*/ - if (chip->irq.default_type != IRQ_TYPE_NONE) - irq_set_irq_type(irq, chip->irq.default_type); + if (gc->irq.default_type != IRQ_TYPE_NONE) + irq_set_irq_type(irq, gc->irq.default_type); return 0; } @@ -2197,9 +2336,9 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_map); void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq) { - struct gpio_chip *chip = d->host_data; + struct gpio_chip *gc = d->host_data; - if (chip->irq.threaded) + if (gc->irq.threaded) irq_set_nested_thread(irq, 0); irq_set_chip_and_handler(irq, NULL, NULL); irq_set_chip_data(irq, NULL); @@ -2231,9 +2370,9 @@ static const struct irq_domain_ops gpiochip_domain_ops = { int gpiochip_irq_domain_activate(struct irq_domain *domain, struct irq_data *data, bool reserve) { - struct gpio_chip *chip = domain->host_data; + struct gpio_chip *gc = domain->host_data; - return gpiochip_lock_as_irq(chip, data->hwirq); + return gpiochip_lock_as_irq(gc, data->hwirq); } EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate); @@ -2249,17 +2388,17 @@ EXPORT_SYMBOL_GPL(gpiochip_irq_domain_activate); void gpiochip_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *data) { - struct gpio_chip *chip = domain->host_data; + struct gpio_chip *gc = domain->host_data; - return gpiochip_unlock_as_irq(chip, data->hwirq); + return gpiochip_unlock_as_irq(gc, data->hwirq); } EXPORT_SYMBOL_GPL(gpiochip_irq_domain_deactivate); -static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) +static int gpiochip_to_irq(struct gpio_chip *gc, unsigned offset) { - struct irq_domain *domain = chip->irq.domain; + struct irq_domain *domain = gc->irq.domain; - if (!gpiochip_irqchip_irq_valid(chip, offset)) + if (!gpiochip_irqchip_irq_valid(gc, offset)) return -ENXIO; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY @@ -2268,7 +2407,7 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) spec.fwnode = domain->fwnode; spec.param_count = 2; - spec.param[0] = chip->irq.child_offset_to_irq(chip, offset); + spec.param[0] = gc->irq.child_offset_to_irq(gc, offset); spec.param[1] = IRQ_TYPE_NONE; return irq_create_fwspec_mapping(&spec); @@ -2280,32 +2419,32 @@ static int gpiochip_to_irq(struct gpio_chip *chip, unsigned offset) static int gpiochip_irq_reqres(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - return gpiochip_reqres_irq(chip, d->hwirq); + return gpiochip_reqres_irq(gc, d->hwirq); } static void gpiochip_irq_relres(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - gpiochip_relres_irq(chip, d->hwirq); + gpiochip_relres_irq(gc, d->hwirq); } static void gpiochip_irq_enable(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); - gpiochip_enable_irq(chip, d->hwirq); - if (chip->irq.irq_enable) - chip->irq.irq_enable(d); + gpiochip_enable_irq(gc, d->hwirq); + if (gc->irq.irq_enable) + gc->irq.irq_enable(d); else - chip->irq.chip->irq_unmask(d); + gc->irq.chip->irq_unmask(d); } static void gpiochip_irq_disable(struct irq_data *d) { - struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); /* * Since we override .irq_disable() we need to mimic the @@ -2314,23 +2453,23 @@ static void gpiochip_irq_disable(struct irq_data *d) * behaviour of mask_irq() which calls .irq_mask() if * it exists. 
*/ - if (chip->irq.irq_disable) - chip->irq.irq_disable(d); - else if (chip->irq.chip->irq_mask) - chip->irq.chip->irq_mask(d); - gpiochip_disable_irq(chip, d->hwirq); + if (gc->irq.irq_disable) + gc->irq.irq_disable(d); + else if (gc->irq.chip->irq_mask) + gc->irq.chip->irq_mask(d); + gpiochip_disable_irq(gc, d->hwirq); } -static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip) +static void gpiochip_set_irq_hooks(struct gpio_chip *gc) { - struct irq_chip *irqchip = gpiochip->irq.chip; + struct irq_chip *irqchip = gc->irq.chip; if (!irqchip->irq_request_resources && !irqchip->irq_release_resources) { irqchip->irq_request_resources = gpiochip_irq_reqres; irqchip->irq_release_resources = gpiochip_irq_relres; } - if (WARN_ON(gpiochip->irq.irq_enable)) + if (WARN_ON(gc->irq.irq_enable)) return; /* Check if the irqchip already has this hook... */ if (irqchip->irq_enable == gpiochip_irq_enable) { @@ -2338,27 +2477,27 @@ static void gpiochip_set_irq_hooks(struct gpio_chip *gpiochip) * ...and if so, give a gentle warning that this is bad * practice. */ - chip_info(gpiochip, + chip_info(gc, "detected irqchip that is shared with multiple gpiochips: please fix the driver.\n"); return; } - gpiochip->irq.irq_enable = irqchip->irq_enable; - gpiochip->irq.irq_disable = irqchip->irq_disable; + gc->irq.irq_enable = irqchip->irq_enable; + gc->irq.irq_disable = irqchip->irq_disable; irqchip->irq_enable = gpiochip_irq_enable; irqchip->irq_disable = gpiochip_irq_disable; } /** * gpiochip_add_irqchip() - adds an IRQ chip to a GPIO chip - * @gpiochip: the GPIO chip to add the IRQ chip to + * @gc: the GPIO chip to add the IRQ chip to * @lock_key: lockdep class for IRQ lock * @request_key: lockdep class for IRQ request */ -static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, +static int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { - struct irq_chip *irqchip = gpiochip->irq.chip; + struct irq_chip *irqchip = gc->irq.chip; const struct irq_domain_ops *ops = NULL; struct device_node *np; unsigned int type; @@ -2367,13 +2506,13 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, if (!irqchip) return 0; - if (gpiochip->irq.parent_handler && gpiochip->can_sleep) { - chip_err(gpiochip, "you cannot have chained interrupts on a chip that may sleep\n"); + if (gc->irq.parent_handler && gc->can_sleep) { + chip_err(gc, "you cannot have chained interrupts on a chip that may sleep\n"); return -EINVAL; } - np = gpiochip->gpiodev->dev.of_node; - type = gpiochip->irq.default_type; + np = gc->gpiodev->dev.of_node; + type = gc->irq.default_type; /* * Specifying a default trigger is a terrible idea if DT or ACPI is @@ -2384,74 +2523,74 @@ static int gpiochip_add_irqchip(struct gpio_chip *gpiochip, "%s: Ignoring %u default trigger\n", np->full_name, type)) type = IRQ_TYPE_NONE; - if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) { - acpi_handle_warn(ACPI_HANDLE(gpiochip->parent), + if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) { + acpi_handle_warn(ACPI_HANDLE(gc->parent), "Ignoring %u default trigger\n", type); type = IRQ_TYPE_NONE; } - gpiochip->to_irq = gpiochip_to_irq; - gpiochip->irq.default_type = type; - gpiochip->irq.lock_key = lock_key; - gpiochip->irq.request_key = request_key; + gc->to_irq = gpiochip_to_irq; + gc->irq.default_type = type; + gc->irq.lock_key = lock_key; + gc->irq.request_key = request_key; /* If a parent irqdomain is provided, let's build a hierarchy */ - if 
(gpiochip_hierarchy_is_hierarchical(gpiochip)) { - int ret = gpiochip_hierarchy_add_domain(gpiochip); + if (gpiochip_hierarchy_is_hierarchical(gc)) { + int ret = gpiochip_hierarchy_add_domain(gc); if (ret) return ret; } else { /* Some drivers provide custom irqdomain ops */ - if (gpiochip->irq.domain_ops) - ops = gpiochip->irq.domain_ops; + if (gc->irq.domain_ops) + ops = gc->irq.domain_ops; if (!ops) ops = &gpiochip_domain_ops; - gpiochip->irq.domain = irq_domain_add_simple(np, - gpiochip->ngpio, - gpiochip->irq.first, - ops, gpiochip); - if (!gpiochip->irq.domain) + gc->irq.domain = irq_domain_add_simple(np, + gc->ngpio, + gc->irq.first, + ops, gc); + if (!gc->irq.domain) return -EINVAL; } - if (gpiochip->irq.parent_handler) { - void *data = gpiochip->irq.parent_handler_data ?: gpiochip; + if (gc->irq.parent_handler) { + void *data = gc->irq.parent_handler_data ?: gc; - for (i = 0; i < gpiochip->irq.num_parents; i++) { + for (i = 0; i < gc->irq.num_parents; i++) { /* * The parent IRQ chip is already using the chip_data * for this IRQ chip, so our callbacks simply use the * handler_data. */ - irq_set_chained_handler_and_data(gpiochip->irq.parents[i], - gpiochip->irq.parent_handler, + irq_set_chained_handler_and_data(gc->irq.parents[i], + gc->irq.parent_handler, data); } } - gpiochip_set_irq_hooks(gpiochip); + gpiochip_set_irq_hooks(gc); - acpi_gpiochip_request_interrupts(gpiochip); + acpi_gpiochip_request_interrupts(gc); return 0; } /** * gpiochip_irqchip_remove() - removes an irqchip added to a gpiochip - * @gpiochip: the gpiochip to remove the irqchip from + * @gc: the gpiochip to remove the irqchip from * * This is called only from gpiochip_remove() */ -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) +static void gpiochip_irqchip_remove(struct gpio_chip *gc) { - struct irq_chip *irqchip = gpiochip->irq.chip; + struct irq_chip *irqchip = gc->irq.chip; unsigned int offset; - acpi_gpiochip_free_interrupts(gpiochip); + acpi_gpiochip_free_interrupts(gc); - if (irqchip && gpiochip->irq.parent_handler) { - struct gpio_irq_chip *irq = &gpiochip->irq; + if (irqchip && gc->irq.parent_handler) { + struct gpio_irq_chip *irq = &gc->irq; unsigned int i; for (i = 0; i < irq->num_parents; i++) @@ -2460,18 +2599,18 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) } /* Remove all IRQ mappings and delete the domain */ - if (gpiochip->irq.domain) { + if (gc->irq.domain) { unsigned int irq; - for (offset = 0; offset < gpiochip->ngpio; offset++) { - if (!gpiochip_irqchip_irq_valid(gpiochip, offset)) + for (offset = 0; offset < gc->ngpio; offset++) { + if (!gpiochip_irqchip_irq_valid(gc, offset)) continue; - irq = irq_find_mapping(gpiochip->irq.domain, offset); + irq = irq_find_mapping(gc->irq.domain, offset); irq_dispose_mapping(irq); } - irq_domain_remove(gpiochip->irq.domain); + irq_domain_remove(gc->irq.domain); } if (irqchip) { @@ -2480,20 +2619,20 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) irqchip->irq_release_resources = NULL; } if (irqchip->irq_enable == gpiochip_irq_enable) { - irqchip->irq_enable = gpiochip->irq.irq_enable; - irqchip->irq_disable = gpiochip->irq.irq_disable; + irqchip->irq_enable = gc->irq.irq_enable; + irqchip->irq_disable = gc->irq.irq_disable; } } - gpiochip->irq.irq_enable = NULL; - gpiochip->irq.irq_disable = NULL; - gpiochip->irq.chip = NULL; + gc->irq.irq_enable = NULL; + gc->irq.irq_disable = NULL; + gc->irq.chip = NULL; - gpiochip_irqchip_free_valid_mask(gpiochip); + gpiochip_irqchip_free_valid_mask(gc); } /** * 
gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip - * @gpiochip: the gpiochip to add the irqchip to + * @gc: the gpiochip to add the irqchip to * @irqchip: the irqchip to add to the gpiochip * @first_irq: if not dynamically assigned, the base (first) IRQ to * allocate gpiochip irqs from @@ -2518,7 +2657,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) * the pins on the gpiochip can generate a unique IRQ. Everything else * need to be open coded. */ -int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, +int gpiochip_irqchip_add_key(struct gpio_chip *gc, struct irq_chip *irqchip, unsigned int first_irq, irq_flow_handler_t handler, @@ -2529,23 +2668,23 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, { struct device_node *of_node; - if (!gpiochip || !irqchip) + if (!gc || !irqchip) return -EINVAL; - if (!gpiochip->parent) { + if (!gc->parent) { pr_err("missing gpiochip .dev parent pointer\n"); return -EINVAL; } - gpiochip->irq.threaded = threaded; - of_node = gpiochip->parent->of_node; + gc->irq.threaded = threaded; + of_node = gc->parent->of_node; #ifdef CONFIG_OF_GPIO /* * If the gpiochip has an assigned OF node this takes precedence - * FIXME: get rid of this and use gpiochip->parent->of_node + * FIXME: get rid of this and use gc->parent->of_node * everywhere */ - if (gpiochip->of_node) - of_node = gpiochip->of_node; + if (gc->of_node) + of_node = gc->of_node; #endif /* * Specifying a default trigger is a terrible idea if DT or ACPI is @@ -2555,29 +2694,29 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, if (WARN(of_node && type != IRQ_TYPE_NONE, "%pOF: Ignoring %d default trigger\n", of_node, type)) type = IRQ_TYPE_NONE; - if (has_acpi_companion(gpiochip->parent) && type != IRQ_TYPE_NONE) { - acpi_handle_warn(ACPI_HANDLE(gpiochip->parent), + if (has_acpi_companion(gc->parent) && type != IRQ_TYPE_NONE) { + acpi_handle_warn(ACPI_HANDLE(gc->parent), "Ignoring %d default trigger\n", type); type = IRQ_TYPE_NONE; } - gpiochip->irq.chip = irqchip; - gpiochip->irq.handler = handler; - gpiochip->irq.default_type = type; - gpiochip->to_irq = gpiochip_to_irq; - gpiochip->irq.lock_key = lock_key; - gpiochip->irq.request_key = request_key; - gpiochip->irq.domain = irq_domain_add_simple(of_node, - gpiochip->ngpio, first_irq, - &gpiochip_domain_ops, gpiochip); - if (!gpiochip->irq.domain) { - gpiochip->irq.chip = NULL; + gc->irq.chip = irqchip; + gc->irq.handler = handler; + gc->irq.default_type = type; + gc->to_irq = gpiochip_to_irq; + gc->irq.lock_key = lock_key; + gc->irq.request_key = request_key; + gc->irq.domain = irq_domain_add_simple(of_node, + gc->ngpio, first_irq, + &gpiochip_domain_ops, gc); + if (!gc->irq.domain) { + gc->irq.chip = NULL; return -EINVAL; } - gpiochip_set_irq_hooks(gpiochip); + gpiochip_set_irq_hooks(gc); - acpi_gpiochip_request_interrupts(gpiochip); + acpi_gpiochip_request_interrupts(gc); return 0; } @@ -2585,60 +2724,65 @@ EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key); #else /* CONFIG_GPIOLIB_IRQCHIP */ -static inline int gpiochip_add_irqchip(struct gpio_chip *gpiochip, +static inline int gpiochip_add_irqchip(struct gpio_chip *gc, struct lock_class_key *lock_key, struct lock_class_key *request_key) { return 0; } -static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip) {} +static void gpiochip_irqchip_remove(struct gpio_chip *gc) {} -static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gpiochip) +static inline int gpiochip_irqchip_init_hw(struct gpio_chip *gc) { return 0; } -static inline int 
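
Everything above keeps the two ways a driver of this era gets an irqchip attached: filling gc->irq before gpiochip_add_data() (handled by gpiochip_add_irqchip()) or calling gpiochip_irqchip_add_key() afterwards. A sketch of the second path for a sleeping expander, assuming the gpiochip_irqchip_add_nested() convenience macro that supplies the lockdep keys; my_irq_chip and the stub callbacks are hypothetical:

	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	static void my_irq_mask(struct irq_data *d)   { /* mask in hardware */ }
	static void my_irq_unmask(struct irq_data *d) { /* unmask in hardware */ }

	static struct irq_chip my_irq_chip = {
		.name		= "my-expander",
		.irq_mask	= my_irq_mask,
		.irq_unmask	= my_irq_unmask,
	};

	static int my_setup_irq(struct gpio_chip *gc, int parent_irq)
	{
		int ret;

		/* nested/threaded because the expander sits behind a slow bus */
		ret = gpiochip_irqchip_add_nested(gc, &my_irq_chip, 0,
						  handle_simple_irq,
						  IRQ_TYPE_NONE);
		if (ret)
			return ret;

		gpiochip_set_nested_irqchip(gc, &my_irq_chip, parent_irq);
		return 0;
	}
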
gpiochip_irqchip_init_valid_mask(struct gpio_chip *gpiochip) +static inline int gpiochip_irqchip_init_valid_mask(struct gpio_chip *gc) { return 0; } -static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gpiochip) +static inline void gpiochip_irqchip_free_valid_mask(struct gpio_chip *gc) { } #endif /* CONFIG_GPIOLIB_IRQCHIP */ /** * gpiochip_generic_request() - request the gpio function for a pin - * @chip: the gpiochip owning the GPIO + * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to request for GPIO function */ -int gpiochip_generic_request(struct gpio_chip *chip, unsigned offset) +int gpiochip_generic_request(struct gpio_chip *gc, unsigned offset) { - return pinctrl_gpio_request(chip->gpiodev->base + offset); +#ifdef CONFIG_PINCTRL + if (list_empty(&gc->gpiodev->pin_ranges)) + return 0; +#endif + + return pinctrl_gpio_request(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_request); /** * gpiochip_generic_free() - free the gpio function from a pin - * @chip: the gpiochip to request the gpio function for + * @gc: the gpiochip to request the gpio function for * @offset: the offset of the GPIO to free from GPIO function */ -void gpiochip_generic_free(struct gpio_chip *chip, unsigned offset) +void gpiochip_generic_free(struct gpio_chip *gc, unsigned offset) { - pinctrl_gpio_free(chip->gpiodev->base + offset); + pinctrl_gpio_free(gc->gpiodev->base + offset); } EXPORT_SYMBOL_GPL(gpiochip_generic_free); /** * gpiochip_generic_config() - apply configuration for a pin - * @chip: the gpiochip owning the GPIO + * @gc: the gpiochip owning the GPIO * @offset: the offset of the GPIO to apply the configuration * @config: the configuration to be applied */ -int gpiochip_generic_config(struct gpio_chip *chip, unsigned offset, +int gpiochip_generic_config(struct gpio_chip *gc, unsigned offset, unsigned long config) { - return pinctrl_gpio_set_config(chip->gpiodev->base + offset, config); + return pinctrl_gpio_set_config(gc->gpiodev->base + offset, config); } EXPORT_SYMBOL_GPL(gpiochip_generic_config); @@ -2646,7 +2790,7 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config); /** * gpiochip_add_pingroup_range() - add a range for GPIO <-> pin mapping - * @chip: the gpiochip to add the range for + * @gc: the gpiochip to add the range for * @pctldev: the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_group: name of the pin group inside the pin controller @@ -2656,24 +2800,24 @@ EXPORT_SYMBOL_GPL(gpiochip_generic_config); * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. 
*/ -int gpiochip_add_pingroup_range(struct gpio_chip *chip, +int gpiochip_add_pingroup_range(struct gpio_chip *gc, struct pinctrl_dev *pctldev, unsigned int gpio_offset, const char *pin_group) { struct gpio_pin_range *pin_range; - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; int ret; pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL); if (!pin_range) { - chip_err(chip, "failed to allocate pin ranges\n"); + chip_err(gc, "failed to allocate pin ranges\n"); return -ENOMEM; } /* Use local offset as range ID */ pin_range->range.id = gpio_offset; - pin_range->range.gc = chip; - pin_range->range.name = chip->label; + pin_range->range.gc = gc; + pin_range->range.name = gc->label; pin_range->range.base = gdev->base + gpio_offset; pin_range->pctldev = pctldev; @@ -2687,7 +2831,7 @@ int gpiochip_add_pingroup_range(struct gpio_chip *chip, pinctrl_add_gpio_range(pctldev, &pin_range->range); - chip_dbg(chip, "created GPIO range %d->%d ==> %s PINGRP %s\n", + chip_dbg(gc, "created GPIO range %d->%d ==> %s PINGRP %s\n", gpio_offset, gpio_offset + pin_range->range.npins - 1, pinctrl_dev_get_devname(pctldev), pin_group); @@ -2699,7 +2843,7 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range); /** * gpiochip_add_pin_range() - add a range for GPIO <-> pin mapping - * @chip: the gpiochip to add the range for + * @gc: the gpiochip to add the range for * @pinctl_name: the dev_name() of the pin controller to map to * @gpio_offset: the start offset in the current gpio_chip number space * @pin_offset: the start offset in the pin controller number space @@ -2714,24 +2858,24 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pingroup_range); * Documentation/devicetree/bindings/gpio/gpio.txt on how to * bind pinctrl and gpio drivers via the "gpio-ranges" property. 
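
gpiochip_add_pingroup_range() and its sibling gpiochip_add_pin_range() below tie a GPIO chip to a pin controller, which is what makes the gpiochip_generic_request()/free()/config() helpers above forward to pinctrl (and, after this patch, turn into no-ops when no range was registered). A sketch under assumed names ("pinctrl-foo", the offsets, my_register_with_pinctrl()):

	#include <linux/gpio/driver.h>

	static int my_register_with_pinctrl(struct gpio_chip *gc, void *priv)
	{
		int ret;

		gc->request = gpiochip_generic_request;
		gc->free = gpiochip_generic_free;
		gc->set_config = gpiochip_generic_config;

		ret = gpiochip_add_data(gc, priv);
		if (ret)
			return ret;

		/* GPIOs 0..7 of this chip map to pins 32..39 of "pinctrl-foo" */
		ret = gpiochip_add_pin_range(gc, "pinctrl-foo", 0, 32, 8);
		if (ret)
			gpiochip_remove(gc);

		return ret;
	}
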
*/ -int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, +int gpiochip_add_pin_range(struct gpio_chip *gc, const char *pinctl_name, unsigned int gpio_offset, unsigned int pin_offset, unsigned int npins) { struct gpio_pin_range *pin_range; - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; int ret; pin_range = kzalloc(sizeof(*pin_range), GFP_KERNEL); if (!pin_range) { - chip_err(chip, "failed to allocate pin ranges\n"); + chip_err(gc, "failed to allocate pin ranges\n"); return -ENOMEM; } /* Use local offset as range ID */ pin_range->range.id = gpio_offset; - pin_range->range.gc = chip; - pin_range->range.name = chip->label; + pin_range->range.gc = gc; + pin_range->range.name = gc->label; pin_range->range.base = gdev->base + gpio_offset; pin_range->range.pin_base = pin_offset; pin_range->range.npins = npins; @@ -2739,11 +2883,11 @@ int gpiochip_add_pin_range(struct gpio_chip *chip, const char *pinctl_name, &pin_range->range); if (IS_ERR(pin_range->pctldev)) { ret = PTR_ERR(pin_range->pctldev); - chip_err(chip, "could not create pin range\n"); + chip_err(gc, "could not create pin range\n"); kfree(pin_range); return ret; } - chip_dbg(chip, "created GPIO range %d->%d ==> %s PIN %d->%d\n", + chip_dbg(gc, "created GPIO range %d->%d ==> %s PIN %d->%d\n", gpio_offset, gpio_offset + npins - 1, pinctl_name, pin_offset, pin_offset + npins - 1); @@ -2756,12 +2900,12 @@ EXPORT_SYMBOL_GPL(gpiochip_add_pin_range); /** * gpiochip_remove_pin_ranges() - remove all the GPIO <-> pin mappings - * @chip: the chip to remove all the mappings for + * @gc: the chip to remove all the mappings for */ -void gpiochip_remove_pin_ranges(struct gpio_chip *chip) +void gpiochip_remove_pin_ranges(struct gpio_chip *gc) { struct gpio_pin_range *pin_range, *tmp; - struct gpio_device *gdev = chip->gpiodev; + struct gpio_device *gdev = gc->gpiodev; list_for_each_entry_safe(pin_range, tmp, &gdev->pin_ranges, node) { list_del(&pin_range->node); @@ -2780,7 +2924,7 @@ EXPORT_SYMBOL_GPL(gpiochip_remove_pin_ranges); */ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) { - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; int ret; unsigned long flags; unsigned offset; @@ -2806,12 +2950,12 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) goto done; } - if (chip->request) { - /* chip->request may sleep */ + if (gc->request) { + /* gc->request may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); offset = gpio_chip_hwgpio(desc); - if (gpiochip_line_is_valid(chip, offset)) - ret = chip->request(chip, offset); + if (gpiochip_line_is_valid(gc, offset)) + ret = gc->request(gc, offset); else ret = -EINVAL; spin_lock_irqsave(&gpio_lock, flags); @@ -2823,14 +2967,16 @@ static int gpiod_request_commit(struct gpio_desc *desc, const char *label) goto done; } } - if (chip->get_direction) { - /* chip->get_direction may sleep */ + if (gc->get_direction) { + /* gc->get_direction may sleep */ spin_unlock_irqrestore(&gpio_lock, flags); gpiod_get_direction(desc); spin_lock_irqsave(&gpio_lock, flags); } done: spin_unlock_irqrestore(&gpio_lock, flags); + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_REQUESTED, desc); return ret; } @@ -2898,7 +3044,7 @@ static bool gpiod_free_commit(struct gpio_desc *desc) { bool ret = false; unsigned long flags; - struct gpio_chip *chip; + struct gpio_chip *gc; might_sleep(); @@ -2906,12 +3052,12 @@ static bool gpiod_free_commit(struct gpio_desc *desc) 
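
The atomic_notifier_call_chain() calls added to gpiod_request_commit() above and gpiod_free_commit() below are the producer side of the line-status events that lineinfo_watch_read() delivers; gpio_chrdev_open() registered lineinfo_changed_nb as the consumer. That callback is not part of this hunk; a plausible shape for it, relying on gpiolib.c's internal gpio_chardev_data, gpio_chip_hwgpio() and existing includes, is:

	static int lineinfo_changed_notify(struct notifier_block *nb,
					   unsigned long action, void *data)
	{
		struct gpio_chardev_data *priv =
			container_of(nb, struct gpio_chardev_data,
				     lineinfo_changed_nb);
		struct gpioline_info_changed chg;
		struct gpio_desc *desc = data;

		if (!test_bit(gpio_chip_hwgpio(desc), priv->watched_lines))
			return NOTIFY_DONE;

		memset(&chg, 0, sizeof(chg));
		chg.info.line_offset = gpio_chip_hwgpio(desc);
		/* a full version would also fill name, consumer and flags */
		chg.event_type = action;
		chg.timestamp = ktime_get_ns();

		if (kfifo_in_spinlocked(&priv->events, &chg, 1,
					&priv->wait.lock))
			wake_up_poll(&priv->wait, EPOLLIN);

		return NOTIFY_OK;
	}
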
spin_lock_irqsave(&gpio_lock, flags); - chip = desc->gdev->chip; - if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { - if (chip->free) { + gc = desc->gdev->chip; + if (gc && test_bit(FLAG_REQUESTED, &desc->flags)) { + if (gc->free) { spin_unlock_irqrestore(&gpio_lock, flags); - might_sleep_if(chip->can_sleep); - chip->free(chip, gpio_chip_hwgpio(desc)); + might_sleep_if(gc->can_sleep); + gc->free(gc, gpio_chip_hwgpio(desc)); spin_lock_irqsave(&gpio_lock, flags); } kfree_const(desc->label); @@ -2924,10 +3070,16 @@ static bool gpiod_free_commit(struct gpio_desc *desc) clear_bit(FLAG_PULL_DOWN, &desc->flags); clear_bit(FLAG_BIAS_DISABLE, &desc->flags); clear_bit(FLAG_IS_HOGGED, &desc->flags); +#ifdef CONFIG_OF_DYNAMIC + desc->hog = NULL; +#endif ret = true; } spin_unlock_irqrestore(&gpio_lock, flags); + atomic_notifier_call_chain(&desc->gdev->notifier, + GPIOLINE_CHANGED_RELEASED, desc); + return ret; } @@ -2943,7 +3095,7 @@ void gpiod_free(struct gpio_desc *desc) /** * gpiochip_is_requested - return string iff signal was requested - * @chip: controller managing the signal + * @gc: controller managing the signal * @offset: of signal within controller's 0..(ngpio - 1) range * * Returns NULL if the GPIO is not currently requested, else a string. @@ -2954,14 +3106,16 @@ void gpiod_free(struct gpio_desc *desc) * help with diagnostics, and knowing that the signal is used as a GPIO * can help avoid accidentally multiplexing it to another controller. */ -const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset) +const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned offset) { struct gpio_desc *desc; - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return NULL; - desc = &chip->gpiodev->descs[offset]; + desc = gpiochip_get_desc(gc, offset); + if (IS_ERR(desc)) + return NULL; if (test_bit(FLAG_REQUESTED, &desc->flags) == 0) return NULL; @@ -2971,7 +3125,7 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested); /** * gpiochip_request_own_desc - Allow GPIO chip to request its own descriptor - * @chip: GPIO chip + * @gc: GPIO chip * @hwnum: hardware number of the GPIO for which to request the descriptor * @label: label for the GPIO * @lflags: lookup flags for this GPIO or 0 if default, this can be used to @@ -2990,17 +3144,17 @@ EXPORT_SYMBOL_GPL(gpiochip_is_requested); * A pointer to the GPIO descriptor, or an ERR_PTR()-encoded negative error * code on failure. 
*/ -struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, +struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc, unsigned int hwnum, const char *label, enum gpio_lookup_flags lflags, enum gpiod_flags dflags) { - struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum); + struct gpio_desc *desc = gpiochip_get_desc(gc, hwnum); int ret; if (IS_ERR(desc)) { - chip_err(chip, "failed to get GPIO descriptor\n"); + chip_err(gc, "failed to get GPIO descriptor\n"); return desc; } @@ -3010,7 +3164,7 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, ret = gpiod_configure_flags(desc, label, lflags, dflags); if (ret) { - chip_err(chip, "setup of own GPIO %s failed\n", label); + chip_err(gc, "setup of own GPIO %s failed\n", label); gpiod_free_commit(desc); return ERR_PTR(ret); } @@ -3052,9 +3206,9 @@ static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset, return gc->set_config(gc, offset, config); } -static int gpio_set_config(struct gpio_chip *gc, unsigned int offset, - enum pin_config_param mode) +static int gpio_set_config(struct gpio_desc *desc, enum pin_config_param mode) { + struct gpio_chip *gc = desc->gdev->chip; unsigned long config; unsigned arg; @@ -3069,10 +3223,10 @@ static int gpio_set_config(struct gpio_chip *gc, unsigned int offset, } config = PIN_CONF_PACKED(mode, arg); - return gpio_do_set_config(gc, offset, config); + return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config); } -static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc) +static int gpio_set_bias(struct gpio_desc *desc) { int bias = 0; int ret = 0; @@ -3085,7 +3239,7 @@ static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc) bias = PIN_CONFIG_BIAS_PULL_DOWN; if (bias) { - ret = gpio_set_config(chip, gpio_chip_hwgpio(desc), bias); + ret = gpio_set_config(desc, bias); if (ret != -ENOTSUPP) return ret; } @@ -3103,18 +3257,18 @@ static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc) */ int gpiod_direction_input(struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; int ret = 0; VALIDATE_DESC(desc); - chip = desc->gdev->chip; + gc = desc->gdev->chip; /* * It is legal to have no .get() and .direction_input() specified if * the chip is output-only, but you can't specify .direction_input() * and not support the .get() operation, that doesn't make sense. */ - if (!chip->get && chip->direction_input) { + if (!gc->get && gc->direction_input) { gpiod_warn(desc, "%s: missing get() but have direction_input()\n", __func__); @@ -3127,10 +3281,10 @@ int gpiod_direction_input(struct gpio_desc *desc) * direction (if .get_direction() is supported) else we silently * assume we are in input mode after this. 
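
gpiochip_request_own_desc() is the path hogs and other in-chip users take to claim a line without going through the consumer lookup tables. A short sketch; the hwnum, label and flags are made up, and the line is handed back with gpiochip_free_own_desc():

	#include <linux/err.h>
	#include <linux/gpio/consumer.h>
	#include <linux/gpio/driver.h>
	#include <linux/gpio/machine.h>

	static int my_claim_internal_line(struct gpio_chip *gc)
	{
		struct gpio_desc *desc;

		desc = gpiochip_request_own_desc(gc, 5, "internal-reset",
						 GPIO_ACTIVE_HIGH,
						 GPIOD_OUT_LOW);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		gpiod_set_value_cansleep(desc, 1);

		/* lines taken this way are returned with the matching helper */
		gpiochip_free_own_desc(desc);
		return 0;
	}
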
*/ - if (chip->direction_input) { - ret = chip->direction_input(chip, gpio_chip_hwgpio(desc)); - } else if (chip->get_direction && - (chip->get_direction(chip, gpio_chip_hwgpio(desc)) != 1)) { + if (gc->direction_input) { + ret = gc->direction_input(gc, gpio_chip_hwgpio(desc)); + } else if (gc->get_direction && + (gc->get_direction(gc, gpio_chip_hwgpio(desc)) != 1)) { gpiod_warn(desc, "%s: missing direction_input() operation and line is output\n", __func__); @@ -3138,7 +3292,7 @@ int gpiod_direction_input(struct gpio_desc *desc) } if (ret == 0) { clear_bit(FLAG_IS_OUT, &desc->flags); - ret = gpio_set_bias(chip, desc); + ret = gpio_set_bias(desc); } trace_gpio_direction(desc_to_gpio(desc), 1, ret); @@ -3222,7 +3376,6 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output_raw); */ int gpiod_direction_output(struct gpio_desc *desc, int value) { - struct gpio_chip *gc; int ret; VALIDATE_DESC(desc); @@ -3240,11 +3393,9 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) return -EIO; } - gc = desc->gdev->chip; if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { /* First see if we can enable open drain in hardware */ - ret = gpio_set_config(gc, gpio_chip_hwgpio(desc), - PIN_CONFIG_DRIVE_OPEN_DRAIN); + ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_DRAIN); if (!ret) goto set_output_value; /* Emulate open drain by not actively driving the line high */ @@ -3254,8 +3405,7 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) } } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { - ret = gpio_set_config(gc, gpio_chip_hwgpio(desc), - PIN_CONFIG_DRIVE_OPEN_SOURCE); + ret = gpio_set_config(desc, PIN_CONFIG_DRIVE_OPEN_SOURCE); if (!ret) goto set_output_value; /* Emulate open source by not actively driving the line low */ @@ -3264,12 +3414,11 @@ int gpiod_direction_output(struct gpio_desc *desc, int value) goto set_output_flag; } } else { - gpio_set_config(gc, gpio_chip_hwgpio(desc), - PIN_CONFIG_DRIVE_PUSH_PULL); + gpio_set_config(desc, PIN_CONFIG_DRIVE_PUSH_PULL); } set_output_value: - ret = gpio_set_bias(gc, desc); + ret = gpio_set_bias(desc); if (ret) return ret; return gpiod_direction_output_raw_commit(desc, value); @@ -3288,6 +3437,26 @@ set_output_flag: EXPORT_SYMBOL_GPL(gpiod_direction_output); /** + * gpiod_set_config - sets @config for a GPIO + * @desc: descriptor of the GPIO for which to set the configuration + * @config: Same packed config format as generic pinconf + * + * Returns: + * 0 on success, %-ENOTSUPP if the controller doesn't support setting the + * configuration. 
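
The reworked gpio_set_config() now derives the chip and hardware offset from the descriptor, and the new gpiod_set_config() below exposes the same packed pinconf format to consumers. A sketch of a consumer asking for a pull-up, assuming gpiod_set_config() is declared in <linux/gpio/consumer.h> and that the controller's .set_config accepts the parameter (otherwise -ENOTSUPP comes back):

	#include <linux/gpio/consumer.h>
	#include <linux/pinctrl/pinconf-generic.h>

	static int my_enable_pull_up(struct gpio_desc *desc)
	{
		/* argument 0: use the controller's default pull strength */
		unsigned long config = PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 0);

		return gpiod_set_config(desc, config);
	}
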
+ */ +int gpiod_set_config(struct gpio_desc *desc, unsigned long config) +{ + struct gpio_chip *gc; + + VALIDATE_DESC(desc); + gc = desc->gdev->chip; + + return gpio_do_set_config(gc, gpio_chip_hwgpio(desc), config); +} +EXPORT_SYMBOL_GPL(gpiod_set_config); + +/** * gpiod_set_debounce - sets @debounce time for a GPIO * @desc: descriptor of the GPIO for which to set debounce time * @debounce: debounce time in microseconds @@ -3298,14 +3467,10 @@ EXPORT_SYMBOL_GPL(gpiod_direction_output); */ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) { - struct gpio_chip *chip; - unsigned long config; - - VALIDATE_DESC(desc); - chip = desc->gdev->chip; + unsigned long config; config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce); - return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config); + return gpiod_set_config(desc, config); } EXPORT_SYMBOL_GPL(gpiod_set_debounce); @@ -3319,7 +3484,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_debounce); */ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) { - struct gpio_chip *chip; + struct gpio_chip *gc; unsigned long packed; int gpio; int rc; @@ -3332,14 +3497,14 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) assign_bit(FLAG_TRANSITORY, &desc->flags, transitory); /* If the driver supports it, set the persistence state now */ - chip = desc->gdev->chip; - if (!chip->set_config) + gc = desc->gdev->chip; + if (!gc->set_config) return 0; packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE, !transitory); gpio = gpio_chip_hwgpio(desc); - rc = gpio_do_set_config(chip, gpio, packed); + rc = gpio_do_set_config(gc, gpio, packed); if (rc == -ENOTSUPP) { dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n", gpio); @@ -3398,28 +3563,28 @@ EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); static int gpiod_get_raw_value_commit(const struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; int offset; int value; - chip = desc->gdev->chip; + gc = desc->gdev->chip; offset = gpio_chip_hwgpio(desc); - value = chip->get ? chip->get(chip, offset) : -EIO; + value = gc->get ? gc->get(gc, offset) : -EIO; value = value < 0 ? value : !!value; trace_gpio_value(desc_to_gpio(desc), 1, value); return value; } -static int gpio_chip_get_multiple(struct gpio_chip *chip, +static int gpio_chip_get_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - if (chip->get_multiple) { - return chip->get_multiple(chip, mask, bits); - } else if (chip->get) { + if (gc->get_multiple) { + return gc->get_multiple(gc, mask, bits); + } else if (gc->get) { int i, value; - for_each_set_bit(i, mask, chip->ngpio) { - value = chip->get(chip, i); + for_each_set_bit(i, mask, gc->ngpio) { + value = gc->get(gc, i); if (value < 0) return value; __assign_bit(i, bits, value); @@ -3467,26 +3632,26 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, } while (i < array_size) { - struct gpio_chip *chip = desc_array[i]->gdev->chip; + struct gpio_chip *gc = desc_array[i]->gdev->chip; unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)]; unsigned long *mask, *bits; int first, j, ret; - if (likely(chip->ngpio <= FASTPATH_NGPIO)) { + if (likely(gc->ngpio <= FASTPATH_NGPIO)) { mask = fastpath; } else { - mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio), + mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio), sizeof(*mask), can_sleep ? 
GFP_KERNEL : GFP_ATOMIC); if (!mask) return -ENOMEM; } - bits = mask + BITS_TO_LONGS(chip->ngpio); - bitmap_zero(mask, chip->ngpio); + bits = mask + BITS_TO_LONGS(gc->ngpio); + bitmap_zero(mask, gc->ngpio); if (!can_sleep) - WARN_ON(chip->can_sleep); + WARN_ON(gc->can_sleep); /* collect all inputs belonging to the same chip */ first = i; @@ -3501,9 +3666,9 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, i = find_next_zero_bit(array_info->get_mask, array_size, i); } while ((i < array_size) && - (desc_array[i]->gdev->chip == chip)); + (desc_array[i]->gdev->chip == gc)); - ret = gpio_chip_get_multiple(chip, mask, bits); + ret = gpio_chip_get_multiple(gc, mask, bits); if (ret) { if (mask != fastpath) kfree(mask); @@ -3641,13 +3806,13 @@ EXPORT_SYMBOL_GPL(gpiod_get_array_value); static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value) { int ret = 0; - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; int offset = gpio_chip_hwgpio(desc); if (value) { - ret = chip->direction_input(chip, offset); + ret = gc->direction_input(gc, offset); } else { - ret = chip->direction_output(chip, offset, 0); + ret = gc->direction_output(gc, offset, 0); if (!ret) set_bit(FLAG_IS_OUT, &desc->flags); } @@ -3666,15 +3831,15 @@ static void gpio_set_open_drain_value_commit(struct gpio_desc *desc, bool value) static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value) { int ret = 0; - struct gpio_chip *chip = desc->gdev->chip; + struct gpio_chip *gc = desc->gdev->chip; int offset = gpio_chip_hwgpio(desc); if (value) { - ret = chip->direction_output(chip, offset, 1); + ret = gc->direction_output(gc, offset, 1); if (!ret) set_bit(FLAG_IS_OUT, &desc->flags); } else { - ret = chip->direction_input(chip, offset); + ret = gc->direction_input(gc, offset); } trace_gpio_direction(desc_to_gpio(desc), !value, ret); if (ret < 0) @@ -3685,33 +3850,34 @@ static void gpio_set_open_source_value_commit(struct gpio_desc *desc, bool value static void gpiod_set_raw_value_commit(struct gpio_desc *desc, bool value) { - struct gpio_chip *chip; + struct gpio_chip *gc; - chip = desc->gdev->chip; + gc = desc->gdev->chip; trace_gpio_value(desc_to_gpio(desc), 0, value); - chip->set(chip, gpio_chip_hwgpio(desc), value); + gc->set(gc, gpio_chip_hwgpio(desc), value); } /* * set multiple outputs on the same chip; * use the chip's set_multiple function if available; * otherwise set the outputs sequentially; + * @chip: the GPIO chip we operate on * @mask: bit mask array; one bit per output; BITS_PER_LONG bits per word * defines which outputs are to be changed * @bits: bit value array; one bit per output; BITS_PER_LONG bits per word * defines the values the outputs specified by mask are to be set to */ -static void gpio_chip_set_multiple(struct gpio_chip *chip, +static void gpio_chip_set_multiple(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits) { - if (chip->set_multiple) { - chip->set_multiple(chip, mask, bits); + if (gc->set_multiple) { + gc->set_multiple(gc, mask, bits); } else { unsigned int i; /* set outputs if the corresponding mask bit is set */ - for_each_set_bit(i, mask, chip->ngpio) - chip->set(chip, i, test_bit(i, bits)); + for_each_set_bit(i, mask, gc->ngpio) + gc->set(gc, i, test_bit(i, bits)); } } @@ -3750,26 +3916,26 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, } while (i < array_size) { - struct gpio_chip *chip = desc_array[i]->gdev->chip; + struct gpio_chip *gc = desc_array[i]->gdev->chip; unsigned long 
fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)]; unsigned long *mask, *bits; int count = 0; - if (likely(chip->ngpio <= FASTPATH_NGPIO)) { + if (likely(gc->ngpio <= FASTPATH_NGPIO)) { mask = fastpath; } else { - mask = kmalloc_array(2 * BITS_TO_LONGS(chip->ngpio), + mask = kmalloc_array(2 * BITS_TO_LONGS(gc->ngpio), sizeof(*mask), can_sleep ? GFP_KERNEL : GFP_ATOMIC); if (!mask) return -ENOMEM; } - bits = mask + BITS_TO_LONGS(chip->ngpio); - bitmap_zero(mask, chip->ngpio); + bits = mask + BITS_TO_LONGS(gc->ngpio); + bitmap_zero(mask, gc->ngpio); if (!can_sleep) - WARN_ON(chip->can_sleep); + WARN_ON(gc->can_sleep); do { struct gpio_desc *desc = desc_array[i]; @@ -3805,10 +3971,10 @@ int gpiod_set_array_value_complex(bool raw, bool can_sleep, i = find_next_zero_bit(array_info->set_mask, array_size, i); } while ((i < array_size) && - (desc_array[i]->gdev->chip == chip)); + (desc_array[i]->gdev->chip == gc)); /* push collected bits to outputs */ if (count != 0) - gpio_chip_set_multiple(chip, mask, bits); + gpio_chip_set_multiple(gc, mask, bits); if (mask != fastpath) kfree(mask); @@ -3970,7 +4136,7 @@ EXPORT_SYMBOL_GPL(gpiod_set_consumer_name); */ int gpiod_to_irq(const struct gpio_desc *desc) { - struct gpio_chip *chip; + struct gpio_chip *gc; int offset; /* @@ -3981,10 +4147,10 @@ int gpiod_to_irq(const struct gpio_desc *desc) if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip) return -EINVAL; - chip = desc->gdev->chip; + gc = desc->gdev->chip; offset = gpio_chip_hwgpio(desc); - if (chip->to_irq) { - int retirq = chip->to_irq(chip, offset); + if (gc->to_irq) { + int retirq = gc->to_irq(gc, offset); /* Zero means NO_IRQ */ if (!retirq) @@ -3998,17 +4164,17 @@ EXPORT_SYMBOL_GPL(gpiod_to_irq); /** * gpiochip_lock_as_irq() - lock a GPIO to be used as IRQ - * @chip: the chip the GPIO to lock belongs to + * @gc: the chip the GPIO to lock belongs to * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to lock down * a certain GPIO line to be used for IRQs. */ -int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) +int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; - desc = gpiochip_get_desc(chip, offset); + desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return PTR_ERR(desc); @@ -4016,18 +4182,18 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) * If it's fast: flush the direction setting if something changed * behind our back */ - if (!chip->can_sleep && chip->get_direction) { + if (!gc->can_sleep && gc->get_direction) { int dir = gpiod_get_direction(desc); if (dir < 0) { - chip_err(chip, "%s: cannot get GPIO direction\n", + chip_err(gc, "%s: cannot get GPIO direction\n", __func__); return dir; } } if (test_bit(FLAG_IS_OUT, &desc->flags)) { - chip_err(chip, + chip_err(gc, "%s: tried to flag a GPIO set as output for IRQ\n", __func__); return -EIO; @@ -4050,17 +4216,17 @@ EXPORT_SYMBOL_GPL(gpiochip_lock_as_irq); /** * gpiochip_unlock_as_irq() - unlock a GPIO used as IRQ - * @chip: the chip the GPIO to lock belongs to + * @gc: the chip the GPIO to lock belongs to * @offset: the offset of the GPIO to lock as IRQ * * This is used directly by GPIO drivers that want to indicate * that a certain GPIO is no longer used exclusively for IRQ. 
*/ -void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset) { struct gpio_desc *desc; - desc = gpiochip_get_desc(chip, offset); + desc = gpiochip_get_desc(gc, offset); if (IS_ERR(desc)) return; @@ -4073,9 +4239,9 @@ void gpiochip_unlock_as_irq(struct gpio_chip *chip, unsigned int offset) } EXPORT_SYMBOL_GPL(gpiochip_unlock_as_irq); -void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_disable_irq(struct gpio_chip *gc, unsigned int offset) { - struct gpio_desc *desc = gpiochip_get_desc(chip, offset); + struct gpio_desc *desc = gpiochip_get_desc(gc, offset); if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) @@ -4083,9 +4249,9 @@ void gpiochip_disable_irq(struct gpio_chip *chip, unsigned int offset) } EXPORT_SYMBOL_GPL(gpiochip_disable_irq); -void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_enable_irq(struct gpio_chip *gc, unsigned int offset) { - struct gpio_desc *desc = gpiochip_get_desc(chip, offset); + struct gpio_desc *desc = gpiochip_get_desc(gc, offset); if (!IS_ERR(desc) && !WARN_ON(!test_bit(FLAG_USED_AS_IRQ, &desc->flags))) { @@ -4095,63 +4261,63 @@ void gpiochip_enable_irq(struct gpio_chip *chip, unsigned int offset) } EXPORT_SYMBOL_GPL(gpiochip_enable_irq); -bool gpiochip_line_is_irq(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return test_bit(FLAG_USED_AS_IRQ, &chip->gpiodev->descs[offset].flags); + return test_bit(FLAG_USED_AS_IRQ, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_irq); -int gpiochip_reqres_irq(struct gpio_chip *chip, unsigned int offset) +int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset) { int ret; - if (!try_module_get(chip->gpiodev->owner)) + if (!try_module_get(gc->gpiodev->owner)) return -ENODEV; - ret = gpiochip_lock_as_irq(chip, offset); + ret = gpiochip_lock_as_irq(gc, offset); if (ret) { - chip_err(chip, "unable to lock HW IRQ %u for IRQ\n", offset); - module_put(chip->gpiodev->owner); + chip_err(gc, "unable to lock HW IRQ %u for IRQ\n", offset); + module_put(gc->gpiodev->owner); return ret; } return 0; } EXPORT_SYMBOL_GPL(gpiochip_reqres_irq); -void gpiochip_relres_irq(struct gpio_chip *chip, unsigned int offset) +void gpiochip_relres_irq(struct gpio_chip *gc, unsigned int offset) { - gpiochip_unlock_as_irq(chip, offset); - module_put(chip->gpiodev->owner); + gpiochip_unlock_as_irq(gc, offset); + module_put(gc->gpiodev->owner); } EXPORT_SYMBOL_GPL(gpiochip_relres_irq); -bool gpiochip_line_is_open_drain(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_open_drain(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return test_bit(FLAG_OPEN_DRAIN, &chip->gpiodev->descs[offset].flags); + return test_bit(FLAG_OPEN_DRAIN, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_open_drain); -bool gpiochip_line_is_open_source(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_open_source(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return test_bit(FLAG_OPEN_SOURCE, &chip->gpiodev->descs[offset].flags); + return test_bit(FLAG_OPEN_SOURCE, &gc->gpiodev->descs[offset].flags); } 
EXPORT_SYMBOL_GPL(gpiochip_line_is_open_source); -bool gpiochip_line_is_persistent(struct gpio_chip *chip, unsigned int offset) +bool gpiochip_line_is_persistent(struct gpio_chip *gc, unsigned int offset) { - if (offset >= chip->ngpio) + if (offset >= gc->ngpio) return false; - return !test_bit(FLAG_TRANSITORY, &chip->gpiodev->descs[offset].flags); + return !test_bit(FLAG_TRANSITORY, &gc->gpiodev->descs[offset].flags); } EXPORT_SYMBOL_GPL(gpiochip_line_is_persistent); @@ -4389,7 +4555,7 @@ EXPORT_SYMBOL_GPL(gpiod_remove_lookup_table); */ void gpiod_add_hogs(struct gpiod_hog *hogs) { - struct gpio_chip *chip; + struct gpio_chip *gc; struct gpiod_hog *hog; mutex_lock(&gpio_machine_hogs_mutex); @@ -4401,9 +4567,9 @@ void gpiod_add_hogs(struct gpiod_hog *hogs) * The chip may have been registered earlier, so check if it * exists and, if so, try to hog the line now. */ - chip = find_chip_by_name(hog->chip_label); - if (chip) - gpiochip_machine_hog(chip, hog); + gc = find_chip_by_name(hog->chip_label); + if (gc) + gpiochip_machine_hog(gc, hog); } mutex_unlock(&gpio_machine_hogs_mutex); @@ -4453,7 +4619,7 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, return desc; for (p = &table->table[0]; p->chip_label; p++) { - struct gpio_chip *chip; + struct gpio_chip *gc; /* idx must always match exactly */ if (p->idx != idx) @@ -4463,9 +4629,9 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, if (p->con_id && (!con_id || strcmp(p->con_id, con_id))) continue; - chip = find_chip_by_name(p->chip_label); + gc = find_chip_by_name(p->chip_label); - if (!chip) { + if (!gc) { /* * As the lookup table indicates a chip with * p->chip_label should exist, assume it may @@ -4478,15 +4644,15 @@ static struct gpio_desc *gpiod_find(struct device *dev, const char *con_id, return ERR_PTR(-EPROBE_DEFER); } - if (chip->ngpio <= p->chip_hwnum) { + if (gc->ngpio <= p->chip_hwnum) { dev_err(dev, "requested GPIO %u (%u) is out of range [0..%u] for chip %s\n", - idx, p->chip_hwnum, chip->ngpio - 1, - chip->label); + idx, p->chip_hwnum, gc->ngpio - 1, + gc->label); return ERR_PTR(-EINVAL); } - desc = gpiochip_get_desc(chip, p->chip_hwnum); + desc = gpiochip_get_desc(gc, p->chip_hwnum); *flags = p->flags; return desc; @@ -4881,20 +5047,20 @@ EXPORT_SYMBOL_GPL(gpiod_get_index_optional); int gpiod_hog(struct gpio_desc *desc, const char *name, unsigned long lflags, enum gpiod_flags dflags) { - struct gpio_chip *chip; + struct gpio_chip *gc; struct gpio_desc *local_desc; int hwnum; int ret; - chip = gpiod_to_chip(desc); + gc = gpiod_to_chip(desc); hwnum = gpio_chip_hwgpio(desc); - local_desc = gpiochip_request_own_desc(chip, hwnum, name, + local_desc = gpiochip_request_own_desc(gc, hwnum, name, lflags, dflags); if (IS_ERR(local_desc)) { ret = PTR_ERR(local_desc); pr_err("requesting hog GPIO %s (chip %s, offset %d) failed, %d\n", - name, chip->label, hwnum, ret); + name, gc->label, hwnum, ret); return ret; } @@ -4912,15 +5078,15 @@ int gpiod_hog(struct gpio_desc *desc, const char *name, /** * gpiochip_free_hogs - Scan gpio-controller chip and release GPIO hog - * @chip: gpio chip to act on + * @gc: gpio chip to act on */ -static void gpiochip_free_hogs(struct gpio_chip *chip) +static void gpiochip_free_hogs(struct gpio_chip *gc) { int id; - for (id = 0; id < chip->ngpio; id++) { - if (test_bit(FLAG_IS_HOGGED, &chip->gpiodev->descs[id].flags)) - gpiochip_free_own_desc(&chip->gpiodev->descs[id]); + for (id = 0; id < gc->ngpio; id++) { + if (test_bit(FLAG_IS_HOGGED, 
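
gpiod_add_hogs() queues board-level hog descriptions and gpiochip_machine_hog() applies them when (or if) a chip with a matching label registers; gpiochip_free_hogs() below undoes them at removal time. A sketch of a board file feeding it, with made-up label, offset and line name:

	#include <linux/gpio/consumer.h>
	#include <linux/gpio/machine.h>
	#include <linux/init.h>

	static struct gpiod_hog board_hogs[] = {
		{
			.chip_label = "my-gpio",	/* must match gc->label */
			.chip_hwnum = 3,
			.line_name  = "power-enable",
			.lflags     = GPIO_ACTIVE_HIGH,
			.dflags     = GPIOD_OUT_HIGH,
		},
		{ /* sentinel: a NULL chip_label ends the array */ },
	};

	static void __init my_board_init(void)
	{
		gpiod_add_hogs(board_hogs);
	}
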
&gc->gpiodev->descs[id].flags)) + gpiochip_free_own_desc(&gc->gpiodev->descs[id]); } } @@ -4943,7 +5109,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, struct gpio_desc *desc; struct gpio_descs *descs; struct gpio_array *array_info = NULL; - struct gpio_chip *chip; + struct gpio_chip *gc; int count, bitmap_size; count = gpiod_count(dev, con_id); @@ -4963,7 +5129,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, descs->desc[descs->ndescs] = desc; - chip = gpiod_to_chip(desc); + gc = gpiod_to_chip(desc); /* * If pin hardware number of array member 0 is also 0, select * its chip as a candidate for fast bitmap processing path. @@ -4971,8 +5137,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, if (descs->ndescs == 0 && gpio_chip_hwgpio(desc) == 0) { struct gpio_descs *array; - bitmap_size = BITS_TO_LONGS(chip->ngpio > count ? - chip->ngpio : count); + bitmap_size = BITS_TO_LONGS(gc->ngpio > count ? + gc->ngpio : count); array = kzalloc(struct_size(descs, desc, count) + struct_size(array_info, invert_mask, @@ -4995,7 +5161,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, array_info->desc = descs->desc; array_info->size = count; - array_info->chip = chip; + array_info->chip = gc; bitmap_set(array_info->get_mask, descs->ndescs, count - descs->ndescs); bitmap_set(array_info->set_mask, descs->ndescs, @@ -5003,7 +5169,7 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, descs->info = array_info; } /* Unmark array members which don't belong to the 'fast' chip */ - if (array_info && array_info->chip != chip) { + if (array_info && array_info->chip != gc) { __clear_bit(descs->ndescs, array_info->get_mask); __clear_bit(descs->ndescs, array_info->set_mask); } @@ -5028,8 +5194,8 @@ struct gpio_descs *__must_check gpiod_get_array(struct device *dev, } } else if (array_info) { /* Exclude open drain or open source from fast output */ - if (gpiochip_line_is_open_drain(chip, descs->ndescs) || - gpiochip_line_is_open_source(chip, descs->ndescs)) + if (gpiochip_line_is_open_drain(gc, descs->ndescs) || + gpiochip_line_is_open_source(gc, descs->ndescs)) __clear_bit(descs->ndescs, array_info->set_mask); /* Identify 'fast' pins which require invertion */ @@ -5117,10 +5283,15 @@ static int __init gpiolib_dev_init(void) if (ret < 0) { pr_err("gpiolib: failed to allocate char dev region\n"); bus_unregister(&gpio_bus_type); - } else { - gpiolib_initialized = true; - gpiochip_setup_devs(); + return ret; } + + gpiolib_initialized = true; + gpiochip_setup_devs(); + + if (IS_ENABLED(CONFIG_OF_DYNAMIC)) + WARN_ON(of_reconfig_notifier_register(&gpio_of_notifier)); + return ret; } core_initcall(gpiolib_dev_init); @@ -5130,7 +5301,7 @@ core_initcall(gpiolib_dev_init); static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) { unsigned i; - struct gpio_chip *chip = gdev->chip; + struct gpio_chip *gc = gdev->chip; unsigned gpio = gdev->base; struct gpio_desc *gdesc = &gdev->descs[0]; bool is_out; @@ -5153,7 +5324,7 @@ static void gpiolib_dbg_show(struct seq_file *s, struct gpio_device *gdev) seq_printf(s, " gpio-%-3d (%-20.20s|%-20.20s) %s %s %s%s", gpio, gdesc->name ? gdesc->name : "", gdesc->label, is_out ? "out" : "in ", - chip->get ? (chip->get(chip, i) ? "hi" : "lo") : "? ", + gc->get ? (gc->get(gc, i) ? "hi" : "lo") : "? ", is_irq ? "IRQ " : "", active_low ? 
"ACTIVE LOW" : ""); seq_printf(s, "\n"); @@ -5205,10 +5376,10 @@ static void gpiolib_seq_stop(struct seq_file *s, void *v) static int gpiolib_seq_show(struct seq_file *s, void *v) { struct gpio_device *gdev = v; - struct gpio_chip *chip = gdev->chip; + struct gpio_chip *gc = gdev->chip; struct device *parent; - if (!chip) { + if (!gc) { seq_printf(s, "%s%s: (dangling chip)", (char *)s->private, dev_name(&gdev->dev)); return 0; @@ -5217,19 +5388,19 @@ static int gpiolib_seq_show(struct seq_file *s, void *v) seq_printf(s, "%s%s: GPIOs %d-%d", (char *)s->private, dev_name(&gdev->dev), gdev->base, gdev->base + gdev->ngpio - 1); - parent = chip->parent; + parent = gc->parent; if (parent) seq_printf(s, ", parent: %s/%s", parent->bus ? parent->bus->name : "no-bus", dev_name(parent)); - if (chip->label) - seq_printf(s, ", %s", chip->label); - if (chip->can_sleep) + if (gc->label) + seq_printf(s, ", %s", gc->label); + if (gc->can_sleep) seq_printf(s, ", can sleep"); seq_printf(s, ":\n"); - if (chip->dbg_show) - chip->dbg_show(s, chip); + if (gc->dbg_show) + gc->dbg_show(s, gc); else gpiolib_dbg_show(s, gdev); diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index 3e0aab2945d8..853ce681b4a4 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -56,6 +56,7 @@ struct gpio_device { const char *label; void *data; struct list_head list; + struct atomic_notifier_head notifier; #ifdef CONFIG_PINCTRL /* @@ -119,6 +120,9 @@ struct gpio_desc { const char *label; /* Name of the GPIO */ const char *name; +#ifdef CONFIG_OF_DYNAMIC + struct device_node *hog; +#endif }; int gpiod_request(struct gpio_desc *desc, const char *label); diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile index f17d01f076c7..835c88318cec 100644 --- a/drivers/gpu/Makefile +++ b/drivers/gpu/Makefile @@ -5,3 +5,4 @@ obj-$(CONFIG_TEGRA_HOST1X) += host1x/ obj-y += drm/ vga/ obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/ +obj-$(CONFIG_TRACE_GPU_MEM) += trace/ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index c8f2aa1db13b..f6e3f59efa2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -1113,7 +1113,7 @@ static int gfx_v10_0_mec_init(struct amdgpu_device *adev) return r; } - memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); + memset(hpd, 0, mec_hpd_size); amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 37c8231f1407..608ffe3b684e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1946,7 +1946,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev) return r; } - memset(hpd, 0, adev->gfx.mec.hpd_eop_obj->tbo.mem.size); + memset(hpd, 0, mec_hpd_size); amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj); amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index d5386f15c4a5..05bc6d96ec52 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1112,9 +1112,9 @@ kfd_gtt_out: return 0; kfd_gtt_no_free_chunk: - pr_debug("Allocation failed with mem_obj = %p\n", mem_obj); + pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj); mutex_unlock(&kfd->gtt_sa_lock); - kfree(mem_obj); + kfree(*mem_obj); return -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d3674d805a0a..bab587ab6e8d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3639,6 +3639,9 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, case DRM_FORMAT_NV12: plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; break; + case DRM_FORMAT_P010: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; + break; default: DRM_ERROR( "Unsupported screen format %s\n", @@ -5535,6 +5538,8 @@ static int get_plane_formats(const struct drm_plane *plane, if (plane_cap && plane_cap->pixel_format_support.nv12) formats[num_formats++] = DRM_FORMAT_NV12; + if (plane_cap && plane_cap->pixel_format_support.p010) + formats[num_formats++] = DRM_FORMAT_P010; break; case DRM_PLANE_TYPE_OVERLAY: @@ -5587,12 +5592,15 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, } if (plane->type == DRM_PLANE_TYPE_PRIMARY && - plane_cap && plane_cap->pixel_format_support.nv12) { + plane_cap && + (plane_cap->pixel_format_support.nv12 || + plane_cap->pixel_format_support.p010)) { /* This only affects YUV formats. */ drm_plane_create_color_properties( plane, BIT(DRM_COLOR_YCBCR_BT601) | - BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE), DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 5b70ed3cdb88..78e1c11d4ae5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -192,10 +192,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work, &hdcp_work->srm_version); display->adjust.disable = 0; - if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + hdcp_w->link.adjust.hdcp1.disable = 0; hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; - else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + hdcp_w->link.adjust.hdcp1.disable = 1; hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; + } schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); @@ -263,7 +266,7 @@ static void event_callback(struct work_struct *work) mutex_lock(&hdcp_work->mutex); - cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + cancel_delayed_work(&hdcp_work->callback_dwork); mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, &hdcp_work->output); @@ -344,6 +347,8 @@ static void event_watchdog_timer(struct work_struct *work) mutex_lock(&hdcp_work->mutex); + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, &hdcp_work->output); @@ -414,7 +419,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; link->dp.mst_supported = config->mst_supported; display->adjust.disable = 1; - link->adjust.auth_delay = 2; + link->adjust.auth_delay = 3; + link->adjust.hdcp1.disable = 0; hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 2ffb22177df9..f21bbb295ad3 100644 --- 
a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -1360,6 +1360,26 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context) return (result == DC_OK); } +static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context) +{ + int i; + struct pipe_ctx *pipe; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->plane_state) + continue; + + /* Must set to false to start with, due to OR in update function */ + pipe->plane_state->status.is_flip_pending = false; + dc->hwss.update_pending_status(pipe); + if (pipe->plane_state->status.is_flip_pending) + return true; + } + return false; +} + bool dc_post_update_surfaces_to_stream(struct dc *dc) { int i; @@ -1370,6 +1390,9 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc) post_surface_trace(dc); + if (is_flip_pending_in_pipes(dc, context)) + return true; + for (i = 0; i < dc->res_pool->pipe_count; i++) if (context->res_ctx.pipe_ctx[i].stream == NULL || context->res_ctx.pipe_ctx[i].plane_state == NULL) { @@ -1703,6 +1726,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (u->coeff_reduction_factor) update_flags->bits.coeff_reduction_change = 1; + if (u->gamut_remap_matrix) + update_flags->bits.gamut_remap_change = 1; + if (u->gamma) { enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN; @@ -1728,7 +1754,8 @@ static enum surface_update_type det_surface_update(const struct dc *dc, if (update_flags->bits.input_csc_change || update_flags->bits.coeff_reduction_change - || update_flags->bits.gamma_change) { + || update_flags->bits.gamma_change + || update_flags->bits.gamut_remap_change) { type = UPDATE_TYPE_FULL; elevate_update_type(&overall_type, type); } @@ -1973,6 +2000,10 @@ static void copy_surface_update_to_plane( if (srf_update->coeff_reduction_factor) surface->coeff_reduction_factor = *srf_update->coeff_reduction_factor; + + if (srf_update->gamut_remap_matrix) + surface->gamut_remap_matrix = + *srf_update->gamut_remap_matrix; } static void copy_stream_update_to_stream(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 75c7ce4c7581..f4bcc71b2920 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1077,6 +1077,7 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) * on certain displays, such as the Sharp 4k */ pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP; + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha; pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left; pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index d3ceb39e428e..1935cf6601eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -726,6 +726,7 @@ union surface_update_flags { uint32_t output_tf_change:1; uint32_t pixel_format_change:1; uint32_t plane_size_change:1; + uint32_t gamut_remap_change:1; /* Full updates */ uint32_t new_plane:1; @@ -760,6 +761,7 @@ struct dc_plane_state { struct dc_csc_transform input_csc_color_matrix; struct fixed31_32 coeff_reduction_factor; struct fixed31_32 hdr_mult; + struct colorspace_transform gamut_remap_matrix; // TODO: No longer used, remove struct dc_hdr_static_metadata hdr_static_ctx; @@ -839,6 +841,7 @@ struct dc_surface_update { const 
struct dc_transfer_func *func_shaper; const struct dc_3dlut *lut3d_func; const struct dc_transfer_func *blend_tf; + const struct colorspace_transform *gamut_remap_matrix; }; /* diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 9cc3314966bd..0be010085575 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -2004,6 +2004,12 @@ void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx) for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) adjust.temperature_matrix[i] = pipe_ctx->stream->gamut_remap_matrix.matrix[i]; + } else if (pipe_ctx->plane_state && + pipe_ctx->plane_state->gamut_remap_matrix.enable_remap == true) { + adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW; + for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++) + adjust.temperature_matrix[i] = + pipe_ctx->plane_state->gamut_remap_matrix.matrix[i]; } pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index 63acb8ff7462..17d96ec6acd8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -343,6 +343,23 @@ void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enab } /** + * optc1_set_timing_double_buffer() - DRR double buffering control + * + * Sets double buffer point for V_TOTAL, H_TOTAL, VTOTAL_MIN, + * VTOTAL_MAX, VTOTAL_MIN_SEL and VTOTAL_MAX_SEL registers. + * + * Options: any time, start of frame, dp start of frame (range timing) + */ +void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable) +{ + struct optc *optc1 = DCN10TG_FROM_TG(optc); + uint32_t mode = enable ? 2 : 0; + + REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL, + OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mode); +} + +/** * unblank_crtc * Call ASIC Control Object to UnBlank CRTC. 
*/ @@ -1353,6 +1370,7 @@ void optc1_clear_optc_underflow(struct timing_generator *optc) void optc1_tg_init(struct timing_generator *optc) { optc1_set_blank_data_double_buffer(optc, true); + optc1_set_timing_double_buffer(optc, true); optc1_clear_optc_underflow(optc); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index f277656d5464..9a459a8fe8a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -185,6 +185,7 @@ struct dcn_optc_registers { SF(OTG0_OTG_GLOBAL_CONTROL0, OTG_MASTER_UPDATE_LOCK_SEL, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_UPDATE_PENDING, mask_sh),\ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_BLANK_DATA_DOUBLE_BUFFER_EN, mask_sh),\ + SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\ SF(OTG0_OTG_H_TOTAL, OTG_H_TOTAL, mask_sh),\ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_START, mask_sh),\ SF(OTG0_OTG_H_BLANK_START_END, OTG_H_BLANK_END, mask_sh),\ @@ -643,6 +644,8 @@ bool optc1_is_optc_underflow_occurred(struct timing_generator *optc); void optc1_set_blank_data_double_buffer(struct timing_generator *optc, bool enable); +void optc1_set_timing_double_buffer(struct timing_generator *optc, bool enable); + bool optc1_get_otg_active_size(struct timing_generator *optc, uint32_t *otg_active_width, uint32_t *otg_active_height); diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 261bdc3a8218..8b7122249ddc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -552,7 +552,8 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = true, - .fp16 = true + .fp16 = true, + .p010 = true }, .max_upscale_factor = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index a67395208991..5cdbba0cd873 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -1012,7 +1012,8 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = true, - .fp16 = true + .fp16 = true, + .p010 = true }, .max_upscale_factor = { @@ -3342,7 +3343,7 @@ void dcn20_cap_soc_clocks( void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; + struct _vcs_dpi_voltage_scaling_st calculated_states[DC__VOLTAGE_STATES]; int i; int num_calculated_states = 0; int min_dcfclk = 0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 51b5910cd05f..b25484aa8222 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -300,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .xfc_bus_transport_time_us = 4, .xfc_xbuf_latency_tolerance_us = 4, .use_urgent_burst_bw = 1, - .num_states = 9 + .num_states = 8 }; #ifndef MAX @@ -838,7 +838,8 @@ static const struct dc_plane_cap plane_cap = { .pixel_format_support = { .argb8888 = true, .nv12 = true, - .fp16 = true + .fp16 = true, + .p010 = true }, .max_upscale_factor = { @@ -1376,21 +1377,8 @@ 
static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param unsigned int i, j, k; int closest_clk_lvl; - // diags does not retrieve proper values from SMU - // cap states to 5 and make state 5 the max state - if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) { - dcn2_1_soc.num_states = 5; - - dcn2_1_soc.clock_limits[5].state = 5; - dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0; - dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0; - dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0; - dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0; - dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0; - dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0; - dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0; - dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0; - } else { + // Default clock levels are used for diags, which may lead to overclocking. + if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment)) { dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_soc.num_chans = bw_params->num_channels; @@ -1403,16 +1391,16 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2; /* - * Other levels: find cloest DCN clocks that fit the given clock limit using dcfclk - * as indicater + * Other levels: find closest DCN clocks that fit the given clock limit using dcfclk + * as indicator */ closest_clk_lvl = -1; /* index currently being filled */ k = 1; for (i = 1; i < clk_table->num_entries; i++) { - /* loop backwards, skip duplicate state, +1 because SMU has precision issue */ - for (j = dcn2_1_soc.num_states - 2; j >= k; j--) { + /* loop backwards, skip duplicate state*/ + for (j = dcn2_1_soc.num_states - 1; j >= k; j--) { if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) { closest_clk_lvl = j; break; @@ -1437,13 +1425,13 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param k++; } } - - /* duplicate last level */ - dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1]; - dcn2_1_soc.clock_limits[k].state = k; - dcn2_1_soc.num_states = k + 1; + dcn2_1_soc.num_states = k; } + /* duplicate last level */ + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states] = dcn2_1_soc.clock_limits[dcn2_1_soc.num_states - 1]; + dcn2_1_soc.clock_limits[dcn2_1_soc.num_states].state = dcn2_1_soc.num_states; + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h index ea4cde952f4f..2a1983324629 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dc_features.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dc_features.h @@ -29,7 +29,7 @@ #define DC__PRESENT 1 #define DC__PRESENT__1 1 #define DC__NUM_DPP 4 -#define DC__VOLTAGE_STATES 7 +#define DC__VOLTAGE_STATES 9 #define DC__NUM_DPP__4 1 #define DC__NUM_DPP__0_PRESENT 1 #define DC__NUM_DPP__1_PRESENT 1 diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index dfd3be452766..687010c17324 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -22,11 +22,12 @@ * Authors: AMD * */ + +#include "dc_features.h" + #ifndef __DISPLAY_MODE_STRUCTS_H__ #define __DISPLAY_MODE_STRUCTS_H__ 
-#define MAX_CLOCK_LIMIT_STATES 9 - typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st; typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st; typedef struct _vcs_dpi_ip_params_st ip_params_st; @@ -68,7 +69,7 @@ struct _vcs_dpi_voltage_scaling_st { }; struct _vcs_dpi_soc_bounding_box_st { - struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES]; + struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES]; unsigned int num_states; double sr_exit_time_us; double sr_enter_plus_exit_time_us; diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 4e542826cd26..c33454a9e0b4 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -734,6 +734,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, { struct core_freesync *core_freesync = NULL; unsigned long long nominal_field_rate_in_uhz = 0; + unsigned long long rounded_nominal_in_uhz = 0; unsigned int refresh_range = 0; unsigned long long min_refresh_in_uhz = 0; unsigned long long max_refresh_in_uhz = 0; @@ -750,17 +751,20 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; - // Don't allow min > max - if (min_refresh_in_uhz > max_refresh_in_uhz) - min_refresh_in_uhz = max_refresh_in_uhz; - // Full range may be larger than current video timing, so cap at nominal if (max_refresh_in_uhz > nominal_field_rate_in_uhz) max_refresh_in_uhz = nominal_field_rate_in_uhz; // Full range may be larger than current video timing, so cap at nominal - if (min_refresh_in_uhz > nominal_field_rate_in_uhz) - min_refresh_in_uhz = nominal_field_rate_in_uhz; + if (min_refresh_in_uhz > max_refresh_in_uhz) + min_refresh_in_uhz = max_refresh_in_uhz; + + // If a monitor reports exactly max refresh of 2x of min, enforce it on nominal + rounded_nominal_in_uhz = + div_u64(nominal_field_rate_in_uhz + 50000, 100000) * 100000; + if (in_config->max_refresh_in_uhz == (2 * in_config->min_refresh_in_uhz) && + in_config->max_refresh_in_uhz == rounded_nominal_in_uhz) + min_refresh_in_uhz = div_u64(nominal_field_rate_in_uhz, 2); if (!vrr_settings_require_update(core_freesync, in_config, (unsigned int)min_refresh_in_uhz, (unsigned int)max_refresh_in_uhz, @@ -792,11 +796,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; - in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - - 2 * in_out_vrr->min_duration_in_us; - if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) - in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; - in_out_vrr->supported = true; } @@ -804,9 +803,14 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.btr_enabled = in_config->btr; - if (in_out_vrr->max_refresh_in_uhz < - 2 * in_out_vrr->min_refresh_in_uhz) + if (in_out_vrr->max_refresh_in_uhz < (2 * in_out_vrr->min_refresh_in_uhz)) in_out_vrr->btr.btr_enabled = false; + else { + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - + 2 * in_out_vrr->min_duration_in_us; + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; + } in_out_vrr->btr.btr_active = false; in_out_vrr->btr.inserted_duration_in_us = 0; @@ -1008,8 +1012,8 @@ unsigned long long 
mod_freesync_calc_nominal_field_rate( unsigned int total = stream->timing.h_total * stream->timing.v_total; /* Calculate nominal field rate for stream, rounded up to nearest integer */ - nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; - nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; + nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz; + nominal_field_rate_in_uhz *= 100000000ULL; nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c index e9fbd94f8635..cc1d3f470b99 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -328,8 +328,7 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, /* add display to connection */ hdcp->connection.link = *link; *display_container = *display; - status = mod_hdcp_add_display_to_topology(hdcp, display_container); - + status = mod_hdcp_add_display_to_topology(hdcp, display->index); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; @@ -375,7 +374,7 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, status = mod_hdcp_remove_display_from_topology(hdcp, index); if (status != MOD_HDCP_STATUS_SUCCESS) goto out; - memset(display, 0, sizeof(struct mod_hdcp_display)); + display->state = MOD_HDCP_DISPLAY_INACTIVE; /* request authentication when connection is not reset */ if (current_state(hdcp) != HDCP_UNINITIALIZED) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h index 60ff1a0028ac..5cb4546be0ef 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -328,7 +328,7 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, /* psp functions */ enum mod_hdcp_status mod_hdcp_add_display_to_topology( - struct mod_hdcp *hdcp, struct mod_hdcp_display *display); + struct mod_hdcp *hdcp, uint8_t index); enum mod_hdcp_status mod_hdcp_remove_display_from_topology( struct mod_hdcp *hdcp, uint8_t index); enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); @@ -503,6 +503,11 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display) return display->state >= MOD_HDCP_DISPLAY_ACTIVE; } +static inline uint8_t is_display_added(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; +} + static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) { return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; @@ -510,23 +515,34 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) { - uint8_t active_count = 0; + uint8_t added_count = 0; uint8_t i; for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) if (is_display_active(&hdcp->displays[i])) - active_count++; - return active_count; + added_count++; + return added_count; +} + +static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->displays[i])) + added_count++; + return added_count; } -static inline struct mod_hdcp_display *get_first_active_display( +static inline struct mod_hdcp_display *get_first_added_display( struct mod_hdcp *hdcp) { uint8_t i; struct mod_hdcp_display *display = NULL; for (i = 0; i < MAX_NUM_OF_DISPLAYS; 
i++) - if (is_display_active(&hdcp->displays[i])) { + if (is_display_added(&hdcp->displays[i])) { display = &hdcp->displays[i]; break; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c index f244b72e74e0..37c8c05497d6 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* device count must be greater than or equal to tracked hdcp displays */ - return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : MOD_HDCP_STATUS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c index 549c113abcf7..491c00f48026 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c @@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp) static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) { /* device count must be greater than or equal to tracked hdcp displays */ - return (get_device_count(hdcp) < get_active_display_count(hdcp)) ? + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE : MOD_HDCP_STATUS_SUCCESS; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 836e47954938..c2929815c3ee 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -54,7 +54,7 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology( dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; - if (!display || !is_display_active(display)) + if (!display || !is_display_added(display)) return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -73,21 +73,25 @@ enum mod_hdcp_status mod_hdcp_remove_display_from_topology( HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); return MOD_HDCP_STATUS_SUCCESS; - } - -enum mod_hdcp_status mod_hdcp_add_display_to_topology( - struct mod_hdcp *hdcp, struct mod_hdcp_display *display) + +} +enum mod_hdcp_status mod_hdcp_add_display_to_topology(struct mod_hdcp *hdcp, + uint8_t index) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = + get_active_display_at_index(hdcp, index); struct mod_hdcp_link *link = &hdcp->connection.link; if (!psp->dtm_context.dtm_initialized) { DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); - display->state = MOD_HDCP_DISPLAY_INACTIVE; return MOD_HDCP_STATUS_FAILURE; } + if (!display || is_display_added(display)) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); @@ -109,11 +113,10 @@ enum mod_hdcp_status mod_hdcp_add_display_to_topology( psp_dtm_invoke(psp, dtm_cmd->cmd_id); - if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { - display->state = MOD_HDCP_DISPLAY_INACTIVE; + if (dtm_cmd->dtm_status != 
TA_DTM_STATUS__SUCCESS) return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; - } + display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); return MOD_HDCP_STATUS_SUCCESS; @@ -123,7 +126,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; - struct mod_hdcp_display *display = get_first_active_display(hdcp); + struct mod_hdcp_display *display = get_first_added_display(hdcp); struct ta_hdcp_shared_memory *hdcp_cmd; if (!psp->hdcp_context.hdcp_initialized) { @@ -176,7 +179,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) if (is_display_encryption_enabled( &hdcp->displays[i])) { hdcp->displays[i].state = - MOD_HDCP_DISPLAY_ACTIVE; + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; HDCP_HDCP1_DISABLED_TRACE(hdcp, hdcp->displays[i].index); } @@ -228,7 +231,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_active_display(hdcp); + struct mod_hdcp_display *display = get_first_added_display(hdcp); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -298,7 +301,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->displays[i].adjust.disable) + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->displays[i].adjust.disable) continue; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -360,7 +364,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_active_display(hdcp); + struct mod_hdcp_display *display = get_first_added_display(hdcp); if (!psp->hdcp_context.hdcp_initialized) { DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized"); @@ -419,7 +423,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp) if (is_display_encryption_enabled( &hdcp->displays[i])) { hdcp->displays[i].state = - MOD_HDCP_DISPLAY_ACTIVE; + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; HDCP_HDCP2_DISABLED_TRACE(hdcp, hdcp->displays[i].index); } @@ -658,7 +662,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp) { struct psp_context *psp = hdcp->config.psp.handle; struct ta_hdcp_shared_memory *hdcp_cmd; - struct mod_hdcp_display *display = get_first_active_display(hdcp); + struct mod_hdcp_display *display = get_first_added_display(hdcp); hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); @@ -743,7 +747,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { - if (hdcp->displays[i].adjust.disable) + if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->displays[i].adjust.disable) continue; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index; hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id; diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h index eae9309cfb24..c088602bc1a0 100644 --- 
a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -117,6 +117,7 @@ enum mod_hdcp_operation_mode { enum mod_hdcp_display_state { MOD_HDCP_DISPLAY_INACTIVE = 0, MOD_HDCP_DISPLAY_ACTIVE, + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED }; diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index c6d3bef15320..5db8c56066ee 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -35,6 +35,7 @@ #include "arcturus_ppt.h" #include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" +#include "nbio/nbio_7_4_offset.h" #include "nbio/nbio_7_4_sh_mask.h" #include "amdgpu_xgmi.h" #include <linux/i2c.h> @@ -2210,6 +2211,18 @@ static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) i2c_del_adapter(control); } +static bool arcturus_is_baco_supported(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (!smu_v11_0_baco_is_support(smu)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; +} + static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu) { PPTable_t *pptable = smu->smu_table.driver_pptable; @@ -2321,7 +2334,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= smu_v11_0_baco_is_support, + .baco_is_support= arcturus_is_baco_supported, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 9c60b38ab53a..15030284b444 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -28,13 +28,15 @@ #include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" +#include "soc15_common.h" #include "smu_v11_0.h" #include "smu11_driver_if_navi10.h" #include "atom.h" #include "navi10_ppt.h" #include "smu_v11_0_pptable.h" #include "smu_v11_0_ppsmc.h" -#include "nbio/nbio_7_4_sh_mask.h" +#include "nbio/nbio_2_3_offset.h" +#include "nbio/nbio_2_3_sh_mask.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" @@ -1985,6 +1987,18 @@ static int navi10_setup_od_limits(struct smu_context *smu) { return 0; } +static bool navi10_is_baco_supported(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (!smu_v11_0_baco_is_support(smu)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? 
true : false; +} + static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { OverDriveTable_t *od_table, *boot_od_table; int ret = 0; @@ -2361,7 +2375,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= smu_v11_0_baco_is_support, + .baco_is_support= navi10_is_baco_supported, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index d19e1d0d56c0..541c932a6005 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -42,8 +42,6 @@ #include "asic_reg/thm/thm_11_0_2_sh_mask.h" #include "asic_reg/mp/mp_11_0_offset.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" -#include "asic_reg/nbio/nbio_7_4_offset.h" -#include "asic_reg/nbio/nbio_7_4_sh_mask.h" #include "asic_reg/smuio/smuio_11_0_0_offset.h" #include "asic_reg/smuio/smuio_11_0_0_sh_mask.h" @@ -1662,9 +1660,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v bool smu_v11_0_baco_is_support(struct smu_context *smu) { - struct amdgpu_device *adev = smu->adev; struct smu_baco_context *smu_baco = &smu->smu_baco; - uint32_t val; bool baco_support; mutex_lock(&smu_baco->mutex); @@ -1679,11 +1675,7 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) return false; - val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); - if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) - return true; - - return false; + return true; } enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) @@ -1700,11 +1692,9 @@ enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { - struct smu_baco_context *smu_baco = &smu->smu_baco; struct amdgpu_device *adev = smu->adev; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - uint32_t bif_doorbell_intr_cntl; uint32_t data; int ret = 0; @@ -1713,14 +1703,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) mutex_lock(&smu_baco->mutex); - bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); - if (state == SMU_BACO_STATE_ENTER) { - bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, - BIF_DOORBELL_INT_CNTL, - DOORBELL_INTERRUPT_DISABLE, 1); - WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); - if (!ras || !ras->supported) { data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); data |= 0x80000000; @@ -1735,11 +1718,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) if (ret) goto out; - bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, - BIF_DOORBELL_INT_CNTL, - DOORBELL_INTERRUPT_DISABLE, 0); - WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); - /* clear vbios scratch 6 and 7 for coming asic reinit */ WREG32(adev->bios_scratch_reg_offset + 6, 0); WREG32(adev->bios_scratch_reg_offset + 7, 0); diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 49ff3756bd9f..3f1044326dcb 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -35,6 +35,7 @@ 
#include "vega20_ppt.h" #include "vega20_pptable.h" #include "vega20_ppsmc.h" +#include "nbio/nbio_7_4_offset.h" #include "nbio/nbio_7_4_sh_mask.h" #include "asic_reg/thm/thm_11_0_2_offset.h" #include "asic_reg/thm/thm_11_0_2_sh_mask.h" @@ -3174,6 +3175,17 @@ static int vega20_update_pcie_parameters(struct smu_context *smu, return ret; } +static bool vega20_is_baco_supported(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t val; + + if (!smu_v11_0_baco_is_support(smu)) + return false; + + val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0); + return (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK) ? true : false; +} static const struct pptable_funcs vega20_ppt_funcs = { .tables_init = vega20_tables_init, @@ -3262,7 +3274,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .register_irq_handler = smu_v11_0_register_irq_handler, .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support= smu_v11_0_baco_is_support, + .baco_is_support= vega20_is_baco_supported, .baco_get_state = smu_v11_0_baco_get_state, .baco_set_state = smu_v11_0_baco_set_state, .baco_enter = smu_v11_0_baco_enter, diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index c4c704e01961..eb009d3ab48f 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -48,6 +48,11 @@ #include "drm_internal.h" #include "drm_legacy.h" +#if defined(CONFIG_MMU) && defined(CONFIG_TRANSPARENT_HUGEPAGE) +#include <uapi/asm/mman.h> +#include <drm/drm_vma_manager.h> +#endif + /* from BKL pushdown */ DEFINE_MUTEX(drm_global_mutex); @@ -872,3 +877,139 @@ struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags) return file; } EXPORT_SYMBOL_FOR_TESTS_ONLY(mock_drm_getfile); + +#ifdef CONFIG_MMU +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/* + * drm_addr_inflate() attempts to construct an aligned area by inflating + * the area size and skipping the unaligned start of the area. + * adapted from shmem_get_unmapped_area() + */ +static unsigned long drm_addr_inflate(unsigned long addr, + unsigned long len, + unsigned long pgoff, + unsigned long flags, + unsigned long huge_size) +{ + unsigned long offset, inflated_len; + unsigned long inflated_addr; + unsigned long inflated_offset; + + offset = (pgoff << PAGE_SHIFT) & (huge_size - 1); + if (offset && offset + len < 2 * huge_size) + return addr; + if ((addr & (huge_size - 1)) == offset) + return addr; + + inflated_len = len + huge_size - PAGE_SIZE; + if (inflated_len > TASK_SIZE) + return addr; + if (inflated_len < len) + return addr; + + inflated_addr = current->mm->get_unmapped_area(NULL, 0, inflated_len, + 0, flags); + if (IS_ERR_VALUE(inflated_addr)) + return addr; + if (inflated_addr & ~PAGE_MASK) + return addr; + + inflated_offset = inflated_addr & (huge_size - 1); + inflated_addr += offset - inflated_offset; + if (inflated_offset > offset) + inflated_addr += huge_size; + + if (inflated_addr > TASK_SIZE - len) + return addr; + + return inflated_addr; +} + +/** + * drm_get_unmapped_area() - Get an unused user-space virtual memory area + * suitable for huge page table entries. + * @file: The struct file representing the address space being mmap()'d. + * @uaddr: Start address suggested by user-space. + * @len: Length of the area. + * @pgoff: The page offset into the address space. + * @flags: mmap flags + * @mgr: The address space manager used by the drm driver. 
This argument can + * probably be removed at some point when all drivers use the same + * address space manager. + * + * This function attempts to find an unused user-space virtual memory area + * that can accommodate the size we want to map, and that is properly + * aligned to facilitate huge page table entries matching actual + * huge pages or huge page aligned memory in buffer objects. Buffer objects + * are assumed to start at huge page boundary pfns (io memory) or be + * populated by huge pages aligned to the start of the buffer object + * (system- or coherent memory). Adapted from shmem_get_unmapped_area. + * + * Return: aligned user-space address. + */ +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr) +{ + unsigned long addr; + unsigned long inflated_addr; + struct drm_vma_offset_node *node; + + if (len > TASK_SIZE) + return -ENOMEM; + + /* + * @pgoff is the file page-offset the huge page boundaries of + * which typically aligns to physical address huge page boundaries. + * That's not true for DRM, however, where physical address huge + * page boundaries instead are aligned with the offset from + * buffer object start. So adjust @pgoff to be the offset from + * buffer object start. + */ + drm_vma_offset_lock_lookup(mgr); + node = drm_vma_offset_lookup_locked(mgr, pgoff, 1); + if (node) + pgoff -= node->vm_node.start; + drm_vma_offset_unlock_lookup(mgr); + + addr = current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); + if (IS_ERR_VALUE(addr)) + return addr; + if (addr & ~PAGE_MASK) + return addr; + if (addr > TASK_SIZE - len) + return addr; + + if (len < HPAGE_PMD_SIZE) + return addr; + if (flags & MAP_FIXED) + return addr; + /* + * Our priority is to support MAP_SHARED mapped hugely; + * and support MAP_PRIVATE mapped hugely too, until it is COWed. + * But if caller specified an address hint, respect that as before. + */ + if (uaddr) + return addr; + + inflated_addr = drm_addr_inflate(addr, len, pgoff, flags, + HPAGE_PMD_SIZE); + + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) && + len >= HPAGE_PUD_SIZE) + inflated_addr = drm_addr_inflate(inflated_addr, len, pgoff, + flags, HPAGE_PUD_SIZE); + return inflated_addr; +} +#else /* CONFIG_TRANSPARENT_HUGEPAGE */ +unsigned long drm_get_unmapped_area(struct file *file, + unsigned long uaddr, unsigned long len, + unsigned long pgoff, unsigned long flags, + struct drm_vma_offset_manager *mgr) +{ + return current->mm->get_unmapped_area(file, uaddr, len, pgoff, flags); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ +EXPORT_SYMBOL_GPL(drm_get_unmapped_area); +#endif /* CONFIG_MMU */ diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index bc6e208949e8..8981abe8b7c9 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -45,7 +45,6 @@ #include <linux/export.h> #include <linux/interval_tree_generic.h> #include <linux/seq_file.h> -#include <linux/sched/signal.h> #include <linux/slab.h> #include <linux/stacktrace.h> @@ -367,11 +366,6 @@ next_hole(struct drm_mm *mm, struct drm_mm_node *node, enum drm_mm_insert_mode mode) { - /* Searching is slow; check if we ran out of time/patience */ - cond_resched(); - if (fatal_signal_pending(current)) - return NULL; - switch (mode) { default: case DRM_MM_INSERT_BEST: @@ -563,7 +557,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm, return 0; } - return signal_pending(current) ? 
-ERESTARTSYS : -ENOSPC; + return -ENOSPC; } EXPORT_SYMBOL(drm_mm_insert_node_in_range); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 9e065ad0658f..a3cc080a46c6 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -164,6 +164,7 @@ struct decode_info { #define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01) #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02) #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04) +#define OP_SWTESS_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x03) #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B) @@ -967,18 +968,6 @@ static int cmd_handler_lri(struct parser_exec_state *s) { int i, ret = 0; int cmd_len = cmd_length(s); - u32 valid_len = CMD_LEN(1); - - /* - * Official intel docs are somewhat sloppy , check the definition of - * MI_LOAD_REGISTER_IMM. - */ - #define MAX_VALID_LEN 127 - if ((cmd_len < valid_len) || (cmd_len > MAX_VALID_LEN)) { - gvt_err("len is not valid: len=%u valid_len=%u\n", - cmd_len, valid_len); - return -EFAULT; - } for (i = 1; i < cmd_len; i += 2) { if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) { @@ -2485,6 +2474,9 @@ static const struct cmd_info cmd_info[] = { {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_1(1), 8, NULL}, + {"OP_SWTESS_BASE_ADDRESS", OP_SWTESS_BASE_ADDRESS, + F_LEN_VAR, R_RCS, D_ALL, ADDR_FIX_2(1, 2), 3, NULL}, + {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL}, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 6e5c9885d9fe..a83df2f84eb9 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -221,7 +221,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -241,7 +241,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { @@ -261,7 +261,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK | TRANS_DDI_PORT_MASK); vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |= - (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI | + (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); if (IS_BROADWELL(dev_priv)) { diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 0182e2a5acff..2faf50e1b051 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -462,11 +462,14 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, return 0; } -/* ascendingly sorted */ +/* sorted in ascending order */ static i915_reg_t force_nonpriv_white_list[] = { + _MMIO(0xd80), GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) - 
PS_INVOCATION_COUNT,//_MMIO(0x2348) + CL_PRIMITIVES_COUNT, //_MMIO(0x2340) + PS_INVOCATION_COUNT, //_MMIO(0x2348) + PS_DEPTH_COUNT, //_MMIO(0x2350) GEN8_CS_CHICKEN1,//_MMIO(0x2580) _MMIO(0x2690), _MMIO(0x2694), @@ -491,6 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = { _MMIO(0xe18c), _MMIO(0xe48c), _MMIO(0xe5f4), + _MMIO(0x64844), }; /* a simple bsearch */ diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 1c95bf8cbed0..cb11c3184085 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -296,8 +296,8 @@ shadow_context_descriptor_update(struct intel_context *ce, * Update bits 0-11 of the context descriptor which includes flags * like GEN8_CTX_* cached in desc_template */ - desc &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT); - desc |= workload->ctx_desc.addressing_mode << + desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT); + desc |= (u64)workload->ctx_desc.addressing_mode << GEN8_CTX_ADDRESSING_MODE_SHIFT; ce->lrc_desc = desc; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 6650f478b226..47b989834af1 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -633,7 +633,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev) if (config->platform.iommu) { iommu_dev = &pdev->dev; - if (!iommu_dev->iommu_fwspec) + if (!dev_iommu_fwspec_get(iommu_dev)) iommu_dev = iommu_dev->parent; aspace = msm_gem_address_space_create(iommu_dev, diff --git a/drivers/gpu/drm/nouveau/dispnv04/dac.c b/drivers/gpu/drm/nouveau/dispnv04/dac.c index e8eef88a8382..ffdd447d8706 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dac.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dac.c @@ -35,7 +35,8 @@ #include <subdev/bios/gpio.h> #include <subdev/gpio.h> -#include <subdev/timer.h> + +#include <nvif/timer.h> int nv04_dac_output_offset(struct drm_encoder *encoder) { diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index 3fdfafa8b0ad..b674d68ef28a 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -26,6 +26,7 @@ #include "hw.h" #include <subdev/bios/pll.h> +#include <nvif/timer.h> #define CHIPSET_NFORCE 0x01a0 #define CHIPSET_NFORCE2 0x01f0 diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c index 00a85f1e1a4a..ee782151d332 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c @@ -23,6 +23,7 @@ #include <nvif/cl507c.h> #include <nvif/event.h> +#include <nvif/timer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> diff --git a/drivers/gpu/drm/nouveau/dispnv50/core507d.c b/drivers/gpu/drm/nouveau/dispnv50/core507d.c index e7fcfa6e6467..c5152c39c684 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/core507d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/core507d.c @@ -23,6 +23,7 @@ #include "head.h" #include <nvif/cl507d.h> +#include <nvif/timer.h> #include "nouveau_bo.h" diff --git a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c index 3b36dc8d36b2..c03cb987856b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/corec37d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/corec37d.c @@ -24,6 +24,8 @@ #include <nouveau_bo.h> +#include <nvif/timer.h> + void corec37d_wndw_owner(struct nv50_core *core) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c index 
397143b639c6..8c5cf096f69b 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c @@ -24,21 +24,36 @@ #include "head.h" #include <nvif/cl507a.h> +#include <nvif/timer.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_plane_helper.h> +bool +curs507a_space(struct nv50_wndw *wndw) +{ + nvif_msec(&nouveau_drm(wndw->plane.dev)->client.device, 2, + if (nvif_rd32(&wndw->wimm.base.user, 0x0008) >= 4) + return true; + ); + WARN_ON(1); + return false; +} + static void curs507a_update(struct nv50_wndw *wndw, u32 *interlock) { - nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000); + if (curs507a_space(wndw)) + nvif_wr32(&wndw->wimm.base.user, 0x0080, 0x00000000); } static void curs507a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) { - nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 | - asyw->point.x); + if (curs507a_space(wndw)) { + nvif_wr32(&wndw->wimm.base.user, 0x0084, asyw->point.y << 16 | + asyw->point.x); + } } const struct nv50_wimm_func diff --git a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c index 23fb29d41efe..96dff4f09f57 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c +++ b/drivers/gpu/drm/nouveau/dispnv50/cursc37a.c @@ -25,14 +25,17 @@ static void cursc37a_update(struct nv50_wndw *wndw, u32 *interlock) { - nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001); + if (curs507a_space(wndw)) + nvif_wr32(&wndw->wimm.base.user, 0x0200, 0x00000001); } static void cursc37a_point(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) { - nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 | - asyw->point.x); + if (curs507a_space(wndw)) { + nvif_wr32(&wndw->wimm.base.user, 0x0208, asyw->point.y << 16 | + asyw->point.x); + } } static const struct nv50_wimm_func diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 4d1c58468dbc..6be9df1820c5 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -45,6 +45,7 @@ #include <nvif/cl5070.h> #include <nvif/cl507d.h> #include <nvif/event.h> +#include <nvif/timer.h> #include "nouveau_drv.h" #include "nouveau_dma.h" diff --git a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c index 2e68fc736fe1..4f7ce57f2036 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c +++ b/drivers/gpu/drm/nouveau/dispnv50/ovly827e.c @@ -24,6 +24,8 @@ #include <nouveau_bo.h> +#include <nvif/timer.h> + static void ovly827e_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) { diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.h b/drivers/gpu/drm/nouveau/dispnv50/wndw.h index caf397475918..a7412b9d3a98 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.h +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.h @@ -97,6 +97,7 @@ struct nv50_wimm_func { }; extern const struct nv50_wimm_func curs507a; +bool curs507a_space(struct nv50_wndw *); int wndwc37e_new(struct nouveau_drm *, enum drm_plane_type, int, s32, struct nv50_wndw **); diff --git a/drivers/gpu/drm/nouveau/include/nvif/device.h b/drivers/gpu/drm/nouveau/include/nvif/device.h index 25d969dcf67d..c2a572c67a76 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/device.h +++ b/drivers/gpu/drm/nouveau/include/nvif/device.h @@ -23,27 +23,6 @@ int nvif_device_init(struct nvif_object *, u32 handle, s32 oclass, void *, u32, void nvif_device_fini(struct nvif_device *); u64 nvif_device_time(struct nvif_device *); -/* Delay based on GPU time (ie. PTIMER). 
- * - * Will return -ETIMEDOUT unless the loop was terminated with 'break', - * where it will return the number of nanoseconds taken instead. - */ -#define nvif_nsec(d,n,cond...) ({ \ - struct nvif_device *_device = (d); \ - u64 _nsecs = (n), _time0 = nvif_device_time(_device); \ - s64 _taken = 0; \ - \ - do { \ - cond \ - } while (_taken = nvif_device_time(_device) - _time0, _taken < _nsecs);\ - \ - if (_taken >= _nsecs) \ - _taken = -ETIMEDOUT; \ - _taken; \ -}) -#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond) -#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond) - /*XXX*/ #include <subdev/bios.h> #include <subdev/fb.h> diff --git a/drivers/gpu/drm/nouveau/include/nvif/timer.h b/drivers/gpu/drm/nouveau/include/nvif/timer.h new file mode 100644 index 000000000000..57587a985c4b --- /dev/null +++ b/drivers/gpu/drm/nouveau/include/nvif/timer.h @@ -0,0 +1,35 @@ +#ifndef __NVIF_TIMER_H__ +#define __NVIF_TIMER_H__ +#include <nvif/os.h> + +struct nvif_timer_wait { + struct nvif_device *device; + u64 limit; + u64 time0; + u64 time1; + int reads; +}; + +void nvif_timer_wait_init(struct nvif_device *, u64 nsec, + struct nvif_timer_wait *); +s64 nvif_timer_wait_test(struct nvif_timer_wait *); + +/* Delay based on GPU time (ie. PTIMER). + * + * Will return -ETIMEDOUT unless the loop was terminated with 'break', + * where it will return the number of nanoseconds taken instead. + */ +#define nvif_nsec(d,n,cond...) ({ \ + struct nvif_timer_wait _wait; \ + s64 _taken = 0; \ + \ + nvif_timer_wait_init((d), (n), &_wait); \ + do { \ + cond \ + } while ((_taken = nvif_timer_wait_test(&_wait)) >= 0); \ + \ + _taken; \ +}) +#define nvif_usec(d,u,cond...) nvif_nsec((d), (u) * 1000, ##cond) +#define nvif_msec(d,m,cond...) nvif_usec((d), (m) * 1000, ##cond) +#endif diff --git a/drivers/gpu/drm/nouveau/include/nvif/user.h b/drivers/gpu/drm/nouveau/include/nvif/user.h index 03c11826b693..6825574d93c2 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/user.h +++ b/drivers/gpu/drm/nouveau/include/nvif/user.h @@ -10,6 +10,7 @@ struct nvif_user { struct nvif_user_func { void (*doorbell)(struct nvif_user *, u32 token); + u64 (*time)(struct nvif_user *); }; int nvif_user_init(struct nvif_device *); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 2b4b21b02e40..c40f127de3d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1494,8 +1494,13 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg) ret = nvif_object_map_handle(&mem->mem.object, &args, argc, &handle, &length); - if (ret != 1) - return ret ? 
ret : -EINVAL; + if (ret != 1) { + if (WARN_ON(ret == 0)) + return -EINVAL; + if (ret == -ENOSPC) + return -EAGAIN; + return ret; + } reg->bus.base = 0; reg->bus.offset = handle; diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 7dfbbbc1beea..15a3d40edf02 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -222,22 +222,18 @@ nouveau_drm_debugfs_init(struct drm_minor *minor) { struct nouveau_drm *drm = nouveau_drm(minor->dev); struct dentry *dentry; - int i, ret; + int i; for (i = 0; i < ARRAY_SIZE(nouveau_debugfs_files); i++) { - dentry = debugfs_create_file(nouveau_debugfs_files[i].name, - S_IRUGO | S_IWUSR, - minor->debugfs_root, minor->dev, - nouveau_debugfs_files[i].fops); - if (!dentry) - return -ENOMEM; + debugfs_create_file(nouveau_debugfs_files[i].name, + S_IRUGO | S_IWUSR, + minor->debugfs_root, minor->dev, + nouveau_debugfs_files[i].fops); } - ret = drm_debugfs_create_files(nouveau_debugfs_list, - NOUVEAU_DEBUGFS_ENTRIES, - minor->debugfs_root, minor); - if (ret) - return ret; + drm_debugfs_create_files(nouveau_debugfs_list, + NOUVEAU_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); /* Set the size of the vbios since we know it, and it's confusing to * userspace if it wants to seek() but the file has a length of 0 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 6b1629c14dd7..ca4087f5a15b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -618,6 +618,64 @@ nouveau_drm_device_fini(struct drm_device *dev) kfree(drm); } +/* + * On some Intel PCIe bridge controllers doing a + * D0 -> D3hot -> D3cold -> D0 sequence causes Nvidia GPUs to not reappear. + * Skipping the intermediate D3hot step seems to make it work again. This is + * probably caused by not meeting the expectation the involved AML code has + * when the GPU is put into D3hot state before invoking it. + * + * This leads to various manifestations of this issue: + * - AML code execution to power on the GPU hits an infinite loop (as the + * code waits on device memory to change). + * - kernel crashes, as all PCI reads return -1, which most code isn't able + * to handle well enough. + * + * In all cases dmesg will contain at least one line like this: + * 'nouveau 0000:01:00.0: Refused to change power state, currently in D3' + * followed by a lot of nouveau timeouts. + * + * In the \_SB.PCI0.PEG0.PG00._OFF code deeper down writes bit 0x80 to the not + * documented PCI config space register 0x248 of the Intel PCIe bridge + * controller (0x1901) in order to change the state of the PCIe link between + * the PCIe port and the GPU. There are alternative code paths using other + * registers, which seem to work fine (executed pre Windows 8): + * - 0xbc bit 0x20 (publicly available documentation claims 'reserved') + * - 0xb0 bit 0x10 (link disable) + * Changing the conditions inside the firmware by poking into the relevant + * addresses does resolve the issue, but it seemed to be ACPI private memory + * and not any device accessible memory at all, so there is no portable way of + * changing the conditions. + * On a XPS 9560 that means bits [0,3] on \CPEX need to be cleared. + * + * The only systems where this behavior can be seen are hybrid graphics laptops + * with a secondary Nvidia Maxwell, Pascal or Turing GPU. 
It's unclear whether + * this issue only occurs in combination with listed Intel PCIe bridge + * controllers and the mentioned GPUs or other devices as well. + * + * documentation on the PCIe bridge controller can be found in the + * "7th Generation Intel® Processor Families for H Platforms Datasheet Volume 2" + * Section "12 PCI Express* Controller (x16) Registers" + */ + +static void quirk_broken_nv_runpm(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + struct pci_dev *bridge = pci_upstream_bridge(pdev); + + if (!bridge || bridge->vendor != PCI_VENDOR_ID_INTEL) + return; + + switch (bridge->device) { + case 0x1901: + drm->old_pm_cap = pdev->pm_cap; + pdev->pm_cap = 0; + NV_INFO(drm, "Disabling PCI power management to avoid bug\n"); + break; + } +} + static int nouveau_drm_probe(struct pci_dev *pdev, const struct pci_device_id *pent) { @@ -699,6 +757,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev, if (ret) goto fail_drm_dev_init; + quirk_broken_nv_runpm(pdev); return 0; fail_drm_dev_init: @@ -734,7 +793,11 @@ static void nouveau_drm_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + struct nouveau_drm *drm = nouveau_drm(dev); + /* revert our workaround */ + if (drm->old_pm_cap) + pdev->pm_cap = drm->old_pm_cap; nouveau_drm_device_remove(dev); pci_disable_device(pdev); } diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index c2c332fbde97..2a6519737800 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -140,6 +140,8 @@ struct nouveau_drm { struct list_head clients; + u8 old_pm_cap; + struct { struct agp_bridge_data *bridge; u32 base; diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index e3797b2d4d17..645fedd77e21 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -171,6 +171,11 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, mm = get_task_mm(current); down_read(&mm->mmap_sem); + if (!cli->svm.svmm) { + up_read(&mm->mmap_sem); + return -EINVAL; + } + for (addr = args->va_start, end = args->va_start + size; addr < end;) { struct vm_area_struct *vma; unsigned long next; @@ -179,6 +184,7 @@ nouveau_svmm_bind(struct drm_device *dev, void *data, if (!vma) break; + addr = max(addr, vma->vm_start); next = min(vma->vm_end, end); /* This is a best effort so we ignore errors */ nouveau_dmem_migrate_vma(cli->drm, vma, addr, next); @@ -656,9 +662,6 @@ nouveau_svm_fault(struct nvif_notify *notify) limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT); if (start < svmm->unmanaged.limit) limit = min_t(u64, limit, svmm->unmanaged.start); - else - if (limit > svmm->unmanaged.start) - start = max_t(u64, start, svmm->unmanaged.limit); SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); mm = svmm->notifier.mm; diff --git a/drivers/gpu/drm/nouveau/nvif/Kbuild b/drivers/gpu/drm/nouveau/nvif/Kbuild index 50d583d63807..f194d354c1f5 100644 --- a/drivers/gpu/drm/nouveau/nvif/Kbuild +++ b/drivers/gpu/drm/nouveau/nvif/Kbuild @@ -8,6 +8,7 @@ nvif-y += nvif/fifo.o nvif-y += nvif/mem.o nvif-y += nvif/mmu.o nvif-y += nvif/notify.o +nvif-y += nvif/timer.o nvif-y += nvif/vmm.o # Usermode classes diff --git a/drivers/gpu/drm/nouveau/nvif/device.c b/drivers/gpu/drm/nouveau/nvif/device.c index 1ec101ba3b42..0e92db44bbc8 100644 --- a/drivers/gpu/drm/nouveau/nvif/device.c +++ b/drivers/gpu/drm/nouveau/nvif/device.c @@ -27,11 +27,15 
@@ u64 nvif_device_time(struct nvif_device *device) { - struct nv_device_time_v0 args = {}; - int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME, - &args, sizeof(args)); - WARN_ON_ONCE(ret != 0); - return args.time; + if (!device->user.func) { + struct nv_device_time_v0 args = {}; + int ret = nvif_object_mthd(&device->object, NV_DEVICE_V0_TIME, + &args, sizeof(args)); + WARN_ON_ONCE(ret != 0); + return args.time; + } + + return device->user.func->time(&device->user); } void diff --git a/drivers/gpu/drm/nouveau/nvif/timer.c b/drivers/gpu/drm/nouveau/nvif/timer.c new file mode 100644 index 000000000000..602c1a258d10 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvif/timer.c @@ -0,0 +1,56 @@ +/* + * Copyright 2020 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#include <nvif/timer.h> +#include <nvif/device.h> + +s64 +nvif_timer_wait_test(struct nvif_timer_wait *wait) +{ + u64 time = nvif_device_time(wait->device); + + if (wait->reads == 0) { + wait->time0 = time; + wait->time1 = time; + } + + if (wait->time1 == time) { + if (WARN_ON(wait->reads++ == 16)) + return -ETIMEDOUT; + } else { + wait->time1 = time; + wait->reads = 1; + } + + if (wait->time1 - wait->time0 > wait->limit) + return -ETIMEDOUT; + + return wait->time1 - wait->time0; +} + +void +nvif_timer_wait_init(struct nvif_device *device, u64 nsec, + struct nvif_timer_wait *wait) +{ + wait->device = device; + wait->limit = nsec; + wait->reads = 0; +} diff --git a/drivers/gpu/drm/nouveau/nvif/userc361.c b/drivers/gpu/drm/nouveau/nvif/userc361.c index 19f9958e7e01..1116f871b272 100644 --- a/drivers/gpu/drm/nouveau/nvif/userc361.c +++ b/drivers/gpu/drm/nouveau/nvif/userc361.c @@ -21,6 +21,19 @@ */ #include <nvif/user.h> +static u64 +nvif_userc361_time(struct nvif_user *user) +{ + u32 hi, lo; + + do { + hi = nvif_rd32(&user->object, 0x084); + lo = nvif_rd32(&user->object, 0x080); + } while (hi != nvif_rd32(&user->object, 0x084)); + + return ((u64)hi << 32 | lo); +} + static void nvif_userc361_doorbell(struct nvif_user *user, u32 token) { @@ -30,4 +43,5 @@ nvif_userc361_doorbell(struct nvif_user *user, u32 token) const struct nvif_user_func nvif_userc361 = { .doorbell = nvif_userc361_doorbell, + .time = nvif_userc361_time, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index dd8f85b8b3a7..f2f5636efac4 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -1981,8 +1981,34 @@ gf100_gr_init_(struct nvkm_gr *base) { struct gf100_gr *gr = gf100_gr(base); struct nvkm_subdev *subdev = &base->engine.subdev; + struct nvkm_device *device = subdev->device; + bool reset = device->chipset == 0x137 || device->chipset == 0x138; u32 ret; + /* On certain GP107/GP108 boards, we trigger a weird issue where + * GR will stop responding to PRI accesses after we've asked the + * SEC2 RTOS to boot the GR falcons. This happens with far more + * frequency when cold-booting a board (ie. returning from D3). + * + * The root cause for this is not known and has proven difficult + * to isolate, with many avenues being dead-ends. + * + * A workaround was discovered by Karol, whereby putting GR into + * reset for an extended period right before initialisation + * prevents the problem from occurring. + * + * XXX: As RM does not require any such workaround, this is more + * of a hack than a true fix.
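The nvif_timer_wait_init()/nvif_timer_wait_test() pair above bounds a poll loop by GPU time and additionally gives up if the timestamp stops advancing for 16 consecutive reads. Below is a stand-alone userspace model of that logic; the toy_* names and the use of CLOCK_MONOTONIC in place of the GPU's PTIMER are assumptions made only for this sketch, not part of the patch.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

struct toy_wait {
	uint64_t limit;		/* nanoseconds we are willing to wait */
	uint64_t time0;		/* first timestamp sampled */
	uint64_t time1;		/* most recent distinct timestamp */
	int reads;		/* consecutive reads returning the same value */
};

/* Stand-in for nvif_device_time(): any monotonic nanosecond counter works. */
static uint64_t toy_time(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void toy_wait_init(struct toy_wait *w, uint64_t nsec)
{
	w->limit = nsec;
	w->reads = 0;
}

/* Elapsed nanoseconds so far, or -ETIMEDOUT once the budget is exhausted
 * or the counter has been stuck for 16 reads in a row. */
static int64_t toy_wait_test(struct toy_wait *w)
{
	uint64_t time = toy_time();

	if (w->reads == 0) {
		w->time0 = time;
		w->time1 = time;
	}

	if (w->time1 == time) {
		if (w->reads++ == 16)	/* counter not moving: bail out */
			return -ETIMEDOUT;
	} else {
		w->time1 = time;
		w->reads = 1;
	}

	if (w->time1 - w->time0 > w->limit)
		return -ETIMEDOUT;

	return w->time1 - w->time0;
}

int main(void)
{
	struct toy_wait w;
	int64_t taken = 0;
	int condition_met = 0;	/* the polled condition; never true in this demo */

	toy_wait_init(&w, 2 * 1000 * 1000);	/* 2 ms budget, as in nvif_msec(d, 2, ...) */
	do {
		if (condition_met)
			break;
	} while ((taken = toy_wait_test(&w)) >= 0);

	printf("%s\n", taken < 0 ? "timed out" : "condition met in time");
	return 0;
}

Compiled as plain C, the loop above gives up after roughly the 2 ms budget, which mirrors how nvif_msec() is used by curs507a_space() earlier in this series to poll for cursor channel space.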
+ */ + reset = nvkm_boolopt(device->cfgopt, "NvGrResetWar", reset); + if (reset) { + nvkm_mask(device, 0x000200, 0x00001000, 0x00000000); + nvkm_rd32(device, 0x000200); + msleep(50); + nvkm_mask(device, 0x000200, 0x00001000, 0x00001000); + nvkm_rd32(device, 0x000200); + } + nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false); ret = nvkm_falcon_get(&gr->fecs.falcon, subdev); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 0ce81b1f36af..3ad828eaefe1 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -361,7 +361,6 @@ static int panel_dpi_probe(struct device *dev, struct panel_desc *desc; unsigned int bus_flags; struct videomode vm; - const char *mapping; int ret; np = dev->of_node; @@ -386,16 +385,6 @@ static int panel_dpi_probe(struct device *dev, of_property_read_u32(np, "width-mm", &desc->size.width); of_property_read_u32(np, "height-mm", &desc->size.height); - of_property_read_string(np, "data-mapping", &mapping); - if (!strcmp(mapping, "rgb24")) - desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24; - else if (!strcmp(mapping, "rgb565")) - desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16; - else if (!strcmp(mapping, "bgr666")) - desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18; - else if (!strcmp(mapping, "lvds666")) - desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; - /* Extract bus_flags from display_timing */ bus_flags = 0; vm.flags = timing->flags; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index cb8829ca6c7f..6ee3b96f0d13 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -162,6 +162,89 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_vm_reserve); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/** + * ttm_bo_vm_insert_huge - Insert a pfn for PUD or PMD faults + * @vmf: Fault data + * @bo: The buffer object + * @page_offset: Page offset from bo start + * @fault_page_size: The size of the fault in pages. + * @pgprot: The page protections. + * Does additional checking whether it's possible to insert a PUD or PMD + * pfn and performs the insertion. + * + * Return: VM_FAULT_NOPAGE on successful insertion, VM_FAULT_FALLBACK if + * a huge fault was not possible, or on insertion error. + */ +static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, + struct ttm_buffer_object *bo, + pgoff_t page_offset, + pgoff_t fault_page_size, + pgprot_t pgprot) +{ + pgoff_t i; + vm_fault_t ret; + unsigned long pfn; + pfn_t pfnt; + struct ttm_tt *ttm = bo->ttm; + bool write = vmf->flags & FAULT_FLAG_WRITE; + + /* Fault should not cross bo boundary. */ + page_offset &= ~(fault_page_size - 1); + if (page_offset + fault_page_size > bo->num_pages) + goto out_fallback; + + if (bo->mem.bus.is_iomem) + pfn = ttm_bo_io_mem_pfn(bo, page_offset); + else + pfn = page_to_pfn(ttm->pages[page_offset]); + + /* pfn must be fault_page_size aligned. */ + if ((pfn & (fault_page_size - 1)) != 0) + goto out_fallback; + + /* Check that memory is contiguous. 
*/ + if (!bo->mem.bus.is_iomem) { + for (i = 1; i < fault_page_size; ++i) { + if (page_to_pfn(ttm->pages[page_offset + i]) != pfn + i) + goto out_fallback; + } + } else if (bo->bdev->driver->io_mem_pfn) { + for (i = 1; i < fault_page_size; ++i) { + if (ttm_bo_io_mem_pfn(bo, page_offset + i) != pfn + i) + goto out_fallback; + } + } + + pfnt = __pfn_to_pfn_t(pfn, PFN_DEV); + if (fault_page_size == (HPAGE_PMD_SIZE >> PAGE_SHIFT)) + ret = vmf_insert_pfn_pmd_prot(vmf, pfnt, pgprot, write); +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + else if (fault_page_size == (HPAGE_PUD_SIZE >> PAGE_SHIFT)) + ret = vmf_insert_pfn_pud_prot(vmf, pfnt, pgprot, write); +#endif + else + WARN_ON_ONCE(ret = VM_FAULT_FALLBACK); + + if (ret != VM_FAULT_NOPAGE) + goto out_fallback; + + return VM_FAULT_NOPAGE; +out_fallback: + count_vm_event(THP_FAULT_FALLBACK); + return VM_FAULT_FALLBACK; +} +#else +static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, + struct ttm_buffer_object *bo, + pgoff_t page_offset, + pgoff_t fault_page_size, + pgprot_t pgprot) +{ + return VM_FAULT_FALLBACK; +} +#endif + /** * ttm_bo_vm_fault_reserved - TTM fault helper * @vmf: The struct vm_fault given as argument to the fault callback @@ -169,6 +252,7 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve); * @num_prefault: Maximum number of prefault pages. The caller may want to * specify this based on madvice settings and the size of the GPU object * backed by the memory. + * @fault_page_size: The size of the fault in pages. * * This function inserts one or more page table entries pointing to the * memory backing the buffer object, and then returns a return code @@ -182,7 +266,8 @@ EXPORT_SYMBOL(ttm_bo_vm_reserve); */ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, pgprot_t prot, - pgoff_t num_prefault) + pgoff_t num_prefault, + pgoff_t fault_page_size) { struct vm_area_struct *vma = vmf->vma; struct ttm_buffer_object *bo = vma->vm_private_data; @@ -274,6 +359,13 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, prot = pgprot_decrypted(prot); } + /* We don't prefault on huge faults. Yet. */ + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && fault_page_size != 1) { + ret = ttm_bo_vm_insert_huge(vmf, bo, page_offset, + fault_page_size, prot); + goto out_io_unlock; + } + /* * Speculatively prefault a number of pages. Only error on * first page. @@ -340,7 +432,7 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) return ret; prot = vma->vm_page_prot; - ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT); + ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -350,6 +442,66 @@ vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) } EXPORT_SYMBOL(ttm_bo_vm_fault); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +/** + * ttm_pgprot_is_wrprotecting - Is a page protection value write-protecting? + * @prot: The page protection value + * + * Return: true if @prot is write-protecting. false otherwise. + */ +static bool ttm_pgprot_is_wrprotecting(pgprot_t prot) +{ + /* + * This is meant to say "pgprot_wrprotect(prot) == prot" in a generic + * way. Unfortunately there is no generic pgprot_wrprotect. 
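ttm_bo_vm_insert_huge() above only installs a huge entry when the rounded-down fault offset still fits inside the object, the first backing pfn is aligned to the huge size, and the following pages are physically contiguous; anything else falls back to normal PTE faults. A compact userspace model of just those checks follows; the pfns[] array, the can_insert_huge() name and the sample values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Decide whether a huge entry of 'fault_pages' pages can be inserted for a
 * fault at 'page_offset' into an object backed by 'num_pages' pages whose
 * page frame numbers are listed in 'pfns'.
 */
static bool can_insert_huge(const unsigned long *pfns, unsigned long num_pages,
			    unsigned long page_offset, unsigned long fault_pages)
{
	unsigned long pfn, i;

	/* Round down so the fault does not cross the object boundary. */
	page_offset &= ~(fault_pages - 1);
	if (page_offset + fault_pages > num_pages)
		return false;

	/* The first pfn must itself be aligned to the huge page size. */
	pfn = pfns[page_offset];
	if (pfn & (fault_pages - 1))
		return false;

	/* And the backing memory must be physically contiguous. */
	for (i = 1; i < fault_pages; i++)
		if (pfns[page_offset + i] != pfn + i)
			return false;

	return true;
}

int main(void)
{
	unsigned long pfns[1024];
	unsigned long i;

	for (i = 0; i < 1024; i++)
		pfns[i] = 0x100000 + i;		/* contiguous, huge-aligned start */

	/* 512 x 4 KiB pages = one 2 MiB (PMD) entry on x86-64. */
	printf("PMD insert possible: %d\n", can_insert_huge(pfns, 1024, 700, 512));

	pfns[600] += 5;				/* break contiguity */
	printf("after a hole:        %d\n", can_insert_huge(pfns, 1024, 700, 512));
	return 0;
}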
+ */ + return pte_val(pte_wrprotect(__pte(pgprot_val(prot)))) == + pgprot_val(prot); +} + +static vm_fault_t ttm_bo_vm_huge_fault(struct vm_fault *vmf, + enum page_entry_size pe_size) +{ + struct vm_area_struct *vma = vmf->vma; + pgprot_t prot; + struct ttm_buffer_object *bo = vma->vm_private_data; + vm_fault_t ret; + pgoff_t fault_page_size = 0; + bool write = vmf->flags & FAULT_FLAG_WRITE; + + switch (pe_size) { + case PE_SIZE_PMD: + fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; + break; +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + case PE_SIZE_PUD: + fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; + break; +#endif + default: + WARN_ON_ONCE(1); + return VM_FAULT_FALLBACK; + } + + /* Fallback on write dirty-tracking or COW */ + if (write && ttm_pgprot_is_wrprotecting(vma->vm_page_prot)) + return VM_FAULT_FALLBACK; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + prot = vm_get_page_prot(vma->vm_flags); + ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + + dma_resv_unlock(bo->base.resv); + + return ret; +} +#endif + void ttm_bo_vm_open(struct vm_area_struct *vma) { struct ttm_buffer_object *bo = vma->vm_private_data; @@ -451,7 +603,10 @@ static const struct vm_operations_struct ttm_bo_vm_ops = { .fault = ttm_bo_vm_fault, .open = ttm_bo_vm_open, .close = ttm_bo_vm_close, - .access = ttm_bo_vm_access + .access = ttm_bo_vm_access, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + .huge_fault = ttm_bo_vm_huge_fault, +#endif }; static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 8512d970a09f..ac8f75db2ecd 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -41,6 +41,10 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) return -ENODEV; + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb"); + if (ret) + return ret; + vbox = kzalloc(sizeof(*vbox), GFP_KERNEL); if (!vbox) return -ENOMEM; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index cea18dc15f77..340719238753 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -681,11 +681,23 @@ static enum drm_mode_status vc4_hdmi_encoder_mode_valid(struct drm_encoder *crtc, const struct drm_display_mode *mode) { - /* HSM clock must be 108% of the pixel clock. Additionally, - * the AXI clock needs to be at least 25% of pixel clock, but - * HSM ends up being the limiting factor. + /* + * As stated in RPi's vc4 firmware "HDMI state machine (HSM) clock must + * be faster than pixel clock, infinitesimally faster, tested in + * simulation. Otherwise, exact value is unimportant for HDMI + * operation." This conflicts with bcm2835's vc4 documentation, which + * states HSM's clock has to be at least 108% of the pixel clock. + * + * Real life tests reveal that vc4's firmware statement holds up, and + * users are able to use pixel clocks closer to HSM's, namely for + * 1920x1200@60Hz. So it was decided to leave a 1% margin between + * both clocks. Which, for RPi0-3 implies a maximum pixel clock of + * 162MHz. + * + * Additionally, the AXI clock needs to be at least 25% of + * pixel clock, but HSM ends up being the limiting factor.
*/ - if (mode->clock > HSM_CLOCK_FREQ / (1000 * 108 / 100)) + if (mode->clock > HSM_CLOCK_FREQ / (1000 * 101 / 100)) return MODE_CLOCK_HIGH; return MODE_OK; diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 5c3515e8cce1..31f85f09f1fc 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -11,4 +11,5 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ ttm_object.o ttm_lock.o +vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 71e45b568511..c2247a893ed4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1247,6 +1247,18 @@ static void vmw_remove(struct pci_dev *pdev) pci_disable_device(pdev); } +static unsigned long +vmw_get_unmapped_area(struct file *file, unsigned long uaddr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct drm_file *file_priv = file->private_data; + struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); + + return drm_get_unmapped_area(file, uaddr, len, pgoff, flags, + &dev_priv->vma_manager); +} + static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val, void *ptr) { @@ -1418,6 +1430,7 @@ static const struct file_operations vmwgfx_driver_fops = { .compat_ioctl = vmw_compat_ioctl, #endif .llseek = noop_llseek, + .get_unmapped_area = vmw_get_unmapped_area, }; static struct drm_driver driver = { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5ddbcb9f6df4..8cdcd6e5f9e1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1000,6 +1000,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma); extern void vmw_validation_mem_init_ttm(struct vmw_private *dev_priv, size_t gran); + /** * TTM buffer object driver - vmwgfx_ttm_buffer.c */ @@ -1510,6 +1511,17 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, pgoff_t start, pgoff_t end); vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf); vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, + enum page_entry_size pe_size); +#endif + +/* Transparent hugepage support - vmwgfx_thp.c */ +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +extern const struct ttm_mem_type_manager_func vmw_thp_func; +#else +#define vmw_thp_func ttm_bo_manager_func +#endif /** * VMW_DEBUG_KMS - Debug output for kernel mode-setting diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 60cfbfadd3f2..d4d66532f9c9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -473,11 +473,11 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) * a lot of unnecessary write faults. 
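The relaxed check above rejects any mode whose pixel clock exceeds HSM_CLOCK_FREQ divided by 1.01, using the same integer arithmetic as the driver (mode->clock is in kHz). A quick stand-alone calculation of the resulting limits is sketched below; the 163682864 Hz HSM value is assumed from the vc4 headers and should be treated as illustrative.

#include <stdio.h>

#define HSM_CLOCK_FREQ 163682864	/* Hz, assumed from vc4_hdmi */

int main(void)
{
	/* mode->clock is in kHz, so the divisor folds in the 1000x unit change. */
	int new_limit_khz = HSM_CLOCK_FREQ / (1000 * 101 / 100);	/* 1% margin */
	int old_limit_khz = HSM_CLOCK_FREQ / (1000 * 108 / 100);	/* old 8% margin */

	printf("1%% margin: pixel clock up to %d kHz (~%d MHz)\n",
	       new_limit_khz, new_limit_khz / 1000);
	printf("8%% margin: pixel clock up to %d kHz (~%d MHz)\n",
	       old_limit_khz, old_limit_khz / 1000);
	return 0;
}

The first line comes out at roughly 162 MHz, matching the figure quoted in the new comment, while the old 108% rule capped modes well below 1920x1200@60Hz.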
*/ if (vbo->dirty && vbo->dirty->method == VMW_BO_DIRTY_MKWRITE) - prot = vma->vm_page_prot; + prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); else prot = vm_get_page_prot(vma->vm_flags); - ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault); + ret = ttm_bo_vm_fault_reserved(vmf, prot, num_prefault, 1); if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) return ret; @@ -486,3 +486,75 @@ out_unlock: return ret; } + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, + enum page_entry_size pe_size) +{ + struct vm_area_struct *vma = vmf->vma; + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; + struct vmw_buffer_object *vbo = + container_of(bo, struct vmw_buffer_object, base); + pgprot_t prot; + vm_fault_t ret; + pgoff_t fault_page_size; + bool write = vmf->flags & FAULT_FLAG_WRITE; + bool is_cow_mapping = + (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; + + switch (pe_size) { + case PE_SIZE_PMD: + fault_page_size = HPAGE_PMD_SIZE >> PAGE_SHIFT; + break; +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD + case PE_SIZE_PUD: + fault_page_size = HPAGE_PUD_SIZE >> PAGE_SHIFT; + break; +#endif + default: + WARN_ON_ONCE(1); + return VM_FAULT_FALLBACK; + } + + /* Always do write dirty-tracking and COW on PTE level. */ + if (write && (READ_ONCE(vbo->dirty) || is_cow_mapping)) + return VM_FAULT_FALLBACK; + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + return ret; + + if (vbo->dirty) { + pgoff_t allowed_prefault; + unsigned long page_offset; + + page_offset = vmf->pgoff - + drm_vma_node_start(&bo->base.vma_node); + if (page_offset >= bo->num_pages || + vmw_resources_clean(vbo, page_offset, + page_offset + PAGE_SIZE, + &allowed_prefault)) { + ret = VM_FAULT_SIGBUS; + goto out_unlock; + } + + /* + * Write protect, so we get a new fault on write, and can + * split. + */ + prot = vm_get_page_prot(vma->vm_flags & ~VM_SHARED); + } else { + prot = vm_get_page_prot(vma->vm_flags); + } + + ret = ttm_bo_vm_fault_reserved(vmf, prot, 1, fault_page_size); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + return ret; + +out_unlock: + dma_resv_unlock(bo->base.resv); + + return ret; +} +#endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c new file mode 100644 index 000000000000..b7c816ba7166 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Huge page-table-entry support for IO memory. + * + * Copyright (C) 2007-2019 Vmware, Inc. All rights reserved. + */ +#include "vmwgfx_drv.h" +#include <drm/ttm/ttm_module.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> + +/** + * struct vmw_thp_manager - Range manager implementing huge page alignment + * + * @mm: The underlying range manager. Protected by @lock. + * @lock: Manager lock.
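The vmwgfx_thp.c range manager continued below tries to place each VRAM allocation with PUD alignment first, then PMD alignment, and only then with the caller's requested alignment, so that the resulting buffers stay eligible for the huge page faults added above. A reduced model of that fallback order follows; try_alloc(), the page counts and the always-succeeding allocation are simplifications, and the real code also honours TTM_PL_FLAG_TOPDOWN and the configured lpfn range.

#include <stdbool.h>
#include <stdio.h>

#define PMD_PAGES 512UL			/* 2 MiB / 4 KiB on x86-64 */
#define PUD_PAGES (512UL * 512UL)	/* 1 GiB / 4 KiB on x86-64 */

/* Stand-in for drm_mm_insert_node_in_range(): pretend it always succeeds. */
static bool try_alloc(unsigned long num_pages, unsigned long align_pages)
{
	printf("allocating %lu pages with alignment %lu\n",
	       num_pages, align_pages);
	return true;
}

/* Only use a huge alignment when it is compatible with the requested one. */
static bool alignment_compatible(unsigned long align, unsigned long requested)
{
	return align >= requested && (!requested || align % requested == 0);
}

static void thp_get_node(unsigned long num_pages, unsigned long requested_align)
{
	if (num_pages >= PUD_PAGES &&
	    alignment_compatible(PUD_PAGES, requested_align) &&
	    try_alloc(num_pages, PUD_PAGES))
		return;

	if (num_pages >= PMD_PAGES &&
	    alignment_compatible(PMD_PAGES, requested_align) &&
	    try_alloc(num_pages, PMD_PAGES))
		return;

	try_alloc(num_pages, requested_align);	/* final fallback */
}

int main(void)
{
	thp_get_node(300000, 1);	/* big enough for a PUD-aligned slot */
	thp_get_node(1024, 1);		/* PMD-aligned */
	thp_get_node(16, 1);		/* plain allocation */
	return 0;
}

The design choice is simply opportunistic: alignment costs nothing when the node is large enough, and transparent huge page faults can then map it with far fewer page table entries.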
+ */ +struct vmw_thp_manager { + struct drm_mm mm; + spinlock_t lock; +}; + +static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node, + unsigned long align_pages, + const struct ttm_place *place, + struct ttm_mem_reg *mem, + unsigned long lpfn, + enum drm_mm_insert_mode mode) +{ + if (align_pages >= mem->page_alignment && + (!mem->page_alignment || align_pages % mem->page_alignment == 0)) { + return drm_mm_insert_node_in_range(mm, node, + mem->num_pages, + align_pages, 0, + place->fpfn, lpfn, mode); + } + + return -ENOSPC; +} + +static int vmw_thp_get_node(struct ttm_mem_type_manager *man, + struct ttm_buffer_object *bo, + const struct ttm_place *place, + struct ttm_mem_reg *mem) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + struct drm_mm *mm = &rman->mm; + struct drm_mm_node *node; + unsigned long align_pages; + unsigned long lpfn; + enum drm_mm_insert_mode mode = DRM_MM_INSERT_BEST; + int ret; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return -ENOMEM; + + lpfn = place->lpfn; + if (!lpfn) + lpfn = man->size; + + mode = DRM_MM_INSERT_BEST; + if (place->flags & TTM_PL_FLAG_TOPDOWN) + mode = DRM_MM_INSERT_HIGH; + + spin_lock(&rman->lock); + if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) { + align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT); + if (mem->num_pages >= align_pages) { + ret = vmw_thp_insert_aligned(mm, node, align_pages, + place, mem, lpfn, mode); + if (!ret) + goto found_unlock; + } + } + + align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT); + if (mem->num_pages >= align_pages) { + ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem, + lpfn, mode); + if (!ret) + goto found_unlock; + } + + ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages, + mem->page_alignment, 0, + place->fpfn, lpfn, mode); +found_unlock: + spin_unlock(&rman->lock); + + if (unlikely(ret)) { + kfree(node); + } else { + mem->mm_node = node; + mem->start = node->start; + } + + return 0; +} + + + +static void vmw_thp_put_node(struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + + if (mem->mm_node) { + spin_lock(&rman->lock); + drm_mm_remove_node(mem->mm_node); + spin_unlock(&rman->lock); + + kfree(mem->mm_node); + mem->mm_node = NULL; + } +} + +static int vmw_thp_init(struct ttm_mem_type_manager *man, + unsigned long p_size) +{ + struct vmw_thp_manager *rman; + + rman = kzalloc(sizeof(*rman), GFP_KERNEL); + if (!rman) + return -ENOMEM; + + drm_mm_init(&rman->mm, 0, p_size); + spin_lock_init(&rman->lock); + man->priv = rman; + return 0; +} + +static int vmw_thp_takedown(struct ttm_mem_type_manager *man) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + struct drm_mm *mm = &rman->mm; + + spin_lock(&rman->lock); + if (drm_mm_clean(mm)) { + drm_mm_takedown(mm); + spin_unlock(&rman->lock); + kfree(rman); + man->priv = NULL; + return 0; + } + spin_unlock(&rman->lock); + return -EBUSY; +} + +static void vmw_thp_debug(struct ttm_mem_type_manager *man, + struct drm_printer *printer) +{ + struct vmw_thp_manager *rman = (struct vmw_thp_manager *) man->priv; + + spin_lock(&rman->lock); + drm_mm_print(&rman->mm, printer); + spin_unlock(&rman->lock); +} + +const struct ttm_mem_type_manager_func vmw_thp_func = { + .init = vmw_thp_init, + .takedown = vmw_thp_takedown, + .get_node = vmw_thp_get_node, + .put_node = vmw_thp_put_node, + .debug = vmw_thp_debug +}; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c 
b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 3f3b2c7a208a..bf0bc4697959 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -749,7 +749,7 @@ static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, break; case TTM_PL_VRAM: /* "On-card" video ram */ - man->func = &ttm_bo_manager_func; + man->func = &vmw_thp_func; man->gpu_offset = 0; man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_CACHED; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c index aa7e50f63b94..3c03b1746661 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c @@ -34,7 +34,10 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma) .page_mkwrite = vmw_bo_vm_mkwrite, .fault = vmw_bo_vm_fault, .open = ttm_bo_vm_open, - .close = ttm_bo_vm_close + .close = ttm_bo_vm_close, +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + .huge_fault = vmw_bo_vm_huge_fault, +#endif }; struct drm_file *file_priv = filp->private_data; struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index 4be49c1aef51..374142018171 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -401,7 +401,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp, obj = xen_drm_front_gem_create(dev, args->size); if (IS_ERR_OR_NULL(obj)) { - ret = PTR_ERR(obj); + ret = PTR_ERR_OR_ZERO(obj); goto fail; } diff --git a/drivers/gpu/trace/Kconfig b/drivers/gpu/trace/Kconfig new file mode 100644 index 000000000000..c24e9edd022e --- /dev/null +++ b/drivers/gpu/trace/Kconfig @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config TRACE_GPU_MEM + bool diff --git a/drivers/gpu/trace/Makefile b/drivers/gpu/trace/Makefile new file mode 100644 index 000000000000..b70fbdc5847f --- /dev/null +++ b/drivers/gpu/trace/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_TRACE_GPU_MEM) += trace_gpu_mem.o diff --git a/drivers/gpu/trace/trace_gpu_mem.c b/drivers/gpu/trace/trace_gpu_mem.c new file mode 100644 index 000000000000..01e855897b6d --- /dev/null +++ b/drivers/gpu/trace/trace_gpu_mem.c @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * GPU memory trace points + * + * Copyright (C) 2020 Google, Inc. + */ + +#include <linux/module.h> + +#define CREATE_TRACE_POINTS +#include <trace/events/gpu_mem.h> + +EXPORT_TRACEPOINT_SYMBOL(gpu_mem_total); diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index a02ce43d778d..32e3bc0aa665 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -533,7 +533,6 @@ struct hv_dynmem_device { * State to synchronize hot-add. 
*/ struct completion ol_waitevent; - bool ha_waiting; /* * This thread handles hot-add * requests from the host as well as notifying @@ -634,10 +633,7 @@ static int hv_memory_notifier(struct notifier_block *nb, unsigned long val, switch (val) { case MEM_ONLINE: case MEM_CANCEL_ONLINE: - if (dm_device.ha_waiting) { - dm_device.ha_waiting = false; - complete(&dm_device.ol_waitevent); - } + complete(&dm_device.ol_waitevent); break; case MEM_OFFLINE: @@ -726,8 +722,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, has->covered_end_pfn += processed_pfn; spin_unlock_irqrestore(&dm_device.ha_lock, flags); - init_completion(&dm_device.ol_waitevent); - dm_device.ha_waiting = !memhp_auto_online; + reinit_completion(&dm_device.ol_waitevent); nid = memory_add_physaddr_to_nid(PFN_PHYS(start_pfn)); ret = add_memory(nid, PFN_PHYS((start_pfn)), @@ -753,15 +748,14 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size, } /* - * Wait for the memory block to be onlined when memory onlining - * is done outside of kernel (memhp_auto_online). Since the hot - * add has succeeded, it is ok to proceed even if the pages in - * the hot added region have not been "onlined" within the - * allowed time. + * Wait for memory to get onlined. If the kernel onlined the + * memory when adding it, this will return directly. Otherwise, + * it will wait for user space to online the memory. This helps + * to avoid adding memory faster than it is getting onlined. As + * adding succeeded, it is ok to proceed even if the memory was + * not onlined in time. */ - if (dm_device.ha_waiting) - wait_for_completion_timeout(&dm_device.ol_waitevent, - 5*HZ); + wait_for_completion_timeout(&dm_device.ol_waitevent, 5 * HZ); post_status(&dm_device); } } @@ -1706,6 +1700,7 @@ static int balloon_probe(struct hv_device *dev, #ifdef CONFIG_MEMORY_HOTPLUG set_online_page_callback(&hv_online_page); + init_completion(&dm_device.ol_waitevent); register_memory_notifier(&hv_memory_nb); #endif diff --git a/drivers/ide/ide-scan-pci.c b/drivers/ide/ide-scan-pci.c index acf874800ca4..b0411a1827a3 100644 --- a/drivers/ide/ide-scan-pci.c +++ b/drivers/ide/ide-scan-pci.c @@ -89,8 +89,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev) static int __init ide_scan_pcibus(void) { struct pci_dev *dev = NULL; - struct pci_driver *d; - struct list_head *l, *n; + struct pci_driver *d, *tmp; pre_init = 0; for_each_pci_dev(dev) @@ -101,9 +100,8 @@ static int __init ide_scan_pcibus(void) * are post init. 
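The hv_balloon change above drops the ha_waiting flag: the completion is now initialised once at probe time, re-armed with reinit_completion() before every hot-add request, and signalled unconditionally from the memory notifier, with wait_for_completion_timeout() bounding the wait to five seconds. A userspace sketch of that pattern, using a POSIX semaphore as the completion, is given below; the helper thread, the 100 ms delay and the function names are invented for the demo.

/* build: cc -pthread completion_demo.c */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static sem_t ol_waitevent;		/* plays the role of dm_device.ol_waitevent */

/* Notifier side: unconditionally signal, like the MEM_ONLINE handler now does. */
static void *memory_notifier(void *arg)
{
	(void)arg;
	usleep(100 * 1000);		/* pretend onlining takes 100 ms */
	sem_post(&ol_waitevent);
	return NULL;
}

/* "reinit_completion": drop any stale signal left over from a previous cycle. */
static void reinit(sem_t *sem)
{
	while (sem_trywait(sem) == 0)
		;
}

static int wait_timeout(sem_t *sem, int seconds)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += seconds;
	return sem_timedwait(sem, &ts);	/* 0 on completion, -1 on timeout */
}

int main(void)
{
	pthread_t thr;

	sem_init(&ol_waitevent, 0, 0);	/* "init_completion" done once, at probe */

	/* One hot-add cycle: re-arm, add memory, then wait (bounded) for onlining. */
	reinit(&ol_waitevent);
	pthread_create(&thr, NULL, memory_notifier, NULL);
	if (wait_timeout(&ol_waitevent, 5) == 0)
		printf("memory block onlined in time\n");
	else
		printf("proceeding anyway, onlining did not finish in 5s\n");

	pthread_join(thr, NULL);
	return 0;
}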
*/ - list_for_each_safe(l, n, &ide_pci_drivers) { - list_del(l); - d = list_entry(l, struct pci_driver, node); + list_for_each_entry_safe(d, tmp, &ide_pci_drivers, node) { + list_del(&d->node); if (__pci_register_driver(d, d->driver.owner, d->driver.mod_name)) printk(KERN_ERR "%s: failed to register %s driver\n", diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 5bd51853b15e..d5c073a8aa3e 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -88,6 +88,7 @@ source "drivers/iio/orientation/Kconfig" if IIO_TRIGGER source "drivers/iio/trigger/Kconfig" endif #IIO_TRIGGER +source "drivers/iio/position/Kconfig" source "drivers/iio/potentiometer/Kconfig" source "drivers/iio/potentiostat/Kconfig" source "drivers/iio/pressure/Kconfig" diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index bff682ad1cfb..1712011c0f4a 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -31,6 +31,7 @@ obj-y += light/ obj-y += magnetometer/ obj-y += multiplexer/ obj-y += orientation/ +obj-y += position/ obj-y += potentiometer/ obj-y += potentiostat/ obj-y += pressure/ diff --git a/drivers/iio/accel/cros_ec_accel_legacy.c b/drivers/iio/accel/cros_ec_accel_legacy.c index 68e847c6255e..2532b9ad3384 100644 --- a/drivers/iio/accel/cros_ec_accel_legacy.c +++ b/drivers/iio/accel/cros_ec_accel_legacy.c @@ -170,7 +170,8 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, NULL); if (ret) return ret; @@ -190,11 +191,6 @@ static int cros_ec_accel_legacy_probe(struct platform_device *pdev) state->sign[CROS_EC_SENSOR_Z] = -1; } - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index f4da821c4022..12bb8b7ca1ff 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -795,6 +795,16 @@ config RCAR_GYRO_ADC To compile this driver as a module, choose M here: the module will be called rcar-gyroadc. +config RN5T618_ADC + tristate "ADC for the RN5T618/RC5T619 family of chips" + depends on MFD_RN5T618 + help + Say yes here to build support for the integrated ADC inside the + RN5T618/619 series PMICs: + + This driver can also be built as a module. If so, the module + will be called rn5t618-adc. 
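The rn5t618-adc driver added further down in this patch reports IIO_CHAN_INFO_SCALE as RN5T618_REFERENCE_VOLT * numerator / (denominator * 4095), i.e. each channel sits behind a fixed divider or shunt-amplifier ratio feeding a 12-bit converter with a 2.5 V reference. A small stand-alone model of that conversion is shown below; the raw value of 2048 and the subset of channels are arbitrary examples, not measurements.

#include <stdio.h>

#define RN5T618_REFERENCE_VOLT 2500	/* mV, as in the driver */
#define ADC_MAX 4095			/* 12-bit conversion result */

struct channel_ratio {
	const char *name;
	unsigned int numerator;
	unsigned int denominator;
};

/* Same per-channel dividers the driver exposes through IIO_CHAN_INFO_SCALE. */
static const struct channel_ratio ratios[] = {
	{ "LIMMON", 50, 32 },	/* 20 mOhm shunt, amplified by 32 */
	{ "VBAT",    2,  1 },
	{ "VADP",    3,  1 },
	{ "AIN0",    1,  1 },
};

/* Convert a raw 12-bit reading to millivolts at the pin: raw * scale. */
static double raw_to_millivolt(unsigned int raw, const struct channel_ratio *r)
{
	return (double)raw * RN5T618_REFERENCE_VOLT * r->numerator /
	       (r->denominator * ADC_MAX);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(ratios) / sizeof(ratios[0]); i++)
		printf("%-6s raw=2048 -> %.1f mV\n", ratios[i].name,
		       raw_to_millivolt(2048, &ratios[i]));
	return 0;
}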
+ config ROCKCHIP_SARADC tristate "Rockchip SARADC driver" depends on ARCH_ROCKCHIP || (ARM && COMPILE_TEST) diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index 8462455b4228..637807861112 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -75,6 +75,7 @@ obj-$(CONFIG_QCOM_VADC_COMMON) += qcom-vadc-common.o obj-$(CONFIG_QCOM_SPMI_VADC) += qcom-spmi-vadc.o obj-$(CONFIG_QCOM_PM8XXX_XOADC) += qcom-pm8xxx-xoadc.o obj-$(CONFIG_RCAR_GYRO_ADC) += rcar-gyroadc.o +obj-$(CONFIG_RN5T618_ADC) += rn5t618-adc.o obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o obj-$(CONFIG_SPEAR_ADC) += spear_adc.o diff --git a/drivers/iio/adc/rn5t618-adc.c b/drivers/iio/adc/rn5t618-adc.c new file mode 100644 index 000000000000..f21027e4e26a --- /dev/null +++ b/drivers/iio/adc/rn5t618-adc.c @@ -0,0 +1,256 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * ADC driver for the RICOH RN5T618 power management chip family + * + * Copyright (C) 2019 Andreas Kemnade + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mfd/rn5t618.h> +#include <linux/platform_device.h> +#include <linux/completion.h> +#include <linux/regmap.h> +#include <linux/iio/iio.h> +#include <linux/slab.h> + +#define RN5T618_ADC_CONVERSION_TIMEOUT (msecs_to_jiffies(500)) +#define RN5T618_REFERENCE_VOLT 2500 + +/* mask for selecting channels for single conversion */ +#define RN5T618_ADCCNT3_CHANNEL_MASK 0x7 +/* average 4-time conversion mode */ +#define RN5T618_ADCCNT3_AVG BIT(3) +/* set for starting a single conversion, gets cleared by hw when done */ +#define RN5T618_ADCCNT3_GODONE BIT(4) +/* automatic conversion, period is in ADCCNT2, selected channels are + * in ADCCNT1 + */ +#define RN5T618_ADCCNT3_AUTO BIT(5) +#define RN5T618_ADCEND_IRQ BIT(0) + +struct rn5t618_adc_data { + struct device *dev; + struct rn5t618 *rn5t618; + struct completion conv_completion; + int irq; +}; + +struct rn5t618_channel_ratios { + u16 numerator; + u16 denominator; +}; + +enum rn5t618_channels { + LIMMON = 0, + VBAT, + VADP, + VUSB, + VSYS, + VTHM, + AIN1, + AIN0 +}; + +static const struct rn5t618_channel_ratios rn5t618_ratios[8] = { + [LIMMON] = {50, 32}, /* measured across 20mOhm, amplified by 32 */ + [VBAT] = {2, 1}, + [VADP] = {3, 1}, + [VUSB] = {3, 1}, + [VSYS] = {3, 1}, + [VTHM] = {1, 1}, + [AIN1] = {1, 1}, + [AIN0] = {1, 1}, +}; + +static int rn5t618_read_adc_reg(struct rn5t618 *rn5t618, int reg, u16 *val) +{ + u8 data[2]; + int ret; + + ret = regmap_bulk_read(rn5t618->regmap, reg, data, sizeof(data)); + if (ret < 0) + return ret; + + *val = (data[0] << 4) | (data[1] & 0xF); + + return 0; +} + +static irqreturn_t rn5t618_adc_irq(int irq, void *data) +{ + struct rn5t618_adc_data *adc = data; + unsigned int r = 0; + int ret; + + /* clear low & high threshold irqs */ + regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC1, 0); + regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC2, 0); + + ret = regmap_read(adc->rn5t618->regmap, RN5T618_IR_ADC3, &r); + if (ret < 0) + dev_err(adc->dev, "failed to read IRQ status: %d\n", ret); + + regmap_write(adc->rn5t618->regmap, RN5T618_IR_ADC3, 0); + + if (r & RN5T618_ADCEND_IRQ) + complete(&adc->conv_completion); + + return IRQ_HANDLED; +} + +static int rn5t618_adc_read(struct iio_dev *iio_dev, + const struct iio_chan_spec *chan, + int *val, int *val2, long mask) +{ + struct rn5t618_adc_data *adc = iio_priv(iio_dev); + u16 raw; + int 
ret; + + if (mask == IIO_CHAN_INFO_SCALE) { + *val = RN5T618_REFERENCE_VOLT * + rn5t618_ratios[chan->channel].numerator; + *val2 = rn5t618_ratios[chan->channel].denominator * 4095; + + return IIO_VAL_FRACTIONAL; + } + + /* select channel */ + ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3, + RN5T618_ADCCNT3_CHANNEL_MASK, + chan->channel); + if (ret < 0) + return ret; + + ret = regmap_write(adc->rn5t618->regmap, RN5T618_EN_ADCIR3, + RN5T618_ADCEND_IRQ); + if (ret < 0) + return ret; + + ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3, + RN5T618_ADCCNT3_AVG, + mask == IIO_CHAN_INFO_AVERAGE_RAW ? + RN5T618_ADCCNT3_AVG : 0); + if (ret < 0) + return ret; + + init_completion(&adc->conv_completion); + /* single conversion */ + ret = regmap_update_bits(adc->rn5t618->regmap, RN5T618_ADCCNT3, + RN5T618_ADCCNT3_GODONE, + RN5T618_ADCCNT3_GODONE); + if (ret < 0) + return ret; + + ret = wait_for_completion_timeout(&adc->conv_completion, + RN5T618_ADC_CONVERSION_TIMEOUT); + if (ret == 0) { + dev_warn(adc->dev, "timeout waiting for adc result\n"); + return -ETIMEDOUT; + } + + ret = rn5t618_read_adc_reg(adc->rn5t618, + RN5T618_ILIMDATAH + 2 * chan->channel, + &raw); + if (ret < 0) + return ret; + + *val = raw; + + return IIO_VAL_INT; +} + +static const struct iio_info rn5t618_adc_iio_info = { + .read_raw = &rn5t618_adc_read, +}; + +#define RN5T618_ADC_CHANNEL(_channel, _type, _name) { \ + .type = _type, \ + .channel = _channel, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_AVERAGE_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE), \ + .datasheet_name = _name, \ + .indexed = 1. \ +} + +static const struct iio_chan_spec rn5t618_adc_iio_channels[] = { + RN5T618_ADC_CHANNEL(LIMMON, IIO_CURRENT, "LIMMON"), + RN5T618_ADC_CHANNEL(VBAT, IIO_VOLTAGE, "VBAT"), + RN5T618_ADC_CHANNEL(VADP, IIO_VOLTAGE, "VADP"), + RN5T618_ADC_CHANNEL(VUSB, IIO_VOLTAGE, "VUSB"), + RN5T618_ADC_CHANNEL(VSYS, IIO_VOLTAGE, "VSYS"), + RN5T618_ADC_CHANNEL(VTHM, IIO_VOLTAGE, "VTHM"), + RN5T618_ADC_CHANNEL(AIN1, IIO_VOLTAGE, "AIN1"), + RN5T618_ADC_CHANNEL(AIN0, IIO_VOLTAGE, "AIN0") +}; + +static int rn5t618_adc_probe(struct platform_device *pdev) +{ + int ret; + struct iio_dev *iio_dev; + struct rn5t618_adc_data *adc; + struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent); + + iio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc)); + if (!iio_dev) { + dev_err(&pdev->dev, "failed allocating iio device\n"); + return -ENOMEM; + } + + adc = iio_priv(iio_dev); + adc->dev = &pdev->dev; + adc->rn5t618 = rn5t618; + + if (rn5t618->irq_data) + adc->irq = regmap_irq_get_virq(rn5t618->irq_data, + RN5T618_IRQ_ADC); + + if (adc->irq <= 0) { + dev_err(&pdev->dev, "get virq failed\n"); + return -EINVAL; + } + + init_completion(&adc->conv_completion); + + iio_dev->name = dev_name(&pdev->dev); + iio_dev->dev.parent = &pdev->dev; + iio_dev->info = &rn5t618_adc_iio_info; + iio_dev->modes = INDIO_DIRECT_MODE; + iio_dev->channels = rn5t618_adc_iio_channels; + iio_dev->num_channels = ARRAY_SIZE(rn5t618_adc_iio_channels); + + /* stop any auto-conversion */ + ret = regmap_write(rn5t618->regmap, RN5T618_ADCCNT3, 0); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, iio_dev); + + ret = devm_request_threaded_irq(adc->dev, adc->irq, NULL, + rn5t618_adc_irq, + IRQF_ONESHOT, dev_name(adc->dev), + adc); + if (ret < 0) { + dev_err(adc->dev, "request irq %d failed: %d\n", adc->irq, ret); + return ret; + } + + return devm_iio_device_register(adc->dev, iio_dev); +} + +static struct platform_driver rn5t618_adc_driver = 
{ + .driver = { + .name = "rn5t618-adc", + }, + .probe = rn5t618_adc_probe, +}; + +module_platform_driver(rn5t618_adc_driver); +MODULE_ALIAS("platform:rn5t618-adc"); +MODULE_DESCRIPTION("RICOH RN5T618 ADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c index 1dcc2a16ab2d..af801e203623 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_lid_angle.c @@ -97,7 +97,7 @@ static int cros_ec_lid_angle_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, false); + ret = cros_ec_sensors_core_init(pdev, indio_dev, false, NULL, NULL); if (ret) return ret; @@ -127,7 +127,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_lid_angle_ids); static struct platform_driver cros_ec_lid_angle_platform_driver = { .driver = { .name = DRV_NAME, - .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_lid_angle_probe, .id_table = cros_ec_lid_angle_ids, diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c index 576e45faafaf..a66941fdb385 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors.c @@ -230,10 +230,14 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, + cros_ec_sensors_push_data); if (ret) return ret; + iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes); + indio_dev->info = &ec_sensors_info; state = iio_priv(indio_dev); for (channel = state->channels, i = CROS_EC_SENSOR_X; @@ -245,7 +249,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) BIT(IIO_CHAN_INFO_CALIBSCALE); channel->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | - BIT(IIO_CHAN_INFO_FREQUENCY) | BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); @@ -292,11 +295,6 @@ static int cros_ec_sensors_probe(struct platform_device *pdev) else state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd; - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } @@ -317,7 +315,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_sensors_ids); static struct platform_driver cros_ec_sensors_platform_driver = { .driver = { .name = "cros-ec-sensors", - .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_sensors_probe, .id_table = cros_ec_sensors_ids, diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index d3a3626c7cd8..c831915ca7e5 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -11,7 +11,9 @@ #include <linux/iio/common/cros_ec_sensors_core.h> #include <linux/iio/iio.h> #include <linux/iio/kfifo_buf.h> +#include <linux/iio/sysfs.h> #include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> @@ -20,6 +22,12 @@ #include <linux/platform_data/cros_ec_sensorhub.h> #include <linux/platform_device.h> +/* + * Hard coded to the first device to support sensor fifo. 
The EC has a 2048 + * byte fifo and will trigger an interrupt when fifo is 2/3 full. + */ +#define CROS_EC_FIFO_SIZE (2048 * 2 / 3) + static char *cros_ec_loc[] = { [MOTIONSENSE_LOC_BASE] = "base", [MOTIONSENSE_LOC_LID] = "lid", @@ -53,8 +61,15 @@ static int cros_ec_get_host_cmd_version_mask(struct cros_ec_device *ec_dev, static void get_default_min_max_freq(enum motionsensor_type type, u32 *min_freq, - u32 *max_freq) + u32 *max_freq, + u32 *max_fifo_events) { + /* + * We don't know fifo size, set to size previously used by older + * hardware. + */ + *max_fifo_events = CROS_EC_FIFO_SIZE; + switch (type) { case MOTIONSENSE_TYPE_ACCEL: case MOTIONSENSE_TYPE_GYRO: @@ -82,9 +97,155 @@ static void get_default_min_max_freq(enum motionsensor_type type, } } +static int cros_ec_sensor_set_ec_rate(struct cros_ec_sensors_core_state *st, + int rate) +{ + int ret; + + if (rate > U16_MAX) + rate = U16_MAX; + + mutex_lock(&st->cmd_lock); + st->param.cmd = MOTIONSENSE_CMD_EC_RATE; + st->param.ec_rate.data = rate; + ret = cros_ec_motion_send_host_cmd(st, 0); + mutex_unlock(&st->cmd_lock); + return ret; +} + +static ssize_t cros_ec_sensor_set_report_latency(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + int integer, fract, ret; + int latency; + + ret = iio_str_to_fixpoint(buf, 100000, &integer, &fract); + if (ret) + return ret; + + /* EC rate is in ms. */ + latency = integer * 1000 + fract / 1000; + ret = cros_ec_sensor_set_ec_rate(st, latency); + if (ret < 0) + return ret; + + return len; +} + +static ssize_t cros_ec_sensor_get_report_latency(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + int latency, ret; + + mutex_lock(&st->cmd_lock); + st->param.cmd = MOTIONSENSE_CMD_EC_RATE; + st->param.ec_rate.data = EC_MOTION_SENSE_NO_VALUE; + + ret = cros_ec_motion_send_host_cmd(st, 0); + latency = st->resp->ec_rate.ret; + mutex_unlock(&st->cmd_lock); + if (ret < 0) + return ret; + + return sprintf(buf, "%d.%06u\n", + latency / 1000, + (latency % 1000) * 1000); +} + +static IIO_DEVICE_ATTR(hwfifo_timeout, 0644, + cros_ec_sensor_get_report_latency, + cros_ec_sensor_set_report_latency, 0); + +static ssize_t hwfifo_watermark_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + + return sprintf(buf, "%d\n", st->fifo_max_event_count); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); + +const struct attribute *cros_ec_sensor_fifo_attributes[] = { + &iio_dev_attr_hwfifo_timeout.dev_attr.attr, + &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, + NULL, +}; +EXPORT_SYMBOL_GPL(cros_ec_sensor_fifo_attributes); + +int cros_ec_sensors_push_data(struct iio_dev *indio_dev, + s16 *data, + s64 timestamp) +{ + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + s16 *out; + s64 delta; + unsigned int i; + + /* + * Ignore samples if the buffer is not set: it is needed if the ODR is + * set but the buffer is not enabled yet. 
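The hwfifo_timeout store above accepts a latency in seconds with a fractional part and forwards it to the EC in milliseconds via MOTIONSENSE_CMD_EC_RATE. Below is a stand-alone model of that conversion; parse_fixpoint() is a simplified substitute for iio_str_to_fixpoint(buf, 100000, ...), and the "0.250000" input is just an example value.

#include <stdio.h>
#include <stdlib.h>

/*
 * Parse "seconds.fraction" into whole seconds and microseconds, roughly what
 * iio_str_to_fixpoint() with a 100000 multiplier produces for this attribute.
 */
static void parse_fixpoint(const char *buf, int *integer, int *micro)
{
	char *end;
	long sec, frac = 0, mult = 100000;

	sec = strtol(buf, &end, 10);
	if (*end == '.')
		for (end++; *end >= '0' && *end <= '9' && mult; end++) {
			frac += (*end - '0') * mult;
			mult /= 10;
		}
	*integer = (int)sec;
	*micro = (int)frac;
}

int main(void)
{
	const char *buf = "0.250000";	/* user asks for a 250 ms report latency */
	int integer, micro, latency_ms;

	parse_fixpoint(buf, &integer, &micro);
	latency_ms = integer * 1000 + micro / 1000;	/* EC rate is in ms */
	printf("hwfifo_timeout %s -> %d ms sent to the EC\n", buf, latency_ms);
	return 0;
}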
+ */ + if (!iio_buffer_enabled(indio_dev)) + return 0; + + out = (s16 *)st->samples; + for_each_set_bit(i, + indio_dev->active_scan_mask, + indio_dev->masklength) { + *out = data[i]; + out++; + } + + if (iio_device_get_clock(indio_dev) != CLOCK_BOOTTIME) + delta = iio_get_time_ns(indio_dev) - cros_ec_get_time_ns(); + else + delta = 0; + + iio_push_to_buffers_with_timestamp(indio_dev, st->samples, + timestamp + delta); + + return 0; +} +EXPORT_SYMBOL_GPL(cros_ec_sensors_push_data); + +static void cros_ec_sensors_core_clean(void *arg) +{ + struct platform_device *pdev = (struct platform_device *)arg; + struct cros_ec_sensorhub *sensor_hub = + dev_get_drvdata(pdev->dev.parent); + struct iio_dev *indio_dev = platform_get_drvdata(pdev); + struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); + u8 sensor_num = st->param.info.sensor_num; + + cros_ec_sensorhub_unregister_push_data(sensor_hub, sensor_num); +} + +/** + * cros_ec_sensors_core_init() - basic initialization of the core structure + * @pdev: platform device created for the sensors + * @indio_dev: iio device structure of the device + * @physical_device: true if the device refers to a physical device + * @trigger_capture: function pointer to call buffer is triggered, + * for backward compatibility. + * @push_data: function to call when cros_ec_sensorhub receives + * a sample for that sensor. + * + * Return: 0 on success, -errno on failure. + */ int cros_ec_sensors_core_init(struct platform_device *pdev, struct iio_dev *indio_dev, - bool physical_device) + bool physical_device, + cros_ec_sensors_capture_t trigger_capture, + cros_ec_sensorhub_push_data_cb_t push_data) { struct device *dev = &pdev->dev; struct cros_ec_sensors_core_state *state = iio_priv(indio_dev); @@ -92,6 +253,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, struct cros_ec_dev *ec = sensor_hub->ec; struct cros_ec_sensor_platform *sensor_platform = dev_get_platdata(dev); u32 ver_mask; + int frequencies[ARRAY_SIZE(state->frequencies) / 2] = { 0 }; int ret, i; platform_set_drvdata(pdev, indio_dev); @@ -123,8 +285,6 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, indio_dev->name = pdev->name; if (physical_device) { - indio_dev->modes = INDIO_DIRECT_MODE; - state->param.cmd = MOTIONSENSE_CMD_INFO; state->param.info.sensor_num = sensor_platform->sensor_num; ret = cros_ec_motion_send_host_cmd(state, 0); @@ -142,16 +302,63 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, state->calib[i].scale = MOTION_SENSE_DEFAULT_SCALE; /* 0 is a correct value used to stop the device */ - state->frequencies[0] = 0; if (state->msg->version < 3) { get_default_min_max_freq(state->resp->info.type, - &state->frequencies[1], - &state->frequencies[2]); + &frequencies[1], + &frequencies[2], + &state->fifo_max_event_count); } else { - state->frequencies[1] = - state->resp->info_3.min_frequency; - state->frequencies[2] = - state->resp->info_3.max_frequency; + frequencies[1] = state->resp->info_3.min_frequency; + frequencies[2] = state->resp->info_3.max_frequency; + state->fifo_max_event_count = + state->resp->info_3.fifo_max_event_count; + } + for (i = 0; i < ARRAY_SIZE(frequencies); i++) { + state->frequencies[2 * i] = frequencies[i] / 1000; + state->frequencies[2 * i + 1] = + (frequencies[i] % 1000) * 1000; + } + + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + /* + * Create a software buffer, feed by the EC FIFO. + * We can not use trigger here, as events are generated + * as soon as sample_frequency is set. 
+ */ + struct iio_buffer *buffer; + + buffer = devm_iio_kfifo_allocate(dev); + if (!buffer) + return -ENOMEM; + + iio_device_attach_buffer(indio_dev, buffer); + indio_dev->modes = INDIO_BUFFER_SOFTWARE; + + ret = cros_ec_sensorhub_register_push_data( + sensor_hub, sensor_platform->sensor_num, + indio_dev, push_data); + if (ret) + return ret; + + ret = devm_add_action_or_reset( + dev, cros_ec_sensors_core_clean, pdev); + if (ret) + return ret; + + /* Timestamp coming from FIFO are in ns since boot. */ + ret = iio_device_set_clock(indio_dev, CLOCK_BOOTTIME); + if (ret) + return ret; + } else { + /* + * The only way to get samples in buffer is to set a + * software tigger (systrig, hrtimer). + */ + ret = devm_iio_triggered_buffer_setup( + dev, indio_dev, NULL, trigger_capture, + NULL); + if (ret) + return ret; } } @@ -159,6 +366,16 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_init); +/** + * cros_ec_motion_send_host_cmd() - send motion sense host command + * @state: pointer to state information for device + * @opt_length: optional length to reduce the response size, useful on the data + * path. Otherwise, the maximal allowed response size is used + * + * When called, the sub-command is assumed to be set in param->cmd. + * + * Return: 0 on success, -errno on failure. + */ int cros_ec_motion_send_host_cmd(struct cros_ec_sensors_core_state *state, u16 opt_length) { @@ -421,6 +638,14 @@ int cros_ec_sensors_read_lpc(struct iio_dev *indio_dev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_read_lpc); +/** + * cros_ec_sensors_read_cmd() - retrieve data using the EC command protocol + * @indio_dev: pointer to IIO device + * @scan_mask: bitmap of the sensor indices to scan + * @data: location to store data + * + * Return: 0 on success, -errno on failure. + */ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, unsigned long scan_mask, s16 *data) { @@ -445,6 +670,18 @@ int cros_ec_sensors_read_cmd(struct iio_dev *indio_dev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_read_cmd); +/** + * cros_ec_sensors_capture() - the trigger handler function + * @irq: the interrupt number. + * @p: a pointer to the poll function. + * + * On a trigger event occurring, if the pollfunc is attached then this + * handler is called as a threaded interrupt (and hence may sleep). It + * is responsible for grabbing data from the device and pushing it into + * the associated buffer. 
+ * + * Return: IRQ_HANDLED + */ irqreturn_t cros_ec_sensors_capture(int irq, void *p) { struct iio_poll_func *pf = p; @@ -480,26 +717,24 @@ done: } EXPORT_SYMBOL_GPL(cros_ec_sensors_capture); +/** + * cros_ec_sensors_core_read() - function to request a value from the sensor + * @st: pointer to state information for device + * @chan: channel specification structure table + * @val: will contain one element making up the returned value + * @val2: will contain another element making up the returned value + * @mask: specifies which values to be requested + * + * Return: the type of value returned by the device + */ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { - int ret; + int ret, frequency; switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = - EC_MOTION_SENSE_NO_VALUE; - - ret = cros_ec_motion_send_host_cmd(st, 0); - if (ret) - break; - - *val = st->resp->ec_rate.ret; - ret = IIO_VAL_INT; - break; - case IIO_CHAN_INFO_FREQUENCY: st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR; st->param.sensor_odr.data = EC_MOTION_SENSE_NO_VALUE; @@ -508,8 +743,10 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, if (ret) break; - *val = st->resp->sensor_odr.ret; - ret = IIO_VAL_INT; + frequency = st->resp->sensor_odr.ret; + *val = frequency / 1000; + *val2 = (frequency % 1000) * 1000; + ret = IIO_VAL_INT_PLUS_MICRO; break; default: ret = -EINVAL; @@ -520,6 +757,17 @@ int cros_ec_sensors_core_read(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read); +/** + * cros_ec_sensors_core_read_avail() - get available values + * @indio_dev: pointer to state information for device + * @chan: channel specification structure table + * @vals: list of available values + * @type: type of data returned + * @length: number of data returned in the array + * @mask: specifies which values to be requested + * + * Return: an error code, IIO_AVAIL_RANGE or IIO_AVAIL_LIST + */ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, @@ -533,7 +781,7 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, case IIO_CHAN_INFO_SAMP_FREQ: *length = ARRAY_SIZE(state->frequencies); *vals = (const int *)&state->frequencies; - *type = IIO_VAL_INT; + *type = IIO_VAL_INT_PLUS_MICRO; return IIO_AVAIL_LIST; } @@ -541,31 +789,33 @@ int cros_ec_sensors_core_read_avail(struct iio_dev *indio_dev, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_read_avail); +/** + * cros_ec_sensors_core_write() - function to write a value to the sensor + * @st: pointer to state information for device + * @chan: channel specification structure table + * @val: first part of value to write + * @val2: second part of value to write + * @mask: specifies which values to write + * + * Return: the type of value returned by the device + */ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, struct iio_chan_spec const *chan, int val, int val2, long mask) { - int ret; + int ret, frequency; switch (mask) { - case IIO_CHAN_INFO_FREQUENCY: + case IIO_CHAN_INFO_SAMP_FREQ: + frequency = val * 1000 + val2 / 1000; st->param.cmd = MOTIONSENSE_CMD_SENSOR_ODR; - st->param.sensor_odr.data = val; + st->param.sensor_odr.data = frequency; /* Always roundup, so caller gets at least what it asks for. 
*/ st->param.sensor_odr.roundup = 1; ret = cros_ec_motion_send_host_cmd(st, 0); break; - case IIO_CHAN_INFO_SAMP_FREQ: - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = val; - - ret = cros_ec_motion_send_host_cmd(st, 0); - if (ret) - break; - st->curr_sampl_freq = val; - break; default: ret = -EINVAL; break; @@ -574,52 +824,5 @@ int cros_ec_sensors_core_write(struct cros_ec_sensors_core_state *st, } EXPORT_SYMBOL_GPL(cros_ec_sensors_core_write); -static int __maybe_unused cros_ec_sensors_prepare(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); - - if (st->curr_sampl_freq == 0) - return 0; - - /* - * If the sensors are sampled at high frequency, we will not be able to - * sleep. Set sampling to a long period if necessary. - */ - if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) { - mutex_lock(&st->cmd_lock); - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY; - cros_ec_motion_send_host_cmd(st, 0); - mutex_unlock(&st->cmd_lock); - } - return 0; -} - -static void __maybe_unused cros_ec_sensors_complete(struct device *dev) -{ - struct iio_dev *indio_dev = dev_get_drvdata(dev); - struct cros_ec_sensors_core_state *st = iio_priv(indio_dev); - - if (st->curr_sampl_freq == 0) - return; - - if (st->curr_sampl_freq < CROS_EC_MIN_SUSPEND_SAMPLING_FREQUENCY) { - mutex_lock(&st->cmd_lock); - st->param.cmd = MOTIONSENSE_CMD_EC_RATE; - st->param.ec_rate.data = st->curr_sampl_freq; - cros_ec_motion_send_host_cmd(st, 0); - mutex_unlock(&st->cmd_lock); - } -} - -const struct dev_pm_ops cros_ec_sensors_pm_ops = { -#ifdef CONFIG_PM_SLEEP - .prepare = cros_ec_sensors_prepare, - .complete = cros_ec_sensors_complete -#endif -}; -EXPORT_SYMBOL_GPL(cros_ec_sensors_pm_ops); - MODULE_DESCRIPTION("ChromeOS EC sensor hub core functions"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index eac63c1bb8da..2352c426bfb5 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -189,7 +189,12 @@ ssize_t iio_read_const_attr(struct device *dev, } EXPORT_SYMBOL(iio_read_const_attr); -static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) +/** + * iio_device_set_clock() - Set current timestamping clock for the device + * @indio_dev: IIO device structure containing the device + * @clock_id: timestamping clock posix identifier to set. + */ +int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) { int ret; const struct iio_event_interface *ev_int = indio_dev->event_interface; @@ -207,6 +212,7 @@ static int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) return 0; } +EXPORT_SYMBOL(iio_device_set_clock); /** * iio_get_time_ns() - utility function to get a time stamp for events etc diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 74970f18a93b..b27719cefcf9 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -194,6 +194,16 @@ config GP2AP020A00F To compile this driver as a module, choose M here: the module will be called gp2ap020a00f. +config IQS621_ALS + tristate "Azoteq IQS621/622 ambient light sensors" + depends on MFD_IQS62X || COMPILE_TEST + help + Say Y here if you want to build support for the Azoteq IQS621 + and IQS622 ambient light sensors. + + To compile this driver as a module, choose M here: the module + will be called iqs621-als. 
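
For reference, the sample-frequency hunks above move the cros_ec sensors from plain integer Hz to IIO_VAL_INT_PLUS_MICRO because the EC-side ODR is expressed in millihertz. The standalone sketch below is illustrative only (not part of the patch; the helper names are made up) and simply mirrors the arithmetic used in cros_ec_sensors_core_read() and cros_ec_sensors_core_write():

#include <stdio.h>

/* EC ODR is in millihertz; IIO exposes it as integer hertz plus microhertz. */
static void ec_mhz_to_iio(int mhz, int *val, int *val2)
{
        *val  = mhz / 1000;             /* whole hertz */
        *val2 = (mhz % 1000) * 1000;    /* remainder, in microhertz */
}

static int iio_to_ec_mhz(int val, int val2)
{
        return val * 1000 + val2 / 1000;
}

int main(void)
{
        int val, val2;

        ec_mhz_to_iio(12500, &val, &val2);              /* EC reports 12500 mHz */
        printf("%d.%06d Hz\n", val, val2);              /* prints "12.500000 Hz" */
        printf("%d mHz\n", iio_to_ec_mhz(val, val2));   /* round-trips to 12500 */
        return 0;
}

The same split representation is what makes the sysfs sampling_frequency and sampling_frequency_available files show fractional rates such as 12.500000 instead of a truncated integer.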
+ config SENSORS_ISL29018 tristate "Intersil 29018 light and proximity sensor" depends on I2C diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile index 5c1ebaf578fb..d1c8aa30b9a8 100644 --- a/drivers/iio/light/Makefile +++ b/drivers/iio/light/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_GP2AP002) += gp2ap002.o obj-$(CONFIG_GP2AP020A00F) += gp2ap020a00f.o obj-$(CONFIG_HID_SENSOR_ALS) += hid-sensor-als.o obj-$(CONFIG_HID_SENSOR_PROX) += hid-sensor-prox.o +obj-$(CONFIG_IQS621_ALS) += iqs621-als.o obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o obj-$(CONFIG_ISL29125) += isl29125.o diff --git a/drivers/iio/light/cros_ec_light_prox.c b/drivers/iio/light/cros_ec_light_prox.c index 7a838e2956f4..2198b50909ed 100644 --- a/drivers/iio/light/cros_ec_light_prox.c +++ b/drivers/iio/light/cros_ec_light_prox.c @@ -177,10 +177,14 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, + cros_ec_sensors_push_data); if (ret) return ret; + iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes); + indio_dev->info = &cros_ec_light_prox_info; state = iio_priv(indio_dev); state->core.type = state->core.resp->info.type; @@ -189,8 +193,7 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) /* Common part */ channel->info_mask_shared_by_all = - BIT(IIO_CHAN_INFO_SAMP_FREQ) | - BIT(IIO_CHAN_INFO_FREQUENCY); + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; @@ -236,11 +239,6 @@ static int cros_ec_light_prox_probe(struct platform_device *pdev) state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd; - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } @@ -258,7 +256,6 @@ MODULE_DEVICE_TABLE(platform, cros_ec_light_prox_ids); static struct platform_driver cros_ec_light_prox_platform_driver = { .driver = { .name = "cros-ec-light-prox", - .pm = &cros_ec_sensors_pm_ops, }, .probe = cros_ec_light_prox_probe, .id_table = cros_ec_light_prox_ids, diff --git a/drivers/iio/light/iqs621-als.c b/drivers/iio/light/iqs621-als.c new file mode 100644 index 000000000000..b2988a782bd0 --- /dev/null +++ b/drivers/iio/light/iqs621-als.c @@ -0,0 +1,617 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS621/622 Ambient Light Sensors + * + * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com> + */ + +#include <linux/device.h> +#include <linux/iio/events.h> +#include <linux/iio/iio.h> +#include <linux/kernel.h> +#include <linux/mfd/iqs62x.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define IQS621_ALS_FLAGS_LIGHT BIT(7) +#define IQS621_ALS_FLAGS_RANGE GENMASK(3, 0) + +#define IQS621_ALS_UI_OUT 0x17 + +#define IQS621_ALS_THRESH_DARK 0x80 +#define IQS621_ALS_THRESH_LIGHT 0x81 + +#define IQS622_IR_RANGE 0x15 +#define IQS622_IR_FLAGS 0x16 +#define IQS622_IR_FLAGS_TOUCH BIT(1) +#define IQS622_IR_FLAGS_PROX BIT(0) + +#define IQS622_IR_UI_OUT 0x17 + +#define IQS622_IR_THRESH_PROX 0x91 +#define IQS622_IR_THRESH_TOUCH 0x92 + +struct iqs621_als_private { + struct iqs62x_core *iqs62x; + struct notifier_block notifier; + struct mutex lock; + 
bool light_en; + bool range_en; + bool prox_en; + u8 als_flags; + u8 ir_flags_mask; + u8 ir_flags; + u8 thresh_light; + u8 thresh_dark; + u8 thresh_prox; +}; + +static int iqs621_als_init(struct iqs621_als_private *iqs621_als) +{ + struct iqs62x_core *iqs62x = iqs621_als->iqs62x; + unsigned int event_mask = 0; + int ret; + + switch (iqs621_als->ir_flags_mask) { + case IQS622_IR_FLAGS_TOUCH: + ret = regmap_write(iqs62x->regmap, IQS622_IR_THRESH_TOUCH, + iqs621_als->thresh_prox); + break; + + case IQS622_IR_FLAGS_PROX: + ret = regmap_write(iqs62x->regmap, IQS622_IR_THRESH_PROX, + iqs621_als->thresh_prox); + break; + + default: + ret = regmap_write(iqs62x->regmap, IQS621_ALS_THRESH_LIGHT, + iqs621_als->thresh_light); + if (ret) + return ret; + + ret = regmap_write(iqs62x->regmap, IQS621_ALS_THRESH_DARK, + iqs621_als->thresh_dark); + } + + if (ret) + return ret; + + if (iqs621_als->light_en || iqs621_als->range_en) + event_mask |= iqs62x->dev_desc->als_mask; + + if (iqs621_als->prox_en) + event_mask |= iqs62x->dev_desc->ir_mask; + + return regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, + event_mask, 0); +} + +static int iqs621_als_notifier(struct notifier_block *notifier, + unsigned long event_flags, void *context) +{ + struct iqs62x_event_data *event_data = context; + struct iqs621_als_private *iqs621_als; + struct iio_dev *indio_dev; + bool light_new, light_old; + bool prox_new, prox_old; + u8 range_new, range_old; + s64 timestamp; + int ret; + + iqs621_als = container_of(notifier, struct iqs621_als_private, + notifier); + indio_dev = iio_priv_to_dev(iqs621_als); + timestamp = iio_get_time_ns(indio_dev); + + mutex_lock(&iqs621_als->lock); + + if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) { + ret = iqs621_als_init(iqs621_als); + if (ret) { + dev_err(indio_dev->dev.parent, + "Failed to re-initialize device: %d\n", ret); + ret = NOTIFY_BAD; + } else { + ret = NOTIFY_OK; + } + + goto err_mutex; + } + + if (!iqs621_als->light_en && !iqs621_als->range_en && + !iqs621_als->prox_en) { + ret = NOTIFY_DONE; + goto err_mutex; + } + + /* IQS621 only */ + light_new = event_data->als_flags & IQS621_ALS_FLAGS_LIGHT; + light_old = iqs621_als->als_flags & IQS621_ALS_FLAGS_LIGHT; + + if (iqs621_als->light_en && light_new && !light_old) + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_RISING), + timestamp); + else if (iqs621_als->light_en && !light_new && light_old) + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_LIGHT, 0, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_FALLING), + timestamp); + + /* IQS621 and IQS622 */ + range_new = event_data->als_flags & IQS621_ALS_FLAGS_RANGE; + range_old = iqs621_als->als_flags & IQS621_ALS_FLAGS_RANGE; + + if (iqs621_als->range_en && (range_new > range_old)) + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, + IIO_EV_TYPE_CHANGE, + IIO_EV_DIR_RISING), + timestamp); + else if (iqs621_als->range_en && (range_new < range_old)) + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_INTENSITY, 0, + IIO_EV_TYPE_CHANGE, + IIO_EV_DIR_FALLING), + timestamp); + + /* IQS622 only */ + prox_new = event_data->ir_flags & iqs621_als->ir_flags_mask; + prox_old = iqs621_als->ir_flags & iqs621_als->ir_flags_mask; + + if (iqs621_als->prox_en && prox_new && !prox_old) + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0, + IIO_EV_TYPE_THRESH, + IIO_EV_DIR_RISING), + timestamp); + else if (iqs621_als->prox_en && !prox_new && prox_old) + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_PROXIMITY, 0, 
+ IIO_EV_TYPE_THRESH, + IIO_EV_DIR_FALLING), + timestamp); + + iqs621_als->als_flags = event_data->als_flags; + iqs621_als->ir_flags = event_data->ir_flags; + ret = NOTIFY_OK; + +err_mutex: + mutex_unlock(&iqs621_als->lock); + + return ret; +} + +static void iqs621_als_notifier_unregister(void *context) +{ + struct iqs621_als_private *iqs621_als = context; + struct iio_dev *indio_dev = iio_priv_to_dev(iqs621_als); + int ret; + + ret = blocking_notifier_chain_unregister(&iqs621_als->iqs62x->nh, + &iqs621_als->notifier); + if (ret) + dev_err(indio_dev->dev.parent, + "Failed to unregister notifier: %d\n", ret); +} + +static int iqs621_als_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct iqs621_als_private *iqs621_als = iio_priv(indio_dev); + struct iqs62x_core *iqs62x = iqs621_als->iqs62x; + int ret; + __le16 val_buf; + + switch (chan->type) { + case IIO_INTENSITY: + ret = regmap_read(iqs62x->regmap, chan->address, val); + if (ret) + return ret; + + *val &= IQS621_ALS_FLAGS_RANGE; + return IIO_VAL_INT; + + case IIO_PROXIMITY: + case IIO_LIGHT: + ret = regmap_raw_read(iqs62x->regmap, chan->address, &val_buf, + sizeof(val_buf)); + if (ret) + return ret; + + *val = le16_to_cpu(val_buf); + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static int iqs621_als_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct iqs621_als_private *iqs621_als = iio_priv(indio_dev); + int ret; + + mutex_lock(&iqs621_als->lock); + + switch (chan->type) { + case IIO_LIGHT: + ret = iqs621_als->light_en; + break; + + case IIO_INTENSITY: + ret = iqs621_als->range_en; + break; + + case IIO_PROXIMITY: + ret = iqs621_als->prox_en; + break; + + default: + ret = -EINVAL; + } + + mutex_unlock(&iqs621_als->lock); + + return ret; +} + +static int iqs621_als_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) +{ + struct iqs621_als_private *iqs621_als = iio_priv(indio_dev); + struct iqs62x_core *iqs62x = iqs621_als->iqs62x; + unsigned int val; + int ret; + + mutex_lock(&iqs621_als->lock); + + ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->als_flags, &val); + if (ret) + goto err_mutex; + iqs621_als->als_flags = val; + + switch (chan->type) { + case IIO_LIGHT: + ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, + iqs62x->dev_desc->als_mask, + iqs621_als->range_en || state ? 0 : + 0xFF); + if (!ret) + iqs621_als->light_en = state; + break; + + case IIO_INTENSITY: + ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, + iqs62x->dev_desc->als_mask, + iqs621_als->light_en || state ? 0 : + 0xFF); + if (!ret) + iqs621_als->range_en = state; + break; + + case IIO_PROXIMITY: + ret = regmap_read(iqs62x->regmap, IQS622_IR_FLAGS, &val); + if (ret) + goto err_mutex; + iqs621_als->ir_flags = val; + + ret = regmap_update_bits(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, + iqs62x->dev_desc->ir_mask, + state ? 
0 : 0xFF); + if (!ret) + iqs621_als->prox_en = state; + break; + + default: + ret = -EINVAL; + } + +err_mutex: + mutex_unlock(&iqs621_als->lock); + + return ret; +} + +static int iqs621_als_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int *val, int *val2) +{ + struct iqs621_als_private *iqs621_als = iio_priv(indio_dev); + int ret = IIO_VAL_INT; + + mutex_lock(&iqs621_als->lock); + + switch (dir) { + case IIO_EV_DIR_RISING: + *val = iqs621_als->thresh_light * 16; + break; + + case IIO_EV_DIR_FALLING: + *val = iqs621_als->thresh_dark * 4; + break; + + case IIO_EV_DIR_EITHER: + if (iqs621_als->ir_flags_mask == IQS622_IR_FLAGS_TOUCH) + *val = iqs621_als->thresh_prox * 4; + else + *val = iqs621_als->thresh_prox; + break; + + default: + ret = -EINVAL; + } + + mutex_unlock(&iqs621_als->lock); + + return ret; +} + +static int iqs621_als_write_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int val, int val2) +{ + struct iqs621_als_private *iqs621_als = iio_priv(indio_dev); + struct iqs62x_core *iqs62x = iqs621_als->iqs62x; + unsigned int thresh_reg, thresh_val; + u8 ir_flags_mask, *thresh_cache; + int ret = -EINVAL; + + mutex_lock(&iqs621_als->lock); + + switch (dir) { + case IIO_EV_DIR_RISING: + thresh_reg = IQS621_ALS_THRESH_LIGHT; + thresh_val = val / 16; + + thresh_cache = &iqs621_als->thresh_light; + ir_flags_mask = 0; + break; + + case IIO_EV_DIR_FALLING: + thresh_reg = IQS621_ALS_THRESH_DARK; + thresh_val = val / 4; + + thresh_cache = &iqs621_als->thresh_dark; + ir_flags_mask = 0; + break; + + case IIO_EV_DIR_EITHER: + /* + * The IQS622 supports two detection thresholds, both measured + * in the same arbitrary units reported by read_raw: proximity + * (0 through 255 in steps of 1), and touch (0 through 1020 in + * steps of 4). + * + * Based on the single detection threshold chosen by the user, + * select the hardware threshold that gives the best trade-off + * between range and resolution. + * + * By default, the close-range (but coarse) touch threshold is + * chosen during probe. + */ + switch (val) { + case 0 ... 255: + thresh_reg = IQS622_IR_THRESH_PROX; + thresh_val = val; + + ir_flags_mask = IQS622_IR_FLAGS_PROX; + break; + + case 256 ... 
1020: + thresh_reg = IQS622_IR_THRESH_TOUCH; + thresh_val = val / 4; + + ir_flags_mask = IQS622_IR_FLAGS_TOUCH; + break; + + default: + goto err_mutex; + } + + thresh_cache = &iqs621_als->thresh_prox; + break; + + default: + goto err_mutex; + } + + if (thresh_val > 0xFF) + goto err_mutex; + + ret = regmap_write(iqs62x->regmap, thresh_reg, thresh_val); + if (ret) + goto err_mutex; + + *thresh_cache = thresh_val; + iqs621_als->ir_flags_mask = ir_flags_mask; + +err_mutex: + mutex_unlock(&iqs621_als->lock); + + return ret; +} + +static const struct iio_info iqs621_als_info = { + .read_raw = &iqs621_als_read_raw, + .read_event_config = iqs621_als_read_event_config, + .write_event_config = iqs621_als_write_event_config, + .read_event_value = iqs621_als_read_event_value, + .write_event_value = iqs621_als_write_event_value, +}; + +static const struct iio_event_spec iqs621_als_range_events[] = { + { + .type = IIO_EV_TYPE_CHANGE, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +static const struct iio_event_spec iqs621_als_light_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_VALUE), + }, +}; + +static const struct iio_chan_spec iqs621_als_channels[] = { + { + .type = IIO_INTENSITY, + .address = IQS621_ALS_FLAGS, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .event_spec = iqs621_als_range_events, + .num_event_specs = ARRAY_SIZE(iqs621_als_range_events), + }, + { + .type = IIO_LIGHT, + .address = IQS621_ALS_UI_OUT, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .event_spec = iqs621_als_light_events, + .num_event_specs = ARRAY_SIZE(iqs621_als_light_events), + }, +}; + +static const struct iio_event_spec iqs622_als_prox_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_VALUE), + }, +}; + +static const struct iio_chan_spec iqs622_als_channels[] = { + { + .type = IIO_INTENSITY, + .channel2 = IIO_MOD_LIGHT_BOTH, + .address = IQS622_ALS_FLAGS, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .event_spec = iqs621_als_range_events, + .num_event_specs = ARRAY_SIZE(iqs621_als_range_events), + .modified = true, + }, + { + .type = IIO_INTENSITY, + .channel2 = IIO_MOD_LIGHT_IR, + .address = IQS622_IR_RANGE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .modified = true, + }, + { + .type = IIO_PROXIMITY, + .address = IQS622_IR_UI_OUT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .event_spec = iqs622_als_prox_events, + .num_event_specs = ARRAY_SIZE(iqs622_als_prox_events), + }, +}; + +static int iqs621_als_probe(struct platform_device *pdev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent); + struct iqs621_als_private *iqs621_als; + struct iio_dev *indio_dev; + unsigned int val; + int ret; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*iqs621_als)); + if (!indio_dev) + return -ENOMEM; + + iqs621_als = iio_priv(indio_dev); + iqs621_als->iqs62x = iqs62x; + + if (iqs62x->dev_desc->prod_num == IQS622_PROD_NUM) { + ret = regmap_read(iqs62x->regmap, IQS622_IR_THRESH_TOUCH, + &val); + if (ret) + return ret; + iqs621_als->thresh_prox = val; + iqs621_als->ir_flags_mask = IQS622_IR_FLAGS_TOUCH; + + indio_dev->channels = iqs622_als_channels; + indio_dev->num_channels = 
ARRAY_SIZE(iqs622_als_channels); + } else { + ret = regmap_read(iqs62x->regmap, IQS621_ALS_THRESH_LIGHT, + &val); + if (ret) + return ret; + iqs621_als->thresh_light = val; + + ret = regmap_read(iqs62x->regmap, IQS621_ALS_THRESH_DARK, + &val); + if (ret) + return ret; + iqs621_als->thresh_dark = val; + + indio_dev->channels = iqs621_als_channels; + indio_dev->num_channels = ARRAY_SIZE(iqs621_als_channels); + } + + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->dev.parent = &pdev->dev; + indio_dev->name = iqs62x->dev_desc->dev_name; + indio_dev->info = &iqs621_als_info; + + mutex_init(&iqs621_als->lock); + + iqs621_als->notifier.notifier_call = iqs621_als_notifier; + ret = blocking_notifier_chain_register(&iqs621_als->iqs62x->nh, + &iqs621_als->notifier); + if (ret) { + dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, + iqs621_als_notifier_unregister, + iqs621_als); + if (ret) + return ret; + + return devm_iio_device_register(&pdev->dev, indio_dev); +} + +static struct platform_driver iqs621_als_platform_driver = { + .driver = { + .name = "iqs621-als", + }, + .probe = iqs621_als_probe, +}; +module_platform_driver(iqs621_als_platform_driver); + +MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>"); +MODULE_DESCRIPTION("Azoteq IQS621/622 Ambient Light Sensors"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:iqs621-als"); diff --git a/drivers/iio/position/Kconfig b/drivers/iio/position/Kconfig new file mode 100644 index 000000000000..eda67f008c5b --- /dev/null +++ b/drivers/iio/position/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Linear and angular position sensors +# +# When adding new entries keep the list in alphabetical order + +menu "Linear and angular position sensors" + +config IQS624_POS + tristate "Azoteq IQS624/625 angular position sensors" + depends on MFD_IQS62X || COMPILE_TEST + help + Say Y here if you want to build support for the Azoteq IQS624 + and IQS625 angular position sensors. + + To compile this driver as a module, choose M here: the module + will be called iqs624-pos. 
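
The event-threshold quantization used by iqs621_als_write_event_value() above can be summarized as: light (rising) thresholds are stored in units of 16, dark (falling) thresholds in units of 4, and IQS622 proximity thresholds are stored either raw (0 through 255, proximity flag) or in units of 4 (256 through 1020, touch flag), with every stored value limited to one byte. The following self-contained sketch reproduces only that mapping; the enum and function names are hypothetical stand-ins, not driver symbols:

#include <stdio.h>

enum thresh_dir { DIR_RISING, DIR_FALLING, DIR_EITHER };

/* Return the 8-bit value written to the threshold register, or -1 on error. */
static int quantize_thresh(enum thresh_dir dir, int val)
{
        int stored;

        if (val < 0)
                return -1;

        switch (dir) {
        case DIR_RISING:                        /* IQS621_ALS_THRESH_LIGHT */
                stored = val / 16;
                break;
        case DIR_FALLING:                       /* IQS621_ALS_THRESH_DARK */
                stored = val / 4;
                break;
        case DIR_EITHER:                        /* IQS622 proximity vs. touch */
                if (val > 1020)
                        return -1;
                stored = (val <= 255) ? val : val / 4;
                break;
        default:
                return -1;
        }

        return stored > 0xFF ? -1 : stored;     /* registers are one byte wide */
}

int main(void)
{
        printf("%d\n", quantize_thresh(DIR_RISING, 800));   /* 50  */
        printf("%d\n", quantize_thresh(DIR_FALLING, 100));  /* 25  */
        printf("%d\n", quantize_thresh(DIR_EITHER, 600));   /* 150 */
        return 0;
}

Reading a threshold back follows the inverse scaling, which is why iqs621_als_read_event_value() multiplies the cached register value by 16 or 4 before reporting it.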
+ +endmenu diff --git a/drivers/iio/position/Makefile b/drivers/iio/position/Makefile new file mode 100644 index 000000000000..3cbe7a734352 --- /dev/null +++ b/drivers/iio/position/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for IIO linear and angular position sensors +# + +# When adding new entries keep the list in alphabetical order + +obj-$(CONFIG_IQS624_POS) += iqs624-pos.o diff --git a/drivers/iio/position/iqs624-pos.c b/drivers/iio/position/iqs624-pos.c new file mode 100644 index 000000000000..77096c31c2ba --- /dev/null +++ b/drivers/iio/position/iqs624-pos.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS624/625 Angular Position Sensors + * + * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com> + */ + +#include <linux/device.h> +#include <linux/iio/events.h> +#include <linux/iio/iio.h> +#include <linux/kernel.h> +#include <linux/mfd/iqs62x.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define IQS624_POS_DEG_OUT 0x16 + +#define IQS624_POS_SCALE1 (314159 / 180) +#define IQS624_POS_SCALE2 100000 + +struct iqs624_pos_private { + struct iqs62x_core *iqs62x; + struct notifier_block notifier; + struct mutex lock; + bool angle_en; + u16 angle; +}; + +static int iqs624_pos_angle_en(struct iqs62x_core *iqs62x, bool angle_en) +{ + unsigned int event_mask = IQS624_HALL_UI_WHL_EVENT; + + /* + * The IQS625 reports angular position in the form of coarse intervals, + * so only interval change events are unmasked. Conversely, the IQS624 + * reports angular position down to one degree of resolution, so wheel + * movement events are unmasked instead. + */ + if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM) + event_mask = IQS624_HALL_UI_INT_EVENT; + + return regmap_update_bits(iqs62x->regmap, IQS624_HALL_UI, event_mask, + angle_en ? 
0 : 0xFF); +} + +static int iqs624_pos_notifier(struct notifier_block *notifier, + unsigned long event_flags, void *context) +{ + struct iqs62x_event_data *event_data = context; + struct iqs624_pos_private *iqs624_pos; + struct iqs62x_core *iqs62x; + struct iio_dev *indio_dev; + u16 angle = event_data->ui_data; + s64 timestamp; + int ret; + + iqs624_pos = container_of(notifier, struct iqs624_pos_private, + notifier); + indio_dev = iio_priv_to_dev(iqs624_pos); + timestamp = iio_get_time_ns(indio_dev); + + iqs62x = iqs624_pos->iqs62x; + if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM) + angle = event_data->interval; + + mutex_lock(&iqs624_pos->lock); + + if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) { + ret = iqs624_pos_angle_en(iqs62x, iqs624_pos->angle_en); + if (ret) { + dev_err(indio_dev->dev.parent, + "Failed to re-initialize device: %d\n", ret); + ret = NOTIFY_BAD; + } else { + ret = NOTIFY_OK; + } + } else if (iqs624_pos->angle_en && (angle != iqs624_pos->angle)) { + iio_push_event(indio_dev, + IIO_UNMOD_EVENT_CODE(IIO_ANGL, 0, + IIO_EV_TYPE_CHANGE, + IIO_EV_DIR_NONE), + timestamp); + + iqs624_pos->angle = angle; + ret = NOTIFY_OK; + } else { + ret = NOTIFY_DONE; + } + + mutex_unlock(&iqs624_pos->lock); + + return ret; +} + +static void iqs624_pos_notifier_unregister(void *context) +{ + struct iqs624_pos_private *iqs624_pos = context; + struct iio_dev *indio_dev = iio_priv_to_dev(iqs624_pos); + int ret; + + ret = blocking_notifier_chain_unregister(&iqs624_pos->iqs62x->nh, + &iqs624_pos->notifier); + if (ret) + dev_err(indio_dev->dev.parent, + "Failed to unregister notifier: %d\n", ret); +} + +static int iqs624_pos_angle_get(struct iqs62x_core *iqs62x, unsigned int *val) +{ + int ret; + __le16 val_buf; + + if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM) + return regmap_read(iqs62x->regmap, iqs62x->dev_desc->interval, + val); + + ret = regmap_raw_read(iqs62x->regmap, IQS624_POS_DEG_OUT, &val_buf, + sizeof(val_buf)); + if (ret) + return ret; + + *val = le16_to_cpu(val_buf); + + return 0; +} + +static int iqs624_pos_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev); + struct iqs62x_core *iqs62x = iqs624_pos->iqs62x; + unsigned int scale = 1; + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = iqs624_pos_angle_get(iqs62x, val); + if (ret) + return ret; + + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + if (iqs62x->dev_desc->prod_num == IQS625_PROD_NUM) { + ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV, + &scale); + if (ret) + return ret; + } + + *val = scale * IQS624_POS_SCALE1; + *val2 = IQS624_POS_SCALE2; + return IIO_VAL_FRACTIONAL; + + default: + return -EINVAL; + } +} + +static int iqs624_pos_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev); + int ret; + + mutex_lock(&iqs624_pos->lock); + ret = iqs624_pos->angle_en; + mutex_unlock(&iqs624_pos->lock); + + return ret; +} + +static int iqs624_pos_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + int state) +{ + struct iqs624_pos_private *iqs624_pos = iio_priv(indio_dev); + struct iqs62x_core *iqs62x = iqs624_pos->iqs62x; + unsigned int val; + int ret; + + mutex_lock(&iqs624_pos->lock); + + ret = iqs624_pos_angle_get(iqs62x, &val); + if (ret) 
+ goto err_mutex; + + ret = iqs624_pos_angle_en(iqs62x, state); + if (ret) + goto err_mutex; + + iqs624_pos->angle = val; + iqs624_pos->angle_en = state; + +err_mutex: + mutex_unlock(&iqs624_pos->lock); + + return ret; +} + +static const struct iio_info iqs624_pos_info = { + .read_raw = &iqs624_pos_read_raw, + .read_event_config = iqs624_pos_read_event_config, + .write_event_config = iqs624_pos_write_event_config, +}; + +static const struct iio_event_spec iqs624_pos_events[] = { + { + .type = IIO_EV_TYPE_CHANGE, + .dir = IIO_EV_DIR_NONE, + .mask_separate = BIT(IIO_EV_INFO_ENABLE), + }, +}; + +static const struct iio_chan_spec iqs624_pos_channels[] = { + { + .type = IIO_ANGL, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .event_spec = iqs624_pos_events, + .num_event_specs = ARRAY_SIZE(iqs624_pos_events), + }, +}; + +static int iqs624_pos_probe(struct platform_device *pdev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent); + struct iqs624_pos_private *iqs624_pos; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*iqs624_pos)); + if (!indio_dev) + return -ENOMEM; + + iqs624_pos = iio_priv(indio_dev); + iqs624_pos->iqs62x = iqs62x; + + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->dev.parent = &pdev->dev; + indio_dev->channels = iqs624_pos_channels; + indio_dev->num_channels = ARRAY_SIZE(iqs624_pos_channels); + indio_dev->name = iqs62x->dev_desc->dev_name; + indio_dev->info = &iqs624_pos_info; + + mutex_init(&iqs624_pos->lock); + + iqs624_pos->notifier.notifier_call = iqs624_pos_notifier; + ret = blocking_notifier_chain_register(&iqs624_pos->iqs62x->nh, + &iqs624_pos->notifier); + if (ret) { + dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, + iqs624_pos_notifier_unregister, + iqs624_pos); + if (ret) + return ret; + + return devm_iio_device_register(&pdev->dev, indio_dev); +} + +static struct platform_driver iqs624_pos_platform_driver = { + .driver = { + .name = "iqs624-pos", + }, + .probe = iqs624_pos_probe, +}; +module_platform_driver(iqs624_pos_platform_driver); + +MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>"); +MODULE_DESCRIPTION("Azoteq IQS624/625 Angular Position Sensors"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:iqs624-pos"); diff --git a/drivers/iio/pressure/cros_ec_baro.c b/drivers/iio/pressure/cros_ec_baro.c index b521bebd551c..c079b8960082 100644 --- a/drivers/iio/pressure/cros_ec_baro.c +++ b/drivers/iio/pressure/cros_ec_baro.c @@ -134,10 +134,14 @@ static int cros_ec_baro_probe(struct platform_device *pdev) if (!indio_dev) return -ENOMEM; - ret = cros_ec_sensors_core_init(pdev, indio_dev, true); + ret = cros_ec_sensors_core_init(pdev, indio_dev, true, + cros_ec_sensors_capture, + cros_ec_sensors_push_data); if (ret) return ret; + iio_buffer_set_attrs(indio_dev->buffer, cros_ec_sensor_fifo_attributes); + indio_dev->info = &cros_ec_baro_info; state = iio_priv(indio_dev); state->core.type = state->core.resp->info.type; @@ -147,8 +151,7 @@ static int cros_ec_baro_probe(struct platform_device *pdev) channel->info_mask_separate = BIT(IIO_CHAN_INFO_RAW); channel->info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | - BIT(IIO_CHAN_INFO_SAMP_FREQ) | - BIT(IIO_CHAN_INFO_FREQUENCY); + BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ); channel->scan_type.realbits = CROS_EC_SENSOR_BITS; @@ -182,11 +185,6 @@ static int cros_ec_baro_probe(struct 
platform_device *pdev) state->core.read_ec_sensors_data = cros_ec_sensors_read_cmd; - ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL, - cros_ec_sensors_capture, NULL); - if (ret) - return ret; - return devm_iio_device_register(dev, indio_dev); } diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig index e1ccb4003015..f1f2a1499c9e 100644 --- a/drivers/iio/temperature/Kconfig +++ b/drivers/iio/temperature/Kconfig @@ -4,6 +4,16 @@ # menu "Temperature sensors" +config IQS620AT_TEMP + tristate "Azoteq IQS620AT temperature sensor" + depends on MFD_IQS62X || COMPILE_TEST + help + Say Y here if you want to build support for the Azoteq IQS620AT + temperature sensor. + + To compile this driver as a module, choose M here: the module + will be called iqs620at-temp. + config LTC2983 tristate "Analog Devices Multi-Sensor Digital Temperature Measurement System" depends on SPI diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile index d6b850b0cf63..90c113115422 100644 --- a/drivers/iio/temperature/Makefile +++ b/drivers/iio/temperature/Makefile @@ -3,6 +3,7 @@ # Makefile for industrial I/O temperature drivers # +obj-$(CONFIG_IQS620AT_TEMP) += iqs620at-temp.o obj-$(CONFIG_LTC2983) += ltc2983.o obj-$(CONFIG_HID_SENSOR_TEMP) += hid-sensor-temperature.o obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o diff --git a/drivers/iio/temperature/iqs620at-temp.c b/drivers/iio/temperature/iqs620at-temp.c new file mode 100644 index 000000000000..3fd52b3eb030 --- /dev/null +++ b/drivers/iio/temperature/iqs620at-temp.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS620AT Temperature Sensor + * + * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com> + */ + +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/kernel.h> +#include <linux/mfd/iqs62x.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#define IQS620_TEMP_UI_OUT 0x1A + +#define IQS620_TEMP_SCALE 1000 +#define IQS620_TEMP_OFFSET (-100) + +static int iqs620_temp_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct iqs62x_core *iqs62x = iio_device_get_drvdata(indio_dev); + int ret; + __le16 val_buf; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = regmap_raw_read(iqs62x->regmap, IQS620_TEMP_UI_OUT, + &val_buf, sizeof(val_buf)); + if (ret) + return ret; + + *val = le16_to_cpu(val_buf); + return IIO_VAL_INT; + + case IIO_CHAN_INFO_SCALE: + *val = IQS620_TEMP_SCALE; + return IIO_VAL_INT; + + case IIO_CHAN_INFO_OFFSET: + *val = IQS620_TEMP_OFFSET; + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static const struct iio_info iqs620_temp_info = { + .read_raw = &iqs620_temp_read_raw, +}; + +static const struct iio_chan_spec iqs620_temp_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + }, +}; + +static int iqs620_temp_probe(struct platform_device *pdev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent); + struct iio_dev *indio_dev; + + indio_dev = devm_iio_device_alloc(&pdev->dev, 0); + if (!indio_dev) + return -ENOMEM; + + iio_device_set_drvdata(indio_dev, iqs62x); + + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->dev.parent = &pdev->dev; + indio_dev->channels = iqs620_temp_channels; + indio_dev->num_channels = ARRAY_SIZE(iqs620_temp_channels); + indio_dev->name = iqs62x->dev_desc->dev_name; + indio_dev->info = 
&iqs620_temp_info; + + return devm_iio_device_register(&pdev->dev, indio_dev); +} + +static struct platform_driver iqs620_temp_platform_driver = { + .driver = { + .name = "iqs620at-temp", + }, + .probe = iqs620_temp_probe, +}; +module_platform_driver(iqs620_temp_platform_driver); + +MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>"); +MODULE_DESCRIPTION("Azoteq IQS620AT Temperature Sensor"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:iqs620at-temp"); diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4706ff09f0e8..28de965a08d5 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig @@ -663,6 +663,16 @@ config KEYBOARD_IPAQ_MICRO To compile this driver as a module, choose M here: the module will be called ipaq-micro-keys. +config KEYBOARD_IQS62X + tristate "Azoteq IQS620A/621/622/624/625 keys and switches" + depends on MFD_IQS62X + help + Say Y here to enable key and switch support for the Azoteq IQS620A, + IQS621, IQS622, IQS624 and IQS625 multi-function sensors. + + To compile this driver as a module, choose M here: the module will + be called iqs62x-keys. + config KEYBOARD_OMAP tristate "TI OMAP keypad support" depends on ARCH_OMAP1 diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index f5b17524adf2..1d689fdd5c00 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o obj-$(CONFIG_KEYBOARD_IPAQ_MICRO) += ipaq-micro-keys.o +obj-$(CONFIG_KEYBOARD_IQS62X) += iqs62x-keys.o obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o obj-$(CONFIG_KEYBOARD_IMX_SC_KEY) += imx_sc_key.o obj-$(CONFIG_KEYBOARD_HP6XX) += jornada680_kbd.o diff --git a/drivers/input/keyboard/iqs62x-keys.c b/drivers/input/keyboard/iqs62x-keys.c new file mode 100644 index 000000000000..93446b21f98f --- /dev/null +++ b/drivers/input/keyboard/iqs62x-keys.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS620A/621/622/624/625 Keys and Switches + * + * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com> + */ + +#include <linux/device.h> +#include <linux/input.h> +#include <linux/kernel.h> +#include <linux/mfd/iqs62x.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +enum { + IQS62X_SW_HALL_N, + IQS62X_SW_HALL_S, +}; + +static const char * const iqs62x_switch_names[] = { + [IQS62X_SW_HALL_N] = "hall-switch-north", + [IQS62X_SW_HALL_S] = "hall-switch-south", +}; + +struct iqs62x_switch_desc { + enum iqs62x_event_flag flag; + unsigned int code; + bool enabled; +}; + +struct iqs62x_keys_private { + struct iqs62x_core *iqs62x; + struct input_dev *input; + struct notifier_block notifier; + struct iqs62x_switch_desc switches[ARRAY_SIZE(iqs62x_switch_names)]; + unsigned int keycode[IQS62X_NUM_KEYS]; + unsigned int keycodemax; + u8 interval; +}; + +static int iqs62x_keys_parse_prop(struct platform_device *pdev, + struct iqs62x_keys_private *iqs62x_keys) +{ + struct fwnode_handle *child; + unsigned int val; + int ret, i; + + ret = device_property_count_u32(&pdev->dev, "linux,keycodes"); + if (ret > IQS62X_NUM_KEYS) { + dev_err(&pdev->dev, "Too many keycodes present\n"); + return -EINVAL; + } else if (ret < 0) { + dev_err(&pdev->dev, "Failed to count keycodes: %d\n", ret); + return ret; + } + iqs62x_keys->keycodemax = ret; + + ret = 
device_property_read_u32_array(&pdev->dev, "linux,keycodes", + iqs62x_keys->keycode, + iqs62x_keys->keycodemax); + if (ret) { + dev_err(&pdev->dev, "Failed to read keycodes: %d\n", ret); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) { + child = device_get_named_child_node(&pdev->dev, + iqs62x_switch_names[i]); + if (!child) + continue; + + ret = fwnode_property_read_u32(child, "linux,code", &val); + if (ret) { + dev_err(&pdev->dev, "Failed to read switch code: %d\n", + ret); + return ret; + } + iqs62x_keys->switches[i].code = val; + iqs62x_keys->switches[i].enabled = true; + + if (fwnode_property_present(child, "azoteq,use-prox")) + iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ? + IQS62X_EVENT_HALL_N_P : + IQS62X_EVENT_HALL_S_P); + else + iqs62x_keys->switches[i].flag = (i == IQS62X_SW_HALL_N ? + IQS62X_EVENT_HALL_N_T : + IQS62X_EVENT_HALL_S_T); + } + + return 0; +} + +static int iqs62x_keys_init(struct iqs62x_keys_private *iqs62x_keys) +{ + struct iqs62x_core *iqs62x = iqs62x_keys->iqs62x; + enum iqs62x_event_flag flag; + unsigned int event_reg, val; + unsigned int event_mask = 0; + int ret, i; + + switch (iqs62x->dev_desc->prod_num) { + case IQS620_PROD_NUM: + case IQS621_PROD_NUM: + case IQS622_PROD_NUM: + event_reg = IQS620_GLBL_EVENT_MASK; + + /* + * Discreet button, hysteresis and SAR UI flags represent keys + * and are unmasked if mapped to a valid keycode. + */ + for (i = 0; i < iqs62x_keys->keycodemax; i++) { + if (iqs62x_keys->keycode[i] == KEY_RESERVED) + continue; + + if (iqs62x_events[i].reg == IQS62X_EVENT_PROX) + event_mask |= iqs62x->dev_desc->prox_mask; + else if (iqs62x_events[i].reg == IQS62X_EVENT_HYST) + event_mask |= (iqs62x->dev_desc->hyst_mask | + iqs62x->dev_desc->sar_mask); + } + + ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->hall_flags, + &val); + if (ret) + return ret; + + /* + * Hall UI flags represent switches and are unmasked if their + * corresponding child nodes are present. + */ + for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) { + if (!(iqs62x_keys->switches[i].enabled)) + continue; + + flag = iqs62x_keys->switches[i].flag; + + if (iqs62x_events[flag].reg != IQS62X_EVENT_HALL) + continue; + + event_mask |= iqs62x->dev_desc->hall_mask; + + input_report_switch(iqs62x_keys->input, + iqs62x_keys->switches[i].code, + (val & iqs62x_events[flag].mask) == + iqs62x_events[flag].val); + } + + input_sync(iqs62x_keys->input); + break; + + case IQS624_PROD_NUM: + event_reg = IQS624_HALL_UI; + + /* + * Interval change events represent keys and are unmasked if + * either wheel movement flag is mapped to a valid keycode. 
+ */ + if (iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_UP] != KEY_RESERVED) + event_mask |= IQS624_HALL_UI_INT_EVENT; + + if (iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_DN] != KEY_RESERVED) + event_mask |= IQS624_HALL_UI_INT_EVENT; + + ret = regmap_read(iqs62x->regmap, iqs62x->dev_desc->interval, + &val); + if (ret) + return ret; + + iqs62x_keys->interval = val; + break; + + default: + return 0; + } + + return regmap_update_bits(iqs62x->regmap, event_reg, event_mask, 0); +} + +static int iqs62x_keys_notifier(struct notifier_block *notifier, + unsigned long event_flags, void *context) +{ + struct iqs62x_event_data *event_data = context; + struct iqs62x_keys_private *iqs62x_keys; + int ret, i; + + iqs62x_keys = container_of(notifier, struct iqs62x_keys_private, + notifier); + + if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) { + ret = iqs62x_keys_init(iqs62x_keys); + if (ret) { + dev_err(iqs62x_keys->input->dev.parent, + "Failed to re-initialize device: %d\n", ret); + return NOTIFY_BAD; + } + + return NOTIFY_OK; + } + + for (i = 0; i < iqs62x_keys->keycodemax; i++) { + if (iqs62x_events[i].reg == IQS62X_EVENT_WHEEL && + event_data->interval == iqs62x_keys->interval) + continue; + + input_report_key(iqs62x_keys->input, iqs62x_keys->keycode[i], + event_flags & BIT(i)); + } + + for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) + if (iqs62x_keys->switches[i].enabled) + input_report_switch(iqs62x_keys->input, + iqs62x_keys->switches[i].code, + event_flags & + BIT(iqs62x_keys->switches[i].flag)); + + input_sync(iqs62x_keys->input); + + if (event_data->interval == iqs62x_keys->interval) + return NOTIFY_OK; + + /* + * Each frame contains at most one wheel event (up or down), in which + * case a complementary release cycle is emulated. + */ + if (event_flags & BIT(IQS62X_EVENT_WHEEL_UP)) { + input_report_key(iqs62x_keys->input, + iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_UP], + 0); + input_sync(iqs62x_keys->input); + } else if (event_flags & BIT(IQS62X_EVENT_WHEEL_DN)) { + input_report_key(iqs62x_keys->input, + iqs62x_keys->keycode[IQS62X_EVENT_WHEEL_DN], + 0); + input_sync(iqs62x_keys->input); + } + + iqs62x_keys->interval = event_data->interval; + + return NOTIFY_OK; +} + +static int iqs62x_keys_probe(struct platform_device *pdev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(pdev->dev.parent); + struct iqs62x_keys_private *iqs62x_keys; + struct input_dev *input; + int ret, i; + + iqs62x_keys = devm_kzalloc(&pdev->dev, sizeof(*iqs62x_keys), + GFP_KERNEL); + if (!iqs62x_keys) + return -ENOMEM; + + platform_set_drvdata(pdev, iqs62x_keys); + + ret = iqs62x_keys_parse_prop(pdev, iqs62x_keys); + if (ret) + return ret; + + input = devm_input_allocate_device(&pdev->dev); + if (!input) + return -ENOMEM; + + input->keycodemax = iqs62x_keys->keycodemax; + input->keycode = iqs62x_keys->keycode; + input->keycodesize = sizeof(*iqs62x_keys->keycode); + + input->name = iqs62x->dev_desc->dev_name; + input->id.bustype = BUS_I2C; + + for (i = 0; i < iqs62x_keys->keycodemax; i++) + if (iqs62x_keys->keycode[i] != KEY_RESERVED) + input_set_capability(input, EV_KEY, + iqs62x_keys->keycode[i]); + + for (i = 0; i < ARRAY_SIZE(iqs62x_keys->switches); i++) + if (iqs62x_keys->switches[i].enabled) + input_set_capability(input, EV_SW, + iqs62x_keys->switches[i].code); + + iqs62x_keys->iqs62x = iqs62x; + iqs62x_keys->input = input; + + ret = iqs62x_keys_init(iqs62x_keys); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize device: %d\n", ret); + return ret; + } + + ret = input_register_device(iqs62x_keys->input); + if 
(ret) { + dev_err(&pdev->dev, "Failed to register device: %d\n", ret); + return ret; + } + + iqs62x_keys->notifier.notifier_call = iqs62x_keys_notifier; + ret = blocking_notifier_chain_register(&iqs62x_keys->iqs62x->nh, + &iqs62x_keys->notifier); + if (ret) + dev_err(&pdev->dev, "Failed to register notifier: %d\n", ret); + + return ret; +} + +static int iqs62x_keys_remove(struct platform_device *pdev) +{ + struct iqs62x_keys_private *iqs62x_keys = platform_get_drvdata(pdev); + int ret; + + ret = blocking_notifier_chain_unregister(&iqs62x_keys->iqs62x->nh, + &iqs62x_keys->notifier); + if (ret) + dev_err(&pdev->dev, "Failed to unregister notifier: %d\n", ret); + + return ret; +} + +static struct platform_driver iqs62x_keys_platform_driver = { + .driver = { + .name = "iqs62x-keys", + }, + .probe = iqs62x_keys_probe, + .remove = iqs62x_keys_remove, +}; +module_platform_driver(iqs62x_keys_platform_driver); + +MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>"); +MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Keys and Switches"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:iqs62x-keys"); diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index dc974c288e88..08e919dbeb5d 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -530,6 +530,17 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"), }, }, + { + /* + * Acer Aspire 5738z + * Touchpad stops working in mux mode when dis- + re-enabled + * with the touchpad enable/disable toggle hotkey + */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5738"), + }, + }, { } }; diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c index 491179967b29..14c577c16b16 100644 --- a/drivers/input/touchscreen/elants_i2c.c +++ b/drivers/input/touchscreen/elants_i2c.c @@ -1309,6 +1309,7 @@ static int elants_i2c_probe(struct i2c_client *client, input_set_abs_params(ts->input, ABS_MT_PRESSURE, 0, 255, 0, 0); input_abs_set_res(ts->input, ABS_MT_POSITION_X, ts->x_res); input_abs_set_res(ts->input, ABS_MT_POSITION_Y, ts->y_res); + input_abs_set_res(ts->input, ABS_MT_TOUCH_MAJOR, 1); error = input_register_device(ts->input); if (error) { diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 0403102e807e..02c75ea385e0 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -29,33 +29,6 @@ #include <linux/of.h> #include <asm/unaligned.h> -struct goodix_ts_data; - -struct goodix_chip_data { - u16 config_addr; - int config_len; - int (*check_config)(struct goodix_ts_data *, const struct firmware *); -}; - -struct goodix_ts_data { - struct i2c_client *client; - struct input_dev *input_dev; - const struct goodix_chip_data *chip; - struct touchscreen_properties prop; - unsigned int max_touch_num; - unsigned int int_trigger_type; - struct regulator *avdd28; - struct regulator *vddio; - struct gpio_desc *gpiod_int; - struct gpio_desc *gpiod_rst; - u16 id; - u16 version; - const char *cfg_name; - struct completion firmware_loading_complete; - unsigned long irq_flags; - unsigned int contact_size; -}; - #define GOODIX_GPIO_INT_NAME "irq" #define GOODIX_GPIO_RST_NAME "reset" @@ -65,10 +38,13 @@ struct goodix_ts_data { #define GOODIX_CONTACT_SIZE 8 #define GOODIX_MAX_CONTACT_SIZE 9 #define GOODIX_MAX_CONTACTS 10 +#define GOODIX_MAX_KEYS 7 -#define GOODIX_CONFIG_MAX_LENGTH 240 
+#define GOODIX_CONFIG_MIN_LENGTH 186 #define GOODIX_CONFIG_911_LENGTH 186 #define GOODIX_CONFIG_967_LENGTH 228 +#define GOODIX_CONFIG_GT9X_LENGTH 240 +#define GOODIX_CONFIG_MAX_LENGTH 240 /* Register defines */ #define GOODIX_REG_COMMAND 0x8040 @@ -80,39 +56,118 @@ struct goodix_ts_data { #define GOODIX_REG_ID 0x8140 #define GOODIX_BUFFER_STATUS_READY BIT(7) +#define GOODIX_HAVE_KEY BIT(4) #define GOODIX_BUFFER_STATUS_TIMEOUT 20 #define RESOLUTION_LOC 1 #define MAX_CONTACTS_LOC 5 #define TRIGGER_LOC 6 +/* Our special handling for GPIO accesses through ACPI is x86 specific */ +#if defined CONFIG_X86 && defined CONFIG_ACPI +#define ACPI_GPIO_SUPPORT +#endif + +struct goodix_ts_data; + +enum goodix_irq_pin_access_method { + IRQ_PIN_ACCESS_NONE, + IRQ_PIN_ACCESS_GPIO, + IRQ_PIN_ACCESS_ACPI_GPIO, + IRQ_PIN_ACCESS_ACPI_METHOD, +}; + +struct goodix_chip_data { + u16 config_addr; + int config_len; + int (*check_config)(struct goodix_ts_data *ts, const u8 *cfg, int len); + void (*calc_config_checksum)(struct goodix_ts_data *ts); +}; + +struct goodix_chip_id { + const char *id; + const struct goodix_chip_data *data; +}; + +#define GOODIX_ID_MAX_LEN 4 + +struct goodix_ts_data { + struct i2c_client *client; + struct input_dev *input_dev; + const struct goodix_chip_data *chip; + struct touchscreen_properties prop; + unsigned int max_touch_num; + unsigned int int_trigger_type; + struct regulator *avdd28; + struct regulator *vddio; + struct gpio_desc *gpiod_int; + struct gpio_desc *gpiod_rst; + int gpio_count; + int gpio_int_idx; + char id[GOODIX_ID_MAX_LEN + 1]; + u16 version; + const char *cfg_name; + bool reset_controller_at_probe; + bool load_cfg_from_disk; + struct completion firmware_loading_complete; + unsigned long irq_flags; + enum goodix_irq_pin_access_method irq_pin_access_method; + unsigned int contact_size; + u8 config[GOODIX_CONFIG_MAX_LENGTH]; + unsigned short keymap[GOODIX_MAX_KEYS]; +}; + static int goodix_check_cfg_8(struct goodix_ts_data *ts, - const struct firmware *cfg); + const u8 *cfg, int len); static int goodix_check_cfg_16(struct goodix_ts_data *ts, - const struct firmware *cfg); + const u8 *cfg, int len); +static void goodix_calc_cfg_checksum_8(struct goodix_ts_data *ts); +static void goodix_calc_cfg_checksum_16(struct goodix_ts_data *ts); static const struct goodix_chip_data gt1x_chip_data = { .config_addr = GOODIX_GT1X_REG_CONFIG_DATA, - .config_len = GOODIX_CONFIG_MAX_LENGTH, + .config_len = GOODIX_CONFIG_GT9X_LENGTH, .check_config = goodix_check_cfg_16, + .calc_config_checksum = goodix_calc_cfg_checksum_16, }; static const struct goodix_chip_data gt911_chip_data = { .config_addr = GOODIX_GT9X_REG_CONFIG_DATA, .config_len = GOODIX_CONFIG_911_LENGTH, .check_config = goodix_check_cfg_8, + .calc_config_checksum = goodix_calc_cfg_checksum_8, }; static const struct goodix_chip_data gt967_chip_data = { .config_addr = GOODIX_GT9X_REG_CONFIG_DATA, .config_len = GOODIX_CONFIG_967_LENGTH, .check_config = goodix_check_cfg_8, + .calc_config_checksum = goodix_calc_cfg_checksum_8, }; static const struct goodix_chip_data gt9x_chip_data = { .config_addr = GOODIX_GT9X_REG_CONFIG_DATA, - .config_len = GOODIX_CONFIG_MAX_LENGTH, + .config_len = GOODIX_CONFIG_GT9X_LENGTH, .check_config = goodix_check_cfg_8, + .calc_config_checksum = goodix_calc_cfg_checksum_8, +}; + +static const struct goodix_chip_id goodix_chip_ids[] = { + { .id = "1151", .data = &gt1x_chip_data }, + { .id = "5663", .data = &gt1x_chip_data }, + { .id = "5688", .data = &gt1x_chip_data }, + { .id = "917S", .data = &gt1x_chip_data }, + 
+ { .id = "911", .data = &gt911_chip_data }, + { .id = "9271", .data = &gt911_chip_data }, + { .id = "9110", .data = &gt911_chip_data }, + { .id = "927", .data = &gt911_chip_data }, + { .id = "928", .data = &gt911_chip_data }, + + { .id = "912", .data = &gt967_chip_data }, + { .id = "9147", .data = &gt967_chip_data }, + { .id = "967", .data = &gt967_chip_data }, + { } }; static const unsigned long goodix_irq_flags[] = { @@ -168,6 +223,22 @@ static const struct dmi_system_id nine_bytes_report[] = { {} }; +/* + * Those tablets have their x coordinate inverted + */ +static const struct dmi_system_id inverted_x_screen[] = { +#if defined(CONFIG_DMI) && defined(CONFIG_X86) + { + .ident = "Cube I15-TC", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Cube"), + DMI_MATCH(DMI_PRODUCT_NAME, "I15-TC") + }, + }, +#endif + {} +}; + /** * goodix_i2c_read - read data from a register of the i2c slave device. * @@ -235,28 +306,16 @@ static int goodix_i2c_write_u8(struct i2c_client *client, u16 reg, u8 value) return goodix_i2c_write(client, reg, &value, sizeof(value)); } -static const struct goodix_chip_data *goodix_get_chip_data(u16 id) +static const struct goodix_chip_data *goodix_get_chip_data(const char *id) { - switch (id) { - case 1151: - case 5663: - case 5688: - return &gt1x_chip_data; - - case 911: - case 9271: - case 9110: - case 927: - case 928: - return &gt911_chip_data; - - case 912: - case 967: - return &gt967_chip_data; + unsigned int i; - default: - return &gt9x_chip_data; + for (i = 0; goodix_chip_ids[i].id; i++) { + if (!strcmp(goodix_chip_ids[i].id, id)) + return goodix_chip_ids[i].data; } + + return &gt9x_chip_data; } static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) @@ -264,6 +323,13 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) unsigned long max_timeout; int touch_num; int error; + u16 addr = GOODIX_READ_COOR_ADDR; + /* + * We are going to read 1-byte header, + * ts->contact_size * max(1, touch_num) bytes of coordinates + * and 1-byte footer which contains the touch-key code. + */ + const int header_contact_keycode_size = 1 + ts->contact_size + 1; /* * The 'buffer status' bit, which indicates that the data is valid, is @@ -272,8 +338,8 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) */ max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT); do { - error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, - data, ts->contact_size + 1); + error = goodix_i2c_read(ts->client, addr, data, + header_contact_keycode_size); if (error) { dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); @@ -286,11 +352,10 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) return -EPROTO; if (touch_num > 1) { - data += 1 + ts->contact_size; + addr += header_contact_keycode_size; + data += header_contact_keycode_size; error = goodix_i2c_read(ts->client, - GOODIX_READ_COOR_ADDR + - 1 + ts->contact_size, - data, + addr, data, ts->contact_size * (touch_num - 1)); if (error) @@ -307,7 +372,7 @@ static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) * The Goodix panel will send spurious interrupts after a * 'finger up' event, which will always cause a timeout. 
*/ - return 0; + return -ENOMSG; } static void goodix_ts_report_touch_8b(struct goodix_ts_data *ts, u8 *coor_data) @@ -340,6 +405,25 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data) input_report_abs(ts->input_dev, ABS_MT_WIDTH_MAJOR, input_w); } +static void goodix_ts_report_key(struct goodix_ts_data *ts, u8 *data) +{ + int touch_num; + u8 key_value; + int i; + + if (data[0] & GOODIX_HAVE_KEY) { + touch_num = data[0] & 0x0f; + key_value = data[1 + ts->contact_size * touch_num]; + for (i = 0; i < GOODIX_MAX_KEYS; i++) + if (key_value & BIT(i)) + input_report_key(ts->input_dev, + ts->keymap[i], 1); + } else { + for (i = 0; i < GOODIX_MAX_KEYS; i++) + input_report_key(ts->input_dev, ts->keymap[i], 0); + } +} + /** * goodix_process_events - Process incoming events * @@ -350,7 +434,7 @@ static void goodix_ts_report_touch_9b(struct goodix_ts_data *ts, u8 *coor_data) */ static void goodix_process_events(struct goodix_ts_data *ts) { - u8 point_data[1 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS]; + u8 point_data[2 + GOODIX_MAX_CONTACT_SIZE * GOODIX_MAX_CONTACTS]; int touch_num; int i; @@ -358,11 +442,7 @@ static void goodix_process_events(struct goodix_ts_data *ts) if (touch_num < 0) return; - /* - * Bit 4 of the first byte reports the status of the capacitive - * Windows/Home button. - */ - input_report_key(ts->input_dev, KEY_LEFTMETA, point_data[0] & BIT(4)); + goodix_ts_report_key(ts, point_data); for (i = 0; i < touch_num; i++) if (ts->contact_size == 9) @@ -406,22 +486,21 @@ static int goodix_request_irq(struct goodix_ts_data *ts) ts->irq_flags, ts->client->name, ts); } -static int goodix_check_cfg_8(struct goodix_ts_data *ts, - const struct firmware *cfg) +static int goodix_check_cfg_8(struct goodix_ts_data *ts, const u8 *cfg, int len) { - int i, raw_cfg_len = cfg->size - 2; + int i, raw_cfg_len = len - 2; u8 check_sum = 0; for (i = 0; i < raw_cfg_len; i++) - check_sum += cfg->data[i]; + check_sum += cfg[i]; check_sum = (~check_sum) + 1; - if (check_sum != cfg->data[raw_cfg_len]) { + if (check_sum != cfg[raw_cfg_len]) { dev_err(&ts->client->dev, "The checksum of the config fw is not correct"); return -EINVAL; } - if (cfg->data[raw_cfg_len + 1] != 1) { + if (cfg[raw_cfg_len + 1] != 1) { dev_err(&ts->client->dev, "Config fw must have Config_Fresh register set"); return -EINVAL; @@ -430,22 +509,35 @@ static int goodix_check_cfg_8(struct goodix_ts_data *ts, return 0; } -static int goodix_check_cfg_16(struct goodix_ts_data *ts, - const struct firmware *cfg) +static void goodix_calc_cfg_checksum_8(struct goodix_ts_data *ts) { - int i, raw_cfg_len = cfg->size - 3; + int i, raw_cfg_len = ts->chip->config_len - 2; + u8 check_sum = 0; + + for (i = 0; i < raw_cfg_len; i++) + check_sum += ts->config[i]; + check_sum = (~check_sum) + 1; + + ts->config[raw_cfg_len] = check_sum; + ts->config[raw_cfg_len + 1] = 1; /* Set "config_fresh" bit */ +} + +static int goodix_check_cfg_16(struct goodix_ts_data *ts, const u8 *cfg, + int len) +{ + int i, raw_cfg_len = len - 3; u16 check_sum = 0; for (i = 0; i < raw_cfg_len; i += 2) - check_sum += get_unaligned_be16(&cfg->data[i]); + check_sum += get_unaligned_be16(&cfg[i]); check_sum = (~check_sum) + 1; - if (check_sum != get_unaligned_be16(&cfg->data[raw_cfg_len])) { + if (check_sum != get_unaligned_be16(&cfg[raw_cfg_len])) { dev_err(&ts->client->dev, "The checksum of the config fw is not correct"); return -EINVAL; } - if (cfg->data[raw_cfg_len + 2] != 1) { + if (cfg[raw_cfg_len + 2] != 1) { dev_err(&ts->client->dev, "Config fw must 
have Config_Fresh register set"); return -EINVAL; @@ -454,22 +546,35 @@ static int goodix_check_cfg_16(struct goodix_ts_data *ts, return 0; } +static void goodix_calc_cfg_checksum_16(struct goodix_ts_data *ts) +{ + int i, raw_cfg_len = ts->chip->config_len - 3; + u16 check_sum = 0; + + for (i = 0; i < raw_cfg_len; i += 2) + check_sum += get_unaligned_be16(&ts->config[i]); + check_sum = (~check_sum) + 1; + + put_unaligned_be16(check_sum, &ts->config[raw_cfg_len]); + ts->config[raw_cfg_len + 2] = 1; /* Set "config_fresh" bit */ +} + /** * goodix_check_cfg - Checks if config fw is valid * * @ts: goodix_ts_data pointer * @cfg: firmware config data */ -static int goodix_check_cfg(struct goodix_ts_data *ts, - const struct firmware *cfg) +static int goodix_check_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len) { - if (cfg->size > GOODIX_CONFIG_MAX_LENGTH) { + if (len < GOODIX_CONFIG_MIN_LENGTH || + len > GOODIX_CONFIG_MAX_LENGTH) { dev_err(&ts->client->dev, "The length of the config fw is not correct"); return -EINVAL; } - return ts->chip->check_config(ts, cfg); + return ts->chip->check_config(ts, cfg, len); } /** @@ -478,17 +583,15 @@ static int goodix_check_cfg(struct goodix_ts_data *ts, * @ts: goodix_ts_data pointer * @cfg: config firmware to write to device */ -static int goodix_send_cfg(struct goodix_ts_data *ts, - const struct firmware *cfg) +static int goodix_send_cfg(struct goodix_ts_data *ts, const u8 *cfg, int len) { int error; - error = goodix_check_cfg(ts, cfg); + error = goodix_check_cfg(ts, cfg, len); if (error) return error; - error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg->data, - cfg->size); + error = goodix_i2c_write(ts->client, ts->chip->config_addr, cfg, len); if (error) { dev_err(&ts->client->dev, "Failed to write config data: %d", error); @@ -502,17 +605,93 @@ static int goodix_send_cfg(struct goodix_ts_data *ts, return 0; } +#ifdef ACPI_GPIO_SUPPORT +static int goodix_pin_acpi_direction_input(struct goodix_ts_data *ts) +{ + acpi_handle handle = ACPI_HANDLE(&ts->client->dev); + acpi_status status; + + status = acpi_evaluate_object(handle, "INTI", NULL, NULL); + return ACPI_SUCCESS(status) ? 0 : -EIO; +} + +static int goodix_pin_acpi_output_method(struct goodix_ts_data *ts, int value) +{ + acpi_handle handle = ACPI_HANDLE(&ts->client->dev); + acpi_status status; + + status = acpi_execute_simple_method(handle, "INTO", value); + return ACPI_SUCCESS(status) ? 0 : -EIO; +} +#else +static int goodix_pin_acpi_direction_input(struct goodix_ts_data *ts) +{ + dev_err(&ts->client->dev, + "%s called on device without ACPI support\n", __func__); + return -EINVAL; +} + +static int goodix_pin_acpi_output_method(struct goodix_ts_data *ts, int value) +{ + dev_err(&ts->client->dev, + "%s called on device without ACPI support\n", __func__); + return -EINVAL; +} +#endif + +static int goodix_irq_direction_output(struct goodix_ts_data *ts, int value) +{ + switch (ts->irq_pin_access_method) { + case IRQ_PIN_ACCESS_NONE: + dev_err(&ts->client->dev, + "%s called without an irq_pin_access_method set\n", + __func__); + return -EINVAL; + case IRQ_PIN_ACCESS_GPIO: + return gpiod_direction_output(ts->gpiod_int, value); + case IRQ_PIN_ACCESS_ACPI_GPIO: + /* + * The IRQ pin triggers on a falling edge, so it gets marked + * as active-low, use output_raw to avoid the value inversion. 
+ */ + return gpiod_direction_output_raw(ts->gpiod_int, value); + case IRQ_PIN_ACCESS_ACPI_METHOD: + return goodix_pin_acpi_output_method(ts, value); + } + + return -EINVAL; /* Never reached */ +} + +static int goodix_irq_direction_input(struct goodix_ts_data *ts) +{ + switch (ts->irq_pin_access_method) { + case IRQ_PIN_ACCESS_NONE: + dev_err(&ts->client->dev, + "%s called without an irq_pin_access_method set\n", + __func__); + return -EINVAL; + case IRQ_PIN_ACCESS_GPIO: + return gpiod_direction_input(ts->gpiod_int); + case IRQ_PIN_ACCESS_ACPI_GPIO: + return gpiod_direction_input(ts->gpiod_int); + case IRQ_PIN_ACCESS_ACPI_METHOD: + return goodix_pin_acpi_direction_input(ts); + } + + return -EINVAL; /* Never reached */ +} + static int goodix_int_sync(struct goodix_ts_data *ts) { int error; - error = gpiod_direction_output(ts->gpiod_int, 0); + error = goodix_irq_direction_output(ts, 0); if (error) return error; msleep(50); /* T5: 50ms */ - error = gpiod_direction_input(ts->gpiod_int); + error = goodix_irq_direction_input(ts); if (error) return error; @@ -536,7 +715,7 @@ static int goodix_reset(struct goodix_ts_data *ts) msleep(20); /* T2: > 10ms */ /* HIGH: 0x28/0x29, LOW: 0xBA/0xBB */ - error = gpiod_direction_output(ts->gpiod_int, ts->client->addr == 0x14); + error = goodix_irq_direction_output(ts, ts->client->addr == 0x14); if (error) return error; @@ -560,6 +739,124 @@ static int goodix_reset(struct goodix_ts_data *ts) return 0; } +#ifdef ACPI_GPIO_SUPPORT +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +static const struct x86_cpu_id baytrail_cpu_ids[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_SILVERMONT, X86_FEATURE_ANY, }, + {} +}; + +static inline bool is_byt(void) +{ + const struct x86_cpu_id *id = x86_match_cpu(baytrail_cpu_ids); + + return !!id; } + +static const struct acpi_gpio_params first_gpio = { 0, 0, false }; +static const struct acpi_gpio_params second_gpio = { 1, 0, false }; + +static const struct acpi_gpio_mapping acpi_goodix_int_first_gpios[] = { + { GOODIX_GPIO_INT_NAME "-gpios", &first_gpio, 1 }, + { GOODIX_GPIO_RST_NAME "-gpios", &second_gpio, 1 }, + { }, +}; + +static const struct acpi_gpio_mapping acpi_goodix_int_last_gpios[] = { + { GOODIX_GPIO_RST_NAME "-gpios", &first_gpio, 1 }, + { GOODIX_GPIO_INT_NAME "-gpios", &second_gpio, 1 }, + { }, +}; + +static const struct acpi_gpio_mapping acpi_goodix_reset_only_gpios[] = { + { GOODIX_GPIO_RST_NAME "-gpios", &first_gpio, 1 }, + { }, +}; + +static int goodix_resource(struct acpi_resource *ares, void *data) +{ + struct goodix_ts_data *ts = data; + struct device *dev = &ts->client->dev; + struct acpi_resource_gpio *gpio; + + switch (ares->type) { + case ACPI_RESOURCE_TYPE_GPIO: + gpio = &ares->data.gpio; + if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) { + if (ts->gpio_int_idx == -1) { + ts->gpio_int_idx = ts->gpio_count; + } else { + dev_err(dev, "More than one GpioInt resource, ignoring ACPI GPIO resources\n"); + ts->gpio_int_idx = -2; + } + } + ts->gpio_count++; + break; + default: + break; + } + + return 0; +} + +/* + * This function gets called in case we fail to get the irq GPIO directly + * because the ACPI tables lack GPIO-name to ACPI _CRS index mappings + * (no _DSD UUID daffd814-6eba-4d8c-8a91-bc9bbf4aa301 data). + * In that case we add our own mapping and then goodix_get_gpio_config() + * retries to get the GPIOs based on the added mapping. 
+ */ +static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) +{ + const struct acpi_gpio_mapping *gpio_mapping = NULL; + struct device *dev = &ts->client->dev; + LIST_HEAD(resources); + int ret; + + ts->gpio_count = 0; + ts->gpio_int_idx = -1; + ret = acpi_dev_get_resources(ACPI_COMPANION(dev), &resources, + goodix_resource, ts); + if (ret < 0) { + dev_err(dev, "Error getting ACPI resources: %d\n", ret); + return ret; + } + + acpi_dev_free_resource_list(&resources); + + if (ts->gpio_count == 2 && ts->gpio_int_idx == 0) { + ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; + gpio_mapping = acpi_goodix_int_first_gpios; + } else if (ts->gpio_count == 2 && ts->gpio_int_idx == 1) { + ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; + gpio_mapping = acpi_goodix_int_last_gpios; + } else if (ts->gpio_count == 1 && ts->gpio_int_idx == -1 && + acpi_has_method(ACPI_HANDLE(dev), "INTI") && + acpi_has_method(ACPI_HANDLE(dev), "INTO")) { + dev_info(dev, "Using ACPI INTI and INTO methods for IRQ pin access\n"); + ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_METHOD; + gpio_mapping = acpi_goodix_reset_only_gpios; + } else if (is_byt() && ts->gpio_count == 2 && ts->gpio_int_idx == -1) { + dev_info(dev, "No ACPI GpioInt resource, assuming that the GPIO order is reset, int\n"); + ts->irq_pin_access_method = IRQ_PIN_ACCESS_ACPI_GPIO; + gpio_mapping = acpi_goodix_int_last_gpios; + } else { + dev_warn(dev, "Unexpected ACPI resources: gpio_count %d, gpio_int_idx %d\n", + ts->gpio_count, ts->gpio_int_idx); + return -EINVAL; + } + + return devm_acpi_dev_add_driver_gpios(dev, gpio_mapping); +} +#else +static int goodix_add_acpi_gpio_mappings(struct goodix_ts_data *ts) +{ + return -EINVAL; +} +#endif /* CONFIG_X86 && CONFIG_ACPI */ + /** * goodix_get_gpio_config - Get GPIO config from ACPI/DT * @@ -570,6 +867,7 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts) int error; struct device *dev; struct gpio_desc *gpiod; + bool added_acpi_mappings = false; if (!ts->client) return -EINVAL; @@ -593,6 +891,7 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts) return error; } +retry_get_irq_gpio: /* Get the interrupt GPIO pin number */ gpiod = devm_gpiod_get_optional(dev, GOODIX_GPIO_INT_NAME, GPIOD_IN); if (IS_ERR(gpiod)) { @@ -602,6 +901,11 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts) GOODIX_GPIO_INT_NAME, error); return error; } + if (!gpiod && has_acpi_companion(dev) && !added_acpi_mappings) { + added_acpi_mappings = true; + if (goodix_add_acpi_gpio_mappings(ts) == 0) + goto retry_get_irq_gpio; + } ts->gpiod_int = gpiod; @@ -617,6 +921,31 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts) ts->gpiod_rst = gpiod; + switch (ts->irq_pin_access_method) { + case IRQ_PIN_ACCESS_ACPI_GPIO: + /* + * We end up here if goodix_add_acpi_gpio_mappings() has + * called devm_acpi_dev_add_driver_gpios() because the ACPI + * tables did not contain name to index mappings. + * Check that we successfully got both GPIOs after we've + * added our own acpi_gpio_mapping and if we did not get both + * GPIOs reset irq_pin_access_method to IRQ_PIN_ACCESS_NONE. 
+ */ + if (!ts->gpiod_int || !ts->gpiod_rst) + ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE; + break; + case IRQ_PIN_ACCESS_ACPI_METHOD: + if (!ts->gpiod_rst) + ts->irq_pin_access_method = IRQ_PIN_ACCESS_NONE; + break; + default: + if (ts->gpiod_int && ts->gpiod_rst) { + ts->reset_controller_at_probe = true; + ts->load_cfg_from_disk = true; + ts->irq_pin_access_method = IRQ_PIN_ACCESS_GPIO; + } + } + return 0; } @@ -629,12 +958,11 @@ static int goodix_get_gpio_config(struct goodix_ts_data *ts) */ static void goodix_read_config(struct goodix_ts_data *ts) { - u8 config[GOODIX_CONFIG_MAX_LENGTH]; int x_max, y_max; int error; error = goodix_i2c_read(ts->client, ts->chip->config_addr, - config, ts->chip->config_len); + ts->config, ts->chip->config_len); if (error) { dev_warn(&ts->client->dev, "Error reading config: %d\n", error); @@ -643,15 +971,17 @@ static void goodix_read_config(struct goodix_ts_data *ts) return; } - ts->int_trigger_type = config[TRIGGER_LOC] & 0x03; - ts->max_touch_num = config[MAX_CONTACTS_LOC] & 0x0f; + ts->int_trigger_type = ts->config[TRIGGER_LOC] & 0x03; + ts->max_touch_num = ts->config[MAX_CONTACTS_LOC] & 0x0f; - x_max = get_unaligned_le16(&config[RESOLUTION_LOC]); - y_max = get_unaligned_le16(&config[RESOLUTION_LOC + 2]); + x_max = get_unaligned_le16(&ts->config[RESOLUTION_LOC]); + y_max = get_unaligned_le16(&ts->config[RESOLUTION_LOC + 2]); if (x_max && y_max) { input_abs_set_max(ts->input_dev, ABS_MT_POSITION_X, x_max - 1); input_abs_set_max(ts->input_dev, ABS_MT_POSITION_Y, y_max - 1); } + + ts->chip->calc_config_checksum(ts); } /** @@ -663,7 +993,7 @@ static int goodix_read_version(struct goodix_ts_data *ts) { int error; u8 buf[6]; - char id_str[5]; + char id_str[GOODIX_ID_MAX_LEN + 1]; error = goodix_i2c_read(ts->client, GOODIX_REG_ID, buf, sizeof(buf)); if (error) { @@ -671,14 +1001,13 @@ static int goodix_read_version(struct goodix_ts_data *ts) return error; } - memcpy(id_str, buf, 4); - id_str[4] = 0; - if (kstrtou16(id_str, 10, &ts->id)) - ts->id = 0x1001; + memcpy(id_str, buf, GOODIX_ID_MAX_LEN); + id_str[GOODIX_ID_MAX_LEN] = 0; + strscpy(ts->id, id_str, GOODIX_ID_MAX_LEN + 1); ts->version = get_unaligned_le16(&buf[4]); - dev_info(&ts->client->dev, "ID %d, version: %04x\n", ts->id, + dev_info(&ts->client->dev, "ID %s, version: %04x\n", ts->id, ts->version); return 0; @@ -722,6 +1051,7 @@ static int goodix_i2c_test(struct i2c_client *client) static int goodix_configure_dev(struct goodix_ts_data *ts) { int error; + int i; ts->int_trigger_type = GOODIX_INT_TRIGGER; ts->max_touch_num = GOODIX_MAX_CONTACTS; @@ -736,11 +1066,23 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) ts->input_dev->phys = "input/ts"; ts->input_dev->id.bustype = BUS_I2C; ts->input_dev->id.vendor = 0x0416; - ts->input_dev->id.product = ts->id; + if (kstrtou16(ts->id, 10, &ts->input_dev->id.product)) + ts->input_dev->id.product = 0x1001; ts->input_dev->id.version = ts->version; + ts->input_dev->keycode = ts->keymap; + ts->input_dev->keycodesize = sizeof(ts->keymap[0]); + ts->input_dev->keycodemax = GOODIX_MAX_KEYS; + /* Capacitive Windows/Home button on some devices */ - input_set_capability(ts->input_dev, EV_KEY, KEY_LEFTMETA); + for (i = 0; i < GOODIX_MAX_KEYS; ++i) { + if (i == 0) + ts->keymap[i] = KEY_LEFTMETA; + else + ts->keymap[i] = KEY_F1 + (i - 1); + + input_set_capability(ts->input_dev, EV_KEY, ts->keymap[i]); + } input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_X); input_set_capability(ts->input_dev, EV_ABS, ABS_MT_POSITION_Y); @@ -780,6 +1122,12 @@ 
static int goodix_configure_dev(struct goodix_ts_data *ts) "Non-standard 9-bytes report format quirk\n"); } + if (dmi_check_system(inverted_x_screen)) { + ts->prop.invert_x = true; + dev_dbg(&ts->client->dev, + "Applying 'inverted x screen' quirk\n"); + } + error = input_mt_init_slots(ts->input_dev, ts->max_touch_num, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED); if (error) { @@ -820,7 +1168,7 @@ static void goodix_config_cb(const struct firmware *cfg, void *ctx) if (cfg) { /* send device configuration to the firmware */ - error = goodix_send_cfg(ts, cfg); + error = goodix_send_cfg(ts, cfg->data, cfg->size); if (error) goto err_release_cfg; } @@ -889,7 +1237,8 @@ static int goodix_ts_probe(struct i2c_client *client, if (error) return error; - if (ts->gpiod_int && ts->gpiod_rst) { +reset: + if (ts->reset_controller_at_probe) { /* reset the controller */ error = goodix_reset(ts); if (error) { @@ -900,6 +1249,12 @@ static int goodix_ts_probe(struct i2c_client *client, error = goodix_i2c_test(client); if (error) { + if (!ts->reset_controller_at_probe && + ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) { + /* Retry after a controller reset */ + ts->reset_controller_at_probe = true; + goto reset; + } dev_err(&client->dev, "I2C communication failure: %d\n", error); return error; } @@ -912,10 +1267,10 @@ static int goodix_ts_probe(struct i2c_client *client, ts->chip = goodix_get_chip_data(ts->id); - if (ts->gpiod_int && ts->gpiod_rst) { + if (ts->load_cfg_from_disk) { /* update device config */ ts->cfg_name = devm_kasprintf(&client->dev, GFP_KERNEL, - "goodix_%d_cfg.bin", ts->id); + "goodix_%s_cfg.bin", ts->id); if (!ts->cfg_name) return -ENOMEM; @@ -943,7 +1298,7 @@ static int goodix_ts_remove(struct i2c_client *client) { struct goodix_ts_data *ts = i2c_get_clientdata(client); - if (ts->gpiod_int && ts->gpiod_rst) + if (ts->load_cfg_from_disk) wait_for_completion(&ts->firmware_loading_complete); return 0; @@ -955,19 +1310,20 @@ static int __maybe_unused goodix_suspend(struct device *dev) struct goodix_ts_data *ts = i2c_get_clientdata(client); int error; + if (ts->load_cfg_from_disk) + wait_for_completion(&ts->firmware_loading_complete); + /* We need gpio pins to suspend/resume */ - if (!ts->gpiod_int || !ts->gpiod_rst) { + if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) { disable_irq(client->irq); return 0; } - wait_for_completion(&ts->firmware_loading_complete); - /* Free IRQ as IRQ pin is used as output in the suspend sequence */ goodix_free_irq(ts); /* Output LOW on the INT pin for 5 ms */ - error = gpiod_direction_output(ts->gpiod_int, 0); + error = goodix_irq_direction_output(ts, 0); if (error) { goodix_request_irq(ts); return error; @@ -979,7 +1335,7 @@ static int __maybe_unused goodix_suspend(struct device *dev) GOODIX_CMD_SCREEN_OFF); if (error) { dev_err(&ts->client->dev, "Screen off command failed\n"); - gpiod_direction_input(ts->gpiod_int); + goodix_irq_direction_input(ts); goodix_request_irq(ts); return -EAGAIN; } @@ -997,9 +1353,10 @@ static int __maybe_unused goodix_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct goodix_ts_data *ts = i2c_get_clientdata(client); + u8 config_ver; int error; - if (!ts->gpiod_int || !ts->gpiod_rst) { + if (ts->irq_pin_access_method == IRQ_PIN_ACCESS_NONE) { enable_irq(client->irq); return 0; } @@ -1008,7 +1365,7 @@ static int __maybe_unused goodix_resume(struct device *dev) * Exit sleep mode by outputting HIGH level to INT pin * for 2ms~5ms. 
*/ - error = gpiod_direction_output(ts->gpiod_int, 1); + error = goodix_irq_direction_output(ts, 1); if (error) return error; @@ -1018,6 +1375,27 @@ static int __maybe_unused goodix_resume(struct device *dev) if (error) return error; + error = goodix_i2c_read(ts->client, ts->chip->config_addr, + &config_ver, 1); + if (error) + dev_warn(dev, "Error reading config version: %d, resetting controller\n", + error); + else if (config_ver != ts->config[0]) + dev_info(dev, "Config version mismatch %d != %d, resetting controller\n", + config_ver, ts->config[0]); + + if (error != 0 || config_ver != ts->config[0]) { + error = goodix_reset(ts); + if (error) { + dev_err(dev, "Controller reset failed.\n"); + return error; + } + + error = goodix_send_cfg(ts, ts->config, ts->chip->config_len); + if (error) + return error; + } + error = goodix_request_irq(ts); if (error) return error; @@ -1050,6 +1428,8 @@ static const struct of_device_id goodix_of_match[] = { { .compatible = "goodix,gt911" }, { .compatible = "goodix,gt9110" }, { .compatible = "goodix,gt912" }, + { .compatible = "goodix,gt9147" }, + { .compatible = "goodix,gt917s" }, { .compatible = "goodix,gt927" }, { .compatible = "goodix,gt9271" }, { .compatible = "goodix,gt928" }, diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c index e16ec4c7043a..97342e14b4f1 100644 --- a/drivers/input/touchscreen/of_touchscreen.c +++ b/drivers/input/touchscreen/of_touchscreen.c @@ -66,7 +66,7 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, { struct device *dev = input->dev.parent; struct input_absinfo *absinfo; - unsigned int axis; + unsigned int axis, axis_x, axis_y; unsigned int minimum, maximum, fuzz; bool data_present; @@ -74,33 +74,34 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, if (!input->absinfo) return; - axis = multitouch ? ABS_MT_POSITION_X : ABS_X; + axis_x = multitouch ? ABS_MT_POSITION_X : ABS_X; + axis_y = multitouch ? ABS_MT_POSITION_Y : ABS_Y; + data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-x", - input_abs_get_min(input, axis), + input_abs_get_min(input, axis_x), &minimum) | touchscreen_get_prop_u32(dev, "touchscreen-size-x", input_abs_get_max(input, - axis) + 1, + axis_x) + 1, &maximum) | touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x", - input_abs_get_fuzz(input, axis), + input_abs_get_fuzz(input, axis_x), &fuzz); if (data_present) - touchscreen_set_params(input, axis, minimum, maximum - 1, fuzz); + touchscreen_set_params(input, axis_x, minimum, maximum - 1, fuzz); - axis = multitouch ? ABS_MT_POSITION_Y : ABS_Y; data_present = touchscreen_get_prop_u32(dev, "touchscreen-min-y", - input_abs_get_min(input, axis), + input_abs_get_min(input, axis_y), &minimum) | touchscreen_get_prop_u32(dev, "touchscreen-size-y", input_abs_get_max(input, - axis) + 1, + axis_y) + 1, &maximum) | touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y", - input_abs_get_fuzz(input, axis), + input_abs_get_fuzz(input, axis_y), &fuzz); if (data_present) - touchscreen_set_params(input, axis, minimum, maximum - 1, fuzz); + touchscreen_set_params(input, axis_y, minimum, maximum - 1, fuzz); axis = multitouch ? ABS_MT_PRESSURE : ABS_PRESSURE; data_present = touchscreen_get_prop_u32(dev, @@ -117,15 +118,13 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, if (!prop) return; - axis = multitouch ? 
ABS_MT_POSITION_X : ABS_X; - - prop->max_x = input_abs_get_max(input, axis); - prop->max_y = input_abs_get_max(input, axis + 1); + prop->max_x = input_abs_get_max(input, axis_x); + prop->max_y = input_abs_get_max(input, axis_y); prop->invert_x = device_property_read_bool(dev, "touchscreen-inverted-x"); if (prop->invert_x) { - absinfo = &input->absinfo[axis]; + absinfo = &input->absinfo[axis_x]; absinfo->maximum -= absinfo->minimum; absinfo->minimum = 0; } @@ -133,7 +132,7 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, prop->invert_y = device_property_read_bool(dev, "touchscreen-inverted-y"); if (prop->invert_y) { - absinfo = &input->absinfo[axis + 1]; + absinfo = &input->absinfo[axis_y]; absinfo->maximum -= absinfo->minimum; absinfo->minimum = 0; } @@ -141,7 +140,7 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch, prop->swap_x_y = device_property_read_bool(dev, "touchscreen-swapped-x-y"); if (prop->swap_x_y) - swap(input->absinfo[axis], input->absinfo[axis + 1]); + swap(input->absinfo[axis_x], input->absinfo[axis_y]); } EXPORT_SYMBOL(touchscreen_parse_properties); diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index d2fade984999..58b4a4dbfc78 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -188,6 +188,7 @@ config INTEL_IOMMU select NEED_DMA_MAP_STATE select DMAR_TABLE select SWIOTLB + select IOASID help DMA remapping (DMAR) devices support enables independent address translations for Direct Memory Access (DMA) from devices. @@ -273,7 +274,7 @@ config IRQ_REMAP # OMAP IOMMU support config OMAP_IOMMU bool "OMAP IOMMU Support" - depends on ARM && MMU + depends on ARM && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC)) depends on ARCH_OMAP2PLUS || COMPILE_TEST select IOMMU_API ---help--- @@ -291,7 +292,7 @@ config OMAP_IOMMU_DEBUG config ROCKCHIP_IOMMU bool "Rockchip IOMMU Support" - depends on ARM || ARM64 + depends on ARM || ARM64 || (COMPILE_TEST && (ARM64 || IA64 || SPARC)) depends on ARCH_ROCKCHIP || COMPILE_TEST select IOMMU_API select ARM_DMA_USE_IOMMU @@ -325,7 +326,7 @@ config TEGRA_IOMMU_SMMU config EXYNOS_IOMMU bool "Exynos IOMMU Support" - depends on ARCH_EXYNOS && MMU + depends on ARCH_EXYNOS && MMU || (COMPILE_TEST && (ARM || ARM64 || IA64 || SPARC)) depends on !CPU_BIG_ENDIAN # revisit driver if we can enable big-endian ptes select IOMMU_API select ARM_DMA_USE_IOMMU @@ -361,7 +362,7 @@ config IPMMU_VMSA config SPAPR_TCE_IOMMU bool "sPAPR TCE IOMMU Support" - depends on PPC_POWERNV || PPC_PSERIES + depends on PPC_POWERNV || PPC_PSERIES || (PPC && COMPILE_TEST) select IOMMU_API help Enables bits of IOMMU API required by VFIO. The iommu_ops @@ -370,7 +371,7 @@ config SPAPR_TCE_IOMMU # ARM IOMMU support config ARM_SMMU tristate "ARM Ltd. System MMU (SMMU) Support" - depends on (ARM64 || ARM) && MMU + depends on (ARM64 || ARM || (COMPILE_TEST && !GENERIC_ATOMIC64)) && MMU select IOMMU_API select IOMMU_IO_PGTABLE_LPAE select ARM_DMA_USE_IOMMU if ARM @@ -440,7 +441,7 @@ config S390_IOMMU config S390_CCW_IOMMU bool "S390 CCW IOMMU Support" - depends on S390 && CCW + depends on S390 && CCW || COMPILE_TEST select IOMMU_API help Enables bits of IOMMU API required by VFIO. The iommu_ops @@ -448,7 +449,7 @@ config S390_CCW_IOMMU config S390_AP_IOMMU bool "S390 AP IOMMU Support" - depends on S390 && ZCRYPT + depends on S390 && ZCRYPT || COMPILE_TEST select IOMMU_API help Enables bits of IOMMU API required by VFIO. 
The iommu_ops @@ -456,7 +457,7 @@ config S390_AP_IOMMU config MTK_IOMMU bool "MTK IOMMU Support" - depends on ARM || ARM64 + depends on ARM || ARM64 || COMPILE_TEST depends on ARCH_MEDIATEK || COMPILE_TEST select ARM_DMA_USE_IOMMU select IOMMU_API @@ -506,8 +507,8 @@ config HYPERV_IOMMU guests to run with x2APIC mode enabled. config VIRTIO_IOMMU - bool "Virtio IOMMU driver" - depends on VIRTIO=y + tristate "Virtio IOMMU driver" + depends on VIRTIO depends on ARM64 select IOMMU_API select INTERVAL_TREE diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index f8d01d6b00da..ca8c4522045b 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -348,7 +348,7 @@ #define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL) #define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL) -#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL) +#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0x1fffffULL) #define DTE_GCR3_INDEX_A 0 #define DTE_GCR3_INDEX_B 1 diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index aa3ac2a03807..82508730feb7 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -69,6 +69,9 @@ #define IDR1_SSIDSIZE GENMASK(10, 6) #define IDR1_SIDSIZE GENMASK(5, 0) +#define ARM_SMMU_IDR3 0xc +#define IDR3_RIL (1 << 10) + #define ARM_SMMU_IDR5 0x14 #define IDR5_STALL_MAX GENMASK(31, 16) #define IDR5_GRAN64K (1 << 6) @@ -346,9 +349,14 @@ #define CMDQ_CFGI_1_LEAF (1UL << 0) #define CMDQ_CFGI_1_RANGE GENMASK_ULL(4, 0) +#define CMDQ_TLBI_0_NUM GENMASK_ULL(16, 12) +#define CMDQ_TLBI_RANGE_NUM_MAX 31 +#define CMDQ_TLBI_0_SCALE GENMASK_ULL(24, 20) #define CMDQ_TLBI_0_VMID GENMASK_ULL(47, 32) #define CMDQ_TLBI_0_ASID GENMASK_ULL(63, 48) #define CMDQ_TLBI_1_LEAF (1UL << 0) +#define CMDQ_TLBI_1_TTL GENMASK_ULL(9, 8) +#define CMDQ_TLBI_1_TG GENMASK_ULL(11, 10) #define CMDQ_TLBI_1_VA_MASK GENMASK_ULL(63, 12) #define CMDQ_TLBI_1_IPA_MASK GENMASK_ULL(51, 12) @@ -473,9 +481,13 @@ struct arm_smmu_cmdq_ent { #define CMDQ_OP_TLBI_S2_IPA 0x2a #define CMDQ_OP_TLBI_NSNH_ALL 0x30 struct { + u8 num; + u8 scale; u16 asid; u16 vmid; bool leaf; + u8 ttl; + u8 tg; u64 addr; } tlbi; @@ -548,6 +560,11 @@ struct arm_smmu_cmdq { atomic_t lock; }; +struct arm_smmu_cmdq_batch { + u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; + int num; +}; + struct arm_smmu_evtq { struct arm_smmu_queue q; u32 max_stalls; @@ -627,6 +644,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_HYP (1 << 12) #define ARM_SMMU_FEAT_STALL_FORCE (1 << 13) #define ARM_SMMU_FEAT_VAX (1 << 14) +#define ARM_SMMU_FEAT_RANGE_INV (1 << 15) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) @@ -895,14 +913,22 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent) cmd[1] |= FIELD_PREP(CMDQ_CFGI_1_RANGE, 31); break; case CMDQ_OP_TLBI_NH_VA: + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_ASID, ent->tlbi.asid); cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); + cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); + cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK; break; case CMDQ_OP_TLBI_S2_IPA: + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_NUM, ent->tlbi.num); + cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_SCALE, ent->tlbi.scale); cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, ent->tlbi.vmid); cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_LEAF, ent->tlbi.leaf); + cmd[1] |= 
FIELD_PREP(CMDQ_TLBI_1_TTL, ent->tlbi.ttl); + cmd[1] |= FIELD_PREP(CMDQ_TLBI_1_TG, ent->tlbi.tg); cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK; break; case CMDQ_OP_TLBI_NH_ASID: @@ -1482,6 +1508,24 @@ static int arm_smmu_cmdq_issue_sync(struct arm_smmu_device *smmu) return arm_smmu_cmdq_issue_cmdlist(smmu, NULL, 0, true); } +static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_batch *cmds, + struct arm_smmu_cmdq_ent *cmd) +{ + if (cmds->num == CMDQ_BATCH_ENTRIES) { + arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); + cmds->num = 0; + } + arm_smmu_cmdq_build_cmd(&cmds->cmds[cmds->num * CMDQ_ENT_DWORDS], cmd); + cmds->num++; +} + +static int arm_smmu_cmdq_batch_submit(struct arm_smmu_device *smmu, + struct arm_smmu_cmdq_batch *cmds) +{ + return arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); +} + /* Context descriptor manipulation functions */ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, int ssid, bool leaf) @@ -1489,6 +1533,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, size_t i; unsigned long flags; struct arm_smmu_master *master; + struct arm_smmu_cmdq_batch cmds = {}; struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_cmdq_ent cmd = { .opcode = CMDQ_OP_CFGI_CD, @@ -1502,12 +1547,12 @@ static void arm_smmu_sync_cd(struct arm_smmu_domain *smmu_domain, list_for_each_entry(master, &smmu_domain->devices, domain_head) { for (i = 0; i < master->num_sids; i++) { cmd.cfgi.sid = master->sids[i]; - arm_smmu_cmdq_issue_cmd(smmu, &cmd); + arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); } } spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - arm_smmu_cmdq_issue_sync(smmu); + arm_smmu_cmdq_batch_submit(smmu, &cmds); } static int arm_smmu_alloc_cd_leaf_table(struct arm_smmu_device *smmu, @@ -1531,6 +1576,7 @@ static void arm_smmu_write_cd_l1_desc(__le64 *dst, u64 val = (l1_desc->l2ptr_dma & CTXDESC_L1_DESC_L2PTR_MASK) | CTXDESC_L1_DESC_V; + /* See comment in arm_smmu_write_ctx_desc() */ WRITE_ONCE(*dst, cpu_to_le64(val)); } @@ -1726,7 +1772,8 @@ arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc) val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span); val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK; - *dst = cpu_to_le64(val); + /* See comment in arm_smmu_write_ctx_desc() */ + WRITE_ONCE(*dst, cpu_to_le64(val)); } static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid) @@ -2132,17 +2179,16 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size, cmd->atc.size = log2_span; } -static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, - struct arm_smmu_cmdq_ent *cmd) +static int arm_smmu_atc_inv_master(struct arm_smmu_master *master) { int i; + struct arm_smmu_cmdq_ent cmd; - if (!master->ats_enabled) - return 0; + arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); for (i = 0; i < master->num_sids; i++) { - cmd->atc.sid = master->sids[i]; - arm_smmu_cmdq_issue_cmd(master->smmu, cmd); + cmd.atc.sid = master->sids[i]; + arm_smmu_cmdq_issue_cmd(master->smmu, &cmd); } return arm_smmu_cmdq_issue_sync(master->smmu); @@ -2151,10 +2197,11 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master, static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid, unsigned long iova, size_t size) { - int ret = 0; + int i; unsigned long flags; struct arm_smmu_cmdq_ent cmd; struct arm_smmu_master *master; + struct arm_smmu_cmdq_batch cmds = {}; if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS)) 
return 0; @@ -2179,11 +2226,18 @@ static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd); spin_lock_irqsave(&smmu_domain->devices_lock, flags); - list_for_each_entry(master, &smmu_domain->devices, domain_head) - ret |= arm_smmu_atc_inv_master(master, &cmd); + list_for_each_entry(master, &smmu_domain->devices, domain_head) { + if (!master->ats_enabled) + continue; + + for (i = 0; i < master->num_sids; i++) { + cmd.atc.sid = master->sids[i]; + arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd); + } + } spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); - return ret ? -ETIMEDOUT : 0; + return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds); } /* IO_PGTABLE API */ @@ -2218,10 +2272,10 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, size_t granule, bool leaf, struct arm_smmu_domain *smmu_domain) { - u64 cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS]; struct arm_smmu_device *smmu = smmu_domain->smmu; - unsigned long start = iova, end = iova + size; - int i = 0; + unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0; + size_t inv_range = granule; + struct arm_smmu_cmdq_batch cmds = {}; struct arm_smmu_cmdq_ent cmd = { .tlbi = { .leaf = leaf, @@ -2239,19 +2293,50 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size, cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid; } + if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { + /* Get the leaf page size */ + tg = __ffs(smmu_domain->domain.pgsize_bitmap); + + /* Convert page size of 12,14,16 (log2) to 1,2,3 */ + cmd.tlbi.tg = (tg - 10) / 2; + + /* Determine what level the granule is at */ + cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3)); + + num_pages = size >> tg; + } + while (iova < end) { - if (i == CMDQ_BATCH_ENTRIES) { - arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, false); - i = 0; + if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) { + /* + * On each iteration of the loop, the range is 5 bits + * worth of the aligned size remaining. + * The range in pages is: + * + * range = (num_pages & (0x1f << __ffs(num_pages))) + */ + unsigned long scale, num; + + /* Determine the power of 2 multiple number of pages */ + scale = __ffs(num_pages); + cmd.tlbi.scale = scale; + + /* Determine how many chunks of 2^scale size we have */ + num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX; + cmd.tlbi.num = num - 1; + + /* range is num * 2^scale * pgsize */ + inv_range = num << (scale + tg); + + /* Clear out the lower order bits for the next iteration */ + num_pages -= num << scale; } cmd.tlbi.addr = iova; - arm_smmu_cmdq_build_cmd(&cmds[i * CMDQ_ENT_DWORDS], &cmd); - iova += granule; - i++; + arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd); + iova += inv_range; } - - arm_smmu_cmdq_issue_cmdlist(smmu, cmds, i, true); + arm_smmu_cmdq_batch_submit(smmu, &cmds); /* * Unfortunately, this can't be leaf-only since we may have @@ -2611,7 +2696,6 @@ static void arm_smmu_enable_ats(struct arm_smmu_master *master) static void arm_smmu_disable_ats(struct arm_smmu_master *master) { - struct arm_smmu_cmdq_ent cmd; struct arm_smmu_domain *smmu_domain = master->domain; if (!master->ats_enabled) @@ -2623,11 +2707,57 @@ static void arm_smmu_disable_ats(struct arm_smmu_master *master) * ATC invalidation via the SMMU. 
*/ wmb(); - arm_smmu_atc_inv_to_cmd(0, 0, 0, &cmd); - arm_smmu_atc_inv_master(master, &cmd); + arm_smmu_atc_inv_master(master); atomic_dec(&smmu_domain->nr_ats_masters); } +static int arm_smmu_enable_pasid(struct arm_smmu_master *master) +{ + int ret; + int features; + int num_pasids; + struct pci_dev *pdev; + + if (!dev_is_pci(master->dev)) + return -ENODEV; + + pdev = to_pci_dev(master->dev); + + features = pci_pasid_features(pdev); + if (features < 0) + return features; + + num_pasids = pci_max_pasids(pdev); + if (num_pasids <= 0) + return num_pasids; + + ret = pci_enable_pasid(pdev, features); + if (ret) { + dev_err(&pdev->dev, "Failed to enable PASID\n"); + return ret; + } + + master->ssid_bits = min_t(u8, ilog2(num_pasids), + master->smmu->ssid_bits); + return 0; +} + +static void arm_smmu_disable_pasid(struct arm_smmu_master *master) +{ + struct pci_dev *pdev; + + if (!dev_is_pci(master->dev)) + return; + + pdev = to_pci_dev(master->dev); + + if (!pdev->pasid_enabled) + return; + + master->ssid_bits = 0; + pci_disable_pasid(pdev); +} + static void arm_smmu_detach_dev(struct arm_smmu_master *master) { unsigned long flags; @@ -2659,7 +2789,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) if (!fwspec) return -ENOENT; - master = fwspec->iommu_priv; + master = dev_iommu_priv_get(dev); smmu = master->smmu; arm_smmu_detach_dev(master); @@ -2795,7 +2925,7 @@ static int arm_smmu_add_device(struct device *dev) if (!fwspec || fwspec->ops != &arm_smmu_ops) return -ENODEV; - if (WARN_ON_ONCE(fwspec->iommu_priv)) + if (WARN_ON_ONCE(dev_iommu_priv_get(dev))) return -EBUSY; smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); @@ -2810,7 +2940,7 @@ static int arm_smmu_add_device(struct device *dev) master->smmu = smmu; master->sids = fwspec->ids; master->num_sids = fwspec->num_ids; - fwspec->iommu_priv = master; + dev_iommu_priv_set(dev, master); /* Check the SIDs are in range of the SMMU and our stream table */ for (i = 0; i < master->num_sids; i++) { @@ -2831,13 +2961,23 @@ static int arm_smmu_add_device(struct device *dev) master->ssid_bits = min(smmu->ssid_bits, fwspec->num_pasid_bits); + /* + * Note that PASID must be enabled before, and disabled after ATS: + * PCI Express Base 4.0r1.0 - 10.5.1.3 ATS Control Register + * + * Behavior is undefined if this bit is Set and the value of the PASID + * Enable, Execute Requested Enable, or Privileged Mode Requested bits + * are changed. 
+ */ + arm_smmu_enable_pasid(master); + if (!(smmu->features & ARM_SMMU_FEAT_2_LVL_CDTAB)) master->ssid_bits = min_t(u8, master->ssid_bits, CTXDESC_LINEAR_CDMAX); ret = iommu_device_link(&smmu->iommu, dev); if (ret) - goto err_free_master; + goto err_disable_pasid; group = iommu_group_get_for_dev(dev); if (IS_ERR(group)) { @@ -2850,9 +2990,11 @@ static int arm_smmu_add_device(struct device *dev) err_unlink: iommu_device_unlink(&smmu->iommu, dev); +err_disable_pasid: + arm_smmu_disable_pasid(master); err_free_master: kfree(master); - fwspec->iommu_priv = NULL; + dev_iommu_priv_set(dev, NULL); return ret; } @@ -2865,11 +3007,12 @@ static void arm_smmu_remove_device(struct device *dev) if (!fwspec || fwspec->ops != &arm_smmu_ops) return; - master = fwspec->iommu_priv; + master = dev_iommu_priv_get(dev); smmu = master->smmu; arm_smmu_detach_dev(master); iommu_group_remove_device(dev); iommu_device_unlink(&smmu->iommu, dev); + arm_smmu_disable_pasid(master); kfree(master); iommu_fwspec_free(dev); } @@ -3700,6 +3843,11 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) if (smmu->sid_bits <= STRTAB_SPLIT) smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB; + /* IDR3 */ + reg = readl_relaxed(smmu->base + ARM_SMMU_IDR3); + if (FIELD_GET(IDR3_RIL, reg)) + smmu->features |= ARM_SMMU_FEAT_RANGE_INV; + /* IDR5 */ reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5); diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 16c4b87af42b..a6a5796e9c41 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -98,12 +98,10 @@ struct arm_smmu_master_cfg { s16 smendx[]; }; #define INVALID_SMENDX -1 -#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv) -#define fwspec_smmu(fw) (__fwspec_cfg(fw)->smmu) -#define fwspec_smendx(fw, i) \ - (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i]) -#define for_each_cfg_sme(fw, i, idx) \ - for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i) +#define cfg_smendx(cfg, fw, i) \ + (i >= fw->num_ids ? INVALID_SMENDX : cfg->smendx[i]) +#define for_each_cfg_sme(cfg, fw, i, idx) \ + for (i = 0; idx = cfg_smendx(cfg, fw, i), i < fw->num_ids; ++i) static bool using_legacy_binding, using_generic_binding; @@ -1061,7 +1059,7 @@ static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx) static int arm_smmu_master_alloc_smes(struct device *dev) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; + struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev); struct arm_smmu_device *smmu = cfg->smmu; struct arm_smmu_smr *smrs = smmu->smrs; struct iommu_group *group; @@ -1069,7 +1067,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev) mutex_lock(&smmu->stream_map_mutex); /* Figure out a viable stream map entry allocation */ - for_each_cfg_sme(fwspec, i, idx) { + for_each_cfg_sme(cfg, fwspec, i, idx) { u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); @@ -1100,7 +1098,7 @@ static int arm_smmu_master_alloc_smes(struct device *dev) iommu_group_put(group); /* It worked! 
Now, poke the actual hardware */ - for_each_cfg_sme(fwspec, i, idx) { + for_each_cfg_sme(cfg, fwspec, i, idx) { arm_smmu_write_sme(smmu, idx); smmu->s2crs[idx].group = group; } @@ -1117,14 +1115,14 @@ out_err: return ret; } -static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec) +static void arm_smmu_master_free_smes(struct arm_smmu_master_cfg *cfg, + struct iommu_fwspec *fwspec) { - struct arm_smmu_device *smmu = fwspec_smmu(fwspec); - struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv; + struct arm_smmu_device *smmu = cfg->smmu; int i, idx; mutex_lock(&smmu->stream_map_mutex); - for_each_cfg_sme(fwspec, i, idx) { + for_each_cfg_sme(cfg, fwspec, i, idx) { if (arm_smmu_free_sme(smmu, idx)) arm_smmu_write_sme(smmu, idx); cfg->smendx[i] = INVALID_SMENDX; @@ -1133,6 +1131,7 @@ static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec) } static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, + struct arm_smmu_master_cfg *cfg, struct iommu_fwspec *fwspec) { struct arm_smmu_device *smmu = smmu_domain->smmu; @@ -1146,7 +1145,7 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, else type = S2CR_TYPE_TRANS; - for_each_cfg_sme(fwspec, i, idx) { + for_each_cfg_sme(cfg, fwspec, i, idx) { if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx) continue; @@ -1160,10 +1159,11 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) { - int ret; + struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct arm_smmu_master_cfg *cfg; struct arm_smmu_device *smmu; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); + int ret; if (!fwspec || fwspec->ops != &arm_smmu_ops) { dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n"); @@ -1177,10 +1177,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) * domains, just say no (but more politely than by dereferencing NULL). * This should be at least a WARN_ON once that's sorted. */ - if (!fwspec->iommu_priv) + cfg = dev_iommu_priv_get(dev); + if (!cfg) return -ENODEV; - smmu = fwspec_smmu(fwspec); + smmu = cfg->smmu; ret = arm_smmu_rpm_get(smmu); if (ret < 0) @@ -1204,7 +1205,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) } /* Looks ok, so add the device to the domain */ - ret = arm_smmu_domain_add_master(smmu_domain, fwspec); + ret = arm_smmu_domain_add_master(smmu_domain, cfg, fwspec); /* * Setup an autosuspend delay to avoid bouncing runpm state. 
@@ -1383,7 +1384,7 @@ struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode) static int arm_smmu_add_device(struct device *dev) { - struct arm_smmu_device *smmu; + struct arm_smmu_device *smmu = NULL; struct arm_smmu_master_cfg *cfg; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); int i, ret; @@ -1429,7 +1430,7 @@ static int arm_smmu_add_device(struct device *dev) goto out_free; cfg->smmu = smmu; - fwspec->iommu_priv = cfg; + dev_iommu_priv_set(dev, cfg); while (i--) cfg->smendx[i] = INVALID_SMENDX; @@ -1467,7 +1468,7 @@ static void arm_smmu_remove_device(struct device *dev) if (!fwspec || fwspec->ops != &arm_smmu_ops) return; - cfg = fwspec->iommu_priv; + cfg = dev_iommu_priv_get(dev); smmu = cfg->smmu; ret = arm_smmu_rpm_get(smmu); @@ -1475,23 +1476,25 @@ static void arm_smmu_remove_device(struct device *dev) return; iommu_device_unlink(&smmu->iommu, dev); - arm_smmu_master_free_smes(fwspec); + arm_smmu_master_free_smes(cfg, fwspec); arm_smmu_rpm_put(smmu); + dev_iommu_priv_set(dev, NULL); iommu_group_remove_device(dev); - kfree(fwspec->iommu_priv); + kfree(cfg); iommu_fwspec_free(dev); } static struct iommu_group *arm_smmu_device_group(struct device *dev) { + struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct arm_smmu_device *smmu = fwspec_smmu(fwspec); + struct arm_smmu_device *smmu = cfg->smmu; struct iommu_group *group = NULL; int i, idx; - for_each_cfg_sme(fwspec, i, idx) { + for_each_cfg_sme(cfg, fwspec, i, idx) { if (group && smmu->s2crs[idx].group && group != smmu->s2crs[idx].group) return ERR_PTR(-EINVAL); diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 4be549478691..ef0a5246700e 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4501,7 +4501,8 @@ static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr) struct dmar_atsr_unit *atsru; struct acpi_dmar_atsr *tmp; - list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) { + list_for_each_entry_rcu(atsru, &dmar_atsr_units, list, + dmar_rcu_check()) { tmp = (struct acpi_dmar_atsr *)atsru->hdr; if (atsr->segment != tmp->segment) continue; diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index d7f2a5358900..2998418f0a38 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -531,7 +531,7 @@ struct page_req_dsc { u64 priv_data[2]; }; -#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10) +#define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x20) static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req) { @@ -611,14 +611,15 @@ static irqreturn_t prq_event_thread(int irq, void *d) * any faults on kernel addresses. */ if (!svm->mm) goto bad_req; - /* If the mm is already defunct, don't handle faults. */ - if (!mmget_not_zero(svm->mm)) - goto bad_req; /* If address is not canonical, return invalid response */ if (!is_canonical_address(address)) goto bad_req; + /* If the mm is already defunct, don't handle faults. 
*/ + if (!mmget_not_zero(svm->mm)) + goto bad_req; + down_read(&svm->mm->mmap_sem); vma = find_extend_vma(svm->mm, address); if (!vma || address < vma->vm_start) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3e3528436e0b..2b471419e26c 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -152,9 +152,9 @@ void iommu_device_unregister(struct iommu_device *iommu) } EXPORT_SYMBOL_GPL(iommu_device_unregister); -static struct iommu_param *iommu_get_dev_param(struct device *dev) +static struct dev_iommu *dev_iommu_get(struct device *dev) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; if (param) return param; @@ -164,14 +164,14 @@ static struct iommu_param *iommu_get_dev_param(struct device *dev) return NULL; mutex_init(&param->lock); - dev->iommu_param = param; + dev->iommu = param; return param; } -static void iommu_free_dev_param(struct device *dev) +static void dev_iommu_free(struct device *dev) { - kfree(dev->iommu_param); - dev->iommu_param = NULL; + kfree(dev->iommu); + dev->iommu = NULL; } int iommu_probe_device(struct device *dev) @@ -183,7 +183,7 @@ int iommu_probe_device(struct device *dev) if (!ops) return -EINVAL; - if (!iommu_get_dev_param(dev)) + if (!dev_iommu_get(dev)) return -ENOMEM; if (!try_module_get(ops->owner)) { @@ -200,7 +200,7 @@ int iommu_probe_device(struct device *dev) err_module_put: module_put(ops->owner); err_free_dev_param: - iommu_free_dev_param(dev); + dev_iommu_free(dev); return ret; } @@ -211,9 +211,9 @@ void iommu_release_device(struct device *dev) if (dev->iommu_group) ops->remove_device(dev); - if (dev->iommu_param) { + if (dev->iommu) { module_put(ops->owner); - iommu_free_dev_param(dev); + dev_iommu_free(dev); } } @@ -972,7 +972,7 @@ int iommu_register_device_fault_handler(struct device *dev, iommu_dev_fault_handler_t handler, void *data) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; int ret = 0; if (!param) @@ -1015,7 +1015,7 @@ EXPORT_SYMBOL_GPL(iommu_register_device_fault_handler); */ int iommu_unregister_device_fault_handler(struct device *dev) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; int ret = 0; if (!param) @@ -1055,7 +1055,7 @@ EXPORT_SYMBOL_GPL(iommu_unregister_device_fault_handler); */ int iommu_report_device_fault(struct device *dev, struct iommu_fault_event *evt) { - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; struct iommu_fault_event *evt_pending = NULL; struct iommu_fault_param *fparam; int ret = 0; @@ -1104,7 +1104,7 @@ int iommu_page_response(struct device *dev, int ret = -EINVAL; struct iommu_fault_event *evt; struct iommu_fault_page_request *prm; - struct iommu_param *param = dev->iommu_param; + struct dev_iommu *param = dev->iommu; struct iommu_domain *domain = iommu_get_domain_for_dev(dev); if (!domain || !domain->ops->page_response) @@ -2405,7 +2405,11 @@ int iommu_fwspec_init(struct device *dev, struct fwnode_handle *iommu_fwnode, if (fwspec) return ops == fwspec->ops ? 
0 : -EINVAL; - fwspec = kzalloc(sizeof(*fwspec), GFP_KERNEL); + if (!dev_iommu_get(dev)) + return -ENOMEM; + + /* Preallocate for the overwhelmingly common case of 1 ID */ + fwspec = kzalloc(struct_size(fwspec, ids, 1), GFP_KERNEL); if (!fwspec) return -ENOMEM; @@ -2432,15 +2436,15 @@ EXPORT_SYMBOL_GPL(iommu_fwspec_free); int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) { struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - size_t size; - int i; + int i, new_num; if (!fwspec) return -EINVAL; - size = offsetof(struct iommu_fwspec, ids[fwspec->num_ids + num_ids]); - if (size > sizeof(*fwspec)) { - fwspec = krealloc(fwspec, size, GFP_KERNEL); + new_num = fwspec->num_ids + num_ids; + if (new_num > 1) { + fwspec = krealloc(fwspec, struct_size(fwspec, ids, new_num), + GFP_KERNEL); if (!fwspec) return -ENOMEM; @@ -2450,7 +2454,7 @@ int iommu_fwspec_add_ids(struct device *dev, u32 *ids, int num_ids) for (i = 0; i < num_ids; i++) fwspec->ids[fwspec->num_ids + i] = ids[i]; - fwspec->num_ids += num_ids; + fwspec->num_ids = new_num; return 0; } EXPORT_SYMBOL_GPL(iommu_fwspec_add_ids); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ecb3f9464dd5..310cf09feea3 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -89,9 +89,7 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) static struct ipmmu_vmsa_device *to_ipmmu(struct device *dev) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - - return fwspec ? fwspec->iommu_priv : NULL; + return dev_iommu_priv_get(dev); } #define TLB_LOOP_TIMEOUT 100 /* 100us */ @@ -727,14 +725,13 @@ static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain, static int ipmmu_init_platform_device(struct device *dev, struct of_phandle_args *args) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct platform_device *ipmmu_pdev; ipmmu_pdev = of_find_device_by_node(args->np); if (!ipmmu_pdev) return -ENODEV; - fwspec->iommu_priv = platform_get_drvdata(ipmmu_pdev); + dev_iommu_priv_set(dev, platform_get_drvdata(ipmmu_pdev)); return 0; } diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 95945f467c03..5f4d6df59cf6 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -358,8 +358,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain) static int mtk_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); struct mtk_iommu_domain *dom = to_mtk_domain(domain); - struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv; if (!data) return -ENODEV; @@ -378,7 +378,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, static void mtk_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv; + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); if (!data) return; @@ -450,7 +450,7 @@ static int mtk_iommu_add_device(struct device *dev) if (!fwspec || fwspec->ops != &mtk_iommu_ops) return -ENODEV; /* Not a iommu client device */ - data = fwspec->iommu_priv; + data = dev_iommu_priv_get(dev); iommu_device_link(&data->iommu, dev); group = iommu_group_get_for_dev(dev); @@ -469,7 +469,7 @@ static void mtk_iommu_remove_device(struct device *dev) if (!fwspec || fwspec->ops != &mtk_iommu_ops) return; - data = fwspec->iommu_priv; + data = dev_iommu_priv_get(dev); iommu_device_unlink(&data->iommu, dev); iommu_group_remove_device(dev); 
@@ -496,7 +496,6 @@ static struct iommu_group *mtk_iommu_device_group(struct device *dev) static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct platform_device *m4updev; if (args->args_count != 1) { @@ -505,13 +504,13 @@ static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) return -EINVAL; } - if (!fwspec->iommu_priv) { + if (!dev_iommu_priv_get(dev)) { /* Get the m4u device */ m4updev = of_find_device_by_node(args->np); if (WARN_ON(!m4updev)) return -EINVAL; - fwspec->iommu_priv = platform_get_drvdata(m4updev); + dev_iommu_priv_set(dev, platform_get_drvdata(m4updev)); } return iommu_fwspec_add_ids(dev, args->args, 1); diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index e93b94ecac45..a31be05601c9 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c @@ -263,8 +263,8 @@ static void mtk_iommu_domain_free(struct iommu_domain *domain) static int mtk_iommu_attach_device(struct iommu_domain *domain, struct device *dev) { + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); struct mtk_iommu_domain *dom = to_mtk_domain(domain); - struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv; int ret; if (!data) @@ -286,7 +286,7 @@ static int mtk_iommu_attach_device(struct iommu_domain *domain, static void mtk_iommu_detach_device(struct iommu_domain *domain, struct device *dev) { - struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv; + struct mtk_iommu_data *data = dev_iommu_priv_get(dev); if (!data) return; @@ -387,20 +387,20 @@ static int mtk_iommu_create_mapping(struct device *dev, return -EINVAL; } - if (!fwspec->iommu_priv) { + if (!dev_iommu_priv_get(dev)) { /* Get the m4u device */ m4updev = of_find_device_by_node(args->np); if (WARN_ON(!m4updev)) return -EINVAL; - fwspec->iommu_priv = platform_get_drvdata(m4updev); + dev_iommu_priv_set(dev, platform_get_drvdata(m4updev)); } ret = iommu_fwspec_add_ids(dev, args->args, 1); if (ret) return ret; - data = fwspec->iommu_priv; + data = dev_iommu_priv_get(dev); m4udev = data->dev; mtk_mapping = m4udev->archdata.iommu; if (!mtk_mapping) { @@ -459,7 +459,7 @@ static int mtk_iommu_add_device(struct device *dev) if (err) return err; - data = fwspec->iommu_priv; + data = dev_iommu_priv_get(dev); mtk_mapping = data->dev->archdata.iommu; err = arm_iommu_attach_device(dev, mtk_mapping); if (err) { @@ -478,7 +478,7 @@ static void mtk_iommu_remove_device(struct device *dev) if (!fwspec || fwspec->ops != &mtk_iommu_ops) return; - data = fwspec->iommu_priv; + data = dev_iommu_priv_get(dev); iommu_device_unlink(&data->iommu, dev); iommu_group_remove_device(dev); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index be551cc34be4..887fefcb03b4 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -167,7 +167,7 @@ static int omap2_iommu_enable(struct omap_iommu *obj) { u32 l, pa; - if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K)) + if (!obj->iopgd || !IS_ALIGNED((unsigned long)obj->iopgd, SZ_16K)) return -EINVAL; pa = virt_to_phys(obj->iopgd); @@ -434,7 +434,7 @@ static void flush_iotlb_page(struct omap_iommu *obj, u32 da) bytes = iopgsz_to_bytes(cr.cam & 3); if ((start <= da) && (da < start + bytes)) { - dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n", + dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n", __func__, start, da, bytes); iotlb_load_cr(obj, &cr); iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); @@ -1352,11 +1352,11 @@ static int 
omap_iommu_map(struct iommu_domain *domain, unsigned long da, omap_pgsz = bytes_to_iopgsz(bytes); if (omap_pgsz < 0) { - dev_err(dev, "invalid size to map: %d\n", bytes); + dev_err(dev, "invalid size to map: %zu\n", bytes); return -EINVAL; } - dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%x\n", da, &pa, bytes); + dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes); iotlb_init_entry(&e, da, pa, omap_pgsz); @@ -1393,7 +1393,7 @@ static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da, size_t bytes = 0; int i; - dev_dbg(dev, "unmapping da 0x%lx size %u\n", da, size); + dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size); iommu = omap_domain->iommus; for (i = 0; i < omap_domain->num_iommus; i++, iommu++) { diff --git a/drivers/iommu/omap-iopgtable.h b/drivers/iommu/omap-iopgtable.h index 1a4adb59a859..51d74002cc30 100644 --- a/drivers/iommu/omap-iopgtable.h +++ b/drivers/iommu/omap-iopgtable.h @@ -63,7 +63,8 @@ * * va to pa translation */ -static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask) +static inline phys_addr_t omap_iommu_translate(unsigned long d, dma_addr_t va, + dma_addr_t mask) { return (d & mask) | (va & (~mask)); } diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c index 4328da0b0a9f..0e2a96467767 100644 --- a/drivers/iommu/qcom_iommu.c +++ b/drivers/iommu/qcom_iommu.c @@ -48,7 +48,7 @@ struct qcom_iommu_dev { void __iomem *local_base; u32 sec_id; u8 num_ctxs; - struct qcom_iommu_ctx *ctxs[0]; /* indexed by asid-1 */ + struct qcom_iommu_ctx *ctxs[]; /* indexed by asid-1 */ }; struct qcom_iommu_ctx { @@ -74,16 +74,19 @@ static struct qcom_iommu_domain *to_qcom_iommu_domain(struct iommu_domain *dom) static const struct iommu_ops qcom_iommu_ops; -static struct qcom_iommu_dev * to_iommu(struct iommu_fwspec *fwspec) +static struct qcom_iommu_dev * to_iommu(struct device *dev) { + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + if (!fwspec || fwspec->ops != &qcom_iommu_ops) return NULL; - return fwspec->iommu_priv; + + return dev_iommu_priv_get(dev); } -static struct qcom_iommu_ctx * to_ctx(struct iommu_fwspec *fwspec, unsigned asid) +static struct qcom_iommu_ctx * to_ctx(struct device *dev, unsigned asid) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); if (!qcom_iommu) return NULL; return qcom_iommu->ctxs[asid - 1]; @@ -115,11 +118,14 @@ iommu_readq(struct qcom_iommu_ctx *ctx, unsigned reg) static void qcom_iommu_tlb_sync(void *cookie) { - struct iommu_fwspec *fwspec = cookie; + struct iommu_fwspec *fwspec; + struct device *dev = cookie; unsigned i; + fwspec = dev_iommu_fwspec_get(dev); + for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); unsigned int val, ret; iommu_writel(ctx, ARM_SMMU_CB_TLBSYNC, 0); @@ -133,11 +139,14 @@ static void qcom_iommu_tlb_sync(void *cookie) static void qcom_iommu_tlb_inv_context(void *cookie) { - struct iommu_fwspec *fwspec = cookie; + struct device *dev = cookie; + struct iommu_fwspec *fwspec; unsigned i; + fwspec = dev_iommu_fwspec_get(dev); + for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); iommu_writel(ctx, ARM_SMMU_CB_S1_TLBIASID, ctx->asid); } @@ -147,13 +156,16 @@ static void qcom_iommu_tlb_inv_context(void *cookie) static void qcom_iommu_tlb_inv_range_nosync(unsigned 
long iova, size_t size, size_t granule, bool leaf, void *cookie) { - struct iommu_fwspec *fwspec = cookie; + struct device *dev = cookie; + struct iommu_fwspec *fwspec; unsigned i, reg; reg = leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; + fwspec = dev_iommu_fwspec_get(dev); + for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); size_t s = size; iova = (iova >> 12) << 12; @@ -222,9 +234,10 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev) static int qcom_iommu_init_domain(struct iommu_domain *domain, struct qcom_iommu_dev *qcom_iommu, - struct iommu_fwspec *fwspec) + struct device *dev) { struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct io_pgtable_ops *pgtbl_ops; struct io_pgtable_cfg pgtbl_cfg; int i, ret = 0; @@ -243,7 +256,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, }; qcom_domain->iommu = qcom_iommu; - pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, fwspec); + pgtbl_ops = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &pgtbl_cfg, dev); if (!pgtbl_ops) { dev_err(qcom_iommu->dev, "failed to allocate pagetable ops\n"); ret = -ENOMEM; @@ -256,7 +269,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, domain->geometry.force_aperture = true; for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); if (!ctx->secure_init) { ret = qcom_scm_restore_sec_cfg(qcom_iommu->sec_id, ctx->asid); @@ -363,8 +376,7 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain) static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); int ret; @@ -375,7 +387,7 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev /* Ensure that the domain is finalized */ pm_runtime_get_sync(qcom_iommu->dev); - ret = qcom_iommu_init_domain(domain, qcom_iommu, fwspec); + ret = qcom_iommu_init_domain(domain, qcom_iommu, dev); pm_runtime_put_sync(qcom_iommu->dev); if (ret < 0) return ret; @@ -397,9 +409,9 @@ static int qcom_iommu_attach_dev(struct iommu_domain *domain, struct device *dev static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *dev) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct qcom_iommu_dev *qcom_iommu = to_iommu(fwspec); struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain); + struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); unsigned i; if (WARN_ON(!qcom_domain->iommu)) @@ -407,7 +419,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de pm_runtime_get_sync(qcom_iommu->dev); for (i = 0; i < fwspec->num_ids; i++) { - struct qcom_iommu_ctx *ctx = to_ctx(fwspec, fwspec->ids[i]); + struct qcom_iommu_ctx *ctx = to_ctx(dev, fwspec->ids[i]); /* Disable the context bank: */ iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0); @@ -514,7 +526,7 @@ static bool qcom_iommu_capable(enum iommu_cap cap) static int qcom_iommu_add_device(struct device *dev) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev)); + 
struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); struct iommu_group *group; struct device_link *link; @@ -545,7 +557,7 @@ static int qcom_iommu_add_device(struct device *dev) static void qcom_iommu_remove_device(struct device *dev) { - struct qcom_iommu_dev *qcom_iommu = to_iommu(dev_iommu_fwspec_get(dev)); + struct qcom_iommu_dev *qcom_iommu = to_iommu(dev); if (!qcom_iommu) return; @@ -557,7 +569,6 @@ static void qcom_iommu_remove_device(struct device *dev) static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) { - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct qcom_iommu_dev *qcom_iommu; struct platform_device *iommu_pdev; unsigned asid = args->args[0]; @@ -583,14 +594,14 @@ static int qcom_iommu_of_xlate(struct device *dev, struct of_phandle_args *args) WARN_ON(asid > qcom_iommu->num_ctxs)) return -EINVAL; - if (!fwspec->iommu_priv) { - fwspec->iommu_priv = qcom_iommu; + if (!dev_iommu_priv_get(dev)) { + dev_iommu_priv_set(dev, qcom_iommu); } else { /* make sure devices iommus dt node isn't referring to * multiple different iommu devices. Multiple context * banks are ok, but multiple devices are not: */ - if (WARN_ON(qcom_iommu != fwspec->iommu_priv)) + if (WARN_ON(qcom_iommu != dev_iommu_priv_get(dev))) return -EINVAL; } diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c index 3fb7ba72507d..db6559e8336f 100644 --- a/drivers/iommu/tegra-gart.c +++ b/drivers/iommu/tegra-gart.c @@ -247,7 +247,7 @@ static int gart_iommu_add_device(struct device *dev) { struct iommu_group *group; - if (!dev->iommu_fwspec) + if (!dev_iommu_fwspec_get(dev)) return -ENODEV; group = iommu_group_get_for_dev(dev); diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index cce329d71fba..d5cac4f46ca5 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -466,7 +466,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) struct virtio_iommu_req_probe *probe; struct virtio_iommu_probe_property *prop; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct viommu_endpoint *vdev = fwspec->iommu_priv; + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); if (!fwspec->num_ids) return -EINVAL; @@ -607,24 +607,36 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type) return &vdomain->domain; } -static int viommu_domain_finalise(struct viommu_dev *viommu, +static int viommu_domain_finalise(struct viommu_endpoint *vdev, struct iommu_domain *domain) { int ret; + unsigned long viommu_page_size; + struct viommu_dev *viommu = vdev->viommu; struct viommu_domain *vdomain = to_viommu_domain(domain); - vdomain->viommu = viommu; - vdomain->map_flags = viommu->map_flags; + viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap); + if (viommu_page_size > PAGE_SIZE) { + dev_err(vdev->dev, + "granule 0x%lx larger than system page size 0x%lx\n", + viommu_page_size, PAGE_SIZE); + return -EINVAL; + } + + ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, + viommu->last_domain, GFP_KERNEL); + if (ret < 0) + return ret; + + vdomain->id = (unsigned int)ret; domain->pgsize_bitmap = viommu->pgsize_bitmap; domain->geometry = viommu->geometry; - ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain, - viommu->last_domain, GFP_KERNEL); - if (ret >= 0) - vdomain->id = (unsigned int)ret; + vdomain->map_flags = viommu->map_flags; + vdomain->viommu = viommu; - return ret > 0 ? 
0 : ret; + return 0; } static void viommu_domain_free(struct iommu_domain *domain) @@ -648,7 +660,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) int ret = 0; struct virtio_iommu_req_attach req; struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct viommu_endpoint *vdev = fwspec->iommu_priv; + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); struct viommu_domain *vdomain = to_viommu_domain(domain); mutex_lock(&vdomain->mutex); @@ -657,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev) * Properly initialize the domain now that we know which viommu * owns it. */ - ret = viommu_domain_finalise(vdev->viommu, domain); + ret = viommu_domain_finalise(vdev, domain); } else if (vdomain->viommu != vdev->viommu) { dev_err(dev, "cannot attach to foreign vIOMMU\n"); ret = -EXDEV; @@ -807,8 +819,7 @@ static void viommu_iotlb_sync(struct iommu_domain *domain, static void viommu_get_resv_regions(struct device *dev, struct list_head *head) { struct iommu_resv_region *entry, *new_entry, *msi = NULL; - struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); - struct viommu_endpoint *vdev = fwspec->iommu_priv; + struct viommu_endpoint *vdev = dev_iommu_priv_get(dev); int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; list_for_each_entry(entry, &vdev->resv_regions, list) { @@ -876,7 +887,7 @@ static int viommu_add_device(struct device *dev) vdev->dev = dev; vdev->viommu = viommu; INIT_LIST_HEAD(&vdev->resv_regions); - fwspec->iommu_priv = vdev; + dev_iommu_priv_set(dev, vdev); if (viommu->probe_size) { /* Get additional information for this endpoint */ @@ -920,7 +931,7 @@ static void viommu_remove_device(struct device *dev) if (!fwspec || fwspec->ops != &viommu_ops) return; - vdev = fwspec->iommu_priv; + vdev = dev_iommu_priv_get(dev); iommu_group_remove_device(dev); iommu_device_unlink(&vdev->viommu->iommu, dev); @@ -1082,7 +1093,6 @@ static int viommu_probe(struct virtio_device *vdev) #ifdef CONFIG_PCI if (pci_bus_type.iommu_ops != &viommu_ops) { - pci_request_acs(); ret = bus_set_iommu(&pci_bus_type, &viommu_ops); if (ret) goto err_unregister; diff --git a/drivers/irqchip/irq-xilinx-intc.c b/drivers/irqchip/irq-xilinx-intc.c index 7f811fe5bf69..1d3d273309bd 100644 --- a/drivers/irqchip/irq-xilinx-intc.c +++ b/drivers/irqchip/irq-xilinx-intc.c @@ -124,6 +124,20 @@ static unsigned int xintc_get_irq_local(struct xintc_irq_chip *irqc) return irq; } +unsigned int xintc_get_irq(void) +{ + unsigned int irq = -1; + u32 hwirq; + + hwirq = xintc_read(primary_intc, IVR); + if (hwirq != -1U) + irq = irq_find_mapping(primary_intc->root_domain, hwirq); + + pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq); + + return irq; +} + static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { struct xintc_irq_chip *irqc = d->host_data; @@ -163,25 +177,6 @@ static void xil_intc_irq_handler(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void xil_intc_handle_irq(struct pt_regs *regs) -{ - u32 hwirq; - struct xintc_irq_chip *irqc = primary_intc; - - do { - hwirq = xintc_read(irqc, IVR); - if (likely(hwirq != -1U)) { - int ret; - - ret = handle_domain_irq(irqc->root_domain, hwirq, regs); - WARN_ONCE(ret, "Unhandled HWIRQ %d\n", hwirq); - continue; - } - - break; - } while (1); -} - static int __init xilinx_intc_of_init(struct device_node *intc, struct device_node *parent) { @@ -250,7 +245,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc, } } else { primary_intc = irqc; - 
set_handle_irq(xil_intc_handle_irq); + irq_set_default_host(primary_intc->root_domain); } return 0; diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index d82f1dea3711..c664d84e1667 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -846,6 +846,17 @@ config LEDS_TPS6105X It is a single boost converter primarily for white LEDs and audio amplifiers. +config LEDS_IP30 + tristate "LED support for SGI Octane machines" + depends on LEDS_CLASS + depends on SGI_MFD_IOC3 + help + This option enables support for the Red and White LEDs of + SGI Octane machines. + + To compile this driver as a module, choose M here: the module + will be called leds-ip30. + comment "LED Triggers" source "drivers/leds/trigger/Kconfig" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index d7e1107753fb..45235d5fb218 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -6,91 +6,92 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o obj-$(CONFIG_LEDS_CLASS_FLASH) += led-class-flash.o obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o -# LED Platform Drivers +# LED Platform Drivers (keep this sorted, M-| sort) obj-$(CONFIG_LEDS_88PM860X) += leds-88pm860x.o obj-$(CONFIG_LEDS_AAT1290) += leds-aat1290.o +obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o +obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o obj-$(CONFIG_LEDS_APU) += leds-apu.o obj-$(CONFIG_LEDS_AS3645A) += leds-as3645a.o -obj-$(CONFIG_LEDS_AN30259A) += leds-an30259a.o +obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o obj-$(CONFIG_LEDS_BCM6328) += leds-bcm6328.o obj-$(CONFIG_LEDS_BCM6358) += leds-bcm6358.o obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o +obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o +obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o +obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o +obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o obj-$(CONFIG_LEDS_CPCAP) += leds-cpcap.o -obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o +obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o +obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o +obj-$(CONFIG_LEDS_FSG) += leds-fsg.o +obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o +obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o +obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o +obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o +obj-$(CONFIG_LEDS_IP30) += leds-ip30.o +obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o +obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o +obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o +obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o obj-$(CONFIG_LEDS_LM3532) += leds-lm3532.o obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o +obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o +obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o +obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o obj-$(CONFIG_LEDS_LM3642) += leds-lm3642.o -obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o -obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o -obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o -obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o -obj-$(CONFIG_LEDS_COBALT_QUBE) += leds-cobalt-qube.o -obj-$(CONFIG_LEDS_COBALT_RAQ) += leds-cobalt-raq.o -obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o -obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o -obj-$(CONFIG_LEDS_GPIO_REGISTER) += leds-gpio-register.o -obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o +obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o +obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o +obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o obj-$(CONFIG_LEDS_LP3952) += leds-lp3952.o -obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o obj-$(CONFIG_LEDS_LP5521) += 
leds-lp5521.o obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o obj-$(CONFIG_LEDS_LP5562) += leds-lp5562.o +obj-$(CONFIG_LEDS_LP55XX_COMMON) += leds-lp55xx-common.o obj-$(CONFIG_LEDS_LP8501) += leds-lp8501.o obj-$(CONFIG_LEDS_LP8788) += leds-lp8788.o obj-$(CONFIG_LEDS_LP8860) += leds-lp8860.o -obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o -obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o -obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o -obj-$(CONFIG_LEDS_IPAQ_MICRO) += leds-ipaq-micro.o -obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o -obj-$(CONFIG_LEDS_OT200) += leds-ot200.o -obj-$(CONFIG_LEDS_FSG) += leds-fsg.o -obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o -obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o -obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o -obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o -obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o -obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o -obj-$(CONFIG_LEDS_PWM) += leds-pwm.o -obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o -obj-$(CONFIG_LEDS_INTEL_SS4200) += leds-ss4200.o obj-$(CONFIG_LEDS_LT3593) += leds-lt3593.o -obj-$(CONFIG_LEDS_ADP5520) += leds-adp5520.o -obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o -obj-$(CONFIG_LEDS_NS2) += leds-ns2.o -obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o -obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o obj-$(CONFIG_LEDS_MAX77650) += leds-max77650.o obj-$(CONFIG_LEDS_MAX77693) += leds-max77693.o obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o -obj-$(CONFIG_LEDS_LM355x) += leds-lm355x.o -obj-$(CONFIG_LEDS_BLINKM) += leds-blinkm.o -obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o +obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o obj-$(CONFIG_LEDS_MENF21BMC) += leds-menf21bmc.o -obj-$(CONFIG_LEDS_KTD2692) += leds-ktd2692.o -obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o -obj-$(CONFIG_LEDS_IS31FL319X) += leds-is31fl319x.o -obj-$(CONFIG_LEDS_IS31FL32XX) += leds-is31fl32xx.o -obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o +obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o obj-$(CONFIG_LEDS_MLXCPLD) += leds-mlxcpld.o obj-$(CONFIG_LEDS_MLXREG) += leds-mlxreg.o -obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o -obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o obj-$(CONFIG_LEDS_MT6323) += leds-mt6323.o -obj-$(CONFIG_LEDS_LM3692X) += leds-lm3692x.o +obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o +obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o +obj-$(CONFIG_LEDS_NIC78BX) += leds-nic78bx.o +obj-$(CONFIG_LEDS_NS2) += leds-ns2.o +obj-$(CONFIG_LEDS_OT200) += leds-ot200.o +obj-$(CONFIG_LEDS_PCA9532) += leds-pca9532.o +obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o +obj-$(CONFIG_LEDS_PCA963X) += leds-pca963x.o +obj-$(CONFIG_LEDS_PM8058) += leds-pm8058.o +obj-$(CONFIG_LEDS_POWERNV) += leds-powernv.o +obj-$(CONFIG_LEDS_PWM) += leds-pwm.o +obj-$(CONFIG_LEDS_REGULATOR) += leds-regulator.o +obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o obj-$(CONFIG_LEDS_SC27XX_BLTC) += leds-sc27xx-bltc.o -obj-$(CONFIG_LEDS_LM3601X) += leds-lm3601x.o +obj-$(CONFIG_LEDS_SUNFIRE) += leds-sunfire.o +obj-$(CONFIG_LEDS_SYSCON) += leds-syscon.o +obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o obj-$(CONFIG_LEDS_TI_LMU_COMMON) += leds-ti-lmu-common.o -obj-$(CONFIG_LEDS_LM3697) += leds-lm3697.o -obj-$(CONFIG_LEDS_LM36274) += leds-lm36274.o +obj-$(CONFIG_LEDS_TLC591XX) += leds-tlc591xx.o obj-$(CONFIG_LEDS_TPS6105X) += leds-tps6105x.o +obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o +obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o +obj-$(CONFIG_LEDS_WRAP) += leds-wrap.o # LED SPI Drivers obj-$(CONFIG_LEDS_CR0014114) += leds-cr0014114.o obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o 
obj-$(CONFIG_LEDS_EL15203000) += leds-el15203000.o +obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o # LED Userspace Drivers obj-$(CONFIG_LEDS_USER) += uleds.o diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 1fc40e8af75e..3363a6551a70 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c @@ -376,7 +376,7 @@ int led_classdev_register_ext(struct device *parent, if (ret) dev_warn(parent, "Led %s renamed to %s due to name collision", - led_cdev->name, dev_name(led_cdev->dev)); + proposed_name, dev_name(led_cdev->dev)); if (led_cdev->flags & LED_BRIGHT_HW_CHANGED) { ret = led_add_brightness_hw_changed(led_cdev); diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c index bd61a823d0ca..8bbaef5a2986 100644 --- a/drivers/leds/leds-bd2802.c +++ b/drivers/leds/leds-bd2802.c @@ -660,7 +660,6 @@ static int bd2802_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct bd2802_led *led; - struct bd2802_led_platform_data *pdata; int ret, i; led = devm_kzalloc(&client->dev, sizeof(struct bd2802_led), GFP_KERNEL); @@ -668,7 +667,6 @@ static int bd2802_probe(struct i2c_client *client, return -ENOMEM; led->client = client; - pdata = led->pdata = dev_get_platdata(&client->dev); i2c_set_clientdata(client, led); /* diff --git a/drivers/leds/leds-ip30.c b/drivers/leds/leds-ip30.c new file mode 100644 index 000000000000..d4ec7361c616 --- /dev/null +++ b/drivers/leds/leds-ip30.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * LED Driver for SGI Octane machines + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/leds.h> + +#define IP30_LED_SYSTEM 0 +#define IP30_LED_FAULT 1 + +struct ip30_led { + struct led_classdev cdev; + u32 __iomem *reg; +}; + +static void ip30led_set(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct ip30_led *led = container_of(led_cdev, struct ip30_led, cdev); + + writel(value, led->reg); +} + +static int ip30led_create(struct platform_device *pdev, int num) +{ + struct resource *res; + struct ip30_led *data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, num); + if (!res) + return -EBUSY; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->reg)) + return PTR_ERR(data->reg); + + + switch (num) { + case IP30_LED_SYSTEM: + data->cdev.name = "white:power"; + break; + case IP30_LED_FAULT: + data->cdev.name = "red:fault"; + break; + default: + return -EINVAL; + } + + data->cdev.brightness = readl(data->reg); + data->cdev.max_brightness = 1; + data->cdev.brightness_set = ip30led_set; + + return devm_led_classdev_register(&pdev->dev, &data->cdev); +} + +static int ip30led_probe(struct platform_device *pdev) +{ + int ret; + + ret = ip30led_create(pdev, IP30_LED_SYSTEM); + if (ret < 0) + return ret; + + return ip30led_create(pdev, IP30_LED_FAULT); +} + +static struct platform_driver ip30led_driver = { + .probe = ip30led_probe, + .driver = { + .name = "ip30-leds", + }, +}; + +module_platform_driver(ip30led_driver); + +MODULE_AUTHOR("Thomas Bogendoerfer <tbogendoerfer@suse.de>"); +MODULE_DESCRIPTION("SGI Octane LED driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:ip30-leds"); diff --git a/drivers/leds/leds-is31fl32xx.c b/drivers/leds/leds-is31fl32xx.c index 6f29b8943913..cd768f991da1 100644 --- a/drivers/leds/leds-is31fl32xx.c +++ b/drivers/leds/leds-is31fl32xx.c @@ -44,7 +44,7 @@ struct is31fl32xx_priv { 
const struct is31fl32xx_chipdef *cdef; struct i2c_client *client; unsigned int num_leds; - struct is31fl32xx_led_data leds[0]; + struct is31fl32xx_led_data leds[]; }; /** diff --git a/drivers/leds/leds-lm3532.c b/drivers/leds/leds-lm3532.c index 188a57da981a..aa9bf8cda673 100644 --- a/drivers/leds/leds-lm3532.c +++ b/drivers/leds/leds-lm3532.c @@ -140,7 +140,7 @@ struct lm3532_led { int ctrl_brt_pointer; int num_leds; int full_scale_current; - int enabled:1; + unsigned int enabled:1; u32 led_strings[LM3532_MAX_CONTROL_BANKS]; char label[LED_MAX_NAME_SIZE]; }; diff --git a/drivers/leds/leds-lm3697.c b/drivers/leds/leds-lm3697.c index b71711aff8a3..872d26f9706a 100644 --- a/drivers/leds/leds-lm3697.c +++ b/drivers/leds/leds-lm3697.c @@ -246,7 +246,7 @@ static int lm3697_probe_dt(struct lm3697 *priv) led->num_leds = fwnode_property_count_u32(child, "led-sources"); if (led->num_leds > LM3697_MAX_LED_STRINGS) { - dev_err(&priv->client->dev, "To many LED strings defined\n"); + dev_err(&priv->client->dev, "Too many LED strings defined\n"); continue; } diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c index 7c500dfdcfa3..538ca5755602 100644 --- a/drivers/leds/leds-ns2.c +++ b/drivers/leds/leds-ns2.c @@ -12,14 +12,38 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/leds.h> #include <linux/module.h> -#include <linux/platform_data/leds-kirkwood-ns2.h> #include <linux/of.h> -#include <linux/of_gpio.h> #include "leds.h" +enum ns2_led_modes { + NS_V2_LED_OFF, + NS_V2_LED_ON, + NS_V2_LED_SATA, +}; + +struct ns2_led_modval { + enum ns2_led_modes mode; + int cmd_level; + int slow_level; +}; + +struct ns2_led { + const char *name; + const char *default_trigger; + struct gpio_desc *cmd; + struct gpio_desc *slow; + int num_modes; + struct ns2_led_modval *modval; +}; + +struct ns2_led_platform_data { + int num_leds; + struct ns2_led *leds; +}; + /* * The Network Space v2 dual-GPIO LED is wired to a CPLD. Three different LED * modes are available: off, on and SATA activity blinking. The LED modes are @@ -29,8 +53,8 @@ struct ns2_led_data { struct led_classdev cdev; - unsigned int cmd; - unsigned int slow; + struct gpio_desc *cmd; + struct gpio_desc *slow; bool can_sleep; unsigned char sata; /* True when SATA mode active. */ rwlock_t rw_lock; /* Lock GPIOs. 
*/ @@ -46,8 +70,8 @@ static int ns2_led_get_mode(struct ns2_led_data *led_dat, int cmd_level; int slow_level; - cmd_level = gpio_get_value_cansleep(led_dat->cmd); - slow_level = gpio_get_value_cansleep(led_dat->slow); + cmd_level = gpiod_get_value_cansleep(led_dat->cmd); + slow_level = gpiod_get_value_cansleep(led_dat->slow); for (i = 0; i < led_dat->num_modes; i++) { if (cmd_level == led_dat->modval[i].cmd_level && @@ -80,15 +104,15 @@ static void ns2_led_set_mode(struct ns2_led_data *led_dat, write_lock_irqsave(&led_dat->rw_lock, flags); if (!led_dat->can_sleep) { - gpio_set_value(led_dat->cmd, - led_dat->modval[i].cmd_level); - gpio_set_value(led_dat->slow, - led_dat->modval[i].slow_level); + gpiod_set_value(led_dat->cmd, + led_dat->modval[i].cmd_level); + gpiod_set_value(led_dat->slow, + led_dat->modval[i].slow_level); goto exit_unlock; } - gpio_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level); - gpio_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level); + gpiod_set_value_cansleep(led_dat->cmd, led_dat->modval[i].cmd_level); + gpiod_set_value_cansleep(led_dat->slow, led_dat->modval[i].slow_level); exit_unlock: write_unlock_irqrestore(&led_dat->rw_lock, flags); @@ -176,26 +200,6 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, int ret; enum ns2_led_modes mode; - ret = devm_gpio_request_one(&pdev->dev, template->cmd, - gpio_get_value_cansleep(template->cmd) ? - GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, - template->name); - if (ret) { - dev_err(&pdev->dev, "%s: failed to setup command GPIO\n", - template->name); - return ret; - } - - ret = devm_gpio_request_one(&pdev->dev, template->slow, - gpio_get_value_cansleep(template->slow) ? - GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, - template->name); - if (ret) { - dev_err(&pdev->dev, "%s: failed to setup slow GPIO\n", - template->name); - return ret; - } - rwlock_init(&led_dat->rw_lock); led_dat->cdev.name = template->name; @@ -205,8 +209,8 @@ create_ns2_led(struct platform_device *pdev, struct ns2_led_data *led_dat, led_dat->cdev.groups = ns2_led_groups; led_dat->cmd = template->cmd; led_dat->slow = template->slow; - led_dat->can_sleep = gpio_cansleep(led_dat->cmd) | - gpio_cansleep(led_dat->slow); + led_dat->can_sleep = gpiod_cansleep(led_dat->cmd) | + gpiod_cansleep(led_dat->slow); if (led_dat->can_sleep) led_dat->cdev.brightness_set_blocking = ns2_led_set_blocking; else @@ -261,17 +265,26 @@ ns2_leds_get_of_pdata(struct device *dev, struct ns2_led_platform_data *pdata) const char *string; int i, num_modes; struct ns2_led_modval *modval; + struct gpio_desc *gd; - ret = of_get_named_gpio(child, "cmd-gpio", 0); - if (ret < 0) - goto err_node_put; - led->cmd = ret; - ret = of_get_named_gpio(child, "slow-gpio", 0); - if (ret < 0) - goto err_node_put; - led->slow = ret; ret = of_property_read_string(child, "label", &string); led->name = (ret == 0) ? 
string : child->name; + + gd = gpiod_get_from_of_node(child, "cmd-gpio", 0, + GPIOD_ASIS, led->name); + if (IS_ERR(gd)) { + ret = PTR_ERR(gd); + goto err_node_put; + } + led->cmd = gd; + gd = gpiod_get_from_of_node(child, "slow-gpio", 0, + GPIOD_ASIS, led->name); + if (IS_ERR(gd)) { + ret = PTR_ERR(gd); + goto err_node_put; + } + led->slow = gd; + ret = of_property_read_string(child, "linux,default-trigger", &string); if (ret == 0) diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c index 8b6965a563e9..6c8a724aac51 100644 --- a/drivers/leds/leds-pwm.c +++ b/drivers/leds/leds-pwm.c @@ -16,60 +16,55 @@ #include <linux/leds.h> #include <linux/err.h> #include <linux/pwm.h> -#include <linux/leds_pwm.h> #include <linux/slab.h> +struct led_pwm { + const char *name; + const char *default_trigger; + u8 active_low; + unsigned int max_brightness; +}; + +struct led_pwm_platform_data { + int num_leds; + struct led_pwm *leds; +}; + struct led_pwm_data { struct led_classdev cdev; struct pwm_device *pwm; + struct pwm_state pwmstate; unsigned int active_low; - unsigned int period; - int duty; }; struct led_pwm_priv { int num_leds; - struct led_pwm_data leds[0]; + struct led_pwm_data leds[]; }; -static void __led_pwm_set(struct led_pwm_data *led_dat) -{ - int new_duty = led_dat->duty; - - pwm_config(led_dat->pwm, new_duty, led_dat->period); - - if (new_duty == 0) - pwm_disable(led_dat->pwm); - else - pwm_enable(led_dat->pwm); -} - static int led_pwm_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct led_pwm_data *led_dat = container_of(led_cdev, struct led_pwm_data, cdev); unsigned int max = led_dat->cdev.max_brightness; - unsigned long long duty = led_dat->period; + unsigned long long duty = led_dat->pwmstate.period; duty *= brightness; do_div(duty, max); if (led_dat->active_low) - duty = led_dat->period - duty; - - led_dat->duty = duty; - - __led_pwm_set(led_dat); + duty = led_dat->pwmstate.period - duty; - return 0; + led_dat->pwmstate.duty_cycle = duty; + led_dat->pwmstate.enabled = duty > 0; + return pwm_apply_state(led_dat->pwm, &led_dat->pwmstate); } static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, struct led_pwm *led, struct fwnode_handle *fwnode) { struct led_pwm_data *led_data = &priv->leds[priv->num_leds]; - struct pwm_args pargs; int ret; led_data->active_low = led->active_low; @@ -93,17 +88,7 @@ static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, led_data->cdev.brightness_set_blocking = led_pwm_set; - /* - * FIXME: pwm_apply_args() should be removed when switching to the - * atomic PWM API. 
- */ - pwm_apply_args(led_data->pwm); - - pwm_get_args(led_data->pwm, &pargs); - - led_data->period = pargs.period; - if (!led_data->period && (led->pwm_period_ns > 0)) - led_data->period = led->pwm_period_ns; + pwm_init_state(led_data->pwm, &led_data->pwmstate); ret = devm_led_classdev_register(dev, &led_data->cdev); if (ret == 0) { diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 8d07fdf63a47..e1db43446327 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -201,10 +201,27 @@ static size_t linear_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i); } +static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, + size_t nr_pages) +{ + int ret; + struct linear_c *lc = ti->private; + struct block_device *bdev = lc->dev->bdev; + struct dax_device *dax_dev = lc->dev->dax_dev; + sector_t dev_sector, sector = pgoff * PAGE_SECTORS; + + dev_sector = linear_map_sector(ti, sector); + ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff); + if (ret) + return ret; + return dax_zero_page_range(dax_dev, pgoff, nr_pages); +} + #else #define linear_dax_direct_access NULL #define linear_dax_copy_from_iter NULL #define linear_dax_copy_to_iter NULL +#define linear_dax_zero_page_range NULL #endif static struct target_type linear_target = { @@ -226,6 +243,7 @@ static struct target_type linear_target = { .direct_access = linear_dax_direct_access, .dax_copy_from_iter = linear_dax_copy_from_iter, .dax_copy_to_iter = linear_dax_copy_to_iter, + .dax_zero_page_range = linear_dax_zero_page_range, }; int __init dm_linear_init(void) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 99721c76225d..8ea20b56b4d6 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -994,10 +994,26 @@ static size_t log_writes_dax_copy_to_iter(struct dm_target *ti, return dax_copy_to_iter(lc->dev->dax_dev, pgoff, addr, bytes, i); } +static int log_writes_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, + size_t nr_pages) +{ + int ret; + struct log_writes_c *lc = ti->private; + sector_t sector = pgoff * PAGE_SECTORS; + + ret = bdev_dax_pgoff(lc->dev->bdev, sector, nr_pages << PAGE_SHIFT, + &pgoff); + if (ret) + return ret; + return dax_zero_page_range(lc->dev->dax_dev, pgoff, + nr_pages << PAGE_SHIFT); +} + #else #define log_writes_dax_direct_access NULL #define log_writes_dax_copy_from_iter NULL #define log_writes_dax_copy_to_iter NULL +#define log_writes_dax_zero_page_range NULL #endif static struct target_type log_writes_target = { @@ -1016,6 +1032,7 @@ static struct target_type log_writes_target = { .direct_access = log_writes_dax_direct_access, .dax_copy_from_iter = log_writes_dax_copy_from_iter, .dax_copy_to_iter = log_writes_dax_copy_to_iter, + .dax_zero_page_range = log_writes_dax_zero_page_range, }; static int __init dm_log_writes_init(void) diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 63bbcc20f49a..fa813c0f993d 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c @@ -360,10 +360,32 @@ static size_t stripe_dax_copy_to_iter(struct dm_target *ti, pgoff_t pgoff, return dax_copy_to_iter(dax_dev, pgoff, addr, bytes, i); } +static int stripe_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, + size_t nr_pages) +{ + int ret; + sector_t dev_sector, sector = pgoff * PAGE_SECTORS; + struct stripe_c *sc = ti->private; + struct dax_device *dax_dev; + struct block_device *bdev; + uint32_t stripe; + + stripe_map_sector(sc, sector, 
&stripe, &dev_sector); + dev_sector += sc->stripe[stripe].physical_start; + dax_dev = sc->stripe[stripe].dev->dax_dev; + bdev = sc->stripe[stripe].dev->bdev; + + ret = bdev_dax_pgoff(bdev, dev_sector, nr_pages << PAGE_SHIFT, &pgoff); + if (ret) + return ret; + return dax_zero_page_range(dax_dev, pgoff, nr_pages); +} + #else #define stripe_dax_direct_access NULL #define stripe_dax_copy_from_iter NULL #define stripe_dax_copy_to_iter NULL +#define stripe_dax_zero_page_range NULL #endif /* @@ -486,6 +508,7 @@ static struct target_type stripe_target = { .direct_access = stripe_dax_direct_access, .dax_copy_from_iter = stripe_dax_copy_from_iter, .dax_copy_to_iter = stripe_dax_copy_to_iter, + .dax_zero_page_range = stripe_dax_zero_page_range, }; int __init dm_stripe_init(void) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 21c0207e3207..db9e46114653 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1199,6 +1199,35 @@ static size_t dm_dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, return ret; } +static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages) +{ + struct mapped_device *md = dax_get_private(dax_dev); + sector_t sector = pgoff * PAGE_SECTORS; + struct dm_target *ti; + int ret = -EIO; + int srcu_idx; + + ti = dm_dax_get_live_target(md, sector, &srcu_idx); + + if (!ti) + goto out; + if (WARN_ON(!ti->type->dax_zero_page_range)) { + /* + * ->zero_page_range() is mandatory dax operation. If we are + * here, something is wrong. + */ + dm_put_live_table(md, srcu_idx); + goto out; + } + ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); + + out: + dm_put_live_table(md, srcu_idx); + + return ret; +} + /* * A target may call dm_accept_partial_bio only from the map routine. It is * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_RESET, @@ -1969,7 +1998,7 @@ static struct mapped_device *alloc_dev(int minor) if (IS_ENABLED(CONFIG_DAX_DRIVER)) { md->dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops, 0); - if (!md->dax_dev) + if (IS_ERR(md->dax_dev)) goto bad; } @@ -3200,6 +3229,7 @@ static const struct dax_operations dm_dax_ops = { .dax_supported = dm_dax_supported, .copy_from_iter = dm_dax_copy_from_iter, .copy_to_iter = dm_dax_copy_to_iter, + .zero_page_range = dm_dax_zero_page_range, }; /* diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2b203290e7b9..0a59249198d3 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -642,6 +642,19 @@ config MFD_IPAQ_MICRO AT90LS8535 microcontroller flashed with a special iPAQ firmware using the custom protocol implemented in this driver. +config MFD_IQS62X + tristate "Azoteq IQS620A/621/622/624/625 core support" + depends on I2C + select MFD_CORE + select REGMAP_I2C + help + Say Y here if you want to build core support for the Azoteq IQS620A, + IQS621, IQS622, IQS624 and IQS625 multi-function sensors. Additional + options must be selected to enable device-specific functions. + + To compile this driver as a module, choose M here: the module will + be called iqs62x. + config MFD_JANZ_CMODIO tristate "Janz CMOD-IO PCI MODULbus Carrier Board" select MFD_CORE @@ -893,6 +906,7 @@ config MFD_CPCAP tristate "Support for Motorola CPCAP" depends on SPI depends on OF || COMPILE_TEST + select MFD_CORE select REGMAP_SPI select REGMAP_IRQ help @@ -1058,6 +1072,7 @@ config MFD_RN5T618 depends on OF select MFD_CORE select REGMAP_I2C + select REGMAP_IRQ help Say yes here to add support for the Ricoh RN5T567, RN5T618, RC5T619 PMIC. @@ -1201,7 +1216,7 @@ config AB8500_CORE chip. 
This connects to U8500 either on the SSP/SPI bus (deprecated since hardware version v1.0) or the I2C bus via PRCMU. It also adds the irq_chip parts for handling the Mixed Signal chip events. - This chip embeds various other multimedia funtionalities as well. + This chip embeds various other multimedia functionalities as well. config AB8500_DEBUG bool "Enable debug info via debugfs" @@ -1851,7 +1866,7 @@ config MFD_WM8994 has on board GPIO and regulator functionality which is supported via the relevant subsystems. This driver provides core support for the WM8994, in order to use the actual - functionaltiy of the device other drivers must be enabled. + functionality of the device other drivers must be enabled. config MFD_WM97xx tristate "Wolfson Microelectronics WM97xx" @@ -1864,7 +1879,7 @@ config MFD_WM97xx designed for smartphone applications. As well as audio functionality it has on board GPIO and a touchscreen functionality which is supported via the relevant subsystems. This driver provides core - support for the WM97xx, in order to use the actual functionaltiy of + support for the WM97xx, in order to use the actual functionality of the device other drivers must be enabled. config MFD_STW481X @@ -1957,7 +1972,7 @@ config MFD_STPMIC1 Support for ST Microelectronics STPMIC1 PMIC. STPMIC1 has power on key, watchdog and regulator functionalities which are supported via the relevant subsystems. This driver provides core support for the - STPMIC1. In order to use the actual functionaltiy of the device other + STPMIC1. In order to use the actual functionality of the device other drivers must be enabled. To compile this driver as a module, choose M here: the diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index b83f172545e1..f935d10cbf0f 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -226,6 +226,7 @@ obj-$(CONFIG_MFD_AS3711) += as3711.o obj-$(CONFIG_MFD_AS3722) += as3722.o obj-$(CONFIG_MFD_STW481X) += stw481x.o obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o +obj-$(CONFIG_MFD_IQS62X) += iqs62x.o obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o obj-$(CONFIG_MFD_HI655X_PMIC) += hi655x-pmic.o diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c index 78ee4b28fca2..a17cf759739d 100644 --- a/drivers/mfd/aat2870-core.c +++ b/drivers/mfd/aat2870-core.c @@ -221,7 +221,7 @@ static ssize_t aat2870_dump_reg(struct aat2870_data *aat2870, char *buf) count += sprintf(buf, "aat2870 registers\n"); for (addr = 0; addr < AAT2870_REG_NUM; addr++) { - count += sprintf(buf + count, "0x%02x: ", addr); + count += snprintf(buf + count, PAGE_SIZE - count, "0x%02x: ", addr); if (count >= PAGE_SIZE - 1) break; diff --git a/drivers/mfd/cros_ec_dev.c b/drivers/mfd/cros_ec_dev.c index 39e611695053..32c2b912b58b 100644 --- a/drivers/mfd/cros_ec_dev.c +++ b/drivers/mfd/cros_ec_dev.c @@ -211,7 +211,7 @@ static int ec_device_probe(struct platform_device *pdev) * explicitly added on platforms that don't have the PD notifier ACPI * device entry defined. 
*/ - if (IS_ENABLED(CONFIG_OF)) { + if (IS_ENABLED(CONFIG_OF) && ec->ec_dev->dev->of_node) { if (cros_ec_check_features(ec, EC_FEATURE_USB_PD)) { retval = mfd_add_hotplug_devices(ec->dev, cros_usbpd_notify_cells, diff --git a/drivers/mfd/da9062-core.c b/drivers/mfd/da9062-core.c index 419c73533401..fc30726e2e27 100644 --- a/drivers/mfd/da9062-core.c +++ b/drivers/mfd/da9062-core.c @@ -21,6 +21,9 @@ #define DA9062_REG_EVENT_B_OFFSET 1 #define DA9062_REG_EVENT_C_OFFSET 2 +#define DA9062_IRQ_LOW 0 +#define DA9062_IRQ_HIGH 1 + static struct regmap_irq da9061_irqs[] = { /* EVENT A */ [DA9061_IRQ_ONKEY] = { @@ -369,6 +372,33 @@ static int da9062_get_device_type(struct da9062 *chip) return ret; } +static u32 da9062_configure_irq_type(struct da9062 *chip, int irq, u32 *trigger) +{ + u32 irq_type = 0; + struct irq_data *irq_data = irq_get_irq_data(irq); + + if (!irq_data) { + dev_err(chip->dev, "Invalid IRQ: %d\n", irq); + return -EINVAL; + } + *trigger = irqd_get_trigger_type(irq_data); + + switch (*trigger) { + case IRQ_TYPE_LEVEL_HIGH: + irq_type = DA9062_IRQ_HIGH; + break; + case IRQ_TYPE_LEVEL_LOW: + irq_type = DA9062_IRQ_LOW; + break; + default: + dev_warn(chip->dev, "Unsupported IRQ type: %d\n", *trigger); + return -EINVAL; + } + return regmap_update_bits(chip->regmap, DA9062AA_CONFIG_A, + DA9062AA_IRQ_TYPE_MASK, + irq_type << DA9062AA_IRQ_TYPE_SHIFT); +} + static const struct regmap_range da9061_aa_readable_ranges[] = { regmap_reg_range(DA9062AA_PAGE_CON, DA9062AA_STATUS_B), regmap_reg_range(DA9062AA_STATUS_D, DA9062AA_EVENT_C), @@ -388,6 +418,7 @@ static const struct regmap_range da9061_aa_readable_ranges[] = { regmap_reg_range(DA9062AA_VBUCK1_A, DA9062AA_VBUCK4_A), regmap_reg_range(DA9062AA_VBUCK3_A, DA9062AA_VBUCK3_A), regmap_reg_range(DA9062AA_VLDO1_A, DA9062AA_VLDO4_A), + regmap_reg_range(DA9062AA_CONFIG_A, DA9062AA_CONFIG_A), regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B), regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B), regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B), @@ -417,6 +448,7 @@ static const struct regmap_range da9061_aa_writeable_ranges[] = { regmap_reg_range(DA9062AA_VBUCK1_A, DA9062AA_VBUCK4_A), regmap_reg_range(DA9062AA_VBUCK3_A, DA9062AA_VBUCK3_A), regmap_reg_range(DA9062AA_VLDO1_A, DA9062AA_VLDO4_A), + regmap_reg_range(DA9062AA_CONFIG_A, DA9062AA_CONFIG_A), regmap_reg_range(DA9062AA_VBUCK1_B, DA9062AA_VBUCK4_B), regmap_reg_range(DA9062AA_VBUCK3_B, DA9062AA_VBUCK3_B), regmap_reg_range(DA9062AA_VLDO1_B, DA9062AA_VLDO4_B), @@ -596,6 +628,7 @@ static int da9062_i2c_probe(struct i2c_client *i2c, const struct regmap_irq_chip *irq_chip; const struct regmap_config *config; int cell_num; + u32 trigger_type = 0; int ret; chip = devm_kzalloc(&i2c->dev, sizeof(*chip), GFP_KERNEL); @@ -654,10 +687,15 @@ static int da9062_i2c_probe(struct i2c_client *i2c, if (ret) return ret; + ret = da9062_configure_irq_type(chip, i2c->irq, &trigger_type); + if (ret < 0) { + dev_err(chip->dev, "Failed to configure IRQ type\n"); + return ret; + } + ret = regmap_add_irq_chip(chip->regmap, i2c->irq, - IRQF_TRIGGER_LOW | IRQF_ONESHOT | IRQF_SHARED, - -1, irq_chip, - &chip->regmap_irq); + trigger_type | IRQF_SHARED | IRQF_ONESHOT, + -1, irq_chip, &chip->regmap_irq); if (ret) { dev_err(chip->dev, "Failed to request IRQ %d: %d\n", i2c->irq, ret); diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 7841c11411d0..39276fa626d2 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -90,6 +90,11 @@ struct dln2_mod_rx_slots { spinlock_t lock; }; +enum dln2_endpoint { + 
DLN2_EP_OUT = 0, + DLN2_EP_IN = 1, +}; + struct dln2_dev { struct usb_device *usb_dev; struct usb_interface *interface; @@ -640,35 +645,56 @@ static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp) return 0; } +enum { + DLN2_ACPI_MATCH_GPIO = 0, + DLN2_ACPI_MATCH_I2C = 1, + DLN2_ACPI_MATCH_SPI = 2, +}; + static struct dln2_platform_data dln2_pdata_gpio = { .handle = DLN2_HANDLE_GPIO, }; +static struct mfd_cell_acpi_match dln2_acpi_match_gpio = { + .adr = DLN2_ACPI_MATCH_GPIO, +}; + /* Only one I2C port seems to be supported on current hardware */ static struct dln2_platform_data dln2_pdata_i2c = { .handle = DLN2_HANDLE_I2C, .port = 0, }; +static struct mfd_cell_acpi_match dln2_acpi_match_i2c = { + .adr = DLN2_ACPI_MATCH_I2C, +}; + /* Only one SPI port supported */ static struct dln2_platform_data dln2_pdata_spi = { .handle = DLN2_HANDLE_SPI, .port = 0, }; +static struct mfd_cell_acpi_match dln2_acpi_match_spi = { + .adr = DLN2_ACPI_MATCH_SPI, +}; + static const struct mfd_cell dln2_devs[] = { { .name = "dln2-gpio", + .acpi_match = &dln2_acpi_match_gpio, .platform_data = &dln2_pdata_gpio, .pdata_size = sizeof(struct dln2_platform_data), }, { .name = "dln2-i2c", + .acpi_match = &dln2_acpi_match_i2c, .platform_data = &dln2_pdata_i2c, .pdata_size = sizeof(struct dln2_platform_data), }, { .name = "dln2-spi", + .acpi_match = &dln2_acpi_match_spi, .platform_data = &dln2_pdata_spi, .pdata_size = sizeof(struct dln2_platform_data), }, @@ -733,10 +759,10 @@ static int dln2_probe(struct usb_interface *interface, hostif->desc.bNumEndpoints < 2) return -ENODEV; - epin = &hostif->endpoint[0].desc; - epout = &hostif->endpoint[1].desc; + epout = &hostif->endpoint[DLN2_EP_OUT].desc; if (!usb_endpoint_is_bulk_out(epout)) return -ENODEV; + epin = &hostif->endpoint[DLN2_EP_IN].desc; if (!usb_endpoint_is_bulk_in(epin)) return -ENODEV; diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index c40a6c7d0cf8..7fc0c5d4edff 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c @@ -139,6 +139,11 @@ static const struct intel_lpss_platform_info cnl_i2c_info = { .properties = spt_i2c_properties, }; +static const struct intel_lpss_platform_info ehl_i2c_info = { + .clk_rate = 100000000, + .properties = bxt_i2c_properties, +}; + static const struct pci_device_id intel_lpss_pci_ids[] = { /* CML-LP */ { PCI_VDEVICE(INTEL, 0x02a8), (kernel_ulong_t)&spt_uart_info }, @@ -231,15 +236,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x4b2a), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x4b2b), (kernel_ulong_t)&bxt_info }, { PCI_VDEVICE(INTEL, 0x4b37), (kernel_ulong_t)&bxt_info }, - { PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x4b4c), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b44), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b45), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b4b), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b4c), (kernel_ulong_t)&ehl_i2c_info }, { PCI_VDEVICE(INTEL, 0x4b4d), (kernel_ulong_t)&bxt_uart_info }, - { PCI_VDEVICE(INTEL, 0x4b78), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&bxt_i2c_info }, - { PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&bxt_i2c_info }, + { PCI_VDEVICE(INTEL, 
0x4b78), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b79), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b7a), (kernel_ulong_t)&ehl_i2c_info }, + { PCI_VDEVICE(INTEL, 0x4b7b), (kernel_ulong_t)&ehl_i2c_info }, /* JSL */ { PCI_VDEVICE(INTEL, 0x4da8), (kernel_ulong_t)&spt_uart_info }, { PCI_VDEVICE(INTEL, 0x4da9), (kernel_ulong_t)&spt_uart_info }, @@ -347,6 +352,16 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { { PCI_VDEVICE(INTEL, 0xa36a), (kernel_ulong_t)&cnl_i2c_info }, { PCI_VDEVICE(INTEL, 0xa36b), (kernel_ulong_t)&cnl_i2c_info }, { PCI_VDEVICE(INTEL, 0xa37b), (kernel_ulong_t)&spt_info }, + /* CML-V */ + { PCI_VDEVICE(INTEL, 0xa3a7), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa3a8), (kernel_ulong_t)&spt_uart_info }, + { PCI_VDEVICE(INTEL, 0xa3a9), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0xa3aa), (kernel_ulong_t)&spt_info }, + { PCI_VDEVICE(INTEL, 0xa3e0), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa3e1), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa3e2), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa3e3), (kernel_ulong_t)&spt_i2c_info }, + { PCI_VDEVICE(INTEL, 0xa3e6), (kernel_ulong_t)&spt_uart_info }, { } }; MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); diff --git a/drivers/mfd/iqs62x.c b/drivers/mfd/iqs62x.c new file mode 100644 index 000000000000..af764bc87d7c --- /dev/null +++ b/drivers/mfd/iqs62x.c @@ -0,0 +1,1063 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Azoteq IQS620A/621/622/624/625 Multi-Function Sensors + * + * Copyright (C) 2019 Jeff LaBundy <jeff@labundy.com> + * + * These devices rely on application-specific register settings and calibration + * data developed in and exported from a suite of GUIs offered by the vendor. A + * separate tool converts the GUIs' ASCII-based output into a standard firmware + * file parsed by the driver. 
+ * + * Link to datasheets and GUIs: https://www.azoteq.com/ + * + * Link to conversion tool: https://github.com/jlabundy/iqs62x-h2bin.git + */ + +#include <linux/completion.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/firmware.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/mfd/core.h> +#include <linux/mfd/iqs62x.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <asm/unaligned.h> + +#define IQS62X_PROD_NUM 0x00 + +#define IQS62X_SYS_FLAGS 0x10 +#define IQS62X_SYS_FLAGS_IN_ATI BIT(2) + +#define IQS620_HALL_FLAGS 0x16 +#define IQS621_HALL_FLAGS 0x19 +#define IQS622_HALL_FLAGS IQS621_HALL_FLAGS + +#define IQS624_INTERVAL_NUM 0x18 +#define IQS625_INTERVAL_NUM 0x12 + +#define IQS622_PROX_SETTINGS_4 0x48 +#define IQS620_PROX_SETTINGS_4 0x50 +#define IQS620_PROX_SETTINGS_4_SAR_EN BIT(7) + +#define IQS621_ALS_CAL_DIV_LUX 0x82 +#define IQS621_ALS_CAL_DIV_IR 0x83 + +#define IQS620_TEMP_CAL_MULT 0xC2 +#define IQS620_TEMP_CAL_DIV 0xC3 +#define IQS620_TEMP_CAL_OFFS 0xC4 + +#define IQS62X_SYS_SETTINGS 0xD0 +#define IQS62X_SYS_SETTINGS_SOFT_RESET BIT(7) +#define IQS62X_SYS_SETTINGS_ACK_RESET BIT(6) +#define IQS62X_SYS_SETTINGS_EVENT_MODE BIT(5) +#define IQS62X_SYS_SETTINGS_CLK_DIV BIT(4) +#define IQS62X_SYS_SETTINGS_REDO_ATI BIT(1) + +#define IQS62X_PWR_SETTINGS 0xD2 +#define IQS62X_PWR_SETTINGS_DIS_AUTO BIT(5) +#define IQS62X_PWR_SETTINGS_PWR_MODE_MASK (BIT(4) | BIT(3)) +#define IQS62X_PWR_SETTINGS_PWR_MODE_HALT (BIT(4) | BIT(3)) +#define IQS62X_PWR_SETTINGS_PWR_MODE_NORM 0 + +#define IQS62X_OTP_CMD 0xF0 +#define IQS62X_OTP_CMD_FG3 0x13 +#define IQS62X_OTP_DATA 0xF1 +#define IQS62X_MAX_REG 0xFF + +#define IQS62X_HALL_CAL_MASK GENMASK(3, 0) + +#define IQS62X_FW_REC_TYPE_INFO 0 +#define IQS62X_FW_REC_TYPE_PROD 1 +#define IQS62X_FW_REC_TYPE_HALL 2 +#define IQS62X_FW_REC_TYPE_MASK 3 +#define IQS62X_FW_REC_TYPE_DATA 4 + +#define IQS62X_ATI_POLL_SLEEP_US 10000 +#define IQS62X_ATI_POLL_TIMEOUT_US 500000 +#define IQS62X_ATI_STABLE_DELAY_MS 150 + +struct iqs62x_fw_rec { + u8 type; + u8 addr; + u8 len; + u8 data; +} __packed; + +struct iqs62x_fw_blk { + struct list_head list; + u8 addr; + u8 mask; + u8 len; + u8 data[]; +}; + +struct iqs62x_info { + u8 prod_num; + u8 sw_num; + u8 hw_num; +} __packed; + +static int iqs62x_dev_init(struct iqs62x_core *iqs62x) +{ + struct iqs62x_fw_blk *fw_blk; + unsigned int val; + int ret; + u8 clk_div = 1; + + list_for_each_entry(fw_blk, &iqs62x->fw_blk_head, list) { + if (fw_blk->mask) + ret = regmap_update_bits(iqs62x->regmap, fw_blk->addr, + fw_blk->mask, *fw_blk->data); + else + ret = regmap_raw_write(iqs62x->regmap, fw_blk->addr, + fw_blk->data, fw_blk->len); + if (ret) + return ret; + } + + switch (iqs62x->dev_desc->prod_num) { + case IQS620_PROD_NUM: + case IQS622_PROD_NUM: + ret = regmap_read(iqs62x->regmap, + iqs62x->dev_desc->prox_settings, &val); + if (ret) + return ret; + + if (val & IQS620_PROX_SETTINGS_4_SAR_EN) + iqs62x->ui_sel = IQS62X_UI_SAR1; + + /* fall through */ + + case IQS621_PROD_NUM: + ret = regmap_write(iqs62x->regmap, IQS620_GLBL_EVENT_MASK, + IQS620_GLBL_EVENT_MASK_PMU | + iqs62x->dev_desc->prox_mask | + iqs62x->dev_desc->sar_mask | + iqs62x->dev_desc->hall_mask | + iqs62x->dev_desc->hyst_mask | + iqs62x->dev_desc->temp_mask | + iqs62x->dev_desc->als_mask | + iqs62x->dev_desc->ir_mask); + if 
(ret) + return ret; + break; + + default: + ret = regmap_write(iqs62x->regmap, IQS624_HALL_UI, + IQS624_HALL_UI_WHL_EVENT | + IQS624_HALL_UI_INT_EVENT | + IQS624_HALL_UI_AUTO_CAL); + if (ret) + return ret; + + /* + * The IQS625 default interval divider is below the minimum + * permissible value, and the datasheet mandates that it is + * corrected during initialization (unless an updated value + * has already been provided by firmware). + * + * To protect against an unacceptably low user-entered value + * stored in the firmware, the same check is extended to the + * IQS624 as well. + */ + ret = regmap_read(iqs62x->regmap, IQS624_INTERVAL_DIV, &val); + if (ret) + return ret; + + if (val >= iqs62x->dev_desc->interval_div) + break; + + ret = regmap_write(iqs62x->regmap, IQS624_INTERVAL_DIV, + iqs62x->dev_desc->interval_div); + if (ret) + return ret; + } + + ret = regmap_read(iqs62x->regmap, IQS62X_SYS_SETTINGS, &val); + if (ret) + return ret; + + if (val & IQS62X_SYS_SETTINGS_CLK_DIV) + clk_div = iqs62x->dev_desc->clk_div; + + ret = regmap_write(iqs62x->regmap, IQS62X_SYS_SETTINGS, val | + IQS62X_SYS_SETTINGS_ACK_RESET | + IQS62X_SYS_SETTINGS_EVENT_MODE | + IQS62X_SYS_SETTINGS_REDO_ATI); + if (ret) + return ret; + + ret = regmap_read_poll_timeout(iqs62x->regmap, IQS62X_SYS_FLAGS, val, + !(val & IQS62X_SYS_FLAGS_IN_ATI), + IQS62X_ATI_POLL_SLEEP_US, + IQS62X_ATI_POLL_TIMEOUT_US * clk_div); + if (ret) + return ret; + + msleep(IQS62X_ATI_STABLE_DELAY_MS * clk_div); + + return 0; +} + +static int iqs62x_firmware_parse(struct iqs62x_core *iqs62x, + const struct firmware *fw) +{ + struct i2c_client *client = iqs62x->client; + struct iqs62x_fw_rec *fw_rec; + struct iqs62x_fw_blk *fw_blk; + unsigned int val; + size_t pos = 0; + int ret = 0; + u8 mask, len, *data; + u8 hall_cal_index = 0; + + while (pos < fw->size) { + if (pos + sizeof(*fw_rec) > fw->size) { + ret = -EINVAL; + break; + } + fw_rec = (struct iqs62x_fw_rec *)(fw->data + pos); + pos += sizeof(*fw_rec); + + if (pos + fw_rec->len - 1 > fw->size) { + ret = -EINVAL; + break; + } + pos += fw_rec->len - 1; + + switch (fw_rec->type) { + case IQS62X_FW_REC_TYPE_INFO: + continue; + + case IQS62X_FW_REC_TYPE_PROD: + if (fw_rec->data == iqs62x->dev_desc->prod_num) + continue; + + dev_err(&client->dev, + "Incompatible product number: 0x%02X\n", + fw_rec->data); + ret = -EINVAL; + break; + + case IQS62X_FW_REC_TYPE_HALL: + if (!hall_cal_index) { + ret = regmap_write(iqs62x->regmap, + IQS62X_OTP_CMD, + IQS62X_OTP_CMD_FG3); + if (ret) + break; + + ret = regmap_read(iqs62x->regmap, + IQS62X_OTP_DATA, &val); + if (ret) + break; + + hall_cal_index = val & IQS62X_HALL_CAL_MASK; + if (!hall_cal_index) { + dev_err(&client->dev, + "Uncalibrated device\n"); + ret = -ENODATA; + break; + } + } + + if (hall_cal_index > fw_rec->len) { + ret = -EINVAL; + break; + } + + mask = 0; + data = &fw_rec->data + hall_cal_index - 1; + len = sizeof(*data); + break; + + case IQS62X_FW_REC_TYPE_MASK: + if (fw_rec->len < (sizeof(mask) + sizeof(*data))) { + ret = -EINVAL; + break; + } + + mask = fw_rec->data; + data = &fw_rec->data + sizeof(mask); + len = sizeof(*data); + break; + + case IQS62X_FW_REC_TYPE_DATA: + mask = 0; + data = &fw_rec->data; + len = fw_rec->len; + break; + + default: + dev_err(&client->dev, + "Unrecognized record type: 0x%02X\n", + fw_rec->type); + ret = -EINVAL; + } + + if (ret) + break; + + fw_blk = devm_kzalloc(&client->dev, + struct_size(fw_blk, data, len), + GFP_KERNEL); + if (!fw_blk) { + ret = -ENOMEM; + break; + } + + fw_blk->addr = fw_rec->addr; + 
fw_blk->mask = mask; + fw_blk->len = len; + memcpy(fw_blk->data, data, len); + + list_add(&fw_blk->list, &iqs62x->fw_blk_head); + } + + release_firmware(fw); + + return ret; +} + +const struct iqs62x_event_desc iqs62x_events[IQS62X_NUM_EVENTS] = { + [IQS62X_EVENT_PROX_CH0_T] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(4), + .val = BIT(4), + }, + [IQS62X_EVENT_PROX_CH0_P] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(0), + .val = BIT(0), + }, + [IQS62X_EVENT_PROX_CH1_T] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(5), + .val = BIT(5), + }, + [IQS62X_EVENT_PROX_CH1_P] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(1), + .val = BIT(1), + }, + [IQS62X_EVENT_PROX_CH2_T] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(6), + .val = BIT(6), + }, + [IQS62X_EVENT_PROX_CH2_P] = { + .reg = IQS62X_EVENT_PROX, + .mask = BIT(2), + .val = BIT(2), + }, + [IQS62X_EVENT_HYST_POS_T] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(6) | BIT(7), + .val = BIT(6), + }, + [IQS62X_EVENT_HYST_POS_P] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(5) | BIT(7), + .val = BIT(5), + }, + [IQS62X_EVENT_HYST_NEG_T] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(6) | BIT(7), + .val = BIT(6) | BIT(7), + }, + [IQS62X_EVENT_HYST_NEG_P] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(5) | BIT(7), + .val = BIT(5) | BIT(7), + }, + [IQS62X_EVENT_SAR1_ACT] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(4), + .val = BIT(4), + }, + [IQS62X_EVENT_SAR1_QRD] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(2), + .val = BIT(2), + }, + [IQS62X_EVENT_SAR1_MOVE] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(1), + .val = BIT(1), + }, + [IQS62X_EVENT_SAR1_HALT] = { + .reg = IQS62X_EVENT_HYST, + .mask = BIT(0), + .val = BIT(0), + }, + [IQS62X_EVENT_WHEEL_UP] = { + .reg = IQS62X_EVENT_WHEEL, + .mask = BIT(7) | BIT(6), + .val = BIT(7), + }, + [IQS62X_EVENT_WHEEL_DN] = { + .reg = IQS62X_EVENT_WHEEL, + .mask = BIT(7) | BIT(6), + .val = BIT(7) | BIT(6), + }, + [IQS62X_EVENT_HALL_N_T] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(2) | BIT(0), + .val = BIT(2), + }, + [IQS62X_EVENT_HALL_N_P] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(1) | BIT(0), + .val = BIT(1), + }, + [IQS62X_EVENT_HALL_S_T] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(2) | BIT(0), + .val = BIT(2) | BIT(0), + }, + [IQS62X_EVENT_HALL_S_P] = { + .reg = IQS62X_EVENT_HALL, + .mask = BIT(1) | BIT(0), + .val = BIT(1) | BIT(0), + }, + [IQS62X_EVENT_SYS_RESET] = { + .reg = IQS62X_EVENT_SYS, + .mask = BIT(7), + .val = BIT(7), + }, +}; +EXPORT_SYMBOL_GPL(iqs62x_events); + +static irqreturn_t iqs62x_irq(int irq, void *context) +{ + struct iqs62x_core *iqs62x = context; + struct i2c_client *client = iqs62x->client; + struct iqs62x_event_data event_data; + struct iqs62x_event_desc event_desc; + enum iqs62x_event_reg event_reg; + unsigned long event_flags = 0; + int ret, i, j; + u8 event_map[IQS62X_EVENT_SIZE]; + + /* + * The device asserts the RDY output to signal the beginning of a + * communication window, which is closed by an I2C stop condition. + * As such, all interrupt status is captured in a single read and + * broadcast to any interested sub-device drivers. 
+ */ + ret = regmap_raw_read(iqs62x->regmap, IQS62X_SYS_FLAGS, event_map, + sizeof(event_map)); + if (ret) { + dev_err(&client->dev, "Failed to read device status: %d\n", + ret); + return IRQ_NONE; + } + + for (i = 0; i < sizeof(event_map); i++) { + event_reg = iqs62x->dev_desc->event_regs[iqs62x->ui_sel][i]; + + switch (event_reg) { + case IQS62X_EVENT_UI_LO: + event_data.ui_data = get_unaligned_le16(&event_map[i]); + + /* fall through */ + + case IQS62X_EVENT_UI_HI: + case IQS62X_EVENT_NONE: + continue; + + case IQS62X_EVENT_ALS: + event_data.als_flags = event_map[i]; + continue; + + case IQS62X_EVENT_IR: + event_data.ir_flags = event_map[i]; + continue; + + case IQS62X_EVENT_INTER: + event_data.interval = event_map[i]; + continue; + + case IQS62X_EVENT_HYST: + event_map[i] <<= iqs62x->dev_desc->hyst_shift; + + /* fall through */ + + case IQS62X_EVENT_WHEEL: + case IQS62X_EVENT_HALL: + case IQS62X_EVENT_PROX: + case IQS62X_EVENT_SYS: + break; + } + + for (j = 0; j < IQS62X_NUM_EVENTS; j++) { + event_desc = iqs62x_events[j]; + + if (event_desc.reg != event_reg) + continue; + + if ((event_map[i] & event_desc.mask) == event_desc.val) + event_flags |= BIT(j); + } + } + + /* + * The device resets itself in response to the I2C master stalling + * communication past a fixed timeout. In this case, all registers + * are restored and any interested sub-device drivers are notified. + */ + if (event_flags & BIT(IQS62X_EVENT_SYS_RESET)) { + dev_err(&client->dev, "Unexpected device reset\n"); + + ret = iqs62x_dev_init(iqs62x); + if (ret) { + dev_err(&client->dev, + "Failed to re-initialize device: %d\n", ret); + return IRQ_NONE; + } + } + + ret = blocking_notifier_call_chain(&iqs62x->nh, event_flags, + &event_data); + if (ret & NOTIFY_STOP_MASK) + return IRQ_NONE; + + /* + * Once the communication window is closed, a small delay is added to + * ensure the device's RDY output has been deasserted by the time the + * interrupt handler returns. 
+ */ + usleep_range(50, 100); + + return IRQ_HANDLED; +} + +static void iqs62x_firmware_load(const struct firmware *fw, void *context) +{ + struct iqs62x_core *iqs62x = context; + struct i2c_client *client = iqs62x->client; + int ret; + + if (fw) { + ret = iqs62x_firmware_parse(iqs62x, fw); + if (ret) { + dev_err(&client->dev, "Failed to parse firmware: %d\n", + ret); + goto err_out; + } + } + + ret = iqs62x_dev_init(iqs62x); + if (ret) { + dev_err(&client->dev, "Failed to initialize device: %d\n", ret); + goto err_out; + } + + ret = devm_request_threaded_irq(&client->dev, client->irq, + NULL, iqs62x_irq, IRQF_ONESHOT, + client->name, iqs62x); + if (ret) { + dev_err(&client->dev, "Failed to request IRQ: %d\n", ret); + goto err_out; + } + + ret = devm_mfd_add_devices(&client->dev, PLATFORM_DEVID_NONE, + iqs62x->dev_desc->sub_devs, + iqs62x->dev_desc->num_sub_devs, + NULL, 0, NULL); + if (ret) + dev_err(&client->dev, "Failed to add sub-devices: %d\n", ret); + +err_out: + complete_all(&iqs62x->fw_done); +} + +static const struct mfd_cell iqs620at_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs620a-keys", + }, + { + .name = "iqs620a-pwm", + .of_compatible = "azoteq,iqs620a-pwm", + }, + { .name = "iqs620at-temp", }, +}; + +static const struct mfd_cell iqs620a_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs620a-keys", + }, + { + .name = "iqs620a-pwm", + .of_compatible = "azoteq,iqs620a-pwm", + }, +}; + +static const struct mfd_cell iqs621_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs621-keys", + }, + { .name = "iqs621-als", }, +}; + +static const struct mfd_cell iqs622_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs622-keys", + }, + { .name = "iqs621-als", }, +}; + +static const struct mfd_cell iqs624_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs624-keys", + }, + { .name = "iqs624-pos", }, +}; + +static const struct mfd_cell iqs625_sub_devs[] = { + { + .name = "iqs62x-keys", + .of_compatible = "azoteq,iqs625-keys", + }, + { .name = "iqs624-pos", }, +}; + +static const u8 iqs620at_cal_regs[] = { + IQS620_TEMP_CAL_MULT, + IQS620_TEMP_CAL_DIV, + IQS620_TEMP_CAL_OFFS, +}; + +static const u8 iqs621_cal_regs[] = { + IQS621_ALS_CAL_DIV_LUX, + IQS621_ALS_CAL_DIV_IR, +}; + +static const enum iqs62x_event_reg iqs620a_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 0x12 */ + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HALL, /* 0x16 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + }, + [IQS62X_UI_SAR1] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HALL, /* 0x16 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + }, +}; + +static const enum iqs62x_event_reg iqs621_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 0x12 */ + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_ALS, /* 0x16 */ + IQS62X_EVENT_UI_LO, /* 0x17 */ + IQS62X_EVENT_UI_HI, /* 0x18 */ + IQS62X_EVENT_HALL, /* 0x19 */ + }, +}; + +static const enum iqs62x_event_reg iqs622_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 
0x12 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_ALS, /* 0x14 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_IR, /* 0x16 */ + IQS62X_EVENT_UI_LO, /* 0x17 */ + IQS62X_EVENT_UI_HI, /* 0x18 */ + IQS62X_EVENT_HALL, /* 0x19 */ + }, + [IQS62X_UI_SAR1] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_HYST, /* 0x13 */ + IQS62X_EVENT_ALS, /* 0x14 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_IR, /* 0x16 */ + IQS62X_EVENT_UI_LO, /* 0x17 */ + IQS62X_EVENT_UI_HI, /* 0x18 */ + IQS62X_EVENT_HALL, /* 0x19 */ + }, +}; + +static const enum iqs62x_event_reg iqs624_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_PROX, /* 0x12 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_WHEEL, /* 0x14 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_UI_LO, /* 0x16 */ + IQS62X_EVENT_UI_HI, /* 0x17 */ + IQS62X_EVENT_INTER, /* 0x18 */ + IQS62X_EVENT_NONE, + }, +}; + +static const enum iqs62x_event_reg iqs625_event_regs[][IQS62X_EVENT_SIZE] = { + [IQS62X_UI_PROX] = { + IQS62X_EVENT_SYS, /* 0x10 */ + IQS62X_EVENT_PROX, /* 0x11 */ + IQS62X_EVENT_INTER, /* 0x12 */ + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + IQS62X_EVENT_NONE, + }, +}; + +static const struct iqs62x_dev_desc iqs62x_devs[] = { + { + .dev_name = "iqs620at", + .sub_devs = iqs620at_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs620at_sub_devs), + + .prod_num = IQS620_PROD_NUM, + .sw_num = 0x08, + .cal_regs = iqs620at_cal_regs, + .num_cal_regs = ARRAY_SIZE(iqs620at_cal_regs), + + .prox_mask = BIT(0), + .sar_mask = BIT(1) | BIT(7), + .hall_mask = BIT(2), + .hyst_mask = BIT(3), + .temp_mask = BIT(4), + + .prox_settings = IQS620_PROX_SETTINGS_4, + .hall_flags = IQS620_HALL_FLAGS, + + .clk_div = 4, + .fw_name = "iqs620a.bin", + .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs620a", + .sub_devs = iqs620a_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs620a_sub_devs), + + .prod_num = IQS620_PROD_NUM, + .sw_num = 0x08, + + .prox_mask = BIT(0), + .sar_mask = BIT(1) | BIT(7), + .hall_mask = BIT(2), + .hyst_mask = BIT(3), + .temp_mask = BIT(4), + + .prox_settings = IQS620_PROX_SETTINGS_4, + .hall_flags = IQS620_HALL_FLAGS, + + .clk_div = 4, + .fw_name = "iqs620a.bin", + .event_regs = &iqs620a_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs621", + .sub_devs = iqs621_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs621_sub_devs), + + .prod_num = IQS621_PROD_NUM, + .sw_num = 0x09, + .cal_regs = iqs621_cal_regs, + .num_cal_regs = ARRAY_SIZE(iqs621_cal_regs), + + .prox_mask = BIT(0), + .hall_mask = BIT(1), + .als_mask = BIT(2), + .hyst_mask = BIT(3), + .temp_mask = BIT(4), + + .als_flags = IQS621_ALS_FLAGS, + .hall_flags = IQS621_HALL_FLAGS, + .hyst_shift = 5, + + .clk_div = 2, + .fw_name = "iqs621.bin", + .event_regs = &iqs621_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs622", + .sub_devs = iqs622_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs622_sub_devs), + + .prod_num = IQS622_PROD_NUM, + .sw_num = 0x06, + + .prox_mask = BIT(0), + .sar_mask = BIT(1), + .hall_mask = BIT(2), + .als_mask = BIT(3), + .ir_mask = BIT(4), + + .prox_settings = IQS622_PROX_SETTINGS_4, + .als_flags = IQS622_ALS_FLAGS, + .hall_flags = IQS622_HALL_FLAGS, + + .clk_div = 2, + .fw_name = "iqs622.bin", + .event_regs = &iqs622_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs624", + .sub_devs = iqs624_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs624_sub_devs), + + .prod_num = IQS624_PROD_NUM, + .sw_num = 0x0B, + + 
.interval = IQS624_INTERVAL_NUM, + .interval_div = 3, + + .clk_div = 2, + .fw_name = "iqs624.bin", + .event_regs = &iqs624_event_regs[IQS62X_UI_PROX], + }, + { + .dev_name = "iqs625", + .sub_devs = iqs625_sub_devs, + .num_sub_devs = ARRAY_SIZE(iqs625_sub_devs), + + .prod_num = IQS625_PROD_NUM, + .sw_num = 0x0B, + + .interval = IQS625_INTERVAL_NUM, + .interval_div = 10, + + .clk_div = 2, + .fw_name = "iqs625.bin", + .event_regs = &iqs625_event_regs[IQS62X_UI_PROX], + }, +}; + +static const struct regmap_config iqs62x_map_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = IQS62X_MAX_REG, +}; + +static int iqs62x_probe(struct i2c_client *client) +{ + struct iqs62x_core *iqs62x; + struct iqs62x_info info; + unsigned int val; + int ret, i, j; + u8 sw_num = 0; + const char *fw_name = NULL; + + iqs62x = devm_kzalloc(&client->dev, sizeof(*iqs62x), GFP_KERNEL); + if (!iqs62x) + return -ENOMEM; + + i2c_set_clientdata(client, iqs62x); + iqs62x->client = client; + + BLOCKING_INIT_NOTIFIER_HEAD(&iqs62x->nh); + INIT_LIST_HEAD(&iqs62x->fw_blk_head); + init_completion(&iqs62x->fw_done); + + iqs62x->regmap = devm_regmap_init_i2c(client, &iqs62x_map_config); + if (IS_ERR(iqs62x->regmap)) { + ret = PTR_ERR(iqs62x->regmap); + dev_err(&client->dev, "Failed to initialize register map: %d\n", + ret); + return ret; + } + + ret = regmap_raw_read(iqs62x->regmap, IQS62X_PROD_NUM, &info, + sizeof(info)); + if (ret) + return ret; + + /* + * The following sequence validates the device's product and software + * numbers. It then determines if the device is factory-calibrated by + * checking for nonzero values in the device's designated calibration + * registers (if applicable). Depending on the device, the absence of + * calibration data indicates a reduced feature set or invalid device. + * + * For devices given in both calibrated and uncalibrated versions, the + * calibrated version (e.g. IQS620AT) appears first in the iqs62x_devs + * array. The uncalibrated version (e.g. IQS620A) appears next and has + * the same product and software numbers, but no calibration registers + * are specified. + */ + for (i = 0; i < ARRAY_SIZE(iqs62x_devs); i++) { + if (info.prod_num != iqs62x_devs[i].prod_num) + continue; + + iqs62x->dev_desc = &iqs62x_devs[i]; + + if (info.sw_num < iqs62x->dev_desc->sw_num) + continue; + + sw_num = info.sw_num; + + /* + * Read each of the device's designated calibration registers, + * if any, and exit from the inner loop early if any are equal + * to zero (indicating the device is uncalibrated). This could + * be acceptable depending on the device (e.g. IQS620A instead + * of IQS620AT). + */ + for (j = 0; j < iqs62x->dev_desc->num_cal_regs; j++) { + ret = regmap_read(iqs62x->regmap, + iqs62x->dev_desc->cal_regs[j], &val); + if (ret) + return ret; + + if (!val) + break; + } + + /* + * If the number of nonzero values read from the device equals + * the number of designated calibration registers (which could + * be zero), exit from the outer loop early to signal that the + * device's product and software numbers match a known device, + * and the device is calibrated (if applicable). 
+ */ + if (j == iqs62x->dev_desc->num_cal_regs) + break; + } + + if (!iqs62x->dev_desc) { + dev_err(&client->dev, "Unrecognized product number: 0x%02X\n", + info.prod_num); + return -EINVAL; + } + + if (!sw_num) { + dev_err(&client->dev, "Unrecognized software number: 0x%02X\n", + info.sw_num); + return -EINVAL; + } + + if (i == ARRAY_SIZE(iqs62x_devs)) { + dev_err(&client->dev, "Uncalibrated device\n"); + return -ENODATA; + } + + device_property_read_string(&client->dev, "firmware-name", &fw_name); + + ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, + fw_name ? : iqs62x->dev_desc->fw_name, + &client->dev, GFP_KERNEL, iqs62x, + iqs62x_firmware_load); + if (ret) + dev_err(&client->dev, "Failed to request firmware: %d\n", ret); + + return ret; +} + +static int iqs62x_remove(struct i2c_client *client) +{ + struct iqs62x_core *iqs62x = i2c_get_clientdata(client); + + wait_for_completion(&iqs62x->fw_done); + + return 0; +} + +static int __maybe_unused iqs62x_suspend(struct device *dev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(dev); + int ret; + + wait_for_completion(&iqs62x->fw_done); + + /* + * As per the datasheet, automatic mode switching must be disabled + * before the device is placed in or taken out of halt mode. + */ + ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_DIS_AUTO, 0xFF); + if (ret) + return ret; + + return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_PWR_MODE_MASK, + IQS62X_PWR_SETTINGS_PWR_MODE_HALT); +} + +static int __maybe_unused iqs62x_resume(struct device *dev) +{ + struct iqs62x_core *iqs62x = dev_get_drvdata(dev); + int ret; + + ret = regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_PWR_MODE_MASK, + IQS62X_PWR_SETTINGS_PWR_MODE_NORM); + if (ret) + return ret; + + return regmap_update_bits(iqs62x->regmap, IQS62X_PWR_SETTINGS, + IQS62X_PWR_SETTINGS_DIS_AUTO, 0); +} + +static SIMPLE_DEV_PM_OPS(iqs62x_pm, iqs62x_suspend, iqs62x_resume); + +static const struct of_device_id iqs62x_of_match[] = { + { .compatible = "azoteq,iqs620a" }, + { .compatible = "azoteq,iqs621" }, + { .compatible = "azoteq,iqs622" }, + { .compatible = "azoteq,iqs624" }, + { .compatible = "azoteq,iqs625" }, + { } +}; +MODULE_DEVICE_TABLE(of, iqs62x_of_match); + +static struct i2c_driver iqs62x_i2c_driver = { + .driver = { + .name = "iqs62x", + .of_match_table = iqs62x_of_match, + .pm = &iqs62x_pm, + }, + .probe_new = iqs62x_probe, + .remove = iqs62x_remove, +}; +module_i2c_driver(iqs62x_i2c_driver); + +MODULE_AUTHOR("Jeff LaBundy <jeff@labundy.com>"); +MODULE_DESCRIPTION("Azoteq IQS620A/621/622/624/625 Multi-Function Sensors"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 4798d9f3f9d5..1f4f01b02d98 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -840,7 +840,7 @@ MODULE_DEVICE_TABLE(of, usbhs_omap_dt_ids); static struct platform_driver usbhs_omap_driver = { .driver = { - .name = (char *)usbhs_driver_name, + .name = usbhs_driver_name, .pm = &usbhsomap_dev_pm_ops, .of_match_table = usbhs_omap_dt_ids, }, diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c index 265f5e350e1c..4b7f73c317e8 100644 --- a/drivers/mfd/omap-usb-tll.c +++ b/drivers/mfd/omap-usb-tll.c @@ -99,7 +99,7 @@ struct usbtll_omap { void __iomem *base; int nch; /* num. 
of channels */ - struct clk *ch_clk[0]; /* must be the last member */ + struct clk *ch_clk[]; /* must be the last member */ }; /*-------------------------------------------------------------------------*/ @@ -304,7 +304,7 @@ MODULE_DEVICE_TABLE(of, usbtll_omap_dt_ids); static struct platform_driver usbtll_omap_driver = { .driver = { - .name = (char *)usbtll_driver_name, + .name = usbtll_driver_name, .of_match_table = usbtll_omap_dt_ids, }, .probe = usbtll_omap_probe, diff --git a/drivers/mfd/qcom-pm8xxx.c b/drivers/mfd/qcom-pm8xxx.c index 29133326c6fd..acd172ddcbd6 100644 --- a/drivers/mfd/qcom-pm8xxx.c +++ b/drivers/mfd/qcom-pm8xxx.c @@ -76,7 +76,7 @@ struct pm_irq_chip { unsigned int num_masters; const struct pm_irq_data *pm_irq_data; /* MUST BE AT THE END OF THIS STRUCT */ - u8 config[0]; + u8 config[]; }; static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, unsigned int bp, diff --git a/drivers/mfd/rk808.c b/drivers/mfd/rk808.c index a69a6742ecdc..d109b9f14407 100644 --- a/drivers/mfd/rk808.c +++ b/drivers/mfd/rk808.c @@ -19,7 +19,6 @@ #include <linux/module.h> #include <linux/of_device.h> #include <linux/regmap.h> -#include <linux/syscore_ops.h> struct rk808_reg_data { int addr; @@ -186,7 +185,6 @@ static const struct rk808_reg_data rk805_pre_init_reg[] = { {RK805_BUCK4_CONFIG_REG, RK805_BUCK3_4_ILMAX_MASK, RK805_BUCK4_ILMAX_3500MA}, {RK805_BUCK4_CONFIG_REG, BUCK_ILMIN_MASK, BUCK_ILMIN_400MA}, - {RK805_GPIO_IO_POL_REG, SLP_SD_MSK, SLEEP_FUN}, {RK805_THERMAL_REG, TEMP_HOTDIE_MSK, TEMP115C}, }; @@ -449,88 +447,60 @@ static const struct regmap_irq_chip rk818_irq_chip = { static struct i2c_client *rk808_i2c_client; -static void rk805_device_shutdown(void) +static void rk808_pm_power_off(void) { int ret; + unsigned int reg, bit; struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - if (!rk808) - return; - - ret = regmap_update_bits(rk808->regmap, - RK805_DEV_CTRL_REG, - DEV_OFF, DEV_OFF); - if (ret) - dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); -} - -static void rk805_device_shutdown_prepare(void) -{ - int ret; - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - - if (!rk808) - return; - - ret = regmap_update_bits(rk808->regmap, - RK805_GPIO_IO_POL_REG, - SLP_SD_MSK, SHUTDOWN_FUN); - if (ret) - dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); -} - -static void rk808_device_shutdown(void) -{ - int ret; - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - - if (!rk808) - return; - - ret = regmap_update_bits(rk808->regmap, - RK808_DEVCTRL_REG, - DEV_OFF_RST, DEV_OFF_RST); - if (ret) - dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); -} - -static void rk818_device_shutdown(void) -{ - int ret; - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); - - if (!rk808) + switch (rk808->variant) { + case RK805_ID: + reg = RK805_DEV_CTRL_REG; + bit = DEV_OFF; + break; + case RK808_ID: + reg = RK808_DEVCTRL_REG, + bit = DEV_OFF_RST; + break; + case RK818_ID: + reg = RK818_DEVCTRL_REG; + bit = DEV_OFF; + break; + default: return; - - ret = regmap_update_bits(rk808->regmap, - RK818_DEVCTRL_REG, - DEV_OFF, DEV_OFF); + } + ret = regmap_update_bits(rk808->regmap, reg, bit, bit); if (ret) dev_err(&rk808_i2c_client->dev, "Failed to shutdown device!\n"); } -static void rk8xx_syscore_shutdown(void) +static void rk8xx_shutdown(struct i2c_client *client) { - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); + struct rk808 *rk808 = i2c_get_clientdata(client); int ret; - if (system_state == SYSTEM_POWER_OFF && 
- (rk808->variant == RK809_ID || rk808->variant == RK817_ID)) { + switch (rk808->variant) { + case RK805_ID: + ret = regmap_update_bits(rk808->regmap, + RK805_GPIO_IO_POL_REG, + SLP_SD_MSK, + SHUTDOWN_FUN); + break; + case RK809_ID: + case RK817_ID: ret = regmap_update_bits(rk808->regmap, RK817_SYS_CFG(3), RK817_SLPPIN_FUNC_MSK, SLPPIN_DN_FUN); - if (ret) { - dev_warn(&rk808_i2c_client->dev, - "Cannot switch to power down function\n"); - } + break; + default: + return; } + if (ret) + dev_warn(&client->dev, + "Cannot switch to power down function\n"); } -static struct syscore_ops rk808_syscore_ops = { - .shutdown = rk8xx_syscore_shutdown, -}; - static const struct of_device_id rk808_of_match[] = { { .compatible = "rockchip,rk805" }, { .compatible = "rockchip,rk808" }, @@ -550,7 +520,7 @@ static int rk808_probe(struct i2c_client *client, const struct mfd_cell *cells; int nr_pre_init_regs; int nr_cells; - int pm_off = 0, msb, lsb; + int msb, lsb; unsigned char pmic_id_msb, pmic_id_lsb; int ret; int i; @@ -594,8 +564,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk805_pre_init_reg); cells = rk805s; nr_cells = ARRAY_SIZE(rk805s); - rk808->pm_pwroff_fn = rk805_device_shutdown; - rk808->pm_pwroff_prep_fn = rk805_device_shutdown_prepare; break; case RK808_ID: rk808->regmap_cfg = &rk808_regmap_config; @@ -604,7 +572,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk808_pre_init_reg); cells = rk808s; nr_cells = ARRAY_SIZE(rk808s); - rk808->pm_pwroff_fn = rk808_device_shutdown; break; case RK818_ID: rk808->regmap_cfg = &rk818_regmap_config; @@ -613,7 +580,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk818_pre_init_reg); cells = rk818s; nr_cells = ARRAY_SIZE(rk818s); - rk808->pm_pwroff_fn = rk818_device_shutdown; break; case RK809_ID: case RK817_ID: @@ -623,7 +589,6 @@ static int rk808_probe(struct i2c_client *client, nr_pre_init_regs = ARRAY_SIZE(rk817_pre_init_reg); cells = rk817s; nr_cells = ARRAY_SIZE(rk817s); - register_syscore_ops(&rk808_syscore_ops); break; default: dev_err(&client->dev, "Unsupported RK8XX ID %lu\n", @@ -674,17 +639,9 @@ static int rk808_probe(struct i2c_client *client, goto err_irq; } - pm_off = of_property_read_bool(np, - "rockchip,system-power-controller"); - if (pm_off && !pm_power_off) { + if (of_property_read_bool(np, "rockchip,system-power-controller")) { rk808_i2c_client = client; - pm_power_off = rk808->pm_pwroff_fn; - } - - if (pm_off && !pm_power_off_prepare) { - if (!rk808_i2c_client) - rk808_i2c_client = client; - pm_power_off_prepare = rk808->pm_pwroff_prep_fn; + pm_power_off = rk808_pm_power_off; } return 0; @@ -704,25 +661,24 @@ static int rk808_remove(struct i2c_client *client) * pm_power_off may points to a function from another module. * Check if the pointer is set by us and only then overwrite it. */ - if (rk808->pm_pwroff_fn && pm_power_off == rk808->pm_pwroff_fn) + if (pm_power_off == rk808_pm_power_off) pm_power_off = NULL; - /** - * As above, check if the pointer is set by us before overwrite. 
- */ - if (rk808->pm_pwroff_prep_fn && - pm_power_off_prepare == rk808->pm_pwroff_prep_fn) - pm_power_off_prepare = NULL; - return 0; } static int __maybe_unused rk8xx_suspend(struct device *dev) { - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); + struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev)); int ret = 0; switch (rk808->variant) { + case RK805_ID: + ret = regmap_update_bits(rk808->regmap, + RK805_GPIO_IO_POL_REG, + SLP_SD_MSK, + SLEEP_FUN); + break; case RK809_ID: case RK817_ID: ret = regmap_update_bits(rk808->regmap, @@ -739,7 +695,7 @@ static int __maybe_unused rk8xx_suspend(struct device *dev) static int __maybe_unused rk8xx_resume(struct device *dev) { - struct rk808 *rk808 = i2c_get_clientdata(rk808_i2c_client); + struct rk808 *rk808 = i2c_get_clientdata(to_i2c_client(dev)); int ret = 0; switch (rk808->variant) { @@ -766,6 +722,7 @@ static struct i2c_driver rk808_i2c_driver = { }, .probe = rk808_probe, .remove = rk808_remove, + .shutdown = rk8xx_shutdown, }; module_i2c_driver(rk808_i2c_driver); diff --git a/drivers/mfd/rn5t618.c b/drivers/mfd/rn5t618.c index ead2e79036a9..232de50562f9 100644 --- a/drivers/mfd/rn5t618.c +++ b/drivers/mfd/rn5t618.c @@ -8,10 +8,13 @@ #include <linux/delay.h> #include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/mfd/core.h> #include <linux/mfd/rn5t618.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/platform_device.h> #include <linux/reboot.h> #include <linux/regmap.h> @@ -20,6 +23,13 @@ static const struct mfd_cell rn5t618_cells[] = { { .name = "rn5t618-wdt" }, }; +static const struct mfd_cell rc5t619_cells[] = { + { .name = "rn5t618-adc" }, + { .name = "rn5t618-regulator" }, + { .name = "rc5t619-rtc" }, + { .name = "rn5t618-wdt" }, +}; + static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { @@ -32,6 +42,8 @@ static bool rn5t618_volatile_reg(struct device *dev, unsigned int reg) case RN5T618_IR_GPF: case RN5T618_MON_IOIN: case RN5T618_INTMON: + case RN5T618_RTC_CTRL1 ... RN5T618_RTC_CTRL2: + case RN5T618_RTC_SECONDS ... 
RN5T618_RTC_YEAR: return true; default: return false; @@ -46,9 +58,56 @@ static const struct regmap_config rn5t618_regmap_config = { .cache_type = REGCACHE_RBTREE, }; +static const struct regmap_irq rc5t619_irqs[] = { + REGMAP_IRQ_REG(RN5T618_IRQ_SYS, 0, BIT(0)), + REGMAP_IRQ_REG(RN5T618_IRQ_DCDC, 0, BIT(1)), + REGMAP_IRQ_REG(RN5T618_IRQ_RTC, 0, BIT(2)), + REGMAP_IRQ_REG(RN5T618_IRQ_ADC, 0, BIT(3)), + REGMAP_IRQ_REG(RN5T618_IRQ_GPIO, 0, BIT(4)), + REGMAP_IRQ_REG(RN5T618_IRQ_CHG, 0, BIT(6)), +}; + +static const struct regmap_irq_chip rc5t619_irq_chip = { + .name = "rc5t619", + .irqs = rc5t619_irqs, + .num_irqs = ARRAY_SIZE(rc5t619_irqs), + .num_regs = 1, + .status_base = RN5T618_INTMON, + .mask_base = RN5T618_INTEN, + .mask_invert = true, +}; + static struct rn5t618 *rn5t618_pm_power_off; static struct notifier_block rn5t618_restart_handler; +static int rn5t618_irq_init(struct rn5t618 *rn5t618) +{ + const struct regmap_irq_chip *irq_chip = NULL; + int ret; + + if (!rn5t618->irq) + return 0; + + switch (rn5t618->variant) { + case RC5T619: + irq_chip = &rc5t619_irq_chip; + break; + default: + dev_err(rn5t618->dev, "Currently no IRQ support for variant %d\n", + (int)rn5t618->variant); + return -ENOENT; + } + + ret = devm_regmap_add_irq_chip(rn5t618->dev, rn5t618->regmap, + rn5t618->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + 0, irq_chip, &rn5t618->irq_data); + if (ret) + dev_err(rn5t618->dev, "Failed to register IRQ chip\n"); + + return ret; +} + static void rn5t618_trigger_poweroff_sequence(bool repower) { /* disable automatic repower-on */ @@ -87,8 +146,7 @@ static const struct of_device_id rn5t618_of_match[] = { }; MODULE_DEVICE_TABLE(of, rn5t618_of_match); -static int rn5t618_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int rn5t618_i2c_probe(struct i2c_client *i2c) { const struct of_device_id *of_id; struct rn5t618 *priv; @@ -106,6 +164,8 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c, i2c_set_clientdata(i2c, priv); priv->variant = (long)of_id->data; + priv->irq = i2c->irq; + priv->dev = &i2c->dev; priv->regmap = devm_regmap_init_i2c(i2c, &rn5t618_regmap_config); if (IS_ERR(priv->regmap)) { @@ -114,8 +174,16 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c, return ret; } - ret = devm_mfd_add_devices(&i2c->dev, -1, rn5t618_cells, - ARRAY_SIZE(rn5t618_cells), NULL, 0, NULL); + if (priv->variant == RC5T619) + ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE, + rc5t619_cells, + ARRAY_SIZE(rc5t619_cells), + NULL, 0, NULL); + else + ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE, + rn5t618_cells, + ARRAY_SIZE(rn5t618_cells), + NULL, 0, NULL); if (ret) { dev_err(&i2c->dev, "failed to add sub-devices: %d\n", ret); return ret; @@ -138,7 +206,7 @@ static int rn5t618_i2c_probe(struct i2c_client *i2c, return ret; } - return 0; + return rn5t618_irq_init(priv); } static int rn5t618_i2c_remove(struct i2c_client *i2c) @@ -155,19 +223,38 @@ static int rn5t618_i2c_remove(struct i2c_client *i2c) return 0; } -static const struct i2c_device_id rn5t618_i2c_id[] = { - { } -}; -MODULE_DEVICE_TABLE(i2c, rn5t618_i2c_id); +static int __maybe_unused rn5t618_i2c_suspend(struct device *dev) +{ + struct rn5t618 *priv = dev_get_drvdata(dev); + + if (priv->irq) + disable_irq(priv->irq); + + return 0; +} + +static int __maybe_unused rn5t618_i2c_resume(struct device *dev) +{ + struct rn5t618 *priv = dev_get_drvdata(dev); + + if (priv->irq) + enable_irq(priv->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(rn5t618_i2c_dev_pm_ops, + rn5t618_i2c_suspend, + 
rn5t618_i2c_resume); static struct i2c_driver rn5t618_i2c_driver = { .driver = { .name = "rn5t618", .of_match_table = of_match_ptr(rn5t618_of_match), + .pm = &rn5t618_i2c_dev_pm_ops, }, - .probe = rn5t618_i2c_probe, + .probe_new = rn5t618_i2c_probe, .remove = rn5t618_i2c_remove, - .id_table = rn5t618_i2c_id, }; module_i2c_driver(rn5t618_i2c_driver); diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c index c0529a1cd5ea..ebdf2f11ae28 100644 --- a/drivers/mfd/sprd-sc27xx-spi.c +++ b/drivers/mfd/sprd-sc27xx-spi.c @@ -10,6 +10,7 @@ #include <linux/of_device.h> #include <linux/regmap.h> #include <linux/spi/spi.h> +#include <uapi/linux/usb/charger.h> #define SPRD_PMIC_INT_MASK_STATUS 0x0 #define SPRD_PMIC_INT_RAW_STATUS 0x4 @@ -17,6 +18,16 @@ #define SPRD_SC2731_IRQ_BASE 0x140 #define SPRD_SC2731_IRQ_NUMS 16 +#define SPRD_SC2731_CHG_DET 0xedc + +/* PMIC charger detection definition */ +#define SPRD_PMIC_CHG_DET_DELAY_US 200000 +#define SPRD_PMIC_CHG_DET_TIMEOUT 2000000 +#define SPRD_PMIC_CHG_DET_DONE BIT(11) +#define SPRD_PMIC_SDP_TYPE BIT(7) +#define SPRD_PMIC_DCP_TYPE BIT(6) +#define SPRD_PMIC_CDP_TYPE BIT(5) +#define SPRD_PMIC_CHG_TYPE_MASK GENMASK(7, 5) struct sprd_pmic { struct regmap *regmap; @@ -24,12 +35,14 @@ struct sprd_pmic { struct regmap_irq *irqs; struct regmap_irq_chip irq_chip; struct regmap_irq_chip_data *irq_data; + const struct sprd_pmic_data *pdata; int irq; }; struct sprd_pmic_data { u32 irq_base; u32 num_irqs; + u32 charger_det; }; /* @@ -40,8 +53,46 @@ struct sprd_pmic_data { static const struct sprd_pmic_data sc2731_data = { .irq_base = SPRD_SC2731_IRQ_BASE, .num_irqs = SPRD_SC2731_IRQ_NUMS, + .charger_det = SPRD_SC2731_CHG_DET, }; +enum usb_charger_type sprd_pmic_detect_charger_type(struct device *dev) +{ + struct spi_device *spi = to_spi_device(dev); + struct sprd_pmic *ddata = spi_get_drvdata(spi); + const struct sprd_pmic_data *pdata = ddata->pdata; + enum usb_charger_type type; + u32 val; + int ret; + + ret = regmap_read_poll_timeout(ddata->regmap, pdata->charger_det, val, + (val & SPRD_PMIC_CHG_DET_DONE), + SPRD_PMIC_CHG_DET_DELAY_US, + SPRD_PMIC_CHG_DET_TIMEOUT); + if (ret) { + dev_err(&spi->dev, "failed to detect charger type\n"); + return UNKNOWN_TYPE; + } + + switch (val & SPRD_PMIC_CHG_TYPE_MASK) { + case SPRD_PMIC_CDP_TYPE: + type = CDP_TYPE; + break; + case SPRD_PMIC_DCP_TYPE: + type = DCP_TYPE; + break; + case SPRD_PMIC_SDP_TYPE: + type = SDP_TYPE; + break; + default: + type = UNKNOWN_TYPE; + break; + } + + return type; +} +EXPORT_SYMBOL_GPL(sprd_pmic_detect_charger_type); + static const struct mfd_cell sprd_pmic_devs[] = { { .name = "sc27xx-wdt", @@ -181,6 +232,7 @@ static int sprd_pmic_probe(struct spi_device *spi) spi_set_drvdata(spi, ddata); ddata->dev = &spi->dev; ddata->irq = spi->irq; + ddata->pdata = pdata; ddata->irq_chip.name = dev_name(&spi->dev); ddata->irq_chip.status_base = diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index cc92bc3ed820..886459e0ddd9 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -11,6 +11,7 @@ #include <linux/sched/signal.h> #include <linux/sched/task_stack.h> #include <linux/uaccess.h> +#include <linux/slab.h> #ifdef CONFIG_X86_32 #include <asm/desc.h> @@ -175,6 +176,80 @@ void lkdtm_HUNG_TASK(void) schedule(); } +volatile unsigned int huge = INT_MAX - 2; +volatile unsigned int ignored; + +void lkdtm_OVERFLOW_SIGNED(void) +{ + int value; + + value = huge; + pr_info("Normal signed addition ...\n"); + value += 1; + ignored = value; + + pr_info("Overflowing 
signed addition ...\n"); + value += 4; + ignored = value; +} + + +void lkdtm_OVERFLOW_UNSIGNED(void) +{ + unsigned int value; + + value = huge; + pr_info("Normal unsigned addition ...\n"); + value += 1; + ignored = value; + + pr_info("Overflowing unsigned addition ...\n"); + value += 4; + ignored = value; +} + +/* Intentially using old-style flex array definition of 1 byte. */ +struct array_bounds_flex_array { + int one; + int two; + char data[1]; +}; + +struct array_bounds { + int one; + int two; + char data[8]; + int three; +}; + +void lkdtm_ARRAY_BOUNDS(void) +{ + struct array_bounds_flex_array *not_checked; + struct array_bounds *checked; + volatile int i; + + not_checked = kmalloc(sizeof(*not_checked) * 2, GFP_KERNEL); + checked = kmalloc(sizeof(*checked) * 2, GFP_KERNEL); + + pr_info("Array access within bounds ...\n"); + /* For both, touch all bytes in the actual member size. */ + for (i = 0; i < sizeof(checked->data); i++) + checked->data[i] = 'A'; + /* + * For the uninstrumented flex array member, also touch 1 byte + * beyond to verify it is correctly uninstrumented. + */ + for (i = 0; i < sizeof(not_checked->data) + 1; i++) + not_checked->data[i] = 'A'; + + pr_info("Array access beyond bounds ...\n"); + for (i = 0; i < sizeof(checked->data) + 1; i++) + checked->data[i] = 'B'; + + kfree(not_checked); + kfree(checked); +} + void lkdtm_CORRUPT_LIST_ADD(void) { /* diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 5ce4ac8c06fc..a5e344df9166 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -130,6 +130,9 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(HARDLOCKUP), CRASHTYPE(SPINLOCKUP), CRASHTYPE(HUNG_TASK), + CRASHTYPE(OVERFLOW_SIGNED), + CRASHTYPE(OVERFLOW_UNSIGNED), + CRASHTYPE(ARRAY_BOUNDS), CRASHTYPE(EXEC_DATA), CRASHTYPE(EXEC_STACK), CRASHTYPE(EXEC_KMALLOC), diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 8d13d0176624..601a2156a0d4 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -22,6 +22,9 @@ void lkdtm_SOFTLOCKUP(void); void lkdtm_HARDLOCKUP(void); void lkdtm_SPINLOCKUP(void); void lkdtm_HUNG_TASK(void); +void lkdtm_OVERFLOW_SIGNED(void); +void lkdtm_OVERFLOW_UNSIGNED(void); +void lkdtm_ARRAY_BOUNDS(void); void lkdtm_CORRUPT_LIST_ADD(void); void lkdtm_CORRUPT_LIST_DEL(void); void lkdtm_CORRUPT_USER_DS(void); diff --git a/drivers/misc/mic/Kconfig b/drivers/misc/mic/Kconfig index b6841ba6d922..8f201d019f5a 100644 --- a/drivers/misc/mic/Kconfig +++ b/drivers/misc/mic/Kconfig @@ -133,8 +133,4 @@ config VOP OS and tools for MIC to use with this driver are available from <http://software.intel.com/en-us/mic-developer>. -if VOP -source "drivers/vhost/Kconfig.vringh" -endif - endmenu diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c index 426820ab9afe..b486250923c5 100644 --- a/drivers/mtd/ubi/fastmap-wl.c +++ b/drivers/mtd/ubi/fastmap-wl.c @@ -39,6 +39,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root) return victim; } +static inline void return_unused_peb(struct ubi_device *ubi, + struct ubi_wl_entry *e) +{ + wl_tree_add(e, &ubi->free); + ubi->free_count++; +} + /** * return_unused_pool_pebs - returns unused PEB to the free tree. 
* @ubi: UBI device description object @@ -52,8 +59,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi, for (i = pool->used; i < pool->size; i++) { e = ubi->lookuptbl[pool->pebs[i]]; - wl_tree_add(e, &ubi->free); - ubi->free_count++; + return_unused_peb(ubi, e); } } @@ -361,6 +367,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi) return_unused_pool_pebs(ubi, &ubi->fm_pool); return_unused_pool_pebs(ubi, &ubi->fm_wl_pool); + if (ubi->fm_anchor) { + return_unused_peb(ubi, ubi->fm_anchor); + ubi->fm_anchor = NULL; + } + if (ubi->fm) { for (i = 0; i < ubi->fm->used_blocks; i++) kfree(ubi->fm->e[i]); diff --git a/drivers/mtd/ubi/ubi-media.h b/drivers/mtd/ubi/ubi-media.h index b5fe8f82281b..386db0598e95 100644 --- a/drivers/mtd/ubi/ubi-media.h +++ b/drivers/mtd/ubi/ubi-media.h @@ -498,6 +498,6 @@ struct ubi_fm_volhdr { struct ubi_fm_eba { __be32 magic; __be32 reserved_pebs; - __be32 pnum[0]; + __be32 pnum[]; } __packed; #endif /* !__UBI_MEDIA_H__ */ diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 837d690a8c60..5146cce5fe32 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c @@ -1875,7 +1875,8 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai) goto out_free; #ifdef CONFIG_MTD_UBI_FASTMAP - ubi_ensure_anchor_pebs(ubi); + if (!ubi->ro_mode && !ubi->fm_disabled) + ubi_ensure_anchor_pebs(ubi); #endif return 0; diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index e74e2bb61236..9db0570c5beb 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -58,8 +58,4 @@ config CAIF_VIRTIO ---help--- The CAIF driver for CAIF over Virtio. -if CAIF_VIRTIO -source "drivers/vhost/Kconfig.vringh" -endif - endif # CAIF_DRIVERS diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 086dfb1b9d0b..91cdc0a2b1a7 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -148,7 +148,7 @@ static void slc_bump(struct slcan *sl) u32 tmpid; char *cmd = sl->rbuff; - cf.can_id = 0; + memset(&cf, 0, sizeof(cf)); switch (*cmd) { case 'r': @@ -187,8 +187,6 @@ static void slc_bump(struct slcan *sl) else return; - *(u64 *) (&cf.data) = 0; /* clear payload */ - /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf.can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf.can_dlc; i++) { diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index affa5c6e135c..c7ac63f41918 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -480,7 +480,7 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) priv->slave_mii_bus->parent = ds->dev->parent; priv->slave_mii_bus->phy_mask = ~priv->indir_phy_mask; - err = of_mdiobus_register(priv->slave_mii_bus, dn); + err = mdiobus_register(priv->slave_mii_bus); if (err && dn) of_node_put(dn); @@ -1079,6 +1079,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) const struct bcm_sf2_of_data *data; struct b53_platform_data *pdata; struct dsa_switch_ops *ops; + struct device_node *ports; struct bcm_sf2_priv *priv; struct b53_device *dev; struct dsa_switch *ds; @@ -1146,7 +1147,11 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) set_bit(0, priv->cfp.used); set_bit(0, priv->cfp.unique); - bcm_sf2_identify_ports(priv, dn->child); + ports = of_find_node_by_name(dn, "ports"); + if (ports) { + bcm_sf2_identify_ports(priv, ports); + of_node_put(ports); + } priv->irq0 = irq_of_parse_and_map(dn, 0); priv->irq1 = irq_of_parse_and_map(dn, 1); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 
ef57552db260..2d0d91db0ddb 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1403,6 +1403,9 @@ mt7530_setup(struct dsa_switch *ds) continue; phy_node = of_parse_phandle(mac_np, "phy-handle", 0); + if (!phy_node) + continue; + if (phy_node->parent == priv->dev->of_node->parent) { ret = of_get_phy_mode(mac_np, &interface); if (ret && ret != -ENODEV) diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c index 97901c114bfa..fbe9d88b13c7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c +++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c @@ -491,7 +491,7 @@ get_ingress_preclass_record(struct aq_hw_s *hw, rec->snap[1] = packed_record[8] & 0xFF; rec->llc = (packed_record[8] >> 8) & 0xFF; - rec->llc = packed_record[9] << 8; + rec->llc |= packed_record[9] << 8; rec->mac_sa[0] = packed_record[10]; rec->mac_sa[0] |= packed_record[11] << 16; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 9638d65d8261..517caedc0a87 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -6874,7 +6874,8 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY: /* In this option, the first PHY makes sure to pass the * traffic through itself only. - * Its not clear how to reset the link on the second phy + * It's not clear how to reset the link on the second + * phy. */ active_external_phy = EXT_PHY1; break; diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.h b/drivers/net/ethernet/cavium/common/cavium_ptp.h index a04eccbc78e8..1e0ffe8f4152 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.h +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.h @@ -24,7 +24,7 @@ struct cavium_ptp { struct ptp_clock *ptp_clock; }; -#if IS_ENABLED(CONFIG_CAVIUM_PTP) +#if IS_REACHABLE(CONFIG_CAVIUM_PTP) struct cavium_ptp *cavium_ptp_get(void); void cavium_ptp_put(struct cavium_ptp *ptp); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 75fde0d4d493..a70018f067aa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3132,7 +3132,6 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) return ret; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - pi->xact_addr_filt = ret; return 0; } @@ -6672,6 +6671,10 @@ static void shutdown_one(struct pci_dev *pdev) if (adapter->port[i]->reg_state == NETREG_REGISTERED) cxgb_close(adapter->port[i]); + rtnl_lock(); + cxgb4_mqprio_stop_offload(adapter); + rtnl_unlock(); + if (is_uld(adapter)) { detach_ulds(adapter); t4_uld_clean_up(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c index ec3eb45ee3b4..e6af4906d674 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c @@ -301,6 +301,7 @@ static void cxgb4_mqprio_free_hw_resources(struct net_device *dev) cxgb4_clear_msix_aff(eorxq->msix->vec, eorxq->msix->aff_mask); free_irq(eorxq->msix->vec, &eorxq->rspq); + cxgb4_free_msix_idx_in_bmap(adap, eorxq->msix->idx); } free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl); @@ -611,6 +612,28 @@ out: return ret; } +void cxgb4_mqprio_stop_offload(struct adapter *adap) +{ + struct 
cxgb4_tc_port_mqprio *tc_port_mqprio; + struct net_device *dev; + u8 i; + + if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio) + return; + + for_each_port(adap, i) { + dev = adap->port[i]; + if (!dev) + continue; + + tc_port_mqprio = &adap->tc_mqprio->port_mqprio[i]; + if (!tc_port_mqprio->mqprio.qopt.num_tc) + continue; + + cxgb4_mqprio_disable_offload(dev); + } +} + int cxgb4_init_tc_mqprio(struct adapter *adap) { struct cxgb4_tc_port_mqprio *tc_port_mqprio, *port_mqprio; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h index c532f1ef8451..ff8794132b22 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h @@ -38,6 +38,7 @@ struct cxgb4_tc_mqprio { int cxgb4_setup_tc_mqprio(struct net_device *dev, struct tc_mqprio_qopt_offload *mqprio); +void cxgb4_mqprio_stop_offload(struct adapter *adap); int cxgb4_init_tc_mqprio(struct adapter *adap); void cxgb4_cleanup_tc_mqprio(struct adapter *adap); #endif /* __CXGB4_TC_MQPRIO_H__ */ diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 835b7816e372..87236206366f 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1731,7 +1731,7 @@ static int ftgmac100_setup_clk(struct ftgmac100 *priv) if (rc) goto cleanup_clk; - /* RCLK is for RMII, typically used for NCSI. Optional because its not + /* RCLK is for RMII, typically used for NCSI. Optional because it's not * necessary if it's the AST2400 MAC, or the MAC is configured for * RGMII, or the controller is not an ASPEED-based controller. */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 2f76908cae73..51117a5a6bbf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -150,14 +150,20 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp, u8 prio = act->vlan.prio; u16 vid = act->vlan.vid; - return mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, - act->id, vid, - proto, prio, extack); + err = mlxsw_sp_acl_rulei_act_vlan(mlxsw_sp, rulei, + act->id, vid, + proto, prio, extack); + if (err) + return err; + break; } case FLOW_ACTION_PRIORITY: - return mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei, - act->priority, - extack); + err = mlxsw_sp_acl_rulei_act_priority(mlxsw_sp, rulei, + act->priority, + extack); + if (err) + return err; + break; case FLOW_ACTION_MANGLE: { enum flow_action_mangle_base htype = act->mangle.htype; __be32 be_mask = (__force __be32) act->mangle.mask; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c index 9096ffd89e50..fbf714d027d8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_trap.c @@ -643,7 +643,7 @@ static int mlxsw_sp_trap_policer_bs(u64 burst, u8 *p_burst_size, { int bs = fls64(burst) - 1; - if (burst != (1 << bs)) { + if (burst != (BIT_ULL(bs))) { NL_SET_ERR_MSG_MOD(extack, "Policer burst size is not power of two"); return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 1a5fc2ae351c..29810a1aa210 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -369,8 +369,8 @@ int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, 
struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; u8 abs_vport_id = 0; - int rc = -EINVAL; u16 rx_mode = 0; + int rc; rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); if (rc) diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 1305522f72d6..40efe60eff8d 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -282,7 +282,6 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], { struct rmnet_priv *priv = netdev_priv(dev); struct net_device *real_dev; - struct rmnet_endpoint *ep; struct rmnet_port *port; u16 mux_id; @@ -297,19 +296,27 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], if (data[IFLA_RMNET_MUX_ID]) { mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]); - if (rmnet_get_endpoint(port, mux_id)) { - NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists"); - return -EINVAL; - } - ep = rmnet_get_endpoint(port, priv->mux_id); - if (!ep) - return -ENODEV; - hlist_del_init_rcu(&ep->hlnode); - hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]); + if (mux_id != priv->mux_id) { + struct rmnet_endpoint *ep; + + ep = rmnet_get_endpoint(port, priv->mux_id); + if (!ep) + return -ENODEV; - ep->mux_id = mux_id; - priv->mux_id = mux_id; + if (rmnet_get_endpoint(port, mux_id)) { + NL_SET_ERR_MSG_MOD(extack, + "MUX ID already exists"); + return -EINVAL; + } + + hlist_del_init_rcu(&ep->hlnode); + hlist_add_head_rcu(&ep->hlnode, + &port->muxed_ep[mux_id]); + + ep->mux_id = mux_id; + priv->mux_id = mux_id; + } } if (data[IFLA_RMNET_FLAGS]) { diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 55cb5730beb6..bf5bf05970a2 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5441,9 +5441,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &tp->napi, rtl8169_poll, NAPI_POLL_WEIGHT); - dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | - NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX; dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_HIGHDMA; dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; @@ -5460,26 +5459,26 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* Disallow toggling */ dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX; + if (rtl_chip_supports_csum_v2(tp)) + dev->hw_features |= NETIF_F_IPV6_CSUM; + + dev->features |= dev->hw_features; + + /* There has been a number of reports that using SG/TSO results in + * tx timeouts. However for a lot of people SG/TSO works fine. + * Therefore disable both features by default, but allow users to + * enable them. Use at own risk! + */ if (rtl_chip_supports_csum_v2(tp)) { - dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6; + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6; dev->gso_max_size = RTL_GSO_MAX_SIZE_V2; dev->gso_max_segs = RTL_GSO_MAX_SEGS_V2; } else { + dev->hw_features |= NETIF_F_SG | NETIF_F_TSO; dev->gso_max_size = RTL_GSO_MAX_SIZE_V1; dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1; } - /* RTL8168e-vl and one RTL8168c variant are known to have a - * HW issue with TSO. 
- */ - if (tp->mac_version == RTL_GIGA_MAC_VER_34 || - tp->mac_version == RTL_GIGA_MAC_VER_22) { - dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); - dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG); - } - - dev->features |= dev->hw_features; - dev->hw_features |= NETIF_F_RXALL; dev->hw_features |= NETIF_F_RXFCS; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 542784300620..efc6ec1b8027 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -207,7 +207,7 @@ static void dwmac1000_set_filter(struct mac_device_info *hw, reg++; } - while (reg <= perfect_addr_number) { + while (reg < perfect_addr_number) { writel(0, ioaddr + GMAC_ADDR_HIGH(reg)); writel(0, ioaddr + GMAC_ADDR_LOW(reg)); reg++; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index 0e4575f7bedb..ad4df9bddcf3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -577,8 +577,13 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, value |= XGMAC_VLAN_EDVLP; value |= XGMAC_VLAN_ESVL; value |= XGMAC_VLAN_DOVLTC; + } else { + value &= ~XGMAC_VLAN_EDVLP; + value &= ~XGMAC_VLAN_ESVL; + value &= ~XGMAC_VLAN_DOVLTC; } + value &= ~XGMAC_VLAN_VID; writel(value, ioaddr + XGMAC_VLAN_TAG); } else if (perfect_match) { u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); @@ -589,13 +594,19 @@ static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash, value = readl(ioaddr + XGMAC_VLAN_TAG); + value &= ~XGMAC_VLAN_VTHM; value |= XGMAC_VLAN_ETV; if (is_double) { value |= XGMAC_VLAN_EDVLP; value |= XGMAC_VLAN_ESVL; value |= XGMAC_VLAN_DOVLTC; + } else { + value &= ~XGMAC_VLAN_EDVLP; + value &= ~XGMAC_VLAN_ESVL; + value &= ~XGMAC_VLAN_DOVLTC; } + value &= ~XGMAC_VLAN_VID; writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG); } else { u32 value = readl(ioaddr + XGMAC_PACKET_FILTER); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2fb671e61ee8..e6898fd5223f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4566,9 +4566,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid return ret; } - ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); + if (priv->hw->num_vlan) { + ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); + if (ret) + return ret; + } - return ret; + return 0; } static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid) @@ -4581,9 +4585,12 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi is_double = true; clear_bit(vid, priv->active_vlans); - ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); - if (ret) - return ret; + + if (priv->hw->num_vlan) { + ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); + if (ret) + return ret; + } return stmmac_vlan_update(priv, is_double); } diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index da82d7f16a09..0d580d81d910 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -2594,6 +2594,9 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) return PTR_ERR(dev); macsec = macsec_priv(dev); + if 
(!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) + return -EINVAL; + offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]); if (macsec->offload == offload) return 0; diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 481cf48c9b9e..31f731e6df72 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -425,8 +425,8 @@ static int at803x_parse_dt(struct phy_device *phydev) */ if (at803x_match_phy_id(phydev, ATH8030_PHY_ID) || at803x_match_phy_id(phydev, ATH8035_PHY_ID)) { - priv->clk_25m_reg &= ~AT8035_CLK_OUT_MASK; - priv->clk_25m_mask &= ~AT8035_CLK_OUT_MASK; + priv->clk_25m_reg &= AT8035_CLK_OUT_MASK; + priv->clk_25m_mask &= AT8035_CLK_OUT_MASK; } } diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 2ec19e5540bf..05d20343b816 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -25,6 +25,7 @@ #include <linux/micrel_phy.h> #include <linux/of.h> #include <linux/clk.h> +#include <linux/delay.h> /* Operation Mode Strap Override */ #define MII_KSZPHY_OMSO 0x16 @@ -952,6 +953,12 @@ static int kszphy_resume(struct phy_device *phydev) genphy_resume(phydev); + /* After switching from power-down to normal mode, an internal global + * reset is automatically generated. Wait a minimum of 1 ms before + * read/write access to the PHY registers. + */ + usleep_range(1000, 2000); + ret = kszphy_config_reset(phydev); if (ret) return ret; diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 228fe449dc6d..07476c6510f2 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1678,8 +1678,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, alloc_frag->offset += buflen; } err = tun_xdp_act(tun, xdp_prog, &xdp, act); - if (err < 0) - goto err_xdp; + if (err < 0) { + if (act == XDP_REDIRECT || act == XDP_TX) + put_page(alloc_frag->page); + goto out; + } + if (err == XDP_REDIRECT) xdp_do_flush(); if (err != XDP_PASS) @@ -1693,8 +1697,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); -err_xdp: - put_page(alloc_frag->page); out: rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index 8783e2ab3ec0..0ef7e1f443e3 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -54,6 +54,7 @@ static const char driver_name[] = "pegasus"; #undef PEGASUS_WRITE_EEPROM #define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ BMSR_100FULL | BMSR_ANEGCAPABLE) +#define CARRIER_CHECK_DELAY (2 * HZ) static bool loopback; static bool mii_mode; @@ -1089,17 +1090,12 @@ static inline void setup_pegasus_II(pegasus_t *pegasus) set_register(pegasus, Reg81, 2); } - -static int pegasus_count; -static struct workqueue_struct *pegasus_workqueue; -#define CARRIER_CHECK_DELAY (2 * HZ) - static void check_carrier(struct work_struct *work) { pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); set_carrier(pegasus->net); if (!(pegasus->flags & PEGASUS_UNPLUG)) { - queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, + queue_delayed_work(system_long_wq, &pegasus->carrier_check, CARRIER_CHECK_DELAY); } } @@ -1120,18 +1116,6 @@ static int pegasus_blacklisted(struct usb_device *udev) return 0; } -/* we rely on probe() and remove() being serialized so we - * don't need extra locking on pegasus_count. 
- */ -static void pegasus_dec_workqueue(void) -{ - pegasus_count--; - if (pegasus_count == 0) { - destroy_workqueue(pegasus_workqueue); - pegasus_workqueue = NULL; - } -} - static int pegasus_probe(struct usb_interface *intf, const struct usb_device_id *id) { @@ -1144,14 +1128,6 @@ static int pegasus_probe(struct usb_interface *intf, if (pegasus_blacklisted(dev)) return -ENODEV; - if (pegasus_count == 0) { - pegasus_workqueue = alloc_workqueue("pegasus", WQ_MEM_RECLAIM, - 0); - if (!pegasus_workqueue) - return -ENOMEM; - } - pegasus_count++; - net = alloc_etherdev(sizeof(struct pegasus)); if (!net) goto out; @@ -1209,7 +1185,7 @@ static int pegasus_probe(struct usb_interface *intf, res = register_netdev(net); if (res) goto out3; - queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, + queue_delayed_work(system_long_wq, &pegasus->carrier_check, CARRIER_CHECK_DELAY); dev_info(&intf->dev, "%s, %s, %pM\n", net->name, usb_dev_id[dev_index].name, net->dev_addr); @@ -1222,7 +1198,6 @@ out2: out1: free_netdev(net); out: - pegasus_dec_workqueue(); return res; } @@ -1237,7 +1212,7 @@ static void pegasus_disconnect(struct usb_interface *intf) } pegasus->flags |= PEGASUS_UNPLUG; - cancel_delayed_work(&pegasus->carrier_check); + cancel_delayed_work_sync(&pegasus->carrier_check); unregister_netdev(pegasus->net); unlink_all_urbs(pegasus); free_all_urbs(pegasus); @@ -1246,7 +1221,6 @@ static void pegasus_disconnect(struct usb_interface *intf) pegasus->rx_skb = NULL; } free_netdev(pegasus->net); - pegasus_dec_workqueue(); } static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) @@ -1254,7 +1228,7 @@ static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) struct pegasus *pegasus = usb_get_intfdata(intf); netif_device_detach(pegasus->net); - cancel_delayed_work(&pegasus->carrier_check); + cancel_delayed_work_sync(&pegasus->carrier_check); if (netif_running(pegasus->net)) { usb_kill_urb(pegasus->rx_urb); usb_kill_urb(pegasus->intr_urb); @@ -1276,7 +1250,7 @@ static int pegasus_resume(struct usb_interface *intf) pegasus->intr_urb->actual_length = 0; intr_callback(pegasus->intr_urb); } - queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, + queue_delayed_work(system_long_wq, &pegasus->carrier_check, CARRIER_CHECK_DELAY); return 0; } diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c index f66c0f8f6f4a..ecb3fccca603 100644 --- a/drivers/net/wimax/i2400m/driver.c +++ b/drivers/net/wimax/i2400m/driver.c @@ -740,9 +740,6 @@ EXPORT_SYMBOL_GPL(i2400m_error_recovery); static int i2400m_bm_buf_alloc(struct i2400m *i2400m) { - int result; - - result = -ENOMEM; i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL); if (i2400m->bm_cmd_buf == NULL) goto error_bm_cmd_kzalloc; @@ -754,7 +751,7 @@ int i2400m_bm_buf_alloc(struct i2400m *i2400m) error_bm_ack_buf_kzalloc: kfree(i2400m->bm_cmd_buf); error_bm_cmd_kzalloc: - return result; + return -ENOMEM; } @@ -843,7 +840,7 @@ EXPORT_SYMBOL_GPL(i2400m_reset); */ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) { - int result = -ENODEV; + int result; struct device *dev = i2400m_dev(i2400m); struct wimax_dev *wimax_dev = &i2400m->wimax_dev; struct net_device *net_dev = i2400m->wimax_dev.net_dev; diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c index a8b515968569..09087c38fabd 100644 --- a/drivers/nvdimm/bus.c +++ b/drivers/nvdimm/bus.c @@ -1042,8 +1042,10 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm, return -EFAULT; } - 
if (!desc || (desc->out_num + desc->in_num == 0) || - !test_bit(cmd, &cmd_mask)) + if (!desc || + (desc->out_num + desc->in_num == 0) || + cmd > ND_CMD_CALL || + !test_bit(cmd, &cmd_mask)) return -ENOTTY; /* fail write commands (when read-only) */ diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c index 64776ed15bb3..7d4ddc4d9322 100644 --- a/drivers/nvdimm/dimm.c +++ b/drivers/nvdimm/dimm.c @@ -99,7 +99,7 @@ static int nvdimm_probe(struct device *dev) if (ndd->ns_current >= 0) { rc = nd_label_reserve_dpa(ndd); if (rc == 0) - nvdimm_set_aliasing(dev); + nvdimm_set_labeling(dev); } nvdimm_bus_unlock(dev); diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index 94ea6dba6b4f..b7b77e8d9027 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -32,7 +32,7 @@ int nvdimm_check_config_data(struct device *dev) if (!nvdimm->cmd_mask || !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) { - if (test_bit(NDD_ALIASING, &nvdimm->flags)) + if (test_bit(NDD_LABELING, &nvdimm->flags)) return -ENXIO; else return -ENOTTY; @@ -173,11 +173,11 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, return rc; } -void nvdimm_set_aliasing(struct device *dev) +void nvdimm_set_labeling(struct device *dev) { struct nvdimm *nvdimm = to_nvdimm(dev); - set_bit(NDD_ALIASING, &nvdimm->flags); + set_bit(NDD_LABELING, &nvdimm->flags); } void nvdimm_set_locked(struct device *dev) @@ -312,8 +312,9 @@ static ssize_t flags_show(struct device *dev, { struct nvdimm *nvdimm = to_nvdimm(dev); - return sprintf(buf, "%s%s\n", + return sprintf(buf, "%s%s%s\n", test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "", + test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "", test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : ""); } static DEVICE_ATTR_RO(flags); @@ -562,6 +563,21 @@ int nvdimm_security_freeze(struct nvdimm *nvdimm) return rc; } +static unsigned long dpa_align(struct nd_region *nd_region) +{ + struct device *dev = &nd_region->dev; + + if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), + "bus lock required for capacity provision\n")) + return 0; + if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align + % nd_region->ndr_mappings, + "invalid region align %#lx mappings: %d\n", + nd_region->align, nd_region->ndr_mappings)) + return 0; + return nd_region->align / nd_region->ndr_mappings; +} + int alias_dpa_busy(struct device *dev, void *data) { resource_size_t map_end, blk_start, new; @@ -570,6 +586,7 @@ int alias_dpa_busy(struct device *dev, void *data) struct nd_region *nd_region; struct nvdimm_drvdata *ndd; struct resource *res; + unsigned long align; int i; if (!is_memory(dev)) @@ -607,13 +624,21 @@ int alias_dpa_busy(struct device *dev, void *data) * Find the free dpa from the end of the last pmem allocation to * the end of the interleave-set mapping. 
*/ + align = dpa_align(nd_region); + if (!align) + return 0; + for_each_dpa_resource(ndd, res) { + resource_size_t start, end; + if (strncmp(res->name, "pmem", 4) != 0) continue; - if ((res->start >= blk_start && res->start < map_end) - || (res->end >= blk_start - && res->end <= map_end)) { - new = max(blk_start, min(map_end + 1, res->end + 1)); + + start = ALIGN_DOWN(res->start, align); + end = ALIGN(res->end + 1, align) - 1; + if ((start >= blk_start && start < map_end) + || (end >= blk_start && end <= map_end)) { + new = max(blk_start, min(map_end, end) + 1); if (new != blk_start) { blk_start = new; goto retry; @@ -653,6 +678,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) .res = NULL, }; struct resource *res; + unsigned long align; if (!ndd) return 0; @@ -660,10 +686,20 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region) device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy); /* now account for busy blk allocations in unaliased dpa */ + align = dpa_align(nd_region); + if (!align) + return 0; for_each_dpa_resource(ndd, res) { + resource_size_t start, end, size; + if (strncmp(res->name, "blk", 3) != 0) continue; - info.available -= resource_size(res); + start = ALIGN_DOWN(res->start, align); + end = ALIGN(res->end + 1, align) - 1; + size = end - start + 1; + if (size >= info.available) + return 0; + info.available -= size; } return info.available; @@ -682,19 +718,31 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region, struct nvdimm_bus *nvdimm_bus; resource_size_t max = 0; struct resource *res; + unsigned long align; /* if a dimm is disabled the available capacity is zero */ if (!ndd) return 0; + align = dpa_align(nd_region); + if (!align) + return 0; + nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm)) return 0; for_each_dpa_resource(ndd, res) { + resource_size_t start, end; + if (strcmp(res->name, "pmem-reserve") != 0) continue; - if (resource_size(res) > max) - max = resource_size(res); + /* trim free space relative to current alignment setting */ + start = ALIGN(res->start, align); + end = ALIGN_DOWN(res->end + 1, align) - 1; + if (end < start) + continue; + if (end - start + 1 > max) + max = end - start + 1; } release_free_pmem(nvdimm_bus, nd_mapping); return max; @@ -722,24 +770,33 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, struct nvdimm_drvdata *ndd = to_ndd(nd_mapping); struct resource *res; const char *reason; + unsigned long align; if (!ndd) return 0; + align = dpa_align(nd_region); + if (!align) + return 0; + map_start = nd_mapping->start; map_end = map_start + nd_mapping->size - 1; blk_start = max(map_start, map_end + 1 - *overlap); for_each_dpa_resource(ndd, res) { - if (res->start >= map_start && res->start < map_end) { + resource_size_t start, end; + + start = ALIGN_DOWN(res->start, align); + end = ALIGN(res->end + 1, align) - 1; + if (start >= map_start && start < map_end) { if (strncmp(res->name, "blk", 3) == 0) blk_start = min(blk_start, - max(map_start, res->start)); - else if (res->end > map_end) { + max(map_start, start)); + else if (end > map_end) { reason = "misaligned to iset"; goto err; } else - busy += resource_size(res); - } else if (res->end >= map_start && res->end <= map_end) { + busy += end - start + 1; + } else if (end >= map_start && end <= map_end) { if (strncmp(res->name, "blk", 3) == 0) { /* * If a BLK allocation overlaps the start of @@ -748,8 +805,8 @@ resource_size_t nd_pmem_available_dpa(struct 
nd_region *nd_region, */ blk_start = map_start; } else - busy += resource_size(res); - } else if (map_start > res->start && map_start < res->end) { + busy += end - start + 1; + } else if (map_start > start && map_start < end) { /* total eclipse of the mapping */ busy += nd_mapping->size; blk_start = map_start; @@ -759,7 +816,7 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region, *overlap = map_end + 1 - blk_start; available = blk_start - map_start; if (busy < available) - return available - busy; + return ALIGN_DOWN(available - busy, align); return 0; err: diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c index e02f60ad6c99..4cd18be9d0e9 100644 --- a/drivers/nvdimm/e820.c +++ b/drivers/nvdimm/e820.c @@ -7,6 +7,7 @@ #include <linux/memory_hotplug.h> #include <linux/libnvdimm.h> #include <linux/module.h> +#include <linux/numa.h> static int e820_pmem_remove(struct platform_device *pdev) { @@ -16,27 +17,16 @@ static int e820_pmem_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_MEMORY_HOTPLUG -static int e820_range_to_nid(resource_size_t addr) -{ - return memory_add_physaddr_to_nid(addr); -} -#else -static int e820_range_to_nid(resource_size_t addr) -{ - return NUMA_NO_NODE; -} -#endif - static int e820_register_one(struct resource *res, void *data) { struct nd_region_desc ndr_desc; struct nvdimm_bus *nvdimm_bus = data; + int nid = phys_to_target_node(res->start); memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.res = res; - ndr_desc.numa_node = e820_range_to_nid(res->start); - ndr_desc.target_node = ndr_desc.numa_node; + ndr_desc.numa_node = numa_map_to_online_node(nid); + ndr_desc.target_node = nid; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); if (!nvdimm_pmem_region_create(nvdimm_bus, &ndr_desc)) return -ENXIO; diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h index 4c7b775c2811..956b6d1bd8cc 100644 --- a/drivers/nvdimm/label.h +++ b/drivers/nvdimm/label.h @@ -62,7 +62,7 @@ struct nd_namespace_index { __le16 major; __le16 minor; __le64 checksum; - u8 free[0]; + u8 free[]; }; /** diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 032dc61725ff..ae155e860fdc 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -10,6 +10,7 @@ #include <linux/nd.h> #include "nd-core.h" #include "pmem.h" +#include "pfn.h" #include "nd.h" static void namespace_io_release(struct device *dev) @@ -541,6 +542,11 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd, { bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0; bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0; + unsigned long align; + + align = nd_region->align / nd_region->ndr_mappings; + valid->start = ALIGN(valid->start, align); + valid->end = ALIGN_DOWN(valid->end + 1, align) - 1; if (valid->start >= valid->end) goto invalid; @@ -980,10 +986,10 @@ static ssize_t __size_store(struct device *dev, unsigned long long val) return -ENXIO; } - div_u64_rem(val, PAGE_SIZE * nd_region->ndr_mappings, &remainder); + div_u64_rem(val, nd_region->align, &remainder); if (remainder) { dev_dbg(dev, "%llu is not %ldK aligned\n", val, - (PAGE_SIZE * nd_region->ndr_mappings) / SZ_1K); + nd_region->align / SZ_1K); return -EINVAL; } @@ -1739,6 +1745,22 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev) return ERR_PTR(-ENODEV); } + /* + * Note, alignment validation for fsdax and devdax mode + * namespaces happens in nd_pfn_validate() where infoblock + * padding parameters can be applied. 
+ */ + if (pmem_should_map_pages(dev)) { + struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev); + struct resource *res = &nsio->res; + + if (!IS_ALIGNED(res->start | (res->end + 1), + memremap_compat_align())) { + dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res); + return ERR_PTR(-EOPNOTSUPP); + } + } + if (is_namespace_pmem(&ndns->dev)) { struct nd_namespace_pmem *nspm; @@ -2521,7 +2543,7 @@ static int init_active_labels(struct nd_region *nd_region) if (!ndd) { if (test_bit(NDD_LOCKED, &nvdimm->flags)) /* fail, label data may be unreadable */; - else if (test_bit(NDD_ALIASING, &nvdimm->flags)) + else if (test_bit(NDD_LABELING, &nvdimm->flags)) /* fail, labels needed to disambiguate dpa */; else return 0; diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h index c9f6a5b5253a..85dbb2a322b9 100644 --- a/drivers/nvdimm/nd.h +++ b/drivers/nvdimm/nd.h @@ -39,7 +39,7 @@ struct nd_region_data { int ns_count; int ns_active; unsigned int hints_shift; - void __iomem *flush_wpq[0]; + void __iomem *flush_wpq[]; }; static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd, @@ -146,6 +146,7 @@ struct nd_region { struct device *btt_seed; struct device *pfn_seed; struct device *dax_seed; + unsigned long align; u16 ndr_mappings; u64 ndr_size; u64 ndr_start; @@ -156,7 +157,7 @@ struct nd_region { struct nd_interleave_set *nd_set; struct nd_percpu_lane __percpu *lane; int (*flush)(struct nd_region *nd_region, struct bio *bio); - struct nd_mapping mapping[0]; + struct nd_mapping mapping[]; }; struct nd_blk_region { @@ -252,7 +253,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len); long nvdimm_clear_poison(struct device *dev, phys_addr_t phys, unsigned int len); -void nvdimm_set_aliasing(struct device *dev); +void nvdimm_set_labeling(struct device *dev); void nvdimm_set_locked(struct device *dev); void nvdimm_clear_locked(struct device *dev); int nvdimm_security_setup_events(struct device *dev); diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 8224d1431ea9..6826a274a1f1 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -62,8 +62,10 @@ static int of_pmem_region_probe(struct platform_device *pdev) if (is_volatile) region = nvdimm_volatile_region_create(bus, &ndr_desc); - else + else { + set_bit(ND_REGION_PERSIST_MEMCTRL, &ndr_desc.flags); region = nvdimm_pmem_region_create(bus, &ndr_desc); + } if (!region) dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n", diff --git a/drivers/nvdimm/pfn.h b/drivers/nvdimm/pfn.h index acb19517f678..37cb1b8a2a39 100644 --- a/drivers/nvdimm/pfn.h +++ b/drivers/nvdimm/pfn.h @@ -24,6 +24,18 @@ struct nd_pfn_sb { __le64 npfns; __le32 mode; /* minor-version-1 additions for section alignment */ + /** + * @start_pad: Deprecated attribute to pad start-misaligned namespaces + * + * start_pad is deprecated because the original definition did + * not comprehend that dataoff is relative to the base address + * of the namespace not the start_pad adjusted base. The result + * is that the dax path is broken, but the block-I/O path is + * not. The kernel will no longer create namespaces using start + * padding, but it still supports block-I/O for legacy + * configurations mainly to allow a backup, reconfigure the + * namespace, and restore flow to repair dax operation. 
+ */ __le32 start_pad; __le32 end_trunc; /* minor-version-2 record the base alignment of the mapping */ diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c index b94f7a7e94b8..34db557dbad1 100644 --- a/drivers/nvdimm/pfn_devs.c +++ b/drivers/nvdimm/pfn_devs.c @@ -446,6 +446,7 @@ static bool nd_supported_alignment(unsigned long align) int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) { u64 checksum, offset; + struct resource *res; enum nd_pfn_mode mode; struct nd_namespace_io *nsio; unsigned long align, start_pad; @@ -561,14 +562,14 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n", nd_pfn->align, align, nd_pfn->mode, mode); - return -EINVAL; + return -EOPNOTSUPP; } } if (align > nvdimm_namespace_capacity(ndns)) { dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n", align, nvdimm_namespace_capacity(ndns)); - return -EINVAL; + return -EOPNOTSUPP; } /* @@ -578,18 +579,31 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig) * established. */ nsio = to_nd_namespace_io(&ndns->dev); - if (offset >= resource_size(&nsio->res)) { + res = &nsio->res; + if (offset >= resource_size(res)) { dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n", dev_name(&ndns->dev)); - return -EBUSY; + return -EOPNOTSUPP; } - if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) + if ((align && !IS_ALIGNED(res->start + offset + start_pad, align)) || !IS_ALIGNED(offset, PAGE_SIZE)) { dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled align: %#lx\n", offset, align); - return -ENXIO; + return -EOPNOTSUPP; + } + + if (!IS_ALIGNED(res->start + le32_to_cpu(pfn_sb->start_pad), + memremap_compat_align())) { + dev_err(&nd_pfn->dev, "resource start misaligned\n"); + return -EOPNOTSUPP; + } + + if (!IS_ALIGNED(res->end + 1 - le32_to_cpu(pfn_sb->end_trunc), + memremap_compat_align())) { + dev_err(&nd_pfn->dev, "resource end misaligned\n"); + return -EOPNOTSUPP; } return 0; @@ -750,7 +764,19 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn) start = nsio->res.start; size = resource_size(&nsio->res); npfns = PHYS_PFN(size - SZ_8K); - align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT)); + align = max(nd_pfn->align, memremap_compat_align()); + + /* + * When @start is misaligned fail namespace creation. See + * the 'struct nd_pfn_sb' commentary on why ->start_pad is not + * an option. 
+ */ + if (!IS_ALIGNED(start, memremap_compat_align())) { + dev_err(&nd_pfn->dev, "%s: start %pa misaligned to %#lx\n", + dev_name(&ndns->dev), &start, + memremap_compat_align()); + return -EINVAL; + } end_trunc = start + size - ALIGN_DOWN(start + size, align); if (nd_pfn->mode == PFN_MODE_PMEM) { /* diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 4ffc6f7ca131..2df6994acf83 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -136,9 +136,25 @@ static blk_status_t read_pmem(struct page *page, unsigned int off, return BLK_STS_OK; } -static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, - unsigned int len, unsigned int off, unsigned int op, - sector_t sector) +static blk_status_t pmem_do_read(struct pmem_device *pmem, + struct page *page, unsigned int page_off, + sector_t sector, unsigned int len) +{ + blk_status_t rc; + phys_addr_t pmem_off = sector * 512 + pmem->data_offset; + void *pmem_addr = pmem->virt_addr + pmem_off; + + if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) + return BLK_STS_IOERR; + + rc = read_pmem(page, page_off, pmem_addr, len); + flush_dcache_page(page); + return rc; +} + +static blk_status_t pmem_do_write(struct pmem_device *pmem, + struct page *page, unsigned int page_off, + sector_t sector, unsigned int len) { blk_status_t rc = BLK_STS_OK; bool bad_pmem = false; @@ -148,34 +164,25 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page, if (unlikely(is_bad_pmem(&pmem->bb, sector, len))) bad_pmem = true; - if (!op_is_write(op)) { - if (unlikely(bad_pmem)) - rc = BLK_STS_IOERR; - else { - rc = read_pmem(page, off, pmem_addr, len); - flush_dcache_page(page); - } - } else { - /* - * Note that we write the data both before and after - * clearing poison. The write before clear poison - * handles situations where the latest written data is - * preserved and the clear poison operation simply marks - * the address range as valid without changing the data. - * In this case application software can assume that an - * interrupted write will either return the new good - * data or an error. - * - * However, if pmem_clear_poison() leaves the data in an - * indeterminate state we need to perform the write - * after clear poison. - */ - flush_dcache_page(page); - write_pmem(pmem_addr, page, off, len); - if (unlikely(bad_pmem)) { - rc = pmem_clear_poison(pmem, pmem_off, len); - write_pmem(pmem_addr, page, off, len); - } + /* + * Note that we write the data both before and after + * clearing poison. The write before clear poison + * handles situations where the latest written data is + * preserved and the clear poison operation simply marks + * the address range as valid without changing the data. + * In this case application software can assume that an + * interrupted write will either return the new good + * data or an error. + * + * However, if pmem_clear_poison() leaves the data in an + * indeterminate state we need to perform the write + * after clear poison. 
+ */ + flush_dcache_page(page); + write_pmem(pmem_addr, page, page_off, len); + if (unlikely(bad_pmem)) { + rc = pmem_clear_poison(pmem, pmem_off, len); + write_pmem(pmem_addr, page, page_off, len); } return rc; @@ -197,8 +204,12 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio) do_acct = nd_iostat_start(bio, &start); bio_for_each_segment(bvec, bio, iter) { - rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, - bvec.bv_offset, bio_op(bio), iter.bi_sector); + if (op_is_write(bio_op(bio))) + rc = pmem_do_write(pmem, bvec.bv_page, bvec.bv_offset, + iter.bi_sector, bvec.bv_len); + else + rc = pmem_do_read(pmem, bvec.bv_page, bvec.bv_offset, + iter.bi_sector, bvec.bv_len); if (rc) { bio->bi_status = rc; break; @@ -223,9 +234,12 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector, struct pmem_device *pmem = bdev->bd_queue->queuedata; blk_status_t rc; - rc = pmem_do_bvec(pmem, page, hpage_nr_pages(page) * PAGE_SIZE, - 0, op, sector); - + if (op_is_write(op)) + rc = pmem_do_write(pmem, page, 0, sector, + hpage_nr_pages(page) * PAGE_SIZE); + else + rc = pmem_do_read(pmem, page, 0, sector, + hpage_nr_pages(page) * PAGE_SIZE); /* * The ->rw_page interface is subtle and tricky. The core * retries on any error, so we can only invoke page_endio() in @@ -268,6 +282,16 @@ static const struct block_device_operations pmem_fops = { .revalidate_disk = nvdimm_revalidate_disk, }; +static int pmem_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, + size_t nr_pages) +{ + struct pmem_device *pmem = dax_get_private(dax_dev); + + return blk_status_to_errno(pmem_do_write(pmem, ZERO_PAGE(0), 0, + PFN_PHYS(pgoff) >> SECTOR_SHIFT, + PAGE_SIZE)); +} + static long pmem_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn) { @@ -299,6 +323,7 @@ static const struct dax_operations pmem_dax_ops = { .dax_supported = generic_fsdax_supported, .copy_from_iter = pmem_copy_from_iter, .copy_to_iter = pmem_copy_to_iter, + .zero_page_range = pmem_dax_zero_page_range, }; static const struct attribute_group *pmem_attribute_groups[] = { @@ -461,9 +486,9 @@ static int pmem_attach_disk(struct device *dev, if (is_nvdimm_sync(nd_region)) flags = DAXDEV_F_SYNC; dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops, flags); - if (!dax_dev) { + if (IS_ERR(dax_dev)) { put_disk(disk); - return -ENOMEM; + return PTR_ERR(dax_dev); } dax_write_cache(dax_dev, nvdimm_has_cache(nd_region)); pmem->dax_dev = dax_dev; diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index a19e535830d9..ccbb5b43b8b2 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c @@ -195,16 +195,16 @@ EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data); int nd_region_to_nstype(struct nd_region *nd_region) { if (is_memory(&nd_region->dev)) { - u16 i, alias; + u16 i, label; - for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) { + for (i = 0, label = 0; i < nd_region->ndr_mappings; i++) { struct nd_mapping *nd_mapping = &nd_region->mapping[i]; struct nvdimm *nvdimm = nd_mapping->nvdimm; - if (test_bit(NDD_ALIASING, &nvdimm->flags)) - alias++; + if (test_bit(NDD_LABELING, &nvdimm->flags)) + label++; } - if (alias) + if (label) return ND_DEVICE_NAMESPACE_PMEM; else return ND_DEVICE_NAMESPACE_IO; @@ -216,21 +216,25 @@ int nd_region_to_nstype(struct nd_region *nd_region) } EXPORT_SYMBOL(nd_region_to_nstype); -static ssize_t size_show(struct device *dev, - struct device_attribute *attr, char *buf) +static unsigned long long 
region_size(struct nd_region *nd_region) { - struct nd_region *nd_region = to_nd_region(dev); - unsigned long long size = 0; - - if (is_memory(dev)) { - size = nd_region->ndr_size; + if (is_memory(&nd_region->dev)) { + return nd_region->ndr_size; } else if (nd_region->ndr_mappings == 1) { struct nd_mapping *nd_mapping = &nd_region->mapping[0]; - size = nd_mapping->size; + return nd_mapping->size; } - return sprintf(buf, "%llu\n", size); + return 0; +} + +static ssize_t size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_region *nd_region = to_nd_region(dev); + + return sprintf(buf, "%llu\n", region_size(nd_region)); } static DEVICE_ATTR_RO(size); @@ -529,6 +533,54 @@ static ssize_t read_only_store(struct device *dev, } static DEVICE_ATTR_RW(read_only); +static ssize_t align_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct nd_region *nd_region = to_nd_region(dev); + + return sprintf(buf, "%#lx\n", nd_region->align); +} + +static ssize_t align_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t len) +{ + struct nd_region *nd_region = to_nd_region(dev); + unsigned long val, dpa; + u32 remainder; + int rc; + + rc = kstrtoul(buf, 0, &val); + if (rc) + return rc; + + if (!nd_region->ndr_mappings) + return -ENXIO; + + /* + * Ensure space-align is evenly divisible by the region + * interleave-width because the kernel typically has no facility + * to determine which DIMM(s), dimm-physical-addresses, would + * contribute to the tail capacity in system-physical-address + * space for the namespace. + */ + dpa = div_u64_rem(val, nd_region->ndr_mappings, &remainder); + if (!is_power_of_2(dpa) || dpa < PAGE_SIZE + || val > region_size(nd_region) || remainder) + return -EINVAL; + + /* + * Given that space allocation consults this value multiple + * times ensure it does not change for the duration of the + * allocation. + */ + nvdimm_bus_lock(dev); + nd_region->align = val; + nvdimm_bus_unlock(dev); + + return len; +} +static DEVICE_ATTR_RW(align); + static ssize_t region_badblocks_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -571,6 +623,7 @@ static DEVICE_ATTR_RO(persistence_domain); static struct attribute *nd_region_attributes[] = { &dev_attr_size.attr, + &dev_attr_align.attr, &dev_attr_nstype.attr, &dev_attr_mappings.attr, &dev_attr_btt_seed.attr, @@ -626,6 +679,19 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n) return a->mode; } + if (a == &dev_attr_align.attr) { + int i; + + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + if (test_bit(NDD_LABELING, &nvdimm->flags)) + return a->mode; + } + return 0; + } + if (a != &dev_attr_set_cookie.attr && a != &dev_attr_available_size.attr) return a->mode; @@ -935,6 +1001,41 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane) } EXPORT_SYMBOL(nd_region_release_lane); +/* + * PowerPC requires this alignment for memremap_pages(). All other archs + * should be ok with SUBSECTION_SIZE (see memremap_compat_align()). 
+ */ +#define MEMREMAP_COMPAT_ALIGN_MAX SZ_16M + +static unsigned long default_align(struct nd_region *nd_region) +{ + unsigned long align; + int i, mappings; + u32 remainder; + + if (is_nd_blk(&nd_region->dev)) + align = PAGE_SIZE; + else + align = MEMREMAP_COMPAT_ALIGN_MAX; + + for (i = 0; i < nd_region->ndr_mappings; i++) { + struct nd_mapping *nd_mapping = &nd_region->mapping[i]; + struct nvdimm *nvdimm = nd_mapping->nvdimm; + + if (test_bit(NDD_ALIASING, &nvdimm->flags)) { + align = MEMREMAP_COMPAT_ALIGN_MAX; + break; + } + } + + mappings = max_t(u16, 1, nd_region->ndr_mappings); + div_u64_rem(align, mappings, &remainder); + if (remainder) + align *= mappings; + + return align; +} + static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, struct nd_region_desc *ndr_desc, const struct device_type *dev_type, const char *caller) @@ -1039,6 +1140,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus, dev->of_node = ndr_desc->of_node; nd_region->ndr_size = resource_size(ndr_desc->res); nd_region->ndr_start = ndr_desc->res->start; + nd_region->align = default_align(nd_region); if (ndr_desc->flush) nd_region->flush = ndr_desc->flush; else diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c index 9d00a24277aa..f96e5eaee87e 100644 --- a/drivers/parisc/eisa.c +++ b/drivers/parisc/eisa.c @@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev) return IRQ_HANDLED; } -static struct irqaction irq2_action = { - .handler = dummy_irq2_handler, - .name = "cascade", -}; - static void init_eisa_pic(void) { unsigned long flags; @@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev) } /* Reserve IRQ2 */ - setup_irq(2, &irq2_action); + if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL)) + pr_err("Failed to request irq 2 (cascade)\n"); for (i = 0; i < 16; i++) { irq_set_chip_and_handler(i, &eisa_interrupt_type, handle_simple_irq); diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c index 3ef0bb281e7c..390e92f2d8d1 100644 --- a/drivers/pci/ats.c +++ b/drivers/pci/ats.c @@ -366,6 +366,7 @@ int pci_enable_pasid(struct pci_dev *pdev, int features) return 0; } +EXPORT_SYMBOL_GPL(pci_enable_pasid); /** * pci_disable_pasid - Disable the PASID capability @@ -390,6 +391,7 @@ void pci_disable_pasid(struct pci_dev *pdev) pdev->pasid_enabled = 0; } +EXPORT_SYMBOL_GPL(pci_disable_pasid); /** * pci_restore_pasid_state - Restore PASID capabilities @@ -441,6 +443,7 @@ int pci_pasid_features(struct pci_dev *pdev) return supported; } +EXPORT_SYMBOL_GPL(pci_pasid_features); #define PASID_NUMBER_SHIFT 8 #define PASID_NUMBER_MASK (0x1f << PASID_NUMBER_SHIFT) @@ -469,4 +472,5 @@ int pci_max_pasids(struct pci_dev *pdev) return (1 << supported); } +EXPORT_SYMBOL_GPL(pci_max_pasids); #endif /* CONFIG_PCI_PASID */ diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 977946e4e613..c5eb509c72f0 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c @@ -140,7 +140,7 @@ static void dlpar_pci_add_bus(struct device_node *dn) struct pci_controller *phb = pdn->phb; struct pci_dev *dev = NULL; - eeh_add_device_tree_early(pdn); + pseries_eeh_init_edev_recursive(pdn); /* Add EADS device to PHB bus, adding new entry to bus->devices */ dev = of_create_pci_dev(dn, phb->bus, pdn->devfn); diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index e408e4021cee..6504869efabc 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ 
b/drivers/pci/hotplug/rpaphp_core.c @@ -288,11 +288,10 @@ EXPORT_SYMBOL_GPL(rpaphp_check_drc_props); static int is_php_type(char *drc_type) { - unsigned long value; char *endptr; /* PCI Hotplug nodes have an integer for drc_type */ - value = simple_strtoul(drc_type, &endptr, 10); + simple_strtoul(drc_type, &endptr, 10); if (endptr == drc_type) return 0; @@ -494,6 +493,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) return retval; if (state == PRESENT) { + pseries_eeh_init_edev_recursive(PCI_DN(slot->dn)); + pci_lock_rescan_remove(); pci_hp_add_devices(slot->bus); pci_unlock_rescan_remove(); diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index beca61badeea..c380bdacd146 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -95,8 +95,10 @@ int rpaphp_enable_slot(struct slot *slot) return -EINVAL; } - if (list_empty(&bus->devices)) + if (list_empty(&bus->devices)) { + pseries_eeh_init_edev_recursive(PCI_DN(slot->dn)); pci_hp_add_devices(bus); + } if (!list_empty(&bus->devices)) { slot->state = CONFIGURED; diff --git a/drivers/pcmcia/cs_internal.h b/drivers/pcmcia/cs_internal.h index 33c9b6ea7364..fb9b17fa0fb5 100644 --- a/drivers/pcmcia/cs_internal.h +++ b/drivers/pcmcia/cs_internal.h @@ -40,7 +40,7 @@ struct cis_cache_entry { unsigned int addr; unsigned int len; unsigned int attr; - unsigned char cache[0]; + unsigned char cache[]; }; struct pccard_resource_ops { diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c index 0a04eb04f3a2..d3ef5534991e 100644 --- a/drivers/pcmcia/omap_cf.c +++ b/drivers/pcmcia/omap_cf.c @@ -329,7 +329,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev) static struct platform_driver omap_cf_driver = { .driver = { - .name = (char *) driver_name, + .name = driver_name, }, .remove = __exit_p(omap_cf_remove), }; diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 9e6922c08ef6..3b05760e69d6 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -1076,7 +1076,7 @@ static ssize_t show_io_db(struct device *dev, for (p = data->io_db.next; p != &data->io_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; - ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), + ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); @@ -1133,7 +1133,7 @@ static ssize_t show_mem_db(struct device *dev, p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; - ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), + ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); @@ -1142,7 +1142,7 @@ static ssize_t show_mem_db(struct device *dev, for (p = data->mem_db.next; p != &data->mem_db; p = p->next) { if (ret > (PAGE_SIZE - 10)) continue; - ret += snprintf(&buf[ret], (PAGE_SIZE - ret - 1), + ret += scnprintf(&buf[ret], (PAGE_SIZE - ret - 1), "0x%08lx - 0x%08lx\n", ((unsigned long) p->base), ((unsigned long) p->base + p->num - 1)); diff --git a/drivers/pcmcia/sa1100_simpad.c b/drivers/pcmcia/sa1100_simpad.c index e2e8729afd9d..784ada5b8c4f 100644 --- a/drivers/pcmcia/sa1100_simpad.c +++ b/drivers/pcmcia/sa1100_simpad.c @@ -14,7 +14,7 @@ #include <asm/mach-types.h> #include <mach/simpad.h> #include "sa1100_generic.h" - + static int simpad_pcmcia_hw_init(struct soc_pcmcia_socket *skt) { @@ -66,7 +66,7 @@ simpad_pcmcia_configure_socket(struct soc_pcmcia_socket 
*skt, simpad_clear_cs3_bit(VCC_3V_EN|VCC_5V_EN|EN0|EN1); break; - case 33: + case 33: simpad_clear_cs3_bit(VCC_3V_EN|EN1); simpad_set_cs3_bit(VCC_5V_EN|EN0); break; @@ -95,7 +95,7 @@ static void simpad_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) simpad_set_cs3_bit(PCMCIA_RESET); } -static struct pcmcia_low_level simpad_pcmcia_ops = { +static struct pcmcia_low_level simpad_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = simpad_pcmcia_hw_init, .hw_shutdown = simpad_pcmcia_hw_shutdown, diff --git a/drivers/pcmcia/soc_common.h b/drivers/pcmcia/soc_common.h index b7f993f1bbd0..222e81c79365 100644 --- a/drivers/pcmcia/soc_common.h +++ b/drivers/pcmcia/soc_common.h @@ -88,7 +88,7 @@ struct soc_pcmcia_socket { struct skt_dev_info { int nskt; - struct soc_pcmcia_socket skt[0]; + struct soc_pcmcia_socket skt[]; }; struct pcmcia_state { diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 49b1c6a1bdbe..bf6529b0b5b0 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -180,12 +180,12 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri for (i = 0; i < 0x24; i += 4) { unsigned val; if (!(i & 15)) - offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i); val = cb_readl(socket, i); - offset += snprintf(buf + offset, PAGE_SIZE - offset, " %08x", val); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %08x", val); } - offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:"); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n\nExCA registers:"); for (i = 0; i < 0x45; i++) { unsigned char val; if (!(i & 7)) { @@ -193,10 +193,10 @@ static ssize_t show_yenta_registers(struct device *yentadev, struct device_attri memcpy(buf + offset, " -", 2); offset += 2; } else - offset += snprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, "\n%02x:", i); } val = exca_readb(socket, i); - offset += snprintf(buf + offset, PAGE_SIZE - offset, " %02x", val); + offset += scnprintf(buf + offset, PAGE_SIZE - offset, " %02x", val); } buf[offset++] = '\n'; return offset; diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index 1ed20ac2243f..c6fe7d64c913 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -103,6 +103,7 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np) { return get_pinctrl_dev_from_of_node(np); } +EXPORT_SYMBOL_GPL(of_pinctrl_get); static int dt_to_map_one_config(struct pinctrl *p, struct pinctrl_dev *hog_pctldev, diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 7f5376ce7fb1..e5dcf77fe43d 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -2323,11 +2323,8 @@ static int __init ingenic_gpio_probe(struct ingenic_pinctrl *jzpc, jzgc->gc.direction_input = ingenic_gpio_direction_input; jzgc->gc.direction_output = ingenic_gpio_direction_output; jzgc->gc.get_direction = ingenic_gpio_get_direction; - - if (of_property_read_bool(node, "gpio-ranges")) { - jzgc->gc.request = gpiochip_generic_request; - jzgc->gc.free = gpiochip_generic_free; - } + jzgc->gc.request = gpiochip_generic_request; + jzgc->gc.free = gpiochip_generic_free; jzgc->irq = irq_of_parse_and_map(node, 0); if (!jzgc->irq) diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index 5f57282a28da..03ea5129ed0c 100644 --- 
a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -7,7 +7,7 @@ config MFD_CROS_EC tristate "Platform support for Chrome hardware (transitional)" select CHROME_PLATFORMS select CROS_EC - select CONFIG_MFD_CROS_EC_DEV + select MFD_CROS_EC_DEV depends on X86 || ARM || ARM64 || COMPILE_TEST help This is a transitional Kconfig option and will be removed after @@ -214,6 +214,17 @@ config CROS_EC_SYSFS To compile this driver as a module, choose M here: the module will be called cros_ec_sysfs. +config CROS_EC_TYPEC + tristate "ChromeOS EC Type-C Connector Control" + depends on MFD_CROS_EC_DEV && TYPEC + default MFD_CROS_EC_DEV + help + If you say Y here, you get support for accessing Type C connector + information from the Chrome OS EC. + + To compile this driver as a module, choose M here: the module will be + called cros_ec_typec. + config CROS_USBPD_LOGGER tristate "Logging driver for USB PD charger" depends on CHARGER_CROS_USBPD @@ -226,6 +237,20 @@ config CROS_USBPD_LOGGER To compile this driver as a module, choose M here: the module will be called cros_usbpd_logger. +config CROS_USBPD_NOTIFY + tristate "ChromeOS Type-C power delivery event notifier" + depends on MFD_CROS_EC_DEV + default MFD_CROS_EC_DEV + help + If you say Y here, you get support for Type-C PD event notifications + from the ChromeOS EC. On ACPI platorms this driver will bind to the + GOOG0003 ACPI device, and on platforms which don't have this device it + will get initialized on ECs which support the feature + EC_FEATURE_USB_PD. + + To compile this driver as a module, choose M here: the + module will be called cros_usbpd_notify. + source "drivers/platform/chrome/wilco_ec/Kconfig" endif # CHROMEOS_PLATFORMS diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index aacd5920d8a1..41baccba033f 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_CROS_EC_ISHTP) += cros_ec_ishtp.o obj-$(CONFIG_CROS_EC_RPMSG) += cros_ec_rpmsg.o obj-$(CONFIG_CROS_EC_SPI) += cros_ec_spi.o cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_mec.o +obj-$(CONFIG_CROS_EC_TYPEC) += cros_ec_typec.o obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o cros_ec_trace.o obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o @@ -19,8 +20,10 @@ obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_chardev.o obj-$(CONFIG_CROS_EC_LIGHTBAR) += cros_ec_lightbar.o obj-$(CONFIG_CROS_EC_VBC) += cros_ec_vbc.o obj-$(CONFIG_CROS_EC_DEBUGFS) += cros_ec_debugfs.o -obj-$(CONFIG_CROS_EC_SENSORHUB) += cros_ec_sensorhub.o +cros-ec-sensorhub-objs := cros_ec_sensorhub.o cros_ec_sensorhub_ring.o +obj-$(CONFIG_CROS_EC_SENSORHUB) += cros-ec-sensorhub.o obj-$(CONFIG_CROS_EC_SYSFS) += cros_ec_sysfs.o obj-$(CONFIG_CROS_USBPD_LOGGER) += cros_usbpd_logger.o +obj-$(CONFIG_CROS_USBPD_NOTIFY) += cros_usbpd_notify.o obj-$(CONFIG_WILCO_EC) += wilco_ec/ diff --git a/drivers/platform/chrome/chromeos_laptop.c b/drivers/platform/chrome/chromeos_laptop.c index 4f3651fcd9fe..472a03daa869 100644 --- a/drivers/platform/chrome/chromeos_laptop.c +++ b/drivers/platform/chrome/chromeos_laptop.c @@ -103,7 +103,7 @@ chromes_laptop_instantiate_i2c_device(struct i2c_adapter *adapter, pr_debug("%d-%02x is probed at %02x\n", adapter->nr, info->addr, dummy->addr); i2c_unregister_device(dummy); - client = i2c_new_device(adapter, info); + client = i2c_new_client_device(adapter, info); } } diff --git a/drivers/platform/chrome/cros_ec.c 
b/drivers/platform/chrome/cros_ec.c index 6fc8f2c3ac51..3104680b7485 100644 --- a/drivers/platform/chrome/cros_ec.c +++ b/drivers/platform/chrome/cros_ec.c @@ -120,7 +120,7 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event) buf.msg.command = EC_CMD_HOST_SLEEP_EVENT; - ret = cros_ec_cmd_xfer(ec_dev, &buf.msg); + ret = cros_ec_cmd_xfer_status(ec_dev, &buf.msg); /* For now, report failure to transition to S0ix with a warning. */ if (ret >= 0 && ec_dev->host_sleep_v1 && @@ -138,6 +138,24 @@ static int cros_ec_sleep_event(struct cros_ec_device *ec_dev, u8 sleep_event) return ret; } +static int cros_ec_ready_event(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *_notify) +{ + struct cros_ec_device *ec_dev = container_of(nb, struct cros_ec_device, + notifier_ready); + u32 host_event = cros_ec_get_host_event(ec_dev); + + if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_INTERFACE_READY)) { + mutex_lock(&ec_dev->lock); + cros_ec_query_all(ec_dev); + mutex_unlock(&ec_dev->lock); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + /** * cros_ec_register() - Register a new ChromeOS EC, using the provided info. * @ec_dev: Device to register. @@ -237,6 +255,18 @@ int cros_ec_register(struct cros_ec_device *ec_dev) dev_dbg(ec_dev->dev, "Error %d clearing sleep event to ec", err); + if (ec_dev->mkbp_event_supported) { + /* + * Register the notifier for EC_HOST_EVENT_INTERFACE_READY + * event. + */ + ec_dev->notifier_ready.notifier_call = cros_ec_ready_event; + err = blocking_notifier_chain_register(&ec_dev->event_notifier, + &ec_dev->notifier_ready); + if (err) + return err; + } + dev_info(dev, "Chrome EC device registered\n"); return 0; diff --git a/drivers/platform/chrome/cros_ec_chardev.c b/drivers/platform/chrome/cros_ec_chardev.c index c65e70bc168d..e0bce869c49a 100644 --- a/drivers/platform/chrome/cros_ec_chardev.c +++ b/drivers/platform/chrome/cros_ec_chardev.c @@ -48,7 +48,7 @@ struct ec_event { struct list_head node; size_t size; u8 event_type; - u8 data[0]; + u8 data[]; }; static int ec_get_version(struct cros_ec_dev *ec, char *str, int maxlen) @@ -301,7 +301,7 @@ static long cros_ec_chardev_ioctl_xcmd(struct cros_ec_dev *ec, void __user *arg) } s_cmd->command += ec->cmd_offset; - ret = cros_ec_cmd_xfer(ec->ec_dev, s_cmd); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, s_cmd); /* Only copy data to userland if data was received. 
*/ if (ret < 0) goto exit; diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c index b4c110c5fee0..b59180bff5a3 100644 --- a/drivers/platform/chrome/cros_ec_lightbar.c +++ b/drivers/platform/chrome/cros_ec_lightbar.c @@ -116,7 +116,7 @@ static int get_lightbar_version(struct cros_ec_dev *ec, param = (struct ec_params_lightbar *)msg->data; param->cmd = LIGHTBAR_CMD_VERSION; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) { ret = 0; goto exit; @@ -193,15 +193,10 @@ static ssize_t brightness_store(struct device *dev, if (ret) goto exit; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) goto exit; - if (msg->result != EC_RES_SUCCESS) { - ret = -EINVAL; - goto exit; - } - ret = count; exit: kfree(msg); @@ -258,13 +253,10 @@ static ssize_t led_rgb_store(struct device *dev, struct device_attribute *attr, goto exit; } - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) goto exit; - if (msg->result != EC_RES_SUCCESS) - goto exit; - i = 0; ok = 1; } @@ -305,14 +297,13 @@ static ssize_t sequence_show(struct device *dev, if (ret) goto exit; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); - if (ret < 0) - goto exit; - - if (msg->result != EC_RES_SUCCESS) { + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + if (ret == -EPROTO) { ret = scnprintf(buf, PAGE_SIZE, "ERROR: EC returned %d\n", msg->result); goto exit; + } else if (ret < 0) { + goto exit; } resp = (struct ec_response_lightbar *)msg->data; @@ -344,13 +335,10 @@ static int lb_send_empty_cmd(struct cros_ec_dev *ec, uint8_t cmd) if (ret) goto error; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) goto error; - if (msg->result != EC_RES_SUCCESS) { - ret = -EINVAL; - goto error; - } + ret = 0; error: kfree(msg); @@ -377,13 +365,10 @@ static int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable) if (ret) goto error; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) goto error; - if (msg->result != EC_RES_SUCCESS) { - ret = -EINVAL; - goto error; - } + ret = 0; error: kfree(msg); @@ -425,15 +410,10 @@ static ssize_t sequence_store(struct device *dev, struct device_attribute *attr, if (ret) goto exit; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) goto exit; - if (msg->result != EC_RES_SUCCESS) { - ret = -EINVAL; - goto exit; - } - ret = count; exit: kfree(msg); @@ -487,13 +467,9 @@ static ssize_t program_store(struct device *dev, struct device_attribute *attr, */ msg->outsize = count + extra_bytes; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) goto exit; - if (msg->result != EC_RES_SUCCESS) { - ret = -EINVAL; - goto exit; - } ret = count; exit: diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index 3cfa643f1d07..3e745e0fe092 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -553,7 +553,10 @@ EXPORT_SYMBOL(cros_ec_cmd_xfer); * replied with success status. It's not necessary to check msg->result when * using this function. * - * Return: The number of bytes transferred on success or negative error code. 
+ * Return: + * >=0 - The number of bytes transferred + * -ENOTSUPP - Operation not supported + * -EPROTO - Protocol error */ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, struct cros_ec_command *msg) @@ -563,6 +566,10 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, ret = cros_ec_cmd_xfer(ec_dev, msg); if (ret < 0) { dev_err(ec_dev->dev, "Command xfer error (err:%d)\n", ret); + } else if (msg->result == EC_RES_INVALID_VERSION) { + dev_dbg(ec_dev->dev, "Command invalid version (err:%d)\n", + msg->result); + return -ENOTSUPP; } else if (msg->result != EC_RES_SUCCESS) { dev_dbg(ec_dev->dev, "Command result (err: %d)\n", msg->result); return -EPROTO; diff --git a/drivers/platform/chrome/cros_ec_rpmsg.c b/drivers/platform/chrome/cros_ec_rpmsg.c index dbc3f5523b83..7e8629e3db74 100644 --- a/drivers/platform/chrome/cros_ec_rpmsg.c +++ b/drivers/platform/chrome/cros_ec_rpmsg.c @@ -44,6 +44,8 @@ struct cros_ec_rpmsg { struct completion xfer_ack; struct work_struct host_event_work; struct rpmsg_endpoint *ept; + bool has_pending_host_event; + bool probe_done; }; /** @@ -177,7 +179,14 @@ static int cros_ec_rpmsg_callback(struct rpmsg_device *rpdev, void *data, memcpy(ec_dev->din, resp->data, len); complete(&ec_rpmsg->xfer_ack); } else if (resp->type == HOST_EVENT_MARK) { - schedule_work(&ec_rpmsg->host_event_work); + /* + * If the host event is sent before cros_ec_register is + * finished, queue the host event. + */ + if (ec_rpmsg->probe_done) + schedule_work(&ec_rpmsg->host_event_work); + else + ec_rpmsg->has_pending_host_event = true; } else { dev_warn(ec_dev->dev, "rpmsg received invalid type = %d", resp->type); @@ -240,6 +249,11 @@ static int cros_ec_rpmsg_probe(struct rpmsg_device *rpdev) return ret; } + ec_rpmsg->probe_done = true; + + if (ec_rpmsg->has_pending_host_event) + schedule_work(&ec_rpmsg->host_event_work); + return 0; } diff --git a/drivers/platform/chrome/cros_ec_sensorhub.c b/drivers/platform/chrome/cros_ec_sensorhub.c index 79fefd3bb0fa..b7f2c00db5e1 100644 --- a/drivers/platform/chrome/cros_ec_sensorhub.c +++ b/drivers/platform/chrome/cros_ec_sensorhub.c @@ -50,10 +50,8 @@ static int cros_ec_sensorhub_register(struct device *dev, struct cros_ec_sensorhub *sensorhub) { int sensor_type[MOTIONSENSE_TYPE_MAX] = { 0 }; + struct cros_ec_command *msg = sensorhub->msg; struct cros_ec_dev *ec = sensorhub->ec; - struct ec_params_motion_sense *params; - struct ec_response_motion_sense *resp; - struct cros_ec_command *msg; int ret, i, sensor_num; char *name; @@ -65,27 +63,19 @@ static int cros_ec_sensorhub_register(struct device *dev, return sensor_num; } + sensorhub->sensor_num = sensor_num; if (sensor_num == 0) { dev_err(dev, "Zero sensors reported.\n"); return -EINVAL; } - /* Prepare a message to send INFO command to each sensor. 
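The calling convention these hunks converge on is worth spelling out: cros_ec_cmd_xfer_status() folds the msg->result check into its return value, mapping EC_RES_INVALID_VERSION to -ENOTSUPP and any other non-success result to -EPROTO, so converted callers only test for a negative return. A minimal caller sketch, illustration only and not part of this patch (example_read_ec is a made-up name):

static int example_read_ec(struct cros_ec_dev *ec, struct cros_ec_command *msg)
{
	int ret;

	ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg);
	if (ret < 0)
		return ret;	/* transfer failure, -ENOTSUPP or -EPROTO */

	/* ret is the number of bytes the EC returned; msg->data is valid. */
	return ret;
}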
*/ - msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*resp)), - GFP_KERNEL); - if (!msg) - return -ENOMEM; - msg->version = 1; - msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset; - msg->outsize = sizeof(*params); - msg->insize = sizeof(*resp); - params = (struct ec_params_motion_sense *)msg->data; - resp = (struct ec_response_motion_sense *)msg->data; + msg->insize = sizeof(struct ec_response_motion_sense); + msg->outsize = sizeof(struct ec_params_motion_sense); for (i = 0; i < sensor_num; i++) { - params->cmd = MOTIONSENSE_CMD_INFO; - params->info.sensor_num = i; + sensorhub->params->cmd = MOTIONSENSE_CMD_INFO; + sensorhub->params->info.sensor_num = i; ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); if (ret < 0) { @@ -94,7 +84,7 @@ static int cros_ec_sensorhub_register(struct device *dev, continue; } - switch (resp->info.type) { + switch (sensorhub->resp->info.type) { case MOTIONSENSE_TYPE_ACCEL: name = "cros-ec-accel"; break; @@ -117,15 +107,16 @@ static int cros_ec_sensorhub_register(struct device *dev, name = "cros-ec-activity"; break; default: - dev_warn(dev, "unknown type %d\n", resp->info.type); + dev_warn(dev, "unknown type %d\n", + sensorhub->resp->info.type); continue; } ret = cros_ec_sensorhub_allocate_sensor(dev, name, i); if (ret) - goto error; + return ret; - sensor_type[resp->info.type]++; + sensor_type[sensorhub->resp->info.type]++; } if (sensor_type[MOTIONSENSE_TYPE_ACCEL] >= 2) @@ -137,29 +128,41 @@ static int cros_ec_sensorhub_register(struct device *dev, "cros-ec-lid-angle", 0); if (ret) - goto error; + return ret; } - kfree(msg); return 0; - -error: - kfree(msg); - return ret; } static int cros_ec_sensorhub_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct cros_ec_dev *ec = dev_get_drvdata(dev->parent); struct cros_ec_sensorhub *data; + struct cros_ec_command *msg; int ret; int i; + msg = devm_kzalloc(dev, sizeof(struct cros_ec_command) + + max((u16)sizeof(struct ec_params_motion_sense), + ec->ec_dev->max_response), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->command = EC_CMD_MOTION_SENSE_CMD + ec->cmd_offset; + data = devm_kzalloc(dev, sizeof(struct cros_ec_sensorhub), GFP_KERNEL); if (!data) return -ENOMEM; - data->ec = dev_get_drvdata(dev->parent); + mutex_init(&data->cmd_lock); + + data->dev = dev; + data->ec = ec; + data->msg = msg; + data->params = (struct ec_params_motion_sense *)msg->data; + data->resp = (struct ec_response_motion_sense *)msg->data; + dev_set_drvdata(dev, data); /* Check whether this EC is a sensor hub. */ @@ -172,7 +175,8 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) * If the device has sensors but does not claim to * be a sensor hub, we are in legacy mode. */ - for (i = 0; i < 2; i++) { + data->sensor_num = 2; + for (i = 0; i < data->sensor_num; i++) { ret = cros_ec_sensorhub_allocate_sensor(dev, "cros-ec-accel-legacy", i); if (ret) @@ -180,12 +184,63 @@ static int cros_ec_sensorhub_probe(struct platform_device *pdev) } } + /* + * If the EC does not have a FIFO, the sensors will query their data + * themselves via sysfs or a software trigger. + */ + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) { + ret = cros_ec_sensorhub_ring_add(data); + if (ret) + return ret; + /* + * The msg and its data is not under the control of the ring + * handler. 
+ */ + return devm_add_action_or_reset(dev, + cros_ec_sensorhub_ring_remove, + data); + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +/* + * When the EC is suspending, we must stop sending interrupt, + * we may use the same interrupt line for waking up the device. + * Tell the EC to stop sending non-interrupt event on the iio ring. + */ +static int cros_ec_sensorhub_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev); + struct cros_ec_dev *ec = sensorhub->ec; + + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) + return cros_ec_sensorhub_ring_fifo_enable(sensorhub, false); return 0; } +static int cros_ec_sensorhub_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct cros_ec_sensorhub *sensorhub = platform_get_drvdata(pdev); + struct cros_ec_dev *ec = sensorhub->ec; + + if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE_FIFO)) + return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true); + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(cros_ec_sensorhub_pm_ops, + cros_ec_sensorhub_suspend, + cros_ec_sensorhub_resume); + static struct platform_driver cros_ec_sensorhub_driver = { .driver = { .name = DRV_NAME, + .pm = &cros_ec_sensorhub_pm_ops, }, .probe = cros_ec_sensorhub_probe, }; diff --git a/drivers/platform/chrome/cros_ec_sensorhub_ring.c b/drivers/platform/chrome/cros_ec_sensorhub_ring.c new file mode 100644 index 000000000000..230e6cf3da2f --- /dev/null +++ b/drivers/platform/chrome/cros_ec_sensorhub_ring.c @@ -0,0 +1,1046 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Driver for Chrome OS EC Sensor hub FIFO. + * + * Copyright 2020 Google LLC + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/iio/iio.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_data/cros_ec_commands.h> +#include <linux/platform_data/cros_ec_proto.h> +#include <linux/platform_data/cros_ec_sensorhub.h> +#include <linux/platform_device.h> +#include <linux/sort.h> +#include <linux/slab.h> + +/* Precision of fixed point for the m values from the filter */ +#define M_PRECISION BIT(23) + +/* Only activate the filter once we have at least this many elements. */ +#define TS_HISTORY_THRESHOLD 8 + +/* + * If we don't have any history entries for this long, empty the filter to + * make sure there are no big discontinuities. + */ +#define TS_HISTORY_BORED_US 500000 + +/* To measure by how much the filter is overshooting, if it happens. */ +#define FUTURE_TS_ANALYTICS_COUNT_MAX 100 + +static inline int +cros_sensorhub_send_sample(struct cros_ec_sensorhub *sensorhub, + struct cros_ec_sensors_ring_sample *sample) +{ + cros_ec_sensorhub_push_data_cb_t cb; + int id = sample->sensor_id; + struct iio_dev *indio_dev; + + if (id > sensorhub->sensor_num) + return -EINVAL; + + cb = sensorhub->push_data[id].push_data_cb; + if (!cb) + return 0; + + indio_dev = sensorhub->push_data[id].indio_dev; + + if (sample->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) + return 0; + + return cb(indio_dev, sample->vector, sample->timestamp); +} + +/** + * cros_ec_sensorhub_register_push_data() - register the callback to the hub. + * + * @sensorhub : Sensor Hub object + * @sensor_num : The sensor the caller is interested in. + * @indio_dev : The iio device to use when a sample arrives. + * @cb : The callback to call when a sample arrives. 
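For context, an IIO sensor driver consumes this hook roughly as in the sketch below. Illustration only, not part of this patch: example_push() and the registration call site are hypothetical, and the callback prototype is inferred from the cb(indio_dev, sample->vector, sample->timestamp) call in cros_sensorhub_send_sample() above.

static int example_push(struct iio_dev *indio_dev, s16 *data, s64 timestamp)
{
	/*
	 * A real driver would push the 3-axis vector in data[] plus the
	 * timestamp into its IIO buffer; this stub only accepts the sample.
	 */
	return 0;
}

/* In the sensor driver's probe, for the sensor number it owns: */
/*	cros_ec_sensorhub_register_push_data(sensorhub, sensor_num,    */
/*					     indio_dev, example_push);  */
/* and cros_ec_sensorhub_unregister_push_data(sensorhub, sensor_num) on remove. */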
+ * + * The callback cb will be used by cros_ec_sensorhub_ring to distribute events + * from the EC. + * + * Return: 0 when callback is registered. + * EINVAL is the sensor number is invalid or the slot already used. + */ +int cros_ec_sensorhub_register_push_data(struct cros_ec_sensorhub *sensorhub, + u8 sensor_num, + struct iio_dev *indio_dev, + cros_ec_sensorhub_push_data_cb_t cb) +{ + if (sensor_num >= sensorhub->sensor_num) + return -EINVAL; + if (sensorhub->push_data[sensor_num].indio_dev) + return -EINVAL; + + sensorhub->push_data[sensor_num].indio_dev = indio_dev; + sensorhub->push_data[sensor_num].push_data_cb = cb; + + return 0; +} +EXPORT_SYMBOL_GPL(cros_ec_sensorhub_register_push_data); + +void cros_ec_sensorhub_unregister_push_data(struct cros_ec_sensorhub *sensorhub, + u8 sensor_num) +{ + sensorhub->push_data[sensor_num].indio_dev = NULL; + sensorhub->push_data[sensor_num].push_data_cb = NULL; +} +EXPORT_SYMBOL_GPL(cros_ec_sensorhub_unregister_push_data); + +/** + * cros_ec_sensorhub_ring_fifo_enable() - Enable or disable interrupt generation + * for FIFO events. + * @sensorhub: Sensor Hub object + * @on: true when events are requested. + * + * To be called before sleeping or when noone is listening. + * Return: 0 on success, or an error when we can not communicate with the EC. + * + */ +int cros_ec_sensorhub_ring_fifo_enable(struct cros_ec_sensorhub *sensorhub, + bool on) +{ + int ret, i; + + mutex_lock(&sensorhub->cmd_lock); + if (sensorhub->tight_timestamps) + for (i = 0; i < sensorhub->sensor_num; i++) + sensorhub->batch_state[i].last_len = 0; + + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INT_ENABLE; + sensorhub->params->fifo_int_enable.enable = on; + + sensorhub->msg->outsize = sizeof(struct ec_params_motion_sense); + sensorhub->msg->insize = sizeof(struct ec_response_motion_sense); + + ret = cros_ec_cmd_xfer_status(sensorhub->ec->ec_dev, sensorhub->msg); + mutex_unlock(&sensorhub->cmd_lock); + + /* We expect to receive a payload of 4 bytes, ignore. */ + if (ret > 0) + ret = 0; + + return ret; +} + +static int cros_ec_sensor_ring_median_cmp(const void *pv1, const void *pv2) +{ + s64 v1 = *(s64 *)pv1; + s64 v2 = *(s64 *)pv2; + + if (v1 > v2) + return 1; + else if (v1 < v2) + return -1; + else + return 0; +} + +/* + * cros_ec_sensor_ring_median: Gets median of an array of numbers + * + * For now it's implemented using an inefficient > O(n) sort then return + * the middle element. A more optimal method would be something like + * quickselect, but given that n = 64 we can probably live with it in the + * name of clarity. + * + * Warning: the input array gets modified (sorted)! + */ +static s64 cros_ec_sensor_ring_median(s64 *array, size_t length) +{ + sort(array, length, sizeof(s64), cros_ec_sensor_ring_median_cmp, NULL); + return array[length / 2]; +} + +/* + * IRQ Timestamp Filtering + * + * Lower down in cros_ec_sensor_ring_process_event(), for each sensor event + * we have to calculate it's timestamp in the AP timebase. There are 3 time + * points: + * a - EC timebase, sensor event + * b - EC timebase, IRQ + * c - AP timebase, IRQ + * a' - what we want: sensor even in AP timebase + * + * While a and b are recorded at accurate times (due to the EC real time + * nature); c is pretty untrustworthy, even though it's recorded the + * first thing in ec_irq_handler(). 
There is a very good chance we'll get + added latency due to: + * other irqs + * ddrfreq + * cpuidle + * + * Normally a' = c - b + a, but if we do that naive math any jitter in c + * will get coupled in a', which we don't want. We want a function + * a' = cros_ec_sensor_ring_ts_filter(a) which will filter out outliers in c. + * + * Think of a graph of AP time(c) on the y axis vs EC time(b) on the x axis. + * The slope of the line won't be exactly 1, there will be some clock drift + * between the 2 chips for various reasons (mechanical stress, temperature, + * voltage). We need to extrapolate values for a future x, without trusting + * recent y values too much. + * + * We use a median filter for the slope, then another median filter for the + * y-intercept to calculate this function: + * dx[n] = x[n-1] - x[n] + * dy[n] = y[n-1] - y[n] + * m[n] = dy[n] / dx[n] + * median_m = median(m[n-k:n]) + * error[i] = y[n-i] - median_m * x[n-i] + * median_error = median(error[:k]) + * predicted_y = median_m * x + median_error + * + * Implementation differences from above: + * - Redefined y to be actually c - b, this gives us a lot more precision + * to do the math. (c-b)/b variations are more obvious than c/b variations. + * - Since we don't have floating point, any operations involving slope are + * done using fixed point math (*M_PRECISION) + * - Since x and y grow with time, we keep zeroing the graph (relative to + * the last sample), this way math involving *x[n-i] will not overflow + * - EC timestamps are kept in us, it improves the slope calculation precision + */ + +/** + * cros_ec_sensor_ring_ts_filter_update() - Update filter history. + * + * @state: Filter information. + * @b: IRQ timestamp, EC timebase (us) + * @c: IRQ timestamp, AP timebase (ns) + * + * Given a new IRQ timestamp pair (EC and AP timebases), add it to the filter + * history. + */ +static void +cros_ec_sensor_ring_ts_filter_update(struct cros_ec_sensors_ts_filter_state + *state, + s64 b, s64 c) +{ + s64 x, y; + s64 dx, dy; + s64 m; /* stored as *M_PRECISION */ + s64 *m_history_copy = state->temp_buf; + s64 *error = state->temp_buf; + int i; + + /* we trust b the most, that'll be our independent variable */ + x = b; + /* y is the offset between AP and EC times, in ns */ + y = c - b * 1000; + + dx = (state->x_history[0] + state->x_offset) - x; + if (dx == 0) + return; /* we already have this irq in the history */ + dy = (state->y_history[0] + state->y_offset) - y; + m = div64_s64(dy * M_PRECISION, dx); + + /* Empty filter if we haven't seen any action in a while. */ + if (-dx > TS_HISTORY_BORED_US) + state->history_len = 0; + + /* Move everything over, also update offset to all absolute coords. */ + for (i = state->history_len - 1; i >= 1; i--) { + state->x_history[i] = state->x_history[i - 1] + dx; + state->y_history[i] = state->y_history[i - 1] + dy; + + state->m_history[i] = state->m_history[i - 1]; + /* + * Also use the same loop to copy m_history for future + * median extraction. + */ + m_history_copy[i] = state->m_history[i - 1]; + } + + /* Store the x and y, but remember offset is actually last sample. */ + state->x_offset = x; + state->y_offset = y; + state->x_history[0] = 0; + state->y_history[0] = 0; + + state->m_history[0] = m; + m_history_copy[0] = m; + + if (state->history_len < CROS_EC_SENSORHUB_TS_HISTORY_SIZE) + state->history_len++; + + /* Precalculate things for the filter. 
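Reduced to just two IRQ pairs and no median history, the math above collapses to the sketch below. Illustration only, not part of this patch: example_two_point_filter() is a hypothetical helper that reuses M_PRECISION from this file; b1/c1 and b2/c2 are two recorded IRQ pairs (EC microseconds, AP nanoseconds) and a is the EC event timestamp in microseconds.

static s64 example_two_point_filter(s64 b1, s64 c1, s64 b2, s64 c2, s64 a)
{
	s64 y1 = c1 - b1 * 1000;	/* AP - EC offset at the older IRQ, ns */
	s64 y2 = c2 - b2 * 1000;	/* AP - EC offset at the newest IRQ, ns */
	s64 m = div64_s64((y2 - y1) * M_PRECISION, b2 - b1);	/* drift slope */

	/* f(a) = m * (a - b2) / M_PRECISION + y2, then undo y = c - b * 1000 */
	return div_s64(m * (a - b2), M_PRECISION) + y2 + a * 1000;
}

With zero drift (y1 == y2) this degenerates to the naive c - b + a transform expressed in nanoseconds.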
*/ + if (state->history_len > TS_HISTORY_THRESHOLD) { + state->median_m = + cros_ec_sensor_ring_median(m_history_copy, + state->history_len - 1); + + /* + * Calculate y-intercepts as if m_median is the slope and + * points in the history are on the line. median_error will + * still be in the offset coordinate system. + */ + for (i = 0; i < state->history_len; i++) + error[i] = state->y_history[i] - + div_s64(state->median_m * state->x_history[i], + M_PRECISION); + state->median_error = + cros_ec_sensor_ring_median(error, state->history_len); + } else { + state->median_m = 0; + state->median_error = 0; + } +} + +/** + * cros_ec_sensor_ring_ts_filter() - Translate EC timebase timestamp to AP + * timebase + * + * @state: filter information. + * @x: any ec timestamp (us): + * + * cros_ec_sensor_ring_ts_filter(a) => a' event timestamp, AP timebase + * cros_ec_sensor_ring_ts_filter(b) => calculated timestamp when the EC IRQ + * should have happened on the AP, with low jitter + * + * Note: The filter will only activate once state->history_len goes + * over TS_HISTORY_THRESHOLD. Otherwise it'll just do the naive c - b + a + * transform. + * + * How to derive the formula, starting from: + * f(x) = median_m * x + median_error + * That's the calculated AP - EC offset (at the x point in time) + * Undo the coordinate system transform: + * f(x) = median_m * (x - x_offset) + median_error + y_offset + * Remember to undo the "y = c - b * 1000" modification: + * f(x) = median_m * (x - x_offset) + median_error + y_offset + x * 1000 + * + * Return: timestamp in AP timebase (ns) + */ +static s64 +cros_ec_sensor_ring_ts_filter(struct cros_ec_sensors_ts_filter_state *state, + s64 x) +{ + return div_s64(state->median_m * (x - state->x_offset), M_PRECISION) + + state->median_error + state->y_offset + x * 1000; +} + +/* + * Since a and b were originally 32 bit values from the EC, + * they overflow relatively often, casting is not enough, so we need to + * add an offset. + */ +static void +cros_ec_sensor_ring_fix_overflow(s64 *ts, + const s64 overflow_period, + struct cros_ec_sensors_ec_overflow_state + *state) +{ + s64 adjust; + + *ts += state->offset; + if (abs(state->last - *ts) > (overflow_period / 2)) { + adjust = state->last > *ts ? overflow_period : -overflow_period; + state->offset += adjust; + *ts += adjust; + } + state->last = *ts; +} + +static void +cros_ec_sensor_ring_check_for_past_timestamp(struct cros_ec_sensorhub + *sensorhub, + struct cros_ec_sensors_ring_sample + *sample) +{ + const u8 sensor_id = sample->sensor_id; + + /* If this event is earlier than one we saw before... */ + if (sensorhub->batch_state[sensor_id].newest_sensor_event > + sample->timestamp) + /* mark it for spreading. */ + sample->timestamp = + sensorhub->batch_state[sensor_id].last_ts; + else + sensorhub->batch_state[sensor_id].newest_sensor_event = + sample->timestamp; +} + +/** + * cros_ec_sensor_ring_process_event() - Process one EC FIFO event + * + * @sensorhub: Sensor Hub object. + * @fifo_info: FIFO information from the EC (includes b point, EC timebase). + * @fifo_timestamp: EC IRQ, kernel timebase (aka c). + * @current_timestamp: calculated event timestamp, kernel timebase (aka a'). + * @in: incoming FIFO event from EC (includes a point, EC timebase). + * @out: outgoing event to user space (includes a'). + * + * Process one EC event, add it in the ring if necessary. + * + * Return: true if out event has been populated. 
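For a sense of scale in cros_ec_sensor_ring_fix_overflow() above: the EC counters are 32-bit microsecond timestamps, so the overflow_period of 1LL << 32 us wraps roughly every 71.6 minutes. A worked pass with made-up values:

	state->last  = 4294900000 us   (previous, already-adjusted timestamp)
	raw *ts      =     100000 us   (the EC counter has just wrapped)
	|last - *ts| = 4294800000 us   > overflow_period / 2
	so state->offset grows by 4294967296 us and *ts becomes 4295067296 us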
+ */ +static bool +cros_ec_sensor_ring_process_event(struct cros_ec_sensorhub *sensorhub, + const struct ec_response_motion_sense_fifo_info + *fifo_info, + const ktime_t fifo_timestamp, + ktime_t *current_timestamp, + struct ec_response_motion_sensor_data *in, + struct cros_ec_sensors_ring_sample *out) +{ + const s64 now = cros_ec_get_time_ns(); + int axis, async_flags; + + /* Do not populate the filter based on asynchronous events. */ + async_flags = in->flags & + (MOTIONSENSE_SENSOR_FLAG_ODR | MOTIONSENSE_SENSOR_FLAG_FLUSH); + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP && !async_flags) { + s64 a = in->timestamp; + s64 b = fifo_info->timestamp; + s64 c = fifo_timestamp; + + cros_ec_sensor_ring_fix_overflow(&a, 1LL << 32, + &sensorhub->overflow_a); + cros_ec_sensor_ring_fix_overflow(&b, 1LL << 32, + &sensorhub->overflow_b); + + if (sensorhub->tight_timestamps) { + cros_ec_sensor_ring_ts_filter_update( + &sensorhub->filter, b, c); + *current_timestamp = cros_ec_sensor_ring_ts_filter( + &sensorhub->filter, a); + } else { + s64 new_timestamp; + + /* + * Disable filtering since we might add more jitter + * if b is in a random point in time. + */ + new_timestamp = fifo_timestamp - + fifo_info->timestamp * 1000 + + in->timestamp * 1000; + /* + * The timestamp can be stale if we had to use the fifo + * info timestamp. + */ + if (new_timestamp - *current_timestamp > 0) + *current_timestamp = new_timestamp; + } + } + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_ODR) { + if (sensorhub->tight_timestamps) { + sensorhub->batch_state[in->sensor_num].last_len = 0; + sensorhub->batch_state[in->sensor_num].penul_len = 0; + } + /* + * ODR change is only useful for the sensor_ring, it does not + * convey information to clients. + */ + return false; + } + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_FLUSH) { + out->sensor_id = in->sensor_num; + out->timestamp = *current_timestamp; + out->flag = in->flags; + if (sensorhub->tight_timestamps) + sensorhub->batch_state[out->sensor_id].last_len = 0; + /* + * No other payload information provided with + * flush ack. + */ + return true; + } + + if (in->flags & MOTIONSENSE_SENSOR_FLAG_TIMESTAMP) + /* If we just have a timestamp, skip this entry. */ + return false; + + /* Regular sample */ + out->sensor_id = in->sensor_num; + if (*current_timestamp - now > 0) { + /* + * This fix is needed to overcome the timestamp filter putting + * events in the future. + */ + sensorhub->future_timestamp_total_ns += + *current_timestamp - now; + if (++sensorhub->future_timestamp_count == + FUTURE_TS_ANALYTICS_COUNT_MAX) { + s64 avg = div_s64(sensorhub->future_timestamp_total_ns, + sensorhub->future_timestamp_count); + dev_warn_ratelimited(sensorhub->dev, + "100 timestamps in the future, %lldns shaved on average\n", + avg); + sensorhub->future_timestamp_count = 0; + sensorhub->future_timestamp_total_ns = 0; + } + out->timestamp = now; + } else { + out->timestamp = *current_timestamp; + } + + out->flag = in->flags; + for (axis = 0; axis < 3; axis++) + out->vector[axis] = in->data[axis]; + + if (sensorhub->tight_timestamps) + cros_ec_sensor_ring_check_for_past_timestamp(sensorhub, out); + return true; +} + +/* + * cros_ec_sensor_ring_spread_add: Calculate proper timestamps then add to + * ringbuffer. + * + * This is the new spreading code, assumes every sample's timestamp + * preceeds the sample. Run if tight_timestamps == true. + * + * Sometimes the EC receives only one interrupt (hence timestamp) for + * a batch of samples. Only the first sample will have the correct + * timestamp. 
So we must interpolate the other samples. + * We use the previous batch timestamp and our current batch timestamp + * as a way to calculate period, then spread the samples evenly. + * + * s0 int, 0ms + * s1 int, 10ms + * s2 int, 20ms + * 30ms point goes by, no interrupt, previous one is still asserted + * downloading s2 and s3 + * s3 sample, 20ms (incorrect timestamp) + * s4 int, 40ms + * + * The batches are [(s0), (s1), (s2, s3), (s4)]. Since the 3rd batch + * has 2 samples in it, we adjust the timestamp of s3. + * s2 - s1 = 10ms, so s3 must be s2 + 10ms => 30ms. If s1 would have + * been part of a bigger batch things would have gotten a little + * more complicated. + * + * Note: we also assume another sensor sample doesn't break up a batch + * in 2 or more partitions. Example, there can't ever be a sync sensor + * in between S2 and S3. This simplifies the following code. + */ +static void +cros_ec_sensor_ring_spread_add(struct cros_ec_sensorhub *sensorhub, + unsigned long sensor_mask, + struct cros_ec_sensors_ring_sample *last_out) +{ + struct cros_ec_sensors_ring_sample *batch_start, *next_batch_start; + int id; + + for_each_set_bit(id, &sensor_mask, sensorhub->sensor_num) { + for (batch_start = sensorhub->ring; batch_start < last_out; + batch_start = next_batch_start) { + /* + * For each batch (where all samples have the same + * timestamp). + */ + int batch_len, sample_idx; + struct cros_ec_sensors_ring_sample *batch_end = + batch_start; + struct cros_ec_sensors_ring_sample *s; + s64 batch_timestamp = batch_start->timestamp; + s64 sample_period; + + /* + * Skip over batches that start with the sensor types + * we're not looking at right now. + */ + if (batch_start->sensor_id != id) { + next_batch_start = batch_start + 1; + continue; + } + + /* + * Do not start a batch + * from a flush, as it happens asynchronously to the + * regular flow of events. + */ + if (batch_start->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) { + cros_sensorhub_send_sample(sensorhub, + batch_start); + next_batch_start = batch_start + 1; + continue; + } + + if (batch_start->timestamp <= + sensorhub->batch_state[id].last_ts) { + batch_timestamp = + sensorhub->batch_state[id].last_ts; + batch_len = sensorhub->batch_state[id].last_len; + + sample_idx = batch_len; + + sensorhub->batch_state[id].last_ts = + sensorhub->batch_state[id].penul_ts; + sensorhub->batch_state[id].last_len = + sensorhub->batch_state[id].penul_len; + } else { + /* + * Push first sample in the batch to the + * kfifo, it's guaranteed to be correct, the + * rest will follow later on. + */ + sample_idx = 1; + batch_len = 1; + cros_sensorhub_send_sample(sensorhub, + batch_start); + batch_start++; + } + + /* Find all samples that have the same timestamp. */ + for (s = batch_start; s < last_out; s++) { + if (s->sensor_id != id) + /* + * Skip over other sensor types that + * are interleaved, don't count them. + */ + continue; + if (s->timestamp != batch_timestamp) + /* we discovered the next batch */ + break; + if (s->flag & MOTIONSENSE_SENSOR_FLAG_FLUSH) + /* break on flush packets */ + break; + batch_end = s; + batch_len++; + } + + if (batch_len == 1) + goto done_with_this_batch; + + /* Can we calculate period? */ + if (sensorhub->batch_state[id].last_len == 0) { + dev_warn(sensorhub->dev, "Sensor %d: lost %d samples when spreading\n", + id, batch_len - 1); + goto done_with_this_batch; + /* + * Note: we're dropping the rest of the samples + * in this batch since we have no idea where + * they're supposed to go without a period + * calculation. 
+ */ + } + + sample_period = div_s64(batch_timestamp - + sensorhub->batch_state[id].last_ts, + sensorhub->batch_state[id].last_len); + dev_dbg(sensorhub->dev, + "Adjusting %d samples, sensor %d last_batch @%lld (%d samples) batch_timestamp=%lld => period=%lld\n", + batch_len, id, + sensorhub->batch_state[id].last_ts, + sensorhub->batch_state[id].last_len, + batch_timestamp, + sample_period); + + /* + * Adjust timestamps of the samples then push them to + * kfifo. + */ + for (s = batch_start; s <= batch_end; s++) { + if (s->sensor_id != id) + /* + * Skip over other sensor types that + * are interleaved, don't change them. + */ + continue; + + s->timestamp = batch_timestamp + + sample_period * sample_idx; + sample_idx++; + + cros_sensorhub_send_sample(sensorhub, s); + } + +done_with_this_batch: + sensorhub->batch_state[id].penul_ts = + sensorhub->batch_state[id].last_ts; + sensorhub->batch_state[id].penul_len = + sensorhub->batch_state[id].last_len; + + sensorhub->batch_state[id].last_ts = + batch_timestamp; + sensorhub->batch_state[id].last_len = batch_len; + + next_batch_start = batch_end + 1; + } + } +} + +/* + * cros_ec_sensor_ring_spread_add_legacy: Calculate proper timestamps then + * add to ringbuffer (legacy). + * + * Note: This assumes we're running old firmware, where every sample's timestamp + * is after the sample. Run if tight_timestamps == false. + * + * If there is a sample with a proper timestamp + * + * timestamp | count + * ----------------- + * older_unprocess_out --> TS1 | 1 + * TS1 | 2 + * out --> TS1 | 3 + * next_out --> TS2 | + * + * We spread time for the samples [older_unprocess_out .. out] + * between TS1 and TS2: [TS1+1/4, TS1+2/4, TS1+3/4, TS2]. + * + * If we reach the end of the samples, we compare with the + * current timestamp: + * + * older_unprocess_out --> TS1 | 1 + * TS1 | 2 + * out --> TS1 | 3 + * + * We know have [TS1+1/3, TS1+2/3, current timestamp] + */ +static void +cros_ec_sensor_ring_spread_add_legacy(struct cros_ec_sensorhub *sensorhub, + unsigned long sensor_mask, + s64 current_timestamp, + struct cros_ec_sensors_ring_sample + *last_out) +{ + struct cros_ec_sensors_ring_sample *out; + int i; + + for_each_set_bit(i, &sensor_mask, sensorhub->sensor_num) { + s64 older_timestamp; + s64 timestamp; + struct cros_ec_sensors_ring_sample *older_unprocess_out = + sensorhub->ring; + struct cros_ec_sensors_ring_sample *next_out; + int count = 1; + + for (out = sensorhub->ring; out < last_out; out = next_out) { + s64 time_period; + + next_out = out + 1; + if (out->sensor_id != i) + continue; + + /* Timestamp to start with */ + older_timestamp = out->timestamp; + + /* Find next sample. */ + while (next_out < last_out && next_out->sensor_id != i) + next_out++; + + if (next_out >= last_out) { + timestamp = current_timestamp; + } else { + timestamp = next_out->timestamp; + if (timestamp == older_timestamp) { + count++; + continue; + } + } + + /* + * The next sample has a new timestamp, spread the + * unprocessed samples. + */ + if (next_out < last_out) + count++; + time_period = div_s64(timestamp - older_timestamp, + count); + + for (; older_unprocess_out <= out; + older_unprocess_out++) { + if (older_unprocess_out->sensor_id != i) + continue; + older_timestamp += time_period; + older_unprocess_out->timestamp = + older_timestamp; + } + count = 1; + /* The next_out sample has a valid timestamp, skip. 
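To put numbers on cros_ec_sensor_ring_spread_add() above (made-up values for a 100 Hz sensor): suppose the previous batch was stamped at 100 ms and held 2 samples, and the current batch is stamped at 120 ms and also holds 2 samples.

	sample_period = (120 ms - 100 ms) / 2 = 10 ms
	sample 0: pushed as-is with the 120 ms batch timestamp
	sample 1: re-stamped to 120 ms + 10 ms * 1 = 130 ms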
*/ + next_out++; + older_unprocess_out = next_out; + } + } + + /* Push the event into the kfifo */ + for (out = sensorhub->ring; out < last_out; out++) + cros_sensorhub_send_sample(sensorhub, out); +} + +/** + * cros_ec_sensorhub_ring_handler() - The trigger handler function + * + * @sensorhub: Sensor Hub object. + * + * Called by the notifier, process the EC sensor FIFO queue. + */ +static void cros_ec_sensorhub_ring_handler(struct cros_ec_sensorhub *sensorhub) +{ + struct ec_response_motion_sense_fifo_info *fifo_info = + sensorhub->fifo_info; + struct cros_ec_dev *ec = sensorhub->ec; + ktime_t fifo_timestamp, current_timestamp; + int i, j, number_data, ret; + unsigned long sensor_mask = 0; + struct ec_response_motion_sensor_data *in; + struct cros_ec_sensors_ring_sample *out, *last_out; + + mutex_lock(&sensorhub->cmd_lock); + + /* Get FIFO information if there are lost vectors. */ + if (fifo_info->total_lost) { + int fifo_info_length = + sizeof(struct ec_response_motion_sense_fifo_info) + + sizeof(u16) * sensorhub->sensor_num; + + /* Need to retrieve the number of lost vectors per sensor */ + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO; + sensorhub->msg->outsize = 1; + sensorhub->msg->insize = fifo_info_length; + + if (cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg) < 0) + goto error; + + memcpy(fifo_info, &sensorhub->resp->fifo_info, + fifo_info_length); + + /* + * Update collection time, will not be as precise as the + * non-error case. + */ + fifo_timestamp = cros_ec_get_time_ns(); + } else { + fifo_timestamp = sensorhub->fifo_timestamp[ + CROS_EC_SENSOR_NEW_TS]; + } + + if (fifo_info->count > sensorhub->fifo_size || + fifo_info->size != sensorhub->fifo_size) { + dev_warn(sensorhub->dev, + "Mismatch EC data: count %d, size %d - expected %d", + fifo_info->count, fifo_info->size, + sensorhub->fifo_size); + goto error; + } + + /* Copy elements in the main fifo */ + current_timestamp = sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS]; + out = sensorhub->ring; + for (i = 0; i < fifo_info->count; i += number_data) { + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_READ; + sensorhub->params->fifo_read.max_data_vector = + fifo_info->count - i; + sensorhub->msg->outsize = + sizeof(struct ec_params_motion_sense); + sensorhub->msg->insize = + sizeof(sensorhub->resp->fifo_read) + + sensorhub->params->fifo_read.max_data_vector * + sizeof(struct ec_response_motion_sensor_data); + ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg); + if (ret < 0) { + dev_warn(sensorhub->dev, "Fifo error: %d\n", ret); + break; + } + number_data = sensorhub->resp->fifo_read.number_data; + if (number_data == 0) { + dev_dbg(sensorhub->dev, "Unexpected empty FIFO\n"); + break; + } + if (number_data > fifo_info->count - i) { + dev_warn(sensorhub->dev, + "Invalid EC data: too many entry received: %d, expected %d", + number_data, fifo_info->count - i); + break; + } + if (out + number_data > + sensorhub->ring + fifo_info->count) { + dev_warn(sensorhub->dev, + "Too many samples: %d (%zd data) to %d entries for expected %d entries", + i, out - sensorhub->ring, i + number_data, + fifo_info->count); + break; + } + + for (in = sensorhub->resp->fifo_read.data, j = 0; + j < number_data; j++, in++) { + if (cros_ec_sensor_ring_process_event( + sensorhub, fifo_info, + fifo_timestamp, + &current_timestamp, + in, out)) { + sensor_mask |= BIT(in->sensor_num); + out++; + } + } + } + mutex_unlock(&sensorhub->cmd_lock); + last_out = out; + + if (out == sensorhub->ring) + /* Unexpected empty FIFO. 
*/ + goto ring_handler_end; + + /* + * Check if current_timestamp is ahead of the last sample. Normally, + * the EC appends a timestamp after the last sample, but if the AP + * is slow to respond to the IRQ, the EC may have added new samples. + * Use the FIFO info timestamp as last timestamp then. + */ + if (!sensorhub->tight_timestamps && + (last_out - 1)->timestamp == current_timestamp) + current_timestamp = fifo_timestamp; + + /* Warn on lost samples. */ + if (fifo_info->total_lost) + for (i = 0; i < sensorhub->sensor_num; i++) { + if (fifo_info->lost[i]) { + dev_warn_ratelimited(sensorhub->dev, + "Sensor %d: lost: %d out of %d\n", + i, fifo_info->lost[i], + fifo_info->total_lost); + if (sensorhub->tight_timestamps) + sensorhub->batch_state[i].last_len = 0; + } + } + + /* + * Spread samples in case of batching, then add them to the + * ringbuffer. + */ + if (sensorhub->tight_timestamps) + cros_ec_sensor_ring_spread_add(sensorhub, sensor_mask, + last_out); + else + cros_ec_sensor_ring_spread_add_legacy(sensorhub, sensor_mask, + current_timestamp, + last_out); + +ring_handler_end: + sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = current_timestamp; + return; + +error: + mutex_unlock(&sensorhub->cmd_lock); +} + +static int cros_ec_sensorhub_event(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *_notify) +{ + struct cros_ec_sensorhub *sensorhub; + struct cros_ec_device *ec_dev; + + sensorhub = container_of(nb, struct cros_ec_sensorhub, notifier); + ec_dev = sensorhub->ec->ec_dev; + + if (ec_dev->event_data.event_type != EC_MKBP_EVENT_SENSOR_FIFO) + return NOTIFY_DONE; + + if (ec_dev->event_size != sizeof(ec_dev->event_data.data.sensor_fifo)) { + dev_warn(ec_dev->dev, "Invalid fifo info size\n"); + return NOTIFY_DONE; + } + + if (queued_during_suspend) + return NOTIFY_OK; + + memcpy(sensorhub->fifo_info, &ec_dev->event_data.data.sensor_fifo.info, + sizeof(*sensorhub->fifo_info)); + sensorhub->fifo_timestamp[CROS_EC_SENSOR_NEW_TS] = + ec_dev->last_event_time; + cros_ec_sensorhub_ring_handler(sensorhub); + + return NOTIFY_OK; +} + +/** + * cros_ec_sensorhub_ring_add() - Add the FIFO functionality if the EC + * supports it. + * + * @sensorhub : Sensor Hub object. + * + * Return: 0 on success. + */ +int cros_ec_sensorhub_ring_add(struct cros_ec_sensorhub *sensorhub) +{ + struct cros_ec_dev *ec = sensorhub->ec; + int ret; + int fifo_info_length = + sizeof(struct ec_response_motion_sense_fifo_info) + + sizeof(u16) * sensorhub->sensor_num; + + /* Allocate the array for lost events. */ + sensorhub->fifo_info = devm_kzalloc(sensorhub->dev, fifo_info_length, + GFP_KERNEL); + if (!sensorhub->fifo_info) + return -ENOMEM; + + /* Retrieve FIFO information */ + sensorhub->msg->version = 2; + sensorhub->params->cmd = MOTIONSENSE_CMD_FIFO_INFO; + sensorhub->msg->outsize = 1; + sensorhub->msg->insize = fifo_info_length; + + ret = cros_ec_cmd_xfer_status(ec->ec_dev, sensorhub->msg); + if (ret < 0) + return ret; + + /* + * Allocate the full fifo. We need to copy the whole FIFO to set + * timestamps properly. + */ + sensorhub->fifo_size = sensorhub->resp->fifo_info.size; + sensorhub->ring = devm_kcalloc(sensorhub->dev, sensorhub->fifo_size, + sizeof(*sensorhub->ring), GFP_KERNEL); + if (!sensorhub->ring) + return -ENOMEM; + + /* + * Allocate the callback area based on the number of sensors. 
+ */ + sensorhub->push_data = devm_kcalloc( + sensorhub->dev, sensorhub->sensor_num, + sizeof(*sensorhub->push_data), + GFP_KERNEL); + if (!sensorhub->push_data) + return -ENOMEM; + + sensorhub->fifo_timestamp[CROS_EC_SENSOR_LAST_TS] = + cros_ec_get_time_ns(); + + sensorhub->tight_timestamps = cros_ec_check_features( + ec, EC_FEATURE_MOTION_SENSE_TIGHT_TIMESTAMPS); + + if (sensorhub->tight_timestamps) { + sensorhub->batch_state = devm_kcalloc(sensorhub->dev, + sensorhub->sensor_num, + sizeof(*sensorhub->batch_state), + GFP_KERNEL); + if (!sensorhub->batch_state) + return -ENOMEM; + } + + /* Register the notifier that will act as a top half interrupt. */ + sensorhub->notifier.notifier_call = cros_ec_sensorhub_event; + ret = blocking_notifier_chain_register(&ec->ec_dev->event_notifier, + &sensorhub->notifier); + if (ret < 0) + return ret; + + /* Start collection samples. */ + return cros_ec_sensorhub_ring_fifo_enable(sensorhub, true); +} + +void cros_ec_sensorhub_ring_remove(void *arg) +{ + struct cros_ec_sensorhub *sensorhub = arg; + struct cros_ec_device *ec_dev = sensorhub->ec->ec_dev; + + /* Disable the ring, prevent EC interrupt to the AP for nothing. */ + cros_ec_sensorhub_ring_fifo_enable(sensorhub, false); + blocking_notifier_chain_unregister(&ec_dev->event_notifier, + &sensorhub->notifier); +} diff --git a/drivers/platform/chrome/cros_ec_spi.c b/drivers/platform/chrome/cros_ec_spi.c index 46786d2d679a..debea5c4c829 100644 --- a/drivers/platform/chrome/cros_ec_spi.c +++ b/drivers/platform/chrome/cros_ec_spi.c @@ -127,7 +127,8 @@ static int terminate_request(struct cros_ec_device *ec_dev) */ spi_message_init(&msg); memset(&trans, 0, sizeof(trans)); - trans.delay_usecs = ec_spi->end_of_msg_delay; + trans.delay.value = ec_spi->end_of_msg_delay; + trans.delay.unit = SPI_DELAY_UNIT_USECS; spi_message_add_tail(&trans, &msg); ret = spi_sync_locked(ec_spi->spi, &msg); @@ -416,7 +417,8 @@ static int do_cros_ec_pkt_xfer_spi(struct cros_ec_device *ec_dev, spi_message_init(&msg); if (ec_spi->start_of_msg_delay) { memset(&trans_delay, 0, sizeof(trans_delay)); - trans_delay.delay_usecs = ec_spi->start_of_msg_delay; + trans_delay.delay.value = ec_spi->start_of_msg_delay; + trans_delay.delay.unit = SPI_DELAY_UNIT_USECS; spi_message_add_tail(&trans_delay, &msg); } diff --git a/drivers/platform/chrome/cros_ec_sysfs.c b/drivers/platform/chrome/cros_ec_sysfs.c index 07dac97ad57c..d45ea5d5bfa4 100644 --- a/drivers/platform/chrome/cros_ec_sysfs.c +++ b/drivers/platform/chrome/cros_ec_sysfs.c @@ -149,14 +149,14 @@ static ssize_t version_show(struct device *dev, /* Get build info. */ msg->command = EC_CMD_GET_BUILD_INFO + ec->cmd_offset; msg->insize = EC_HOST_PARAM_SIZE; - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); - if (ret < 0) - count += scnprintf(buf + count, PAGE_SIZE - count, - "Build info: XFER ERROR %d\n", ret); - else if (msg->result != EC_RES_SUCCESS) + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + if (ret == -EPROTO) { count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: EC error %d\n", msg->result); - else { + } else if (ret < 0) { + count += scnprintf(buf + count, PAGE_SIZE - count, + "Build info: XFER ERROR %d\n", ret); + } else { msg->data[EC_HOST_PARAM_SIZE - 1] = '\0'; count += scnprintf(buf + count, PAGE_SIZE - count, "Build info: %s\n", msg->data); @@ -165,14 +165,14 @@ static ssize_t version_show(struct device *dev, /* Get chip info. 
*/ msg->command = EC_CMD_GET_CHIP_INFO + ec->cmd_offset; msg->insize = sizeof(*r_chip); - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); - if (ret < 0) - count += scnprintf(buf + count, PAGE_SIZE - count, - "Chip info: XFER ERROR %d\n", ret); - else if (msg->result != EC_RES_SUCCESS) + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + if (ret == -EPROTO) { count += scnprintf(buf + count, PAGE_SIZE - count, "Chip info: EC error %d\n", msg->result); - else { + } else if (ret < 0) { + count += scnprintf(buf + count, PAGE_SIZE - count, + "Chip info: XFER ERROR %d\n", ret); + } else { r_chip = (struct ec_response_get_chip_info *)msg->data; r_chip->vendor[sizeof(r_chip->vendor) - 1] = '\0'; @@ -189,14 +189,14 @@ static ssize_t version_show(struct device *dev, /* Get board version */ msg->command = EC_CMD_GET_BOARD_VERSION + ec->cmd_offset; msg->insize = sizeof(*r_board); - ret = cros_ec_cmd_xfer(ec->ec_dev, msg); - if (ret < 0) - count += scnprintf(buf + count, PAGE_SIZE - count, - "Board version: XFER ERROR %d\n", ret); - else if (msg->result != EC_RES_SUCCESS) + ret = cros_ec_cmd_xfer_status(ec->ec_dev, msg); + if (ret == -EPROTO) { count += scnprintf(buf + count, PAGE_SIZE - count, "Board version: EC error %d\n", msg->result); - else { + } else if (ret < 0) { + count += scnprintf(buf + count, PAGE_SIZE - count, + "Board version: XFER ERROR %d\n", ret); + } else { r_board = (struct ec_response_board_version *)msg->data; count += scnprintf(buf + count, PAGE_SIZE - count, diff --git a/drivers/platform/chrome/cros_ec_typec.c b/drivers/platform/chrome/cros_ec_typec.c new file mode 100644 index 000000000000..874269c07073 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_typec.c @@ -0,0 +1,357 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Google LLC + * + * This driver provides the ability to view and manage Type C ports through the + * Chrome OS EC. + */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_data/cros_ec_commands.h> +#include <linux/platform_data/cros_ec_proto.h> +#include <linux/platform_device.h> +#include <linux/usb/typec.h> + +#define DRV_NAME "cros-ec-typec" + +/* Platform-specific data for the Chrome OS EC Type C controller. */ +struct cros_typec_data { + struct device *dev; + struct cros_ec_device *ec; + int num_ports; + unsigned int cmd_ver; + /* Array of ports, indexed by port number. */ + struct typec_port *ports[EC_USB_PD_MAX_PORTS]; + /* Initial capabilities for each port. 
*/ + struct typec_capability *caps[EC_USB_PD_MAX_PORTS]; +}; + +static int cros_typec_parse_port_props(struct typec_capability *cap, + struct fwnode_handle *fwnode, + struct device *dev) +{ + const char *buf; + int ret; + + memset(cap, 0, sizeof(*cap)); + ret = fwnode_property_read_string(fwnode, "power-role", &buf); + if (ret) { + dev_err(dev, "power-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_port_power_role(buf); + if (ret < 0) + return ret; + cap->type = ret; + + ret = fwnode_property_read_string(fwnode, "data-role", &buf); + if (ret) { + dev_err(dev, "data-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_port_data_role(buf); + if (ret < 0) + return ret; + cap->data = ret; + + ret = fwnode_property_read_string(fwnode, "try-power-role", &buf); + if (ret) { + dev_err(dev, "try-power-role not found: %d\n", ret); + return ret; + } + + ret = typec_find_power_role(buf); + if (ret < 0) + return ret; + cap->prefer_role = ret; + + cap->fwnode = fwnode; + + return 0; +} + +static int cros_typec_init_ports(struct cros_typec_data *typec) +{ + struct device *dev = typec->dev; + struct typec_capability *cap; + struct fwnode_handle *fwnode; + const char *port_prop; + int ret; + int i; + int nports; + u32 port_num = 0; + + nports = device_get_child_node_count(dev); + if (nports == 0) { + dev_err(dev, "No port entries found.\n"); + return -ENODEV; + } + + if (nports > typec->num_ports) { + dev_err(dev, "More ports listed than can be supported.\n"); + return -EINVAL; + } + + /* DT uses "reg" to specify port number. */ + port_prop = dev->of_node ? "reg" : "port-number"; + device_for_each_child_node(dev, fwnode) { + if (fwnode_property_read_u32(fwnode, port_prop, &port_num)) { + ret = -EINVAL; + dev_err(dev, "No port-number for port, aborting.\n"); + goto unregister_ports; + } + + if (port_num >= typec->num_ports) { + dev_err(dev, "Invalid port number.\n"); + ret = -EINVAL; + goto unregister_ports; + } + + dev_dbg(dev, "Registering port %d\n", port_num); + + cap = devm_kzalloc(dev, sizeof(*cap), GFP_KERNEL); + if (!cap) { + ret = -ENOMEM; + goto unregister_ports; + } + + typec->caps[port_num] = cap; + + ret = cros_typec_parse_port_props(cap, fwnode, dev); + if (ret < 0) + goto unregister_ports; + + typec->ports[port_num] = typec_register_port(dev, cap); + if (IS_ERR(typec->ports[port_num])) { + dev_err(dev, "Failed to register port %d\n", port_num); + ret = PTR_ERR(typec->ports[port_num]); + goto unregister_ports; + } + } + + return 0; + +unregister_ports: + for (i = 0; i < typec->num_ports; i++) + typec_unregister_port(typec->ports[i]); + return ret; +} + +static int cros_typec_ec_command(struct cros_typec_data *typec, + unsigned int version, + unsigned int command, + void *outdata, + unsigned int outsize, + void *indata, + unsigned int insize) +{ + struct cros_ec_command *msg; + int ret; + + msg = kzalloc(sizeof(*msg) + max(outsize, insize), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->version = version; + msg->command = command; + msg->outsize = outsize; + msg->insize = insize; + + if (outsize) + memcpy(msg->data, outdata, outsize); + + ret = cros_ec_cmd_xfer_status(typec->ec, msg); + if (ret >= 0 && insize) + memcpy(indata, msg->data, insize); + + kfree(msg); + return ret; +} + +static void cros_typec_set_port_params_v0(struct cros_typec_data *typec, + int port_num, struct ec_response_usb_pd_control *resp) +{ + struct typec_port *port = typec->ports[port_num]; + enum typec_orientation polarity; + + if (!resp->enabled) + polarity = 
TYPEC_ORIENTATION_NONE; + else if (!resp->polarity) + polarity = TYPEC_ORIENTATION_NORMAL; + else + polarity = TYPEC_ORIENTATION_REVERSE; + + typec_set_pwr_role(port, resp->role ? TYPEC_SOURCE : TYPEC_SINK); + typec_set_orientation(port, polarity); +} + +static void cros_typec_set_port_params_v1(struct cros_typec_data *typec, + int port_num, struct ec_response_usb_pd_control_v1 *resp) +{ + struct typec_port *port = typec->ports[port_num]; + enum typec_orientation polarity; + + if (!(resp->enabled & PD_CTRL_RESP_ENABLED_CONNECTED)) + polarity = TYPEC_ORIENTATION_NONE; + else if (!resp->polarity) + polarity = TYPEC_ORIENTATION_NORMAL; + else + polarity = TYPEC_ORIENTATION_REVERSE; + typec_set_orientation(port, polarity); + typec_set_data_role(port, resp->role & PD_CTRL_RESP_ROLE_DATA ? + TYPEC_HOST : TYPEC_DEVICE); + typec_set_pwr_role(port, resp->role & PD_CTRL_RESP_ROLE_POWER ? + TYPEC_SOURCE : TYPEC_SINK); + typec_set_vconn_role(port, resp->role & PD_CTRL_RESP_ROLE_VCONN ? + TYPEC_SOURCE : TYPEC_SINK); +} + +static int cros_typec_port_update(struct cros_typec_data *typec, int port_num) +{ + struct ec_params_usb_pd_control req; + struct ec_response_usb_pd_control_v1 resp; + int ret; + + if (port_num < 0 || port_num >= typec->num_ports) { + dev_err(typec->dev, "cannot get status for invalid port %d\n", + port_num); + return -EINVAL; + } + + req.port = port_num; + req.role = USB_PD_CTRL_ROLE_NO_CHANGE; + req.mux = USB_PD_CTRL_MUX_NO_CHANGE; + req.swap = USB_PD_CTRL_SWAP_NONE; + + ret = cros_typec_ec_command(typec, typec->cmd_ver, + EC_CMD_USB_PD_CONTROL, &req, sizeof(req), + &resp, sizeof(resp)); + if (ret < 0) + return ret; + + dev_dbg(typec->dev, "Enabled %d: 0x%hhx\n", port_num, resp.enabled); + dev_dbg(typec->dev, "Role %d: 0x%hhx\n", port_num, resp.role); + dev_dbg(typec->dev, "Polarity %d: 0x%hhx\n", port_num, resp.polarity); + dev_dbg(typec->dev, "State %d: %s\n", port_num, resp.state); + + if (typec->cmd_ver == 1) + cros_typec_set_port_params_v1(typec, port_num, &resp); + else + cros_typec_set_port_params_v0(typec, port_num, + (struct ec_response_usb_pd_control *) &resp); + + return 0; +} + +static int cros_typec_get_cmd_version(struct cros_typec_data *typec) +{ + struct ec_params_get_cmd_versions_v1 req_v1; + struct ec_response_get_cmd_versions resp; + int ret; + + /* We're interested in the PD control command version. 
*/ + req_v1.cmd = EC_CMD_USB_PD_CONTROL; + ret = cros_typec_ec_command(typec, 1, EC_CMD_GET_CMD_VERSIONS, + &req_v1, sizeof(req_v1), &resp, + sizeof(resp)); + if (ret < 0) + return ret; + + if (resp.version_mask & EC_VER_MASK(1)) + typec->cmd_ver = 1; + else + typec->cmd_ver = 0; + + dev_dbg(typec->dev, "PD Control has version mask 0x%hhx\n", + typec->cmd_ver); + + return 0; +} + +#ifdef CONFIG_ACPI +static const struct acpi_device_id cros_typec_acpi_id[] = { + { "GOOG0014", 0 }, + {} +}; +MODULE_DEVICE_TABLE(acpi, cros_typec_acpi_id); +#endif + +#ifdef CONFIG_OF +static const struct of_device_id cros_typec_of_match[] = { + { .compatible = "google,cros-ec-typec", }, + {} +}; +MODULE_DEVICE_TABLE(of, cros_typec_of_match); +#endif + +static int cros_typec_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_typec_data *typec; + struct ec_response_usb_pd_ports resp; + int ret, i; + + typec = devm_kzalloc(dev, sizeof(*typec), GFP_KERNEL); + if (!typec) + return -ENOMEM; + + typec->dev = dev; + typec->ec = dev_get_drvdata(pdev->dev.parent); + platform_set_drvdata(pdev, typec); + + ret = cros_typec_get_cmd_version(typec); + if (ret < 0) { + dev_err(dev, "failed to get PD command version info\n"); + return ret; + } + + ret = cros_typec_ec_command(typec, 0, EC_CMD_USB_PD_PORTS, NULL, 0, + &resp, sizeof(resp)); + if (ret < 0) + return ret; + + typec->num_ports = resp.num_ports; + if (typec->num_ports > EC_USB_PD_MAX_PORTS) { + dev_warn(typec->dev, + "Too many ports reported: %d, limiting to max: %d\n", + typec->num_ports, EC_USB_PD_MAX_PORTS); + typec->num_ports = EC_USB_PD_MAX_PORTS; + } + + ret = cros_typec_init_ports(typec); + if (ret < 0) + return ret; + + for (i = 0; i < typec->num_ports; i++) { + ret = cros_typec_port_update(typec, i); + if (ret < 0) + goto unregister_ports; + } + + return 0; + +unregister_ports: + for (i = 0; i < typec->num_ports; i++) + if (typec->ports[i]) + typec_unregister_port(typec->ports[i]); + return ret; +} + +static struct platform_driver cros_typec_driver = { + .driver = { + .name = DRV_NAME, + .acpi_match_table = ACPI_PTR(cros_typec_acpi_id), + .of_match_table = of_match_ptr(cros_typec_of_match), + }, + .probe = cros_typec_probe, +}; + +module_platform_driver(cros_typec_driver); + +MODULE_AUTHOR("Prashant Malani <pmalani@chromium.org>"); +MODULE_DESCRIPTION("Chrome OS EC Type C control"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/chrome/cros_ec_vbc.c b/drivers/platform/chrome/cros_ec_vbc.c index 8edae465105c..46482d12cffe 100644 --- a/drivers/platform/chrome/cros_ec_vbc.c +++ b/drivers/platform/chrome/cros_ec_vbc.c @@ -40,7 +40,7 @@ static ssize_t vboot_context_read(struct file *filp, struct kobject *kobj, msg->outsize = para_sz; msg->insize = resp_sz; - err = cros_ec_cmd_xfer(ecdev, msg); + err = cros_ec_cmd_xfer_status(ecdev, msg); if (err < 0) { dev_err(dev, "Error sending read request: %d\n", err); kfree(msg); @@ -83,7 +83,7 @@ static ssize_t vboot_context_write(struct file *filp, struct kobject *kobj, msg->outsize = para_sz; msg->insize = 0; - err = cros_ec_cmd_xfer(ecdev, msg); + err = cros_ec_cmd_xfer_status(ecdev, msg); if (err < 0) { dev_err(dev, "Error sending write request: %d\n", err); kfree(msg); diff --git a/drivers/platform/chrome/cros_usbpd_notify.c b/drivers/platform/chrome/cros_usbpd_notify.c new file mode 100644 index 000000000000..7f36142ab12a --- /dev/null +++ b/drivers/platform/chrome/cros_usbpd_notify.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2020 Google 
LLC + * + * This driver serves as the receiver of cros_ec PD host events. + */ + +#include <linux/acpi.h> +#include <linux/module.h> +#include <linux/platform_data/cros_ec_proto.h> +#include <linux/platform_data/cros_usbpd_notify.h> +#include <linux/platform_device.h> + +#define DRV_NAME "cros-usbpd-notify" +#define DRV_NAME_PLAT_ACPI "cros-usbpd-notify-acpi" +#define ACPI_DRV_NAME "GOOG0003" + +static BLOCKING_NOTIFIER_HEAD(cros_usbpd_notifier_list); + +struct cros_usbpd_notify_data { + struct device *dev; + struct cros_ec_device *ec; + struct notifier_block nb; +}; + +/** + * cros_usbpd_register_notify - Register a notifier callback for PD events. + * @nb: Notifier block pointer to register + * + * On ACPI platforms this corresponds to host events on the ECPD + * "GOOG0003" ACPI device. On non-ACPI platforms this will filter mkbp events + * for USB PD events. + * + * Return: 0 on success or negative error code. + */ +int cros_usbpd_register_notify(struct notifier_block *nb) +{ + return blocking_notifier_chain_register(&cros_usbpd_notifier_list, + nb); +} +EXPORT_SYMBOL_GPL(cros_usbpd_register_notify); + +/** + * cros_usbpd_unregister_notify - Unregister notifier callback for PD events. + * @nb: Notifier block pointer to unregister + * + * Unregister a notifier callback that was previously registered with + * cros_usbpd_register_notify(). + */ +void cros_usbpd_unregister_notify(struct notifier_block *nb) +{ + blocking_notifier_chain_unregister(&cros_usbpd_notifier_list, nb); +} +EXPORT_SYMBOL_GPL(cros_usbpd_unregister_notify); + +/** + * cros_ec_pd_command - Send a command to the EC. + * + * @ec_dev: EC device + * @command: EC command + * @outdata: EC command output data + * @outsize: Size of outdata + * @indata: EC command input data + * @insize: Size of indata + * + * Return: >= 0 on success, negative error number on failure. + */ +static int cros_ec_pd_command(struct cros_ec_device *ec_dev, + int command, + uint8_t *outdata, + int outsize, + uint8_t *indata, + int insize) +{ + struct cros_ec_command *msg; + int ret; + + msg = kzalloc(sizeof(*msg) + max(insize, outsize), GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->command = command; + msg->outsize = outsize; + msg->insize = insize; + + if (outsize) + memcpy(msg->data, outdata, outsize); + + ret = cros_ec_cmd_xfer_status(ec_dev, msg); + if (ret < 0) + goto error; + + if (insize) + memcpy(indata, msg->data, insize); +error: + kfree(msg); + return ret; +} + +static void cros_usbpd_get_event_and_notify(struct device *dev, + struct cros_ec_device *ec_dev) +{ + struct ec_response_host_event_status host_event_status; + u32 event = 0; + int ret; + + /* + * We still send a 0 event out to older devices which don't + * have the updated device heirarchy. + */ + if (!ec_dev) { + dev_dbg(dev, + "EC device inaccessible; sending 0 event status.\n"); + goto send_notify; + } + + /* Check for PD host events on EC. 
*/ + ret = cros_ec_pd_command(ec_dev, EC_CMD_PD_HOST_EVENT_STATUS, + NULL, 0, + (uint8_t *)&host_event_status, + sizeof(host_event_status)); + if (ret < 0) { + dev_warn(dev, "Can't get host event status (err: %d)\n", ret); + goto send_notify; + } + + event = host_event_status.status; + +send_notify: + blocking_notifier_call_chain(&cros_usbpd_notifier_list, event, NULL); +} + +#ifdef CONFIG_ACPI + +static void cros_usbpd_notify_acpi(acpi_handle device, u32 event, void *data) +{ + struct cros_usbpd_notify_data *pdnotify = data; + + cros_usbpd_get_event_and_notify(pdnotify->dev, pdnotify->ec); +} + +static int cros_usbpd_notify_probe_acpi(struct platform_device *pdev) +{ + struct cros_usbpd_notify_data *pdnotify; + struct device *dev = &pdev->dev; + struct acpi_device *adev; + struct cros_ec_device *ec_dev; + acpi_status status; + + adev = ACPI_COMPANION(dev); + + pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL); + if (!pdnotify) + return -ENOMEM; + + /* Get the EC device pointer needed to talk to the EC. */ + ec_dev = dev_get_drvdata(dev->parent); + if (!ec_dev) { + /* + * We continue even for older devices which don't have the + * correct device heirarchy, namely, GOOG0003 is a child + * of GOOG0004. + */ + dev_warn(dev, "Couldn't get Chrome EC device pointer.\n"); + } + + pdnotify->dev = dev; + pdnotify->ec = ec_dev; + + status = acpi_install_notify_handler(adev->handle, + ACPI_ALL_NOTIFY, + cros_usbpd_notify_acpi, + pdnotify); + if (ACPI_FAILURE(status)) { + dev_warn(dev, "Failed to register notify handler %08x\n", + status); + return -EINVAL; + } + + return 0; +} + +static int cros_usbpd_notify_remove_acpi(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct acpi_device *adev = ACPI_COMPANION(dev); + + acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY, + cros_usbpd_notify_acpi); + + return 0; +} + +static const struct acpi_device_id cros_usbpd_notify_acpi_device_ids[] = { + { ACPI_DRV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, cros_usbpd_notify_acpi_device_ids); + +static struct platform_driver cros_usbpd_notify_acpi_driver = { + .driver = { + .name = DRV_NAME_PLAT_ACPI, + .acpi_match_table = cros_usbpd_notify_acpi_device_ids, + }, + .probe = cros_usbpd_notify_probe_acpi, + .remove = cros_usbpd_notify_remove_acpi, +}; + +#endif /* CONFIG_ACPI */ + +static int cros_usbpd_notify_plat(struct notifier_block *nb, + unsigned long queued_during_suspend, + void *data) +{ + struct cros_usbpd_notify_data *pdnotify = container_of(nb, + struct cros_usbpd_notify_data, nb); + struct cros_ec_device *ec_dev = (struct cros_ec_device *)data; + u32 host_event = cros_ec_get_host_event(ec_dev); + + if (!host_event) + return NOTIFY_DONE; + + if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) { + cros_usbpd_get_event_and_notify(pdnotify->dev, ec_dev); + return NOTIFY_OK; + } + return NOTIFY_DONE; +} + +static int cros_usbpd_notify_probe_plat(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent); + struct cros_usbpd_notify_data *pdnotify; + int ret; + + pdnotify = devm_kzalloc(dev, sizeof(*pdnotify), GFP_KERNEL); + if (!pdnotify) + return -ENOMEM; + + pdnotify->dev = dev; + pdnotify->ec = ecdev->ec_dev; + pdnotify->nb.notifier_call = cros_usbpd_notify_plat; + + dev_set_drvdata(dev, pdnotify); + + ret = blocking_notifier_chain_register(&ecdev->ec_dev->event_notifier, + &pdnotify->nb); + if (ret < 0) { + dev_err(dev, "Failed to register notifier\n"); + return ret; + } + + return 0; +} + 
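A hypothetical client of the notifier chain exported by this file would hook in roughly as below. Illustration only, not part of this patch: example_pd_notify and example_pd_nb are made-up names. The value passed down the chain is the EC_CMD_PD_HOST_EVENT_STATUS status word, or 0 on legacy platforms where the EC device could not be reached.

#include <linux/notifier.h>
#include <linux/platform_data/cros_usbpd_notify.h>
#include <linux/printk.h>

static int example_pd_notify(struct notifier_block *nb,
			     unsigned long host_event, void *data)
{
	pr_info("USB PD host event status: 0x%lx\n", host_event);
	return NOTIFY_OK;
}

static struct notifier_block example_pd_nb = {
	.notifier_call = example_pd_notify,
};

/* In the client's probe:  cros_usbpd_register_notify(&example_pd_nb);   */
/* and on remove:          cros_usbpd_unregister_notify(&example_pd_nb); */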
+static int cros_usbpd_notify_remove_plat(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct cros_ec_dev *ecdev = dev_get_drvdata(dev->parent); + struct cros_usbpd_notify_data *pdnotify = + (struct cros_usbpd_notify_data *)dev_get_drvdata(dev); + + blocking_notifier_chain_unregister(&ecdev->ec_dev->event_notifier, + &pdnotify->nb); + + return 0; +} + +static struct platform_driver cros_usbpd_notify_plat_driver = { + .driver = { + .name = DRV_NAME, + }, + .probe = cros_usbpd_notify_probe_plat, + .remove = cros_usbpd_notify_remove_plat, +}; + +static int __init cros_usbpd_notify_init(void) +{ + int ret; + + ret = platform_driver_register(&cros_usbpd_notify_plat_driver); + if (ret < 0) + return ret; + +#ifdef CONFIG_ACPI + platform_driver_register(&cros_usbpd_notify_acpi_driver); +#endif + return 0; +} + +static void __exit cros_usbpd_notify_exit(void) +{ +#ifdef CONFIG_ACPI + platform_driver_unregister(&cros_usbpd_notify_acpi_driver); +#endif + platform_driver_unregister(&cros_usbpd_notify_plat_driver); +} + +module_init(cros_usbpd_notify_init); +module_exit(cros_usbpd_notify_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("ChromeOS power delivery notifier device"); +MODULE_AUTHOR("Jon Flatley <jflat@chromium.org>"); +MODULE_ALIAS("platform:" DRV_NAME); diff --git a/drivers/platform/chrome/wilco_ec/event.c b/drivers/platform/chrome/wilco_ec/event.c index dba3d445623f..814518509739 100644 --- a/drivers/platform/chrome/wilco_ec/event.c +++ b/drivers/platform/chrome/wilco_ec/event.c @@ -79,7 +79,7 @@ static DEFINE_IDA(event_ida); struct ec_event { u16 size; u16 type; - u16 event[0]; + u16 event[]; } __packed; #define ec_event_num_words(ev) (ev->size - 1) @@ -96,7 +96,7 @@ struct ec_event_queue { int capacity; int head; int tail; - struct ec_event *entries[0]; + struct ec_event *entries[]; }; /* Maximum number of events to store in ec_event_queue */ diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c index 62f27610dd33..c2bf4c95c5d2 100644 --- a/drivers/platform/chrome/wilco_ec/properties.c +++ b/drivers/platform/chrome/wilco_ec/properties.c @@ -3,8 +3,11 @@ * Copyright 2019 Google LLC */ +#include <linux/errno.h> +#include <linux/export.h> #include <linux/platform_data/wilco-ec.h> #include <linux/string.h> +#include <linux/types.h> #include <asm/unaligned.h> /* Operation code; what the EC should do with the property */ diff --git a/drivers/platform/chrome/wilco_ec/sysfs.c b/drivers/platform/chrome/wilco_ec/sysfs.c index f0d174b6bb21..3c587b4054a5 100644 --- a/drivers/platform/chrome/wilco_ec/sysfs.c +++ b/drivers/platform/chrome/wilco_ec/sysfs.c @@ -8,8 +8,12 @@ * See Documentation/ABI/testing/sysfs-platform-wilco-ec for more information. 
*/ +#include <linux/device.h> +#include <linux/kernel.h> #include <linux/platform_data/wilco-ec.h> +#include <linux/string.h> #include <linux/sysfs.h> +#include <linux/types.h> #define CMD_KB_CMOS 0x7C #define SUB_CMD_KB_CMOS_AUTO_ON 0x03 diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c index 5f4f5716c893..cc7dd4d87cce 100644 --- a/drivers/platform/x86/intel-hid.c +++ b/drivers/platform/x86/intel-hid.c @@ -19,8 +19,8 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Alex Hung"); static const struct acpi_device_id intel_hid_ids[] = { - {"INT1051", 0}, {"INT33D5", 0}, + {"INTC1051", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids); diff --git a/drivers/platform/x86/intel_int0002_vgpio.c b/drivers/platform/x86/intel_int0002_vgpio.c index 7b23efc46a43..289c6655d425 100644 --- a/drivers/platform/x86/intel_int0002_vgpio.c +++ b/drivers/platform/x86/intel_int0002_vgpio.c @@ -127,6 +127,14 @@ static irqreturn_t int0002_irq(int irq, void *data) return IRQ_HANDLED; } +static bool int0002_check_wake(void *data) +{ + u32 gpe_sts_reg; + + gpe_sts_reg = inl(GPE0A_STS_PORT); + return (gpe_sts_reg & GPE0A_PME_B0_STS_BIT); +} + static struct irq_chip int0002_byt_irqchip = { .name = DRV_NAME, .irq_ack = int0002_irq_ack, @@ -220,6 +228,7 @@ static int int0002_probe(struct platform_device *pdev) return ret; } + acpi_register_wakeup_handler(irq, int0002_check_wake, NULL); device_init_wakeup(dev, true); return 0; } @@ -227,6 +236,7 @@ static int int0002_probe(struct platform_device *pdev) static int int0002_remove(struct platform_device *pdev) { device_init_wakeup(&pdev->dev, false); + acpi_unregister_wakeup_handler(int0002_check_wake, NULL); return 0; } diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig index 513efe8e7628..890380302080 100644 --- a/drivers/power/reset/Kconfig +++ b/drivers/power/reset/Kconfig @@ -248,7 +248,7 @@ config SYSCON_REBOOT_MODE action according to the mode. 
config POWER_RESET_SC27XX - bool "Spreadtrum SC27xx PMIC power-off driver" + tristate "Spreadtrum SC27xx PMIC power-off driver" depends on MFD_SC27XX_PMIC || COMPILE_TEST help This driver supports powering off a system through diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c index d94e3267c3b6..3ff9d93a5226 100644 --- a/drivers/power/reset/at91-reset.c +++ b/drivers/power/reset/at91-reset.c @@ -35,6 +35,7 @@ #define AT91_RSTC_MR 0x08 /* Reset Controller Mode Register */ #define AT91_RSTC_URSTEN BIT(0) /* User Reset Enable */ +#define AT91_RSTC_URSTASYNC BIT(2) /* User Reset Asynchronous Control */ #define AT91_RSTC_URSTIEN BIT(4) /* User Reset Interrupt Enable */ #define AT91_RSTC_ERSTL GENMASK(11, 8) /* External Reset Length */ @@ -49,105 +50,63 @@ enum reset_type { RESET_TYPE_ULP2 = 8, }; -static void __iomem *at91_ramc_base[2], *at91_rstc_base; -static struct clk *sclk; +struct at91_reset { + void __iomem *rstc_base; + void __iomem *ramc_base[2]; + struct clk *sclk; + struct notifier_block nb; + u32 args; + u32 ramc_lpr; +}; /* * unless the SDRAM is cleanly shutdown before we hit the * reset register it can be left driving the data bus and * killing the chance of a subsequent boot from NAND */ -static int at91sam9260_restart(struct notifier_block *this, unsigned long mode, - void *cmd) +static int at91_reset(struct notifier_block *this, unsigned long mode, + void *cmd) { - asm volatile( - /* Align to cache lines */ - ".balign 32\n\t" - - /* Disable SDRAM accesses */ - "str %2, [%0, #" __stringify(AT91_SDRAMC_TR) "]\n\t" - - /* Power down SDRAM */ - "str %3, [%0, #" __stringify(AT91_SDRAMC_LPR) "]\n\t" - - /* Reset CPU */ - "str %4, [%1, #" __stringify(AT91_RSTC_CR) "]\n\t" - - "b .\n\t" - : - : "r" (at91_ramc_base[0]), - "r" (at91_rstc_base), - "r" (1), - "r" cpu_to_le32(AT91_SDRAMC_LPCB_POWER_DOWN), - "r" cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST)); + struct at91_reset *reset = container_of(this, struct at91_reset, nb); - return NOTIFY_DONE; -} - -static int at91sam9g45_restart(struct notifier_block *this, unsigned long mode, - void *cmd) -{ asm volatile( - /* - * Test wether we have a second RAM controller to care - * about. - * - * First, test that we can dereference the virtual address. 
- */ - "cmp %1, #0\n\t" - "beq 1f\n\t" - - /* Then, test that the RAM controller is enabled */ - "ldr r0, [%1]\n\t" - "cmp r0, #0\n\t" - /* Align to cache lines */ ".balign 32\n\t" /* Disable SDRAM0 accesses */ - "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" + " tst %0, #0\n\t" + " beq 1f\n\t" + " str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" /* Power down SDRAM0 */ - " str %4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t" + " str %4, [%0, %6]\n\t" /* Disable SDRAM1 accesses */ + "1: tst %1, #0\n\t" + " beq 2f\n\t" " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" /* Power down SDRAM1 */ - " strne %4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t" + " strne %4, [%1, %6]\n\t" /* Reset CPU */ - " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" + "2: str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" " b .\n\t" : - : "r" (at91_ramc_base[0]), - "r" (at91_ramc_base[1]), - "r" (at91_rstc_base), + : "r" (reset->ramc_base[0]), + "r" (reset->ramc_base[1]), + "r" (reset->rstc_base), "r" (1), "r" cpu_to_le32(AT91_DDRSDRC_LPCB_POWER_DOWN), - "r" cpu_to_le32(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST) - : "r0"); - - return NOTIFY_DONE; -} - -static int sama5d3_restart(struct notifier_block *this, unsigned long mode, - void *cmd) -{ - writel(AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST, - at91_rstc_base); + "r" (reset->args), + "r" (reset->ramc_lpr) + : "r4"); return NOTIFY_DONE; } -static int samx7_restart(struct notifier_block *this, unsigned long mode, - void *cmd) -{ - writel(AT91_RSTC_KEY | AT91_RSTC_PROCRST, at91_rstc_base); - return NOTIFY_DONE; -} - -static void __init at91_reset_status(struct platform_device *pdev) +static void __init at91_reset_status(struct platform_device *pdev, + void __iomem *base) { const char *reason; - u32 reg = readl(at91_rstc_base + AT91_RSTC_SR); + u32 reg = readl(base + AT91_RSTC_SR); switch ((reg & AT91_RSTC_RSTTYP) >> 8) { case RESET_TYPE_GENERAL: @@ -183,42 +142,68 @@ static void __init at91_reset_status(struct platform_device *pdev) } static const struct of_device_id at91_ramc_of_match[] = { - { .compatible = "atmel,at91sam9260-sdramc", }, - { .compatible = "atmel,at91sam9g45-ddramc", }, + { + .compatible = "atmel,at91sam9260-sdramc", + .data = (void *)AT91_SDRAMC_LPR, + }, + { + .compatible = "atmel,at91sam9g45-ddramc", + .data = (void *)AT91_DDRSDRC_LPR, + }, { /* sentinel */ } }; static const struct of_device_id at91_reset_of_match[] = { - { .compatible = "atmel,at91sam9260-rstc", .data = at91sam9260_restart }, - { .compatible = "atmel,at91sam9g45-rstc", .data = at91sam9g45_restart }, - { .compatible = "atmel,sama5d3-rstc", .data = sama5d3_restart }, - { .compatible = "atmel,samx7-rstc", .data = samx7_restart }, - { .compatible = "microchip,sam9x60-rstc", .data = samx7_restart }, + { + .compatible = "atmel,at91sam9260-rstc", + .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST | + AT91_RSTC_PROCRST), + }, + { + .compatible = "atmel,at91sam9g45-rstc", + .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST | + AT91_RSTC_PROCRST) + }, + { + .compatible = "atmel,sama5d3-rstc", + .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PERRST | + AT91_RSTC_PROCRST) + }, + { + .compatible = "atmel,samx7-rstc", + .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST) + }, + { + .compatible = "microchip,sam9x60-rstc", + .data = (void *)(AT91_RSTC_KEY | AT91_RSTC_PROCRST) + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91_reset_of_match); -static struct notifier_block at91_restart_nb = { - .priority = 192, -}; - static int __init 
at91_reset_probe(struct platform_device *pdev) { const struct of_device_id *match; + struct at91_reset *reset; struct device_node *np; int ret, idx = 0; - at91_rstc_base = of_iomap(pdev->dev.of_node, 0); - if (!at91_rstc_base) { + reset = devm_kzalloc(&pdev->dev, sizeof(*reset), GFP_KERNEL); + if (!reset) + return -ENOMEM; + + reset->rstc_base = of_iomap(pdev->dev.of_node, 0); + if (!reset->rstc_base) { dev_err(&pdev->dev, "Could not map reset controller address\n"); return -ENODEV; } if (!of_device_is_compatible(pdev->dev.of_node, "atmel,sama5d3-rstc")) { /* we need to shutdown the ddr controller, so get ramc base */ - for_each_matching_node(np, at91_ramc_of_match) { - at91_ramc_base[idx] = of_iomap(np, 0); - if (!at91_ramc_base[idx]) { + for_each_matching_node_and_match(np, at91_ramc_of_match, &match) { + reset->ramc_lpr = (u32)match->data; + reset->ramc_base[idx] = of_iomap(np, 0); + if (!reset->ramc_base[idx]) { dev_err(&pdev->dev, "Could not map ram controller address\n"); of_node_put(np); return -ENODEV; @@ -228,33 +213,46 @@ static int __init at91_reset_probe(struct platform_device *pdev) } match = of_match_node(at91_reset_of_match, pdev->dev.of_node); - at91_restart_nb.notifier_call = match->data; + reset->nb.notifier_call = at91_reset; + reset->nb.priority = 192; + reset->args = (u32)match->data; - sclk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(sclk)) - return PTR_ERR(sclk); + reset->sclk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(reset->sclk)) + return PTR_ERR(reset->sclk); - ret = clk_prepare_enable(sclk); + ret = clk_prepare_enable(reset->sclk); if (ret) { dev_err(&pdev->dev, "Could not enable slow clock\n"); return ret; } - ret = register_restart_handler(&at91_restart_nb); + platform_set_drvdata(pdev, reset); + + if (of_device_is_compatible(pdev->dev.of_node, "microchip,sam9x60-rstc")) { + u32 val = readl(reset->rstc_base + AT91_RSTC_MR); + + writel(AT91_RSTC_KEY | AT91_RSTC_URSTASYNC | val, + reset->rstc_base + AT91_RSTC_MR); + } + + ret = register_restart_handler(&reset->nb); if (ret) { - clk_disable_unprepare(sclk); + clk_disable_unprepare(reset->sclk); return ret; } - at91_reset_status(pdev); + at91_reset_status(pdev, reset->rstc_base); return 0; } static int __exit at91_reset_remove(struct platform_device *pdev) { - unregister_restart_handler(&at91_restart_nb); - clk_disable_unprepare(sclk); + struct at91_reset *reset = platform_get_drvdata(pdev); + + unregister_restart_handler(&reset->nb); + clk_disable_unprepare(reset->sclk); return 0; } diff --git a/drivers/power/reset/sc27xx-poweroff.c b/drivers/power/reset/sc27xx-poweroff.c index 29fb08b8faa0..90287c31992c 100644 --- a/drivers/power/reset/sc27xx-poweroff.c +++ b/drivers/power/reset/sc27xx-poweroff.c @@ -6,6 +6,7 @@ #include <linux/cpu.h> #include <linux/kernel.h> +#include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/regmap.h> @@ -13,6 +14,8 @@ #define SC27XX_PWR_PD_HW 0xc2c #define SC27XX_PWR_OFF_EN BIT(0) +#define SC27XX_SLP_CTRL 0xdf0 +#define SC27XX_LDO_XTL_EN BIT(3) static struct regmap *regmap; @@ -27,10 +30,13 @@ static struct regmap *regmap; */ static void sc27xx_poweroff_shutdown(void) { -#ifdef CONFIG_PM_SLEEP_SMP - int cpu = smp_processor_id(); +#ifdef CONFIG_HOTPLUG_CPU + int cpu; - freeze_secondary_cpus(cpu); + for_each_online_cpu(cpu) { + if (cpu != smp_processor_id()) + remove_cpu(cpu); + } #endif } @@ -40,6 +46,9 @@ static struct syscore_ops poweroff_syscore_ops = { static void sc27xx_poweroff_do_poweroff(void) { + /* Disable the external subsys 
connection's power firstly */ + regmap_write(regmap, SC27XX_SLP_CTRL, SC27XX_LDO_XTL_EN); + regmap_write(regmap, SC27XX_PWR_PD_HW, SC27XX_PWR_OFF_EN); } @@ -63,4 +72,8 @@ static struct platform_driver sc27xx_poweroff_driver = { .name = "sc27xx-poweroff", }, }; -builtin_platform_driver(sc27xx_poweroff_driver); +module_platform_driver(sc27xx_poweroff_driver); + +MODULE_DESCRIPTION("Power off driver for SC27XX PMIC Device"); +MODULE_AUTHOR("Baolin Wang <baolin.wang@unisoc.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/power/supply/Kconfig b/drivers/power/supply/Kconfig index 9a5591ab90d0..f3424fdce341 100644 --- a/drivers/power/supply/Kconfig +++ b/drivers/power/supply/Kconfig @@ -480,7 +480,7 @@ config CHARGER_GPIO called gpio-charger. config CHARGER_MANAGER - bool "Battery charger manager for multiple chargers" + tristate "Battery charger manager for multiple chargers" depends on REGULATOR select EXTCON help @@ -659,7 +659,7 @@ config CHARGER_RT9455 config CHARGER_CROS_USBPD tristate "ChromeOS EC based USBPD charger" - depends on CROS_EC + depends on CROS_USBPD_NOTIFY default n help Say Y here to enable ChromeOS EC based USBPD charger diff --git a/drivers/power/supply/ab8500_charger.c b/drivers/power/supply/ab8500_charger.c index f69550d64f09..9469fe182d02 100644 --- a/drivers/power/supply/ab8500_charger.c +++ b/drivers/power/supply/ab8500_charger.c @@ -404,7 +404,7 @@ disable_otp: } /** - * ab8500_power_supply_changed - a wrapper with local extentions for + * ab8500_power_supply_changed - a wrapper with local extensions for * power_supply_changed * @di: pointer to the ab8500_charger structure * @psy: pointer to power_supply_that have changed. @@ -683,7 +683,7 @@ static int ab8500_charger_max_usb_curr(struct ab8500_charger *di, /* * Platform only supports USB 2.0. * This means that charging current from USB source - * is maximum 500 mA. Every occurence of USB_STAT_*_HOST_* + * is maximum 500 mA. Every occurrence of USB_STAT_*_HOST_* * should set USB_CH_IP_CUR_LVL_0P5. */ @@ -1379,13 +1379,13 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger, /* * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts - * will be triggered everytime we enable the VDD ADC supply. + * will be triggered every time we enable the VDD ADC supply. * This will turn off charging for a short while. * It can be avoided by having the supply on when * there is a charger enabled. Normally the VDD ADC supply - * is enabled everytime a GPADC conversion is triggered. We will - * force it to be enabled from this driver to have - * the GPADC module independant of the AB8500 chargers + * is enabled every time a GPADC conversion is triggered. + * We will force it to be enabled from this driver to have + * the GPADC module independent of the AB8500 chargers */ if (!di->vddadc_en_ac) { ret = regulator_enable(di->regu); @@ -1455,7 +1455,7 @@ static int ab8500_charger_ac_en(struct ux500_charger *charger, if (is_ab8500_1p1_or_earlier(di->parent)) { /* * For ABB revision 1.0 and 1.1 there is a bug in the - * watchdog logic. That means we have to continously + * watchdog logic. That means we have to continuously * kick the charger watchdog even when no charger is * connected. This is only valid once the AC charger * has been enabled. This is a bug that is not handled @@ -1552,13 +1552,13 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger, /* * Due to a bug in AB8500, BTEMP_HIGH/LOW interrupts - * will be triggered everytime we enable the VDD ADC supply. 
+ * will be triggered every time we enable the VDD ADC supply. * This will turn off charging for a short while. * It can be avoided by having the supply on when * there is a charger enabled. Normally the VDD ADC supply - * is enabled everytime a GPADC conversion is triggered. We will - * force it to be enabled from this driver to have - * the GPADC module independant of the AB8500 chargers + * is enabled every time a GPADC conversion is triggered. + * We will force it to be enabled from this driver to have + * the GPADC module independent of the AB8500 chargers */ if (!di->vddadc_en_usb) { ret = regulator_enable(di->regu); @@ -1582,7 +1582,10 @@ static int ab8500_charger_usb_en(struct ux500_charger *charger, return -ENXIO; } - /* ChVoltLevel: max voltage upto which battery can be charged */ + /* + * ChVoltLevel: max voltage up to which battery can be + * charged + */ ret = abx500_set_register_interruptible(di->dev, AB8500_CHARGER, AB8500_CH_VOLT_LVL_REG, (u8) volt_index); if (ret) { @@ -2014,7 +2017,7 @@ static void ab8500_charger_check_hw_failure_work(struct work_struct *work) * Work queue function for kicking the charger watchdog. * * For ABB revision 1.0 and 1.1 there is a bug in the watchdog - * logic. That means we have to continously kick the charger + * logic. That means we have to continuously kick the charger * watchdog even when no charger is connected. This is only * valid once the AC charger has been enabled. This is * a bug that is not handled by the algorithm and the @@ -2262,7 +2265,7 @@ static void ab8500_charger_usb_link_status_work(struct work_struct *work) * Some chargers that breaks the USB spec is * identified as invalid by AB8500 and it refuse * to start the charging process. but by jumping - * thru a few hoops it can be forced to start. + * through a few hoops it can be forced to start. */ if (is_ab8500(di->parent)) ret = abx500_get_register_interruptible(di->dev, AB8500_USB, @@ -3214,7 +3217,7 @@ static int ab8500_charger_resume(struct platform_device *pdev) /* * For ABB revision 1.0 and 1.1 there is a bug in the watchdog - * logic. That means we have to continously kick the charger + * logic. That means we have to continuously kick the charger * watchdog even when no charger is connected. This is only * valid once the AC charger has been enabled. This is * a bug that is not handled by the algorithm and the @@ -3483,7 +3486,7 @@ static int ab8500_charger_probe(struct platform_device *pdev) /* * For ABB revision 1.0 and 1.1 there is a bug in the watchdog - * logic. That means we have to continously kick the charger + * logic. That means we have to continuously kick the charger * watchdog even when no charger is connected. This is only * valid once the AC charger has been enabled. 
This is * a bug that is not handled by the algorithm and the diff --git a/drivers/power/supply/axp288_charger.c b/drivers/power/supply/axp288_charger.c index 1bbba6bba673..cf4c67b2d235 100644 --- a/drivers/power/supply/axp288_charger.c +++ b/drivers/power/supply/axp288_charger.c @@ -21,6 +21,7 @@ #include <linux/property.h> #include <linux/mfd/axp20x.h> #include <linux/extcon.h> +#include <linux/dmi.h> #define PS_STAT_VBUS_TRIGGER BIT(0) #define PS_STAT_BAT_CHRG_DIR BIT(2) @@ -545,6 +546,49 @@ out: return IRQ_HANDLED; } +/* + * The HP Pavilion x2 10 series comes in a number of variants: + * Bay Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "815D" + * Cherry Trail SoC + AXP288 PMIC, DMI_BOARD_NAME: "813E" + * Cherry Trail SoC + TI PMIC, DMI_BOARD_NAME: "827C" or "82F4" + * + * The variants with the AXP288 PMIC are all kinds of special: + * + * 1. All variants use a Type-C connector which the AXP288 does not support, so + * when using a Type-C charger it is not recognized. Unlike most AXP288 devices, + * this model actually has mostly working ACPI AC / Battery code, the ACPI code + * "solves" this by simply setting the input_current_limit to 3A. + * There are still some issues with the ACPI code, so we use this native driver, + * and to solve the charging not working (500mA is not enough) issue we hardcode + * the 3A input_current_limit like the ACPI code does. + * + * 2. If no charger is connected the machine boots with the vbus-path disabled. + * Normally this is done when a 5V boost converter is active to avoid the PMIC + * trying to charge from the 5V boost converter's output. This is done when + * an OTG host cable is inserted and the ID pin on the micro-B receptacle is + * pulled low and the ID pin has an ACPI event handler associated with it + * which re-enables the vbus-path when the ID pin is pulled high when the + * OTG host cable is removed. The Type-C connector has no ID pin, there is + * no ID pin handler and there appears to be no 5V boost converter, so we + * end up not charging because the vbus-path is disabled, until we unplug + * the charger which automatically clears the vbus-path disable bit and then + * on the second plug-in of the adapter we start charging. To solve the not + * charging on first charger plugin we unconditionally enable the vbus-path at + * probe on this model, which is safe since there is no 5V boost converter. + */ +static const struct dmi_system_id axp288_hp_x2_dmi_ids[] = { + { + /* + * Bay Trail model has "Hewlett-Packard" as sys_vendor, Cherry + * Trail model has "HP", so we only match on product_name. 
+ */ + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion x2 Detachable"), + }, + }, + {} /* Terminating entry */ +}; + static void axp288_charger_extcon_evt_worker(struct work_struct *work) { struct axp288_chrg_info *info = @@ -568,7 +612,11 @@ static void axp288_charger_extcon_evt_worker(struct work_struct *work) } /* Determine cable/charger type */ - if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) { + if (dmi_check_system(axp288_hp_x2_dmi_ids)) { + /* See comment above axp288_hp_x2_dmi_ids declaration */ + dev_dbg(&info->pdev->dev, "HP X2 with Type-C, setting inlmt to 3A\n"); + current_limit = 3000000; + } else if (extcon_get_state(edev, EXTCON_CHG_USB_SDP) > 0) { dev_dbg(&info->pdev->dev, "USB SDP charger is connected\n"); current_limit = 500000; } else if (extcon_get_state(edev, EXTCON_CHG_USB_CDP) > 0) { @@ -685,6 +733,13 @@ static int charger_init_hw_regs(struct axp288_chrg_info *info) return ret; } + if (dmi_check_system(axp288_hp_x2_dmi_ids)) { + /* See comment above axp288_hp_x2_dmi_ids declaration */ + ret = axp288_charger_vbus_path_select(info, true); + if (ret < 0) + return ret; + } + /* Read current charge voltage and current limit */ ret = regmap_read(info->regmap, AXP20X_CHRG_CTRL1, &val); if (ret < 0) { diff --git a/drivers/power/supply/axp288_fuel_gauge.c b/drivers/power/supply/axp288_fuel_gauge.c index e1bc4e6e6f30..f40fa0e63b6e 100644 --- a/drivers/power/supply/axp288_fuel_gauge.c +++ b/drivers/power/supply/axp288_fuel_gauge.c @@ -706,14 +706,14 @@ static const struct dmi_system_id axp288_fuel_gauge_blacklist[] = { { /* Intel Cherry Trail Compute Stick, Windows version */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1AW32SC"), }, }, { /* Intel Cherry Trail Compute Stick, version without an OS */ .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_MATCH(DMI_SYS_VENDOR, "Intel"), DMI_MATCH(DMI_PRODUCT_NAME, "STK1A32SC"), }, }, diff --git a/drivers/power/supply/bq27xxx_battery.c b/drivers/power/supply/bq27xxx_battery.c index 195c18c2f426..664e50103eaa 100644 --- a/drivers/power/supply/bq27xxx_battery.c +++ b/drivers/power/supply/bq27xxx_battery.c @@ -1885,7 +1885,10 @@ int bq27xxx_battery_setup(struct bq27xxx_device_info *di) di->bat = power_supply_register_no_ws(di->dev, psy_desc, &psy_cfg); if (IS_ERR(di->bat)) { - dev_err(di->dev, "failed to register battery\n"); + if (PTR_ERR(di->bat) == -EPROBE_DEFER) + dev_dbg(di->dev, "failed to register battery, deferring probe\n"); + else + dev_err(di->dev, "failed to register battery\n"); return PTR_ERR(di->bat); } diff --git a/drivers/power/supply/cros_usbpd-charger.c b/drivers/power/supply/cros_usbpd-charger.c index 30c3d37511c9..2a45e84447fe 100644 --- a/drivers/power/supply/cros_usbpd-charger.c +++ b/drivers/power/supply/cros_usbpd-charger.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/platform_data/cros_ec_commands.h> #include <linux/platform_data/cros_ec_proto.h> +#include <linux/platform_data/cros_usbpd_notify.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/slab.h> @@ -517,32 +518,21 @@ static int cros_usbpd_charger_property_is_writeable(struct power_supply *psy, } static int cros_usbpd_charger_ec_event(struct notifier_block *nb, - unsigned long queued_during_suspend, + unsigned long host_event, void *_notify) { - struct cros_ec_device *ec_device; - struct charger_data *charger; - u32 host_event; + struct charger_data *charger = container_of(nb, struct 
charger_data, + notifier); - charger = container_of(nb, struct charger_data, notifier); - ec_device = charger->ec_device; - - host_event = cros_ec_get_host_event(ec_device); - if (host_event & EC_HOST_EVENT_MASK(EC_HOST_EVENT_PD_MCU)) { - cros_usbpd_charger_power_changed(charger->ports[0]->psy); - return NOTIFY_OK; - } else { - return NOTIFY_DONE; - } + cros_usbpd_charger_power_changed(charger->ports[0]->psy); + return NOTIFY_OK; } static void cros_usbpd_charger_unregister_notifier(void *data) { struct charger_data *charger = data; - struct cros_ec_device *ec_device = charger->ec_device; - blocking_notifier_chain_unregister(&ec_device->event_notifier, - &charger->notifier); + cros_usbpd_unregister_notify(&charger->notifier); } static int cros_usbpd_charger_probe(struct platform_device *pd) @@ -676,21 +666,17 @@ static int cros_usbpd_charger_probe(struct platform_device *pd) goto fail; } - if (ec_device->mkbp_event_supported) { - /* Get PD events from the EC */ - charger->notifier.notifier_call = cros_usbpd_charger_ec_event; - ret = blocking_notifier_chain_register( - &ec_device->event_notifier, - &charger->notifier); - if (ret < 0) { - dev_warn(dev, "failed to register notifier\n"); - } else { - ret = devm_add_action_or_reset(dev, - cros_usbpd_charger_unregister_notifier, - charger); - if (ret < 0) - goto fail; - } + /* Get PD events from the EC */ + charger->notifier.notifier_call = cros_usbpd_charger_ec_event; + ret = cros_usbpd_register_notify(&charger->notifier); + if (ret < 0) { + dev_warn(dev, "failed to register notifier\n"); + } else { + ret = devm_add_action_or_reset(dev, + cros_usbpd_charger_unregister_notifier, + charger); + if (ret < 0) + goto fail; } return 0; diff --git a/drivers/power/supply/ingenic-battery.c b/drivers/power/supply/ingenic-battery.c index 2748715c4c75..dd3d93dfe3eb 100644 --- a/drivers/power/supply/ingenic-battery.c +++ b/drivers/power/supply/ingenic-battery.c @@ -148,7 +148,8 @@ static int ingenic_battery_probe(struct platform_device *pdev) bat->battery = devm_power_supply_register(dev, desc, &psy_cfg); if (IS_ERR(bat->battery)) { - dev_err(dev, "Unable to register battery\n"); + if (PTR_ERR(bat->battery) != -EPROBE_DEFER) + dev_err(dev, "Unable to register battery\n"); return PTR_ERR(bat->battery); } diff --git a/drivers/power/supply/sc27xx_fuel_gauge.c b/drivers/power/supply/sc27xx_fuel_gauge.c index 469c83fdaa8e..a7c8a8453db1 100644 --- a/drivers/power/supply/sc27xx_fuel_gauge.c +++ b/drivers/power/supply/sc27xx_fuel_gauge.c @@ -614,6 +614,17 @@ static int sc27xx_fgu_get_property(struct power_supply *psy, val->intval = data->total_cap * 1000; break; + case POWER_SUPPLY_PROP_CHARGE_NOW: + ret = sc27xx_fgu_get_clbcnt(data, &value); + if (ret) + goto error; + + value = DIV_ROUND_CLOSEST(value * 10, + 36 * SC27XX_FGU_SAMPLE_HZ); + val->intval = sc27xx_fgu_adc_to_current(data, value); + + break; + default: ret = -EINVAL; break; @@ -682,6 +693,7 @@ static enum power_supply_property sc27xx_fgu_props[] = { POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_CALIBRATE, + POWER_SUPPLY_PROP_CHARGE_NOW }; static const struct power_supply_desc sc27xx_fgu_desc = { diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index 648ab80288c9..1bc49b2e12e8 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -726,10 +726,10 @@ twl4030_bci_mode_show(struct device *dev, for (i = 0; i < ARRAY_SIZE(modes); i++) if (mode == i) - len += 
snprintf(buf+len, PAGE_SIZE-len, + len += scnprintf(buf+len, PAGE_SIZE-len, "[%s] ", modes[i]); else - len += snprintf(buf+len, PAGE_SIZE-len, + len += scnprintf(buf+len, PAGE_SIZE-len, "%s ", modes[i]); buf[len-1] = '\n'; return len; diff --git a/drivers/ps3/sys-manager-core.c b/drivers/ps3/sys-manager-core.c index 24709c572c0c..e061b7d0632b 100644 --- a/drivers/ps3/sys-manager-core.c +++ b/drivers/ps3/sys-manager-core.c @@ -31,7 +31,7 @@ void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops) { BUG_ON(!ops); BUG_ON(!ops->dev); - ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops; + ps3_sys_manager_ops = *ops; } EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops); diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 8e503881d9d6..ec873f09c763 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -41,9 +41,6 @@ config RTC_HCTOSYS_DEVICE device should record time in UTC, since the kernel won't do timezone correction. - The driver for this RTC device must be loaded before late_initcall - functions run, so it must usually be statically linked. - This clock should be battery-backed, so that it reads the correct time when the system boots from a power-off state. Otherwise, your system will need an external clock source (like an NTP server). @@ -241,6 +238,7 @@ config RTC_DRV_AS3722 config RTC_DRV_DS1307 tristate "Dallas/Maxim DS1307/37/38/39/40/41, ST M41T00, EPSON RX-8025, ISL12057" select REGMAP_I2C + select WATCHDOG_CORE if WATCHDOG help If you say yes here you get support for various compatible RTC chips (often with battery backup) connected with I2C. This driver @@ -592,6 +590,16 @@ config RTC_DRV_RC5T583 This driver can also be built as a module. If so, the module will be called rtc-rc5t583. +config RTC_DRV_RC5T619 + tristate "RICOH RC5T619 RTC driver" + depends on MFD_RN5T618 + help + If you say yes here you get support for the RTC on the + RICOH RC5T619 chips. + + This driver can also be built as a module. If so, the module + will be called rtc-rc5t619. + config RTC_DRV_S35390A tristate "Seiko Instruments S-35390A" select BITREVERSE @@ -1335,7 +1343,7 @@ config RTC_DRV_IMXDI config RTC_DRV_FSL_FTM_ALARM tristate "Freescale FlexTimer alarm timer" - depends on ARCH_LAYERSCAPE || SOC_LS1021A + depends on ARCH_LAYERSCAPE || SOC_LS1021A || COMPILE_TEST help For the FlexTimer in LS1012A, LS1021A, LS1028A, LS1043A, LS1046A, LS1088A, LS208xA, we can use FTM as the wakeup source. @@ -1762,6 +1770,7 @@ config RTC_DRV_MXC_V2 config RTC_DRV_SNVS tristate "Freescale SNVS RTC support" select REGMAP_MMIO + depends on ARCH_MXC || COMPILE_TEST depends on HAS_IOMEM depends on OF help @@ -1807,6 +1816,16 @@ config RTC_DRV_MOXART This driver can also be built as a module. If so, the module will be called rtc-moxart +config RTC_DRV_MT2712 + tristate "MediaTek MT2712 SoC based RTC" + depends on ARCH_MEDIATEK || COMPILE_TEST + help + This enables support for the real time clock built in the MediaTek + SoCs for MT2712. + + This drive can also be built as a module. If so, the module + will be called rtc-mt2712. 
+ config RTC_DRV_MT6397 tristate "MediaTek PMIC based RTC" depends on MFD_MT6397 || (COMPILE_TEST && IRQ_DOMAIN) diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 24c7dfa1bd7d..0721752c6ed4 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -6,7 +6,6 @@ ccflags-$(CONFIG_RTC_DEBUG) := -DDEBUG obj-$(CONFIG_RTC_LIB) += lib.o -obj-$(CONFIG_RTC_HCTOSYS) += hctosys.o obj-$(CONFIG_RTC_SYSTOHC) += systohc.o obj-$(CONFIG_RTC_CLASS) += rtc-core.o obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o @@ -106,6 +105,7 @@ obj-$(CONFIG_RTC_DRV_MESON) += rtc-meson.o obj-$(CONFIG_RTC_DRV_MOXART) += rtc-moxart.o obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o +obj-$(CONFIG_RTC_DRV_MT2712) += rtc-mt2712.o obj-$(CONFIG_RTC_DRV_MT6397) += rtc-mt6397.o obj-$(CONFIG_RTC_DRV_MT7622) += rtc-mt7622.o obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o @@ -133,6 +133,7 @@ obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o obj-$(CONFIG_RTC_DRV_R7301) += rtc-r7301.o obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o obj-$(CONFIG_RTC_DRV_RC5T583) += rtc-rc5t583.o +obj-$(CONFIG_RTC_DRV_RC5T619) += rtc-rc5t619.o obj-$(CONFIG_RTC_DRV_RK808) += rtc-rk808.o obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 9458e6d6686a..7c88d190c51f 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -34,6 +34,50 @@ static void rtc_device_release(struct device *dev) #ifdef CONFIG_RTC_HCTOSYS_DEVICE /* Result of the last RTC to system clock attempt. */ int rtc_hctosys_ret = -ENODEV; + +/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary + * whether it stores the most close value or the value with partial + * seconds truncated. However, it is important that we use it to store + * the truncated value. This is because otherwise it is necessary, + * in an rtc sync function, to read both xtime.tv_sec and + * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read + * of >32bits is not possible. So storing the most close value would + * slow down the sync API. So here we have the truncated value and + * the best guess is to add 0.5s. 
+ */ + +static void rtc_hctosys(struct rtc_device *rtc) +{ + int err; + struct rtc_time tm; + struct timespec64 tv64 = { + .tv_nsec = NSEC_PER_SEC >> 1, + }; + + err = rtc_read_time(rtc, &tm); + if (err) { + dev_err(rtc->dev.parent, + "hctosys: unable to read the hardware clock\n"); + goto err_read; + } + + tv64.tv_sec = rtc_tm_to_time64(&tm); + +#if BITS_PER_LONG == 32 + if (tv64.tv_sec > INT_MAX) { + err = -ERANGE; + goto err_read; + } +#endif + + err = do_settimeofday64(&tv64); + + dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n", + &tm, (long long)tv64.tv_sec); + +err_read: + rtc_hctosys_ret = err; +} #endif #if defined(CONFIG_PM_SLEEP) && defined(CONFIG_RTC_HCTOSYS_DEVICE) @@ -375,6 +419,11 @@ int __rtc_register_device(struct module *owner, struct rtc_device *rtc) dev_info(rtc->dev.parent, "registered as %s\n", dev_name(&rtc->dev)); +#ifdef CONFIG_RTC_HCTOSYS_DEVICE + if (!strcmp(dev_name(&rtc->dev), CONFIG_RTC_HCTOSYS_DEVICE)) + rtc_hctosys(rtc); +#endif + return 0; } EXPORT_SYMBOL_GPL(__rtc_register_device); diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c deleted file mode 100644 index a74d0d890600..000000000000 --- a/drivers/rtc/hctosys.c +++ /dev/null @@ -1,69 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * RTC subsystem, initialize system time on startup - * - * Copyright (C) 2005 Tower Technologies - * Author: Alessandro Zummo <a.zummo@towertech.it> - */ - -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include <linux/rtc.h> - -/* IMPORTANT: the RTC only stores whole seconds. It is arbitrary - * whether it stores the most close value or the value with partial - * seconds truncated. However, it is important that we use it to store - * the truncated value. This is because otherwise it is necessary, - * in an rtc sync function, to read both xtime.tv_sec and - * xtime.tv_nsec. On some processors (i.e. ARM), an atomic read - * of >32bits is not possible. So storing the most close value would - * slow down the sync API. So here we have the truncated value and - * the best guess is to add 0.5s. - */ - -static int __init rtc_hctosys(void) -{ - int err = -ENODEV; - struct rtc_time tm; - struct timespec64 tv64 = { - .tv_nsec = NSEC_PER_SEC >> 1, - }; - struct rtc_device *rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE); - - if (!rtc) { - pr_info("unable to open rtc device (%s)\n", - CONFIG_RTC_HCTOSYS_DEVICE); - goto err_open; - } - - err = rtc_read_time(rtc, &tm); - if (err) { - dev_err(rtc->dev.parent, - "hctosys: unable to read the hardware clock\n"); - goto err_read; - } - - tv64.tv_sec = rtc_tm_to_time64(&tm); - -#if BITS_PER_LONG == 32 - if (tv64.tv_sec > INT_MAX) { - err = -ERANGE; - goto err_read; - } -#endif - - err = do_settimeofday64(&tv64); - - dev_info(rtc->dev.parent, "setting system clock to %ptR UTC (%lld)\n", - &tm, (long long)tv64.tv_sec); - -err_read: - rtc_class_close(rtc); - -err_open: - rtc_hctosys_ret = err; - - return err; -} - -late_initcall(rtc_hctosys); diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c index 4743b16a8d84..cc9b14ef90f1 100644 --- a/drivers/rtc/rtc-88pm860x.c +++ b/drivers/rtc/rtc-88pm860x.c @@ -28,7 +28,6 @@ struct pm860x_rtc_info { int irq; int vrtc; - int (*sync)(unsigned int ticks); }; #define REG_VRTC_MEAS1 0x7D @@ -76,33 +75,6 @@ static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -/* - * Calculate the next alarm time given the requested alarm time mask - * and the current time. 
- */ -static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, - struct rtc_time *alrm) -{ - unsigned long next_time; - unsigned long now_time; - - next->tm_year = now->tm_year; - next->tm_mon = now->tm_mon; - next->tm_mday = now->tm_mday; - next->tm_hour = alrm->tm_hour; - next->tm_min = alrm->tm_min; - next->tm_sec = alrm->tm_sec; - - rtc_tm_to_time(now, &now_time); - rtc_tm_to_time(next, &next_time); - - if (next_time < now_time) { - /* Advance one day */ - next_time += 60 * 60 * 24; - rtc_time_to_tm(next_time, next); - } -} - static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); @@ -123,7 +95,7 @@ static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm) dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); - rtc_time_to_tm(ticks, tm); + rtc_time64_to_tm(ticks, tm); return 0; } @@ -140,7 +112,7 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm) 1900 + tm->tm_year); return -EINVAL; } - rtc_tm_to_time(tm, &ticks); + ticks = rtc_tm_to_time64(tm); /* load 32-bit read-only counter */ pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); @@ -155,8 +127,6 @@ static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm) pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF); pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF); - if (info->sync) - info->sync(ticks); return 0; } @@ -180,7 +150,7 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", base, data, ticks); - rtc_time_to_tm(ticks, &alrm->time); + rtc_time64_to_tm(ticks, &alrm->time); ret = pm860x_reg_read(info->i2c, PM8607_RTC1); alrm->enabled = (ret & ALARM_EN) ? 1 : 0; alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 
1 : 0; @@ -190,7 +160,6 @@ static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pm860x_rtc_info *info = dev_get_drvdata(dev); - struct rtc_time now_tm, alarm_tm; unsigned long ticks, base, data; unsigned char buf[8]; int mask; @@ -203,18 +172,7 @@ static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) base = ((unsigned long)buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; - /* load 32-bit read-only counter */ - pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); - data = ((unsigned long)buf[3] << 24) | (buf[2] << 16) | - (buf[1] << 8) | buf[0]; - ticks = base + data; - dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", - base, data, ticks); - - rtc_time_to_tm(ticks, &now_tm); - rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time); - /* get new ticks for alarm in 24 hours */ - rtc_tm_to_time(&alarm_tm, &ticks); + ticks = rtc_tm_to_time64(&alrm->time); data = ticks - base; buf[0] = data & 0xff; @@ -309,20 +267,15 @@ static int pm860x_rtc_dt_init(struct platform_device *pdev, return 0; } #else -#define pm860x_rtc_dt_init(x, y) (-1) +#define pm860x_rtc_dt_init(x, y) do { } while (0) #endif static int pm860x_rtc_probe(struct platform_device *pdev) { struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); - struct pm860x_rtc_pdata *pdata = NULL; struct pm860x_rtc_info *info; - struct rtc_time tm; - unsigned long ticks = 0; int ret; - pdata = dev_get_platdata(&pdev->dev); - info = devm_kzalloc(&pdev->dev, sizeof(struct pm860x_rtc_info), GFP_KERNEL); if (!info) @@ -336,6 +289,10 @@ static int pm860x_rtc_probe(struct platform_device *pdev) info->dev = &pdev->dev; dev_set_drvdata(&pdev->dev, info); + info->rtc_dev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc_dev)) + return PTR_ERR(info->rtc_dev); + ret = devm_request_threaded_irq(&pdev->dev, info->irq, NULL, rtc_update_handler, IRQF_ONESHOT, "rtc", info); @@ -351,39 +308,14 @@ static int pm860x_rtc_probe(struct platform_device *pdev) pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA); pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA); - ret = pm860x_rtc_read_time(&pdev->dev, &tm); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to read initial time.\n"); - return ret; - } - if ((tm.tm_year < 70) || (tm.tm_year > 138)) { - tm.tm_year = 70; - tm.tm_mon = 0; - tm.tm_mday = 1; - tm.tm_hour = 0; - tm.tm_min = 0; - tm.tm_sec = 0; - ret = pm860x_rtc_set_time(&pdev->dev, &tm); - if (ret < 0) { - dev_err(&pdev->dev, "Failed to set initial time.\n"); - return ret; - } - } - rtc_tm_to_time(&tm, &ticks); - if (pm860x_rtc_dt_init(pdev, info)) { - if (pdata && pdata->sync) { - pdata->sync(ticks); - info->sync = pdata->sync; - } - } + pm860x_rtc_dt_init(pdev, info); + + info->rtc_dev->ops = &pm860x_rtc_ops; + info->rtc_dev->range_max = U32_MAX; - info->rtc_dev = devm_rtc_device_register(&pdev->dev, "88pm860x-rtc", - &pm860x_rtc_ops, THIS_MODULE); - ret = PTR_ERR(info->rtc_dev); - if (IS_ERR(info->rtc_dev)) { - dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); + ret = rtc_register_device(info->rtc_dev); + if (ret) return ret; - } /* * enable internal XO instead of internal 3.25MHz clock since it can @@ -393,12 +325,6 @@ static int pm860x_rtc_probe(struct platform_device *pdev) #ifdef VRTC_CALIBRATION /* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */ - if (pm860x_rtc_dt_init(pdev, info)) { - if (pdata && pdata->vrtc) - info->vrtc = pdata->vrtc & 
0x3; - else - info->vrtc = 1; - } pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC); /* calibrate VRTC */ diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c index 8492ffed4ca6..3d60f3283f11 100644 --- a/drivers/rtc/rtc-ab8500.c +++ b/drivers/rtc/rtc-ab8500.c @@ -100,7 +100,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm) secs = secs / COUNTS_PER_SEC; secs = secs + (mins * 60); - rtc_time_to_tm(secs, tm); + rtc_time64_to_tm(secs, tm); return 0; } @@ -110,7 +110,7 @@ static int ab8500_rtc_set_time(struct device *dev, struct rtc_time *tm) unsigned char buf[ARRAY_SIZE(ab8500_rtc_time_regs)]; unsigned long no_secs, no_mins, secs = 0; - rtc_tm_to_time(tm, &secs); + secs = rtc_tm_to_time64(tm); no_mins = secs / 60; @@ -168,7 +168,7 @@ static int ab8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) mins = (buf[0] << 16) | (buf[1] << 8) | (buf[2]); secs = mins * 60; - rtc_time_to_tm(secs, &alarm->time); + rtc_time64_to_tm(secs, &alarm->time); return 0; } @@ -188,7 +188,7 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) struct rtc_time curtm; /* Get the number of seconds since 1970 */ - rtc_tm_to_time(&alarm->time, &secs); + secs = rtc_tm_to_time64(&alarm->time); /* * Check whether alarm is set less than 1min. @@ -196,7 +196,7 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) * return -EINVAL, so UIE EMUL can take it up, incase of UIE_ON */ ab8500_rtc_read_time(dev, &curtm); /* Read current time */ - rtc_tm_to_time(&curtm, &cursec); + cursec = rtc_tm_to_time64(&curtm); if ((secs - cursec) < 59) { dev_dbg(dev, "Alarm less than 1 minute not supported\r\n"); return -EINVAL; diff --git a/drivers/rtc/rtc-au1xxx.c b/drivers/rtc/rtc-au1xxx.c index 7c5530c71285..791bebcb6f47 100644 --- a/drivers/rtc/rtc-au1xxx.c +++ b/drivers/rtc/rtc-au1xxx.c @@ -34,7 +34,7 @@ static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm) t = alchemy_rdsys(AU1000_SYS_TOYREAD); - rtc_time_to_tm(t, tm); + rtc_time64_to_tm(t, tm); return 0; } @@ -43,7 +43,7 @@ static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm) { unsigned long t; - rtc_tm_to_time(tm, &t); + t = rtc_tm_to_time64(tm); alchemy_wrsys(t, AU1000_SYS_TOYWRITE); @@ -65,17 +65,13 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtcdev; unsigned long t; - int ret; t = alchemy_rdsys(AU1000_SYS_CNTRCTRL); if (!(t & CNTR_OK)) { dev_err(&pdev->dev, "counters not working; aborting.\n"); - ret = -ENODEV; - goto out_err; + return -ENODEV; } - ret = -ETIMEDOUT; - /* set counter0 tickrate to 1Hz if necessary */ if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) { /* wait until hardware gives access to TRIM register */ @@ -88,7 +84,7 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev) * counters are unusable. 
*/ dev_err(&pdev->dev, "timeout waiting for access\n"); - goto out_err; + return -ETIMEDOUT; } /* set 1Hz TOY tick rate */ @@ -99,19 +95,16 @@ static int au1xtoy_rtc_probe(struct platform_device *pdev) while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S) msleep(1); - rtcdev = devm_rtc_device_register(&pdev->dev, "rtc-au1xxx", - &au1xtoy_rtc_ops, THIS_MODULE); - if (IS_ERR(rtcdev)) { - ret = PTR_ERR(rtcdev); - goto out_err; - } + rtcdev = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtcdev)) + return PTR_ERR(rtcdev); - platform_set_drvdata(pdev, rtcdev); + rtcdev->ops = &au1xtoy_rtc_ops; + rtcdev->range_max = U32_MAX; - return 0; + platform_set_drvdata(pdev, rtcdev); -out_err: - return ret; + return rtc_register_device(rtcdev); } static struct platform_driver au1xrtc_driver = { diff --git a/drivers/rtc/rtc-bd70528.c b/drivers/rtc/rtc-bd70528.c index bbbb1f07c91f..4492b770422c 100644 --- a/drivers/rtc/rtc-bd70528.c +++ b/drivers/rtc/rtc-bd70528.c @@ -542,10 +542,8 @@ static int bd70528_probe(struct platform_device *pdev) irq = platform_get_irq_byname(pdev, irq_name); - if (irq < 0) { - dev_err(&pdev->dev, "Failed to get irq\n"); + if (irq < 0) return irq; - } platform_set_drvdata(pdev, bd_rtc); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 82bfe009a50f..bcc96ab7793f 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -649,10 +649,11 @@ static struct cmos_rtc cmos_rtc; static irqreturn_t cmos_interrupt(int irq, void *p) { + unsigned long flags; u8 irqstat; u8 rtc_control; - spin_lock(&rtc_lock); + spin_lock_irqsave(&rtc_lock, flags); /* When the HPET interrupt handler calls us, the interrupt * status is passed as arg1 instead of the irq number. But @@ -686,7 +687,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); } - spin_unlock(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); if (is_intr(irqstat)) { rtc_update_irq(p, 1, irqstat); diff --git a/drivers/rtc/rtc-cpcap.c b/drivers/rtc/rtc-cpcap.c index 6a3b70fd7e1f..a603f1f21125 100644 --- a/drivers/rtc/rtc-cpcap.c +++ b/drivers/rtc/rtc-cpcap.c @@ -56,14 +56,14 @@ static void cpcap2rtc_time(struct rtc_time *rtc, struct cpcap_time *cpcap) tod = (cpcap->tod1 & TOD1_MASK) | ((cpcap->tod2 & TOD2_MASK) << 8); time = tod + ((cpcap->day & DAY_MASK) * SECS_PER_DAY); - rtc_time_to_tm(time, rtc); + rtc_time64_to_tm(time, rtc); } static void rtc2cpcap_time(struct cpcap_time *cpcap, struct rtc_time *rtc) { unsigned long time; - rtc_tm_to_time(rtc, &time); + time = rtc_tm_to_time64(rtc); cpcap->day = time / SECS_PER_DAY; time %= SECS_PER_DAY; @@ -256,12 +256,13 @@ static int cpcap_rtc_probe(struct platform_device *pdev) return -ENODEV; platform_set_drvdata(pdev, rtc); - rtc->rtc_dev = devm_rtc_device_register(dev, "cpcap_rtc", - &cpcap_rtc_ops, THIS_MODULE); - + rtc->rtc_dev = devm_rtc_allocate_device(dev); if (IS_ERR(rtc->rtc_dev)) return PTR_ERR(rtc->rtc_dev); + rtc->rtc_dev->ops = &cpcap_rtc_ops; + rtc->rtc_dev->range_max = (1 << 14) * SECS_PER_DAY - 1; + err = cpcap_get_vendor(dev, rtc->regmap, &rtc->vendor); if (err) return err; @@ -298,7 +299,7 @@ static int cpcap_rtc_probe(struct platform_device *pdev) /* ignore error and continue without wakeup support */ } - return 0; + return rtc_register_device(rtc->rtc_dev); } static const struct of_device_id cpcap_rtc_of_match[] = { diff --git a/drivers/rtc/rtc-da9052.c b/drivers/rtc/rtc-da9052.c index 204eb7cf1aa4..58de10da37b1 100644 --- a/drivers/rtc/rtc-da9052.c +++ 
b/drivers/rtc/rtc-da9052.c @@ -103,13 +103,11 @@ static int da9052_set_alarm(struct da9052_rtc *rtc, struct rtc_time *rtc_tm) int ret; uint8_t v[3]; - ret = rtc_tm_to_time(rtc_tm, &alm_time); - if (ret != 0) - return ret; + alm_time = rtc_tm_to_time64(rtc_tm); if (rtc_tm->tm_sec > 0) { alm_time += 60 - rtc_tm->tm_sec; - rtc_time_to_tm(alm_time, rtc_tm); + rtc_time64_to_tm(alm_time, rtc_tm); } BUG_ON(rtc_tm->tm_sec); /* it will cause repeated irqs if not zero */ @@ -298,12 +296,18 @@ static int da9052_rtc_probe(struct platform_device *pdev) rtc_err(rtc, "Failed to disable TICKS: %d\n", ret); device_init_wakeup(&pdev->dev, true); - rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &da9052_rtc_ops, THIS_MODULE); - + rtc->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc->rtc)) return PTR_ERR(rtc->rtc); + rtc->rtc->ops = &da9052_rtc_ops; + rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + rtc->rtc->range_max = RTC_TIMESTAMP_END_2063; + + ret = rtc_register_device(rtc->rtc); + if (ret) + return ret; + ret = da9052_request_irq(rtc->da9052, DA9052_IRQ_ALARM, "ALM", da9052_rtc_irq, rtc); if (ret != 0) { diff --git a/drivers/rtc/rtc-davinci.c b/drivers/rtc/rtc-davinci.c index 390b7351e0fe..73f87a17cdf3 100644 --- a/drivers/rtc/rtc-davinci.c +++ b/drivers/rtc/rtc-davinci.c @@ -227,7 +227,7 @@ davinci_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) return ret; } -static int convertfromdays(u16 days, struct rtc_time *tm) +static void convertfromdays(u16 days, struct rtc_time *tm) { int tmp_days, year, mon; @@ -250,24 +250,17 @@ static int convertfromdays(u16 days, struct rtc_time *tm) break; } } - return 0; } -static int convert2days(u16 *days, struct rtc_time *tm) +static void convert2days(u16 *days, struct rtc_time *tm) { int i; *days = 0; - /* epoch == 1900 */ - if (tm->tm_year < 100 || tm->tm_year > 199) - return -EINVAL; - for (i = 2000; i < 1900 + tm->tm_year; i++) *days += rtc_year_days(1, 12, i); *days += rtc_year_days(tm->tm_mday, tm->tm_mon, 1900 + tm->tm_year); - - return 0; } static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm) @@ -300,8 +293,7 @@ static int davinci_rtc_read_time(struct device *dev, struct rtc_time *tm) days <<= 8; days |= day0; - if (convertfromdays(days, tm) < 0) - return -EINVAL; + convertfromdays(days, tm); return 0; } @@ -313,8 +305,7 @@ static int davinci_rtc_set_time(struct device *dev, struct rtc_time *tm) u8 rtc_cctrl; unsigned long flags; - if (convert2days(&days, tm) < 0) - return -EINVAL; + convert2days(&days, tm); spin_lock_irqsave(&davinci_rtc_lock, flags); @@ -396,8 +387,7 @@ static int davinci_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) days <<= 8; days |= day0; - if (convertfromdays(days, &alm->time) < 0) - return -EINVAL; + convertfromdays(days, &alm->time); alm->pending = !!(rtcss_read(davinci_rtc, PRTCSS_RTC_CCTRL) & @@ -413,29 +403,7 @@ static int davinci_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) unsigned long flags; u16 days; - if (alm->time.tm_mday <= 0 && alm->time.tm_mon < 0 - && alm->time.tm_year < 0) { - struct rtc_time tm; - unsigned long now, then; - - davinci_rtc_read_time(dev, &tm); - rtc_tm_to_time(&tm, &now); - - alm->time.tm_mday = tm.tm_mday; - alm->time.tm_mon = tm.tm_mon; - alm->time.tm_year = tm.tm_year; - rtc_tm_to_time(&alm->time, &then); - - if (then < now) { - rtc_time_to_tm(now + 24 * 60 * 60, &tm); - alm->time.tm_mday = tm.tm_mday; - alm->time.tm_mon = tm.tm_mon; - alm->time.tm_year = tm.tm_year; - } - } - - if (convert2days(&days, 
&alm->time) < 0) - return -EINVAL; + convert2days(&days, &alm->time); spin_lock_irqsave(&davinci_rtc_lock, flags); @@ -485,13 +453,13 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, davinci_rtc); - davinci_rtc->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &davinci_rtc_ops, THIS_MODULE); - if (IS_ERR(davinci_rtc->rtc)) { - dev_err(dev, "unable to register RTC device, err %d\n", - ret); + davinci_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(davinci_rtc->rtc)) return PTR_ERR(davinci_rtc->rtc); - } + + davinci_rtc->rtc->ops = &davinci_rtc_ops; + davinci_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + davinci_rtc->rtc->range_max = RTC_TIMESTAMP_BEGIN_2000 + (1 << 16) * 86400ULL - 1; rtcif_write(davinci_rtc, PRTCIF_INTFLG_RTCSS, PRTCIF_INTFLG); rtcif_write(davinci_rtc, 0, PRTCIF_INTEN); @@ -516,7 +484,7 @@ static int __init davinci_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 0); - return 0; + return rtc_register_device(davinci_rtc->rtc); } static int __exit davinci_rtc_remove(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c index 4420fbf2f8fe..a3d790889eea 100644 --- a/drivers/rtc/rtc-ds1305.c +++ b/drivers/rtc/rtc-ds1305.c @@ -325,17 +325,13 @@ static int ds1305_set_alarm(struct device *dev, struct rtc_wkalrm *alm) u8 buf[1 + DS1305_ALM_LEN]; /* convert desired alarm to time_t */ - status = rtc_tm_to_time(&alm->time, &later); - if (status < 0) - return status; + later = rtc_tm_to_time64(&alm->time); /* Read current time as time_t */ status = ds1305_get_time(dev, &tm); if (status < 0) return status; - status = rtc_tm_to_time(&tm, &now); - if (status < 0) - return status; + now = rtc_tm_to_time64(&tm); /* make sure alarm fires within the next 24 hours */ if (later <= now) @@ -694,6 +690,8 @@ static int ds1305_probe(struct spi_device *spi) return PTR_ERR(ds1305->rtc); ds1305->rtc->ops = &ds1305_ops; + ds1305->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + ds1305->rtc->range_max = RTC_TIMESTAMP_END_2099; ds1305_nvmem_cfg.priv = ds1305; ds1305->rtc->nvram_old_abi = true; diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 1f7e8aefc1eb..49702942bb08 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -22,6 +22,7 @@ #include <linux/hwmon-sysfs.h> #include <linux/clk-provider.h> #include <linux/regmap.h> +#include <linux/watchdog.h> /* * We can't determine type by probing, but if we expect pre-Linux code @@ -144,6 +145,15 @@ enum ds_type { # define M41TXX_BIT_CALIB_SIGN BIT(5) # define M41TXX_M_CALIBRATION GENMASK(4, 0) +#define DS1388_REG_WDOG_HUN_SECS 0x08 +#define DS1388_REG_WDOG_SECS 0x09 +#define DS1388_REG_FLAG 0x0b +# define DS1388_BIT_WF BIT(6) +# define DS1388_BIT_OSF BIT(7) +#define DS1388_REG_CONTROL 0x0c +# define DS1388_BIT_RST BIT(0) +# define DS1388_BIT_WDE BIT(1) + /* negative offset step is -2.034ppm */ #define M41TXX_NEG_OFFSET_STEP_PPB 2034 /* positive offset step is +4.068ppm */ @@ -252,6 +262,13 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) if (tmp & DS1340_BIT_OSF) return -EINVAL; break; + case ds_1388: + ret = regmap_read(ds1307->regmap, DS1388_REG_FLAG, &tmp); + if (ret) + return ret; + if (tmp & DS1388_BIT_OSF) + return -EINVAL; + break; case mcp794xx: if (!(tmp & MCP794XX_BIT_ST)) return -EINVAL; @@ -845,6 +862,72 @@ static int m41txx_rtc_set_offset(struct device *dev, long offset) ctrl_reg); } +#ifdef CONFIG_WATCHDOG_CORE +static int ds1388_wdt_start(struct 
watchdog_device *wdt_dev) +{ + struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev); + u8 regs[2]; + int ret; + + ret = regmap_update_bits(ds1307->regmap, DS1388_REG_FLAG, + DS1388_BIT_WF, 0); + if (ret) + return ret; + + ret = regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL, + DS1388_BIT_WDE | DS1388_BIT_RST, 0); + if (ret) + return ret; + + /* + * watchdog timeouts are measured in seconds. So ignore hundredths of + * seconds field. + */ + regs[0] = 0; + regs[1] = bin2bcd(wdt_dev->timeout); + + ret = regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs, + sizeof(regs)); + if (ret) + return ret; + + return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL, + DS1388_BIT_WDE | DS1388_BIT_RST, + DS1388_BIT_WDE | DS1388_BIT_RST); +} + +static int ds1388_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev); + + return regmap_update_bits(ds1307->regmap, DS1388_REG_CONTROL, + DS1388_BIT_WDE | DS1388_BIT_RST, 0); +} + +static int ds1388_wdt_ping(struct watchdog_device *wdt_dev) +{ + struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev); + u8 regs[2]; + + return regmap_bulk_read(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs, + sizeof(regs)); +} + +static int ds1388_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int val) +{ + struct ds1307 *ds1307 = watchdog_get_drvdata(wdt_dev); + u8 regs[2]; + + wdt_dev->timeout = val; + regs[0] = 0; + regs[1] = bin2bcd(wdt_dev->timeout); + + return regmap_bulk_write(ds1307->regmap, DS1388_REG_WDOG_HUN_SECS, regs, + sizeof(regs)); +} +#endif + static const struct rtc_class_ops rx8130_rtc_ops = { .read_time = ds1307_get_time, .set_time = ds1307_set_time, @@ -1567,6 +1650,48 @@ static void ds1307_clks_register(struct ds1307 *ds1307) #endif /* CONFIG_COMMON_CLK */ +#ifdef CONFIG_WATCHDOG_CORE +static const struct watchdog_info ds1388_wdt_info = { + .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, + .identity = "DS1388 watchdog", +}; + +static const struct watchdog_ops ds1388_wdt_ops = { + .owner = THIS_MODULE, + .start = ds1388_wdt_start, + .stop = ds1388_wdt_stop, + .ping = ds1388_wdt_ping, + .set_timeout = ds1388_wdt_set_timeout, + +}; + +static void ds1307_wdt_register(struct ds1307 *ds1307) +{ + struct watchdog_device *wdt; + + if (ds1307->type != ds_1388) + return; + + wdt = devm_kzalloc(ds1307->dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return; + + wdt->info = &ds1388_wdt_info; + wdt->ops = &ds1388_wdt_ops; + wdt->timeout = 99; + wdt->max_timeout = 99; + wdt->min_timeout = 1; + + watchdog_init_timeout(wdt, 0, ds1307->dev); + watchdog_set_drvdata(wdt, ds1307); + devm_watchdog_register_device(ds1307->dev, wdt); +} +#else +static void ds1307_wdt_register(struct ds1307 *ds1307) +{ +} +#endif /* CONFIG_WATCHDOG_CORE */ + static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, @@ -1856,6 +1981,7 @@ static int ds1307_probe(struct i2c_client *client, ds1307_hwmon_register(ds1307); ds1307_clks_register(ds1307); + ds1307_wdt_register(ds1307); return 0; diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c index 6e9ddcd03992..9c51a12cf70f 100644 --- a/drivers/rtc/rtc-ds1374.c +++ b/drivers/rtc/rtc-ds1374.c @@ -164,7 +164,7 @@ static int ds1374_read_time(struct device *dev, struct rtc_time *time) ret = ds1374_read_rtc(client, &itime, DS1374_REG_TOD0, 4); if (!ret) - rtc_time_to_tm(itime, time); + rtc_time64_to_tm(itime, time); return ret; } @@ -172,9 +172,8 @@ static int ds1374_read_time(struct device *dev, struct rtc_time *time) 
static int ds1374_set_time(struct device *dev, struct rtc_time *time) { struct i2c_client *client = to_i2c_client(dev); - unsigned long itime; + unsigned long itime = rtc_tm_to_time64(time); - rtc_tm_to_time(time, &itime); return ds1374_write_rtc(client, itime, DS1374_REG_TOD0, 4); } @@ -212,7 +211,7 @@ static int ds1374_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) if (ret) goto out; - rtc_time_to_tm(now + cur_alarm, &alarm->time); + rtc_time64_to_tm(now + cur_alarm, &alarm->time); alarm->enabled = !!(cr & DS1374_REG_CR_WACE); alarm->pending = !!(sr & DS1374_REG_SR_AF); @@ -237,8 +236,8 @@ static int ds1374_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) if (ret < 0) return ret; - rtc_tm_to_time(&alarm->time, &new_alarm); - rtc_tm_to_time(&now, &itime); + new_alarm = rtc_tm_to_time64(&alarm->time); + itime = rtc_tm_to_time64(&now); /* This can happen due to races, in addition to dates that are * truly in the past. To avoid requiring the caller to check for @@ -620,6 +619,10 @@ static int ds1374_probe(struct i2c_client *client, if (!ds1374) return -ENOMEM; + ds1374->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(ds1374->rtc)) + return PTR_ERR(ds1374->rtc); + ds1374->client = client; i2c_set_clientdata(client, ds1374); @@ -641,12 +644,12 @@ static int ds1374_probe(struct i2c_client *client, device_set_wakeup_capable(&client->dev, 1); } - ds1374->rtc = devm_rtc_device_register(&client->dev, client->name, - &ds1374_rtc_ops, THIS_MODULE); - if (IS_ERR(ds1374->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); - return PTR_ERR(ds1374->rtc); - } + ds1374->rtc->ops = &ds1374_rtc_ops; + ds1374->rtc->range_max = U32_MAX; + + ret = rtc_register_device(ds1374->rtc); + if (ret) + return ret; #ifdef CONFIG_RTC_DRV_DS1374_WDT save_client = client; diff --git a/drivers/rtc/rtc-fsl-ftm-alarm.c b/drivers/rtc/rtc-fsl-ftm-alarm.c index 9e6e994cce99..756af62b0486 100644 --- a/drivers/rtc/rtc-fsl-ftm-alarm.c +++ b/drivers/rtc/rtc-fsl-ftm-alarm.c @@ -20,6 +20,7 @@ #include <linux/fsl/ftm.h> #include <linux/rtc.h> #include <linux/time.h> +#include <linux/acpi.h> #define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_MASK_SHIFT) @@ -151,6 +152,8 @@ static irqreturn_t ftm_rtc_alarm_interrupt(int irq, void *dev) { struct ftm_rtc *rtc = dev; + rtc_update_irq(rtc->rtc_dev, 1, RTC_IRQF | RTC_AF); + ftm_irq_acknowledge(rtc); ftm_irq_disable(rtc); ftm_clean_alarm(rtc); @@ -242,7 +245,6 @@ static const struct rtc_class_ops ftm_rtc_ops = { static int ftm_rtc_probe(struct platform_device *pdev) { - struct device_node *np = pdev->dev.of_node; int irq; int ret; struct ftm_rtc *rtc; @@ -265,10 +267,10 @@ static int ftm_rtc_probe(struct platform_device *pdev) return PTR_ERR(rtc->base); } - irq = irq_of_parse_and_map(np, 0); - if (irq <= 0) { - dev_err(&pdev->dev, "unable to get IRQ from DT, %d\n", irq); - return -EINVAL; + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "can't get irq number\n"); + return irq; } ret = devm_request_irq(&pdev->dev, irq, ftm_rtc_alarm_interrupt, @@ -278,7 +280,9 @@ static int ftm_rtc_probe(struct platform_device *pdev) return ret; } - rtc->big_endian = of_property_read_bool(np, "big-endian"); + rtc->big_endian = + device_property_read_bool(&pdev->dev, "big-endian"); + rtc->alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV; rtc->rtc_dev->ops = &ftm_rtc_ops; @@ -305,11 +309,18 @@ static const struct of_device_id ftm_rtc_match[] = { { }, }; +static const struct acpi_device_id ftm_imx_acpi_ids[] = { + {"NXP0011",}, + { } +}; 
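
The ds1374 conversion above shows the two changes repeated across the RTC patches in this series: the 32-bit rtc_tm_to_time()/rtc_time_to_tm() helpers are replaced by rtc_tm_to_time64()/rtc_time64_to_tm(), and devm_rtc_device_register() is split into devm_rtc_allocate_device() plus rtc_register_device() so the driver can declare its hardware range before registration. Below is a minimal sketch of the resulting probe shape, assuming a free-running 32-bit seconds counter; the foo_* names are placeholders for illustration, not code from any patch in this series.

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

struct foo_rtc {
        struct rtc_device *rtc;
        void __iomem *base;
};

/*
 * The read_time/set_time callbacks would convert against the hardware
 * counter with rtc_time64_to_tm() and rtc_tm_to_time64(); they are
 * omitted from this sketch.
 */
static const struct rtc_class_ops foo_rtc_ops = {
};

static int foo_rtc_probe(struct platform_device *pdev)
{
        struct foo_rtc *priv;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        /* Allocate first so ops and range can be set before registration. */
        priv->rtc = devm_rtc_allocate_device(&pdev->dev);
        if (IS_ERR(priv->rtc))
                return PTR_ERR(priv->rtc);

        priv->rtc->ops = &foo_rtc_ops;
        /* A 32-bit seconds counter: let the core enforce the limit. */
        priv->rtc->range_max = U32_MAX;

        /* IRQ, wakeup and nvmem setup would go here. */

        return rtc_register_device(priv->rtc);
}

Declaring the range lets the RTC core reject out-of-range times centrally, which is why drivers elsewhere in this series (davinci, pl030/pl031, pm8xxx) can drop their open-coded epoch checks and rtc_valid_tm() calls. The hunks below apply the same pattern driver by driver.
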
+MODULE_DEVICE_TABLE(acpi, ftm_imx_acpi_ids); + static struct platform_driver ftm_rtc_driver = { .probe = ftm_rtc_probe, .driver = { .name = "ftm-alarm", .of_match_table = ftm_rtc_match, + .acpi_match_table = ACPI_PTR(ftm_imx_acpi_ids), }, }; diff --git a/drivers/rtc/rtc-imx-sc.c b/drivers/rtc/rtc-imx-sc.c index cf2c12107f2b..a5f59e6f862e 100644 --- a/drivers/rtc/rtc-imx-sc.c +++ b/drivers/rtc/rtc-imx-sc.c @@ -37,7 +37,7 @@ struct imx_sc_msg_timer_rtc_set_alarm { u8 hour; u8 min; u8 sec; -} __packed; +} __packed __aligned(4); static int imx_sc_rtc_read_time(struct device *dev, struct rtc_time *tm) { diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c index 18023e472cbc..e4c719085c31 100644 --- a/drivers/rtc/rtc-jz4740.c +++ b/drivers/rtc/rtc-jz4740.c @@ -46,6 +46,7 @@ enum jz4740_rtc_type { ID_JZ4740, + ID_JZ4760, ID_JZ4780, }; @@ -106,7 +107,7 @@ static inline int jz4740_rtc_reg_write(struct jz4740_rtc *rtc, size_t reg, { int ret = 0; - if (rtc->type >= ID_JZ4780) + if (rtc->type >= ID_JZ4760) ret = jz4780_rtc_enable_write(rtc); if (ret == 0) ret = jz4740_rtc_wait_write_ready(rtc); @@ -298,6 +299,7 @@ static void jz4740_rtc_power_off(void) static const struct of_device_id jz4740_rtc_of_match[] = { { .compatible = "ingenic,jz4740-rtc", .data = (void *)ID_JZ4740 }, + { .compatible = "ingenic,jz4760-rtc", .data = (void *)ID_JZ4760 }, { .compatible = "ingenic,jz4780-rtc", .data = (void *)ID_JZ4780 }, {}, }; @@ -372,13 +374,14 @@ static int jz4740_rtc_probe(struct platform_device *pdev) if (!pm_power_off) { /* Default: 60ms */ rtc->reset_pin_assert_time = 60; - of_property_read_u32(np, "reset-pin-assert-time-ms", + of_property_read_u32(np, + "ingenic,reset-pin-assert-time-ms", &rtc->reset_pin_assert_time); /* Default: 100ms */ rtc->min_wakeup_pin_assert_time = 100; of_property_read_u32(np, - "min-wakeup-pin-assert-time-ms", + "ingenic,min-wakeup-pin-assert-time-ms", &rtc->min_wakeup_pin_assert_time); dev_for_power_off = &pdev->dev; diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c index e8194f1f01a8..92f19bf997b2 100644 --- a/drivers/rtc/rtc-m48t35.c +++ b/drivers/rtc/rtc-m48t35.c @@ -160,15 +160,10 @@ static int m48t35_probe(struct platform_device *pdev) return -ENOMEM; priv->size = resource_size(res); - /* - * kludge: remove the #ifndef after ioc3 resource - * conflicts are resolved - */ -#ifndef CONFIG_SGI_IP27 if (!devm_request_mem_region(&pdev->dev, res->start, priv->size, pdev->name)) return -EBUSY; -#endif + priv->baseaddr = res->start; priv->reg = devm_ioremap(&pdev->dev, priv->baseaddr, priv->size); if (!priv->reg) diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c index 15a9d0278778..3040844129ce 100644 --- a/drivers/rtc/rtc-mpc5121.c +++ b/drivers/rtc/rtc-mpc5121.c @@ -111,7 +111,7 @@ static int mpc5121_rtc_read_time(struct device *dev, struct rtc_time *tm) */ now = in_be32(&regs->actual_time) + in_be32(&regs->target_time); - rtc_time_to_tm(now, tm); + rtc_time64_to_tm(now, tm); /* * update second minute hour registers @@ -126,16 +126,14 @@ static int mpc5121_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct mpc5121_rtc_data *rtc = dev_get_drvdata(dev); struct mpc5121_rtc_regs __iomem *regs = rtc->regs; - int ret; unsigned long now; /* * The actual_time register is read only so we write the offset * between it and linux time to the target_time register. 
*/ - ret = rtc_tm_to_time(tm, &now); - if (ret == 0) - out_be32(&regs->target_time, now - in_be32(&regs->actual_time)); + now = rtc_tm_to_time64(tm); + out_be32(&regs->target_time, now - in_be32(&regs->actual_time)); /* * update second minute hour registers @@ -315,8 +313,8 @@ static int mpc5121_rtc_probe(struct platform_device *op) if (!rtc) return -ENOMEM; - rtc->regs = of_iomap(op->dev.of_node, 0); - if (!rtc->regs) { + rtc->regs = devm_platform_ioremap_resource(op, 0); + if (IS_ERR(rtc->regs)) { dev_err(&op->dev, "%s: couldn't map io space\n", __func__); return -ENOSYS; } @@ -326,8 +324,8 @@ static int mpc5121_rtc_probe(struct platform_device *op) platform_set_drvdata(op, rtc); rtc->irq = irq_of_parse_and_map(op->dev.of_node, 1); - err = request_irq(rtc->irq, mpc5121_rtc_handler, 0, - "mpc5121-rtc", &op->dev); + err = devm_request_irq(&op->dev, rtc->irq, mpc5121_rtc_handler, 0, + "mpc5121-rtc", &op->dev); if (err) { dev_err(&op->dev, "%s: could not request irq: %i\n", __func__, rtc->irq); @@ -335,14 +333,26 @@ static int mpc5121_rtc_probe(struct platform_device *op) } rtc->irq_periodic = irq_of_parse_and_map(op->dev.of_node, 0); - err = request_irq(rtc->irq_periodic, mpc5121_rtc_handler_upd, - 0, "mpc5121-rtc_upd", &op->dev); + err = devm_request_irq(&op->dev, rtc->irq_periodic, + mpc5121_rtc_handler_upd, 0, "mpc5121-rtc_upd", + &op->dev); if (err) { dev_err(&op->dev, "%s: could not request irq: %i\n", __func__, rtc->irq_periodic); goto out_dispose2; } + rtc->rtc = devm_rtc_allocate_device(&op->dev); + if (IS_ERR(rtc->rtc)) { + err = PTR_ERR(rtc->rtc); + goto out_dispose2; + } + + rtc->rtc->ops = &mpc5200_rtc_ops; + rtc->rtc->uie_unsupported = 1; + rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_0000; + rtc->rtc->range_max = 65733206399ULL; /* 4052-12-31 23:59:59 */ + if (of_device_is_compatible(op->dev.of_node, "fsl,mpc5121-rtc")) { u32 ka; ka = in_be32(&rtc->regs->keep_alive); @@ -351,30 +361,26 @@ static int mpc5121_rtc_probe(struct platform_device *op) "mpc5121-rtc: Battery or oscillator failure!\n"); out_be32(&rtc->regs->keep_alive, ka); } - - rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5121-rtc", - &mpc5121_rtc_ops, THIS_MODULE); - } else { - rtc->rtc = devm_rtc_device_register(&op->dev, "mpc5200-rtc", - &mpc5200_rtc_ops, THIS_MODULE); + rtc->rtc->ops = &mpc5121_rtc_ops; + /* + * This is a limitation of the driver that abuses the target + * time register, the actual maximum year for the mpc5121 is + * also 4052. + */ + rtc->rtc->range_min = 0; + rtc->rtc->range_max = U32_MAX; } - if (IS_ERR(rtc->rtc)) { - err = PTR_ERR(rtc->rtc); - goto out_free_irq; - } - rtc->rtc->uie_unsupported = 1; + err = rtc_register_device(rtc->rtc); + if (err) + goto out_dispose2; return 0; -out_free_irq: - free_irq(rtc->irq_periodic, &op->dev); out_dispose2: irq_dispose_mapping(rtc->irq_periodic); - free_irq(rtc->irq, &op->dev); out_dispose: irq_dispose_mapping(rtc->irq); - iounmap(rtc->regs); return err; } @@ -388,9 +394,6 @@ static int mpc5121_rtc_remove(struct platform_device *op) out_8(&regs->alm_enable, 0); out_8(&regs->int_enable, in_8(&regs->int_enable) & ~0x1); - iounmap(rtc->regs); - free_irq(rtc->irq, &op->dev); - free_irq(rtc->irq_periodic, &op->dev); irq_dispose_mapping(rtc->irq); irq_dispose_mapping(rtc->irq_periodic); diff --git a/drivers/rtc/rtc-mt2712.c b/drivers/rtc/rtc-mt2712.c new file mode 100644 index 000000000000..581b8731fb8a --- /dev/null +++ b/drivers/rtc/rtc-mt2712.c @@ -0,0 +1,423 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Ran Bi <ran.bi@mediatek.com> + */ + +#include <linux/delay.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/irqdomain.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/rtc.h> + +#define MT2712_BBPU 0x0000 +#define MT2712_BBPU_CLRPKY BIT(4) +#define MT2712_BBPU_RELOAD BIT(5) +#define MT2712_BBPU_CBUSY BIT(6) +#define MT2712_BBPU_KEY (0x43 << 8) + +#define MT2712_IRQ_STA 0x0004 +#define MT2712_IRQ_STA_AL BIT(0) +#define MT2712_IRQ_STA_TC BIT(1) + +#define MT2712_IRQ_EN 0x0008 +#define MT2712_IRQ_EN_AL BIT(0) +#define MT2712_IRQ_EN_TC BIT(1) +#define MT2712_IRQ_EN_ONESHOT BIT(2) + +#define MT2712_CII_EN 0x000c + +#define MT2712_AL_MASK 0x0010 +#define MT2712_AL_MASK_DOW BIT(4) + +#define MT2712_TC_SEC 0x0014 +#define MT2712_TC_MIN 0x0018 +#define MT2712_TC_HOU 0x001c +#define MT2712_TC_DOM 0x0020 +#define MT2712_TC_DOW 0x0024 +#define MT2712_TC_MTH 0x0028 +#define MT2712_TC_YEA 0x002c + +#define MT2712_AL_SEC 0x0030 +#define MT2712_AL_MIN 0x0034 +#define MT2712_AL_HOU 0x0038 +#define MT2712_AL_DOM 0x003c +#define MT2712_AL_DOW 0x0040 +#define MT2712_AL_MTH 0x0044 +#define MT2712_AL_YEA 0x0048 + +#define MT2712_SEC_MASK 0x003f +#define MT2712_MIN_MASK 0x003f +#define MT2712_HOU_MASK 0x001f +#define MT2712_DOM_MASK 0x001f +#define MT2712_DOW_MASK 0x0007 +#define MT2712_MTH_MASK 0x000f +#define MT2712_YEA_MASK 0x007f + +#define MT2712_POWERKEY1 0x004c +#define MT2712_POWERKEY2 0x0050 +#define MT2712_POWERKEY1_KEY 0xa357 +#define MT2712_POWERKEY2_KEY 0x67d2 + +#define MT2712_CON0 0x005c +#define MT2712_CON1 0x0060 + +#define MT2712_PROT 0x0070 +#define MT2712_PROT_UNLOCK1 0x9136 +#define MT2712_PROT_UNLOCK2 0x586a + +#define MT2712_WRTGR 0x0078 + +#define MT2712_RTC_TIMESTAMP_END_2127 4985971199LL + +struct mt2712_rtc { + struct rtc_device *rtc; + void __iomem *base; + int irq; + u8 irq_wake_enabled; + u8 powerlost; +}; + +static inline u32 mt2712_readl(struct mt2712_rtc *mt2712_rtc, u32 reg) +{ + return readl(mt2712_rtc->base + reg); +} + +static inline void mt2712_writel(struct mt2712_rtc *mt2712_rtc, + u32 reg, u32 val) +{ + writel(val, mt2712_rtc->base + reg); +} + +static void mt2712_rtc_write_trigger(struct mt2712_rtc *mt2712_rtc) +{ + unsigned long timeout = jiffies + HZ / 10; + + mt2712_writel(mt2712_rtc, MT2712_WRTGR, 1); + while (1) { + if (!(mt2712_readl(mt2712_rtc, MT2712_BBPU) + & MT2712_BBPU_CBUSY)) + break; + + if (time_after(jiffies, timeout)) { + dev_err(&mt2712_rtc->rtc->dev, + "%s time out!\n", __func__); + break; + } + cpu_relax(); + } +} + +static void mt2712_rtc_writeif_unlock(struct mt2712_rtc *mt2712_rtc) +{ + mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK1); + mt2712_rtc_write_trigger(mt2712_rtc); + mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK2); + mt2712_rtc_write_trigger(mt2712_rtc); +} + +static irqreturn_t rtc_irq_handler_thread(int irq, void *data) +{ + struct mt2712_rtc *mt2712_rtc = data; + u16 irqsta; + + /* Clear interrupt */ + irqsta = mt2712_readl(mt2712_rtc, MT2712_IRQ_STA); + if (irqsta & MT2712_IRQ_STA_AL) { + rtc_update_irq(mt2712_rtc->rtc, 1, RTC_IRQF | RTC_AF); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + +static void __mt2712_rtc_read_time(struct mt2712_rtc *mt2712_rtc, + struct rtc_time *tm, int *sec) +{ + tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC) + & MT2712_SEC_MASK; + tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_TC_MIN) + & MT2712_MIN_MASK; + tm->tm_hour = mt2712_readl(mt2712_rtc, 
MT2712_TC_HOU) + & MT2712_HOU_MASK; + tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_TC_DOM) + & MT2712_DOM_MASK; + tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_TC_MTH) - 1) + & MT2712_MTH_MASK; + tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_TC_YEA) + 100) + & MT2712_YEA_MASK; + + *sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC) & MT2712_SEC_MASK; +} + +static int mt2712_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + int sec; + + if (mt2712_rtc->powerlost) + return -EINVAL; + + do { + __mt2712_rtc_read_time(mt2712_rtc, tm, &sec); + } while (sec < tm->tm_sec); /* SEC has carried */ + + return 0; +} + +static int mt2712_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + + mt2712_writel(mt2712_rtc, MT2712_TC_SEC, tm->tm_sec & MT2712_SEC_MASK); + mt2712_writel(mt2712_rtc, MT2712_TC_MIN, tm->tm_min & MT2712_MIN_MASK); + mt2712_writel(mt2712_rtc, MT2712_TC_HOU, tm->tm_hour & MT2712_HOU_MASK); + mt2712_writel(mt2712_rtc, MT2712_TC_DOM, tm->tm_mday & MT2712_DOM_MASK); + mt2712_writel(mt2712_rtc, MT2712_TC_MTH, + (tm->tm_mon + 1) & MT2712_MTH_MASK); + mt2712_writel(mt2712_rtc, MT2712_TC_YEA, + (tm->tm_year - 100) & MT2712_YEA_MASK); + + mt2712_rtc_write_trigger(mt2712_rtc); + + if (mt2712_rtc->powerlost) + mt2712_rtc->powerlost = false; + + return 0; +} + +static int mt2712_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alm->time; + u16 irqen; + + irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN); + alm->enabled = !!(irqen & MT2712_IRQ_EN_AL); + + tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_AL_SEC) & MT2712_SEC_MASK; + tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_AL_MIN) & MT2712_MIN_MASK; + tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_AL_HOU) & MT2712_HOU_MASK; + tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_AL_DOM) & MT2712_DOM_MASK; + tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_AL_MTH) - 1) + & MT2712_MTH_MASK; + tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_AL_YEA) + 100) + & MT2712_YEA_MASK; + + return 0; +} + +static int mt2712_rtc_alarm_irq_enable(struct device *dev, + unsigned int enabled) +{ + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + u16 irqen; + + irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN); + if (enabled) + irqen |= MT2712_IRQ_EN_AL; + else + irqen &= ~MT2712_IRQ_EN_AL; + mt2712_writel(mt2712_rtc, MT2712_IRQ_EN, irqen); + mt2712_rtc_write_trigger(mt2712_rtc); + + return 0; +} + +static int mt2712_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) +{ + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + struct rtc_time *tm = &alm->time; + + dev_dbg(&mt2712_rtc->rtc->dev, "set al time: %ptR, alm en: %d\n", + tm, alm->enabled); + + mt2712_writel(mt2712_rtc, MT2712_AL_SEC, + (mt2712_readl(mt2712_rtc, MT2712_AL_SEC) + & ~(MT2712_SEC_MASK)) | (tm->tm_sec & MT2712_SEC_MASK)); + mt2712_writel(mt2712_rtc, MT2712_AL_MIN, + (mt2712_readl(mt2712_rtc, MT2712_AL_MIN) + & ~(MT2712_MIN_MASK)) | (tm->tm_min & MT2712_MIN_MASK)); + mt2712_writel(mt2712_rtc, MT2712_AL_HOU, + (mt2712_readl(mt2712_rtc, MT2712_AL_HOU) + & ~(MT2712_HOU_MASK)) | (tm->tm_hour & MT2712_HOU_MASK)); + mt2712_writel(mt2712_rtc, MT2712_AL_DOM, + (mt2712_readl(mt2712_rtc, MT2712_AL_DOM) + & ~(MT2712_DOM_MASK)) | (tm->tm_mday & MT2712_DOM_MASK)); + mt2712_writel(mt2712_rtc, MT2712_AL_MTH, + (mt2712_readl(mt2712_rtc, MT2712_AL_MTH) + & ~(MT2712_MTH_MASK)) + | ((tm->tm_mon + 1) & 
MT2712_MTH_MASK)); + mt2712_writel(mt2712_rtc, MT2712_AL_YEA, + (mt2712_readl(mt2712_rtc, MT2712_AL_YEA) + & ~(MT2712_YEA_MASK)) + | ((tm->tm_year - 100) & MT2712_YEA_MASK)); + + /* mask day of week */ + mt2712_writel(mt2712_rtc, MT2712_AL_MASK, MT2712_AL_MASK_DOW); + mt2712_rtc_write_trigger(mt2712_rtc); + + mt2712_rtc_alarm_irq_enable(dev, alm->enabled); + + return 0; +} + +/* Init RTC register */ +static void mt2712_rtc_hw_init(struct mt2712_rtc *mt2712_rtc) +{ + u32 p1, p2; + + mt2712_writel(mt2712_rtc, MT2712_BBPU, + MT2712_BBPU_KEY | MT2712_BBPU_RELOAD); + + mt2712_writel(mt2712_rtc, MT2712_CII_EN, 0); + mt2712_writel(mt2712_rtc, MT2712_AL_MASK, 0); + /* necessary before set MT2712_POWERKEY */ + mt2712_writel(mt2712_rtc, MT2712_CON0, 0x4848); + mt2712_writel(mt2712_rtc, MT2712_CON1, 0x0048); + + mt2712_rtc_write_trigger(mt2712_rtc); + + p1 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY1); + p2 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY2); + if (p1 != MT2712_POWERKEY1_KEY || p2 != MT2712_POWERKEY2_KEY) { + mt2712_rtc->powerlost = true; + dev_dbg(&mt2712_rtc->rtc->dev, + "powerkey not set (lost power)\n"); + } else { + mt2712_rtc->powerlost = false; + } + + /* RTC need POWERKEY1/2 match, then goto normal work mode */ + mt2712_writel(mt2712_rtc, MT2712_POWERKEY1, MT2712_POWERKEY1_KEY); + mt2712_writel(mt2712_rtc, MT2712_POWERKEY2, MT2712_POWERKEY2_KEY); + mt2712_rtc_write_trigger(mt2712_rtc); + + mt2712_rtc_writeif_unlock(mt2712_rtc); +} + +static const struct rtc_class_ops mt2712_rtc_ops = { + .read_time = mt2712_rtc_read_time, + .set_time = mt2712_rtc_set_time, + .read_alarm = mt2712_rtc_read_alarm, + .set_alarm = mt2712_rtc_set_alarm, + .alarm_irq_enable = mt2712_rtc_alarm_irq_enable, +}; + +static int mt2712_rtc_probe(struct platform_device *pdev) +{ + struct resource *res; + struct mt2712_rtc *mt2712_rtc; + int ret; + + mt2712_rtc = devm_kzalloc(&pdev->dev, + sizeof(struct mt2712_rtc), GFP_KERNEL); + if (!mt2712_rtc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mt2712_rtc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mt2712_rtc->base)) + return PTR_ERR(mt2712_rtc->base); + + /* rtc hw init */ + mt2712_rtc_hw_init(mt2712_rtc); + + mt2712_rtc->irq = platform_get_irq(pdev, 0); + if (mt2712_rtc->irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + return mt2712_rtc->irq; + } + + platform_set_drvdata(pdev, mt2712_rtc); + + mt2712_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(mt2712_rtc->rtc)) + return PTR_ERR(mt2712_rtc->rtc); + + ret = devm_request_threaded_irq(&pdev->dev, mt2712_rtc->irq, NULL, + rtc_irq_handler_thread, + IRQF_ONESHOT | IRQF_TRIGGER_LOW, + dev_name(&mt2712_rtc->rtc->dev), + mt2712_rtc); + if (ret) { + dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n", + mt2712_rtc->irq, ret); + return ret; + } + + device_init_wakeup(&pdev->dev, true); + + mt2712_rtc->rtc->ops = &mt2712_rtc_ops; + mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; + mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127; + + ret = rtc_register_device(mt2712_rtc->rtc); + if (ret) { + dev_err(&pdev->dev, "register rtc device failed\n"); + return ret; + } + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int mt2712_rtc_suspend(struct device *dev) +{ + int wake_status = 0; + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) { + wake_status = enable_irq_wake(mt2712_rtc->irq); + if (!wake_status) + mt2712_rtc->irq_wake_enabled = true; + } + + return 0; +} + +static int 
mt2712_rtc_resume(struct device *dev) +{ + int wake_status = 0; + struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); + + if (device_may_wakeup(dev) && mt2712_rtc->irq_wake_enabled) { + wake_status = disable_irq_wake(mt2712_rtc->irq); + if (!wake_status) + mt2712_rtc->irq_wake_enabled = false; + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(mt2712_pm_ops, mt2712_rtc_suspend, + mt2712_rtc_resume); +#endif + +static const struct of_device_id mt2712_rtc_of_match[] = { + { .compatible = "mediatek,mt2712-rtc", }, + { }, +}; + +MODULE_DEVICE_TABLE(of, mt2712_rtc_of_match); + +static struct platform_driver mt2712_rtc_driver = { + .driver = { + .name = "mt2712-rtc", + .of_match_table = mt2712_rtc_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &mt2712_pm_ops, +#endif + }, + .probe = mt2712_rtc_probe, +}; + +module_platform_driver(mt2712_rtc_driver); + +MODULE_DESCRIPTION("MediaTek MT2712 SoC based RTC Driver"); +MODULE_AUTHOR("Ran Bi <ran.bi@mediatek.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 902d57dcd0d4..a8cfbde048f4 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -307,6 +307,14 @@ static const struct rtc_class_ops mxc_rtc_ops = { .alarm_irq_enable = mxc_rtc_alarm_irq_enable, }; +static void mxc_rtc_action(void *p) +{ + struct rtc_plat_data *pdata = p; + + clk_disable_unprepare(pdata->clk_ref); + clk_disable_unprepare(pdata->clk_ipg); +} + static int mxc_rtc_probe(struct platform_device *pdev) { struct rtc_device *rtc; @@ -366,14 +374,20 @@ static int mxc_rtc_probe(struct platform_device *pdev) pdata->clk_ref = devm_clk_get(&pdev->dev, "ref"); if (IS_ERR(pdata->clk_ref)) { + clk_disable_unprepare(pdata->clk_ipg); dev_err(&pdev->dev, "unable to get ref clock!\n"); - ret = PTR_ERR(pdata->clk_ref); - goto exit_put_clk_ipg; + return PTR_ERR(pdata->clk_ref); } ret = clk_prepare_enable(pdata->clk_ref); + if (ret) { + clk_disable_unprepare(pdata->clk_ipg); + return ret; + } + + ret = devm_add_action_or_reset(&pdev->dev, mxc_rtc_action, pdata); if (ret) - goto exit_put_clk_ipg; + return ret; rate = clk_get_rate(pdata->clk_ref); @@ -385,16 +399,14 @@ static int mxc_rtc_probe(struct platform_device *pdev) reg = RTC_INPUT_CLK_38400HZ; else { dev_err(&pdev->dev, "rtc clock is not valid (%lu)\n", rate); - ret = -EINVAL; - goto exit_put_clk_ref; + return -EINVAL; } reg |= RTC_ENABLE_BIT; writew(reg, (pdata->ioaddr + RTC_RTCCTL)); if (((readw(pdata->ioaddr + RTC_RTCCTL)) & RTC_ENABLE_BIT) == 0) { dev_err(&pdev->dev, "hardware module can't be enabled!\n"); - ret = -EIO; - goto exit_put_clk_ref; + return -EIO; } platform_set_drvdata(pdev, pdata); @@ -417,29 +429,10 @@ static int mxc_rtc_probe(struct platform_device *pdev) } ret = rtc_register_device(rtc); - if (ret) - goto exit_put_clk_ref; - - return 0; - -exit_put_clk_ref: - clk_disable_unprepare(pdata->clk_ref); -exit_put_clk_ipg: - clk_disable_unprepare(pdata->clk_ipg); return ret; } -static int mxc_rtc_remove(struct platform_device *pdev) -{ - struct rtc_plat_data *pdata = platform_get_drvdata(pdev); - - clk_disable_unprepare(pdata->clk_ref); - clk_disable_unprepare(pdata->clk_ipg); - - return 0; -} - static struct platform_driver mxc_rtc_driver = { .driver = { .name = "mxc_rtc", @@ -447,7 +440,6 @@ static struct platform_driver mxc_rtc_driver = { }, .id_table = imx_rtc_devtype, .probe = mxc_rtc_probe, - .remove = mxc_rtc_remove, }; module_platform_driver(mxc_rtc_driver) diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index d4ed20fb3194..c20fc7937dfa 100644 --- 
a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c @@ -9,7 +9,6 @@ * Copyright (C) 2014 Johan Hovold <johan@kernel.org> */ -#include <dt-bindings/gpio/gpio.h> #include <linux/bcd.h> #include <linux/clk.h> #include <linux/delay.h> diff --git a/drivers/rtc/rtc-pcf85063.c b/drivers/rtc/rtc-pcf85063.c index 1db17ba1fc64..7a87f461bec8 100644 --- a/drivers/rtc/rtc-pcf85063.c +++ b/drivers/rtc/rtc-pcf85063.c @@ -9,6 +9,7 @@ * Copyright (C) 2019 Micro Crystal AG * Author: Alexandre Belloni <alexandre.belloni@bootlin.com> */ +#include <linux/clk-provider.h> #include <linux/i2c.h> #include <linux/bcd.h> #include <linux/rtc.h> @@ -44,6 +45,10 @@ #define PCF85063_OFFSET_STEP0 4340 #define PCF85063_OFFSET_STEP1 4069 +#define PCF85063_REG_CLKO_F_MASK 0x07 /* frequency mask */ +#define PCF85063_REG_CLKO_F_32768HZ 0x00 +#define PCF85063_REG_CLKO_F_OFF 0x07 + #define PCF85063_REG_RAM 0x03 #define PCF85063_REG_SC 0x04 /* datetime */ @@ -61,6 +66,9 @@ struct pcf85063_config { struct pcf85063 { struct rtc_device *rtc; struct regmap *regmap; +#ifdef CONFIG_COMMON_CLK + struct clk_hw clkout_hw; +#endif }; static int pcf85063_rtc_read_time(struct device *dev, struct rtc_time *tm) @@ -357,6 +365,150 @@ static int pcf85063_load_capacitance(struct pcf85063 *pcf85063, PCF85063_REG_CTRL1_CAP_SEL, reg); } +#ifdef CONFIG_COMMON_CLK +/* + * Handling of the clkout + */ + +#define clkout_hw_to_pcf85063(_hw) container_of(_hw, struct pcf85063, clkout_hw) + +static int clkout_rates[] = { + 32768, + 16384, + 8192, + 4096, + 2048, + 1024, + 1, + 0 +}; + +static unsigned long pcf85063_clkout_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw); + unsigned int buf; + int ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf); + + if (ret < 0) + return 0; + + buf &= PCF85063_REG_CLKO_F_MASK; + return clkout_rates[buf]; +} + +static long pcf85063_clkout_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) + if (clkout_rates[i] <= rate) + return clkout_rates[i]; + + return 0; +} + +static int pcf85063_clkout_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw); + int i; + + for (i = 0; i < ARRAY_SIZE(clkout_rates); i++) + if (clkout_rates[i] == rate) + return regmap_update_bits(pcf85063->regmap, + PCF85063_REG_CTRL2, + PCF85063_REG_CLKO_F_MASK, i); + + return -EINVAL; +} + +static int pcf85063_clkout_control(struct clk_hw *hw, bool enable) +{ + struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw); + unsigned int buf; + int ret; + + ret = regmap_read(pcf85063->regmap, PCF85063_REG_OFFSET, &buf); + if (ret < 0) + return ret; + buf &= PCF85063_REG_CLKO_F_MASK; + + if (enable) { + if (buf == PCF85063_REG_CLKO_F_OFF) + buf = PCF85063_REG_CLKO_F_32768HZ; + else + return 0; + } else { + if (buf != PCF85063_REG_CLKO_F_OFF) + buf = PCF85063_REG_CLKO_F_OFF; + else + return 0; + } + + return regmap_update_bits(pcf85063->regmap, PCF85063_REG_CTRL2, + PCF85063_REG_CLKO_F_MASK, buf); +} + +static int pcf85063_clkout_prepare(struct clk_hw *hw) +{ + return pcf85063_clkout_control(hw, 1); +} + +static void pcf85063_clkout_unprepare(struct clk_hw *hw) +{ + pcf85063_clkout_control(hw, 0); +} + +static int pcf85063_clkout_is_prepared(struct clk_hw *hw) +{ + struct pcf85063 *pcf85063 = clkout_hw_to_pcf85063(hw); + unsigned int buf; + int ret = regmap_read(pcf85063->regmap, PCF85063_REG_CTRL2, &buf); + + if (ret < 
0) + return 0; + + return (buf & PCF85063_REG_CLKO_F_MASK) != PCF85063_REG_CLKO_F_OFF; +} + +static const struct clk_ops pcf85063_clkout_ops = { + .prepare = pcf85063_clkout_prepare, + .unprepare = pcf85063_clkout_unprepare, + .is_prepared = pcf85063_clkout_is_prepared, + .recalc_rate = pcf85063_clkout_recalc_rate, + .round_rate = pcf85063_clkout_round_rate, + .set_rate = pcf85063_clkout_set_rate, +}; + +static struct clk *pcf85063_clkout_register_clk(struct pcf85063 *pcf85063) +{ + struct clk *clk; + struct clk_init_data init; + + init.name = "pcf85063-clkout"; + init.ops = &pcf85063_clkout_ops; + init.flags = 0; + init.parent_names = NULL; + init.num_parents = 0; + pcf85063->clkout_hw.init = &init; + + /* optional override of the clockname */ + of_property_read_string(pcf85063->rtc->dev.of_node, + "clock-output-names", &init.name); + + /* register the clock */ + clk = devm_clk_register(&pcf85063->rtc->dev, &pcf85063->clkout_hw); + + if (!IS_ERR(clk)) + of_clk_add_provider(pcf85063->rtc->dev.of_node, + of_clk_src_simple_get, clk); + + return clk; +} +#endif + static const struct pcf85063_config pcf85063a_config = { .regmap = { .reg_bits = 8, @@ -457,6 +609,11 @@ static int pcf85063_probe(struct i2c_client *client) nvmem_cfg.priv = pcf85063->regmap; rtc_nvmem_register(pcf85063->rtc, &nvmem_cfg); +#ifdef CONFIG_COMMON_CLK + /* register clk in common clk framework */ + pcf85063_clkout_register_clk(pcf85063); +#endif + return rtc_register_device(pcf85063->rtc); } diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c index d4a5f8afafbc..ebe03eba8f5f 100644 --- a/drivers/rtc/rtc-pl030.c +++ b/drivers/rtc/rtc-pl030.c @@ -36,32 +36,24 @@ static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); - rtc_time_to_tm(readl(rtc->base + RTC_MR), &alrm->time); + rtc_time64_to_tm(readl(rtc->base + RTC_MR), &alrm->time); return 0; } static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); - unsigned long time; - int ret; - /* - * At the moment, we can only deal with non-wildcarded alarm times. 
- */ - ret = rtc_valid_tm(&alrm->time); - if (ret == 0) - ret = rtc_tm_to_time(&alrm->time, &time); - if (ret == 0) - writel(time, rtc->base + RTC_MR); - return ret; + writel(rtc_tm_to_time64(&alrm->time), rtc->base + RTC_MR); + + return 0; } static int pl030_read_time(struct device *dev, struct rtc_time *tm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); - rtc_time_to_tm(readl(rtc->base + RTC_DR), tm); + rtc_time64_to_tm(readl(rtc->base + RTC_DR), tm); return 0; } @@ -77,14 +69,10 @@ static int pl030_read_time(struct device *dev, struct rtc_time *tm) static int pl030_set_time(struct device *dev, struct rtc_time *tm) { struct pl030_rtc *rtc = dev_get_drvdata(dev); - unsigned long time; - int ret; - ret = rtc_tm_to_time(tm, &time); - if (ret == 0) - writel(time + 1, rtc->base + RTC_LR); + writel(rtc_tm_to_time64(tm) + 1, rtc->base + RTC_LR); - return ret; + return 0; } static const struct rtc_class_ops pl030_ops = { @@ -116,6 +104,7 @@ static int pl030_probe(struct amba_device *dev, const struct amba_id *id) } rtc->rtc->ops = &pl030_ops; + rtc->rtc->range_max = U32_MAX; rtc->base = ioremap(dev->res.start, resource_size(&dev->res)); if (!rtc->base) { ret = -ENOMEM; diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c index 180caebbd355..40d7450a1ce4 100644 --- a/drivers/rtc/rtc-pl031.c +++ b/drivers/rtc/rtc-pl031.c @@ -80,6 +80,8 @@ struct pl031_vendor_data { bool clockwatch; bool st_weekday; unsigned long irqflags; + time64_t range_min; + timeu64_t range_max; }; struct pl031_local { @@ -123,11 +125,9 @@ static int pl031_stv2_tm_to_time(struct device *dev, return -EINVAL; } else if (wday == -1) { /* wday is not provided, calculate it here */ - unsigned long time; struct rtc_time calc_tm; - rtc_tm_to_time(tm, &time); - rtc_time_to_tm(time, &calc_tm); + rtc_time64_to_tm(rtc_tm_to_time64(tm), &calc_tm); wday = calc_tm.tm_wday; } @@ -210,17 +210,13 @@ static int pl031_stv2_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) unsigned long bcd_year; int ret; - /* At the moment, we can only deal with non-wildcarded alarm times. 
*/ - ret = rtc_valid_tm(&alarm->time); + ret = pl031_stv2_tm_to_time(dev, &alarm->time, + &time, &bcd_year); if (ret == 0) { - ret = pl031_stv2_tm_to_time(dev, &alarm->time, - &time, &bcd_year); - if (ret == 0) { - writel(bcd_year, ldata->base + RTC_YMR); - writel(time, ldata->base + RTC_MR); + writel(bcd_year, ldata->base + RTC_YMR); + writel(time, ldata->base + RTC_MR); - pl031_alarm_irq_enable(dev, alarm->enabled); - } + pl031_alarm_irq_enable(dev, alarm->enabled); } return ret; @@ -248,30 +244,25 @@ static int pl031_read_time(struct device *dev, struct rtc_time *tm) { struct pl031_local *ldata = dev_get_drvdata(dev); - rtc_time_to_tm(readl(ldata->base + RTC_DR), tm); + rtc_time64_to_tm(readl(ldata->base + RTC_DR), tm); return 0; } static int pl031_set_time(struct device *dev, struct rtc_time *tm) { - unsigned long time; struct pl031_local *ldata = dev_get_drvdata(dev); - int ret; - ret = rtc_tm_to_time(tm, &time); + writel(rtc_tm_to_time64(tm), ldata->base + RTC_LR); - if (ret == 0) - writel(time, ldata->base + RTC_LR); - - return ret; + return 0; } static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct pl031_local *ldata = dev_get_drvdata(dev); - rtc_time_to_tm(readl(ldata->base + RTC_MR), &alarm->time); + rtc_time64_to_tm(readl(ldata->base + RTC_MR), &alarm->time); alarm->pending = readl(ldata->base + RTC_RIS) & RTC_BIT_AI; alarm->enabled = readl(ldata->base + RTC_IMSC) & RTC_BIT_AI; @@ -282,20 +273,10 @@ static int pl031_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) static int pl031_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct pl031_local *ldata = dev_get_drvdata(dev); - unsigned long time; - int ret; - /* At the moment, we can only deal with non-wildcarded alarm times. */ - ret = rtc_valid_tm(&alarm->time); - if (ret == 0) { - ret = rtc_tm_to_time(&alarm->time, &time); - if (ret == 0) { - writel(time, ldata->base + RTC_MR); - pl031_alarm_irq_enable(dev, alarm->enabled); - } - } + writel(rtc_tm_to_time64(&alarm->time), ldata->base + RTC_MR); - return ret; + return 0; } static int pl031_remove(struct amba_device *adev) @@ -383,6 +364,8 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id) return PTR_ERR(ldata->rtc); ldata->rtc->ops = ops; + ldata->rtc->range_min = vendor->range_min; + ldata->rtc->range_max = vendor->range_max; ret = rtc_register_device(ldata->rtc); if (ret) @@ -413,6 +396,7 @@ static struct pl031_vendor_data arm_pl031 = { .set_alarm = pl031_set_alarm, .alarm_irq_enable = pl031_alarm_irq_enable, }, + .range_max = U32_MAX, }; /* The First ST derivative */ @@ -426,6 +410,7 @@ static struct pl031_vendor_data stv1_pl031 = { }, .clockwatch = true, .st_weekday = true, + .range_max = U32_MAX, }; /* And the second ST derivative */ @@ -446,6 +431,8 @@ static struct pl031_vendor_data stv2_pl031 = { * remove IRQF_COND_SUSPEND */ .irqflags = IRQF_SHARED | IRQF_COND_SUSPEND, + .range_min = RTC_TIMESTAMP_BEGIN_0000, + .range_max = RTC_TIMESTAMP_END_9999, }; static const struct amba_id pl031_ids[] = { diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c index 07ea1be3abb9..b45ee2cb2c04 100644 --- a/drivers/rtc/rtc-pm8xxx.c +++ b/drivers/rtc/rtc-pm8xxx.c @@ -84,7 +84,7 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) if (!rtc_dd->allow_set_time) return -EACCES; - rtc_tm_to_time(tm, &secs); + secs = rtc_tm_to_time64(tm); dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs); @@ -208,7 +208,7 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct 
rtc_time *tm) secs = value[0] | (value[1] << 8) | (value[2] << 16) | ((unsigned long)value[3] << 24); - rtc_time_to_tm(secs, tm); + rtc_time64_to_tm(secs, tm); dev_dbg(dev, "secs = %lu, h:m:s == %ptRt, y-m-d = %ptRdr\n", secs, tm, tm); @@ -224,7 +224,7 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; - rtc_tm_to_time(&alarm->time, &secs); + secs = rtc_tm_to_time64(&alarm->time); for (i = 0; i < NUM_8_BIT_RTC_REGS; i++) { value[i] = secs & 0xFF; @@ -280,13 +280,7 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) secs = value[0] | (value[1] << 8) | (value[2] << 16) | ((unsigned long)value[3] << 24); - rtc_time_to_tm(secs, &alarm->time); - - rc = rtc_valid_tm(&alarm->time); - if (rc < 0) { - dev_err(dev, "Invalid alarm time read from RTC\n"); - return rc; - } + rtc_time64_to_tm(secs, &alarm->time); dev_dbg(dev, "Alarm set for - h:m:s=%ptRt, y-m-d=%ptRdr\n", &alarm->time, &alarm->time); @@ -301,6 +295,7 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; unsigned int ctrl_reg; + u8 value[NUM_8_BIT_RTC_REGS] = {0}; spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); @@ -319,6 +314,16 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) goto rtc_rw_fail; } + /* Clear Alarm register */ + if (!enable) { + rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value, + sizeof(value)); + if (rc) { + dev_err(dev, "Clear RTC ALARM register failed\n"); + goto rtc_rw_fail; + } + } + rtc_rw_fail: spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); return rc; @@ -486,13 +491,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); /* Register the RTC device */ - rtc_dd->rtc = devm_rtc_device_register(&pdev->dev, "pm8xxx_rtc", - &pm8xxx_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc_dd->rtc)) { - dev_err(&pdev->dev, "%s: RTC registration failed (%ld)\n", - __func__, PTR_ERR(rtc_dd->rtc)); + rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc_dd->rtc)) return PTR_ERR(rtc_dd->rtc); - } + + rtc_dd->rtc->ops = &pm8xxx_rtc_ops; + rtc_dd->rtc->range_max = U32_MAX; /* Request the alarm IRQ */ rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->rtc_alarm_irq, @@ -504,9 +508,7 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev) return rc; } - dev_dbg(&pdev->dev, "Probe success !!\n"); - - return 0; + return rtc_register_device(rtc_dd->rtc); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c index 89ff713163dd..954b88d2485f 100644 --- a/drivers/rtc/rtc-puv3.c +++ b/drivers/rtc/rtc-puv3.c @@ -85,7 +85,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled) /* Time read/write */ static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { - rtc_time_to_tm(readl(RTC_RCNR), rtc_tm); + rtc_time64_to_tm(readl(RTC_RCNR), rtc_tm); dev_dbg(dev, "read time %ptRr\n", rtc_tm); @@ -94,12 +94,9 @@ static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) static int puv3_rtc_settime(struct device *dev, struct rtc_time *tm) { - unsigned long rtc_count = 0; - dev_dbg(dev, "set time %ptRr\n", tm); - rtc_tm_to_time(tm, &rtc_count); - writel(rtc_count, RTC_RCNR); + writel(rtc_tm_to_time64(tm), RTC_RCNR); return 0; } @@ -108,7 +105,7 @@ static int puv3_rtc_getalarm(struct 
device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *alm_tm = &alrm->time; - rtc_time_to_tm(readl(RTC_RTAR), alm_tm); + rtc_time64_to_tm(readl(RTC_RTAR), alm_tm); alrm->enabled = readl(RTC_RTSR) & RTC_RTSR_ALE; @@ -120,12 +117,10 @@ static int puv3_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rtc_time *tm = &alrm->time; - unsigned long rtcalarm_count = 0; dev_dbg(dev, "set alarm: %d, %ptRr\n", alrm->enabled, tm); - rtc_tm_to_time(tm, &rtcalarm_count); - writel(rtcalarm_count, RTC_RTAR); + writel(rtc_tm_to_time64(tm), RTC_RTAR); puv3_rtc_setaie(dev, alrm->enabled); @@ -234,6 +229,7 @@ static int puv3_rtc_probe(struct platform_device *pdev) /* register RTC and exit */ rtc->ops = &puv3_rtcops; + rtc->range_max = U32_MAX; ret = rtc_register_device(rtc); if (ret) goto err_nortc; diff --git a/drivers/rtc/rtc-rc5t619.c b/drivers/rtc/rtc-rc5t619.c new file mode 100644 index 000000000000..24e386ecbc7e --- /dev/null +++ b/drivers/rtc/rtc-rc5t619.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * drivers/rtc/rtc-rc5t619.c + * + * Real time clock driver for RICOH RC5T619 power management chip. + * + * Copyright (C) 2019 Andreas Kemnade + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mfd/rn5t618.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/bcd.h> +#include <linux/rtc.h> +#include <linux/slab.h> +#include <linux/irqdomain.h> + +struct rc5t619_rtc { + int irq; + struct rtc_device *rtc; + struct rn5t618 *rn5t618; +}; + +#define CTRL1_ALARM_ENABLED 0x40 +#define CTRL1_24HR 0x20 +#define CTRL1_PERIODIC_MASK 0xf + +#define CTRL2_PON 0x10 +#define CTRL2_ALARM_STATUS 0x80 +#define CTRL2_CTFG 0x4 +#define CTRL2_CTC 0x1 + +#define MONTH_CENTFLAG 0x80 +#define HOUR_PMFLAG 0x20 +#define MDAY_DAL_EXT 0x80 + +static uint8_t rtc5t619_12hour_bcd2bin(uint8_t hour) +{ + if (hour & HOUR_PMFLAG) { + hour = bcd2bin(hour & ~HOUR_PMFLAG); + return hour == 12 ? 12 : 12 + hour; + } + + hour = bcd2bin(hour); + return hour == 12 ? 
0 : hour; +} + +static uint8_t rtc5t619_12hour_bin2bcd(uint8_t hour) +{ + if (!hour) + return 0x12; + + if (hour < 12) + return bin2bcd(hour); + + if (hour == 12) + return 0x12 | HOUR_PMFLAG; + + return bin2bcd(hour - 12) | HOUR_PMFLAG; +} + +static int rc5t619_rtc_periodic_disable(struct device *dev) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + int err; + + /* disable function */ + err = regmap_update_bits(rtc->rn5t618->regmap, + RN5T618_RTC_CTRL1, CTRL1_PERIODIC_MASK, 0); + if (err < 0) + return err; + + /* clear alarm flag and CTFG */ + err = regmap_update_bits(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, + CTRL2_ALARM_STATUS | CTRL2_CTFG | CTRL2_CTC, + 0); + if (err < 0) + return err; + + return 0; +} + +/* things to be done once after power on */ +static int rc5t619_rtc_pon_setup(struct device *dev) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + int err; + unsigned int reg_data; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &reg_data); + if (err < 0) + return err; + + /* clear VDET PON */ + reg_data &= ~(CTRL2_PON | CTRL2_CTC | 0x4a); /* 0101-1011 */ + reg_data |= 0x20; /* 0010-0000 */ + err = regmap_write(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, reg_data); + if (err < 0) + return err; + + /* clearing RTC Adjust register */ + err = regmap_write(rtc->rn5t618->regmap, RN5T618_RTC_ADJUST, 0); + if (err) + return err; + + return regmap_update_bits(rtc->rn5t618->regmap, + RN5T618_RTC_CTRL1, + CTRL1_24HR, CTRL1_24HR); +} + +static int rc5t619_rtc_read_time(struct device *dev, struct rtc_time *tm) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + u8 buff[7]; + int err; + int cent_flag; + unsigned int ctrl1; + unsigned int ctrl2; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2); + if (err < 0) + return err; + + if (ctrl2 & CTRL2_PON) + return -EINVAL; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1); + if (err < 0) + return err; + + err = regmap_bulk_read(rtc->rn5t618->regmap, RN5T618_RTC_SECONDS, + buff, sizeof(buff)); + if (err < 0) + return err; + + if (buff[5] & MONTH_CENTFLAG) + cent_flag = 1; + else + cent_flag = 0; + + tm->tm_sec = bcd2bin(buff[0]); + tm->tm_min = bcd2bin(buff[1]); + + if (ctrl1 & CTRL1_24HR) + tm->tm_hour = bcd2bin(buff[2]); + else + tm->tm_hour = rtc5t619_12hour_bcd2bin(buff[2]); + + tm->tm_wday = bcd2bin(buff[3]); + tm->tm_mday = bcd2bin(buff[4]); + tm->tm_mon = bcd2bin(buff[5] & 0x1f) - 1; /* back to system 0-11 */ + tm->tm_year = bcd2bin(buff[6]) + 100 * cent_flag; + + return 0; +} + +static int rc5t619_rtc_set_time(struct device *dev, struct rtc_time *tm) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + u8 buff[7]; + int err; + int cent_flag; + unsigned int ctrl1; + unsigned int ctrl2; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2); + if (err < 0) + return err; + + if (ctrl2 & CTRL2_PON) + rc5t619_rtc_pon_setup(dev); + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1); + if (err < 0) + return err; + + if (tm->tm_year >= 100) + cent_flag = 1; + else + cent_flag = 0; + + buff[0] = bin2bcd(tm->tm_sec); + buff[1] = bin2bcd(tm->tm_min); + + if (ctrl1 & CTRL1_24HR) + buff[2] = bin2bcd(tm->tm_hour); + else + buff[2] = rtc5t619_12hour_bin2bcd(tm->tm_hour); + + buff[3] = bin2bcd(tm->tm_wday); + buff[4] = bin2bcd(tm->tm_mday); + buff[5] = bin2bcd(tm->tm_mon + 1); /* system set 0-11 */ + buff[6] = bin2bcd(tm->tm_year - cent_flag * 100); + + if (cent_flag) + buff[5] |= MONTH_CENTFLAG; + + err = regmap_bulk_write(rtc->rn5t618->regmap, 
RN5T618_RTC_SECONDS, + buff, sizeof(buff)); + if (err < 0) { + dev_err(dev, "failed to program new time: %d\n", err); + return err; + } + + return 0; +} + +/* 0-disable, 1-enable */ +static int rc5t619_rtc_alarm_enable(struct device *dev, unsigned int enabled) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + + return regmap_update_bits(rtc->rn5t618->regmap, + RN5T618_RTC_CTRL1, + CTRL1_ALARM_ENABLED, + enabled ? CTRL1_ALARM_ENABLED : 0); +} + +static int rc5t619_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + u8 buff[6]; + unsigned int buff_cent; + int err; + int cent_flag; + unsigned int ctrl1; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1); + if (err) + return err; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_MONTH, &buff_cent); + if (err < 0) { + dev_err(dev, "failed to read time: %d\n", err); + return err; + } + + if (buff_cent & MONTH_CENTFLAG) + cent_flag = 1; + else + cent_flag = 0; + + err = regmap_bulk_read(rtc->rn5t618->regmap, RN5T618_RTC_ALARM_Y_SEC, + buff, sizeof(buff)); + if (err) + return err; + + buff[3] = buff[3] & 0x3f; + + alrm->time.tm_sec = bcd2bin(buff[0]); + alrm->time.tm_min = bcd2bin(buff[1]); + + if (ctrl1 & CTRL1_24HR) + alrm->time.tm_hour = bcd2bin(buff[2]); + else + alrm->time.tm_hour = rtc5t619_12hour_bcd2bin(buff[2]); + + alrm->time.tm_mday = bcd2bin(buff[3]); + alrm->time.tm_mon = bcd2bin(buff[4]) - 1; + alrm->time.tm_year = bcd2bin(buff[5]) + 100 * cent_flag; + alrm->enabled = !!(ctrl1 & CTRL1_ALARM_ENABLED); + dev_dbg(dev, "read alarm: %ptR\n", &alrm->time); + + return 0; +} + +static int rc5t619_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + u8 buff[6]; + int err; + int cent_flag; + unsigned int ctrl1; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL1, &ctrl1); + if (err) + return err; + + err = rc5t619_rtc_alarm_enable(dev, 0); + if (err < 0) + return err; + + if (rtc->irq == -1) + return -EINVAL; + + if (alrm->enabled == 0) + return 0; + + if (alrm->time.tm_year >= 100) + cent_flag = 1; + else + cent_flag = 0; + + alrm->time.tm_mon += 1; + buff[0] = bin2bcd(alrm->time.tm_sec); + buff[1] = bin2bcd(alrm->time.tm_min); + + if (ctrl1 & CTRL1_24HR) + buff[2] = bin2bcd(alrm->time.tm_hour); + else + buff[2] = rtc5t619_12hour_bin2bcd(alrm->time.tm_hour); + + buff[3] = bin2bcd(alrm->time.tm_mday); + buff[4] = bin2bcd(alrm->time.tm_mon); + buff[5] = bin2bcd(alrm->time.tm_year - 100 * cent_flag); + buff[3] |= MDAY_DAL_EXT; + + err = regmap_bulk_write(rtc->rn5t618->regmap, RN5T618_RTC_ALARM_Y_SEC, + buff, sizeof(buff)); + if (err < 0) + return err; + + return rc5t619_rtc_alarm_enable(dev, alrm->enabled); +} + +static const struct rtc_class_ops rc5t619_rtc_ops = { + .read_time = rc5t619_rtc_read_time, + .set_time = rc5t619_rtc_set_time, + .set_alarm = rc5t619_rtc_set_alarm, + .read_alarm = rc5t619_rtc_read_alarm, + .alarm_irq_enable = rc5t619_rtc_alarm_enable, +}; + +static int rc5t619_rtc_alarm_flag_clr(struct device *dev) +{ + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + + /* clear alarm-D status bits.*/ + return regmap_update_bits(rtc->rn5t618->regmap, + RN5T618_RTC_CTRL2, + CTRL2_ALARM_STATUS | CTRL2_CTC, 0); +} + +static irqreturn_t rc5t619_rtc_irq(int irq, void *data) +{ + struct device *dev = data; + struct rc5t619_rtc *rtc = dev_get_drvdata(dev); + + rc5t619_rtc_alarm_flag_clr(dev); + + rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF); + return IRQ_HANDLED; +} + +static 
int rc5t619_rtc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent); + struct rc5t619_rtc *rtc; + unsigned int ctrl2; + int err; + + rtc = devm_kzalloc(dev, sizeof(*rtc), GFP_KERNEL); + if (IS_ERR(rtc)) { + err = PTR_ERR(rtc); + return -ENOMEM; + } + + rtc->rn5t618 = rn5t618; + + dev_set_drvdata(dev, rtc); + rtc->irq = -1; + + if (rn5t618->irq_data) + rtc->irq = regmap_irq_get_virq(rn5t618->irq_data, + RN5T618_IRQ_RTC); + + if (rtc->irq < 0) + rtc->irq = -1; + + err = regmap_read(rtc->rn5t618->regmap, RN5T618_RTC_CTRL2, &ctrl2); + if (err < 0) + return err; + + /* disable rtc periodic function */ + err = rc5t619_rtc_periodic_disable(&pdev->dev); + if (err) + return err; + + if (ctrl2 & CTRL2_PON) { + err = rc5t619_rtc_alarm_flag_clr(&pdev->dev); + if (err) + return err; + } + + rtc->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc->rtc)) { + err = PTR_ERR(rtc->rtc); + dev_err(dev, "RTC device register: err %d\n", err); + return err; + } + + rtc->rtc->ops = &rc5t619_rtc_ops; + rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_1900; + rtc->rtc->range_max = RTC_TIMESTAMP_END_2099; + + /* set interrupt and enable it */ + if (rtc->irq != -1) { + err = devm_request_threaded_irq(&pdev->dev, rtc->irq, NULL, + rc5t619_rtc_irq, + IRQF_ONESHOT, + "rtc-rc5t619", + &pdev->dev); + if (err < 0) { + dev_err(&pdev->dev, "request IRQ:%d fail\n", rtc->irq); + rtc->irq = -1; + + err = rc5t619_rtc_alarm_enable(&pdev->dev, 0); + if (err) + return err; + + } else { + /* enable wake */ + device_init_wakeup(&pdev->dev, 1); + enable_irq_wake(rtc->irq); + } + } else { + /* system don't want to using alarm interrupt, so close it */ + err = rc5t619_rtc_alarm_enable(&pdev->dev, 0); + if (err) + return err; + + dev_warn(&pdev->dev, "rc5t619 interrupt is disabled\n"); + } + + return rtc_register_device(rtc->rtc); +} + +static struct platform_driver rc5t619_rtc_driver = { + .driver = { + .name = "rc5t619-rtc", + }, + .probe = rc5t619_rtc_probe, +}; + +module_platform_driver(rc5t619_rtc_driver); +MODULE_ALIAS("platform:rc5t619-rtc"); +MODULE_DESCRIPTION("RICOH RC5T619 RTC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index d37893f6eaee..9ccc97cf5e09 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c @@ -111,20 +111,17 @@ static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct sa1100_rtc *info = dev_get_drvdata(dev); - rtc_time_to_tm(readl_relaxed(info->rcnr), tm); + rtc_time64_to_tm(readl_relaxed(info->rcnr), tm); return 0; } static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct sa1100_rtc *info = dev_get_drvdata(dev); - unsigned long time; - int ret; - ret = rtc_tm_to_time(tm, &time); - if (ret == 0) - writel_relaxed(time, info->rcnr); - return ret; + writel_relaxed(rtc_tm_to_time64(tm), info->rcnr); + + return 0; } static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) @@ -141,24 +138,18 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct sa1100_rtc *info = dev_get_drvdata(dev); - unsigned long time; - int ret; spin_lock_irq(&info->lock); - ret = rtc_tm_to_time(&alrm->time, &time); - if (ret != 0) - goto out; writel_relaxed(readl_relaxed(info->rtsr) & (RTSR_HZE | RTSR_ALE | RTSR_AL), info->rtsr); - writel_relaxed(time, info->rtar); + 
writel_relaxed(rtc_tm_to_time64(&alrm->time), info->rtar); if (alrm->enabled) writel_relaxed(readl_relaxed(info->rtsr) | RTSR_ALE, info->rtsr); else writel_relaxed(readl_relaxed(info->rtsr) & ~RTSR_ALE, info->rtsr); -out: spin_unlock_irq(&info->lock); - return ret; + return 0; } static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq) @@ -182,7 +173,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = { int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) { - struct rtc_device *rtc; int ret; spin_lock_init(&info->lock); @@ -211,15 +201,15 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info) writel_relaxed(0, info->rcnr); } - rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops, - THIS_MODULE); - if (IS_ERR(rtc)) { + info->rtc->ops = &sa1100_rtc_ops; + info->rtc->max_user_freq = RTC_FREQ; + info->rtc->range_max = U32_MAX; + + ret = rtc_register_device(info->rtc); + if (ret) { clk_disable_unprepare(info->clk); - return PTR_ERR(rtc); + return ret; } - info->rtc = rtc; - - rtc->max_user_freq = RTC_FREQ; /* Fix for a nasty initialization problem the in SA11xx RTSR register. * See also the comments in sa1100_rtc_interrupt(). @@ -267,6 +257,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev) info->irq_1hz = irq_1hz; info->irq_alarm = irq_alarm; + info->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(info->rtc)) + return PTR_ERR(info->rtc); + ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0, "rtc 1Hz", &pdev->dev); if (ret) { diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c index feb1f8e52c00..9167b48014a1 100644 --- a/drivers/rtc/rtc-sh.c +++ b/drivers/rtc/rtc-sh.c @@ -504,8 +504,7 @@ static int __init sh_rtc_probe(struct platform_device *pdev) if (unlikely(!rtc->res)) return -EBUSY; - rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, - rtc->regsize); + rtc->regbase = devm_ioremap(&pdev->dev, rtc->res->start, rtc->regsize); if (unlikely(!rtc->regbase)) return -EINVAL; diff --git a/drivers/rtc/rtc-sirfsoc.c b/drivers/rtc/rtc-sirfsoc.c index a2c9c55667cd..abf19435dbad 100644 --- a/drivers/rtc/rtc-sirfsoc.c +++ b/drivers/rtc/rtc-sirfsoc.c @@ -90,13 +90,13 @@ static int sirfsoc_rtc_read_alarm(struct device *dev, */ /* if alarm is in next overflow cycle */ if (rtc_count > rtc_alarm) - rtc_time_to_tm((rtcdrv->overflow_rtc + 1) - << (BITS_PER_LONG - RTC_SHIFT) - | rtc_alarm >> RTC_SHIFT, &(alrm->time)); + rtc_time64_to_tm((rtcdrv->overflow_rtc + 1) + << (BITS_PER_LONG - RTC_SHIFT) + | rtc_alarm >> RTC_SHIFT, &alrm->time); else - rtc_time_to_tm(rtcdrv->overflow_rtc - << (BITS_PER_LONG - RTC_SHIFT) - | rtc_alarm >> RTC_SHIFT, &(alrm->time)); + rtc_time64_to_tm(rtcdrv->overflow_rtc + << (BITS_PER_LONG - RTC_SHIFT) + | rtc_alarm >> RTC_SHIFT, &alrm->time); if (sirfsoc_rtc_readl(rtcdrv, RTC_STATUS) & SIRFSOC_RTC_AL0E) alrm->enabled = 1; @@ -113,7 +113,7 @@ static int sirfsoc_rtc_set_alarm(struct device *dev, rtcdrv = dev_get_drvdata(dev); if (alrm->enabled) { - rtc_tm_to_time(&(alrm->time), &rtc_alarm); + rtc_alarm = rtc_tm_to_time64(&alrm->time); spin_lock_irq(&rtcdrv->lock); @@ -181,8 +181,8 @@ static int sirfsoc_rtc_read_time(struct device *dev, cpu_relax(); } while (tmp_rtc != sirfsoc_rtc_readl(rtcdrv, RTC_CN)); - rtc_time_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) | - tmp_rtc >> RTC_SHIFT, tm); + rtc_time64_to_tm(rtcdrv->overflow_rtc << (BITS_PER_LONG - RTC_SHIFT) + | tmp_rtc >> RTC_SHIFT, tm); return 0; } @@ -193,7 +193,7 @@ static int 
sirfsoc_rtc_set_time(struct device *dev, struct sirfsoc_rtc_drv *rtcdrv; rtcdrv = dev_get_drvdata(dev); - rtc_tm_to_time(tm, &rtc_time); + rtc_time = rtc_tm_to_time64(tm); rtcdrv->overflow_rtc = rtc_time >> (BITS_PER_LONG - RTC_SHIFT); @@ -341,28 +341,22 @@ static int sirfsoc_rtc_probe(struct platform_device *pdev) rtcdrv->overflow_rtc = sirfsoc_rtc_readl(rtcdrv, RTC_SW_VALUE); - rtcdrv->rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &sirfsoc_rtc_ops, THIS_MODULE); - if (IS_ERR(rtcdrv->rtc)) { - err = PTR_ERR(rtcdrv->rtc); - dev_err(&pdev->dev, "can't register RTC device\n"); - return err; - } + rtcdrv->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtcdrv->rtc)) + return PTR_ERR(rtcdrv->rtc); + + rtcdrv->rtc->ops = &sirfsoc_rtc_ops; + rtcdrv->rtc->range_max = (1ULL << 60) - 1; rtcdrv->irq = platform_get_irq(pdev, 0); - err = devm_request_irq( - &pdev->dev, - rtcdrv->irq, - sirfsoc_rtc_irq_handler, - IRQF_SHARED, - pdev->name, - rtcdrv); + err = devm_request_irq(&pdev->dev, rtcdrv->irq, sirfsoc_rtc_irq_handler, + IRQF_SHARED, pdev->name, rtcdrv); if (err) { dev_err(&pdev->dev, "Unable to register for the SiRF SOC RTC IRQ\n"); return err; } - return 0; + return rtc_register_device(rtcdrv->rtc); } #ifdef CONFIG_PM_SLEEP diff --git a/drivers/rtc/rtc-snvs.c b/drivers/rtc/rtc-snvs.c index 757f4daa7181..35ee08aa7584 100644 --- a/drivers/rtc/rtc-snvs.c +++ b/drivers/rtc/rtc-snvs.c @@ -7,7 +7,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> -#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_wakeirq.h> #include <linux/rtc.h> @@ -264,6 +263,12 @@ static const struct regmap_config snvs_rtc_config = { .reg_stride = 4, }; +static void snvs_rtc_action(void *data) +{ + if (data) + clk_disable_unprepare(data); +} + static int snvs_rtc_probe(struct platform_device *pdev) { struct snvs_rtc_data *data; @@ -314,6 +319,10 @@ static int snvs_rtc_probe(struct platform_device *pdev) } } + ret = devm_add_action_or_reset(&pdev->dev, snvs_rtc_action, data->clk); + if (ret) + return ret; + platform_set_drvdata(pdev, data); /* Initialize glitch detect */ @@ -326,7 +335,7 @@ static int snvs_rtc_probe(struct platform_device *pdev) ret = snvs_rtc_enable(data, true); if (ret) { dev_err(&pdev->dev, "failed to enable rtc %d\n", ret); - goto error_rtc_device_register; + return ret; } device_init_wakeup(&pdev->dev, true); @@ -339,24 +348,13 @@ static int snvs_rtc_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to request irq %d: %d\n", data->irq, ret); - goto error_rtc_device_register; + return ret; } data->rtc->ops = &snvs_rtc_ops; data->rtc->range_max = U32_MAX; - ret = rtc_register_device(data->rtc); - if (ret) { - dev_err(&pdev->dev, "failed to register rtc: %d\n", ret); - goto error_rtc_device_register; - } - - return 0; -error_rtc_device_register: - if (data->clk) - clk_disable_unprepare(data->clk); - - return ret; + return rtc_register_device(data->rtc); } static int __maybe_unused snvs_rtc_suspend_noirq(struct device *dev) diff --git a/drivers/rtc/rtc-starfire.c b/drivers/rtc/rtc-starfire.c index a7d49329d626..37a26279e107 100644 --- a/drivers/rtc/rtc-starfire.c +++ b/drivers/rtc/rtc-starfire.c @@ -27,7 +27,7 @@ static u32 starfire_get_time(void) static int starfire_read_time(struct device *dev, struct rtc_time *tm) { - rtc_time_to_tm(starfire_get_time(), tm); + rtc_time64_to_tm(starfire_get_time(), tm); return 0; } @@ -39,14 +39,16 @@ static int __init starfire_rtc_probe(struct platform_device *pdev) { struct 
rtc_device *rtc; - rtc = devm_rtc_device_register(&pdev->dev, "starfire", - &starfire_rtc_ops, THIS_MODULE); + rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); + rtc->ops = &starfire_rtc_ops; + rtc->range_max = U32_MAX; + platform_set_drvdata(pdev, rtc); - return 0; + return rtc_register_device(rtc); } static struct platform_driver starfire_rtc_driver = { diff --git a/drivers/rtc/rtc-sun6i.c b/drivers/rtc/rtc-sun6i.c index 852f5f3b3592..e2b8b150bcb4 100644 --- a/drivers/rtc/rtc-sun6i.c +++ b/drivers/rtc/rtc-sun6i.c @@ -108,7 +108,6 @@ * driver, even though it is somewhat limited. */ #define SUN6I_YEAR_MIN 1970 -#define SUN6I_YEAR_MAX 2033 #define SUN6I_YEAR_OFF (SUN6I_YEAR_MIN - 1900) /* @@ -250,19 +249,17 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, writel(reg, rtc->base + SUN6I_LOSC_CTRL); } - /* Switch to the external, more precise, oscillator */ - reg |= SUN6I_LOSC_CTRL_EXT_OSC; - if (rtc->data->has_losc_en) - reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN; + /* Switch to the external, more precise, oscillator, if present */ + if (of_get_property(node, "clocks", NULL)) { + reg |= SUN6I_LOSC_CTRL_EXT_OSC; + if (rtc->data->has_losc_en) + reg |= SUN6I_LOSC_CTRL_EXT_LOSC_EN; + } writel(reg, rtc->base + SUN6I_LOSC_CTRL); /* Yes, I know, this is ugly. */ sun6i_rtc = rtc; - /* Deal with old DTs */ - if (!of_get_property(node, "clocks", NULL)) - goto err; - /* Only read IOSC name from device tree if it is exported */ if (rtc->data->export_iosc) of_property_read_string_index(node, "clock-output-names", 2, @@ -279,11 +276,13 @@ static void __init sun6i_rtc_clk_init(struct device_node *node, } parents[0] = clk_hw_get_name(rtc->int_osc); + /* If there is no external oscillator, this will be NULL and ... */ parents[1] = of_clk_get_parent_name(node, 0); rtc->hw.init = &init; init.parent_names = parents; + /* ... number of clock parents will be 1. 
*/ init.num_parents = of_clk_get_parent_count(node) + 1; of_property_read_string_index(node, "clock-output-names", 0, &init.name); @@ -499,7 +498,7 @@ static int sun6i_rtc_getalarm(struct device *dev, struct rtc_wkalrm *wkalrm) wkalrm->enabled = !!(alrm_en & SUN6I_ALRM_EN_CNT_EN); wkalrm->pending = !!(alrm_st & SUN6I_ALRM_EN_CNT_EN); - rtc_time_to_tm(chip->alarm, &wkalrm->time); + rtc_time64_to_tm(chip->alarm, &wkalrm->time); return 0; } @@ -520,8 +519,8 @@ static int sun6i_rtc_setalarm(struct device *dev, struct rtc_wkalrm *wkalrm) return -EINVAL; } - rtc_tm_to_time(alrm_tm, &time_set); - rtc_tm_to_time(&tm_now, &time_now); + time_set = rtc_tm_to_time64(alrm_tm); + time_now = rtc_tm_to_time64(&tm_now); if (time_set <= time_now) { dev_err(dev, "Date to set in the past\n"); return -EINVAL; @@ -569,14 +568,6 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm) struct sun6i_rtc_dev *chip = dev_get_drvdata(dev); u32 date = 0; u32 time = 0; - int year; - - year = rtc_tm->tm_year + 1900; - if (year < SUN6I_YEAR_MIN || year > SUN6I_YEAR_MAX) { - dev_err(dev, "rtc only supports year in range %d - %d\n", - SUN6I_YEAR_MIN, SUN6I_YEAR_MAX); - return -EINVAL; - } rtc_tm->tm_year -= SUN6I_YEAR_OFF; rtc_tm->tm_mon += 1; @@ -585,7 +576,7 @@ static int sun6i_rtc_settime(struct device *dev, struct rtc_time *rtc_tm) SUN6I_DATE_SET_MON_VALUE(rtc_tm->tm_mon) | SUN6I_DATE_SET_YEAR_VALUE(rtc_tm->tm_year); - if (is_leap_year(year)) + if (is_leap_year(rtc_tm->tm_year + SUN6I_YEAR_MIN)) date |= SUN6I_LEAP_SET_VALUE(1); time = SUN6I_TIME_SET_SEC_VALUE(rtc_tm->tm_sec) | @@ -726,12 +717,16 @@ static int sun6i_rtc_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); - chip->rtc = devm_rtc_device_register(&pdev->dev, "rtc-sun6i", - &sun6i_rtc_ops, THIS_MODULE); - if (IS_ERR(chip->rtc)) { - dev_err(&pdev->dev, "unable to register device\n"); + chip->rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(chip->rtc)) return PTR_ERR(chip->rtc); - } + + chip->rtc->ops = &sun6i_rtc_ops; + chip->rtc->range_max = 2019686399LL; /* 2033-12-31 23:59:59 */ + + ret = rtc_register_device(chip->rtc); + if (ret) + return ret; dev_info(&pdev->dev, "RTC enabled\n"); diff --git a/drivers/rtc/rtc-zynqmp.c b/drivers/rtc/rtc-zynqmp.c index 5786866c09e9..4b1077e2f826 100644 --- a/drivers/rtc/rtc-zynqmp.c +++ b/drivers/rtc/rtc-zynqmp.c @@ -38,6 +38,8 @@ #define RTC_CALIB_DEF 0x198233 #define RTC_CALIB_MASK 0x1FFFFF +#define RTC_ALRM_MASK BIT(1) +#define RTC_MSEC 1000 struct xlnx_rtc_dev { struct rtc_device *rtc; @@ -123,11 +125,28 @@ static int xlnx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) static int xlnx_rtc_alarm_irq_enable(struct device *dev, u32 enabled) { struct xlnx_rtc_dev *xrtcdev = dev_get_drvdata(dev); + unsigned int status; + ulong timeout; + + timeout = jiffies + msecs_to_jiffies(RTC_MSEC); + + if (enabled) { + while (1) { + status = readl(xrtcdev->reg_base + RTC_INT_STS); + if (!((status & RTC_ALRM_MASK) == RTC_ALRM_MASK)) + break; + + if (time_after_eq(jiffies, timeout)) { + dev_err(dev, "Time out occur, while clearing alarm status bit\n"); + return -ETIMEDOUT; + } + writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS); + } - if (enabled) writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_EN); - else + } else { writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS); + } return 0; } @@ -183,8 +202,8 @@ static irqreturn_t xlnx_rtc_interrupt(int irq, void *id) if (!(status & (RTC_INT_SEC | RTC_INT_ALRM))) return IRQ_NONE; - /* Clear RTC_INT_ALRM interrupt only */ - 
writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_STS); + /* Disable RTC_INT_ALRM interrupt only */ + writel(RTC_INT_ALRM, xrtcdev->reg_base + RTC_INT_DIS); if (status & RTC_INT_ALRM) rtc_update_irq(xrtcdev->rtc, 1, RTC_IRQF | RTC_AF); diff --git a/drivers/rtc/sysfs.c b/drivers/rtc/sysfs.c index b7ca7d79fb28..950fac0d41ff 100644 --- a/drivers/rtc/sysfs.c +++ b/drivers/rtc/sysfs.c @@ -279,7 +279,7 @@ static bool rtc_does_wakealarm(struct rtc_device *rtc) static umode_t rtc_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct rtc_device *rtc = to_rtc_device(dev); umode_t mode = attr->mode; diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 80d22290f268..384edffe5cb4 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -57,11 +57,26 @@ static size_t dcssblk_dax_copy_to_iter(struct dax_device *dax_dev, return copy_to_iter(addr, bytes, i); } +static int dcssblk_dax_zero_page_range(struct dax_device *dax_dev, + pgoff_t pgoff, size_t nr_pages) +{ + long rc; + void *kaddr; + + rc = dax_direct_access(dax_dev, pgoff, nr_pages, &kaddr, NULL); + if (rc < 0) + return rc; + memset(kaddr, 0, nr_pages << PAGE_SHIFT); + dax_flush(dax_dev, kaddr, nr_pages << PAGE_SHIFT); + return 0; +} + static const struct dax_operations dcssblk_dax_ops = { .direct_access = dcssblk_dax_direct_access, .dax_supported = generic_fsdax_supported, .copy_from_iter = dcssblk_dax_copy_from_iter, .copy_to_iter = dcssblk_dax_copy_to_iter, + .zero_page_range = dcssblk_dax_zero_page_range, }; struct dcssblk_dev_info { @@ -680,8 +695,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->dax_dev = alloc_dax(dev_info, dev_info->gd->disk_name, &dcssblk_dax_ops, DAXDEV_F_SYNC); - if (!dev_info->dax_dev) { - rc = -ENOMEM; + if (IS_ERR(dev_info->dax_dev)) { + rc = PTR_ERR(dev_info->dax_dev); + dev_info->dax_dev = NULL; goto put_dev; } diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig index 1778f8c62861..425ab6f7e375 100644 --- a/drivers/soc/Kconfig +++ b/drivers/soc/Kconfig @@ -22,5 +22,6 @@ source "drivers/soc/ux500/Kconfig" source "drivers/soc/versatile/Kconfig" source "drivers/soc/xilinx/Kconfig" source "drivers/soc/zte/Kconfig" +source "drivers/soc/kendryte/Kconfig" endmenu diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile index a39f17cea376..36452bed86ef 100644 --- a/drivers/soc/Makefile +++ b/drivers/soc/Makefile @@ -28,3 +28,4 @@ obj-$(CONFIG_ARCH_U8500) += ux500/ obj-$(CONFIG_PLAT_VERSATILE) += versatile/ obj-y += xilinx/ obj-$(CONFIG_ARCH_ZX) += zte/ +obj-$(CONFIG_SOC_KENDRYTE) += kendryte/ diff --git a/drivers/soc/kendryte/Kconfig b/drivers/soc/kendryte/Kconfig new file mode 100644 index 000000000000..49785b1b0217 --- /dev/null +++ b/drivers/soc/kendryte/Kconfig @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0 + +if SOC_KENDRYTE + +config K210_SYSCTL + bool "Kendryte K210 system controller" + default y + depends on RISCV + help + Enables controlling the K210 various clocks and to enable + general purpose use of the extra 2MB of SRAM normally + reserved for the AI engine. 
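The k210-sysctl driver added below derives the CPU clock rate from the 26 MHz input oscillator, the PLL0 divider fields and the ACLK selector. The following standalone sketch mirrors that arithmetic; the field values and the aclk_sel divisor are hypothetical examples, not the chip's reset defaults.

#include <stdint.h>
#include <stdio.h>

#define K210_IN_FREQ 26000000ULL	/* CLK0 input oscillator */

/* PLL0 stores each divider as (value - 1); the hardware uses field + 1 */
static uint64_t k210_pll0_hz(unsigned int clkr0, unsigned int clkf0,
			     unsigned int clkod0)
{
	return (clkf0 + 1) * K210_IN_FREQ / ((clkr0 + 1) * (clkod0 + 1));
}

int main(void)
{
	/* hypothetical register fields: clkr0 = 0, clkf0 = 59, clkod0 = 1 */
	uint64_t pll0 = k210_pll0_hz(0, 59, 1);
	unsigned int aclk_sel = 1;	/* hypothetical CLKSEL0 divisor field */

	/* when CLKSEL_ACLK selects PLL0, ACLK = PLL0 / (2 << aclk_sel) */
	printf("PLL0 = %llu Hz, ACLK = %llu Hz\n",
	       (unsigned long long)pll0,
	       (unsigned long long)(pll0 / (2ULL << aclk_sel)));
	return 0;
}

The driver's recalc_rate callback performs the same computation on the live PLL0 and CLKSEL0 registers, falling back to the raw 26 MHz clock when CLKSEL_ACLK is clear.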
+ +endif diff --git a/drivers/soc/kendryte/Makefile b/drivers/soc/kendryte/Makefile new file mode 100644 index 000000000000..002d9ce95c0d --- /dev/null +++ b/drivers/soc/kendryte/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_K210_SYSCTL) += k210-sysctl.o diff --git a/drivers/soc/kendryte/k210-sysctl.c b/drivers/soc/kendryte/k210-sysctl.c new file mode 100644 index 000000000000..4608fbca20e1 --- /dev/null +++ b/drivers/soc/kendryte/k210-sysctl.c @@ -0,0 +1,248 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (c) 2019 Christoph Hellwig. + * Copyright (c) 2019 Western Digital Corporation or its affiliates. + */ +#include <linux/types.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/clk-provider.h> +#include <linux/clkdev.h> +#include <linux/bitfield.h> +#include <asm/soc.h> + +#define K210_SYSCTL_CLK0_FREQ 26000000UL + +/* Registers base address */ +#define K210_SYSCTL_SYSCTL_BASE_ADDR 0x50440000ULL + +/* Registers */ +#define K210_SYSCTL_PLL0 0x08 +#define K210_SYSCTL_PLL1 0x0c +/* clkr: 4bits, clkf1: 6bits, clkod: 4bits, bwadj: 4bits */ +#define PLL_RESET (1 << 20) +#define PLL_PWR (1 << 21) +#define PLL_INTFB (1 << 22) +#define PLL_BYPASS (1 << 23) +#define PLL_TEST (1 << 24) +#define PLL_OUT_EN (1 << 25) +#define PLL_TEST_EN (1 << 26) +#define K210_SYSCTL_PLL_LOCK 0x18 +#define PLL0_LOCK1 (1 << 0) +#define PLL0_LOCK2 (1 << 1) +#define PLL0_SLIP_CLEAR (1 << 2) +#define PLL0_TEST_CLK_OUT (1 << 3) +#define PLL1_LOCK1 (1 << 8) +#define PLL1_LOCK2 (1 << 9) +#define PLL1_SLIP_CLEAR (1 << 10) +#define PLL1_TEST_CLK_OUT (1 << 11) +#define PLL2_LOCK1 (1 << 16) +#define PLL2_LOCK2 (1 << 16) +#define PLL2_SLIP_CLEAR (1 << 18) +#define PLL2_TEST_CLK_OUT (1 << 19) +#define K210_SYSCTL_CLKSEL0 0x20 +#define CLKSEL_ACLK (1 << 0) +#define K210_SYSCTL_CLKEN_CENT 0x28 +#define CLKEN_CPU (1 << 0) +#define CLKEN_SRAM0 (1 << 1) +#define CLKEN_SRAM1 (1 << 2) +#define CLKEN_APB0 (1 << 3) +#define CLKEN_APB1 (1 << 4) +#define CLKEN_APB2 (1 << 5) +#define K210_SYSCTL_CLKEN_PERI 0x2c +#define CLKEN_ROM (1 << 0) +#define CLKEN_DMA (1 << 1) +#define CLKEN_AI (1 << 2) +#define CLKEN_DVP (1 << 3) +#define CLKEN_FFT (1 << 4) +#define CLKEN_GPIO (1 << 5) +#define CLKEN_SPI0 (1 << 6) +#define CLKEN_SPI1 (1 << 7) +#define CLKEN_SPI2 (1 << 8) +#define CLKEN_SPI3 (1 << 9) +#define CLKEN_I2S0 (1 << 10) +#define CLKEN_I2S1 (1 << 11) +#define CLKEN_I2S2 (1 << 12) +#define CLKEN_I2C0 (1 << 13) +#define CLKEN_I2C1 (1 << 14) +#define CLKEN_I2C2 (1 << 15) +#define CLKEN_UART1 (1 << 16) +#define CLKEN_UART2 (1 << 17) +#define CLKEN_UART3 (1 << 18) +#define CLKEN_AES (1 << 19) +#define CLKEN_FPIO (1 << 20) +#define CLKEN_TIMER0 (1 << 21) +#define CLKEN_TIMER1 (1 << 22) +#define CLKEN_TIMER2 (1 << 23) +#define CLKEN_WDT0 (1 << 24) +#define CLKEN_WDT1 (1 << 25) +#define CLKEN_SHA (1 << 26) +#define CLKEN_OTP (1 << 27) +#define CLKEN_RTC (1 << 29) + +struct k210_sysctl { + void __iomem *regs; + struct clk_hw hw; +}; + +static void k210_set_bits(u32 val, void __iomem *reg) +{ + writel(readl(reg) | val, reg); +} + +static void k210_clear_bits(u32 val, void __iomem *reg) +{ + writel(readl(reg) & ~val, reg); +} + +static void k210_pll1_enable(void __iomem *regs) +{ + u32 val; + + val = readl(regs + K210_SYSCTL_PLL1); + val &= ~GENMASK(19, 0); /* clkr1 = 0 */ + val |= FIELD_PREP(GENMASK(9, 4), 0x3B); /* clkf1 = 59 */ + val |= FIELD_PREP(GENMASK(13, 10), 0x3); /* clkod1 = 3 */ + val |= FIELD_PREP(GENMASK(19, 14), 0x3B); /* bwadj1 = 59 */ + 
writel(val, regs + K210_SYSCTL_PLL1); + + k210_clear_bits(PLL_BYPASS, regs + K210_SYSCTL_PLL1); + k210_set_bits(PLL_PWR, regs + K210_SYSCTL_PLL1); + + /* + * Reset the pll. The magic NOPs come from the Kendryte reference SDK. + */ + k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1); + k210_set_bits(PLL_RESET, regs + K210_SYSCTL_PLL1); + nop(); + nop(); + k210_clear_bits(PLL_RESET, regs + K210_SYSCTL_PLL1); + + for (;;) { + val = readl(regs + K210_SYSCTL_PLL_LOCK); + if (val & PLL1_LOCK2) + break; + writel(val | PLL1_SLIP_CLEAR, regs + K210_SYSCTL_PLL_LOCK); + } + + k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL1); +} + +static unsigned long k210_sysctl_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct k210_sysctl *s = container_of(hw, struct k210_sysctl, hw); + u32 clksel0, pll0; + u64 pll0_freq, clkr0, clkf0, clkod0; + + /* + * If the clock selector is not set, use the base frequency. + * Otherwise, use PLL0 frequency with a frequency divisor. + */ + clksel0 = readl(s->regs + K210_SYSCTL_CLKSEL0); + if (!(clksel0 & CLKSEL_ACLK)) + return K210_SYSCTL_CLK0_FREQ; + + /* + * Get PLL0 frequency: + * freq = base frequency * clkf0 / (clkr0 * clkod0) + */ + pll0 = readl(s->regs + K210_SYSCTL_PLL0); + clkr0 = 1 + FIELD_GET(GENMASK(3, 0), pll0); + clkf0 = 1 + FIELD_GET(GENMASK(9, 4), pll0); + clkod0 = 1 + FIELD_GET(GENMASK(13, 10), pll0); + pll0_freq = clkf0 * K210_SYSCTL_CLK0_FREQ / (clkr0 * clkod0); + + /* Get the frequency divisor from the clock selector */ + return pll0_freq / (2ULL << FIELD_GET(0x00000006, clksel0)); +} + +static const struct clk_ops k210_sysctl_clk_ops = { + .recalc_rate = k210_sysctl_clk_recalc_rate, +}; + +static const struct clk_init_data k210_clk_init_data = { + .name = "k210-sysctl-pll1", + .ops = &k210_sysctl_clk_ops, +}; + +static int k210_sysctl_probe(struct platform_device *pdev) +{ + struct k210_sysctl *s; + int error; + + pr_info("Kendryte K210 SoC sysctl\n"); + + s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->regs = devm_ioremap_resource(&pdev->dev, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + if (IS_ERR(s->regs)) + return PTR_ERR(s->regs); + + s->hw.init = &k210_clk_init_data; + error = devm_clk_hw_register(&pdev->dev, &s->hw); + if (error) { + dev_err(&pdev->dev, "failed to register clk"); + return error; + } + + error = devm_of_clk_add_hw_provider(&pdev->dev, of_clk_hw_simple_get, + &s->hw); + if (error) { + dev_err(&pdev->dev, "adding clk provider failed\n"); + return error; + } + + return 0; +} + +static const struct of_device_id k210_sysctl_of_match[] = { + { .compatible = "kendryte,k210-sysctl", }, + {} +}; + +static struct platform_driver k210_sysctl_driver = { + .driver = { + .name = "k210-sysctl", + .of_match_table = k210_sysctl_of_match, + }, + .probe = k210_sysctl_probe, +}; + +static int __init k210_sysctl_init(void) +{ + return platform_driver_register(&k210_sysctl_driver); +} +core_initcall(k210_sysctl_init); + +/* + * This needs to be called very early during initialization, given that + * PLL1 needs to be enabled to be able to use all SRAM. 
+ */ +static void __init k210_soc_early_init(const void *fdt) +{ + void __iomem *regs; + + regs = ioremap(K210_SYSCTL_SYSCTL_BASE_ADDR, 0x1000); + if (!regs) + panic("K210 sysctl ioremap"); + + /* Enable PLL1 to make the KPU SRAM useable */ + k210_pll1_enable(regs); + + k210_set_bits(PLL_OUT_EN, regs + K210_SYSCTL_PLL0); + + k210_set_bits(CLKEN_CPU | CLKEN_SRAM0 | CLKEN_SRAM1, + regs + K210_SYSCTL_CLKEN_CENT); + k210_set_bits(CLKEN_ROM | CLKEN_TIMER0 | CLKEN_RTC, + regs + K210_SYSCTL_CLKEN_PERI); + + k210_set_bits(CLKSEL_ACLK, regs + K210_SYSCTL_CLKSEL0); + + iounmap(regs); +} +SOC_EARLY_INIT_DECLARE(generic_k210, "kendryte,k210", k210_soc_early_init); diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 5a05db5438d6..91af271e9bb0 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -1,17 +1,18 @@ # SPDX-License-Identifier: GPL-2.0-only # -# Generic thermal sysfs drivers configuration +# Generic thermal drivers configuration # menuconfig THERMAL - bool "Generic Thermal sysfs driver" + bool "Thermal drivers" help - Generic Thermal Sysfs driver offers a generic mechanism for + Thermal drivers offer a generic mechanism for thermal management. Usually it's made up of one or more thermal - zone and cooling device. + zones and cooling devices. Each thermal zone contains its own temperature, trip points, - cooling devices. - All platforms with ACPI thermal support can use this driver. + and cooling devices. + All platforms with ACPI or Open Firmware thermal support can use + this driver. If you want this support, you should say Y here. if THERMAL @@ -251,6 +252,27 @@ config IMX_THERMAL cpufreq is used as the cooling device to throttle CPUs when the passive trip is crossed. +config IMX_SC_THERMAL + tristate "Temperature sensor driver for NXP i.MX SoCs with System Controller" + depends on IMX_SCU + depends on OF + help + Support for Temperature Monitor (TEMPMON) found on NXP i.MX SoCs with + system controller inside, Linux kernel has to communicate with system + controller via MU (message unit) IPC to get temperature from thermal + sensor. It supports one critical trip point and one + passive trip point for each thermal sensor. + +config IMX8MM_THERMAL + tristate "Temperature sensor driver for Freescale i.MX8MM SoC" + depends on ARCH_MXC || COMPILE_TEST + depends on OF + help + Support for Thermal Monitoring Unit (TMU) found on Freescale i.MX8MM SoC. + It supports one critical trip point and one passive trip point. The + cpufreq is used as the cooling device to throttle CPUs when the passive + trip is crossed. + config MAX77620_THERMAL tristate "Temperature sensor driver for Maxim MAX77620 PMIC" depends on MFD_MAX77620 @@ -265,6 +287,7 @@ config QORIQ_THERMAL tristate "QorIQ Thermal Monitoring Unit" depends on THERMAL_OF depends on HAS_IOMEM + select REGMAP_MMIO help Support for Thermal Monitoring Unit (TMU) found on QorIQ platforms. It supports one critical trip point and one passive trip point. The @@ -460,4 +483,11 @@ config UNIPHIER_THERMAL Enable this to plug in UniPhier on-chip PVT thermal driver into the thermal framework. The driver supports CPU thermal zone temperature reporting and a couple of trip points. + +config SPRD_THERMAL + tristate "Temperature sensor on Spreadtrum SoCs" + depends on ARCH_SPRD || COMPILE_TEST + help + Support for the Spreadtrum thermal sensor driver in the Linux thermal + framework. 
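Both new i.MX thermal drivers below follow the same framework pattern: allocate a per-sensor context, register it with devm_thermal_zone_of_sensor_register(), and report millidegrees Celsius from a get_temp callback. A minimal sketch of that pattern, with hypothetical foo_* names and the platform_driver boilerplate omitted:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/thermal.h>

struct foo_sensor {
	int last_mcelsius;		/* a real driver reads hardware here */
};

static int foo_get_temp(void *data, int *temp)
{
	struct foo_sensor *s = data;

	*temp = s->last_mcelsius;	/* millidegrees Celsius */
	return 0;
}

static const struct thermal_zone_of_device_ops foo_tz_ops = {
	.get_temp = foo_get_temp,
};

static int foo_thermal_probe(struct platform_device *pdev)
{
	struct thermal_zone_device *tzd;
	struct foo_sensor *s;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	/* sensor id 0 must match the zone's thermal-sensors specifier */
	tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, 0, s,
						   &foo_tz_ops);
	return PTR_ERR_OR_ZERO(tzd);
}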
endif diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile index 9fb88e26fb10..8c8ed7b79915 100644 --- a/drivers/thermal/Makefile +++ b/drivers/thermal/Makefile @@ -43,6 +43,8 @@ obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o obj-$(CONFIG_TANGO_THERMAL) += tango_thermal.o obj-$(CONFIG_IMX_THERMAL) += imx_thermal.o +obj-$(CONFIG_IMX_SC_THERMAL) += imx_sc_thermal.o +obj-$(CONFIG_IMX8MM_THERMAL) += imx8mm_thermal.o obj-$(CONFIG_MAX77620_THERMAL) += max77620_thermal.o obj-$(CONFIG_QORIQ_THERMAL) += qoriq_thermal.o obj-$(CONFIG_DA9062_THERMAL) += da9062-thermal.o @@ -57,3 +59,4 @@ obj-$(CONFIG_GENERIC_ADC_THERMAL) += thermal-generic-adc.o obj-$(CONFIG_ZX2967_THERMAL) += zx2967_thermal.o obj-$(CONFIG_UNIPHIER_THERMAL) += uniphier_thermal.o obj-$(CONFIG_AMLOGIC_THERMAL) += amlogic_thermal.o +obj-$(CONFIG_SPRD_THERMAL) += sprd_thermal.o diff --git a/drivers/thermal/cpufreq_cooling.c b/drivers/thermal/cpufreq_cooling.c index 4ae8c856c88e..e297e135c031 100644 --- a/drivers/thermal/cpufreq_cooling.c +++ b/drivers/thermal/cpufreq_cooling.c @@ -273,7 +273,7 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; /* Request state should be less than max_level */ - if (WARN_ON(state > cpufreq_cdev->max_level)) + if (state > cpufreq_cdev->max_level) return -EINVAL; num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus); @@ -437,7 +437,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, int ret; /* Request state should be less than max_level */ - if (WARN_ON(state > cpufreq_cdev->max_level)) + if (state > cpufreq_cdev->max_level) return -EINVAL; /* Check if the old cooling action is same as new cooling action */ @@ -456,6 +456,7 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, capacity = frequency * max_capacity; capacity /= cpufreq_cdev->policy->cpuinfo.max_freq; arch_set_thermal_pressure(cpus, max_capacity - capacity); + ret = 0; } return ret; diff --git a/drivers/thermal/imx8mm_thermal.c b/drivers/thermal/imx8mm_thermal.c new file mode 100644 index 000000000000..0d60f8d7894f --- /dev/null +++ b/drivers/thermal/imx8mm_thermal.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020 NXP. 
+ * + * Author: Anson Huang <Anson.Huang@nxp.com> + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/thermal.h> + +#include "thermal_core.h" + +#define TER 0x0 /* TMU enable */ +#define TPS 0x4 +#define TRITSR 0x20 /* TMU immediate temp */ + +#define TER_EN BIT(31) +#define TRITSR_TEMP0_VAL_MASK 0xff +#define TRITSR_TEMP1_VAL_MASK 0xff0000 + +#define PROBE_SEL_ALL GENMASK(31, 30) + +#define probe_status_offset(x) (30 + x) +#define SIGN_BIT BIT(7) +#define TEMP_VAL_MASK GENMASK(6, 0) + +#define VER1_TEMP_LOW_LIMIT 10000 +#define VER2_TEMP_LOW_LIMIT -40000 +#define VER2_TEMP_HIGH_LIMIT 125000 + +#define TMU_VER1 0x1 +#define TMU_VER2 0x2 + +struct thermal_soc_data { + u32 num_sensors; + u32 version; + int (*get_temp)(void *, int *); +}; + +struct tmu_sensor { + struct imx8mm_tmu *priv; + u32 hw_id; + struct thermal_zone_device *tzd; +}; + +struct imx8mm_tmu { + void __iomem *base; + struct clk *clk; + const struct thermal_soc_data *socdata; + struct tmu_sensor sensors[0]; +}; + +static int imx8mm_tmu_get_temp(void *data, int *temp) +{ + struct tmu_sensor *sensor = data; + struct imx8mm_tmu *tmu = sensor->priv; + u32 val; + + val = readl_relaxed(tmu->base + TRITSR) & TRITSR_TEMP0_VAL_MASK; + *temp = val * 1000; + if (*temp < VER1_TEMP_LOW_LIMIT) + return -EAGAIN; + + return 0; +} + +static int imx8mp_tmu_get_temp(void *data, int *temp) +{ + struct tmu_sensor *sensor = data; + struct imx8mm_tmu *tmu = sensor->priv; + unsigned long val; + bool ready; + + val = readl_relaxed(tmu->base + TRITSR); + ready = test_bit(probe_status_offset(sensor->hw_id), &val); + if (!ready) + return -EAGAIN; + + val = sensor->hw_id ? FIELD_GET(TRITSR_TEMP1_VAL_MASK, val) : + FIELD_GET(TRITSR_TEMP0_VAL_MASK, val); + if (val & SIGN_BIT) /* negative */ + val = (~(val & TEMP_VAL_MASK) + 1); + + *temp = val * 1000; + if (*temp < VER2_TEMP_LOW_LIMIT || *temp > VER2_TEMP_HIGH_LIMIT) + return -EAGAIN; + + return 0; +} + +static int tmu_get_temp(void *data, int *temp) +{ + struct tmu_sensor *sensor = data; + struct imx8mm_tmu *tmu = sensor->priv; + + return tmu->socdata->get_temp(data, temp); +} + +static struct thermal_zone_of_device_ops tmu_tz_ops = { + .get_temp = tmu_get_temp, +}; + +static void imx8mm_tmu_enable(struct imx8mm_tmu *tmu, bool enable) +{ + u32 val; + + val = readl_relaxed(tmu->base + TER); + val = enable ? 
(val | TER_EN) : (val & ~TER_EN); + writel_relaxed(val, tmu->base + TER); +} + +static void imx8mm_tmu_probe_sel_all(struct imx8mm_tmu *tmu) +{ + u32 val; + + val = readl_relaxed(tmu->base + TPS); + val |= PROBE_SEL_ALL; + writel_relaxed(val, tmu->base + TPS); +} + +static int imx8mm_tmu_probe(struct platform_device *pdev) +{ + const struct thermal_soc_data *data; + struct imx8mm_tmu *tmu; + int ret; + int i; + + data = of_device_get_match_data(&pdev->dev); + + tmu = devm_kzalloc(&pdev->dev, struct_size(tmu, sensors, + data->num_sensors), GFP_KERNEL); + if (!tmu) + return -ENOMEM; + + tmu->socdata = data; + + tmu->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(tmu->base)) + return PTR_ERR(tmu->base); + + tmu->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(tmu->clk)) { + ret = PTR_ERR(tmu->clk); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, + "failed to get tmu clock: %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(tmu->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable tmu clock: %d\n", ret); + return ret; + } + + /* disable the monitor during initialization */ + imx8mm_tmu_enable(tmu, false); + + for (i = 0; i < data->num_sensors; i++) { + tmu->sensors[i].priv = tmu; + tmu->sensors[i].tzd = + devm_thermal_zone_of_sensor_register(&pdev->dev, i, + &tmu->sensors[i], + &tmu_tz_ops); + if (IS_ERR(tmu->sensors[i].tzd)) { + dev_err(&pdev->dev, + "failed to register thermal zone sensor[%d]: %d\n", + i, ret); + return PTR_ERR(tmu->sensors[i].tzd); + } + tmu->sensors[i].hw_id = i; + } + + platform_set_drvdata(pdev, tmu); + + /* enable all the probes for V2 TMU */ + if (tmu->socdata->version == TMU_VER2) + imx8mm_tmu_probe_sel_all(tmu); + + /* enable the monitor */ + imx8mm_tmu_enable(tmu, true); + + return 0; +} + +static int imx8mm_tmu_remove(struct platform_device *pdev) +{ + struct imx8mm_tmu *tmu = platform_get_drvdata(pdev); + + /* disable TMU */ + imx8mm_tmu_enable(tmu, false); + + clk_disable_unprepare(tmu->clk); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct thermal_soc_data imx8mm_tmu_data = { + .num_sensors = 1, + .version = TMU_VER1, + .get_temp = imx8mm_tmu_get_temp, +}; + +static struct thermal_soc_data imx8mp_tmu_data = { + .num_sensors = 2, + .version = TMU_VER2, + .get_temp = imx8mp_tmu_get_temp, +}; + +static const struct of_device_id imx8mm_tmu_table[] = { + { .compatible = "fsl,imx8mm-tmu", .data = &imx8mm_tmu_data, }, + { .compatible = "fsl,imx8mp-tmu", .data = &imx8mp_tmu_data, }, + { }, +}; + +static struct platform_driver imx8mm_tmu = { + .driver = { + .name = "i.mx8mm_thermal", + .of_match_table = imx8mm_tmu_table, + }, + .probe = imx8mm_tmu_probe, + .remove = imx8mm_tmu_remove, +}; +module_platform_driver(imx8mm_tmu); + +MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); +MODULE_DESCRIPTION("i.MX8MM Thermal Monitor Unit driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/imx_sc_thermal.c b/drivers/thermal/imx_sc_thermal.c new file mode 100644 index 000000000000..a8723b1eb8b0 --- /dev/null +++ b/drivers/thermal/imx_sc_thermal.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Copyright 2018-2020 NXP. 
+ */ + +#include <linux/err.h> +#include <linux/firmware/imx/sci.h> +#include <linux/firmware/imx/types.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/thermal.h> + +#include "thermal_core.h" + +#define IMX_SC_MISC_FUNC_GET_TEMP 13 + +static struct imx_sc_ipc *thermal_ipc_handle; + +struct imx_sc_sensor { + struct thermal_zone_device *tzd; + u32 resource_id; +}; + +struct req_get_temp { + u16 resource_id; + u8 type; +} __packed __aligned(4); + +struct resp_get_temp { + s16 celsius; + s8 tenths; +} __packed __aligned(4); + +struct imx_sc_msg_misc_get_temp { + struct imx_sc_rpc_msg hdr; + union { + struct req_get_temp req; + struct resp_get_temp resp; + } data; +} __packed __aligned(4); + +static int imx_sc_thermal_get_temp(void *data, int *temp) +{ + struct imx_sc_msg_misc_get_temp msg; + struct imx_sc_rpc_msg *hdr = &msg.hdr; + struct imx_sc_sensor *sensor = data; + int ret; + + msg.data.req.resource_id = sensor->resource_id; + msg.data.req.type = IMX_SC_C_TEMP; + + hdr->ver = IMX_SC_RPC_VERSION; + hdr->svc = IMX_SC_RPC_SVC_MISC; + hdr->func = IMX_SC_MISC_FUNC_GET_TEMP; + hdr->size = 2; + + ret = imx_scu_call_rpc(thermal_ipc_handle, &msg, true); + if (ret) { + dev_err(&sensor->tzd->device, "read temp sensor %d failed, ret %d\n", + sensor->resource_id, ret); + return ret; + } + + *temp = msg.data.resp.celsius * 1000 + msg.data.resp.tenths * 100; + + return 0; +} + +static const struct thermal_zone_of_device_ops imx_sc_thermal_ops = { + .get_temp = imx_sc_thermal_get_temp, +}; + +static int imx_sc_thermal_probe(struct platform_device *pdev) +{ + struct device_node *np, *child, *sensor_np; + struct imx_sc_sensor *sensor; + int ret; + + ret = imx_scu_get_handle(&thermal_ipc_handle); + if (ret) + return ret; + + np = of_find_node_by_name(NULL, "thermal-zones"); + if (!np) + return -ENODEV; + + sensor_np = of_node_get(pdev->dev.of_node); + + for_each_available_child_of_node(np, child) { + sensor = devm_kzalloc(&pdev->dev, sizeof(*sensor), GFP_KERNEL); + if (!sensor) { + of_node_put(sensor_np); + return -ENOMEM; + } + + ret = thermal_zone_of_get_sensor_id(child, + sensor_np, + &sensor->resource_id); + if (ret < 0) { + dev_err(&pdev->dev, + "failed to get valid sensor resource id: %d\n", + ret); + break; + } + + sensor->tzd = devm_thermal_zone_of_sensor_register(&pdev->dev, + sensor->resource_id, + sensor, + &imx_sc_thermal_ops); + if (IS_ERR(sensor->tzd)) { + dev_err(&pdev->dev, "failed to register thermal zone\n"); + ret = PTR_ERR(sensor->tzd); + break; + } + } + + of_node_put(sensor_np); + + return ret; +} + +static int imx_sc_thermal_remove(struct platform_device *pdev) +{ + return 0; +} + +static const struct of_device_id imx_sc_thermal_table[] = { + { .compatible = "fsl,imx-sc-thermal", }, + {} +}; +MODULE_DEVICE_TABLE(of, imx_sc_thermal_table); + +static struct platform_driver imx_sc_thermal_driver = { + .probe = imx_sc_thermal_probe, + .remove = imx_sc_thermal_remove, + .driver = { + .name = "imx-sc-thermal", + .of_match_table = imx_sc_thermal_table, + }, +}; +module_platform_driver(imx_sc_thermal_driver); + +MODULE_AUTHOR("Anson Huang <Anson.Huang@nxp.com>"); +MODULE_DESCRIPTION("Thermal driver for NXP i.MX SoCs with system controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index bb6754a5342c..e761c9b42217 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -3,24 +3,17 @@ // Copyright 
2013 Freescale Semiconductor, Inc. #include <linux/clk.h> -#include <linux/cpu.h> #include <linux/cpufreq.h> #include <linux/cpu_cooling.h> #include <linux/delay.h> -#include <linux/device.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> -#include <linux/platform_device.h> #include <linux/regmap.h> -#include <linux/slab.h> #include <linux/thermal.h> -#include <linux/types.h> #include <linux/nvmem-consumer.h> #define REG_SET 0x4 @@ -872,14 +865,12 @@ static int imx_thermal_remove(struct platform_device *pdev) clk_disable_unprepare(data->thermal_clk); thermal_zone_device_unregister(data->tz); - cpufreq_cooling_unregister(data->cdev); - cpufreq_cpu_put(data->policy); + imx_thermal_unregister_legacy_cooling(data); return 0; } -#ifdef CONFIG_PM_SLEEP -static int imx_thermal_suspend(struct device *dev) +static int __maybe_unused imx_thermal_suspend(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); struct regmap *map = data->tempmon; @@ -900,7 +891,7 @@ static int imx_thermal_suspend(struct device *dev) return 0; } -static int imx_thermal_resume(struct device *dev) +static int __maybe_unused imx_thermal_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); struct regmap *map = data->tempmon; @@ -918,7 +909,6 @@ static int imx_thermal_resume(struct device *dev) return 0; } -#endif static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops, imx_thermal_suspend, imx_thermal_resume); diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c index efae0c02d898..ceef89c956bd 100644 --- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c @@ -65,7 +65,7 @@ static ssize_t available_uuids_show(struct device *dev, for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) { if (priv->uuid_bitmap & (1 << i)) if (PAGE_SIZE - length > 0) - length += snprintf(&buf[length], + length += scnprintf(&buf[length], PAGE_SIZE - length, "%s\n", int3400_thermal_uuids[i]); @@ -369,8 +369,8 @@ static int int3400_thermal_remove(struct platform_device *pdev) } static const struct acpi_device_id int3400_thermal_match[] = { - {"INT1040", 0}, {"INT3400", 0}, + {"INTC1040", 0}, {} }; diff --git a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c index aeece1e136a5..f86cbb125e2f 100644 --- a/drivers/thermal/intel/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/intel/int340x_thermal/int3403_thermal.c @@ -282,8 +282,8 @@ static int int3403_remove(struct platform_device *pdev) } static const struct acpi_device_id int3403_device_ids[] = { - {"INT1043", 0}, {"INT3403", 0}, + {"INTC1043", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, int3403_device_ids); diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c index b1fd34516e28..297db1d2d960 100644 --- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c @@ -45,6 +45,9 @@ /* JasperLake thermal reporting device */ #define PCI_DEVICE_ID_PROC_JSL_THERMAL 0x4503 +/* TigerLake thermal reporting device */ +#define PCI_DEVICE_ID_PROC_TGL_THERMAL 0x9A03 + #define DRV_NAME "proc_thermal" struct power_config { @@ -728,6 +731,8 @@ 
static const struct pci_device_id proc_thermal_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_ICL_THERMAL), .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_JSL_THERMAL)}, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_TGL_THERMAL), + .driver_data = (kernel_ulong_t)&rapl_mmio_hsw, }, { 0, }, }; diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index ef0baa954ff0..874a47d6923f 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -449,6 +449,50 @@ thermal_zone_of_add_sensor(struct device_node *zone, } /** + * thermal_zone_of_get_sensor_id - get sensor ID from a DT thermal zone + * @tz_np: a valid thermal zone device node. + * @sensor_np: a sensor node of a valid sensor device. + * @id: the sensor ID returned if success. + * + * This function will get sensor ID from a given thermal zone node and + * the sensor node must match the temperature provider @sensor_np. + * + * Return: 0 on success, proper error code otherwise. + */ + +int thermal_zone_of_get_sensor_id(struct device_node *tz_np, + struct device_node *sensor_np, + u32 *id) +{ + struct of_phandle_args sensor_specs; + int ret; + + ret = of_parse_phandle_with_args(tz_np, + "thermal-sensors", + "#thermal-sensor-cells", + 0, + &sensor_specs); + if (ret) + return ret; + + if (sensor_specs.np != sensor_np) { + of_node_put(sensor_specs.np); + return -ENODEV; + } + + if (sensor_specs.args_count > 1) + pr_warn("%pOFn: too many cells in sensor specifier %d\n", + sensor_specs.np, sensor_specs.args_count); + + *id = sensor_specs.args_count ? sensor_specs.args[0] : 0; + + of_node_put(sensor_specs.np); + + return 0; +} +EXPORT_SYMBOL_GPL(thermal_zone_of_get_sensor_id); + +/** * thermal_zone_of_sensor_register - registers a sensor to a DT thermal zone * @dev: a valid struct device pointer of a sensor device. Must contain * a valid .of_node, for the sensor node. 
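The thermal_zone_of_get_sensor_id() helper introduced above lets a sensor driver find the device-tree thermal zone that points back at it; the refactored thermal_zone_of_sensor_register() in the next hunk and the imx_sc_thermal probe earlier in this diff use it the same way. A condensed sketch of that lookup loop, assuming a hypothetical foo_add_sensor() helper for the per-sensor registration:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

static int foo_add_sensor(struct platform_device *pdev, u32 id)
{
	/* hypothetical: register one zone sensor with this id */
	return 0;
}

static int foo_register_sensors(struct platform_device *pdev)
{
	struct device_node *np, *child, *sensor_np;
	int ret = 0;
	u32 id;

	np = of_find_node_by_name(NULL, "thermal-zones");
	if (!np)
		return -ENODEV;

	sensor_np = of_node_get(pdev->dev.of_node);

	for_each_available_child_of_node(np, child) {
		/* zones whose thermal-sensors phandle is not us are skipped */
		if (thermal_zone_of_get_sensor_id(child, sensor_np, &id))
			continue;

		ret = foo_add_sensor(pdev, id);
		if (ret) {
			of_node_put(child);
			break;
		}
	}

	of_node_put(sensor_np);
	of_node_put(np);

	return ret;
}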
@@ -499,36 +543,22 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, sensor_np = of_node_get(dev->of_node); for_each_available_child_of_node(np, child) { - struct of_phandle_args sensor_specs; int ret, id; /* For now, thermal framework supports only 1 sensor per zone */ - ret = of_parse_phandle_with_args(child, "thermal-sensors", - "#thermal-sensor-cells", - 0, &sensor_specs); + ret = thermal_zone_of_get_sensor_id(child, sensor_np, &id); if (ret) continue; - if (sensor_specs.args_count >= 1) { - id = sensor_specs.args[0]; - WARN(sensor_specs.args_count > 1, - "%pOFn: too many cells in sensor specifier %d\n", - sensor_specs.np, sensor_specs.args_count); - } else { - id = 0; - } - - if (sensor_specs.np == sensor_np && id == sensor_id) { + if (id == sensor_id) { tzd = thermal_zone_of_add_sensor(child, sensor_np, data, ops); if (!IS_ERR(tzd)) tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED); - of_node_put(sensor_specs.np); of_node_put(child); goto exit; } - of_node_put(sensor_specs.np); } exit: of_node_put(sensor_np); diff --git a/drivers/thermal/qcom/tsens-8960.c b/drivers/thermal/qcom/tsens-8960.c index fb77acb8d13b..2a28a5af209e 100644 --- a/drivers/thermal/qcom/tsens-8960.c +++ b/drivers/thermal/qcom/tsens-8960.c @@ -245,7 +245,7 @@ static inline int code_to_mdegC(u32 adc_code, const struct tsens_sensor *s) return adc_code * slope + offset; } -static int get_temp_8960(struct tsens_sensor *s, int *temp) +static int get_temp_8960(const struct tsens_sensor *s, int *temp) { int ret; u32 code, trdy; @@ -279,7 +279,7 @@ static const struct tsens_ops ops_8960 = { .resume = resume_8960, }; -const struct tsens_plat_data data_8960 = { +struct tsens_plat_data data_8960 = { .num_sensors = 11, .ops = &ops_8960, }; diff --git a/drivers/thermal/qcom/tsens-common.c b/drivers/thermal/qcom/tsens-common.c index c8d57ee0a5bb..172545366636 100644 --- a/drivers/thermal/qcom/tsens-common.c +++ b/drivers/thermal/qcom/tsens-common.c @@ -23,6 +23,10 @@ * @low_thresh: lower threshold temperature value * @low_irq_mask: mask register for lower threshold irqs * @low_irq_clear: clear register for lower threshold irqs + * @crit_viol: critical threshold violated + * @crit_thresh: critical threshold temperature value + * @crit_irq_mask: mask register for critical threshold irqs + * @crit_irq_clear: clear register for critical threshold irqs * * Structure containing data about temperature threshold settings and * irq status if they were violated. @@ -36,6 +40,10 @@ struct tsens_irq_data { int low_thresh; u32 low_irq_mask; u32 low_irq_clear; + u32 crit_viol; + u32 crit_thresh; + u32 crit_irq_mask; + u32 crit_irq_clear; }; char *qfprom_read(struct device *dev, const char *cname) @@ -128,7 +136,7 @@ static inline int code_to_degc(u32 adc_code, const struct tsens_sensor *s) * Return: Temperature in milliCelsius on success, a negative errno will * be returned in error cases */ -static int tsens_hw_to_mC(struct tsens_sensor *s, int field) +static int tsens_hw_to_mC(const struct tsens_sensor *s, int field) { struct tsens_priv *priv = s->priv; u32 resolution; @@ -160,7 +168,7 @@ static int tsens_hw_to_mC(struct tsens_sensor *s, int field) * * Return: ADC code or temperature in deciCelsius. 
*/ -static int tsens_mC_to_hw(struct tsens_sensor *s, int temp) +static int tsens_mC_to_hw(const struct tsens_sensor *s, int temp) { struct tsens_priv *priv = s->priv; @@ -189,6 +197,9 @@ static void tsens_set_interrupt_v1(struct tsens_priv *priv, u32 hw_id, case LOWER: index = LOW_INT_CLEAR_0 + hw_id; break; + case CRITICAL: + /* No critical interrupts before v2 */ + return; } regmap_field_write(priv->rf[index], enable ? 0 : 1); } @@ -214,6 +225,10 @@ static void tsens_set_interrupt_v2(struct tsens_priv *priv, u32 hw_id, index_mask = LOW_INT_MASK_0 + hw_id; index_clear = LOW_INT_CLEAR_0 + hw_id; break; + case CRITICAL: + index_mask = CRIT_INT_MASK_0 + hw_id; + index_clear = CRIT_INT_CLEAR_0 + hw_id; + break; } if (enable) { @@ -268,14 +283,23 @@ static int tsens_threshold_violated(struct tsens_priv *priv, u32 hw_id, ret = regmap_field_read(priv->rf[LOWER_STATUS_0 + hw_id], &d->low_viol); if (ret) return ret; - if (d->up_viol || d->low_viol) + + if (priv->feat->crit_int) { + ret = regmap_field_read(priv->rf[CRITICAL_STATUS_0 + hw_id], + &d->crit_viol); + if (ret) + return ret; + } + + if (d->up_viol || d->low_viol || d->crit_viol) return 1; return 0; } static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id, - struct tsens_sensor *s, struct tsens_irq_data *d) + const struct tsens_sensor *s, + struct tsens_irq_data *d) { int ret; @@ -292,22 +316,37 @@ static int tsens_read_irq_state(struct tsens_priv *priv, u32 hw_id, ret = regmap_field_read(priv->rf[LOW_INT_MASK_0 + hw_id], &d->low_irq_mask); if (ret) return ret; + ret = regmap_field_read(priv->rf[CRIT_INT_CLEAR_0 + hw_id], + &d->crit_irq_clear); + if (ret) + return ret; + ret = regmap_field_read(priv->rf[CRIT_INT_MASK_0 + hw_id], + &d->crit_irq_mask); + if (ret) + return ret; + + d->crit_thresh = tsens_hw_to_mC(s, CRIT_THRESH_0 + hw_id); } else { /* No mask register on older TSENS */ d->up_irq_mask = 0; d->low_irq_mask = 0; + d->crit_irq_clear = 0; + d->crit_irq_mask = 0; + d->crit_thresh = 0; } d->up_thresh = tsens_hw_to_mC(s, UP_THRESH_0 + hw_id); d->low_thresh = tsens_hw_to_mC(s, LOW_THRESH_0 + hw_id); - dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u) | clr(%u|%u) | mask(%u|%u)\n", - hw_id, __func__, (d->up_viol || d->low_viol) ? "(V)" : "", - d->low_viol, d->up_viol, d->low_irq_clear, d->up_irq_clear, - d->low_irq_mask, d->up_irq_mask); - dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d)\n", hw_id, __func__, - (d->up_viol || d->low_viol) ? "(violation)" : "", - d->low_thresh, d->up_thresh); + dev_dbg(priv->dev, "[%u] %s%s: status(%u|%u|%u) | clr(%u|%u|%u) | mask(%u|%u|%u)\n", + hw_id, __func__, + (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "", + d->low_viol, d->up_viol, d->crit_viol, + d->low_irq_clear, d->up_irq_clear, d->crit_irq_clear, + d->low_irq_mask, d->up_irq_mask, d->crit_irq_mask); + dev_dbg(priv->dev, "[%u] %s%s: thresh: (%d:%d:%d)\n", hw_id, __func__, + (d->up_viol || d->low_viol || d->crit_viol) ? "(V)" : "", + d->low_thresh, d->up_thresh, d->crit_thresh); return 0; } @@ -322,6 +361,76 @@ static inline u32 masked_irq(u32 hw_id, u32 mask, enum tsens_ver ver) } /** + * tsens_critical_irq_thread() - Threaded handler for critical interrupts + * @irq: irq number + * @data: tsens controller private data + * + * Check FSM watchdog bark status and clear if needed. + * Check all sensors to find ones that violated their critical threshold limits. + * Clear and then re-enable the interrupt. 
+ * + * The level-triggered interrupt might deassert if the temperature returned to + * within the threshold limits by the time the handler got scheduled. We + * consider the irq to have been handled in that case. + * + * Return: IRQ_HANDLED + */ +irqreturn_t tsens_critical_irq_thread(int irq, void *data) +{ + struct tsens_priv *priv = data; + struct tsens_irq_data d; + int temp, ret, i; + u32 wdog_status, wdog_count; + + if (priv->feat->has_watchdog) { + ret = regmap_field_read(priv->rf[WDOG_BARK_STATUS], + &wdog_status); + if (ret) + return ret; + + if (wdog_status) { + /* Clear WDOG interrupt */ + regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 1); + regmap_field_write(priv->rf[WDOG_BARK_CLEAR], 0); + ret = regmap_field_read(priv->rf[WDOG_BARK_COUNT], + &wdog_count); + if (ret) + return ret; + if (wdog_count) + dev_dbg(priv->dev, "%s: watchdog count: %d\n", + __func__, wdog_count); + + /* Fall through to handle critical interrupts if any */ + } + } + + for (i = 0; i < priv->num_sensors; i++) { + const struct tsens_sensor *s = &priv->sensor[i]; + u32 hw_id = s->hw_id; + + if (IS_ERR(s->tzd)) + continue; + if (!tsens_threshold_violated(priv, hw_id, &d)) + continue; + ret = get_temp_tsens_valid(s, &temp); + if (ret) { + dev_err(priv->dev, "[%u] %s: error reading sensor\n", + hw_id, __func__); + continue; + } + + tsens_read_irq_state(priv, hw_id, s, &d); + if (d.crit_viol && + !masked_irq(hw_id, d.crit_irq_mask, tsens_version(priv))) { + /* Mask critical interrupts, unused on Linux */ + tsens_set_interrupt(priv, hw_id, CRITICAL, false); + } + } + + return IRQ_HANDLED; +} + +/** * tsens_irq_thread - Threaded interrupt handler for uplow interrupts * @irq: irq number * @data: tsens controller private data @@ -346,10 +455,10 @@ irqreturn_t tsens_irq_thread(int irq, void *data) for (i = 0; i < priv->num_sensors; i++) { bool trigger = false; - struct tsens_sensor *s = &priv->sensor[i]; + const struct tsens_sensor *s = &priv->sensor[i]; u32 hw_id = s->hw_id; - if (IS_ERR(priv->sensor[i].tzd)) + if (IS_ERR(s->tzd)) continue; if (!tsens_threshold_violated(priv, hw_id, &d)) continue; @@ -368,7 +477,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data) tsens_set_interrupt(priv, hw_id, UPPER, disable); if (d.up_thresh > temp) { dev_dbg(priv->dev, "[%u] %s: re-arm upper\n", - priv->sensor[i].hw_id, __func__); + hw_id, __func__); tsens_set_interrupt(priv, hw_id, UPPER, enable); } else { trigger = true; @@ -379,7 +488,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data) tsens_set_interrupt(priv, hw_id, LOWER, disable); if (d.low_thresh < temp) { dev_dbg(priv->dev, "[%u] %s: re-arm low\n", - priv->sensor[i].hw_id, __func__); + hw_id, __func__); tsens_set_interrupt(priv, hw_id, LOWER, enable); } else { trigger = true; @@ -392,7 +501,7 @@ irqreturn_t tsens_irq_thread(int irq, void *data) if (trigger) { dev_dbg(priv->dev, "[%u] %s: TZ update trigger (%d mC)\n", hw_id, __func__, temp); - thermal_zone_device_update(priv->sensor[i].tzd, + thermal_zone_device_update(s->tzd, THERMAL_EVENT_UNSPECIFIED); } else { dev_dbg(priv->dev, "[%u] %s: no violation: %d\n", @@ -435,7 +544,7 @@ int tsens_set_trips(void *_sensor, int low, int high) spin_unlock_irqrestore(&priv->ul_lock, flags); dev_dbg(dev, "[%u] %s: (%d:%d)->(%d:%d)\n", - s->hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high); + hw_id, __func__, d.low_thresh, d.up_thresh, cl_low, cl_high); return 0; } @@ -457,7 +566,7 @@ void tsens_disable_irq(struct tsens_priv *priv) regmap_field_write(priv->rf[INT_EN], 0); } -int get_temp_tsens_valid(struct 
tsens_sensor *s, int *temp) +int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp) { struct tsens_priv *priv = s->priv; int hw_id = s->hw_id; @@ -486,7 +595,7 @@ int get_temp_tsens_valid(struct tsens_sensor *s, int *temp) return 0; } -int get_temp_common(struct tsens_sensor *s, int *temp) +int get_temp_common(const struct tsens_sensor *s, int *temp) { struct tsens_priv *priv = s->priv; int hw_id = s->hw_id; @@ -590,6 +699,7 @@ int __init init_common(struct tsens_priv *priv) { void __iomem *tm_base, *srot_base; struct device *dev = priv->dev; + u32 ver_minor; struct resource *res; u32 enabled; int ret, i, j; @@ -602,7 +712,7 @@ int __init init_common(struct tsens_priv *priv) /* DT with separate SROT and TM address space */ priv->tm_offset = 0; res = platform_get_resource(op, IORESOURCE_MEM, 1); - srot_base = devm_ioremap_resource(&op->dev, res); + srot_base = devm_ioremap_resource(dev, res); if (IS_ERR(srot_base)) { ret = PTR_ERR(srot_base); goto err_put_device; @@ -620,7 +730,7 @@ int __init init_common(struct tsens_priv *priv) } res = platform_get_resource(op, IORESOURCE_MEM, 0); - tm_base = devm_ioremap_resource(&op->dev, res); + tm_base = devm_ioremap_resource(dev, res); if (IS_ERR(tm_base)) { ret = PTR_ERR(tm_base); goto err_put_device; @@ -639,6 +749,9 @@ int __init init_common(struct tsens_priv *priv) if (IS_ERR(priv->rf[i])) return PTR_ERR(priv->rf[i]); } + ret = regmap_field_read(priv->rf[VER_MINOR], &ver_minor); + if (ret) + goto err_put_device; } priv->rf[TSENS_EN] = devm_regmap_field_alloc(dev, priv->srot_map, @@ -683,12 +796,47 @@ int __init init_common(struct tsens_priv *priv) } } + if (priv->feat->crit_int) { + /* Loop might need changes if enum regfield_ids is reordered */ + for (j = CRITICAL_STATUS_0; j <= CRIT_THRESH_15; j += 16) { + for (i = 0; i < priv->feat->max_sensors; i++) { + int idx = j + i; + + priv->rf[idx] = + devm_regmap_field_alloc(dev, + priv->tm_map, + priv->fields[idx]); + if (IS_ERR(priv->rf[idx])) { + ret = PTR_ERR(priv->rf[idx]); + goto err_put_device; + } + } + } + } + + if (tsens_version(priv) > VER_1_X && ver_minor > 2) { + /* Watchdog is present only on v2.3+ */ + priv->feat->has_watchdog = 1; + for (i = WDOG_BARK_STATUS; i <= CC_MON_MASK; i++) { + priv->rf[i] = devm_regmap_field_alloc(dev, priv->tm_map, + priv->fields[i]); + if (IS_ERR(priv->rf[i])) { + ret = PTR_ERR(priv->rf[i]); + goto err_put_device; + } + } + /* + * Watchdog is already enabled, unmask the bark. 
+ * Disable cycle completion monitoring + */ + regmap_field_write(priv->rf[WDOG_BARK_MASK], 0); + regmap_field_write(priv->rf[CC_MON_MASK], 1); + } + spin_lock_init(&priv->ul_lock); tsens_enable_irq(priv); tsens_debug_init(op); - return 0; - err_put_device: put_device(&op->dev); return ret; diff --git a/drivers/thermal/qcom/tsens-v0_1.c b/drivers/thermal/qcom/tsens-v0_1.c index 4b8dd6de02ce..959a9371d205 100644 --- a/drivers/thermal/qcom/tsens-v0_1.c +++ b/drivers/thermal/qcom/tsens-v0_1.c @@ -327,7 +327,7 @@ static int calibrate_8974(struct tsens_priv *priv) /* v0.1: 8916, 8974 */ -static const struct tsens_features tsens_v0_1_feat = { +static struct tsens_features tsens_v0_1_feat = { .ver_major = VER_0_1, .crit_int = 0, .adc = 1, @@ -377,7 +377,7 @@ static const struct tsens_ops ops_8916 = { .get_temp = get_temp_common, }; -const struct tsens_plat_data data_8916 = { +struct tsens_plat_data data_8916 = { .num_sensors = 5, .ops = &ops_8916, .hw_ids = (unsigned int []){0, 1, 2, 4, 5 }, @@ -392,7 +392,7 @@ static const struct tsens_ops ops_8974 = { .get_temp = get_temp_common, }; -const struct tsens_plat_data data_8974 = { +struct tsens_plat_data data_8974 = { .num_sensors = 11, .ops = &ops_8974, .feat = &tsens_v0_1_feat, diff --git a/drivers/thermal/qcom/tsens-v1.c b/drivers/thermal/qcom/tsens-v1.c index bd2ddb684a45..b682a4df0081 100644 --- a/drivers/thermal/qcom/tsens-v1.c +++ b/drivers/thermal/qcom/tsens-v1.c @@ -299,7 +299,7 @@ static int calibrate_8976(struct tsens_priv *priv) /* v1.x: msm8956,8976,qcs404,405 */ -static const struct tsens_features tsens_v1_feat = { +static struct tsens_features tsens_v1_feat = { .ver_major = VER_1_X, .crit_int = 0, .adc = 1, @@ -368,7 +368,7 @@ static const struct tsens_ops ops_generic_v1 = { .get_temp = get_temp_tsens_valid, }; -const struct tsens_plat_data data_tsens_v1 = { +struct tsens_plat_data data_tsens_v1 = { .ops = &ops_generic_v1, .feat = &tsens_v1_feat, .fields = tsens_v1_regfields, @@ -381,7 +381,7 @@ static const struct tsens_ops ops_8976 = { }; /* Valid for both MSM8956 and MSM8976. Sensor ID 3 is unused. 
*/ -const struct tsens_plat_data data_8976 = { +struct tsens_plat_data data_8976 = { .num_sensors = 11, .ops = &ops_8976, .hw_ids = (unsigned int[]){0, 1, 2, 4, 5, 6, 7, 8, 9, 10}, diff --git a/drivers/thermal/qcom/tsens-v2.c b/drivers/thermal/qcom/tsens-v2.c index a4d15e1abfdd..b293ed32174b 100644 --- a/drivers/thermal/qcom/tsens-v2.c +++ b/drivers/thermal/qcom/tsens-v2.c @@ -24,10 +24,11 @@ #define TM_Sn_CRITICAL_THRESHOLD_OFF 0x0060 #define TM_Sn_STATUS_OFF 0x00a0 #define TM_TRDY_OFF 0x00e4 +#define TM_WDOG_LOG_OFF 0x013c /* v2.x: 8996, 8998, sdm845 */ -static const struct tsens_features tsens_v2_feat = { +static struct tsens_features tsens_v2_feat = { .ver_major = VER_2_X, .crit_int = 1, .adc = 0, @@ -51,8 +52,9 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = { [INT_EN] = REG_FIELD(TM_INT_EN_OFF, 0, 2), /* TEMPERATURE THRESHOLDS */ - REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11), - REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23), + REG_FIELD_FOR_EACH_SENSOR16(LOW_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 0, 11), + REG_FIELD_FOR_EACH_SENSOR16(UP_THRESH, TM_Sn_UPPER_LOWER_THRESHOLD_OFF, 12, 23), + REG_FIELD_FOR_EACH_SENSOR16(CRIT_THRESH, TM_Sn_CRITICAL_THRESHOLD_OFF, 0, 11), /* INTERRUPTS [CLEAR/STATUS/MASK] */ REG_FIELD_SPLIT_BITS_0_15(LOW_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF), @@ -61,6 +63,18 @@ static const struct reg_field tsens_v2_regfields[MAX_REGFIELDS] = { REG_FIELD_SPLIT_BITS_16_31(UP_INT_STATUS, TM_UPPER_LOWER_INT_STATUS_OFF), REG_FIELD_SPLIT_BITS_16_31(UP_INT_CLEAR, TM_UPPER_LOWER_INT_CLEAR_OFF), REG_FIELD_SPLIT_BITS_16_31(UP_INT_MASK, TM_UPPER_LOWER_INT_MASK_OFF), + REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_STATUS, TM_CRITICAL_INT_STATUS_OFF), + REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_CLEAR, TM_CRITICAL_INT_CLEAR_OFF), + REG_FIELD_SPLIT_BITS_0_15(CRIT_INT_MASK, TM_CRITICAL_INT_MASK_OFF), + + /* WATCHDOG on v2.3 or later */ + [WDOG_BARK_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 31, 31), + [WDOG_BARK_CLEAR] = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF, 31, 31), + [WDOG_BARK_MASK] = REG_FIELD(TM_CRITICAL_INT_MASK_OFF, 31, 31), + [CC_MON_STATUS] = REG_FIELD(TM_CRITICAL_INT_STATUS_OFF, 30, 30), + [CC_MON_CLEAR] = REG_FIELD(TM_CRITICAL_INT_CLEAR_OFF, 30, 30), + [CC_MON_MASK] = REG_FIELD(TM_CRITICAL_INT_MASK_OFF, 30, 30), + [WDOG_BARK_COUNT] = REG_FIELD(TM_WDOG_LOG_OFF, 0, 7), /* Sn_STATUS */ REG_FIELD_FOR_EACH_SENSOR16(LAST_TEMP, TM_Sn_STATUS_OFF, 0, 11), @@ -81,14 +95,14 @@ static const struct tsens_ops ops_generic_v2 = { .get_temp = get_temp_tsens_valid, }; -const struct tsens_plat_data data_tsens_v2 = { +struct tsens_plat_data data_tsens_v2 = { .ops = &ops_generic_v2, .feat = &tsens_v2_feat, .fields = tsens_v2_regfields, }; /* Kept around for backward compatibility with old msm8996.dtsi */ -const struct tsens_plat_data data_8996 = { +struct tsens_plat_data data_8996 = { .num_sensors = 13, .ops = &ops_generic_v2, .feat = &tsens_v2_feat, diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c index 0e7cf5236932..2f77d235cf73 100644 --- a/drivers/thermal/qcom/tsens.c +++ b/drivers/thermal/qcom/tsens.c @@ -85,11 +85,42 @@ static const struct thermal_zone_of_device_ops tsens_of_ops = { .set_trips = tsens_set_trips, }; +static int tsens_register_irq(struct tsens_priv *priv, char *irqname, + irq_handler_t thread_fn) +{ + struct platform_device *pdev; + int ret, irq; + + pdev = of_find_device_by_node(priv->dev->of_node); + if (!pdev) + return -ENODEV; + + irq = platform_get_irq_byname(pdev, 
irqname); + if (irq < 0) { + ret = irq; + /* For old DTs with no IRQ defined */ + if (irq == -ENXIO) + ret = 0; + } else { + ret = devm_request_threaded_irq(&pdev->dev, irq, + NULL, thread_fn, + IRQF_ONESHOT, + dev_name(&pdev->dev), priv); + if (ret) + dev_err(&pdev->dev, "%s: failed to get irq\n", + __func__); + else + enable_irq_wake(irq); + } + + put_device(&pdev->dev); + return ret; +} + static int tsens_register(struct tsens_priv *priv) { - int i, ret, irq; + int i, ret; struct thermal_zone_device *tzd; - struct platform_device *pdev; for (i = 0; i < priv->num_sensors; i++) { priv->sensor[i].priv = priv; @@ -103,32 +134,14 @@ static int tsens_register(struct tsens_priv *priv) priv->ops->enable(priv, i); } - pdev = of_find_device_by_node(priv->dev->of_node); - if (!pdev) - return -ENODEV; - - irq = platform_get_irq_byname(pdev, "uplow"); - if (irq < 0) { - ret = irq; - /* For old DTs with no IRQ defined */ - if (irq == -ENXIO) - ret = 0; - goto err_put_device; - } - - ret = devm_request_threaded_irq(&pdev->dev, irq, - NULL, tsens_irq_thread, - IRQF_TRIGGER_HIGH | IRQF_ONESHOT, - dev_name(&pdev->dev), priv); - if (ret) { - dev_err(&pdev->dev, "%s: failed to get irq\n", __func__); - goto err_put_device; - } + ret = tsens_register_irq(priv, "uplow", tsens_irq_thread); + if (ret < 0) + return ret; - enable_irq_wake(irq); + if (priv->feat->crit_int) + ret = tsens_register_irq(priv, "critical", + tsens_critical_irq_thread); -err_put_device: - put_device(&pdev->dev); return ret; } diff --git a/drivers/thermal/qcom/tsens.h b/drivers/thermal/qcom/tsens.h index e24a865fbc34..502acf0e6828 100644 --- a/drivers/thermal/qcom/tsens.h +++ b/drivers/thermal/qcom/tsens.h @@ -23,6 +23,7 @@ struct tsens_priv; +/* IP version numbers in ascending order */ enum tsens_ver { VER_0_1 = 0, VER_1_X, @@ -32,6 +33,7 @@ enum tsens_ver { enum tsens_irq_type { LOWER, UPPER, + CRITICAL, }; /** @@ -67,7 +69,7 @@ struct tsens_ops { /* mandatory callbacks */ int (*init)(struct tsens_priv *priv); int (*calibrate)(struct tsens_priv *priv); - int (*get_temp)(struct tsens_sensor *s, int *temp); + int (*get_temp)(const struct tsens_sensor *s, int *temp); /* optional callbacks */ int (*enable)(struct tsens_priv *priv, int i); void (*disable)(struct tsens_priv *priv); @@ -374,6 +376,82 @@ enum regfield_ids { CRITICAL_STATUS_13, CRITICAL_STATUS_14, CRITICAL_STATUS_15, + CRIT_INT_STATUS_0, /* CRITICAL interrupt status */ + CRIT_INT_STATUS_1, + CRIT_INT_STATUS_2, + CRIT_INT_STATUS_3, + CRIT_INT_STATUS_4, + CRIT_INT_STATUS_5, + CRIT_INT_STATUS_6, + CRIT_INT_STATUS_7, + CRIT_INT_STATUS_8, + CRIT_INT_STATUS_9, + CRIT_INT_STATUS_10, + CRIT_INT_STATUS_11, + CRIT_INT_STATUS_12, + CRIT_INT_STATUS_13, + CRIT_INT_STATUS_14, + CRIT_INT_STATUS_15, + CRIT_INT_CLEAR_0, /* CRITICAL interrupt clear */ + CRIT_INT_CLEAR_1, + CRIT_INT_CLEAR_2, + CRIT_INT_CLEAR_3, + CRIT_INT_CLEAR_4, + CRIT_INT_CLEAR_5, + CRIT_INT_CLEAR_6, + CRIT_INT_CLEAR_7, + CRIT_INT_CLEAR_8, + CRIT_INT_CLEAR_9, + CRIT_INT_CLEAR_10, + CRIT_INT_CLEAR_11, + CRIT_INT_CLEAR_12, + CRIT_INT_CLEAR_13, + CRIT_INT_CLEAR_14, + CRIT_INT_CLEAR_15, + CRIT_INT_MASK_0, /* CRITICAL interrupt mask */ + CRIT_INT_MASK_1, + CRIT_INT_MASK_2, + CRIT_INT_MASK_3, + CRIT_INT_MASK_4, + CRIT_INT_MASK_5, + CRIT_INT_MASK_6, + CRIT_INT_MASK_7, + CRIT_INT_MASK_8, + CRIT_INT_MASK_9, + CRIT_INT_MASK_10, + CRIT_INT_MASK_11, + CRIT_INT_MASK_12, + CRIT_INT_MASK_13, + CRIT_INT_MASK_14, + CRIT_INT_MASK_15, + CRIT_THRESH_0, /* CRITICAL threshold values */ + CRIT_THRESH_1, + CRIT_THRESH_2, + CRIT_THRESH_3, + 
CRIT_THRESH_4, + CRIT_THRESH_5, + CRIT_THRESH_6, + CRIT_THRESH_7, + CRIT_THRESH_8, + CRIT_THRESH_9, + CRIT_THRESH_10, + CRIT_THRESH_11, + CRIT_THRESH_12, + CRIT_THRESH_13, + CRIT_THRESH_14, + CRIT_THRESH_15, + + /* WATCHDOG */ + WDOG_BARK_STATUS, + WDOG_BARK_CLEAR, + WDOG_BARK_MASK, + WDOG_BARK_COUNT, + + /* CYCLE COMPLETION MONITOR */ + CC_MON_STATUS, + CC_MON_CLEAR, + CC_MON_MASK, + MIN_STATUS_0, /* MIN threshold violated */ MIN_STATUS_1, MIN_STATUS_2, @@ -418,6 +496,7 @@ enum regfield_ids { * @adc: do the sensors only output adc code (instead of temperature)? * @srot_split: does the IP neatly splits the register space into SROT and TM, * with SROT only being available to secure boot firmware? + * @has_watchdog: does this IP support watchdog functionality? * @max_sensors: maximum sensors supported by this version of the IP */ struct tsens_features { @@ -425,6 +504,7 @@ struct tsens_features { unsigned int crit_int:1; unsigned int adc:1; unsigned int srot_split:1; + unsigned int has_watchdog:1; unsigned int max_sensors; }; @@ -440,12 +520,14 @@ struct tsens_plat_data { const u32 num_sensors; const struct tsens_ops *ops; unsigned int *hw_ids; - const struct tsens_features *feat; + struct tsens_features *feat; const struct reg_field *fields; }; /** * struct tsens_context - Registers to be saved/restored across a context loss + * @threshold: Threshold register value + * @control: Control register value */ struct tsens_context { int threshold; @@ -460,6 +542,8 @@ struct tsens_context { * @srot_map: pointer to SROT register address space * @tm_offset: deal with old device trees that don't address TM and SROT * address space separately + * @ul_lock: lock while processing upper/lower threshold interrupts + * @crit_lock: lock while processing critical threshold interrupts * @rf: array of regmap_fields used to store value of the field * @ctx: registers to be saved and restored during suspend/resume * @feat: features of the IP @@ -481,36 +565,37 @@ struct tsens_priv { struct regmap_field *rf[MAX_REGFIELDS]; struct tsens_context ctx; - const struct tsens_features *feat; + struct tsens_features *feat; const struct reg_field *fields; const struct tsens_ops *ops; struct dentry *debug_root; struct dentry *debug; - struct tsens_sensor sensor[0]; + struct tsens_sensor sensor[]; }; char *qfprom_read(struct device *dev, const char *cname); void compute_intercept_slope(struct tsens_priv *priv, u32 *pt1, u32 *pt2, u32 mode); int init_common(struct tsens_priv *priv); -int get_temp_tsens_valid(struct tsens_sensor *s, int *temp); -int get_temp_common(struct tsens_sensor *s, int *temp); +int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp); +int get_temp_common(const struct tsens_sensor *s, int *temp); int tsens_enable_irq(struct tsens_priv *priv); void tsens_disable_irq(struct tsens_priv *priv); int tsens_set_trips(void *_sensor, int low, int high); irqreturn_t tsens_irq_thread(int irq, void *data); +irqreturn_t tsens_critical_irq_thread(int irq, void *data); /* TSENS target */ -extern const struct tsens_plat_data data_8960; +extern struct tsens_plat_data data_8960; /* TSENS v0.1 targets */ -extern const struct tsens_plat_data data_8916, data_8974; +extern struct tsens_plat_data data_8916, data_8974; /* TSENS v1 targets */ -extern const struct tsens_plat_data data_tsens_v1, data_8976; +extern struct tsens_plat_data data_tsens_v1, data_8976; /* TSENS v2 targets */ -extern const struct tsens_plat_data data_8996, data_tsens_v2; +extern struct tsens_plat_data data_8996, data_tsens_v2; #endif /* 
__QCOM_TSENS_H__ */ diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 874bc46e6c73..028a6bbf75dc 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c @@ -3,12 +3,11 @@ // Copyright 2016 Freescale Semiconductor, Inc. #include <linux/clk.h> -#include <linux/module.h> -#include <linux/platform_device.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/module.h> #include <linux/of.h> -#include <linux/of_address.h> +#include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/sizes.h> #include <linux/thermal.h> @@ -228,6 +227,14 @@ static const struct regmap_access_table qoriq_rd_table = { .n_yes_ranges = ARRAY_SIZE(qoriq_yes_ranges), }; +static void qoriq_tmu_action(void *p) +{ + struct qoriq_tmu_data *data = p; + + regmap_write(data->regmap, REGS_TMR, TMR_DISABLE); + clk_disable_unprepare(data->clk); +} + static int qoriq_tmu_probe(struct platform_device *pdev) { int ret; @@ -278,6 +285,10 @@ static int qoriq_tmu_probe(struct platform_device *pdev) return ret; } + ret = devm_add_action_or_reset(dev, qoriq_tmu_action, data); + if (ret) + return ret; + /* version register offset at: 0xbf8 on both v1 and v2 */ ret = regmap_read(data->regmap, REGS_IPBRR(0), &ver); if (ret) { @@ -290,35 +301,17 @@ static int qoriq_tmu_probe(struct platform_device *pdev) ret = qoriq_tmu_calibration(dev, data); /* TMU calibration */ if (ret < 0) - goto err; + return ret; ret = qoriq_tmu_register_tmu_zone(dev, data); if (ret < 0) { dev_err(dev, "Failed to register sensors\n"); - ret = -ENODEV; - goto err; + return ret; } platform_set_drvdata(pdev, data); return 0; - -err: - clk_disable_unprepare(data->clk); - - return ret; -} - -static int qoriq_tmu_remove(struct platform_device *pdev) -{ - struct qoriq_tmu_data *data = platform_get_drvdata(pdev); - - /* Disable monitoring */ - regmap_write(data->regmap, REGS_TMR, TMR_DISABLE); - - clk_disable_unprepare(data->clk); - - return 0; } static int __maybe_unused qoriq_tmu_suspend(struct device *dev) @@ -365,7 +358,6 @@ static struct platform_driver qoriq_tmu = { .of_match_table = qoriq_tmu_match, }, .probe = qoriq_tmu_probe, - .remove = qoriq_tmu_remove, }; module_platform_driver(qoriq_tmu); diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c index 72877bdc072d..58fe7c1ef00b 100644 --- a/drivers/thermal/rcar_gen3_thermal.c +++ b/drivers/thermal/rcar_gen3_thermal.c @@ -81,8 +81,6 @@ struct rcar_gen3_thermal_tsc { void __iomem *base; struct thermal_zone_device *zone; struct equation_coefs coef; - int low; - int high; int tj_t; int id; /* thermal channel id */ }; @@ -204,12 +202,14 @@ static int rcar_gen3_thermal_mcelsius_to_temp(struct rcar_gen3_thermal_tsc *tsc, return INT_FIXPT(val); } -static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high) +static int rcar_gen3_thermal_update_range(struct rcar_gen3_thermal_tsc *tsc) { - struct rcar_gen3_thermal_tsc *tsc = devdata; + int temperature, low, high; + + rcar_gen3_thermal_get_temp(tsc, &temperature); - low = clamp_val(low, -40000, 120000); - high = clamp_val(high, -40000, 120000); + low = temperature - MCELSIUS(1); + high = temperature + MCELSIUS(1); rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP1, rcar_gen3_thermal_mcelsius_to_temp(tsc, low)); @@ -217,15 +217,11 @@ static int rcar_gen3_thermal_set_trips(void *devdata, int low, int high) rcar_gen3_thermal_write(tsc, REG_GEN3_IRQTEMP2, rcar_gen3_thermal_mcelsius_to_temp(tsc, high)); - tsc->low = low; - tsc->high = high; - return 0; } 
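The qoriq_tmu conversion above illustrates a common cleanup: rather than undoing hardware setup in a .remove callback and in probe error labels, the teardown is registered once with devm_add_action_or_reset() and devres runs it automatically on probe failure or device unbind. A minimal sketch of that pattern follows; the my_chip and my_chip_shutdown names and the register offset are invented for illustration, while the devres, regmap and clk calls are real kernel APIs.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

struct my_chip {
	struct regmap *regmap;
	struct clk *clk;
};

/* Devres action: disables the block and its clock when the device goes away. */
static void my_chip_shutdown(void *data)
{
	struct my_chip *chip = data;

	regmap_write(chip->regmap, 0x0, 0);	/* hypothetical "disable" register write */
	clk_disable_unprepare(chip->clk);
}

static int my_chip_probe(struct platform_device *pdev)
{
	struct my_chip *chip;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	/* ... obtain chip->regmap, then get and enable chip->clk ... */

	/*
	 * If registering the action fails, my_chip_shutdown() is called
	 * immediately; otherwise it runs automatically on unbind, so no
	 * .remove callback and no error-unwind labels are needed.
	 */
	ret = devm_add_action_or_reset(&pdev->dev, my_chip_shutdown, chip);
	if (ret)
		return ret;

	/* Later probe steps can simply 'return ret;' on failure. */
	return 0;
}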
static const struct thermal_zone_of_device_ops rcar_gen3_tz_of_ops = { .get_temp = rcar_gen3_thermal_get_temp, - .set_trips = rcar_gen3_thermal_set_trips, }; static void rcar_thermal_irq_set(struct rcar_gen3_thermal_priv *priv, bool on) @@ -246,9 +242,11 @@ static irqreturn_t rcar_gen3_thermal_irq(int irq, void *data) for (i = 0; i < priv->num_tscs; i++) { status = rcar_gen3_thermal_read(priv->tscs[i], REG_GEN3_IRQSTR); rcar_gen3_thermal_write(priv->tscs[i], REG_GEN3_IRQSTR, 0); - if (status) + if (status) { + rcar_gen3_thermal_update_range(priv->tscs[i]); thermal_zone_device_update(priv->tscs[i]->zone, THERMAL_EVENT_UNSPECIFIED); + } } return IRQ_HANDLED; @@ -325,6 +323,10 @@ static const struct of_device_id rcar_gen3_thermal_dt_ids[] = { .data = &rcar_gen3_ths_tj_1_m3_w, }, { + .compatible = "renesas,r8a77961-thermal", + .data = &rcar_gen3_ths_tj_1_m3_w, + }, + { .compatible = "renesas,r8a77965-thermal", .data = &rcar_gen3_ths_tj_1, }, @@ -446,14 +448,15 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev) goto error_unregister; ret = devm_add_action_or_reset(dev, rcar_gen3_hwmon_action, zone); - if (ret) { + if (ret) goto error_unregister; - } ret = of_thermal_get_ntrips(tsc->zone); if (ret < 0) goto error_unregister; + rcar_gen3_thermal_update_range(tsc); + dev_info(dev, "TSC%d: Loaded %d trip points\n", i, ret); } @@ -492,7 +495,7 @@ static int __maybe_unused rcar_gen3_thermal_resume(struct device *dev) struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i]; priv->thermal_init(tsc); - rcar_gen3_thermal_set_trips(tsc, tsc->low, tsc->high); + rcar_gen3_thermal_update_range(tsc); } rcar_thermal_irq_set(priv, true); diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 8f1aafa2044e..e0c1f2409035 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -95,7 +95,6 @@ struct rcar_thermal_priv { struct mutex lock; struct list_head list; int id; - u32 ctemp; }; #define rcar_thermal_for_each_priv(pos, common) \ @@ -201,7 +200,6 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) struct device *dev = rcar_priv_to_dev(priv); int i; u32 ctemp, old, new; - int ret = -EINVAL; mutex_lock(&priv->lock); @@ -247,37 +245,29 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) ((ctemp - 1) << 0))); } - dev_dbg(dev, "thermal%d %d -> %d\n", priv->id, priv->ctemp, ctemp); - - priv->ctemp = ctemp; - ret = 0; err_out_unlock: mutex_unlock(&priv->lock); - return ret; + + return ctemp ? ctemp : -EINVAL; } static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv, int *temp) { - int tmp; - int ret; - - ret = rcar_thermal_update_temp(priv); - if (ret < 0) - return ret; + int ctemp; - mutex_lock(&priv->lock); - if (priv->chip->ctemp_bands == 1) - tmp = MCELSIUS((priv->ctemp * 5) - 65); - else if (priv->ctemp < 24) - tmp = MCELSIUS(((priv->ctemp * 55) - 720) / 10); - else - tmp = MCELSIUS((priv->ctemp * 5) - 60); - mutex_unlock(&priv->lock); + ctemp = rcar_thermal_update_temp(priv); + if (ctemp < 0) + return ctemp; /* Guaranteed operating range is -45C to 125C. 
*/ - *temp = tmp; + if (priv->chip->ctemp_bands == 1) + *temp = MCELSIUS((ctemp * 5) - 65); + else if (ctemp < 24) + *temp = MCELSIUS(((ctemp * 55) - 720) / 10); + else + *temp = MCELSIUS((ctemp * 5) - 60); return 0; } @@ -387,28 +377,17 @@ static void _rcar_thermal_irq_ctrl(struct rcar_thermal_priv *priv, int enable) static void rcar_thermal_work(struct work_struct *work) { struct rcar_thermal_priv *priv; - int cctemp, nctemp; int ret; priv = container_of(work, struct rcar_thermal_priv, work.work); - ret = rcar_thermal_get_current_temp(priv, &cctemp); - if (ret < 0) - return; - ret = rcar_thermal_update_temp(priv); if (ret < 0) return; rcar_thermal_irq_enable(priv); - ret = rcar_thermal_get_current_temp(priv, &nctemp); - if (ret < 0) - return; - - if (nctemp != cctemp) - thermal_zone_device_update(priv->zone, - THERMAL_EVENT_UNSPECIFIED); + thermal_zone_device_update(priv->zone, THERMAL_EVENT_UNSPECIFIED); } static u32 rcar_thermal_had_changed(struct rcar_thermal_priv *priv, u32 status) @@ -521,8 +500,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); common->base = devm_ioremap_resource(dev, res); - if (IS_ERR(common->base)) - return PTR_ERR(common->base); + if (IS_ERR(common->base)) { + ret = PTR_ERR(common->base); + goto error_unregister; + } idle = 0; /* polling delay is not needed */ } diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index fd4a17812f33..e9a90bc23b11 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1094,7 +1094,9 @@ static int exynos_tmu_probe(struct platform_device *pdev) &exynos_sensor_ops); if (IS_ERR(data->tzd)) { ret = PTR_ERR(data->tzd); - dev_err(&pdev->dev, "Failed to register sensor: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Failed to register sensor: %d\n", + ret); goto err_sclk; } diff --git a/drivers/thermal/sprd_thermal.c b/drivers/thermal/sprd_thermal.c new file mode 100644 index 000000000000..a340374e8c51 --- /dev/null +++ b/drivers/thermal/sprd_thermal.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2020 Spreadtrum Communications Inc. 
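The new sprd_thermal.c driver added here converts each sensor's raw ADC code to millicelsius with a per-sensor linear fit, temp_mC = cal_slope * raw - cal_offset, where the slope and offset start from the SoC's ideal constants (ideal_k = 262, ideal_b = 66400 on ums512) and are then corrected with efuse calibration data. The standalone sketch below only illustrates that arithmetic; it assumes the calibration corrections are neutral and uses a made-up raw reading of 500.

#include <stdio.h>

/*
 * Illustration of the rawdata-to-temperature formula used by the driver:
 * temperature (millicelsius) = cal_slope * raw - cal_offset.
 * Constants are the ums512 ideal_k/ideal_b values; the raw code is invented.
 */
int main(void)
{
	int cal_slope = 262;		/* ideal_k, calibration assumed neutral */
	int cal_offset = 66400;		/* ideal_b, calibration assumed neutral */
	unsigned int raw = 500;		/* hypothetical SPRD_THM_TEMP(id) reading */
	int temp_mc = cal_slope * (int)raw - cal_offset;

	/* 262 * 500 - 66400 = 64600, i.e. 64.6 degrees Celsius */
	printf("raw %u -> %d mC\n", raw, temp_mc);
	return 0;
}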
+ +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/thermal.h> + +#define SPRD_THM_CTL 0x0 +#define SPRD_THM_INT_EN 0x4 +#define SPRD_THM_INT_STS 0x8 +#define SPRD_THM_INT_RAW_STS 0xc +#define SPRD_THM_DET_PERIOD 0x10 +#define SPRD_THM_INT_CLR 0x14 +#define SPRD_THM_INT_CLR_ST 0x18 +#define SPRD_THM_MON_PERIOD 0x4c +#define SPRD_THM_MON_CTL 0x50 +#define SPRD_THM_INTERNAL_STS1 0x54 +#define SPRD_THM_RAW_READ_MSK 0x3ff + +#define SPRD_THM_OFFSET(id) ((id) * 0x4) +#define SPRD_THM_TEMP(id) (SPRD_THM_OFFSET(id) + 0x5c) +#define SPRD_THM_THRES(id) (SPRD_THM_OFFSET(id) + 0x2c) + +#define SPRD_THM_SEN(id) BIT((id) + 2) +#define SPRD_THM_SEN_OVERHEAT_EN(id) BIT((id) + 8) +#define SPRD_THM_SEN_OVERHEAT_ALARM_EN(id) BIT((id) + 0) + +/* bits definitions for register THM_CTL */ +#define SPRD_THM_SET_RDY_ST BIT(13) +#define SPRD_THM_SET_RDY BIT(12) +#define SPRD_THM_MON_EN BIT(1) +#define SPRD_THM_EN BIT(0) + +/* bits definitions for register THM_INT_CTL */ +#define SPRD_THM_BIT_INT_EN BIT(26) +#define SPRD_THM_OVERHEAT_EN BIT(25) +#define SPRD_THM_OTP_TRIP_SHIFT 10 + +/* bits definitions for register SPRD_THM_INTERNAL_STS1 */ +#define SPRD_THM_TEMPER_RDY BIT(0) + +#define SPRD_THM_DET_PERIOD_DATA 0x800 +#define SPRD_THM_DET_PERIOD_MASK GENMASK(19, 0) +#define SPRD_THM_MON_MODE 0x7 +#define SPRD_THM_MON_MODE_MASK GENMASK(3, 0) +#define SPRD_THM_MON_PERIOD_DATA 0x10 +#define SPRD_THM_MON_PERIOD_MASK GENMASK(15, 0) +#define SPRD_THM_THRES_MASK GENMASK(19, 0) +#define SPRD_THM_INT_CLR_MASK GENMASK(24, 0) + +/* thermal sensor calibration parameters */ +#define SPRD_THM_TEMP_LOW -40000 +#define SPRD_THM_TEMP_HIGH 120000 +#define SPRD_THM_OTP_TEMP 120000 +#define SPRD_THM_HOT_TEMP 75000 +#define SPRD_THM_RAW_DATA_LOW 0 +#define SPRD_THM_RAW_DATA_HIGH 1000 +#define SPRD_THM_SEN_NUM 8 +#define SPRD_THM_DT_OFFSET 24 +#define SPRD_THM_RATION_OFFSET 17 +#define SPRD_THM_RATION_SIGN 16 + +#define SPRD_THM_RDYST_POLLING_TIME 10 +#define SPRD_THM_RDYST_TIMEOUT 700 +#define SPRD_THM_TEMP_READY_POLL_TIME 10000 +#define SPRD_THM_TEMP_READY_TIMEOUT 600000 +#define SPRD_THM_MAX_SENSOR 8 + +struct sprd_thermal_sensor { + struct thermal_zone_device *tzd; + struct sprd_thermal_data *data; + struct device *dev; + int cal_slope; + int cal_offset; + int id; +}; + +struct sprd_thermal_data { + const struct sprd_thm_variant_data *var_data; + struct sprd_thermal_sensor *sensor[SPRD_THM_MAX_SENSOR]; + struct clk *clk; + void __iomem *base; + u32 ratio_off; + int ratio_sign; + int nr_sensors; +}; + +/* + * The conversion between ADC and temperature is based on linear relationship, + * and use idea_k to specify the slope and ideal_b to specify the offset. + * + * Since different Spreadtrum SoCs have different ideal_k and ideal_b, + * we should save ideal_k and ideal_b in the device data structure. 
+ */ +struct sprd_thm_variant_data { + u32 ideal_k; + u32 ideal_b; +}; + +static const struct sprd_thm_variant_data ums512_data = { + .ideal_k = 262, + .ideal_b = 66400, +}; + +static inline void sprd_thm_update_bits(void __iomem *reg, u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = readl(reg); + tmp = orig & ~mask; + tmp |= val & mask; + writel(tmp, reg); +} + +static int sprd_thm_cal_read(struct device_node *np, const char *cell_id, + u32 *val) +{ + struct nvmem_cell *cell; + void *buf; + size_t len; + + cell = of_nvmem_cell_get(np, cell_id); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + buf = nvmem_cell_read(cell, &len); + nvmem_cell_put(cell); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + if (len > sizeof(u32)) { + kfree(buf); + return -EINVAL; + } + + memcpy(val, buf, len); + + kfree(buf); + return 0; +} + +static int sprd_thm_sensor_calibration(struct device_node *np, + struct sprd_thermal_data *thm, + struct sprd_thermal_sensor *sen) +{ + int ret; + /* + * According to thermal datasheet, the default calibration offset is 64, + * and the default ratio is 1000. + */ + int dt_offset = 64, ratio = 1000; + + ret = sprd_thm_cal_read(np, "sen_delta_cal", &dt_offset); + if (ret) + return ret; + + ratio += thm->ratio_sign * thm->ratio_off; + + /* + * According to the ideal slope K and ideal offset B, combined with + * calibration value of thermal from efuse, then calibrate the real + * slope k and offset b: + * k_cal = (k * ratio) / 1000. + * b_cal = b + (dt_offset - 64) * 500. + */ + sen->cal_slope = (thm->var_data->ideal_k * ratio) / 1000; + sen->cal_offset = thm->var_data->ideal_b + (dt_offset - 128) * 250; + + return 0; +} + +static int sprd_thm_rawdata_to_temp(struct sprd_thermal_sensor *sen, + u32 rawdata) +{ + clamp(rawdata, (u32)SPRD_THM_RAW_DATA_LOW, (u32)SPRD_THM_RAW_DATA_HIGH); + + /* + * According to the thermal datasheet, the formula of converting + * adc value to the temperature value should be: + * T_final = k_cal * x - b_cal. + */ + return sen->cal_slope * rawdata - sen->cal_offset; +} + +static int sprd_thm_temp_to_rawdata(int temp, struct sprd_thermal_sensor *sen) +{ + u32 val; + + clamp(temp, (int)SPRD_THM_TEMP_LOW, (int)SPRD_THM_TEMP_HIGH); + + /* + * According to the thermal datasheet, the formula of converting + * adc value to the temperature value should be: + * T_final = k_cal * x - b_cal. + */ + val = (temp + sen->cal_offset) / sen->cal_slope; + + return clamp(val, val, (u32)(SPRD_THM_RAW_DATA_HIGH - 1)); +} + +static int sprd_thm_read_temp(void *devdata, int *temp) +{ + struct sprd_thermal_sensor *sen = devdata; + u32 data; + + data = readl(sen->data->base + SPRD_THM_TEMP(sen->id)) & + SPRD_THM_RAW_READ_MSK; + + *temp = sprd_thm_rawdata_to_temp(sen, data); + + return 0; +} + +static const struct thermal_zone_of_device_ops sprd_thm_ops = { + .get_temp = sprd_thm_read_temp, +}; + +static int sprd_thm_poll_ready_status(struct sprd_thermal_data *thm) +{ + u32 val; + int ret; + + /* + * Wait for thermal ready status before configuring thermal parameters. 
+ */ + ret = readl_poll_timeout(thm->base + SPRD_THM_CTL, val, + !(val & SPRD_THM_SET_RDY_ST), + SPRD_THM_RDYST_POLLING_TIME, + SPRD_THM_RDYST_TIMEOUT); + if (ret) + return ret; + + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_MON_EN, + SPRD_THM_MON_EN); + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SET_RDY, + SPRD_THM_SET_RDY); + return 0; +} + +static int sprd_thm_wait_temp_ready(struct sprd_thermal_data *thm) +{ + u32 val; + + /* Wait for first temperature data ready before reading temperature */ + return readl_poll_timeout(thm->base + SPRD_THM_INTERNAL_STS1, val, + !(val & SPRD_THM_TEMPER_RDY), + SPRD_THM_TEMP_READY_POLL_TIME, + SPRD_THM_TEMP_READY_TIMEOUT); +} + +static int sprd_thm_set_ready(struct sprd_thermal_data *thm) +{ + int ret; + + ret = sprd_thm_poll_ready_status(thm); + if (ret) + return ret; + + /* + * Clear interrupt status, enable thermal interrupt and enable thermal. + * + * The SPRD thermal controller integrates a hardware interrupt signal, + * which means if the temperature is overheat, it will generate an + * interrupt and notify the event to PMIC automatically to shutdown the + * system. So here we should enable the interrupt bits, though we have + * not registered an irq handler. + */ + writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR); + sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN, + SPRD_THM_BIT_INT_EN, SPRD_THM_BIT_INT_EN); + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, + SPRD_THM_EN, SPRD_THM_EN); + return 0; +} + +static void sprd_thm_sensor_init(struct sprd_thermal_data *thm, + struct sprd_thermal_sensor *sen) +{ + u32 otp_rawdata, hot_rawdata; + + otp_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_OTP_TEMP, sen); + hot_rawdata = sprd_thm_temp_to_rawdata(SPRD_THM_HOT_TEMP, sen); + + /* Enable the sensor' overheat temperature protection interrupt */ + sprd_thm_update_bits(thm->base + SPRD_THM_INT_EN, + SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id), + SPRD_THM_SEN_OVERHEAT_ALARM_EN(sen->id)); + + /* Set the sensor' overheat and hot threshold temperature */ + sprd_thm_update_bits(thm->base + SPRD_THM_THRES(sen->id), + SPRD_THM_THRES_MASK, + (otp_rawdata << SPRD_THM_OTP_TRIP_SHIFT) | + hot_rawdata); + + /* Enable the corresponding sensor */ + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, SPRD_THM_SEN(sen->id), + SPRD_THM_SEN(sen->id)); +} + +static void sprd_thm_para_config(struct sprd_thermal_data *thm) +{ + /* Set the period of two valid temperature detection action */ + sprd_thm_update_bits(thm->base + SPRD_THM_DET_PERIOD, + SPRD_THM_DET_PERIOD_MASK, SPRD_THM_DET_PERIOD); + + /* Set the sensors' monitor mode */ + sprd_thm_update_bits(thm->base + SPRD_THM_MON_CTL, + SPRD_THM_MON_MODE_MASK, SPRD_THM_MON_MODE); + + /* Set the sensors' monitor period */ + sprd_thm_update_bits(thm->base + SPRD_THM_MON_PERIOD, + SPRD_THM_MON_PERIOD_MASK, SPRD_THM_MON_PERIOD); +} + +static void sprd_thm_toggle_sensor(struct sprd_thermal_sensor *sen, bool on) +{ + struct thermal_zone_device *tzd = sen->tzd; + + tzd->ops->set_mode(tzd, + on ? 
THERMAL_DEVICE_ENABLED : THERMAL_DEVICE_DISABLED); +} + +static int sprd_thm_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *sen_child; + struct sprd_thermal_data *thm; + struct sprd_thermal_sensor *sen; + const struct sprd_thm_variant_data *pdata; + int ret, i; + u32 val; + + pdata = of_device_get_match_data(&pdev->dev); + if (!pdata) { + dev_err(&pdev->dev, "No matching driver data found\n"); + return -EINVAL; + } + + thm = devm_kzalloc(&pdev->dev, sizeof(*thm), GFP_KERNEL); + if (!thm) + return -ENOMEM; + + thm->var_data = pdata; + thm->base = devm_platform_ioremap_resource(pdev, 0); + if (!thm->base) + return -ENOMEM; + + thm->nr_sensors = of_get_child_count(np); + if (thm->nr_sensors == 0 || thm->nr_sensors > SPRD_THM_MAX_SENSOR) { + dev_err(&pdev->dev, "incorrect sensor count\n"); + return -EINVAL; + } + + thm->clk = devm_clk_get(&pdev->dev, "enable"); + if (IS_ERR(thm->clk)) { + dev_err(&pdev->dev, "failed to get enable clock\n"); + return PTR_ERR(thm->clk); + } + + ret = clk_prepare_enable(thm->clk); + if (ret) + return ret; + + sprd_thm_para_config(thm); + + ret = sprd_thm_cal_read(np, "thm_sign_cal", &val); + if (ret) + goto disable_clk; + + if (val > 0) + thm->ratio_sign = -1; + else + thm->ratio_sign = 1; + + ret = sprd_thm_cal_read(np, "thm_ratio_cal", &thm->ratio_off); + if (ret) + goto disable_clk; + + for_each_child_of_node(np, sen_child) { + sen = devm_kzalloc(&pdev->dev, sizeof(*sen), GFP_KERNEL); + if (!sen) { + ret = -ENOMEM; + goto disable_clk; + } + + sen->data = thm; + sen->dev = &pdev->dev; + + ret = of_property_read_u32(sen_child, "reg", &sen->id); + if (ret) { + dev_err(&pdev->dev, "get sensor reg failed"); + goto disable_clk; + } + + ret = sprd_thm_sensor_calibration(sen_child, thm, sen); + if (ret) { + dev_err(&pdev->dev, "efuse cal analysis failed"); + goto disable_clk; + } + + sprd_thm_sensor_init(thm, sen); + + sen->tzd = devm_thermal_zone_of_sensor_register(sen->dev, + sen->id, + sen, + &sprd_thm_ops); + if (IS_ERR(sen->tzd)) { + dev_err(&pdev->dev, "register thermal zone failed %d\n", + sen->id); + ret = PTR_ERR(sen->tzd); + goto disable_clk; + } + + thm->sensor[sen->id] = sen; + } + + ret = sprd_thm_set_ready(thm); + if (ret) + goto disable_clk; + + ret = sprd_thm_wait_temp_ready(thm); + if (ret) + goto disable_clk; + + for (i = 0; i < thm->nr_sensors; i++) + sprd_thm_toggle_sensor(thm->sensor[i], true); + + platform_set_drvdata(pdev, thm); + return 0; + +disable_clk: + clk_disable_unprepare(thm->clk); + return ret; +} + +#ifdef CONFIG_PM_SLEEP +static void sprd_thm_hw_suspend(struct sprd_thermal_data *thm) +{ + int i; + + for (i = 0; i < thm->nr_sensors; i++) { + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, + SPRD_THM_SEN(thm->sensor[i]->id), 0); + } + + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, + SPRD_THM_EN, 0x0); +} + +static int sprd_thm_suspend(struct device *dev) +{ + struct sprd_thermal_data *thm = dev_get_drvdata(dev); + int i; + + for (i = 0; i < thm->nr_sensors; i++) + sprd_thm_toggle_sensor(thm->sensor[i], false); + + sprd_thm_hw_suspend(thm); + clk_disable_unprepare(thm->clk); + + return 0; +} + +static int sprd_thm_hw_resume(struct sprd_thermal_data *thm) +{ + int ret, i; + + for (i = 0; i < thm->nr_sensors; i++) { + sprd_thm_update_bits(thm->base + SPRD_THM_CTL, + SPRD_THM_SEN(thm->sensor[i]->id), + SPRD_THM_SEN(thm->sensor[i]->id)); + } + + ret = sprd_thm_poll_ready_status(thm); + if (ret) + return ret; + + writel(SPRD_THM_INT_CLR_MASK, thm->base + SPRD_THM_INT_CLR); + 
sprd_thm_update_bits(thm->base + SPRD_THM_CTL, + SPRD_THM_EN, SPRD_THM_EN); + return sprd_thm_wait_temp_ready(thm); +} + +static int sprd_thm_resume(struct device *dev) +{ + struct sprd_thermal_data *thm = dev_get_drvdata(dev); + int ret, i; + + ret = clk_prepare_enable(thm->clk); + if (ret) + return ret; + + ret = sprd_thm_hw_resume(thm); + if (ret) + goto disable_clk; + + for (i = 0; i < thm->nr_sensors; i++) + sprd_thm_toggle_sensor(thm->sensor[i], true); + + return 0; + +disable_clk: + clk_disable_unprepare(thm->clk); + return ret; +} +#endif + +static int sprd_thm_remove(struct platform_device *pdev) +{ + struct sprd_thermal_data *thm = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < thm->nr_sensors; i++) { + sprd_thm_toggle_sensor(thm->sensor[i], false); + devm_thermal_zone_of_sensor_unregister(&pdev->dev, + thm->sensor[i]->tzd); + } + + clk_disable_unprepare(thm->clk); + return 0; +} + +static const struct of_device_id sprd_thermal_of_match[] = { + { .compatible = "sprd,ums512-thermal", .data = &ums512_data }, + { }, +}; + +static const struct dev_pm_ops sprd_thermal_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sprd_thm_suspend, sprd_thm_resume) +}; + +static struct platform_driver sprd_thermal_driver = { + .probe = sprd_thm_probe, + .remove = sprd_thm_remove, + .driver = { + .name = "sprd-thermal", + .pm = &sprd_thermal_pm_ops, + .of_match_table = sprd_thermal_of_match, + }, +}; + +module_platform_driver(sprd_thermal_driver); + +MODULE_AUTHOR("Freeman Liu <freeman.liu@unisoc.com>"); +MODULE_DESCRIPTION("Spreadtrum thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/st/stm_thermal.c b/drivers/thermal/st/stm_thermal.c index ad9e3bf8fdf6..9314e3df6a42 100644 --- a/drivers/thermal/st/stm_thermal.c +++ b/drivers/thermal/st/stm_thermal.c @@ -478,7 +478,8 @@ static int stm_thermal_resume(struct device *dev) } #endif /* CONFIG_PM_SLEEP */ -SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, stm_thermal_suspend, stm_thermal_resume); +static SIMPLE_DEV_PM_OPS(stm_thermal_pm_ops, + stm_thermal_suspend, stm_thermal_resume); static const struct thermal_zone_of_device_ops stm_tz_ops = { .get_temp = stm_thermal_get_temp, diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 2fa78f738568..263b0420fbe4 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/clk.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/types.h> @@ -24,7 +24,6 @@ #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/of_irq.h> -#include <linux/of_gpio.h> #include <linux/io.h> #include "ti-bandgap.h" @@ -743,27 +742,13 @@ exit: static int ti_bandgap_tshut_init(struct ti_bandgap *bgp, struct platform_device *pdev) { - int gpio_nr = bgp->tshut_gpio; int status; - /* Request for gpio_86 line */ - status = gpio_request(gpio_nr, "tshut"); - if (status < 0) { - dev_err(bgp->dev, "Could not request for TSHUT GPIO:%i\n", 86); - return status; - } - status = gpio_direction_input(gpio_nr); - if (status) { - dev_err(bgp->dev, "Cannot set input TSHUT GPIO %d\n", gpio_nr); - return status; - } - - status = request_irq(gpio_to_irq(gpio_nr), ti_bandgap_tshut_irq_handler, + status = request_irq(gpiod_to_irq(bgp->tshut_gpiod), + ti_bandgap_tshut_irq_handler, IRQF_TRIGGER_RISING, "tshut", NULL); - if (status) { - gpio_free(gpio_nr); + if 
(status) dev_err(bgp->dev, "request irq failed for TSHUT"); - } return 0; } @@ -860,11 +845,10 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev) } while (res); if (TI_BANDGAP_HAS(bgp, TSHUT)) { - bgp->tshut_gpio = of_get_gpio(node, 0); - if (!gpio_is_valid(bgp->tshut_gpio)) { - dev_err(&pdev->dev, "invalid gpio for tshut (%d)\n", - bgp->tshut_gpio); - return ERR_PTR(-EINVAL); + bgp->tshut_gpiod = devm_gpiod_get(&pdev->dev, NULL, GPIOD_IN); + if (IS_ERR(bgp->tshut_gpiod)) { + dev_err(&pdev->dev, "invalid gpio for tshut\n"); + return ERR_CAST(bgp->tshut_gpiod); } } @@ -1046,10 +1030,8 @@ put_clks: put_fclock: clk_put(bgp->fclock); free_irqs: - if (TI_BANDGAP_HAS(bgp, TSHUT)) { - free_irq(gpio_to_irq(bgp->tshut_gpio), NULL); - gpio_free(bgp->tshut_gpio); - } + if (TI_BANDGAP_HAS(bgp, TSHUT)) + free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL); return ret; } @@ -1079,10 +1061,8 @@ int ti_bandgap_remove(struct platform_device *pdev) if (TI_BANDGAP_HAS(bgp, TALERT)) free_irq(bgp->irq, bgp); - if (TI_BANDGAP_HAS(bgp, TSHUT)) { - free_irq(gpio_to_irq(bgp->tshut_gpio), NULL); - gpio_free(bgp->tshut_gpio); - } + if (TI_BANDGAP_HAS(bgp, TSHUT)) + free_irq(gpiod_to_irq(bgp->tshut_gpiod), NULL); return 0; } diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.h b/drivers/thermal/ti-soc-thermal/ti-bandgap.h index bb9b0f7faf99..fce4657e9486 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.h +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.h @@ -13,6 +13,8 @@ #include <linux/types.h> #include <linux/err.h> +struct gpio_desc; + /** * DOC: bandgap driver data structure * ================================== @@ -199,7 +201,7 @@ struct ti_bandgap { struct clk *div_clk; spinlock_t lock; /* shields this struct */ int irq; - int tshut_gpio; + struct gpio_desc *tshut_gpiod; u32 clk_rate; }; diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 769e0a5d1dfc..3c6dd06ec5fb 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c @@ -136,6 +136,21 @@ static int find_console_handle(void) return 1; } +static unsigned int local_ev_byte_channel_send(unsigned int handle, + unsigned int *count, + const char *p) +{ + char buffer[EV_BYTE_CHANNEL_MAX_BYTES]; + unsigned int c = *count; + + if (c < sizeof(buffer)) { + memcpy(buffer, p, c); + memset(&buffer[c], 0, sizeof(buffer) - c); + p = buffer; + } + return ev_byte_channel_send(handle, count, p); +} + /*************************** EARLY CONSOLE DRIVER ***************************/ #ifdef CONFIG_PPC_EARLY_DEBUG_EHV_BC @@ -154,7 +169,7 @@ static void byte_channel_spin_send(const char data) do { count = 1; - ret = ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, + ret = local_ev_byte_channel_send(CONFIG_PPC_EARLY_DEBUG_EHV_BC_HANDLE, &count, &data); } while (ret == EV_EAGAIN); } @@ -221,7 +236,7 @@ static int ehv_bc_console_byte_channel_send(unsigned int handle, const char *s, while (count) { len = min_t(unsigned int, count, EV_BYTE_CHANNEL_MAX_BYTES); do { - ret = ev_byte_channel_send(handle, &len, s); + ret = local_ev_byte_channel_send(handle, &len, s); } while (ret == EV_EAGAIN); count -= len; s += len; @@ -401,7 +416,7 @@ static void ehv_bc_tx_dequeue(struct ehv_bc_data *bc) CIRC_CNT_TO_END(bc->head, bc->tail, BUF_SIZE), EV_BYTE_CHANNEL_MAX_BYTES); - ret = ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); + ret = local_ev_byte_channel_send(bc->handle, &len, bc->buf + bc->tail); /* 'len' is valid only if the return code is 0 or EV_EAGAIN */ if (!ret || (ret == EV_EAGAIN)) diff --git 
a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig new file mode 100644 index 000000000000..7db1460104b7 --- /dev/null +++ b/drivers/vdpa/Kconfig @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: GPL-2.0-only +config VDPA + tristate + help + Enable this module to support vDPA device that uses a + datapath which complies with virtio specifications with + vendor specific control path. + +menuconfig VDPA_MENU + bool "VDPA drivers" + default n + +if VDPA_MENU + +config VDPA_SIM + tristate "vDPA device simulator" + depends on RUNTIME_TESTING_MENU + select VDPA + select VHOST_RING + default n + help + vDPA networking device simulator which loop TX traffic back + to RX. This device is used for testing, prototyping and + development of vDPA. + +config IFCVF + tristate "Intel IFC VF VDPA driver" + depends on PCI_MSI + select VDPA + default n + help + This kernel module can drive Intel IFC VF NIC to offload + virtio dataplane traffic to hardware. + To compile this driver as a module, choose M here: the module will + be called ifcvf. + +endif # VDPA_MENU diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile new file mode 100644 index 000000000000..8bbb686ca7a2 --- /dev/null +++ b/drivers/vdpa/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VDPA) += vdpa.o +obj-$(CONFIG_VDPA_SIM) += vdpa_sim/ +obj-$(CONFIG_IFCVF) += ifcvf/ diff --git a/drivers/vdpa/ifcvf/Makefile b/drivers/vdpa/ifcvf/Makefile new file mode 100644 index 000000000000..d709915995ab --- /dev/null +++ b/drivers/vdpa/ifcvf/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_IFCVF) += ifcvf.o +ifcvf-$(CONFIG_IFCVF) += ifcvf_main.o ifcvf_base.o diff --git a/drivers/vdpa/ifcvf/ifcvf_base.c b/drivers/vdpa/ifcvf/ifcvf_base.c new file mode 100644 index 000000000000..b61b06ea26d3 --- /dev/null +++ b/drivers/vdpa/ifcvf/ifcvf_base.c @@ -0,0 +1,389 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel IFC VF NIC driver for virtio dataplane offloading + * + * Copyright (C) 2020 Intel Corporation. 
+ * + * Author: Zhu Lingshan <lingshan.zhu@intel.com> + * + */ + +#include "ifcvf_base.h" + +static inline u8 ifc_ioread8(u8 __iomem *addr) +{ + return ioread8(addr); +} +static inline u16 ifc_ioread16 (__le16 __iomem *addr) +{ + return ioread16(addr); +} + +static inline u32 ifc_ioread32(__le32 __iomem *addr) +{ + return ioread32(addr); +} + +static inline void ifc_iowrite8(u8 value, u8 __iomem *addr) +{ + iowrite8(value, addr); +} + +static inline void ifc_iowrite16(u16 value, __le16 __iomem *addr) +{ + iowrite16(value, addr); +} + +static inline void ifc_iowrite32(u32 value, __le32 __iomem *addr) +{ + iowrite32(value, addr); +} + +static void ifc_iowrite64_twopart(u64 val, + __le32 __iomem *lo, __le32 __iomem *hi) +{ + ifc_iowrite32((u32)val, lo); + ifc_iowrite32(val >> 32, hi); +} + +struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw) +{ + return container_of(hw, struct ifcvf_adapter, vf); +} + +static void __iomem *get_cap_addr(struct ifcvf_hw *hw, + struct virtio_pci_cap *cap) +{ + struct ifcvf_adapter *ifcvf; + struct pci_dev *pdev; + u32 length, offset; + u8 bar; + + length = le32_to_cpu(cap->length); + offset = le32_to_cpu(cap->offset); + bar = cap->bar; + + ifcvf= vf_to_adapter(hw); + pdev = ifcvf->pdev; + + if (bar >= IFCVF_PCI_MAX_RESOURCE) { + IFCVF_DBG(pdev, + "Invalid bar number %u to get capabilities\n", bar); + return NULL; + } + + if (offset + length > pci_resource_len(pdev, bar)) { + IFCVF_DBG(pdev, + "offset(%u) + len(%u) overflows bar%u's capability\n", + offset, length, bar); + return NULL; + } + + return hw->base[bar] + offset; +} + +static int ifcvf_read_config_range(struct pci_dev *dev, + uint32_t *val, int size, int where) +{ + int ret, i; + + for (i = 0; i < size; i += 4) { + ret = pci_read_config_dword(dev, where + i, val + i / 4); + if (ret < 0) + return ret; + } + + return 0; +} + +int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev) +{ + struct virtio_pci_cap cap; + u16 notify_off; + int ret; + u8 pos; + u32 i; + + ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos); + if (ret < 0) { + IFCVF_ERR(pdev, "Failed to read PCI capability list\n"); + return -EIO; + } + + while (pos) { + ret = ifcvf_read_config_range(pdev, (u32 *)&cap, + sizeof(cap), pos); + if (ret < 0) { + IFCVF_ERR(pdev, + "Failed to get PCI capability at %x\n", pos); + break; + } + + if (cap.cap_vndr != PCI_CAP_ID_VNDR) + goto next; + + switch (cap.cfg_type) { + case VIRTIO_PCI_CAP_COMMON_CFG: + hw->common_cfg = get_cap_addr(hw, &cap); + IFCVF_DBG(pdev, "hw->common_cfg = %p\n", + hw->common_cfg); + break; + case VIRTIO_PCI_CAP_NOTIFY_CFG: + pci_read_config_dword(pdev, pos + sizeof(cap), + &hw->notify_off_multiplier); + hw->notify_bar = cap.bar; + hw->notify_base = get_cap_addr(hw, &cap); + IFCVF_DBG(pdev, "hw->notify_base = %p\n", + hw->notify_base); + break; + case VIRTIO_PCI_CAP_ISR_CFG: + hw->isr = get_cap_addr(hw, &cap); + IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr); + break; + case VIRTIO_PCI_CAP_DEVICE_CFG: + hw->net_cfg = get_cap_addr(hw, &cap); + IFCVF_DBG(pdev, "hw->net_cfg = %p\n", hw->net_cfg); + break; + } + +next: + pos = cap.cap_next; + } + + if (hw->common_cfg == NULL || hw->notify_base == NULL || + hw->isr == NULL || hw->net_cfg == NULL) { + IFCVF_ERR(pdev, "Incomplete PCI capabilities\n"); + return -EIO; + } + + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { + ifc_iowrite16(i, &hw->common_cfg->queue_select); + notify_off = ifc_ioread16(&hw->common_cfg->queue_notify_off); + hw->vring[i].notify_addr = hw->notify_base + + notify_off * 
hw->notify_off_multiplier; + } + + hw->lm_cfg = hw->base[IFCVF_LM_BAR]; + + IFCVF_DBG(pdev, + "PCI capability mapping: common cfg: %p, notify base: %p\n, isr cfg: %p, device cfg: %p, multiplier: %u\n", + hw->common_cfg, hw->notify_base, hw->isr, + hw->net_cfg, hw->notify_off_multiplier); + + return 0; +} + +u8 ifcvf_get_status(struct ifcvf_hw *hw) +{ + return ifc_ioread8(&hw->common_cfg->device_status); +} + +void ifcvf_set_status(struct ifcvf_hw *hw, u8 status) +{ + ifc_iowrite8(status, &hw->common_cfg->device_status); +} + +void ifcvf_reset(struct ifcvf_hw *hw) +{ + ifcvf_set_status(hw, 0); + /* flush set_status, make sure VF is stopped, reset */ + ifcvf_get_status(hw); +} + +static void ifcvf_add_status(struct ifcvf_hw *hw, u8 status) +{ + if (status != 0) + status |= ifcvf_get_status(hw); + + ifcvf_set_status(hw, status); + ifcvf_get_status(hw); +} + +u64 ifcvf_get_features(struct ifcvf_hw *hw) +{ + struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; + u32 features_lo, features_hi; + + ifc_iowrite32(0, &cfg->device_feature_select); + features_lo = ifc_ioread32(&cfg->device_feature); + + ifc_iowrite32(1, &cfg->device_feature_select); + features_hi = ifc_ioread32(&cfg->device_feature); + + return ((u64)features_hi << 32) | features_lo; +} + +void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset, + void *dst, int length) +{ + u8 old_gen, new_gen, *p; + int i; + + WARN_ON(offset + length > sizeof(struct virtio_net_config)); + do { + old_gen = ifc_ioread8(&hw->common_cfg->config_generation); + p = dst; + for (i = 0; i < length; i++) + *p++ = ifc_ioread8(hw->net_cfg + offset + i); + + new_gen = ifc_ioread8(&hw->common_cfg->config_generation); + } while (old_gen != new_gen); +} + +void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset, + const void *src, int length) +{ + const u8 *p; + int i; + + p = src; + WARN_ON(offset + length > sizeof(struct virtio_net_config)); + for (i = 0; i < length; i++) + ifc_iowrite8(*p++, hw->net_cfg + offset + i); +} + +static void ifcvf_set_features(struct ifcvf_hw *hw, u64 features) +{ + struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg; + + ifc_iowrite32(0, &cfg->guest_feature_select); + ifc_iowrite32((u32)features, &cfg->guest_feature); + + ifc_iowrite32(1, &cfg->guest_feature_select); + ifc_iowrite32(features >> 32, &cfg->guest_feature); +} + +static int ifcvf_config_features(struct ifcvf_hw *hw) +{ + struct ifcvf_adapter *ifcvf; + + ifcvf = vf_to_adapter(hw); + ifcvf_set_features(hw, hw->req_features); + ifcvf_add_status(hw, VIRTIO_CONFIG_S_FEATURES_OK); + + if (!(ifcvf_get_status(hw) & VIRTIO_CONFIG_S_FEATURES_OK)) { + IFCVF_ERR(ifcvf->pdev, "Failed to set FEATURES_OK status\n"); + return -EIO; + } + + return 0; +} + +u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid) +{ + struct ifcvf_lm_cfg __iomem *ifcvf_lm; + void __iomem *avail_idx_addr; + u16 last_avail_idx; + u32 q_pair_id; + + ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; + q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2); + avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2]; + last_avail_idx = ifc_ioread16(avail_idx_addr); + + return last_avail_idx; +} + +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num) +{ + struct ifcvf_lm_cfg __iomem *ifcvf_lm; + void __iomem *avail_idx_addr; + u32 q_pair_id; + + ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; + q_pair_id = qid / (IFCVF_MAX_QUEUE_PAIRS * 2); + avail_idx_addr = &ifcvf_lm->vring_lm_cfg[q_pair_id].idx_addr[qid % 2]; + hw->vring[qid].last_avail_idx = num; + ifc_iowrite16(num, 
avail_idx_addr); + + return 0; +} + +static int ifcvf_hw_enable(struct ifcvf_hw *hw) +{ + struct ifcvf_lm_cfg __iomem *ifcvf_lm; + struct virtio_pci_common_cfg __iomem *cfg; + struct ifcvf_adapter *ifcvf; + u32 i; + + ifcvf_lm = (struct ifcvf_lm_cfg __iomem *)hw->lm_cfg; + ifcvf = vf_to_adapter(hw); + cfg = hw->common_cfg; + ifc_iowrite16(IFCVF_MSI_CONFIG_OFF, &cfg->msix_config); + + if (ifc_ioread16(&cfg->msix_config) == VIRTIO_MSI_NO_VECTOR) { + IFCVF_ERR(ifcvf->pdev, "No msix vector for device config\n"); + return -EINVAL; + } + + for (i = 0; i < hw->nr_vring; i++) { + if (!hw->vring[i].ready) + break; + + ifc_iowrite16(i, &cfg->queue_select); + ifc_iowrite64_twopart(hw->vring[i].desc, &cfg->queue_desc_lo, + &cfg->queue_desc_hi); + ifc_iowrite64_twopart(hw->vring[i].avail, &cfg->queue_avail_lo, + &cfg->queue_avail_hi); + ifc_iowrite64_twopart(hw->vring[i].used, &cfg->queue_used_lo, + &cfg->queue_used_hi); + ifc_iowrite16(hw->vring[i].size, &cfg->queue_size); + ifc_iowrite16(i + IFCVF_MSI_QUEUE_OFF, &cfg->queue_msix_vector); + + if (ifc_ioread16(&cfg->queue_msix_vector) == + VIRTIO_MSI_NO_VECTOR) { + IFCVF_ERR(ifcvf->pdev, + "No msix vector for queue %u\n", i); + return -EINVAL; + } + + ifcvf_set_vq_state(hw, i, hw->vring[i].last_avail_idx); + ifc_iowrite16(1, &cfg->queue_enable); + } + + return 0; +} + +static void ifcvf_hw_disable(struct ifcvf_hw *hw) +{ + struct virtio_pci_common_cfg __iomem *cfg; + u32 i; + + cfg = hw->common_cfg; + ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->msix_config); + + for (i = 0; i < hw->nr_vring; i++) { + ifc_iowrite16(i, &cfg->queue_select); + ifc_iowrite16(VIRTIO_MSI_NO_VECTOR, &cfg->queue_msix_vector); + } + + ifc_ioread16(&cfg->queue_msix_vector); +} + +int ifcvf_start_hw(struct ifcvf_hw *hw) +{ + ifcvf_reset(hw); + ifcvf_add_status(hw, VIRTIO_CONFIG_S_ACKNOWLEDGE); + ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER); + + if (ifcvf_config_features(hw) < 0) + return -EINVAL; + + if (ifcvf_hw_enable(hw) < 0) + return -EINVAL; + + ifcvf_add_status(hw, VIRTIO_CONFIG_S_DRIVER_OK); + + return 0; +} + +void ifcvf_stop_hw(struct ifcvf_hw *hw) +{ + ifcvf_hw_disable(hw); + ifcvf_reset(hw); +} + +void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid) +{ + ifc_iowrite16(qid, hw->vring[qid].notify_addr); +} diff --git a/drivers/vdpa/ifcvf/ifcvf_base.h b/drivers/vdpa/ifcvf/ifcvf_base.h new file mode 100644 index 000000000000..e80307092351 --- /dev/null +++ b/drivers/vdpa/ifcvf/ifcvf_base.h @@ -0,0 +1,118 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Intel IFC VF NIC driver for virtio dataplane offloading + * + * Copyright (C) 2020 Intel Corporation. + * + * Author: Zhu Lingshan <lingshan.zhu@intel.com> + * + */ + +#ifndef _IFCVF_H_ +#define _IFCVF_H_ + +#include <linux/pci.h> +#include <linux/pci_regs.h> +#include <linux/vdpa.h> +#include <uapi/linux/virtio_net.h> +#include <uapi/linux/virtio_config.h> +#include <uapi/linux/virtio_pci.h> + +#define IFCVF_VENDOR_ID 0x1AF4 +#define IFCVF_DEVICE_ID 0x1041 +#define IFCVF_SUBSYS_VENDOR_ID 0x8086 +#define IFCVF_SUBSYS_DEVICE_ID 0x001A + +#define IFCVF_SUPPORTED_FEATURES \ + ((1ULL << VIRTIO_NET_F_MAC) | \ + (1ULL << VIRTIO_F_ANY_LAYOUT) | \ + (1ULL << VIRTIO_F_VERSION_1) | \ + (1ULL << VIRTIO_F_ORDER_PLATFORM) | \ + (1ULL << VIRTIO_F_IOMMU_PLATFORM) | \ + (1ULL << VIRTIO_NET_F_MRG_RXBUF)) + +/* Only one queue pair for now. 
*/ +#define IFCVF_MAX_QUEUE_PAIRS 1 + +#define IFCVF_QUEUE_ALIGNMENT PAGE_SIZE +#define IFCVF_QUEUE_MAX 32768 +#define IFCVF_MSI_CONFIG_OFF 0 +#define IFCVF_MSI_QUEUE_OFF 1 +#define IFCVF_PCI_MAX_RESOURCE 6 + +#define IFCVF_LM_CFG_SIZE 0x40 +#define IFCVF_LM_RING_STATE_OFFSET 0x20 +#define IFCVF_LM_BAR 4 + +#define IFCVF_ERR(pdev, fmt, ...) dev_err(&pdev->dev, fmt, ##__VA_ARGS__) +#define IFCVF_DBG(pdev, fmt, ...) dev_dbg(&pdev->dev, fmt, ##__VA_ARGS__) +#define IFCVF_INFO(pdev, fmt, ...) dev_info(&pdev->dev, fmt, ##__VA_ARGS__) + +#define ifcvf_private_to_vf(adapter) \ + (&((struct ifcvf_adapter *)adapter)->vf) + +#define IFCVF_MAX_INTR (IFCVF_MAX_QUEUE_PAIRS * 2 + 1) + +struct vring_info { + u64 desc; + u64 avail; + u64 used; + u16 size; + u16 last_avail_idx; + bool ready; + void __iomem *notify_addr; + u32 irq; + struct vdpa_callback cb; + char msix_name[256]; +}; + +struct ifcvf_hw { + u8 __iomem *isr; + /* Live migration */ + u8 __iomem *lm_cfg; + u16 nr_vring; + /* Notification bar number */ + u8 notify_bar; + /* Notificaiton bar address */ + void __iomem *notify_base; + u32 notify_off_multiplier; + u64 req_features; + struct virtio_pci_common_cfg __iomem *common_cfg; + void __iomem *net_cfg; + struct vring_info vring[IFCVF_MAX_QUEUE_PAIRS * 2]; + void __iomem * const *base; +}; + +struct ifcvf_adapter { + struct vdpa_device vdpa; + struct pci_dev *pdev; + struct ifcvf_hw vf; +}; + +struct ifcvf_vring_lm_cfg { + u32 idx_addr[2]; + u8 reserved[IFCVF_LM_CFG_SIZE - 8]; +}; + +struct ifcvf_lm_cfg { + u8 reserved[IFCVF_LM_RING_STATE_OFFSET]; + struct ifcvf_vring_lm_cfg vring_lm_cfg[IFCVF_MAX_QUEUE_PAIRS]; +}; + +int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *dev); +int ifcvf_start_hw(struct ifcvf_hw *hw); +void ifcvf_stop_hw(struct ifcvf_hw *hw); +void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid); +void ifcvf_read_net_config(struct ifcvf_hw *hw, u64 offset, + void *dst, int length); +void ifcvf_write_net_config(struct ifcvf_hw *hw, u64 offset, + const void *src, int length); +u8 ifcvf_get_status(struct ifcvf_hw *hw); +void ifcvf_set_status(struct ifcvf_hw *hw, u8 status); +void io_write64_twopart(u64 val, u32 *lo, u32 *hi); +void ifcvf_reset(struct ifcvf_hw *hw); +u64 ifcvf_get_features(struct ifcvf_hw *hw); +u64 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid); +int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u64 num); +struct ifcvf_adapter *vf_to_adapter(struct ifcvf_hw *hw); +#endif /* _IFCVF_H_ */ diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c new file mode 100644 index 000000000000..8d54dc5b08d2 --- /dev/null +++ b/drivers/vdpa/ifcvf/ifcvf_main.c @@ -0,0 +1,435 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel IFC VF NIC driver for virtio dataplane offloading + * + * Copyright (C) 2020 Intel Corporation. 
+ * + * Author: Zhu Lingshan <lingshan.zhu@intel.com> + * + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/sysfs.h> +#include "ifcvf_base.h" + +#define VERSION_STRING "0.1" +#define DRIVER_AUTHOR "Intel Corporation" +#define IFCVF_DRIVER_NAME "ifcvf" + +static irqreturn_t ifcvf_intr_handler(int irq, void *arg) +{ + struct vring_info *vring = arg; + + if (vring->cb.callback) + return vring->cb.callback(vring->cb.private); + + return IRQ_HANDLED; +} + +static int ifcvf_start_datapath(void *private) +{ + struct ifcvf_hw *vf = ifcvf_private_to_vf(private); + struct ifcvf_adapter *ifcvf; + u8 status; + int ret; + + ifcvf = vf_to_adapter(vf); + vf->nr_vring = IFCVF_MAX_QUEUE_PAIRS * 2; + ret = ifcvf_start_hw(vf); + if (ret < 0) { + status = ifcvf_get_status(vf); + status |= VIRTIO_CONFIG_S_FAILED; + ifcvf_set_status(vf, status); + } + + return ret; +} + +static int ifcvf_stop_datapath(void *private) +{ + struct ifcvf_hw *vf = ifcvf_private_to_vf(private); + int i; + + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) + vf->vring[i].cb.callback = NULL; + + ifcvf_stop_hw(vf); + + return 0; +} + +static void ifcvf_reset_vring(struct ifcvf_adapter *adapter) +{ + struct ifcvf_hw *vf = ifcvf_private_to_vf(adapter); + int i; + + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { + vf->vring[i].last_avail_idx = 0; + vf->vring[i].desc = 0; + vf->vring[i].avail = 0; + vf->vring[i].used = 0; + vf->vring[i].ready = 0; + vf->vring[i].cb.callback = NULL; + vf->vring[i].cb.private = NULL; + } + + ifcvf_reset(vf); +} + +static struct ifcvf_adapter *vdpa_to_adapter(struct vdpa_device *vdpa_dev) +{ + return container_of(vdpa_dev, struct ifcvf_adapter, vdpa); +} + +static struct ifcvf_hw *vdpa_to_vf(struct vdpa_device *vdpa_dev) +{ + struct ifcvf_adapter *adapter = vdpa_to_adapter(vdpa_dev); + + return &adapter->vf; +} + +static u64 ifcvf_vdpa_get_features(struct vdpa_device *vdpa_dev) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + u64 features; + + features = ifcvf_get_features(vf) & IFCVF_SUPPORTED_FEATURES; + + return features; +} + +static int ifcvf_vdpa_set_features(struct vdpa_device *vdpa_dev, u64 features) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + vf->req_features = features; + + return 0; +} + +static u8 ifcvf_vdpa_get_status(struct vdpa_device *vdpa_dev) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return ifcvf_get_status(vf); +} + +static void ifcvf_vdpa_set_status(struct vdpa_device *vdpa_dev, u8 status) +{ + struct ifcvf_adapter *adapter; + struct ifcvf_hw *vf; + + vf = vdpa_to_vf(vdpa_dev); + adapter = dev_get_drvdata(vdpa_dev->dev.parent); + + if (status == 0) { + ifcvf_stop_datapath(adapter); + ifcvf_reset_vring(adapter); + return; + } + + if (status & VIRTIO_CONFIG_S_DRIVER_OK) { + if (ifcvf_start_datapath(adapter) < 0) + IFCVF_ERR(adapter->pdev, + "Failed to set ifcvf vdpa status %u\n", + status); + } + + ifcvf_set_status(vf, status); +} + +static u16 ifcvf_vdpa_get_vq_num_max(struct vdpa_device *vdpa_dev) +{ + return IFCVF_QUEUE_MAX; +} + +static u64 ifcvf_vdpa_get_vq_state(struct vdpa_device *vdpa_dev, u16 qid) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return ifcvf_get_vq_state(vf, qid); +} + +static int ifcvf_vdpa_set_vq_state(struct vdpa_device *vdpa_dev, u16 qid, + u64 num) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return ifcvf_set_vq_state(vf, qid, num); +} + +static void ifcvf_vdpa_set_vq_cb(struct vdpa_device *vdpa_dev, u16 qid, + struct vdpa_callback *cb) +{ + struct ifcvf_hw *vf = 
vdpa_to_vf(vdpa_dev); + + vf->vring[qid].cb = *cb; +} + +static void ifcvf_vdpa_set_vq_ready(struct vdpa_device *vdpa_dev, + u16 qid, bool ready) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + vf->vring[qid].ready = ready; +} + +static bool ifcvf_vdpa_get_vq_ready(struct vdpa_device *vdpa_dev, u16 qid) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return vf->vring[qid].ready; +} + +static void ifcvf_vdpa_set_vq_num(struct vdpa_device *vdpa_dev, u16 qid, + u32 num) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + vf->vring[qid].size = num; +} + +static int ifcvf_vdpa_set_vq_address(struct vdpa_device *vdpa_dev, u16 qid, + u64 desc_area, u64 driver_area, + u64 device_area) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + vf->vring[qid].desc = desc_area; + vf->vring[qid].avail = driver_area; + vf->vring[qid].used = device_area; + + return 0; +} + +static void ifcvf_vdpa_kick_vq(struct vdpa_device *vdpa_dev, u16 qid) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + ifcvf_notify_queue(vf, qid); +} + +static u32 ifcvf_vdpa_get_generation(struct vdpa_device *vdpa_dev) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + return ioread8(&vf->common_cfg->config_generation); +} + +static u32 ifcvf_vdpa_get_device_id(struct vdpa_device *vdpa_dev) +{ + return VIRTIO_ID_NET; +} + +static u32 ifcvf_vdpa_get_vendor_id(struct vdpa_device *vdpa_dev) +{ + return IFCVF_SUBSYS_VENDOR_ID; +} + +static u16 ifcvf_vdpa_get_vq_align(struct vdpa_device *vdpa_dev) +{ + return IFCVF_QUEUE_ALIGNMENT; +} + +static void ifcvf_vdpa_get_config(struct vdpa_device *vdpa_dev, + unsigned int offset, + void *buf, unsigned int len) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + WARN_ON(offset + len > sizeof(struct virtio_net_config)); + ifcvf_read_net_config(vf, offset, buf, len); +} + +static void ifcvf_vdpa_set_config(struct vdpa_device *vdpa_dev, + unsigned int offset, const void *buf, + unsigned int len) +{ + struct ifcvf_hw *vf = vdpa_to_vf(vdpa_dev); + + WARN_ON(offset + len > sizeof(struct virtio_net_config)); + ifcvf_write_net_config(vf, offset, buf, len); +} + +static void ifcvf_vdpa_set_config_cb(struct vdpa_device *vdpa_dev, + struct vdpa_callback *cb) +{ + /* We don't support config interrupt */ +} + +/* + * IFCVF currently does't have on-chip IOMMU, so not + * implemented set_map()/dma_map()/dma_unmap() + */ +static const struct vdpa_config_ops ifc_vdpa_ops = { + .get_features = ifcvf_vdpa_get_features, + .set_features = ifcvf_vdpa_set_features, + .get_status = ifcvf_vdpa_get_status, + .set_status = ifcvf_vdpa_set_status, + .get_vq_num_max = ifcvf_vdpa_get_vq_num_max, + .get_vq_state = ifcvf_vdpa_get_vq_state, + .set_vq_state = ifcvf_vdpa_set_vq_state, + .set_vq_cb = ifcvf_vdpa_set_vq_cb, + .set_vq_ready = ifcvf_vdpa_set_vq_ready, + .get_vq_ready = ifcvf_vdpa_get_vq_ready, + .set_vq_num = ifcvf_vdpa_set_vq_num, + .set_vq_address = ifcvf_vdpa_set_vq_address, + .kick_vq = ifcvf_vdpa_kick_vq, + .get_generation = ifcvf_vdpa_get_generation, + .get_device_id = ifcvf_vdpa_get_device_id, + .get_vendor_id = ifcvf_vdpa_get_vendor_id, + .get_vq_align = ifcvf_vdpa_get_vq_align, + .get_config = ifcvf_vdpa_get_config, + .set_config = ifcvf_vdpa_set_config, + .set_config_cb = ifcvf_vdpa_set_config_cb, +}; + +static int ifcvf_request_irq(struct ifcvf_adapter *adapter) +{ + struct pci_dev *pdev = adapter->pdev; + struct ifcvf_hw *vf = &adapter->vf; + int vector, i, ret, irq; + + + for (i = 0; i < IFCVF_MAX_QUEUE_PAIRS * 2; i++) { + snprintf(vf->vring[i].msix_name, 256, "ifcvf[%s]-%d\n", + 
pci_name(pdev), i); + vector = i + IFCVF_MSI_QUEUE_OFF; + irq = pci_irq_vector(pdev, vector); + ret = devm_request_irq(&pdev->dev, irq, + ifcvf_intr_handler, 0, + vf->vring[i].msix_name, + &vf->vring[i]); + if (ret) { + IFCVF_ERR(pdev, + "Failed to request irq for vq %d\n", i); + return ret; + } + vf->vring[i].irq = irq; + } + + return 0; +} + +static void ifcvf_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static int ifcvf_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct ifcvf_adapter *adapter; + struct ifcvf_hw *vf; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + IFCVF_ERR(pdev, "Failed to enable device\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, BIT(0) | BIT(2) | BIT(4), + IFCVF_DRIVER_NAME); + if (ret) { + IFCVF_ERR(pdev, "Failed to request MMIO region\n"); + return ret; + } + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + IFCVF_ERR(pdev, "No usable DMA confiugration\n"); + return ret; + } + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + IFCVF_ERR(pdev, + "No usable coherent DMA confiugration\n"); + return ret; + } + + ret = pci_alloc_irq_vectors(pdev, IFCVF_MAX_INTR, + IFCVF_MAX_INTR, PCI_IRQ_MSIX); + if (ret < 0) { + IFCVF_ERR(pdev, "Failed to alloc irq vectors\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, ifcvf_free_irq_vectors, pdev); + if (ret) { + IFCVF_ERR(pdev, + "Failed for adding devres for freeing irq vectors\n"); + return ret; + } + + adapter = vdpa_alloc_device(struct ifcvf_adapter, vdpa, + dev, &ifc_vdpa_ops); + if (adapter == NULL) { + IFCVF_ERR(pdev, "Failed to allocate vDPA structure"); + return -ENOMEM; + } + + pci_set_master(pdev); + pci_set_drvdata(pdev, adapter); + + vf = &adapter->vf; + vf->base = pcim_iomap_table(pdev); + + adapter->pdev = pdev; + adapter->vdpa.dma_dev = &pdev->dev; + + ret = ifcvf_request_irq(adapter); + if (ret) { + IFCVF_ERR(pdev, "Failed to request MSI-X irq\n"); + goto err; + } + + ret = ifcvf_init_hw(vf, pdev); + if (ret) { + IFCVF_ERR(pdev, "Failed to init IFCVF hw\n"); + goto err; + } + + ret = vdpa_register_device(&adapter->vdpa); + if (ret) { + IFCVF_ERR(pdev, "Failed to register ifcvf to vdpa bus"); + goto err; + } + + return 0; + +err: + put_device(&adapter->vdpa.dev); + return ret; +} + +static void ifcvf_remove(struct pci_dev *pdev) +{ + struct ifcvf_adapter *adapter = pci_get_drvdata(pdev); + + vdpa_unregister_device(&adapter->vdpa); +} + +static struct pci_device_id ifcvf_pci_ids[] = { + { PCI_DEVICE_SUB(IFCVF_VENDOR_ID, + IFCVF_DEVICE_ID, + IFCVF_SUBSYS_VENDOR_ID, + IFCVF_SUBSYS_DEVICE_ID) }, + { 0 }, +}; +MODULE_DEVICE_TABLE(pci, ifcvf_pci_ids); + +static struct pci_driver ifcvf_driver = { + .name = IFCVF_DRIVER_NAME, + .id_table = ifcvf_pci_ids, + .probe = ifcvf_probe, + .remove = ifcvf_remove, +}; + +module_pci_driver(ifcvf_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_VERSION(VERSION_STRING); diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c new file mode 100644 index 000000000000..e9ed6a2b635b --- /dev/null +++ b/drivers/vdpa/vdpa.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * vDPA bus. + * + * Copyright (c) 2020, Red Hat. All rights reserved. 
+ * Author: Jason Wang <jasowang@redhat.com> + * + */ + +#include <linux/module.h> +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/vdpa.h> + +static DEFINE_IDA(vdpa_index_ida); + +static int vdpa_dev_probe(struct device *d) +{ + struct vdpa_device *vdev = dev_to_vdpa(d); + struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver); + int ret = 0; + + if (drv && drv->probe) + ret = drv->probe(vdev); + + return ret; +} + +static int vdpa_dev_remove(struct device *d) +{ + struct vdpa_device *vdev = dev_to_vdpa(d); + struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver); + + if (drv && drv->remove) + drv->remove(vdev); + + return 0; +} + +static struct bus_type vdpa_bus = { + .name = "vdpa", + .probe = vdpa_dev_probe, + .remove = vdpa_dev_remove, +}; + +static void vdpa_release_dev(struct device *d) +{ + struct vdpa_device *vdev = dev_to_vdpa(d); + const struct vdpa_config_ops *ops = vdev->config; + + if (ops->free) + ops->free(vdev); + + ida_simple_remove(&vdpa_index_ida, vdev->index); + kfree(vdev); +} + +/** + * __vdpa_alloc_device - allocate and initialize a vDPA device + * This allows the driver to do some preparation after the device is + * initialized but before it is registered. + * @parent: the parent device + * @config: the bus operations that are supported by this device + * @size: size of the parent structure that contains private data + * + * Driver should use the vdpa_alloc_device() wrapper macro instead of + * using this directly. + * + * Returns an error when parent/config/dma_dev is not set or when IDA + * allocation fails. + */ +struct vdpa_device *__vdpa_alloc_device(struct device *parent, + const struct vdpa_config_ops *config, + size_t size) +{ + struct vdpa_device *vdev; + int err = -EINVAL; + + if (!config) + goto err; + + if (!!config->dma_map != !!config->dma_unmap) + goto err; + + err = -ENOMEM; + vdev = kzalloc(size, GFP_KERNEL); + if (!vdev) + goto err; + + err = ida_simple_get(&vdpa_index_ida, 0, 0, GFP_KERNEL); + if (err < 0) + goto err_ida; + + vdev->dev.bus = &vdpa_bus; + vdev->dev.parent = parent; + vdev->dev.release = vdpa_release_dev; + vdev->index = err; + vdev->config = config; + + err = dev_set_name(&vdev->dev, "vdpa%u", vdev->index); + if (err) + goto err_name; + + device_initialize(&vdev->dev); + + return vdev; + +err_name: + ida_simple_remove(&vdpa_index_ida, vdev->index); +err_ida: + kfree(vdev); +err: + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(__vdpa_alloc_device); + +/** + * vdpa_register_device - register a vDPA device + * Callers must have made a successful call to vdpa_init_device() before. 
+ * @vdev: the vdpa device to be registered to the vDPA bus + * + * Returns an error when it fails to add to the vDPA bus + */ +int vdpa_register_device(struct vdpa_device *vdev) +{ + return device_add(&vdev->dev); +} +EXPORT_SYMBOL_GPL(vdpa_register_device); + +/** + * vdpa_unregister_device - unregister a vDPA device + * @vdev: the vdpa device to be unregistered from the vDPA bus + */ +void vdpa_unregister_device(struct vdpa_device *vdev) +{ + device_unregister(&vdev->dev); +} +EXPORT_SYMBOL_GPL(vdpa_unregister_device); + +/** + * __vdpa_register_driver - register a vDPA device driver + * @drv: the vdpa device driver to be registered + * @owner: module owner of the driver + * + * Returns an error when the registration fails + */ +int __vdpa_register_driver(struct vdpa_driver *drv, struct module *owner) +{ + drv->driver.bus = &vdpa_bus; + drv->driver.owner = owner; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(__vdpa_register_driver); + +/** + * vdpa_unregister_driver - unregister a vDPA device driver + * @drv: the vdpa device driver to be unregistered + */ +void vdpa_unregister_driver(struct vdpa_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL_GPL(vdpa_unregister_driver); + +static int vdpa_init(void) +{ + return bus_register(&vdpa_bus); +} + +static void __exit vdpa_exit(void) +{ + bus_unregister(&vdpa_bus); + ida_destroy(&vdpa_index_ida); +} +core_initcall(vdpa_init); +module_exit(vdpa_exit); + +MODULE_AUTHOR("Jason Wang <jasowang@redhat.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/vdpa/vdpa_sim/Makefile b/drivers/vdpa/vdpa_sim/Makefile new file mode 100644 index 000000000000..b40278f65e04 --- /dev/null +++ b/drivers/vdpa/vdpa_sim/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_VDPA_SIM) += vdpa_sim.o diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c new file mode 100644 index 000000000000..6e8a0cf2fdeb --- /dev/null +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -0,0 +1,629 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VDPA networking device simulator. + * + * Copyright (c) 2020, Red Hat Inc. All rights reserved. 
+ * Author: Jason Wang <jasowang@redhat.com> + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/slab.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/uuid.h> +#include <linux/iommu.h> +#include <linux/dma-mapping.h> +#include <linux/sysfs.h> +#include <linux/file.h> +#include <linux/etherdevice.h> +#include <linux/vringh.h> +#include <linux/vdpa.h> +#include <linux/vhost_iotlb.h> +#include <uapi/linux/virtio_config.h> +#include <uapi/linux/virtio_net.h> + +#define DRV_VERSION "0.1" +#define DRV_AUTHOR "Jason Wang <jasowang@redhat.com>" +#define DRV_DESC "vDPA Device Simulator" +#define DRV_LICENSE "GPL v2" + +struct vdpasim_virtqueue { + struct vringh vring; + struct vringh_kiov iov; + unsigned short head; + bool ready; + u64 desc_addr; + u64 device_addr; + u64 driver_addr; + u32 num; + void *private; + irqreturn_t (*cb)(void *data); +}; + +#define VDPASIM_QUEUE_ALIGN PAGE_SIZE +#define VDPASIM_QUEUE_MAX 256 +#define VDPASIM_DEVICE_ID 0x1 +#define VDPASIM_VENDOR_ID 0 +#define VDPASIM_VQ_NUM 0x2 +#define VDPASIM_NAME "vdpasim-netdev" + +static u64 vdpasim_features = (1ULL << VIRTIO_F_ANY_LAYOUT) | + (1ULL << VIRTIO_F_VERSION_1) | + (1ULL << VIRTIO_F_IOMMU_PLATFORM); + +/* State of each vdpasim device */ +struct vdpasim { + struct vdpa_device vdpa; + struct vdpasim_virtqueue vqs[2]; + struct work_struct work; + /* spinlock to synchronize virtqueue state */ + spinlock_t lock; + struct virtio_net_config config; + struct vhost_iotlb *iommu; + void *buffer; + u32 status; + u32 generation; + u64 features; +}; + +static struct vdpasim *vdpasim_dev; + +static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa) +{ + return container_of(vdpa, struct vdpasim, vdpa); +} + +static struct vdpasim *dev_to_sim(struct device *dev) +{ + struct vdpa_device *vdpa = dev_to_vdpa(dev); + + return vdpa_to_sim(vdpa); +} + +static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) +{ + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + int ret; + + ret = vringh_init_iotlb(&vq->vring, vdpasim_features, + VDPASIM_QUEUE_MAX, false, + (struct vring_desc *)(uintptr_t)vq->desc_addr, + (struct vring_avail *) + (uintptr_t)vq->driver_addr, + (struct vring_used *) + (uintptr_t)vq->device_addr); +} + +static void vdpasim_vq_reset(struct vdpasim_virtqueue *vq) +{ + vq->ready = 0; + vq->desc_addr = 0; + vq->driver_addr = 0; + vq->device_addr = 0; + vq->cb = NULL; + vq->private = NULL; + vringh_init_iotlb(&vq->vring, vdpasim_features, VDPASIM_QUEUE_MAX, + false, NULL, NULL, NULL); +} + +static void vdpasim_reset(struct vdpasim *vdpasim) +{ + int i; + + for (i = 0; i < VDPASIM_VQ_NUM; i++) + vdpasim_vq_reset(&vdpasim->vqs[i]); + + vhost_iotlb_reset(vdpasim->iommu); + + vdpasim->features = 0; + vdpasim->status = 0; + ++vdpasim->generation; +} + +static void vdpasim_work(struct work_struct *work) +{ + struct vdpasim *vdpasim = container_of(work, struct + vdpasim, work); + struct vdpasim_virtqueue *txq = &vdpasim->vqs[1]; + struct vdpasim_virtqueue *rxq = &vdpasim->vqs[0]; + size_t read, write, total_write; + int err; + int pkts = 0; + + spin_lock(&vdpasim->lock); + + if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK)) + goto out; + + if (!txq->ready || !rxq->ready) + goto out; + + while (true) { + total_write = 0; + err = vringh_getdesc_iotlb(&txq->vring, &txq->iov, NULL, + &txq->head, GFP_ATOMIC); + if (err <= 0) + break; + + err = 
vringh_getdesc_iotlb(&rxq->vring, NULL, &rxq->iov, + &rxq->head, GFP_ATOMIC); + if (err <= 0) { + vringh_complete_iotlb(&txq->vring, txq->head, 0); + break; + } + + while (true) { + read = vringh_iov_pull_iotlb(&txq->vring, &txq->iov, + vdpasim->buffer, + PAGE_SIZE); + if (read <= 0) + break; + + write = vringh_iov_push_iotlb(&rxq->vring, &rxq->iov, + vdpasim->buffer, read); + if (write <= 0) + break; + + total_write += write; + } + + /* Make sure data is wrote before advancing index */ + smp_wmb(); + + vringh_complete_iotlb(&txq->vring, txq->head, 0); + vringh_complete_iotlb(&rxq->vring, rxq->head, total_write); + + /* Make sure used is visible before rasing the interrupt. */ + smp_wmb(); + + local_bh_disable(); + if (txq->cb) + txq->cb(txq->private); + if (rxq->cb) + rxq->cb(rxq->private); + local_bh_enable(); + + if (++pkts > 4) { + schedule_work(&vdpasim->work); + goto out; + } + } + +out: + spin_unlock(&vdpasim->lock); +} + +static int dir_to_perm(enum dma_data_direction dir) +{ + int perm = -EFAULT; + + switch (dir) { + case DMA_FROM_DEVICE: + perm = VHOST_MAP_WO; + break; + case DMA_TO_DEVICE: + perm = VHOST_MAP_RO; + break; + case DMA_BIDIRECTIONAL: + perm = VHOST_MAP_RW; + break; + default: + break; + } + + return perm; +} + +static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct vdpasim *vdpasim = dev_to_sim(dev); + struct vhost_iotlb *iommu = vdpasim->iommu; + u64 pa = (page_to_pfn(page) << PAGE_SHIFT) + offset; + int ret, perm = dir_to_perm(dir); + + if (perm < 0) + return DMA_MAPPING_ERROR; + + /* For simplicity, use identical mapping to avoid e.g iova + * allocator. + */ + ret = vhost_iotlb_add_range(iommu, pa, pa + size - 1, + pa, dir_to_perm(dir)); + if (ret) + return DMA_MAPPING_ERROR; + + return (dma_addr_t)(pa); +} + +static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct vdpasim *vdpasim = dev_to_sim(dev); + struct vhost_iotlb *iommu = vdpasim->iommu; + + vhost_iotlb_del_range(iommu, (u64)dma_addr, + (u64)dma_addr + size - 1); +} + +static void *vdpasim_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_addr, gfp_t flag, + unsigned long attrs) +{ + struct vdpasim *vdpasim = dev_to_sim(dev); + struct vhost_iotlb *iommu = vdpasim->iommu; + void *addr = kmalloc(size, flag); + int ret; + + if (!addr) + *dma_addr = DMA_MAPPING_ERROR; + else { + u64 pa = virt_to_phys(addr); + + ret = vhost_iotlb_add_range(iommu, (u64)pa, + (u64)pa + size - 1, + pa, VHOST_MAP_RW); + if (ret) { + *dma_addr = DMA_MAPPING_ERROR; + kfree(addr); + addr = NULL; + } else + *dma_addr = (dma_addr_t)pa; + } + + return addr; +} + +static void vdpasim_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, + unsigned long attrs) +{ + struct vdpasim *vdpasim = dev_to_sim(dev); + struct vhost_iotlb *iommu = vdpasim->iommu; + + vhost_iotlb_del_range(iommu, (u64)dma_addr, + (u64)dma_addr + size - 1); + kfree(phys_to_virt((uintptr_t)dma_addr)); +} + +static const struct dma_map_ops vdpasim_dma_ops = { + .map_page = vdpasim_map_page, + .unmap_page = vdpasim_unmap_page, + .alloc = vdpasim_alloc_coherent, + .free = vdpasim_free_coherent, +}; + +static const struct vdpa_config_ops vdpasim_net_config_ops; + +static struct vdpasim *vdpasim_create(void) +{ + struct virtio_net_config *config; + struct vdpasim *vdpasim; + struct device *dev; + int ret = -ENOMEM; + 
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, + &vdpasim_net_config_ops); + if (!vdpasim) + goto err_alloc; + + INIT_WORK(&vdpasim->work, vdpasim_work); + spin_lock_init(&vdpasim->lock); + + dev = &vdpasim->vdpa.dev; + dev->coherent_dma_mask = DMA_BIT_MASK(64); + set_dma_ops(dev, &vdpasim_dma_ops); + + vdpasim->iommu = vhost_iotlb_alloc(2048, 0); + if (!vdpasim->iommu) + goto err_iommu; + + vdpasim->buffer = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!vdpasim->buffer) + goto err_iommu; + + config = &vdpasim->config; + config->mtu = 1500; + config->status = VIRTIO_NET_S_LINK_UP; + eth_random_addr(config->mac); + + vringh_set_iotlb(&vdpasim->vqs[0].vring, vdpasim->iommu); + vringh_set_iotlb(&vdpasim->vqs[1].vring, vdpasim->iommu); + + vdpasim->vdpa.dma_dev = dev; + ret = vdpa_register_device(&vdpasim->vdpa); + if (ret) + goto err_iommu; + + return vdpasim; + +err_iommu: + put_device(dev); +err_alloc: + return ERR_PTR(ret); +} + +static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx, + u64 desc_area, u64 driver_area, + u64 device_area) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + + vq->desc_addr = desc_area; + vq->driver_addr = driver_area; + vq->device_addr = device_area; + + return 0; +} + +static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + + vq->num = num; +} + +static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + + if (vq->ready) + schedule_work(&vdpasim->work); +} + +static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx, + struct vdpa_callback *cb) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + + vq->cb = cb->callback; + vq->private = cb->private; +} + +static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + + spin_lock(&vdpasim->lock); + vq->ready = ready; + if (vq->ready) + vdpasim_queue_ready(vdpasim, idx); + spin_unlock(&vdpasim->lock); +} + +static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + + return vq->ready; +} + +static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx, u64 state) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + struct vringh *vrh = &vq->vring; + + spin_lock(&vdpasim->lock); + vrh->last_avail_idx = state; + spin_unlock(&vdpasim->lock); + + return 0; +} + +static u64 vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx]; + struct vringh *vrh = &vq->vring; + + return vrh->last_avail_idx; +} + +static u16 vdpasim_get_vq_align(struct vdpa_device *vdpa) +{ + return VDPASIM_QUEUE_ALIGN; +} + +static u64 vdpasim_get_features(struct vdpa_device *vdpa) +{ + return vdpasim_features; +} + +static int vdpasim_set_features(struct vdpa_device *vdpa, u64 features) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + /* DMA mapping must be done by driver */ + if (!(features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) + return -EINVAL; + + vdpasim->features = features & 
vdpasim_features; + + return 0; +} + +static void vdpasim_set_config_cb(struct vdpa_device *vdpa, + struct vdpa_callback *cb) +{ + /* We don't support config interrupt */ +} + +static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa) +{ + return VDPASIM_QUEUE_MAX; +} + +static u32 vdpasim_get_device_id(struct vdpa_device *vdpa) +{ + return VDPASIM_DEVICE_ID; +} + +static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa) +{ + return VDPASIM_VENDOR_ID; +} + +static u8 vdpasim_get_status(struct vdpa_device *vdpa) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + u8 status; + + spin_lock(&vdpasim->lock); + status = vdpasim->status; + spin_unlock(&vdpasim->lock); + + return vdpasim->status; +} + +static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + spin_lock(&vdpasim->lock); + vdpasim->status = status; + if (status == 0) + vdpasim_reset(vdpasim); + spin_unlock(&vdpasim->lock); +} + +static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset, + void *buf, unsigned int len) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + if (offset + len < sizeof(struct virtio_net_config)) + memcpy(buf, &vdpasim->config + offset, len); +} + +static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset, + const void *buf, unsigned int len) +{ + /* No writable config supportted by vdpasim */ +} + +static u32 vdpasim_get_generation(struct vdpa_device *vdpa) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + return vdpasim->generation; +} + +static int vdpasim_set_map(struct vdpa_device *vdpa, + struct vhost_iotlb *iotlb) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + struct vhost_iotlb_map *map; + u64 start = 0ULL, last = 0ULL - 1; + int ret; + + vhost_iotlb_reset(vdpasim->iommu); + + for (map = vhost_iotlb_itree_first(iotlb, start, last); map; + map = vhost_iotlb_itree_next(map, start, last)) { + ret = vhost_iotlb_add_range(vdpasim->iommu, map->start, + map->last, map->addr, map->perm); + if (ret) + goto err; + } + return 0; + +err: + vhost_iotlb_reset(vdpasim->iommu); + return ret; +} + +static int vdpasim_dma_map(struct vdpa_device *vdpa, u64 iova, u64 size, + u64 pa, u32 perm) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + return vhost_iotlb_add_range(vdpasim->iommu, iova, + iova + size - 1, pa, perm); +} + +static int vdpasim_dma_unmap(struct vdpa_device *vdpa, u64 iova, u64 size) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + vhost_iotlb_del_range(vdpasim->iommu, iova, iova + size - 1); + + return 0; +} + +static void vdpasim_free(struct vdpa_device *vdpa) +{ + struct vdpasim *vdpasim = vdpa_to_sim(vdpa); + + cancel_work_sync(&vdpasim->work); + kfree(vdpasim->buffer); + if (vdpasim->iommu) + vhost_iotlb_free(vdpasim->iommu); +} + +static const struct vdpa_config_ops vdpasim_net_config_ops = { + .set_vq_address = vdpasim_set_vq_address, + .set_vq_num = vdpasim_set_vq_num, + .kick_vq = vdpasim_kick_vq, + .set_vq_cb = vdpasim_set_vq_cb, + .set_vq_ready = vdpasim_set_vq_ready, + .get_vq_ready = vdpasim_get_vq_ready, + .set_vq_state = vdpasim_set_vq_state, + .get_vq_state = vdpasim_get_vq_state, + .get_vq_align = vdpasim_get_vq_align, + .get_features = vdpasim_get_features, + .set_features = vdpasim_set_features, + .set_config_cb = vdpasim_set_config_cb, + .get_vq_num_max = vdpasim_get_vq_num_max, + .get_device_id = vdpasim_get_device_id, + .get_vendor_id = vdpasim_get_vendor_id, + .get_status = vdpasim_get_status, + .set_status = vdpasim_set_status, + .get_config = 
vdpasim_get_config, + .set_config = vdpasim_set_config, + .get_generation = vdpasim_get_generation, + .set_map = vdpasim_set_map, + .dma_map = vdpasim_dma_map, + .dma_unmap = vdpasim_dma_unmap, + .free = vdpasim_free, +}; + +static int __init vdpasim_dev_init(void) +{ + vdpasim_dev = vdpasim_create(); + + if (!IS_ERR(vdpasim_dev)) + return 0; + + return PTR_ERR(vdpasim_dev); +} + +static void __exit vdpasim_dev_exit(void) +{ + struct vdpa_device *vdpa = &vdpasim_dev->vdpa; + + vdpa_unregister_device(vdpa); +} + +module_init(vdpasim_dev_init) +module_exit(vdpasim_dev_exit) + +MODULE_VERSION(DRV_VERSION); +MODULE_LICENSE(DRV_LICENSE); +MODULE_AUTHOR(DRV_AUTHOR); +MODULE_DESCRIPTION(DRV_DESC); diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 379a02c36e37..6c6b37b5c04e 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -9,7 +9,6 @@ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define dev_fmt pr_fmt #include <linux/device.h> #include <linux/eventfd.h> @@ -54,6 +53,12 @@ module_param(disable_idle_d3, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(disable_idle_d3, "Disable using the PCI D3 low power state for idle, unused devices"); +static bool enable_sriov; +#ifdef CONFIG_PCI_IOV +module_param(enable_sriov, bool, 0644); +MODULE_PARM_DESC(enable_sriov, "Enable support for SR-IOV configuration. Enabling SR-IOV on a PF typically requires support of the userspace PF driver, enabling VFs without such support may result in non-functional VFs or PF."); +#endif + static inline bool vfio_vga_disabled(void) { #ifdef CONFIG_VFIO_PCI_VGA @@ -466,6 +471,44 @@ out: vfio_pci_set_power_state(vdev, PCI_D3hot); } +static struct pci_driver vfio_pci_driver; + +static struct vfio_pci_device *get_pf_vdev(struct vfio_pci_device *vdev, + struct vfio_device **pf_dev) +{ + struct pci_dev *physfn = pci_physfn(vdev->pdev); + + if (!vdev->pdev->is_virtfn) + return NULL; + + *pf_dev = vfio_device_get_from_dev(&physfn->dev); + if (!*pf_dev) + return NULL; + + if (pci_dev_driver(physfn) != &vfio_pci_driver) { + vfio_device_put(*pf_dev); + return NULL; + } + + return vfio_device_data(*pf_dev); +} + +static void vfio_pci_vf_token_user_add(struct vfio_pci_device *vdev, int val) +{ + struct vfio_device *pf_dev; + struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev); + + if (!pf_vdev) + return; + + mutex_lock(&pf_vdev->vf_token->lock); + pf_vdev->vf_token->users += val; + WARN_ON(pf_vdev->vf_token->users < 0); + mutex_unlock(&pf_vdev->vf_token->lock); + + vfio_device_put(pf_dev); +} + static void vfio_pci_release(void *device_data) { struct vfio_pci_device *vdev = device_data; @@ -473,6 +516,7 @@ static void vfio_pci_release(void *device_data) mutex_lock(&vdev->reflck->lock); if (!(--vdev->refcnt)) { + vfio_pci_vf_token_user_add(vdev, -1); vfio_spapr_pci_eeh_release(vdev->pdev); vfio_pci_disable(vdev); } @@ -498,6 +542,7 @@ static int vfio_pci_open(void *device_data) goto error; vfio_spapr_pci_eeh_open(vdev->pdev); + vfio_pci_vf_token_user_add(vdev, 1); } vdev->refcnt++; error: @@ -1140,6 +1185,65 @@ hot_reset_release: return vfio_pci_ioeventfd(vdev, ioeventfd.offset, ioeventfd.data, count, ioeventfd.fd); + } else if (cmd == VFIO_DEVICE_FEATURE) { + struct vfio_device_feature feature; + uuid_t uuid; + + minsz = offsetofend(struct vfio_device_feature, flags); + + if (copy_from_user(&feature, (void __user *)arg, minsz)) + return -EFAULT; + + if (feature.argsz < minsz) + return -EINVAL; + + /* Check unknown flags */ + if (feature.flags & ~(VFIO_DEVICE_FEATURE_MASK | + 
VFIO_DEVICE_FEATURE_SET | + VFIO_DEVICE_FEATURE_GET | + VFIO_DEVICE_FEATURE_PROBE)) + return -EINVAL; + + /* GET & SET are mutually exclusive except with PROBE */ + if (!(feature.flags & VFIO_DEVICE_FEATURE_PROBE) && + (feature.flags & VFIO_DEVICE_FEATURE_SET) && + (feature.flags & VFIO_DEVICE_FEATURE_GET)) + return -EINVAL; + + switch (feature.flags & VFIO_DEVICE_FEATURE_MASK) { + case VFIO_DEVICE_FEATURE_PCI_VF_TOKEN: + if (!vdev->vf_token) + return -ENOTTY; + + /* + * We do not support GET of the VF Token UUID as this + * could expose the token of the previous device user. + */ + if (feature.flags & VFIO_DEVICE_FEATURE_GET) + return -EINVAL; + + if (feature.flags & VFIO_DEVICE_FEATURE_PROBE) + return 0; + + /* Don't SET unless told to do so */ + if (!(feature.flags & VFIO_DEVICE_FEATURE_SET)) + return -EINVAL; + + if (feature.argsz < minsz + sizeof(uuid)) + return -EINVAL; + + if (copy_from_user(&uuid, (void __user *)(arg + minsz), + sizeof(uuid))) + return -EFAULT; + + mutex_lock(&vdev->vf_token->lock); + uuid_copy(&vdev->vf_token->uuid, &uuid); + mutex_unlock(&vdev->vf_token->lock); + + return 0; + default: + return -ENOTTY; + } } return -ENOTTY; @@ -1278,6 +1382,150 @@ static void vfio_pci_request(void *device_data, unsigned int count) mutex_unlock(&vdev->igate); } +static int vfio_pci_validate_vf_token(struct vfio_pci_device *vdev, + bool vf_token, uuid_t *uuid) +{ + /* + * There's always some degree of trust or collaboration between SR-IOV + * PF and VFs, even if just that the PF hosts the SR-IOV capability and + * can disrupt VFs with a reset, but often the PF has more explicit + * access to deny service to the VF or access data passed through the + * VF. We therefore require an opt-in via a shared VF token (UUID) to + * represent this trust. This both prevents that a VF driver might + * assume the PF driver is a trusted, in-kernel driver, and also that + * a PF driver might be replaced with a rogue driver, unknown to in-use + * VF drivers. + * + * Therefore when presented with a VF, if the PF is a vfio device and + * it is bound to the vfio-pci driver, the user needs to provide a VF + * token to access the device, in the form of appending a vf_token to + * the device name, for example: + * + * "0000:04:10.0 vf_token=bd8d9d2b-5a5f-4f5a-a211-f591514ba1f3" + * + * When presented with a PF which has VFs in use, the user must also + * provide the current VF token to prove collaboration with existing + * VF users. If VFs are not in use, the VF token provided for the PF + * device will act to set the VF token. + * + * If the VF token is provided but unused, an error is generated. 
+ */ + if (!vdev->pdev->is_virtfn && !vdev->vf_token && !vf_token) + return 0; /* No VF token provided or required */ + + if (vdev->pdev->is_virtfn) { + struct vfio_device *pf_dev; + struct vfio_pci_device *pf_vdev = get_pf_vdev(vdev, &pf_dev); + bool match; + + if (!pf_vdev) { + if (!vf_token) + return 0; /* PF is not vfio-pci, no VF token */ + + pci_info_ratelimited(vdev->pdev, + "VF token incorrectly provided, PF not bound to vfio-pci\n"); + return -EINVAL; + } + + if (!vf_token) { + vfio_device_put(pf_dev); + pci_info_ratelimited(vdev->pdev, + "VF token required to access device\n"); + return -EACCES; + } + + mutex_lock(&pf_vdev->vf_token->lock); + match = uuid_equal(uuid, &pf_vdev->vf_token->uuid); + mutex_unlock(&pf_vdev->vf_token->lock); + + vfio_device_put(pf_dev); + + if (!match) { + pci_info_ratelimited(vdev->pdev, + "Incorrect VF token provided for device\n"); + return -EACCES; + } + } else if (vdev->vf_token) { + mutex_lock(&vdev->vf_token->lock); + if (vdev->vf_token->users) { + if (!vf_token) { + mutex_unlock(&vdev->vf_token->lock); + pci_info_ratelimited(vdev->pdev, + "VF token required to access device\n"); + return -EACCES; + } + + if (!uuid_equal(uuid, &vdev->vf_token->uuid)) { + mutex_unlock(&vdev->vf_token->lock); + pci_info_ratelimited(vdev->pdev, + "Incorrect VF token provided for device\n"); + return -EACCES; + } + } else if (vf_token) { + uuid_copy(&vdev->vf_token->uuid, uuid); + } + + mutex_unlock(&vdev->vf_token->lock); + } else if (vf_token) { + pci_info_ratelimited(vdev->pdev, + "VF token incorrectly provided, not a PF or VF\n"); + return -EINVAL; + } + + return 0; +} + +#define VF_TOKEN_ARG "vf_token=" + +static int vfio_pci_match(void *device_data, char *buf) +{ + struct vfio_pci_device *vdev = device_data; + bool vf_token = false; + uuid_t uuid; + int ret; + + if (strncmp(pci_name(vdev->pdev), buf, strlen(pci_name(vdev->pdev)))) + return 0; /* No match */ + + if (strlen(buf) > strlen(pci_name(vdev->pdev))) { + buf += strlen(pci_name(vdev->pdev)); + + if (*buf != ' ') + return 0; /* No match: non-whitespace after name */ + + while (*buf) { + if (*buf == ' ') { + buf++; + continue; + } + + if (!vf_token && !strncmp(buf, VF_TOKEN_ARG, + strlen(VF_TOKEN_ARG))) { + buf += strlen(VF_TOKEN_ARG); + + if (strlen(buf) < UUID_STRING_LEN) + return -EINVAL; + + ret = uuid_parse(buf, &uuid); + if (ret) + return ret; + + vf_token = true; + buf += UUID_STRING_LEN; + } else { + /* Unknown/duplicate option */ + return -EINVAL; + } + } + } + + ret = vfio_pci_validate_vf_token(vdev, vf_token, &uuid); + if (ret) + return ret; + + return 1; /* Match */ +} + static const struct vfio_device_ops vfio_pci_ops = { .name = "vfio-pci", .open = vfio_pci_open, @@ -1287,10 +1535,40 @@ static const struct vfio_device_ops vfio_pci_ops = { .write = vfio_pci_write, .mmap = vfio_pci_mmap, .request = vfio_pci_request, + .match = vfio_pci_match, }; static int vfio_pci_reflck_attach(struct vfio_pci_device *vdev); static void vfio_pci_reflck_put(struct vfio_pci_reflck *reflck); +static struct pci_driver vfio_pci_driver; + +static int vfio_pci_bus_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct vfio_pci_device *vdev = container_of(nb, + struct vfio_pci_device, nb); + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_dev *physfn = pci_physfn(pdev); + + if (action == BUS_NOTIFY_ADD_DEVICE && + pdev->is_virtfn && physfn == vdev->pdev) { + pci_info(vdev->pdev, "Captured SR-IOV VF %s driver_override\n", + pci_name(pdev)); + 
pdev->driver_override = kasprintf(GFP_KERNEL, "%s", + vfio_pci_ops.name); + } else if (action == BUS_NOTIFY_BOUND_DRIVER && + pdev->is_virtfn && physfn == vdev->pdev) { + struct pci_driver *drv = pci_dev_driver(pdev); + + if (drv && drv != &vfio_pci_driver) + pci_warn(vdev->pdev, + "VF %s bound to driver %s while PF bound to vfio-pci\n", + pci_name(pdev), drv->name); + } + + return 0; +} static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { @@ -1302,12 +1580,12 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -EINVAL; /* - * Prevent binding to PFs with VFs enabled, this too easily allows - * userspace instance with VFs and PFs from the same device, which - * cannot work. Disabling SR-IOV here would initiate removing the - * VFs, which would unbind the driver, which is prone to blocking - * if that VF is also in use by vfio-pci. Just reject these PFs - * and let the user sort it out. + * Prevent binding to PFs with VFs enabled, the VFs might be in use + * by the host or other users. We cannot capture the VFs if they + * already exist, nor can we track VF users. Disabling SR-IOV here + * would initiate removing the VFs, which would unbind the driver, + * which is prone to blocking if that VF is also in use by vfio-pci. + * Just reject these PFs and let the user sort it out. */ if (pci_num_vf(pdev)) { pci_warn(pdev, "Cannot bind to PF with SR-IOV enabled\n"); @@ -1320,8 +1598,8 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); if (!vdev) { - vfio_iommu_group_put(group, &pdev->dev); - return -ENOMEM; + ret = -ENOMEM; + goto out_group_put; } vdev->pdev = pdev; @@ -1332,18 +1610,27 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) INIT_LIST_HEAD(&vdev->ioeventfds_list); ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev); - if (ret) { - vfio_iommu_group_put(group, &pdev->dev); - kfree(vdev); - return ret; - } + if (ret) + goto out_free; ret = vfio_pci_reflck_attach(vdev); - if (ret) { - vfio_del_group_dev(&pdev->dev); - vfio_iommu_group_put(group, &pdev->dev); - kfree(vdev); - return ret; + if (ret) + goto out_del_group_dev; + + if (pdev->is_physfn) { + vdev->vf_token = kzalloc(sizeof(*vdev->vf_token), GFP_KERNEL); + if (!vdev->vf_token) { + ret = -ENOMEM; + goto out_reflck; + } + + mutex_init(&vdev->vf_token->lock); + uuid_gen(&vdev->vf_token->uuid); + + vdev->nb.notifier_call = vfio_pci_bus_notifier; + ret = bus_register_notifier(&pci_bus_type, &vdev->nb); + if (ret) + goto out_vf_token; } if (vfio_pci_is_vga(pdev)) { @@ -1369,16 +1656,39 @@ static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) } return ret; + +out_vf_token: + kfree(vdev->vf_token); +out_reflck: + vfio_pci_reflck_put(vdev->reflck); +out_del_group_dev: + vfio_del_group_dev(&pdev->dev); +out_free: + kfree(vdev); +out_group_put: + vfio_iommu_group_put(group, &pdev->dev); + return ret; } static void vfio_pci_remove(struct pci_dev *pdev) { struct vfio_pci_device *vdev; + pci_disable_sriov(pdev); + vdev = vfio_del_group_dev(&pdev->dev); if (!vdev) return; + if (vdev->vf_token) { + WARN_ON(vdev->vf_token->users); + mutex_destroy(&vdev->vf_token->lock); + kfree(vdev->vf_token); + } + + if (vdev->nb.notifier_call) + bus_unregister_notifier(&pci_bus_type, &vdev->nb); + vfio_pci_reflck_put(vdev->reflck); vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev); @@ -1427,16 +1737,48 @@ static pci_ers_result_t 
vfio_pci_aer_err_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_CAN_RECOVER; } +static int vfio_pci_sriov_configure(struct pci_dev *pdev, int nr_virtfn) +{ + struct vfio_pci_device *vdev; + struct vfio_device *device; + int ret = 0; + + might_sleep(); + + if (!enable_sriov) + return -ENOENT; + + device = vfio_device_get_from_dev(&pdev->dev); + if (!device) + return -ENODEV; + + vdev = vfio_device_data(device); + if (!vdev) { + vfio_device_put(device); + return -ENODEV; + } + + if (nr_virtfn == 0) + pci_disable_sriov(pdev); + else + ret = pci_enable_sriov(pdev, nr_virtfn); + + vfio_device_put(device); + + return ret < 0 ? ret : nr_virtfn; +} + static const struct pci_error_handlers vfio_err_handlers = { .error_detected = vfio_pci_aer_err_detected, }; static struct pci_driver vfio_pci_driver = { - .name = "vfio-pci", - .id_table = NULL, /* only dynamic ids */ - .probe = vfio_pci_probe, - .remove = vfio_pci_remove, - .err_handler = &vfio_err_handlers, + .name = "vfio-pci", + .id_table = NULL, /* only dynamic ids */ + .probe = vfio_pci_probe, + .remove = vfio_pci_remove, + .sriov_configure = vfio_pci_sriov_configure, + .err_handler = &vfio_err_handlers, }; static DEFINE_MUTEX(reflck_lock); diff --git a/drivers/vfio/pci/vfio_pci_nvlink2.c b/drivers/vfio/pci/vfio_pci_nvlink2.c index df4d96038cd4..ed20d73cc27c 100644 --- a/drivers/vfio/pci/vfio_pci_nvlink2.c +++ b/drivers/vfio/pci/vfio_pci_nvlink2.c @@ -422,8 +422,14 @@ int vfio_pci_ibm_npu2_init(struct vfio_pci_device *vdev) if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", nvlink_index, &mmio_atsd)) { - dev_warn(&vdev->pdev->dev, "No available ATSD found\n"); - mmio_atsd = 0; + if (of_property_read_u64_index(hose->dn, "ibm,mmio-atsd", 0, + &mmio_atsd)) { + dev_warn(&vdev->pdev->dev, "No available ATSD found\n"); + mmio_atsd = 0; + } else { + dev_warn(&vdev->pdev->dev, + "Using fallback ibm,mmio-atsd[0] for ATSD.\n"); + } } if (of_property_read_u64(npu_node, "ibm,device-tgt-addr", &tgt)) { diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 8a2c7607d513..36ec69081ecd 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -12,6 +12,8 @@ #include <linux/pci.h> #include <linux/irqbypass.h> #include <linux/types.h> +#include <linux/uuid.h> +#include <linux/notifier.h> #ifndef VFIO_PCI_PRIVATE_H #define VFIO_PCI_PRIVATE_H @@ -84,6 +86,12 @@ struct vfio_pci_reflck { struct mutex lock; }; +struct vfio_pci_vf_token { + struct mutex lock; + uuid_t uuid; + int users; +}; + struct vfio_pci_device { struct pci_dev *pdev; void __iomem *barmap[PCI_STD_NUM_BARS]; @@ -122,6 +130,8 @@ struct vfio_pci_device { struct list_head dummy_resources_list; struct mutex ioeventfds_lock; struct list_head ioeventfds_list; + struct vfio_pci_vf_token *vf_token; + struct notifier_block nb; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) diff --git a/drivers/vfio/platform/vfio_platform.c b/drivers/vfio/platform/vfio_platform.c index ae1a5eb98620..1e2769010089 100644 --- a/drivers/vfio/platform/vfio_platform.c +++ b/drivers/vfio/platform/vfio_platform.c @@ -44,7 +44,7 @@ static int get_platform_irq(struct vfio_platform_device *vdev, int i) { struct platform_device *pdev = (struct platform_device *) vdev->opaque; - return platform_get_irq(pdev, i); + return platform_get_irq_optional(pdev, i); } static int vfio_platform_probe(struct platform_device *pdev) diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index c8482624ca34..765e0e5d83ed 100644 --- 
a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -875,11 +875,23 @@ EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group, char *buf) { - struct vfio_device *it, *device = NULL; + struct vfio_device *it, *device = ERR_PTR(-ENODEV); mutex_lock(&group->device_lock); list_for_each_entry(it, &group->device_list, group_next) { - if (!strcmp(dev_name(it->dev), buf)) { + int ret; + + if (it->ops->match) { + ret = it->ops->match(it->device_data, buf); + if (ret < 0) { + device = ERR_PTR(ret); + break; + } + } else { + ret = !strcmp(dev_name(it->dev), buf); + } + + if (ret) { device = it; vfio_device_get(device); break; @@ -1430,8 +1442,8 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) return -EPERM; device = vfio_device_get_from_name(group, buf); - if (!device) - return -ENODEV; + if (IS_ERR(device)) + return PTR_ERR(device); ret = device->ops->open(device->device_data); if (ret) { @@ -1720,6 +1732,44 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep) } EXPORT_SYMBOL_GPL(vfio_group_get_external_user); +/** + * External user API, exported by symbols to be linked dynamically. + * The external user passes in a device pointer + * to verify that: + * - A VFIO group is assiciated with the device; + * - IOMMU is set for the group. + * If both checks passed, vfio_group_get_external_user_from_dev() + * increments the container user counter to prevent the VFIO group + * from disposal before external user exits and returns the pointer + * to the VFIO group. + * + * When the external user finishes using the VFIO group, it calls + * vfio_group_put_external_user() to release the VFIO group and + * decrement the container user counter. + * + * @dev [in] : device + * Return error PTR or pointer to VFIO group. + */ + +struct vfio_group *vfio_group_get_external_user_from_dev(struct device *dev) +{ + struct vfio_group *group; + int ret; + + group = vfio_group_get_from_dev(dev); + if (!group) + return ERR_PTR(-ENODEV); + + ret = vfio_group_add_container_user(group); + if (ret) { + vfio_group_put(group); + return ERR_PTR(ret); + } + + return group; +} +EXPORT_SYMBOL_GPL(vfio_group_get_external_user_from_dev); + void vfio_group_put_external_user(struct vfio_group *group) { vfio_group_try_dissolve_container(group); @@ -1961,6 +2011,146 @@ err_unpin_pages: } EXPORT_SYMBOL(vfio_unpin_pages); +/* + * Pin a set of guest IOVA PFNs and return their associated host PFNs for a + * VFIO group. + * + * The caller needs to call vfio_group_get_external_user() or + * vfio_group_get_external_user_from_dev() prior to calling this interface, + * so as to prevent the VFIO group from disposal in the middle of the call. + * But it can keep the reference to the VFIO group for several calls into + * this interface. + * After finishing using of the VFIO group, the caller needs to release the + * VFIO group by calling vfio_group_put_external_user(). + * + * @group [in] : VFIO group + * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be pinned. + * @npage [in] : count of elements in user_iova_pfn array. + * This count should not be greater + * VFIO_PIN_PAGES_MAX_ENTRIES. + * @prot [in] : protection flags + * @phys_pfn [out] : array of host PFNs + * Return error or number of pages pinned. 
+ */ +int vfio_group_pin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage, + int prot, unsigned long *phys_pfn) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + if (!group || !user_iova_pfn || !phys_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->pin_pages)) + ret = driver->ops->pin_pages(container->iommu_data, + user_iova_pfn, npage, + prot, phys_pfn); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_group_pin_pages); + +/* + * Unpin a set of guest IOVA PFNs for a VFIO group. + * + * The caller needs to call vfio_group_get_external_user() or + * vfio_group_get_external_user_from_dev() prior to calling this interface, + * so as to prevent the VFIO group from disposal in the middle of the call. + * But it can keep the reference to the VFIO group for several calls into + * this interface. + * After finishing using of the VFIO group, the caller needs to release the + * VFIO group by calling vfio_group_put_external_user(). + * + * @group [in] : vfio group + * @user_iova_pfn [in] : array of user/guest IOVA PFNs to be unpinned. + * @npage [in] : count of elements in user_iova_pfn array. + * This count should not be greater than + * VFIO_PIN_PAGES_MAX_ENTRIES. + * Return error or number of pages unpinned. + */ +int vfio_group_unpin_pages(struct vfio_group *group, + unsigned long *user_iova_pfn, int npage) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret; + + if (!group || !user_iova_pfn || !npage) + return -EINVAL; + + if (npage > VFIO_PIN_PAGES_MAX_ENTRIES) + return -E2BIG; + + container = group->container; + driver = container->iommu_driver; + if (likely(driver && driver->ops->unpin_pages)) + ret = driver->ops->unpin_pages(container->iommu_data, + user_iova_pfn, npage); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_group_unpin_pages); + + +/* + * This interface allows the CPUs to perform some sort of virtual DMA on + * behalf of the device. + * + * CPUs read/write from/into a range of IOVAs pointing to user space memory + * into/from a kernel buffer. + * + * As the read/write of user space memory is conducted via the CPUs and is + * not a real device DMA, it is not necessary to pin the user space memory. + * + * The caller needs to call vfio_group_get_external_user() or + * vfio_group_get_external_user_from_dev() prior to calling this interface, + * so as to prevent the VFIO group from disposal in the middle of the call. + * But it can keep the reference to the VFIO group for several calls into + * this interface. + * After finishing using of the VFIO group, the caller needs to release the + * VFIO group by calling vfio_group_put_external_user(). + * + * @group [in] : VFIO group + * @user_iova [in] : base IOVA of a user space buffer + * @data [in] : pointer to kernel buffer + * @len [in] : kernel buffer length + * @write : indicate read or write + * Return error code on failure or 0 on success. 
+ */ +int vfio_dma_rw(struct vfio_group *group, dma_addr_t user_iova, + void *data, size_t len, bool write) +{ + struct vfio_container *container; + struct vfio_iommu_driver *driver; + int ret = 0; + + if (!group || !data || len <= 0) + return -EINVAL; + + container = group->container; + driver = container->iommu_driver; + + if (likely(driver && driver->ops->dma_rw)) + ret = driver->ops->dma_rw(container->iommu_data, + user_iova, data, len, write); + else + ret = -ENOTTY; + + return ret; +} +EXPORT_SYMBOL(vfio_dma_rw); + static int vfio_register_iommu_notifier(struct vfio_group *group, unsigned long *events, struct notifier_block *nb) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index a177bf2c6683..85b32c325282 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -27,6 +27,7 @@ #include <linux/iommu.h> #include <linux/module.h> #include <linux/mm.h> +#include <linux/mmu_context.h> #include <linux/rbtree.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> @@ -1786,7 +1787,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, if (resv_msi) { ret = iommu_get_msi_cookie(domain->domain, resv_msi_base); - if (ret) + if (ret && ret != -ENODEV) goto out_detach; } @@ -2305,6 +2306,80 @@ static int vfio_iommu_type1_unregister_notifier(void *iommu_data, return blocking_notifier_chain_unregister(&iommu->notifier, nb); } +static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu, + dma_addr_t user_iova, void *data, + size_t count, bool write, + size_t *copied) +{ + struct mm_struct *mm; + unsigned long vaddr; + struct vfio_dma *dma; + bool kthread = current->mm == NULL; + size_t offset; + + *copied = 0; + + dma = vfio_find_dma(iommu, user_iova, 1); + if (!dma) + return -EINVAL; + + if ((write && !(dma->prot & IOMMU_WRITE)) || + !(dma->prot & IOMMU_READ)) + return -EPERM; + + mm = get_task_mm(dma->task); + + if (!mm) + return -EPERM; + + if (kthread) + use_mm(mm); + else if (current->mm != mm) + goto out; + + offset = user_iova - dma->iova; + + if (count > dma->size - offset) + count = dma->size - offset; + + vaddr = dma->vaddr + offset; + + if (write) + *copied = __copy_to_user((void __user *)vaddr, data, + count) ? 0 : count; + else + *copied = __copy_from_user(data, (void __user *)vaddr, + count) ? 0 : count; + if (kthread) + unuse_mm(mm); +out: + mmput(mm); + return *copied ? 
0 : -EFAULT; +} + +static int vfio_iommu_type1_dma_rw(void *iommu_data, dma_addr_t user_iova, + void *data, size_t count, bool write) +{ + struct vfio_iommu *iommu = iommu_data; + int ret = 0; + size_t done; + + mutex_lock(&iommu->lock); + while (count > 0) { + ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data, + count, write, &done); + if (ret) + break; + + count -= done; + data += done; + user_iova += done; + } + + mutex_unlock(&iommu->lock); + return ret; +} + static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { .name = "vfio-iommu-type1", .owner = THIS_MODULE, @@ -2317,6 +2392,7 @@ static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = { .unpin_pages = vfio_iommu_type1_unpin_pages, .register_notifier = vfio_iommu_type1_register_notifier, .unregister_notifier = vfio_iommu_type1_unregister_notifier, + .dma_rw = vfio_iommu_type1_dma_rw, }; static int __init vfio_iommu_type1_init(void) diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig index 3d03ccbd1adc..362b832f5338 100644 --- a/drivers/vhost/Kconfig +++ b/drivers/vhost/Kconfig @@ -1,4 +1,29 @@ # SPDX-License-Identifier: GPL-2.0-only +config VHOST_IOTLB + tristate + help + Generic IOTLB implementation for vhost and vringh. + +config VHOST_RING + tristate + select VHOST_IOTLB + help + This option is selected by any driver which needs to access + the host side of a virtio ring. + +config VHOST + tristate + select VHOST_IOTLB + help + This option is selected by any driver which needs to access + the core of vhost. + +menuconfig VHOST_MENU + bool "VHOST drivers" + default y + +if VHOST_MENU + config VHOST_NET tristate "Host kernel accelerator for virtio net" depends on NET && EVENTFD && (TUN || !TUN) && (TAP || !TAP) @@ -23,8 +48,8 @@ config VHOST_SCSI config VHOST_VSOCK tristate "vhost virtio-vsock driver" depends on VSOCKETS && EVENTFD - select VIRTIO_VSOCKETS_COMMON select VHOST + select VIRTIO_VSOCKETS_COMMON default n ---help--- This kernel module can be loaded in the host kernel to provide AF_VSOCK @@ -34,11 +59,17 @@ config VHOST_VSOCK To compile this driver as a module, choose M here: the module will be called vhost_vsock. -config VHOST - tristate - ---help--- - This option is selected by any driver which needs to access - the core of vhost. +config VHOST_VDPA + tristate "Vhost driver for vDPA-based backend" + depends on EVENTFD + select VHOST + select VDPA + help + This kernel module can be loaded in host kernel to accelerate + guest virtio devices with the vDPA-based backends. + + To compile this driver as a module, choose M here: the module + will be called vhost_vdpa. config VHOST_CROSS_ENDIAN_LEGACY bool "Cross-endian support for vhost" @@ -54,3 +85,5 @@ config VHOST_CROSS_ENDIAN_LEGACY adds some overhead, it is disabled by default. If unsure, say "N". + +endif diff --git a/drivers/vhost/Kconfig.vringh b/drivers/vhost/Kconfig.vringh deleted file mode 100644 index c1fe36a9b8d4..000000000000 --- a/drivers/vhost/Kconfig.vringh +++ /dev/null @@ -1,6 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config VHOST_RING - tristate - ---help--- - This option is selected by any driver which needs to access - the host side of a virtio ring. 
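Editor's note: the vfio.c comment blocks in the hunks above describe the calling convention for the new external-user DMA interface: a vendor or mdev driver takes a group reference with vfio_group_get_external_user_from_dev(), performs the copy with vfio_dma_rw(), then drops the reference with vfio_group_put_external_user(). The sketch below (not part of this patch) illustrates that sequence; the example_read_guest() helper, its device argument and the IOVA/length values are hypothetical — only the three exported vfio calls and their signatures are taken from the diff.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/vfio.h>

/*
 * Read @len bytes from guest IOVA @iova into @buf on behalf of @dev.
 * @dev is assumed to belong to a VFIO group whose IOMMU container has
 * already been set up by userspace.
 */
static int example_read_guest(struct device *dev, dma_addr_t iova,
			      void *buf, size_t len)
{
	struct vfio_group *group;
	int ret;

	/*
	 * Take a group/container reference so the VFIO group cannot be
	 * disposed of while the copy is in flight.
	 */
	group = vfio_group_get_external_user_from_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* write == false: copy from the user/guest IOVA range into @buf */
	ret = vfio_dma_rw(group, iova, buf, len, false);

	/* Drop the reference taken above */
	vfio_group_put_external_user(group);

	return ret;
}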
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile index 6c6df24f770c..f3e1897cce85 100644 --- a/drivers/vhost/Makefile +++ b/drivers/vhost/Makefile @@ -10,4 +10,10 @@ vhost_vsock-y := vsock.o obj-$(CONFIG_VHOST_RING) += vringh.o +obj-$(CONFIG_VHOST_VDPA) += vhost_vdpa.o +vhost_vdpa-y := vdpa.o + obj-$(CONFIG_VHOST) += vhost.o + +obj-$(CONFIG_VHOST_IOTLB) += vhost_iotlb.o +vhost_iotlb-y := iotlb.o diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c new file mode 100644 index 000000000000..1f0ca6e44410 --- /dev/null +++ b/drivers/vhost/iotlb.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Red Hat, Inc. + * Author: Jason Wang <jasowang@redhat.com> + * + * IOTLB implementation for vhost. + */ +#include <linux/slab.h> +#include <linux/vhost_iotlb.h> +#include <linux/module.h> + +#define MOD_VERSION "0.1" +#define MOD_DESC "VHOST IOTLB" +#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>" +#define MOD_LICENSE "GPL v2" + +#define START(map) ((map)->start) +#define LAST(map) ((map)->last) + +INTERVAL_TREE_DEFINE(struct vhost_iotlb_map, + rb, __u64, __subtree_last, + START, LAST, static inline, vhost_iotlb_itree); + +/** + * vhost_iotlb_map_free - remove a map node and free it + * @iotlb: the IOTLB + * @map: the map that want to be remove and freed + */ +void vhost_iotlb_map_free(struct vhost_iotlb *iotlb, + struct vhost_iotlb_map *map) +{ + vhost_iotlb_itree_remove(map, &iotlb->root); + list_del(&map->link); + kfree(map); + iotlb->nmaps--; +} +EXPORT_SYMBOL_GPL(vhost_iotlb_map_free); + +/** + * vhost_iotlb_add_range - add a new range to vhost IOTLB + * @iotlb: the IOTLB + * @start: start of the IOVA range + * @last: last of IOVA range + * @addr: the address that is mapped to @start + * @perm: access permission of this range + * + * Returns an error last is smaller than start or memory allocation + * fails + */ +int vhost_iotlb_add_range(struct vhost_iotlb *iotlb, + u64 start, u64 last, + u64 addr, unsigned int perm) +{ + struct vhost_iotlb_map *map; + + if (last < start) + return -EFAULT; + + if (iotlb->limit && + iotlb->nmaps == iotlb->limit && + iotlb->flags & VHOST_IOTLB_FLAG_RETIRE) { + map = list_first_entry(&iotlb->list, typeof(*map), link); + vhost_iotlb_map_free(iotlb, map); + } + + map = kmalloc(sizeof(*map), GFP_ATOMIC); + if (!map) + return -ENOMEM; + + map->start = start; + map->size = last - start + 1; + map->last = last; + map->addr = addr; + map->perm = perm; + + iotlb->nmaps++; + vhost_iotlb_itree_insert(map, &iotlb->root); + + INIT_LIST_HEAD(&map->link); + list_add_tail(&map->link, &iotlb->list); + + return 0; +} +EXPORT_SYMBOL_GPL(vhost_iotlb_add_range); + +/** + * vring_iotlb_del_range - delete overlapped ranges from vhost IOTLB + * @iotlb: the IOTLB + * @start: start of the IOVA range + * @last: last of IOVA range + */ +void vhost_iotlb_del_range(struct vhost_iotlb *iotlb, u64 start, u64 last) +{ + struct vhost_iotlb_map *map; + + while ((map = vhost_iotlb_itree_iter_first(&iotlb->root, + start, last))) + vhost_iotlb_map_free(iotlb, map); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_del_range); + +/** + * vhost_iotlb_alloc - add a new vhost IOTLB + * @limit: maximum number of IOTLB entries + * @flags: VHOST_IOTLB_FLAG_XXX + * + * Returns an error is memory allocation fails + */ +struct vhost_iotlb *vhost_iotlb_alloc(unsigned int limit, unsigned int flags) +{ + struct vhost_iotlb *iotlb = kzalloc(sizeof(*iotlb), GFP_KERNEL); + + if (!iotlb) + return NULL; + + iotlb->root = RB_ROOT_CACHED; + iotlb->limit = limit; + iotlb->nmaps = 0; + 
iotlb->flags = flags; + INIT_LIST_HEAD(&iotlb->list); + + return iotlb; +} +EXPORT_SYMBOL_GPL(vhost_iotlb_alloc); + +/** + * vhost_iotlb_reset - reset vhost IOTLB (free all IOTLB entries) + * @iotlb: the IOTLB to be reset + */ +void vhost_iotlb_reset(struct vhost_iotlb *iotlb) +{ + vhost_iotlb_del_range(iotlb, 0ULL, 0ULL - 1); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_reset); + +/** + * vhost_iotlb_free - reset and free vhost IOTLB + * @iotlb: the IOTLB to be freed + */ +void vhost_iotlb_free(struct vhost_iotlb *iotlb) +{ + if (iotlb) { + vhost_iotlb_reset(iotlb); + kfree(iotlb); + } +} +EXPORT_SYMBOL_GPL(vhost_iotlb_free); + +/** + * vhost_iotlb_itree_first - return the first overlapped range + * @iotlb: the IOTLB + * @start: start of IOVA range + * @end: end of IOVA range + */ +struct vhost_iotlb_map * +vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last) +{ + return vhost_iotlb_itree_iter_first(&iotlb->root, start, last); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first); + +/** + * vhost_iotlb_itree_first - return the next overlapped range + * @iotlb: the IOTLB + * @start: start of IOVA range + * @end: end of IOVA range + */ +struct vhost_iotlb_map * +vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last) +{ + return vhost_iotlb_itree_iter_next(map, start, last); +} +EXPORT_SYMBOL_GPL(vhost_iotlb_itree_next); + +MODULE_VERSION(MOD_VERSION); +MODULE_DESCRIPTION(MOD_DESC); +MODULE_AUTHOR(MOD_AUTHOR); +MODULE_LICENSE(MOD_LICENSE); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 18e205eeb9af..87469d67ede8 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -1324,7 +1324,8 @@ static int vhost_net_open(struct inode *inode, struct file *f) } vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, UIO_MAXIOV + VHOST_NET_BATCH, - VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT); + VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, + NULL); vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); @@ -1586,7 +1587,7 @@ static long vhost_net_reset_owner(struct vhost_net *n) struct socket *tx_sock = NULL; struct socket *rx_sock = NULL; long err; - struct vhost_umem *umem; + struct vhost_iotlb *umem; mutex_lock(&n->dev.mutex); err = vhost_dev_check_owner(&n->dev); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 0b949a14bce3..7653667a8cdc 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1628,7 +1628,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; } vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV, - VHOST_SCSI_WEIGHT, 0); + VHOST_SCSI_WEIGHT, 0, NULL); vhost_scsi_init_inflight(vs, NULL); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c new file mode 100644 index 000000000000..421f02a8530a --- /dev/null +++ b/drivers/vhost/vdpa.c @@ -0,0 +1,883 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018-2020 Intel Corporation. + * Copyright (C) 2020 Red Hat, Inc. + * + * Author: Tiwei Bie <tiwei.bie@intel.com> + * Jason Wang <jasowang@redhat.com> + * + * Thanks Michael S. Tsirkin for the valuable comments and + * suggestions. And thanks to Cunming Liang and Zhihong Wang for all + * their supports. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/cdev.h> +#include <linux/device.h> +#include <linux/iommu.h> +#include <linux/uuid.h> +#include <linux/vdpa.h> +#include <linux/nospec.h> +#include <linux/vhost.h> +#include <linux/virtio_net.h> + +#include "vhost.h" + +enum { + VHOST_VDPA_FEATURES = + (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) | + (1ULL << VIRTIO_F_ANY_LAYOUT) | + (1ULL << VIRTIO_F_VERSION_1) | + (1ULL << VIRTIO_F_IOMMU_PLATFORM) | + (1ULL << VIRTIO_F_RING_PACKED) | + (1ULL << VIRTIO_F_ORDER_PLATFORM) | + (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | + (1ULL << VIRTIO_RING_F_EVENT_IDX), + + VHOST_VDPA_NET_FEATURES = VHOST_VDPA_FEATURES | + (1ULL << VIRTIO_NET_F_CSUM) | + (1ULL << VIRTIO_NET_F_GUEST_CSUM) | + (1ULL << VIRTIO_NET_F_MTU) | + (1ULL << VIRTIO_NET_F_MAC) | + (1ULL << VIRTIO_NET_F_GUEST_TSO4) | + (1ULL << VIRTIO_NET_F_GUEST_TSO6) | + (1ULL << VIRTIO_NET_F_GUEST_ECN) | + (1ULL << VIRTIO_NET_F_GUEST_UFO) | + (1ULL << VIRTIO_NET_F_HOST_TSO4) | + (1ULL << VIRTIO_NET_F_HOST_TSO6) | + (1ULL << VIRTIO_NET_F_HOST_ECN) | + (1ULL << VIRTIO_NET_F_HOST_UFO) | + (1ULL << VIRTIO_NET_F_MRG_RXBUF) | + (1ULL << VIRTIO_NET_F_STATUS) | + (1ULL << VIRTIO_NET_F_SPEED_DUPLEX), +}; + +/* Currently, only network backend w/o multiqueue is supported. */ +#define VHOST_VDPA_VQ_MAX 2 + +#define VHOST_VDPA_DEV_MAX (1U << MINORBITS) + +struct vhost_vdpa { + struct vhost_dev vdev; + struct iommu_domain *domain; + struct vhost_virtqueue *vqs; + struct completion completion; + struct vdpa_device *vdpa; + struct device dev; + struct cdev cdev; + atomic_t opened; + int nvqs; + int virtio_id; + int minor; +}; + +static DEFINE_IDA(vhost_vdpa_ida); + +static dev_t vhost_vdpa_major; + +static const u64 vhost_vdpa_features[] = { + [VIRTIO_ID_NET] = VHOST_VDPA_NET_FEATURES, +}; + +static void handle_vq_kick(struct vhost_work *work) +{ + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev); + const struct vdpa_config_ops *ops = v->vdpa->config; + + ops->kick_vq(v->vdpa, vq - v->vqs); +} + +static irqreturn_t vhost_vdpa_virtqueue_cb(void *private) +{ + struct vhost_virtqueue *vq = private; + struct eventfd_ctx *call_ctx = vq->call_ctx; + + if (call_ctx) + eventfd_signal(call_ctx, 1); + + return IRQ_HANDLED; +} + +static void vhost_vdpa_reset(struct vhost_vdpa *v) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + + ops->set_status(vdpa, 0); +} + +static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u32 device_id; + + device_id = ops->get_device_id(vdpa); + + if (copy_to_user(argp, &device_id, sizeof(device_id))) + return -EFAULT; + + return 0; +} + +static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u8 status; + + status = ops->get_status(vdpa); + + if (copy_to_user(statusp, &status, sizeof(status))) + return -EFAULT; + + return 0; +} + +static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u8 status; + + if (copy_from_user(&status, statusp, sizeof(status))) + return -EFAULT; + + /* + * Userspace shouldn't remove status bits unless reset the + * status to 0. 
+ */ + if (status != 0 && (ops->get_status(vdpa) & ~status) != 0) + return -EINVAL; + + ops->set_status(vdpa, status); + + return 0; +} + +static int vhost_vdpa_config_validate(struct vhost_vdpa *v, + struct vhost_vdpa_config *c) +{ + long size = 0; + + switch (v->virtio_id) { + case VIRTIO_ID_NET: + size = sizeof(struct virtio_net_config); + break; + } + + if (c->len == 0) + return -EINVAL; + + if (c->len > size - c->off) + return -E2BIG; + + return 0; +} + +static long vhost_vdpa_get_config(struct vhost_vdpa *v, + struct vhost_vdpa_config __user *c) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct vhost_vdpa_config config; + unsigned long size = offsetof(struct vhost_vdpa_config, buf); + u8 *buf; + + if (copy_from_user(&config, c, size)) + return -EFAULT; + if (vhost_vdpa_config_validate(v, &config)) + return -EINVAL; + buf = kvzalloc(config.len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ops->get_config(vdpa, config.off, buf, config.len); + + if (copy_to_user(c->buf, buf, config.len)) { + kvfree(buf); + return -EFAULT; + } + + kvfree(buf); + return 0; +} + +static long vhost_vdpa_set_config(struct vhost_vdpa *v, + struct vhost_vdpa_config __user *c) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct vhost_vdpa_config config; + unsigned long size = offsetof(struct vhost_vdpa_config, buf); + u8 *buf; + + if (copy_from_user(&config, c, size)) + return -EFAULT; + if (vhost_vdpa_config_validate(v, &config)) + return -EINVAL; + buf = kvzalloc(config.len, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, c->buf, config.len)) { + kvfree(buf); + return -EFAULT; + } + + ops->set_config(vdpa, config.off, buf, config.len); + + kvfree(buf); + return 0; +} + +static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u64 features; + + features = ops->get_features(vdpa); + features &= vhost_vdpa_features[v->virtio_id]; + + if (copy_to_user(featurep, &features, sizeof(features))) + return -EFAULT; + + return 0; +} + +static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u64 features; + + /* + * It's not allowed to change the features after they have + * been negotiated. 
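These status and feature handlers define the userspace contract for the new /dev/vhost-vdpa-N character device: status bits may only be added unless the whole status is reset to zero, and features are frozen once FEATURES_OK has been set. A hedged userspace sketch of that negotiation order, assuming the VHOST_VDPA_* ioctl numbers that the same series adds to <linux/vhost.h> and a vDPA net device already bound to this driver:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>
#include <linux/virtio_config.h>

int example_negotiate(const char *path)        /* e.g. "/dev/vhost-vdpa-0" */
{
        uint64_t features;
        uint32_t device_id;
        uint8_t status;
        int fd = open(path, O_RDWR);

        if (fd < 0)
                return -1;

        if (ioctl(fd, VHOST_SET_OWNER, NULL))
                goto err;
        if (ioctl(fd, VHOST_VDPA_GET_DEVICE_ID, &device_id))
                goto err;

        /* Status bits may only be added; clearing one requires writing 0 first. */
        status = 0;
        if (ioctl(fd, VHOST_VDPA_SET_STATUS, &status))
                goto err;
        status = VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER;
        if (ioctl(fd, VHOST_VDPA_SET_STATUS, &status))
                goto err;

        /* Features cannot change once FEATURES_OK is set. */
        if (ioctl(fd, VHOST_GET_FEATURES, &features))
                goto err;
        if (ioctl(fd, VHOST_SET_FEATURES, &features))
                goto err;
        status |= VIRTIO_CONFIG_S_FEATURES_OK;
        if (ioctl(fd, VHOST_VDPA_SET_STATUS, &status))
                goto err;

        printf("virtio device id %u, features 0x%llx\n",
               device_id, (unsigned long long)features);
        return fd;
err:
        close(fd);
        return -1;
}

Vring setup (VHOST_SET_VRING_*, VHOST_VDPA_SET_VRING_ENABLE) and the final DRIVER_OK write would follow the same ioctl pattern.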
+ */ + if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK) + return -EBUSY; + + if (copy_from_user(&features, featurep, sizeof(features))) + return -EFAULT; + + if (features & ~vhost_vdpa_features[v->virtio_id]) + return -EINVAL; + + if (ops->set_features(vdpa, features)) + return -EINVAL; + + return 0; +} + +static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + u16 num; + + num = ops->get_vq_num_max(vdpa); + + if (copy_to_user(argp, &num, sizeof(num))) + return -EFAULT; + + return 0; +} + +static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, + void __user *argp) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_callback cb; + struct vhost_virtqueue *vq; + struct vhost_vring_state s; + u8 status; + u32 idx; + long r; + + r = get_user(idx, (u32 __user *)argp); + if (r < 0) + return r; + + if (idx >= v->nvqs) + return -ENOBUFS; + + idx = array_index_nospec(idx, v->nvqs); + vq = &v->vqs[idx]; + + status = ops->get_status(vdpa); + + if (cmd == VHOST_VDPA_SET_VRING_ENABLE) { + if (copy_from_user(&s, argp, sizeof(s))) + return -EFAULT; + ops->set_vq_ready(vdpa, idx, s.num); + return 0; + } + + if (cmd == VHOST_GET_VRING_BASE) + vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx); + + r = vhost_vring_ioctl(&v->vdev, cmd, argp); + if (r) + return r; + + switch (cmd) { + case VHOST_SET_VRING_ADDR: + if (ops->set_vq_address(vdpa, idx, + (u64)(uintptr_t)vq->desc, + (u64)(uintptr_t)vq->avail, + (u64)(uintptr_t)vq->used)) + r = -EINVAL; + break; + + case VHOST_SET_VRING_BASE: + if (ops->set_vq_state(vdpa, idx, vq->last_avail_idx)) + r = -EINVAL; + break; + + case VHOST_SET_VRING_CALL: + if (vq->call_ctx) { + cb.callback = vhost_vdpa_virtqueue_cb; + cb.private = vq; + } else { + cb.callback = NULL; + cb.private = NULL; + } + ops->set_vq_cb(vdpa, idx, &cb); + break; + + case VHOST_SET_VRING_NUM: + ops->set_vq_num(vdpa, idx, vq->num); + break; + } + + return r; +} + +static long vhost_vdpa_unlocked_ioctl(struct file *filep, + unsigned int cmd, unsigned long arg) +{ + struct vhost_vdpa *v = filep->private_data; + struct vhost_dev *d = &v->vdev; + void __user *argp = (void __user *)arg; + long r; + + mutex_lock(&d->mutex); + + switch (cmd) { + case VHOST_VDPA_GET_DEVICE_ID: + r = vhost_vdpa_get_device_id(v, argp); + break; + case VHOST_VDPA_GET_STATUS: + r = vhost_vdpa_get_status(v, argp); + break; + case VHOST_VDPA_SET_STATUS: + r = vhost_vdpa_set_status(v, argp); + break; + case VHOST_VDPA_GET_CONFIG: + r = vhost_vdpa_get_config(v, argp); + break; + case VHOST_VDPA_SET_CONFIG: + r = vhost_vdpa_set_config(v, argp); + break; + case VHOST_GET_FEATURES: + r = vhost_vdpa_get_features(v, argp); + break; + case VHOST_SET_FEATURES: + r = vhost_vdpa_set_features(v, argp); + break; + case VHOST_VDPA_GET_VRING_NUM: + r = vhost_vdpa_get_vring_num(v, argp); + break; + case VHOST_SET_LOG_BASE: + case VHOST_SET_LOG_FD: + r = -ENOIOCTLCMD; + break; + default: + r = vhost_dev_ioctl(&v->vdev, cmd, argp); + if (r == -ENOIOCTLCMD) + r = vhost_vdpa_vring_ioctl(v, cmd, argp); + break; + } + + mutex_unlock(&d->mutex); + return r; +} + +static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last) +{ + struct vhost_dev *dev = &v->vdev; + struct vhost_iotlb *iotlb = dev->iotlb; + struct vhost_iotlb_map *map; + struct page *page; + unsigned long pfn, pinned; + + while ((map = vhost_iotlb_itree_first(iotlb, start, 
last)) != NULL) { + pinned = map->size >> PAGE_SHIFT; + for (pfn = map->addr >> PAGE_SHIFT; + pinned > 0; pfn++, pinned--) { + page = pfn_to_page(pfn); + if (map->perm & VHOST_ACCESS_WO) + set_page_dirty_lock(page); + unpin_user_page(page); + } + atomic64_sub(map->size >> PAGE_SHIFT, &dev->mm->pinned_vm); + vhost_iotlb_map_free(iotlb, map); + } +} + +static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v) +{ + struct vhost_dev *dev = &v->vdev; + + vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1); + kfree(dev->iotlb); + dev->iotlb = NULL; +} + +static int perm_to_iommu_flags(u32 perm) +{ + int flags = 0; + + switch (perm) { + case VHOST_ACCESS_WO: + flags |= IOMMU_WRITE; + break; + case VHOST_ACCESS_RO: + flags |= IOMMU_READ; + break; + case VHOST_ACCESS_RW: + flags |= (IOMMU_WRITE | IOMMU_READ); + break; + default: + WARN(1, "invalidate vhost IOTLB permission\n"); + break; + } + + return flags | IOMMU_CACHE; +} + +static int vhost_vdpa_map(struct vhost_vdpa *v, + u64 iova, u64 size, u64 pa, u32 perm) +{ + struct vhost_dev *dev = &v->vdev; + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + int r = 0; + + r = vhost_iotlb_add_range(dev->iotlb, iova, iova + size - 1, + pa, perm); + if (r) + return r; + + if (ops->dma_map) + r = ops->dma_map(vdpa, iova, size, pa, perm); + else if (ops->set_map) + r = ops->set_map(vdpa, dev->iotlb); + else + r = iommu_map(v->domain, iova, pa, size, + perm_to_iommu_flags(perm)); + + return r; +} + +static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size) +{ + struct vhost_dev *dev = &v->vdev; + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + + vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1); + + if (ops->dma_map) + ops->dma_unmap(vdpa, iova, size); + else if (ops->set_map) + ops->set_map(vdpa, dev->iotlb); + else + iommu_unmap(v->domain, iova, size); +} + +static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v, + struct vhost_iotlb_msg *msg) +{ + struct vhost_dev *dev = &v->vdev; + struct vhost_iotlb *iotlb = dev->iotlb; + struct page **page_list; + unsigned long list_size = PAGE_SIZE / sizeof(struct page *); + unsigned int gup_flags = FOLL_LONGTERM; + unsigned long npages, cur_base, map_pfn, last_pfn = 0; + unsigned long locked, lock_limit, pinned, i; + u64 iova = msg->iova; + int ret = 0; + + if (vhost_iotlb_itree_first(iotlb, msg->iova, + msg->iova + msg->size - 1)) + return -EEXIST; + + page_list = (struct page **) __get_free_page(GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + if (msg->perm & VHOST_ACCESS_WO) + gup_flags |= FOLL_WRITE; + + npages = PAGE_ALIGN(msg->size + (iova & ~PAGE_MASK)) >> PAGE_SHIFT; + if (!npages) + return -EINVAL; + + down_read(&dev->mm->mmap_sem); + + locked = atomic64_add_return(npages, &dev->mm->pinned_vm); + lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; + + if (locked > lock_limit) { + ret = -ENOMEM; + goto out; + } + + cur_base = msg->uaddr & PAGE_MASK; + iova &= PAGE_MASK; + + while (npages) { + pinned = min_t(unsigned long, npages, list_size); + ret = pin_user_pages(cur_base, pinned, + gup_flags, page_list, NULL); + if (ret != pinned) + goto out; + + if (!last_pfn) + map_pfn = page_to_pfn(page_list[0]); + + for (i = 0; i < ret; i++) { + unsigned long this_pfn = page_to_pfn(page_list[i]); + u64 csize; + + if (last_pfn && (this_pfn != last_pfn + 1)) { + /* Pin a contiguous chunk of memory */ + csize = (last_pfn - map_pfn + 1) << PAGE_SHIFT; + if (vhost_vdpa_map(v, iova, csize, + map_pfn << PAGE_SHIFT, + msg->perm)) + 
goto out; + map_pfn = this_pfn; + iova += csize; + } + + last_pfn = this_pfn; + } + + cur_base += ret << PAGE_SHIFT; + npages -= ret; + } + + /* Pin the rest chunk */ + ret = vhost_vdpa_map(v, iova, (last_pfn - map_pfn + 1) << PAGE_SHIFT, + map_pfn << PAGE_SHIFT, msg->perm); +out: + if (ret) { + vhost_vdpa_unmap(v, msg->iova, msg->size); + atomic64_sub(npages, &dev->mm->pinned_vm); + } + up_read(&dev->mm->mmap_sem); + free_page((unsigned long)page_list); + return ret; +} + +static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, + struct vhost_iotlb_msg *msg) +{ + struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev); + int r = 0; + + r = vhost_dev_check_owner(dev); + if (r) + return r; + + switch (msg->type) { + case VHOST_IOTLB_UPDATE: + r = vhost_vdpa_process_iotlb_update(v, msg); + break; + case VHOST_IOTLB_INVALIDATE: + vhost_vdpa_unmap(v, msg->iova, msg->size); + break; + default: + r = -EINVAL; + break; + } + + return r; +} + +static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb, + struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct vhost_vdpa *v = file->private_data; + struct vhost_dev *dev = &v->vdev; + + return vhost_chr_write_iter(dev, from); +} + +static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v) +{ + struct vdpa_device *vdpa = v->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct device *dma_dev = vdpa_get_dma_dev(vdpa); + struct bus_type *bus; + int ret; + + /* Device want to do DMA by itself */ + if (ops->set_map || ops->dma_map) + return 0; + + bus = dma_dev->bus; + if (!bus) + return -EFAULT; + + if (!iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY)) + return -ENOTSUPP; + + v->domain = iommu_domain_alloc(bus); + if (!v->domain) + return -EIO; + + ret = iommu_attach_device(v->domain, dma_dev); + if (ret) + goto err_attach; + + return 0; + +err_attach: + iommu_domain_free(v->domain); + return ret; +} + +static void vhost_vdpa_free_domain(struct vhost_vdpa *v) +{ + struct vdpa_device *vdpa = v->vdpa; + struct device *dma_dev = vdpa_get_dma_dev(vdpa); + + if (v->domain) { + iommu_detach_device(v->domain, dma_dev); + iommu_domain_free(v->domain); + } + + v->domain = NULL; +} + +static int vhost_vdpa_open(struct inode *inode, struct file *filep) +{ + struct vhost_vdpa *v; + struct vhost_dev *dev; + struct vhost_virtqueue **vqs; + int nvqs, i, r, opened; + + v = container_of(inode->i_cdev, struct vhost_vdpa, cdev); + if (!v) + return -ENODEV; + + opened = atomic_cmpxchg(&v->opened, 0, 1); + if (opened) + return -EBUSY; + + nvqs = v->nvqs; + vhost_vdpa_reset(v); + + vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL); + if (!vqs) { + r = -ENOMEM; + goto err; + } + + dev = &v->vdev; + for (i = 0; i < nvqs; i++) { + vqs[i] = &v->vqs[i]; + vqs[i]->handle_kick = handle_vq_kick; + } + vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, + vhost_vdpa_process_iotlb_msg); + + dev->iotlb = vhost_iotlb_alloc(0, 0); + if (!dev->iotlb) { + r = -ENOMEM; + goto err_init_iotlb; + } + + r = vhost_vdpa_alloc_domain(v); + if (r) + goto err_init_iotlb; + + filep->private_data = v; + + return 0; + +err_init_iotlb: + vhost_dev_cleanup(&v->vdev); +err: + atomic_dec(&v->opened); + return r; +} + +static int vhost_vdpa_release(struct inode *inode, struct file *filep) +{ + struct vhost_vdpa *v = filep->private_data; + struct vhost_dev *d = &v->vdev; + + mutex_lock(&d->mutex); + filep->private_data = NULL; + vhost_vdpa_reset(v); + vhost_dev_stop(&v->vdev); + vhost_vdpa_iotlb_free(v); + vhost_vdpa_free_domain(v); + vhost_dev_cleanup(&v->vdev); + 
kfree(v->vdev.vqs); + mutex_unlock(&d->mutex); + + atomic_dec(&v->opened); + complete(&v->completion); + + return 0; +} + +static const struct file_operations vhost_vdpa_fops = { + .owner = THIS_MODULE, + .open = vhost_vdpa_open, + .release = vhost_vdpa_release, + .write_iter = vhost_vdpa_chr_write_iter, + .unlocked_ioctl = vhost_vdpa_unlocked_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +static void vhost_vdpa_release_dev(struct device *device) +{ + struct vhost_vdpa *v = + container_of(device, struct vhost_vdpa, dev); + + ida_simple_remove(&vhost_vdpa_ida, v->minor); + kfree(v->vqs); + kfree(v); +} + +static int vhost_vdpa_probe(struct vdpa_device *vdpa) +{ + const struct vdpa_config_ops *ops = vdpa->config; + struct vhost_vdpa *v; + int minor, nvqs = VHOST_VDPA_VQ_MAX; + int r; + + /* Currently, we only accept the network devices. */ + if (ops->get_device_id(vdpa) != VIRTIO_ID_NET) + return -ENOTSUPP; + + v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL); + if (!v) + return -ENOMEM; + + minor = ida_simple_get(&vhost_vdpa_ida, 0, + VHOST_VDPA_DEV_MAX, GFP_KERNEL); + if (minor < 0) { + kfree(v); + return minor; + } + + atomic_set(&v->opened, 0); + v->minor = minor; + v->vdpa = vdpa; + v->nvqs = nvqs; + v->virtio_id = ops->get_device_id(vdpa); + + device_initialize(&v->dev); + v->dev.release = vhost_vdpa_release_dev; + v->dev.parent = &vdpa->dev; + v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor); + v->vqs = kmalloc_array(nvqs, sizeof(struct vhost_virtqueue), + GFP_KERNEL); + if (!v->vqs) { + r = -ENOMEM; + goto err; + } + + r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor); + if (r) + goto err; + + cdev_init(&v->cdev, &vhost_vdpa_fops); + v->cdev.owner = THIS_MODULE; + + r = cdev_device_add(&v->cdev, &v->dev); + if (r) + goto err; + + init_completion(&v->completion); + vdpa_set_drvdata(vdpa, v); + + return 0; + +err: + put_device(&v->dev); + return r; +} + +static void vhost_vdpa_remove(struct vdpa_device *vdpa) +{ + struct vhost_vdpa *v = vdpa_get_drvdata(vdpa); + int opened; + + cdev_device_del(&v->cdev, &v->dev); + + do { + opened = atomic_cmpxchg(&v->opened, 0, 1); + if (!opened) + break; + wait_for_completion(&v->completion); + } while (1); + + put_device(&v->dev); +} + +static struct vdpa_driver vhost_vdpa_driver = { + .driver = { + .name = "vhost_vdpa", + }, + .probe = vhost_vdpa_probe, + .remove = vhost_vdpa_remove, +}; + +static int __init vhost_vdpa_init(void) +{ + int r; + + r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX, + "vhost-vdpa"); + if (r) + goto err_alloc_chrdev; + + r = vdpa_register_driver(&vhost_vdpa_driver); + if (r) + goto err_vdpa_register_driver; + + return 0; + +err_vdpa_register_driver: + unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX); +err_alloc_chrdev: + return r; +} +module_init(vhost_vdpa_init); + +static void __exit vhost_vdpa_exit(void) +{ + vdpa_unregister_driver(&vhost_vdpa_driver); + unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX); +} +module_exit(vhost_vdpa_exit); + +MODULE_VERSION("0.0.1"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); +MODULE_DESCRIPTION("vDPA-based vhost backend for virtio"); diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index f44340b41494..d450e16c5c25 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c @@ -50,10 +50,6 @@ enum { #define vhost_used_event(vq) ((__virtio16 __user *)&vq->avail->ring[vq->num]) #define vhost_avail_event(vq) ((__virtio16 __user *)&vq->used->ring[vq->num]) -INTERVAL_TREE_DEFINE(struct 
vhost_umem_node, - rb, __u64, __subtree_last, - START, LAST, static inline, vhost_umem_interval_tree); - #ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY static void vhost_disable_cross_endian(struct vhost_virtqueue *vq) { @@ -457,7 +453,9 @@ static size_t vhost_get_desc_size(struct vhost_virtqueue *vq, void vhost_dev_init(struct vhost_dev *dev, struct vhost_virtqueue **vqs, int nvqs, - int iov_limit, int weight, int byte_weight) + int iov_limit, int weight, int byte_weight, + int (*msg_handler)(struct vhost_dev *dev, + struct vhost_iotlb_msg *msg)) { struct vhost_virtqueue *vq; int i; @@ -473,6 +471,7 @@ void vhost_dev_init(struct vhost_dev *dev, dev->iov_limit = iov_limit; dev->weight = weight; dev->byte_weight = byte_weight; + dev->msg_handler = msg_handler; init_llist_head(&dev->work_list); init_waitqueue_head(&dev->wait); INIT_LIST_HEAD(&dev->read_list); @@ -581,21 +580,25 @@ err_mm: } EXPORT_SYMBOL_GPL(vhost_dev_set_owner); -struct vhost_umem *vhost_dev_reset_owner_prepare(void) +static struct vhost_iotlb *iotlb_alloc(void) +{ + return vhost_iotlb_alloc(max_iotlb_entries, + VHOST_IOTLB_FLAG_RETIRE); +} + +struct vhost_iotlb *vhost_dev_reset_owner_prepare(void) { - return kvzalloc(sizeof(struct vhost_umem), GFP_KERNEL); + return iotlb_alloc(); } EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare); /* Caller should have device mutex */ -void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_umem *umem) +void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *umem) { int i; vhost_dev_cleanup(dev); - /* Restore memory to default empty mapping. */ - INIT_LIST_HEAD(&umem->umem_list); dev->umem = umem; /* We don't need VQ locks below since vhost_dev_cleanup makes sure * VQs aren't running. @@ -618,28 +621,6 @@ void vhost_dev_stop(struct vhost_dev *dev) } EXPORT_SYMBOL_GPL(vhost_dev_stop); -static void vhost_umem_free(struct vhost_umem *umem, - struct vhost_umem_node *node) -{ - vhost_umem_interval_tree_remove(node, &umem->umem_tree); - list_del(&node->link); - kfree(node); - umem->numem--; -} - -static void vhost_umem_clean(struct vhost_umem *umem) -{ - struct vhost_umem_node *node, *tmp; - - if (!umem) - return; - - list_for_each_entry_safe(node, tmp, &umem->umem_list, link) - vhost_umem_free(umem, node); - - kvfree(umem); -} - static void vhost_clear_msg(struct vhost_dev *dev) { struct vhost_msg_node *node, *n; @@ -677,9 +658,9 @@ void vhost_dev_cleanup(struct vhost_dev *dev) eventfd_ctx_put(dev->log_ctx); dev->log_ctx = NULL; /* No one will access memory at this point */ - vhost_umem_clean(dev->umem); + vhost_iotlb_free(dev->umem); dev->umem = NULL; - vhost_umem_clean(dev->iotlb); + vhost_iotlb_free(dev->iotlb); dev->iotlb = NULL; vhost_clear_msg(dev); wake_up_interruptible_poll(&dev->wait, EPOLLIN | EPOLLRDNORM); @@ -715,27 +696,26 @@ static bool vhost_overflow(u64 uaddr, u64 size) } /* Caller should have vq mutex and device mutex. 
*/ -static bool vq_memory_access_ok(void __user *log_base, struct vhost_umem *umem, +static bool vq_memory_access_ok(void __user *log_base, struct vhost_iotlb *umem, int log_all) { - struct vhost_umem_node *node; + struct vhost_iotlb_map *map; if (!umem) return false; - list_for_each_entry(node, &umem->umem_list, link) { - unsigned long a = node->userspace_addr; + list_for_each_entry(map, &umem->list, link) { + unsigned long a = map->addr; - if (vhost_overflow(node->userspace_addr, node->size)) + if (vhost_overflow(map->addr, map->size)) return false; - if (!access_ok((void __user *)a, - node->size)) + if (!access_ok((void __user *)a, map->size)) return false; else if (log_all && !log_access_ok(log_base, - node->start, - node->size)) + map->start, + map->size)) return false; } return true; @@ -745,17 +725,17 @@ static inline void __user *vhost_vq_meta_fetch(struct vhost_virtqueue *vq, u64 addr, unsigned int size, int type) { - const struct vhost_umem_node *node = vq->meta_iotlb[type]; + const struct vhost_iotlb_map *map = vq->meta_iotlb[type]; - if (!node) + if (!map) return NULL; - return (void *)(uintptr_t)(node->userspace_addr + addr - node->start); + return (void *)(uintptr_t)(map->addr + addr - map->start); } /* Can we switch to this memory table? */ /* Caller should have device mutex but not vq mutex */ -static bool memory_access_ok(struct vhost_dev *d, struct vhost_umem *umem, +static bool memory_access_ok(struct vhost_dev *d, struct vhost_iotlb *umem, int log_all) { int i; @@ -1020,47 +1000,6 @@ static inline int vhost_get_desc(struct vhost_virtqueue *vq, return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); } -static int vhost_new_umem_range(struct vhost_umem *umem, - u64 start, u64 size, u64 end, - u64 userspace_addr, int perm) -{ - struct vhost_umem_node *tmp, *node; - - if (!size) - return -EFAULT; - - node = kmalloc(sizeof(*node), GFP_ATOMIC); - if (!node) - return -ENOMEM; - - if (umem->numem == max_iotlb_entries) { - tmp = list_first_entry(&umem->umem_list, typeof(*tmp), link); - vhost_umem_free(umem, tmp); - } - - node->start = start; - node->size = size; - node->last = end; - node->userspace_addr = userspace_addr; - node->perm = perm; - INIT_LIST_HEAD(&node->link); - list_add_tail(&node->link, &umem->umem_list); - vhost_umem_interval_tree_insert(node, &umem->umem_tree); - umem->numem++; - - return 0; -} - -static void vhost_del_umem_range(struct vhost_umem *umem, - u64 start, u64 end) -{ - struct vhost_umem_node *node; - - while ((node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, - start, end))) - vhost_umem_free(umem, node); -} - static void vhost_iotlb_notify_vq(struct vhost_dev *d, struct vhost_iotlb_msg *msg) { @@ -1117,9 +1056,9 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, break; } vhost_vq_meta_reset(dev); - if (vhost_new_umem_range(dev->iotlb, msg->iova, msg->size, - msg->iova + msg->size - 1, - msg->uaddr, msg->perm)) { + if (vhost_iotlb_add_range(dev->iotlb, msg->iova, + msg->iova + msg->size - 1, + msg->uaddr, msg->perm)) { ret = -ENOMEM; break; } @@ -1131,8 +1070,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev, break; } vhost_vq_meta_reset(dev); - vhost_del_umem_range(dev->iotlb, msg->iova, - msg->iova + msg->size - 1); + vhost_iotlb_del_range(dev->iotlb, msg->iova, + msg->iova + msg->size - 1); break; default: ret = -EINVAL; @@ -1178,7 +1117,12 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, ret = -EINVAL; goto done; } - if (vhost_process_iotlb_msg(dev, &msg)) { + + if (dev->msg_handler) + ret = 
dev->msg_handler(dev, &msg); + else + ret = vhost_process_iotlb_msg(dev, &msg); + if (ret) { ret = -EFAULT; goto done; } @@ -1311,44 +1255,42 @@ static bool vq_access_ok(struct vhost_virtqueue *vq, unsigned int num, } static void vhost_vq_meta_update(struct vhost_virtqueue *vq, - const struct vhost_umem_node *node, + const struct vhost_iotlb_map *map, int type) { int access = (type == VHOST_ADDR_USED) ? VHOST_ACCESS_WO : VHOST_ACCESS_RO; - if (likely(node->perm & access)) - vq->meta_iotlb[type] = node; + if (likely(map->perm & access)) + vq->meta_iotlb[type] = map; } static bool iotlb_access_ok(struct vhost_virtqueue *vq, int access, u64 addr, u64 len, int type) { - const struct vhost_umem_node *node; - struct vhost_umem *umem = vq->iotlb; + const struct vhost_iotlb_map *map; + struct vhost_iotlb *umem = vq->iotlb; u64 s = 0, size, orig_addr = addr, last = addr + len - 1; if (vhost_vq_meta_fetch(vq, addr, len, type)) return true; while (len > s) { - node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, - addr, - last); - if (node == NULL || node->start > addr) { + map = vhost_iotlb_itree_first(umem, addr, last); + if (map == NULL || map->start > addr) { vhost_iotlb_miss(vq, addr, access); return false; - } else if (!(node->perm & access)) { + } else if (!(map->perm & access)) { /* Report the possible access violation by * request another translation from userspace. */ return false; } - size = node->size - addr + node->start; + size = map->size - addr + map->start; if (orig_addr == addr && size >= len) - vhost_vq_meta_update(vq, node, type); + vhost_vq_meta_update(vq, map, type); s += size; addr += size; @@ -1364,12 +1306,12 @@ int vq_meta_prefetch(struct vhost_virtqueue *vq) if (!vq->iotlb) return 1; - return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, + return iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->desc, vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && - iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->avail, + iotlb_access_ok(vq, VHOST_MAP_RO, (u64)(uintptr_t)vq->avail, vhost_get_avail_size(vq, num), VHOST_ADDR_AVAIL) && - iotlb_access_ok(vq, VHOST_ACCESS_WO, (u64)(uintptr_t)vq->used, + iotlb_access_ok(vq, VHOST_MAP_WO, (u64)(uintptr_t)vq->used, vhost_get_used_size(vq, num), VHOST_ADDR_USED); } EXPORT_SYMBOL_GPL(vq_meta_prefetch); @@ -1408,25 +1350,11 @@ bool vhost_vq_access_ok(struct vhost_virtqueue *vq) } EXPORT_SYMBOL_GPL(vhost_vq_access_ok); -static struct vhost_umem *vhost_umem_alloc(void) -{ - struct vhost_umem *umem = kvzalloc(sizeof(*umem), GFP_KERNEL); - - if (!umem) - return NULL; - - umem->umem_tree = RB_ROOT_CACHED; - umem->numem = 0; - INIT_LIST_HEAD(&umem->umem_list); - - return umem; -} - static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) { struct vhost_memory mem, *newmem; struct vhost_memory_region *region; - struct vhost_umem *newumem, *oldumem; + struct vhost_iotlb *newumem, *oldumem; unsigned long size = offsetof(struct vhost_memory, regions); int i; @@ -1448,7 +1376,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) return -EFAULT; } - newumem = vhost_umem_alloc(); + newumem = iotlb_alloc(); if (!newumem) { kvfree(newmem); return -ENOMEM; @@ -1457,13 +1385,12 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) for (region = newmem->regions; region < newmem->regions + mem.nregions; region++) { - if (vhost_new_umem_range(newumem, - region->guest_phys_addr, - region->memory_size, - region->guest_phys_addr + - 
region->memory_size - 1, - region->userspace_addr, - VHOST_ACCESS_RW)) + if (vhost_iotlb_add_range(newumem, + region->guest_phys_addr, + region->guest_phys_addr + + region->memory_size - 1, + region->userspace_addr, + VHOST_MAP_RW)) goto err; } @@ -1481,11 +1408,11 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) } kvfree(newmem); - vhost_umem_clean(oldumem); + vhost_iotlb_free(oldumem); return 0; err: - vhost_umem_clean(newumem); + vhost_iotlb_free(newumem); kvfree(newmem); return -EFAULT; } @@ -1726,10 +1653,10 @@ EXPORT_SYMBOL_GPL(vhost_vring_ioctl); int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) { - struct vhost_umem *niotlb, *oiotlb; + struct vhost_iotlb *niotlb, *oiotlb; int i; - niotlb = vhost_umem_alloc(); + niotlb = iotlb_alloc(); if (!niotlb) return -ENOMEM; @@ -1745,7 +1672,7 @@ int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled) mutex_unlock(&vq->mutex); } - vhost_umem_clean(oiotlb); + vhost_iotlb_free(oiotlb); return 0; } @@ -1875,8 +1802,8 @@ static int log_write(void __user *log_base, static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) { - struct vhost_umem *umem = vq->umem; - struct vhost_umem_node *u; + struct vhost_iotlb *umem = vq->umem; + struct vhost_iotlb_map *u; u64 start, end, l, min; int r; bool hit = false; @@ -1886,16 +1813,15 @@ static int log_write_hva(struct vhost_virtqueue *vq, u64 hva, u64 len) /* More than one GPAs can be mapped into a single HVA. So * iterate all possible umems here to be safe. */ - list_for_each_entry(u, &umem->umem_list, link) { - if (u->userspace_addr > hva - 1 + len || - u->userspace_addr - 1 + u->size < hva) + list_for_each_entry(u, &umem->list, link) { + if (u->addr > hva - 1 + len || + u->addr - 1 + u->size < hva) continue; - start = max(u->userspace_addr, hva); - end = min(u->userspace_addr - 1 + u->size, - hva - 1 + len); + start = max(u->addr, hva); + end = min(u->addr - 1 + u->size, hva - 1 + len); l = end - start + 1; r = log_write(vq->log_base, - u->start + start - u->userspace_addr, + u->start + start - u->addr, l); if (r < 0) return r; @@ -2046,9 +1972,9 @@ EXPORT_SYMBOL_GPL(vhost_vq_init_access); static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, struct iovec iov[], int iov_size, int access) { - const struct vhost_umem_node *node; + const struct vhost_iotlb_map *map; struct vhost_dev *dev = vq->dev; - struct vhost_umem *umem = dev->iotlb ? dev->iotlb : dev->umem; + struct vhost_iotlb *umem = dev->iotlb ? 
dev->iotlb : dev->umem; struct iovec *_iov; u64 s = 0; int ret = 0; @@ -2060,25 +1986,24 @@ static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len, break; } - node = vhost_umem_interval_tree_iter_first(&umem->umem_tree, - addr, addr + len - 1); - if (node == NULL || node->start > addr) { + map = vhost_iotlb_itree_first(umem, addr, addr + len - 1); + if (map == NULL || map->start > addr) { if (umem != dev->iotlb) { ret = -EFAULT; break; } ret = -EAGAIN; break; - } else if (!(node->perm & access)) { + } else if (!(map->perm & access)) { ret = -EPERM; break; } _iov = iov + ret; - size = node->size - addr + node->start; + size = map->size - addr + map->start; _iov->iov_len = min((u64)len - s, size); _iov->iov_base = (void __user *)(unsigned long) - (node->userspace_addr + addr - node->start); + (map->addr + addr - map->start); s += size; addr += size; ++ret; diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index a123fd70847e..181382185bbc 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h @@ -12,6 +12,7 @@ #include <linux/virtio_config.h> #include <linux/virtio_ring.h> #include <linux/atomic.h> +#include <linux/vhost_iotlb.h> struct vhost_work; typedef void (*vhost_work_fn_t)(struct vhost_work *work); @@ -52,27 +53,6 @@ struct vhost_log { u64 len; }; -#define START(node) ((node)->start) -#define LAST(node) ((node)->last) - -struct vhost_umem_node { - struct rb_node rb; - struct list_head link; - __u64 start; - __u64 last; - __u64 size; - __u64 userspace_addr; - __u32 perm; - __u32 flags_padding; - __u64 __subtree_last; -}; - -struct vhost_umem { - struct rb_root_cached umem_tree; - struct list_head umem_list; - int numem; -}; - enum vhost_uaddr_type { VHOST_ADDR_DESC = 0, VHOST_ADDR_AVAIL = 1, @@ -90,7 +70,7 @@ struct vhost_virtqueue { struct vring_desc __user *desc; struct vring_avail __user *avail; struct vring_used __user *used; - const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; + const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS]; struct file *kick; struct eventfd_ctx *call_ctx; struct eventfd_ctx *error_ctx; @@ -128,8 +108,8 @@ struct vhost_virtqueue { struct iovec *indirect; struct vring_used_elem *heads; /* Protected by virtqueue mutex. 
*/ - struct vhost_umem *umem; - struct vhost_umem *iotlb; + struct vhost_iotlb *umem; + struct vhost_iotlb *iotlb; void *private_data; u64 acked_features; u64 acked_backend_features; @@ -164,8 +144,8 @@ struct vhost_dev { struct eventfd_ctx *log_ctx; struct llist_head work_list; struct task_struct *worker; - struct vhost_umem *umem; - struct vhost_umem *iotlb; + struct vhost_iotlb *umem; + struct vhost_iotlb *iotlb; spinlock_t iotlb_lock; struct list_head read_list; struct list_head pending_list; @@ -174,16 +154,20 @@ struct vhost_dev { int weight; int byte_weight; u64 kcov_handle; + int (*msg_handler)(struct vhost_dev *dev, + struct vhost_iotlb_msg *msg); }; bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len); void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, - int nvqs, int iov_limit, int weight, int byte_weight); + int nvqs, int iov_limit, int weight, int byte_weight, + int (*msg_handler)(struct vhost_dev *dev, + struct vhost_iotlb_msg *msg)); long vhost_dev_set_owner(struct vhost_dev *dev); bool vhost_dev_has_owner(struct vhost_dev *dev); long vhost_dev_check_owner(struct vhost_dev *); -struct vhost_umem *vhost_dev_reset_owner_prepare(void); -void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_umem *); +struct vhost_iotlb *vhost_dev_reset_owner_prepare(void); +void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb); void vhost_dev_cleanup(struct vhost_dev *); void vhost_dev_stop(struct vhost_dev *); long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp); @@ -229,6 +213,9 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev, struct iov_iter *from); int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled); +void vhost_iotlb_map_free(struct vhost_iotlb *iotlb, + struct vhost_iotlb_map *map); + #define vq_err(vq, fmt, ...) do { \ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \ if ((vq)->error_ctx) \ diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index a0a2d74967ef..ee0491f579ac 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -13,6 +13,9 @@ #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/export.h> +#include <linux/bvec.h> +#include <linux/highmem.h> +#include <linux/vhost_iotlb.h> #include <uapi/linux/virtio_config.h> static __printf(1,2) __cold void vringh_bad(const char *fmt, ...) @@ -71,9 +74,11 @@ static inline int __vringh_get_head(const struct vringh *vrh, } /* Copy some bytes to/from the iovec. Returns num copied. */ -static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, +static inline ssize_t vringh_iov_xfer(struct vringh *vrh, + struct vringh_kiov *iov, void *ptr, size_t len, - int (*xfer)(void *addr, void *ptr, + int (*xfer)(const struct vringh *vrh, + void *addr, void *ptr, size_t len)) { int err, done = 0; @@ -82,7 +87,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, size_t partlen; partlen = min(iov->iov[iov->i].iov_len, len); - err = xfer(iov->iov[iov->i].iov_base, ptr, partlen); + err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen); if (err) return err; done += partlen; @@ -96,6 +101,7 @@ static inline ssize_t vringh_iov_xfer(struct vringh_kiov *iov, /* Fix up old iov element then increment. 
*/ iov->iov[iov->i].iov_len = iov->consumed; iov->iov[iov->i].iov_base -= iov->consumed; + iov->consumed = 0; iov->i++; @@ -227,7 +233,8 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src, u64 addr, struct vringh_range *r), struct vringh_range *range, - int (*copy)(void *dst, const void *src, size_t len)) + int (*copy)(const struct vringh *vrh, + void *dst, const void *src, size_t len)) { size_t part, len = sizeof(struct vring_desc); @@ -241,7 +248,7 @@ static int slow_copy(struct vringh *vrh, void *dst, const void *src, if (!rcheck(vrh, addr, &part, range, getrange)) return -EINVAL; - err = copy(dst, src, part); + err = copy(vrh, dst, src, part); if (err) return err; @@ -262,7 +269,8 @@ __vringh_iov(struct vringh *vrh, u16 i, struct vringh_range *)), bool (*getrange)(struct vringh *, u64, struct vringh_range *), gfp_t gfp, - int (*copy)(void *dst, const void *src, size_t len)) + int (*copy)(const struct vringh *vrh, + void *dst, const void *src, size_t len)) { int err, count = 0, up_next, desc_max; struct vring_desc desc, *descs; @@ -291,7 +299,7 @@ __vringh_iov(struct vringh *vrh, u16 i, err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange, &slowrange, copy); else - err = copy(&desc, &descs[i], sizeof(desc)); + err = copy(vrh, &desc, &descs[i], sizeof(desc)); if (unlikely(err)) goto fail; @@ -404,7 +412,8 @@ static inline int __vringh_complete(struct vringh *vrh, unsigned int num_used, int (*putu16)(const struct vringh *vrh, __virtio16 *p, u16 val), - int (*putused)(struct vring_used_elem *dst, + int (*putused)(const struct vringh *vrh, + struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned num)) { @@ -420,12 +429,12 @@ static inline int __vringh_complete(struct vringh *vrh, /* Compiler knows num_used == 1 sometimes, hence extra check */ if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) { u16 part = vrh->vring.num - off; - err = putused(&used_ring->ring[off], used, part); + err = putused(vrh, &used_ring->ring[off], used, part); if (!err) - err = putused(&used_ring->ring[0], used + part, + err = putused(vrh, &used_ring->ring[0], used + part, num_used - part); } else - err = putused(&used_ring->ring[off], used, num_used); + err = putused(vrh, &used_ring->ring[off], used, num_used); if (err) { vringh_bad("Failed to write %u used entries %u at %p", @@ -564,13 +573,15 @@ static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val) return put_user(v, (__force __virtio16 __user *)p); } -static inline int copydesc_user(void *dst, const void *src, size_t len) +static inline int copydesc_user(const struct vringh *vrh, + void *dst, const void *src, size_t len) { return copy_from_user(dst, (__force void __user *)src, len) ? -EFAULT : 0; } -static inline int putused_user(struct vring_used_elem *dst, +static inline int putused_user(const struct vringh *vrh, + struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { @@ -578,13 +589,15 @@ static inline int putused_user(struct vring_used_elem *dst, sizeof(*dst) * num) ? -EFAULT : 0; } -static inline int xfer_from_user(void *src, void *dst, size_t len) +static inline int xfer_from_user(const struct vringh *vrh, void *src, + void *dst, size_t len) { return copy_from_user(dst, (__force void __user *)src, len) ? -EFAULT : 0; } -static inline int xfer_to_user(void *dst, void *src, size_t len) +static inline int xfer_to_user(const struct vringh *vrh, + void *dst, void *src, size_t len) { return copy_to_user((__force void __user *)dst, src, len) ? 
-EFAULT : 0; @@ -706,7 +719,7 @@ EXPORT_SYMBOL(vringh_getdesc_user); */ ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len) { - return vringh_iov_xfer((struct vringh_kiov *)riov, + return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov, dst, len, xfer_from_user); } EXPORT_SYMBOL(vringh_iov_pull_user); @@ -722,7 +735,7 @@ EXPORT_SYMBOL(vringh_iov_pull_user); ssize_t vringh_iov_push_user(struct vringh_iov *wiov, const void *src, size_t len) { - return vringh_iov_xfer((struct vringh_kiov *)wiov, + return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov, (void *)src, len, xfer_to_user); } EXPORT_SYMBOL(vringh_iov_push_user); @@ -832,13 +845,15 @@ static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) return 0; } -static inline int copydesc_kern(void *dst, const void *src, size_t len) +static inline int copydesc_kern(const struct vringh *vrh, + void *dst, const void *src, size_t len) { memcpy(dst, src, len); return 0; } -static inline int putused_kern(struct vring_used_elem *dst, +static inline int putused_kern(const struct vringh *vrh, + struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { @@ -846,13 +861,15 @@ static inline int putused_kern(struct vring_used_elem *dst, return 0; } -static inline int xfer_kern(void *src, void *dst, size_t len) +static inline int xfer_kern(const struct vringh *vrh, void *src, + void *dst, size_t len) { memcpy(dst, src, len); return 0; } -static inline int kern_xfer(void *dst, void *src, size_t len) +static inline int kern_xfer(const struct vringh *vrh, void *dst, + void *src, size_t len) { memcpy(dst, src, len); return 0; @@ -949,7 +966,7 @@ EXPORT_SYMBOL(vringh_getdesc_kern); */ ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len) { - return vringh_iov_xfer(riov, dst, len, xfer_kern); + return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern); } EXPORT_SYMBOL(vringh_iov_pull_kern); @@ -964,7 +981,7 @@ EXPORT_SYMBOL(vringh_iov_pull_kern); ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, const void *src, size_t len) { - return vringh_iov_xfer(wiov, (void *)src, len, kern_xfer); + return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer); } EXPORT_SYMBOL(vringh_iov_push_kern); @@ -1042,4 +1059,362 @@ int vringh_need_notify_kern(struct vringh *vrh) } EXPORT_SYMBOL(vringh_need_notify_kern); +static int iotlb_translate(const struct vringh *vrh, + u64 addr, u64 len, struct bio_vec iov[], + int iov_size, u32 perm) +{ + struct vhost_iotlb_map *map; + struct vhost_iotlb *iotlb = vrh->iotlb; + int ret = 0; + u64 s = 0; + + while (len > s) { + u64 size, pa, pfn; + + if (unlikely(ret >= iov_size)) { + ret = -ENOBUFS; + break; + } + + map = vhost_iotlb_itree_first(iotlb, addr, + addr + len - 1); + if (!map || map->start > addr) { + ret = -EINVAL; + break; + } else if (!(map->perm & perm)) { + ret = -EPERM; + break; + } + + size = map->size - addr + map->start; + pa = map->addr + addr - map->start; + pfn = pa >> PAGE_SHIFT; + iov[ret].bv_page = pfn_to_page(pfn); + iov[ret].bv_len = min(len - s, size); + iov[ret].bv_offset = pa & (PAGE_SIZE - 1); + s += size; + addr += size; + ++ret; + } + + return ret; +} + +static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, + void *src, size_t len) +{ + struct iov_iter iter; + struct bio_vec iov[16]; + int ret; + + ret = iotlb_translate(vrh, (u64)(uintptr_t)src, + len, iov, 16, VHOST_MAP_RO); + if (ret < 0) + return ret; + + iov_iter_bvec(&iter, READ, iov, ret, len); + + ret = copy_from_iter(dst, len, 
&iter); + + return ret; +} + +static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, + void *src, size_t len) +{ + struct iov_iter iter; + struct bio_vec iov[16]; + int ret; + + ret = iotlb_translate(vrh, (u64)(uintptr_t)dst, + len, iov, 16, VHOST_MAP_WO); + if (ret < 0) + return ret; + + iov_iter_bvec(&iter, WRITE, iov, ret, len); + + return copy_to_iter(src, len, &iter); +} + +static inline int getu16_iotlb(const struct vringh *vrh, + u16 *val, const __virtio16 *p) +{ + struct bio_vec iov; + void *kaddr, *from; + int ret; + + /* Atomic read is needed for getu16 */ + ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), + &iov, 1, VHOST_MAP_RO); + if (ret < 0) + return ret; + + kaddr = kmap_atomic(iov.bv_page); + from = kaddr + iov.bv_offset; + *val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from)); + kunmap_atomic(kaddr); + + return 0; +} + +static inline int putu16_iotlb(const struct vringh *vrh, + __virtio16 *p, u16 val) +{ + struct bio_vec iov; + void *kaddr, *to; + int ret; + + /* Atomic write is needed for putu16 */ + ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p), + &iov, 1, VHOST_MAP_WO); + if (ret < 0) + return ret; + + kaddr = kmap_atomic(iov.bv_page); + to = kaddr + iov.bv_offset; + WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val)); + kunmap_atomic(kaddr); + + return 0; +} + +static inline int copydesc_iotlb(const struct vringh *vrh, + void *dst, const void *src, size_t len) +{ + int ret; + + ret = copy_from_iotlb(vrh, dst, (void *)src, len); + if (ret != len) + return -EFAULT; + + return 0; +} + +static inline int xfer_from_iotlb(const struct vringh *vrh, void *src, + void *dst, size_t len) +{ + int ret; + + ret = copy_from_iotlb(vrh, dst, src, len); + if (ret != len) + return -EFAULT; + + return 0; +} + +static inline int xfer_to_iotlb(const struct vringh *vrh, + void *dst, void *src, size_t len) +{ + int ret; + + ret = copy_to_iotlb(vrh, dst, src, len); + if (ret != len) + return -EFAULT; + + return 0; +} + +static inline int putused_iotlb(const struct vringh *vrh, + struct vring_used_elem *dst, + const struct vring_used_elem *src, + unsigned int num) +{ + int size = num * sizeof(*dst); + int ret; + + ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst)); + if (ret != size) + return -EFAULT; + + return 0; +} + +/** + * vringh_init_iotlb - initialize a vringh for a ring with IOTLB. + * @vrh: the vringh to initialize. + * @features: the feature bits for this ring. + * @num: the number of elements. + * @weak_barriers: true if we only need memory barriers, not I/O. + * @desc: the userpace descriptor pointer. + * @avail: the userpace avail pointer. + * @used: the userpace used pointer. + * + * Returns an error if num is invalid. + */ +int vringh_init_iotlb(struct vringh *vrh, u64 features, + unsigned int num, bool weak_barriers, + struct vring_desc *desc, + struct vring_avail *avail, + struct vring_used *used) +{ + return vringh_init_kern(vrh, features, num, weak_barriers, + desc, avail, used); +} +EXPORT_SYMBOL(vringh_init_iotlb); + +/** + * vringh_set_iotlb - initialize a vringh for a ring with IOTLB. + * @vrh: the vring + * @iotlb: iotlb associated with this vring + */ +void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb) +{ + vrh->iotlb = iotlb; +} +EXPORT_SYMBOL(vringh_set_iotlb); + +/** + * vringh_getdesc_iotlb - get next available descriptor from ring with + * IOTLB. + * @vrh: the kernelspace vring. 
+ * @riov: where to put the readable descriptors (or NULL) + * @wiov: where to put the writable descriptors (or NULL) + * @head: head index we received, for passing to vringh_complete_iotlb(). + * @gfp: flags for allocating larger riov/wiov. + * + * Returns 0 if there was no descriptor, 1 if there was, or -errno. + * + * Note that on error return, you can tell the difference between an + * invalid ring and a single invalid descriptor: in the former case, + * *head will be vrh->vring.num. You may be able to ignore an invalid + * descriptor, but there's not much you can do with an invalid ring. + * + * Note that you may need to clean up riov and wiov, even on error! + */ +int vringh_getdesc_iotlb(struct vringh *vrh, + struct vringh_kiov *riov, + struct vringh_kiov *wiov, + u16 *head, + gfp_t gfp) +{ + int err; + + err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx); + if (err < 0) + return err; + + /* Empty... */ + if (err == vrh->vring.num) + return 0; + + *head = err; + err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL, + gfp, copydesc_iotlb); + if (err) + return err; + + return 1; +} +EXPORT_SYMBOL(vringh_getdesc_iotlb); + +/** + * vringh_iov_pull_iotlb - copy bytes from vring_iov. + * @vrh: the vring. + * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume) + * @dst: the place to copy. + * @len: the maximum length to copy. + * + * Returns the bytes copied <= len or a negative errno. + */ +ssize_t vringh_iov_pull_iotlb(struct vringh *vrh, + struct vringh_kiov *riov, + void *dst, size_t len) +{ + return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb); +} +EXPORT_SYMBOL(vringh_iov_pull_iotlb); + +/** + * vringh_iov_push_iotlb - copy bytes into vring_iov. + * @vrh: the vring. + * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume) + * @dst: the place to copy. + * @len: the maximum length to copy. + * + * Returns the bytes copied <= len or a negative errno. + */ +ssize_t vringh_iov_push_iotlb(struct vringh *vrh, + struct vringh_kiov *wiov, + const void *src, size_t len) +{ + return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb); +} +EXPORT_SYMBOL(vringh_iov_push_iotlb); + +/** + * vringh_abandon_iotlb - we've decided not to handle the descriptor(s). + * @vrh: the vring. + * @num: the number of descriptors to put back (ie. num + * vringh_get_iotlb() to undo). + * + * The next vringh_get_iotlb() will return the old descriptor(s) again. + */ +void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num) +{ + /* We only update vring_avail_event(vr) when we want to be notified, + * so we haven't changed that yet. + */ + vrh->last_avail_idx -= num; +} +EXPORT_SYMBOL(vringh_abandon_iotlb); + +/** + * vringh_complete_iotlb - we've finished with descriptor, publish it. + * @vrh: the vring. + * @head: the head as filled in by vringh_getdesc_iotlb. + * @len: the length of data we have written. + * + * You should check vringh_need_notify_iotlb() after one or more calls + * to this function. + */ +int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len) +{ + struct vring_used_elem used; + + used.id = cpu_to_vringh32(vrh, head); + used.len = cpu_to_vringh32(vrh, len); + + return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb); +} +EXPORT_SYMBOL(vringh_complete_iotlb); + +/** + * vringh_notify_enable_iotlb - we want to know if something changes. + * @vrh: the vring. + * + * This always enables notifications, but returns false if there are + * now more buffers available in the vring. 
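Together with vringh_complete_iotlb() and the notification helpers that follow, these entry points give an in-kernel consumer (for example a vDPA device emulator) a complete datapath over an IOTLB-translated vring. A sketch of one service iteration, assuming the ring has already been set up with vringh_init_iotlb() and that the request fits the illustrative 64-byte bounce buffer:

#include <linux/kernel.h>
#include <linux/vringh.h>
#include <linux/vhost_iotlb.h>

/* Illustrative consumer: pull one request and push it back as the reply,
 * with all guest addresses translated through the attached vhost IOTLB.
 */
static int example_service_one(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
        struct vringh_kiov riov, wiov;
        struct kvec riov_vec[8], wiov_vec[8];
        u8 buf[64];
        ssize_t len;
        u16 head;
        int err;

        vringh_set_iotlb(vrh, iotlb);

        vringh_kiov_init(&riov, riov_vec, ARRAY_SIZE(riov_vec));
        vringh_kiov_init(&wiov, wiov_vec, ARRAY_SIZE(wiov_vec));

        err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_ATOMIC);
        if (err <= 0)           /* 0: ring empty, < 0: error */
                return err;

        len = vringh_iov_pull_iotlb(vrh, &riov, buf, sizeof(buf));
        if (len < 0)
                return len;

        len = vringh_iov_push_iotlb(vrh, &wiov, buf, len);
        if (len < 0)
                return len;

        err = vringh_complete_iotlb(vrh, head, len);
        if (err)
                return err;

        /* Signal the driver side only if it asked to be notified. */
        return vringh_need_notify_iotlb(vrh);
}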
+ */ +bool vringh_notify_enable_iotlb(struct vringh *vrh) +{ + return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb); +} +EXPORT_SYMBOL(vringh_notify_enable_iotlb); + +/** + * vringh_notify_disable_iotlb - don't tell us if something changes. + * @vrh: the vring. + * + * This is our normal running state: we disable and then only enable when + * we're going to sleep. + */ +void vringh_notify_disable_iotlb(struct vringh *vrh) +{ + __vringh_notify_disable(vrh, putu16_iotlb); +} +EXPORT_SYMBOL(vringh_notify_disable_iotlb); + +/** + * vringh_need_notify_iotlb - must we tell the other side about used buffers? + * @vrh: the vring we've called vringh_complete_iotlb() on. + * + * Returns -errno or 0 if we don't need to tell the other side, 1 if we do. + */ +int vringh_need_notify_iotlb(struct vringh *vrh) +{ + return __vringh_need_notify(vrh, getu16_iotlb); +} +EXPORT_SYMBOL(vringh_need_notify_iotlb); + + MODULE_LICENSE("GPL"); diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index c2d7d57e98cf..97669484a3f6 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -621,7 +621,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV, VHOST_VSOCK_PKT_WEIGHT, - VHOST_VSOCK_WEIGHT); + VHOST_VSOCK_WEIGHT, NULL); file->private_data = vsock; spin_lock_init(&vsock->send_pkt_list_lock); diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c index 68f7592c5060..25ef0cbd7583 100644 --- a/drivers/video/backlight/corgi_lcd.c +++ b/drivers/video/backlight/corgi_lcd.c @@ -15,7 +15,7 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/fb.h> #include <linux/lcd.h> #include <linux/spi/spi.h> @@ -90,9 +90,8 @@ struct corgi_lcd { int mode; char buf[2]; - int gpio_backlight_on; - int gpio_backlight_cont; - int gpio_backlight_cont_inverted; + struct gpio_desc *backlight_on; + struct gpio_desc *backlight_cont; void (*kick_battery)(void); }; @@ -403,13 +402,13 @@ static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity) corgi_ssp_lcdtg_send(lcd, DUTYCTRL_ADRS, intensity); /* Bit 5 via GPIO_BACKLIGHT_CONT */ - cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted; + cont = !!(intensity & 0x20); - if (gpio_is_valid(lcd->gpio_backlight_cont)) - gpio_set_value_cansleep(lcd->gpio_backlight_cont, cont); + if (lcd->backlight_cont) + gpiod_set_value_cansleep(lcd->backlight_cont, cont); - if (gpio_is_valid(lcd->gpio_backlight_on)) - gpio_set_value_cansleep(lcd->gpio_backlight_on, intensity); + if (lcd->backlight_on) + gpiod_set_value_cansleep(lcd->backlight_on, intensity); if (lcd->kick_battery) lcd->kick_battery(); @@ -482,48 +481,17 @@ static int setup_gpio_backlight(struct corgi_lcd *lcd, struct corgi_lcd_platform_data *pdata) { struct spi_device *spi = lcd->spi_dev; - int err; - - lcd->gpio_backlight_on = -1; - lcd->gpio_backlight_cont = -1; - - if (gpio_is_valid(pdata->gpio_backlight_on)) { - err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_on, - "BL_ON"); - if (err) { - dev_err(&spi->dev, - "failed to request GPIO%d for backlight_on\n", - pdata->gpio_backlight_on); - return err; - } - - lcd->gpio_backlight_on = pdata->gpio_backlight_on; - gpio_direction_output(lcd->gpio_backlight_on, 0); - } - if (gpio_is_valid(pdata->gpio_backlight_cont)) { - err = devm_gpio_request(&spi->dev, pdata->gpio_backlight_cont, - "BL_CONT"); - if (err) { - 
dev_err(&spi->dev, - "failed to request GPIO%d for backlight_cont\n", - pdata->gpio_backlight_cont); - return err; - } - - lcd->gpio_backlight_cont = pdata->gpio_backlight_cont; - - /* spitz and akita use both GPIOs for backlight, and - * have inverted polarity of GPIO_BACKLIGHT_CONT - */ - if (gpio_is_valid(lcd->gpio_backlight_on)) { - lcd->gpio_backlight_cont_inverted = 1; - gpio_direction_output(lcd->gpio_backlight_cont, 1); - } else { - lcd->gpio_backlight_cont_inverted = 0; - gpio_direction_output(lcd->gpio_backlight_cont, 0); - } - } + lcd->backlight_on = devm_gpiod_get_optional(&spi->dev, + "BL_ON", GPIOD_OUT_LOW); + if (IS_ERR(lcd->backlight_on)) + return PTR_ERR(lcd->backlight_on); + + lcd->backlight_cont = devm_gpiod_get_optional(&spi->dev, "BL_CONT", + GPIOD_OUT_LOW); + if (IS_ERR(lcd->backlight_cont)) + return PTR_ERR(lcd->backlight_cont); + return 0; } diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c index efb4efc2a13d..82b8d7594701 100644 --- a/drivers/video/backlight/pwm_bl.c +++ b/drivers/video/backlight/pwm_bl.c @@ -7,7 +7,6 @@ #include <linux/delay.h> #include <linux/gpio/consumer.h> -#include <linux/gpio.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> @@ -258,8 +257,6 @@ static int pwm_backlight_parse_dt(struct device *dev, &data->post_pwm_on_delay); of_property_read_u32(node, "pwm-off-delay-ms", &data->pwm_off_delay); - data->enable_gpio = -EINVAL; - /* * Determine the number of brightness levels, if this property is not * set a default table of brightness levels will be used. @@ -503,22 +500,6 @@ static int pwm_backlight_probe(struct platform_device *pdev) } /* - * Compatibility fallback for drivers still using the integer GPIO - * platform data. Must go away soon. - */ - if (!pb->enable_gpio && gpio_is_valid(data->enable_gpio)) { - ret = devm_gpio_request_one(&pdev->dev, data->enable_gpio, - GPIOF_OUT_INIT_HIGH, "enable"); - if (ret < 0) { - dev_err(&pdev->dev, "failed to request GPIO#%d: %d\n", - data->enable_gpio, ret); - goto err_alloc; - } - - pb->enable_gpio = gpio_to_desc(data->enable_gpio); - } - - /* * If the GPIO is not known to be already configured as output, that * is, if gpiod_get_direction returns either 1 or -EINVAL, change the * direction to output and set the GPIO as active. diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 28335788e76e..9d28a8e3328f 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1282,6 +1282,9 @@ finished: if (!con_is_bound(&fb_con)) fbcon_exit(); + if (vc->vc_num == logo_shown) + logo_shown = FBCON_LOGO_CANSHOW; + return; } diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 078615cf2afc..2bbf94b15bba 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -43,6 +43,19 @@ config VIRTIO_PCI_LEGACY If unsure, say Y. +config VIRTIO_VDPA + tristate "vDPA driver for virtio devices" + select VDPA + select VIRTIO + help + This driver provides support for virtio based paravirtual + device driver over vDPA bus. For this to be useful, you need + an appropriate vDPA device implementation that operates on a + physical device to allow the datapath of virtio to be + offloaded to hardware. + + If unsure, say M. 
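Stepping back to the backlight changes earlier in this hunk: corgi_lcd and pwm_bl now use GPIO descriptors instead of legacy integer GPIOs, which removes the gpio_is_valid()/polarity bookkeeping. A minimal sketch of that consumer pattern in a hypothetical driver; the "enable" line name and the struct are illustrative:

#include <linux/device.h>
#include <linux/gpio/consumer.h>

struct example_bl {
        struct gpio_desc *enable;       /* NULL when the line is not wired up */
};

static int example_bl_probe(struct device *dev, struct example_bl *bl)
{
        /* Optional line: returns NULL (not an error) when it is absent. */
        bl->enable = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(bl->enable))
                return PTR_ERR(bl->enable);

        return 0;
}

static void example_bl_set(struct example_bl *bl, bool on)
{
        /* gpiod_* helpers accept a NULL descriptor, and active-low polarity
         * is encoded in the descriptor itself, so no special casing is needed.
         */
        gpiod_set_value_cansleep(bl->enable, on);
}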
+ config VIRTIO_PMEM tristate "Support for virtio pmem driver" depends on VIRTIO @@ -58,6 +71,7 @@ config VIRTIO_BALLOON tristate "Virtio balloon driver" depends on VIRTIO select MEMORY_BALLOON + select PAGE_REPORTING ---help--- This driver supports increasing and decreasing the amount of memory within a KVM guest. diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index 3a2b5c5dcf46..29a1386ecc03 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -6,3 +6,4 @@ virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o obj-$(CONFIG_VIRTIO_INPUT) += virtio_input.o +obj-$(CONFIG_VIRTIO_VDPA) += virtio_vdpa.o diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 341458fd95ca..0ef16566c3f3 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -14,11 +14,13 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/balloon_compaction.h> +#include <linux/oom.h> #include <linux/wait.h> #include <linux/mm.h> #include <linux/mount.h> #include <linux/magic.h> #include <linux/pseudo_fs.h> +#include <linux/page_reporting.h> /* * Balloon device works in 4K page units. So each page is pointed to by @@ -27,7 +29,9 @@ */ #define VIRTIO_BALLOON_PAGES_PER_PAGE (unsigned)(PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT) #define VIRTIO_BALLOON_ARRAY_PFNS_MAX 256 -#define VIRTBALLOON_OOM_NOTIFY_PRIORITY 80 +/* Maximum number of (4k) pages to deflate on OOM notifications. */ +#define VIRTIO_BALLOON_OOM_NR_PAGES 256 +#define VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY 80 #define VIRTIO_BALLOON_FREE_PAGE_ALLOC_FLAG (__GFP_NORETRY | __GFP_NOWARN | \ __GFP_NOMEMALLOC) @@ -47,6 +51,7 @@ enum virtio_balloon_vq { VIRTIO_BALLOON_VQ_DEFLATE, VIRTIO_BALLOON_VQ_STATS, VIRTIO_BALLOON_VQ_FREE_PAGE, + VIRTIO_BALLOON_VQ_REPORTING, VIRTIO_BALLOON_VQ_MAX }; @@ -112,8 +117,15 @@ struct virtio_balloon { /* Memory statistics */ struct virtio_balloon_stat stats[VIRTIO_BALLOON_S_NR]; - /* To register a shrinker to shrink memory upon memory pressure */ + /* Shrinker to return free pages - VIRTIO_BALLOON_F_FREE_PAGE_HINT */ struct shrinker shrinker; + + /* OOM notifier to deflate on OOM - VIRTIO_BALLOON_F_DEFLATE_ON_OOM */ + struct notifier_block oom_nb; + + /* Free page reporting device */ + struct virtqueue *reporting_vq; + struct page_reporting_dev_info pr_dev_info; }; static struct virtio_device_id id_table[] = { @@ -153,6 +165,33 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq) } +int virtballoon_free_page_report(struct page_reporting_dev_info *pr_dev_info, + struct scatterlist *sg, unsigned int nents) +{ + struct virtio_balloon *vb = + container_of(pr_dev_info, struct virtio_balloon, pr_dev_info); + struct virtqueue *vq = vb->reporting_vq; + unsigned int unused, err; + + /* We should always be able to add these buffers to an empty queue. */ + err = virtqueue_add_inbuf(vq, sg, nents, vb, GFP_NOWAIT | __GFP_NOWARN); + + /* + * In the extremely unlikely case that something has occurred and we + * are able to trigger an error we will simply display a warning + * and exit without actually processing the pages. 
+ */ + if (WARN_ON_ONCE(err)) + return err; + + virtqueue_kick(vq); + + /* When host has read buffer, this completes via balloon_ack */ + wait_event(vb->acked, virtqueue_get_buf(vq, &unused)); + + return 0; +} + static void set_page_pfns(struct virtio_balloon *vb, __virtio32 pfns[], struct page *page) { @@ -481,6 +520,7 @@ static int init_vqs(struct virtio_balloon *vb) names[VIRTIO_BALLOON_VQ_STATS] = NULL; callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; names[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; + names[VIRTIO_BALLOON_VQ_REPORTING] = NULL; if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) { names[VIRTIO_BALLOON_VQ_STATS] = "stats"; @@ -492,6 +532,11 @@ static int init_vqs(struct virtio_balloon *vb) callbacks[VIRTIO_BALLOON_VQ_FREE_PAGE] = NULL; } + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) { + names[VIRTIO_BALLOON_VQ_REPORTING] = "reporting_vq"; + callbacks[VIRTIO_BALLOON_VQ_REPORTING] = balloon_ack; + } + err = vb->vdev->config->find_vqs(vb->vdev, VIRTIO_BALLOON_VQ_MAX, vqs, callbacks, names, NULL, NULL); if (err) @@ -524,6 +569,9 @@ static int init_vqs(struct virtio_balloon *vb) if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) vb->free_page_vq = vqs[VIRTIO_BALLOON_VQ_FREE_PAGE]; + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) + vb->reporting_vq = vqs[VIRTIO_BALLOON_VQ_REPORTING]; + return 0; } @@ -788,50 +836,13 @@ static unsigned long shrink_free_pages(struct virtio_balloon *vb, return blocks_freed * VIRTIO_BALLOON_HINT_BLOCK_PAGES; } -static unsigned long leak_balloon_pages(struct virtio_balloon *vb, - unsigned long pages_to_free) -{ - return leak_balloon(vb, pages_to_free * VIRTIO_BALLOON_PAGES_PER_PAGE) / - VIRTIO_BALLOON_PAGES_PER_PAGE; -} - -static unsigned long shrink_balloon_pages(struct virtio_balloon *vb, - unsigned long pages_to_free) -{ - unsigned long pages_freed = 0; - - /* - * One invocation of leak_balloon can deflate at most - * VIRTIO_BALLOON_ARRAY_PFNS_MAX balloon pages, so we call it - * multiple times to deflate pages till reaching pages_to_free. 
- */ - while (vb->num_pages && pages_freed < pages_to_free) - pages_freed += leak_balloon_pages(vb, - pages_to_free - pages_freed); - - update_balloon_size(vb); - - return pages_freed; -} - static unsigned long virtio_balloon_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { - unsigned long pages_to_free, pages_freed = 0; struct virtio_balloon *vb = container_of(shrinker, struct virtio_balloon, shrinker); - pages_to_free = sc->nr_to_scan; - - if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) - pages_freed = shrink_free_pages(vb, pages_to_free); - - if (pages_freed >= pages_to_free) - return pages_freed; - - pages_freed += shrink_balloon_pages(vb, pages_to_free - pages_freed); - - return pages_freed; + return shrink_free_pages(vb, sc->nr_to_scan); } static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, @@ -839,12 +850,22 @@ static unsigned long virtio_balloon_shrinker_count(struct shrinker *shrinker, { struct virtio_balloon *vb = container_of(shrinker, struct virtio_balloon, shrinker); - unsigned long count; - count = vb->num_pages / VIRTIO_BALLOON_PAGES_PER_PAGE; - count += vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES; + return vb->num_free_page_blocks * VIRTIO_BALLOON_HINT_BLOCK_PAGES; +} + +static int virtio_balloon_oom_notify(struct notifier_block *nb, + unsigned long dummy, void *parm) +{ + struct virtio_balloon *vb = container_of(nb, + struct virtio_balloon, oom_nb); + unsigned long *freed = parm; + + *freed += leak_balloon(vb, VIRTIO_BALLOON_OOM_NR_PAGES) / + VIRTIO_BALLOON_PAGES_PER_PAGE; + update_balloon_size(vb); - return count; + return NOTIFY_OK; } static void virtio_balloon_unregister_shrinker(struct virtio_balloon *vb) @@ -864,7 +885,6 @@ static int virtio_balloon_register_shrinker(struct virtio_balloon *vb) static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; - __u32 poison_val; int err; if (!vdev->config->get) { @@ -930,27 +950,65 @@ static int virtballoon_probe(struct virtio_device *vdev) VIRTIO_BALLOON_CMD_ID_STOP); spin_lock_init(&vb->free_page_list_lock); INIT_LIST_HEAD(&vb->free_page_list); - if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) { - memset(&poison_val, PAGE_POISON, sizeof(poison_val)); - virtio_cwrite(vb->vdev, struct virtio_balloon_config, - poison_val, &poison_val); - } - } - /* - * We continue to use VIRTIO_BALLOON_F_DEFLATE_ON_OOM to decide if a - * shrinker needs to be registered to relieve memory pressure. - */ - if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { + /* + * We're allowed to reuse any free pages, even if they are + * still to be processed by the host. + */ err = virtio_balloon_register_shrinker(vb); if (err) goto out_del_balloon_wq; } + + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) { + vb->oom_nb.notifier_call = virtio_balloon_oom_notify; + vb->oom_nb.priority = VIRTIO_BALLOON_OOM_NOTIFY_PRIORITY; + err = register_oom_notifier(&vb->oom_nb); + if (err < 0) + goto out_unregister_shrinker; + } + + if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_PAGE_POISON)) { + /* Start with poison val of 0 representing general init */ + __u32 poison_val = 0; + + /* + * Let the hypervisor know that we are expecting a + * specific value to be written back in balloon pages. 
+ */ + if (!want_init_on_free()) + memset(&poison_val, PAGE_POISON, sizeof(poison_val)); + + virtio_cwrite(vb->vdev, struct virtio_balloon_config, + poison_val, &poison_val); + } + + vb->pr_dev_info.report = virtballoon_free_page_report; + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) { + unsigned int capacity; + + capacity = virtqueue_get_vring_size(vb->reporting_vq); + if (capacity < PAGE_REPORTING_CAPACITY) { + err = -ENOSPC; + goto out_unregister_oom; + } + + err = page_reporting_register(&vb->pr_dev_info); + if (err) + goto out_unregister_oom; + } + virtio_device_ready(vdev); if (towards_target(vb)) virtballoon_changed(vdev); return 0; +out_unregister_oom: + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) + unregister_oom_notifier(&vb->oom_nb); +out_unregister_shrinker: + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) + virtio_balloon_unregister_shrinker(vb); out_del_balloon_wq: if (virtio_has_feature(vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) destroy_workqueue(vb->balloon_wq); @@ -989,7 +1047,11 @@ static void virtballoon_remove(struct virtio_device *vdev) { struct virtio_balloon *vb = vdev->priv; + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_REPORTING)) + page_reporting_unregister(&vb->pr_dev_info); if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_DEFLATE_ON_OOM)) + unregister_oom_notifier(&vb->oom_nb); + if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_FREE_PAGE_HINT)) virtio_balloon_unregister_shrinker(vb); spin_lock_irq(&vb->stop_update_lock); vb->stop_update = true; @@ -1045,7 +1107,10 @@ static int virtballoon_restore(struct virtio_device *vdev) static int virtballoon_validate(struct virtio_device *vdev) { - if (!page_poisoning_enabled()) + /* Tell the host whether we care about poisoned pages. */ + if (!want_init_on_free() && + (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY) || + !page_poisoning_enabled())) __virtio_clear_bit(vdev, VIRTIO_BALLOON_F_PAGE_POISON); __virtio_clear_bit(vdev, VIRTIO_F_IOMMU_PLATFORM); @@ -1058,6 +1123,7 @@ static unsigned int features[] = { VIRTIO_BALLOON_F_DEFLATE_ON_OOM, VIRTIO_BALLOON_F_FREE_PAGE_HINT, VIRTIO_BALLOON_F_PAGE_POISON, + VIRTIO_BALLOON_F_REPORTING, }; static struct virtio_driver virtio_balloon_driver = { diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c new file mode 100644 index 000000000000..c30eb55030be --- /dev/null +++ b/drivers/virtio/virtio_vdpa.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * VIRTIO based driver for vDPA device + * + * Copyright (c) 2020, Red Hat. All rights reserved. 
+ * Author: Jason Wang <jasowang@redhat.com> + * + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/uuid.h> +#include <linux/virtio.h> +#include <linux/vdpa.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> + +#define MOD_VERSION "0.1" +#define MOD_AUTHOR "Jason Wang <jasowang@redhat.com>" +#define MOD_DESC "vDPA bus driver for virtio devices" +#define MOD_LICENSE "GPL v2" + +struct virtio_vdpa_device { + struct virtio_device vdev; + struct vdpa_device *vdpa; + u64 features; + + /* The lock to protect virtqueue list */ + spinlock_t lock; + /* List of virtio_vdpa_vq_info */ + struct list_head virtqueues; +}; + +struct virtio_vdpa_vq_info { + /* the actual virtqueue */ + struct virtqueue *vq; + + /* the list node for the virtqueues list */ + struct list_head node; +}; + +static inline struct virtio_vdpa_device * +to_virtio_vdpa_device(struct virtio_device *dev) +{ + return container_of(dev, struct virtio_vdpa_device, vdev); +} + +static struct vdpa_device *vd_get_vdpa(struct virtio_device *vdev) +{ + return to_virtio_vdpa_device(vdev)->vdpa; +} + +static void virtio_vdpa_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + ops->get_config(vdpa, offset, buf, len); +} + +static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + ops->set_config(vdpa, offset, buf, len); +} + +static u32 virtio_vdpa_generation(struct virtio_device *vdev) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + if (ops->get_generation) + return ops->get_generation(vdpa); + + return 0; +} + +static u8 virtio_vdpa_get_status(struct virtio_device *vdev) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + return ops->get_status(vdpa); +} + +static void virtio_vdpa_set_status(struct virtio_device *vdev, u8 status) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + return ops->set_status(vdpa, status); +} + +static void virtio_vdpa_reset(struct virtio_device *vdev) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + return ops->set_status(vdpa, 0); +} + +static bool virtio_vdpa_notify(struct virtqueue *vq) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vq->vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + ops->kick_vq(vdpa, vq->index); + + return true; +} + +static irqreturn_t virtio_vdpa_config_cb(void *private) +{ + struct virtio_vdpa_device *vd_dev = private; + + virtio_config_changed(&vd_dev->vdev); + + return IRQ_HANDLED; +} + +static irqreturn_t virtio_vdpa_virtqueue_cb(void *private) +{ + struct virtio_vdpa_vq_info *info = private; + + return vring_interrupt(0, info->vq); +} + +static struct virtqueue * +virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index, + void (*callback)(struct virtqueue *vq), + const char *name, bool ctx) +{ + struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + struct virtio_vdpa_vq_info *info; + struct vdpa_callback cb; + struct virtqueue 
*vq; + u64 desc_addr, driver_addr, device_addr; + unsigned long flags; + u32 align, num; + int err; + + if (!name) + return NULL; + + /* Queue shouldn't already be set up. */ + if (ops->get_vq_ready(vdpa, index)) + return ERR_PTR(-ENOENT); + + /* Allocate and fill out our active queue description */ + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + num = ops->get_vq_num_max(vdpa); + if (num == 0) { + err = -ENOENT; + goto error_new_virtqueue; + } + + /* Create the vring */ + align = ops->get_vq_align(vdpa); + vq = vring_create_virtqueue(index, num, align, vdev, + true, true, ctx, + virtio_vdpa_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto error_new_virtqueue; + } + + /* Setup virtqueue callback */ + cb.callback = virtio_vdpa_virtqueue_cb; + cb.private = info; + ops->set_vq_cb(vdpa, index, &cb); + ops->set_vq_num(vdpa, index, virtqueue_get_vring_size(vq)); + + desc_addr = virtqueue_get_desc_addr(vq); + driver_addr = virtqueue_get_avail_addr(vq); + device_addr = virtqueue_get_used_addr(vq); + + if (ops->set_vq_address(vdpa, index, + desc_addr, driver_addr, + device_addr)) { + err = -EINVAL; + goto err_vq; + } + + ops->set_vq_ready(vdpa, index, 1); + + vq->priv = info; + info->vq = vq; + + spin_lock_irqsave(&vd_dev->lock, flags); + list_add(&info->node, &vd_dev->virtqueues); + spin_unlock_irqrestore(&vd_dev->lock, flags); + + return vq; + +err_vq: + vring_del_virtqueue(vq); +error_new_virtqueue: + ops->set_vq_ready(vdpa, index, 0); + /* The vDPA driver should make sure the vq is stopped here */ + WARN_ON(ops->get_vq_ready(vdpa, index)); + kfree(info); + return ERR_PTR(err); +} + +static void virtio_vdpa_del_vq(struct virtqueue *vq) +{ + struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vq->vdev); + struct vdpa_device *vdpa = vd_dev->vdpa; + const struct vdpa_config_ops *ops = vdpa->config; + struct virtio_vdpa_vq_info *info = vq->priv; + unsigned int index = vq->index; + unsigned long flags; + + spin_lock_irqsave(&vd_dev->lock, flags); + list_del(&info->node); + spin_unlock_irqrestore(&vd_dev->lock, flags); + + /* Select and deactivate the queue */ + ops->set_vq_ready(vdpa, index, 0); + WARN_ON(ops->get_vq_ready(vdpa, index)); + + vring_del_virtqueue(vq); + + kfree(info); +} + +static void virtio_vdpa_del_vqs(struct virtio_device *vdev) +{ + struct virtqueue *vq, *n; + + list_for_each_entry_safe(vq, n, &vdev->vqs, list) + virtio_vdpa_del_vq(vq); +} + +static int virtio_vdpa_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char * const names[], + const bool *ctx, + struct irq_affinity *desc) +{ + struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + struct vdpa_callback cb; + int i, err, queue_idx = 0; + + for (i = 0; i < nvqs; ++i) { + if (!names[i]) { + vqs[i] = NULL; + continue; + } + + vqs[i] = virtio_vdpa_setup_vq(vdev, queue_idx++, + callbacks[i], names[i], ctx ?
+ ctx[i] : false); + if (IS_ERR(vqs[i])) { + err = PTR_ERR(vqs[i]); + goto err_setup_vq; + } + } + + cb.callback = virtio_vdpa_config_cb; + cb.private = vd_dev; + ops->set_config_cb(vdpa, &cb); + + return 0; + +err_setup_vq: + virtio_vdpa_del_vqs(vdev); + return err; +} + +static u64 virtio_vdpa_get_features(struct virtio_device *vdev) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + return ops->get_features(vdpa); +} + +static int virtio_vdpa_finalize_features(struct virtio_device *vdev) +{ + struct vdpa_device *vdpa = vd_get_vdpa(vdev); + const struct vdpa_config_ops *ops = vdpa->config; + + /* Give virtio_ring a chance to accept features. */ + vring_transport_features(vdev); + + return ops->set_features(vdpa, vdev->features); +} + +static const char *virtio_vdpa_bus_name(struct virtio_device *vdev) +{ + struct virtio_vdpa_device *vd_dev = to_virtio_vdpa_device(vdev); + struct vdpa_device *vdpa = vd_dev->vdpa; + + return dev_name(&vdpa->dev); +} + +static const struct virtio_config_ops virtio_vdpa_config_ops = { + .get = virtio_vdpa_get, + .set = virtio_vdpa_set, + .generation = virtio_vdpa_generation, + .get_status = virtio_vdpa_get_status, + .set_status = virtio_vdpa_set_status, + .reset = virtio_vdpa_reset, + .find_vqs = virtio_vdpa_find_vqs, + .del_vqs = virtio_vdpa_del_vqs, + .get_features = virtio_vdpa_get_features, + .finalize_features = virtio_vdpa_finalize_features, + .bus_name = virtio_vdpa_bus_name, +}; + +static void virtio_vdpa_release_dev(struct device *_d) +{ + struct virtio_device *vdev = + container_of(_d, struct virtio_device, dev); + struct virtio_vdpa_device *vd_dev = + container_of(vdev, struct virtio_vdpa_device, vdev); + + kfree(vd_dev); +} + +static int virtio_vdpa_probe(struct vdpa_device *vdpa) +{ + const struct vdpa_config_ops *ops = vdpa->config; + struct virtio_vdpa_device *vd_dev, *reg_dev = NULL; + int ret = -EINVAL; + + vd_dev = kzalloc(sizeof(*vd_dev), GFP_KERNEL); + if (!vd_dev) + return -ENOMEM; + + vd_dev->vdev.dev.parent = vdpa_get_dma_dev(vdpa); + vd_dev->vdev.dev.release = virtio_vdpa_release_dev; + vd_dev->vdev.config = &virtio_vdpa_config_ops; + vd_dev->vdpa = vdpa; + INIT_LIST_HEAD(&vd_dev->virtqueues); + spin_lock_init(&vd_dev->lock); + + vd_dev->vdev.id.device = ops->get_device_id(vdpa); + if (vd_dev->vdev.id.device == 0) + goto err; + + vd_dev->vdev.id.vendor = ops->get_vendor_id(vdpa); + ret = register_virtio_device(&vd_dev->vdev); + reg_dev = vd_dev; + if (ret) + goto err; + + vdpa_set_drvdata(vdpa, vd_dev); + + return 0; + +err: + if (reg_dev) + put_device(&vd_dev->vdev.dev); + else + kfree(vd_dev); + return ret; +} + +static void virtio_vdpa_remove(struct vdpa_device *vdpa) +{ + struct virtio_vdpa_device *vd_dev = vdpa_get_drvdata(vdpa); + + unregister_virtio_device(&vd_dev->vdev); +} + +static struct vdpa_driver virtio_vdpa_driver = { + .driver = { + .name = "virtio_vdpa", + }, + .probe = virtio_vdpa_probe, + .remove = virtio_vdpa_remove, +}; + +module_vdpa_driver(virtio_vdpa_driver); + +MODULE_VERSION(MOD_VERSION); +MODULE_LICENSE(MOD_LICENSE); +MODULE_AUTHOR(MOD_AUTHOR); +MODULE_DESCRIPTION(MOD_DESC); diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 9ea2b43d4b01..0663c604bd64 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -584,6 +584,14 @@ config DAVINCI_WATCHDOG NOTE: once enabled, this timer cannot be disabled. Say N if you are unsure. 
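Stepping back from the virtio_vdpa.c hunks above: the VIRTIO_VDPA entry and virtio_vdpa_probe() add a transport, not a device driver. Once the transport registers a virtio_device carrying the ID returned by ops->get_device_id(), any stock virtio driver binds to it through the normal virtio bus match and never sees the vDPA layer. The skeleton below is purely illustrative and not part of this series; the my_virtio_demo_* names are invented.

#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>

/* Matches virtio block devices regardless of transport (PCI, MMIO or vDPA). */
static const struct virtio_device_id my_virtio_demo_id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static int my_virtio_demo_probe(struct virtio_device *vdev)
{
	/* Over vDPA, vdev->config points at virtio_vdpa_config_ops, but
	 * the driver neither knows nor cares which transport it got. */
	dev_info(&vdev->dev, "bound via %s transport\n", virtio_bus_name(vdev));
	return 0;
}

static void my_virtio_demo_remove(struct virtio_device *vdev)
{
	/* Nothing to tear down in this demo. */
}

static struct virtio_driver my_virtio_demo_driver = {
	.driver = {
		.name	= "my-virtio-demo",
		.owner	= THIS_MODULE,
	},
	.id_table	= my_virtio_demo_id_table,
	.probe		= my_virtio_demo_probe,
	.remove		= my_virtio_demo_remove,
};
module_virtio_driver(my_virtio_demo_driver);

MODULE_LICENSE("GPL");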
+config K3_RTI_WATCHDOG + tristate "Texas Instruments K3 RTI watchdog" + depends on ARCH_K3 || COMPILE_TEST + select WATCHDOG_CORE + help + Say Y here if you want to include support for the K3 watchdog + timer (RTI module) available in the K3 generation of processors. + config ORION_WATCHDOG tristate "Orion watchdog" depends on ARCH_ORION5X || ARCH_DOVE || MACH_DOVE || ARCH_MVEBU || (COMPILE_TEST && !ARCH_EBSA110) diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index 2ee352bf3372..6de2e4ceef19 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -57,6 +57,7 @@ obj-$(CONFIG_EP93XX_WATCHDOG) += ep93xx_wdt.o obj-$(CONFIG_PNX4008_WATCHDOG) += pnx4008_wdt.o obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o +obj-$(CONFIG_K3_RTI_WATCHDOG) += rti_wdt.o obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index f8d58bf0bf66..1fe472f56cb3 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -244,6 +244,11 @@ static const struct regmap_config imx2_wdt_regmap_config = { .max_register = 0x8, }; +static void imx2_wdt_action(void *data) +{ + clk_disable_unprepare(data); +} + static int __init imx2_wdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -292,6 +297,10 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) if (ret) return ret; + ret = devm_add_action_or_reset(dev, imx2_wdt_action, wdev->clk); + if (ret) + return ret; + regmap_read(wdev->regmap, IMX2_WDT_WRSR, &val); wdog->bootstatus = val & IMX2_WDT_WRSR_TOUT ? WDIOF_CARDRESET : 0; @@ -315,32 +324,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev) */ regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0); - ret = watchdog_register_device(wdog); - if (ret) - goto disable_clk; - - dev_info(dev, "timeout %d sec (nowayout=%d)\n", - wdog->timeout, nowayout); - - return 0; - -disable_clk: - clk_disable_unprepare(wdev->clk); - return ret; -} - -static int __exit imx2_wdt_remove(struct platform_device *pdev) -{ - struct watchdog_device *wdog = platform_get_drvdata(pdev); - struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); - - watchdog_unregister_device(wdog); - - if (imx2_wdt_is_running(wdev)) { - imx2_wdt_ping(wdog); - dev_crit(&pdev->dev, "Device removed: Expect reboot!\n"); - } - return 0; + return devm_watchdog_register_device(dev, wdog); } static void imx2_wdt_shutdown(struct platform_device *pdev) @@ -417,7 +401,6 @@ static const struct of_device_id imx2_wdt_dt_ids[] = { MODULE_DEVICE_TABLE(of, imx2_wdt_dt_ids); static struct platform_driver imx2_wdt_driver = { - .remove = __exit_p(imx2_wdt_remove), .shutdown = imx2_wdt_shutdown, .driver = { .name = DRIVER_NAME, diff --git a/drivers/watchdog/imx7ulp_wdt.c b/drivers/watchdog/imx7ulp_wdt.c index 11b9e7c6b7f5..7993c8c41b3a 100644 --- a/drivers/watchdog/imx7ulp_wdt.c +++ b/drivers/watchdog/imx7ulp_wdt.c @@ -4,7 +4,6 @@ */ #include <linux/clk.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/watchdog/imx_sc_wdt.c b/drivers/watchdog/imx_sc_wdt.c index 8ed89f032ebf..60a32469f7de 100644 --- a/drivers/watchdog/imx_sc_wdt.c +++ b/drivers/watchdog/imx_sc_wdt.c @@ -6,13 +6,11 @@ #include <linux/arm-smccc.h> #include <linux/firmware/imx/sci.h> #include <linux/io.h> -#include <linux/init.h> #include <linux/kernel.h> #include 
<linux/module.h> #include <linux/moduleparam.h> #include <linux/of.h> #include <linux/platform_device.h> -#include <linux/reboot.h> #include <linux/watchdog.h> #define DEFAULT_TIMEOUT 60 diff --git a/drivers/watchdog/npcm_wdt.c b/drivers/watchdog/npcm_wdt.c index 9c773c3d6d5d..765577f11c8d 100644 --- a/drivers/watchdog/npcm_wdt.c +++ b/drivers/watchdog/npcm_wdt.c @@ -103,30 +103,29 @@ static int npcm_wdt_stop(struct watchdog_device *wdd) return 0; } - static int npcm_wdt_set_timeout(struct watchdog_device *wdd, unsigned int timeout) { if (timeout < 2) wdd->timeout = 1; else if (timeout < 3) - wdd->timeout = 2; + wdd->timeout = 2; else if (timeout < 6) - wdd->timeout = 5; + wdd->timeout = 5; else if (timeout < 11) - wdd->timeout = 10; + wdd->timeout = 10; else if (timeout < 22) - wdd->timeout = 21; + wdd->timeout = 21; else if (timeout < 44) - wdd->timeout = 43; + wdd->timeout = 43; else if (timeout < 87) - wdd->timeout = 86; + wdd->timeout = 86; else if (timeout < 173) - wdd->timeout = 172; + wdd->timeout = 172; else if (timeout < 688) - wdd->timeout = 687; + wdd->timeout = 687; else - wdd->timeout = 2750; + wdd->timeout = 2750; if (watchdog_active(wdd)) npcm_wdt_start(wdd); diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 8e6dfe76f9c9..4ddb4ea2e4a3 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -52,7 +52,7 @@ #define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT) static bool nowayout = WATCHDOG_NOWAYOUT; -static int heartbeat = -1; /* module parameter (seconds) */ +static int heartbeat; /* module parameter (seconds) */ struct orion_watchdog; diff --git a/drivers/watchdog/pm8916_wdt.c b/drivers/watchdog/pm8916_wdt.c index 1213179f863c..0937b8d33104 100644 --- a/drivers/watchdog/pm8916_wdt.c +++ b/drivers/watchdog/pm8916_wdt.c @@ -192,6 +192,7 @@ static int pm8916_wdt_probe(struct platform_device *pdev) wdt->wdev.timeout = PM8916_WDT_DEFAULT_TIMEOUT; wdt->wdev.pretimeout = 0; watchdog_set_drvdata(&wdt->wdev, wdt); + platform_set_drvdata(pdev, wdt); watchdog_init_timeout(&wdt->wdev, 0, dev); pm8916_wdt_configure_timers(&wdt->wdev); @@ -199,6 +200,29 @@ static int pm8916_wdt_probe(struct platform_device *pdev) return devm_watchdog_register_device(dev, &wdt->wdev); } +static int __maybe_unused pm8916_wdt_suspend(struct device *dev) +{ + struct pm8916_wdt *wdt = dev_get_drvdata(dev); + + if (watchdog_active(&wdt->wdev)) + return pm8916_wdt_stop(&wdt->wdev); + + return 0; +} + +static int __maybe_unused pm8916_wdt_resume(struct device *dev) +{ + struct pm8916_wdt *wdt = dev_get_drvdata(dev); + + if (watchdog_active(&wdt->wdev)) + return pm8916_wdt_start(&wdt->wdev); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(pm8916_wdt_pm_ops, pm8916_wdt_suspend, + pm8916_wdt_resume); + static const struct of_device_id pm8916_wdt_id_table[] = { { .compatible = "qcom,pm8916-wdt" }, { } @@ -210,6 +234,7 @@ static struct platform_driver pm8916_wdt_driver = { .driver = { .name = "pm8916-wdt", .of_match_table = of_match_ptr(pm8916_wdt_id_table), + .pm = &pm8916_wdt_pm_ops, }, }; module_platform_driver(pm8916_wdt_driver); diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c index eb47fe5ed280..ab7465d186fd 100644 --- a/drivers/watchdog/qcom-wdt.c +++ b/drivers/watchdog/qcom-wdt.c @@ -40,6 +40,11 @@ static const u32 reg_offset_data_kpss[] = { [WDT_BITE_TIME] = 0x14, }; +struct qcom_wdt_match_data { + const u32 *offset; + bool pretimeout; +}; + struct qcom_wdt { struct watchdog_device wdd; unsigned long rate; @@ -179,19 +184,29 @@ static void 
qcom_clk_disable_unprepare(void *data) clk_disable_unprepare(data); } +static const struct qcom_wdt_match_data match_data_apcs_tmr = { + .offset = reg_offset_data_apcs_tmr, + .pretimeout = false, +}; + +static const struct qcom_wdt_match_data match_data_kpss = { + .offset = reg_offset_data_kpss, + .pretimeout = true, +}; + static int qcom_wdt_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct qcom_wdt *wdt; struct resource *res; struct device_node *np = dev->of_node; - const u32 *regs; + const struct qcom_wdt_match_data *data; u32 percpu_offset; int irq, ret; struct clk *clk; - regs = of_device_get_match_data(dev); - if (!regs) { + data = of_device_get_match_data(dev); + if (!data) { dev_err(dev, "Unsupported QCOM WDT module\n"); return -ENODEV; } @@ -247,9 +262,8 @@ static int qcom_wdt_probe(struct platform_device *pdev) /* check if there is pretimeout support */ irq = platform_get_irq_optional(pdev, 0); - if (irq > 0) { - ret = devm_request_irq(dev, irq, qcom_wdt_isr, - IRQF_TRIGGER_RISING, + if (data->pretimeout && irq > 0) { + ret = devm_request_irq(dev, irq, qcom_wdt_isr, 0, "wdt_bark", &wdt->wdd); if (ret) return ret; @@ -267,7 +281,7 @@ static int qcom_wdt_probe(struct platform_device *pdev) wdt->wdd.min_timeout = 1; wdt->wdd.max_timeout = 0x10000000U / wdt->rate; wdt->wdd.parent = dev; - wdt->layout = regs; + wdt->layout = data->offset; if (readl(wdt_addr(wdt, WDT_STS)) & 1) wdt->wdd.bootstatus = WDIOF_CARDRESET; @@ -311,9 +325,9 @@ static int __maybe_unused qcom_wdt_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(qcom_wdt_pm_ops, qcom_wdt_suspend, qcom_wdt_resume); static const struct of_device_id qcom_wdt_of_table[] = { - { .compatible = "qcom,kpss-timer", .data = reg_offset_data_apcs_tmr }, - { .compatible = "qcom,scss-timer", .data = reg_offset_data_apcs_tmr }, - { .compatible = "qcom,kpss-wdt", .data = reg_offset_data_kpss }, + { .compatible = "qcom,kpss-timer", .data = &match_data_apcs_tmr }, + { .compatible = "qcom,scss-timer", .data = &match_data_apcs_tmr }, + { .compatible = "qcom,kpss-wdt", .data = &match_data_kpss }, { }, }; MODULE_DEVICE_TABLE(of, qcom_wdt_of_table); diff --git a/drivers/watchdog/rti_wdt.c b/drivers/watchdog/rti_wdt.c new file mode 100644 index 000000000000..d456dd72d99a --- /dev/null +++ b/drivers/watchdog/rti_wdt.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Watchdog driver for the K3 RTI module + * + * (c) Copyright 2019-2020 Texas Instruments Inc. + * All rights reserved. 
+ */ + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/watchdog.h> + +#define DEFAULT_HEARTBEAT 60 + +/* Max heartbeat is calculated at 32kHz source clock */ +#define MAX_HEARTBEAT 1000 + +/* Timer register set definition */ +#define RTIDWDCTRL 0x90 +#define RTIDWDPRLD 0x94 +#define RTIWDSTATUS 0x98 +#define RTIWDKEY 0x9c +#define RTIDWDCNTR 0xa0 +#define RTIWWDRXCTRL 0xa4 +#define RTIWWDSIZECTRL 0xa8 + +#define RTIWWDRX_NMI 0xa + +#define RTIWWDSIZE_50P 0x50 + +#define WDENABLE_KEY 0xa98559da + +#define WDKEY_SEQ0 0xe51a +#define WDKEY_SEQ1 0xa35c + +#define WDT_PRELOAD_SHIFT 13 + +#define WDT_PRELOAD_MAX 0xfff + +#define DWDST BIT(1) + +static int heartbeat; + +/* + * struct to hold data for each WDT device + * @base - base io address of WD device + * @freq - source clock frequency of WDT + * @wdd - hold watchdog device as is in WDT core + */ +struct rti_wdt_device { + void __iomem *base; + unsigned long freq; + struct watchdog_device wdd; +}; + +static int rti_wdt_start(struct watchdog_device *wdd) +{ + u32 timer_margin; + struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd); + + /* set timeout period */ + timer_margin = (u64)wdd->timeout * wdt->freq; + timer_margin >>= WDT_PRELOAD_SHIFT; + if (timer_margin > WDT_PRELOAD_MAX) + timer_margin = WDT_PRELOAD_MAX; + writel_relaxed(timer_margin, wdt->base + RTIDWDPRLD); + + /* + * RTI only supports a windowed mode, where the watchdog can only + * be petted during the open window; neither too early nor too late. + * The HW configuration options only allow the open window size to + * be 50% of the period or less; we obviously want the open window + * as large as possible, so we select the 50% option. To avoid any + * glitches, we also accommodate a 5% safety margin, so we set + * min_hw_heartbeat to 55% of the timeout period.
+ */ + wdd->min_hw_heartbeat_ms = 11 * wdd->timeout * 1000 / 20; + + /* Generate NMI when wdt expires */ + writel_relaxed(RTIWWDRX_NMI, wdt->base + RTIWWDRXCTRL); + + /* Open window size 50%; this is the largest window size available */ + writel_relaxed(RTIWWDSIZE_50P, wdt->base + RTIWWDSIZECTRL); + + readl_relaxed(wdt->base + RTIWWDSIZECTRL); + + /* enable watchdog */ + writel_relaxed(WDENABLE_KEY, wdt->base + RTIDWDCTRL); + return 0; +} + +static int rti_wdt_ping(struct watchdog_device *wdd) +{ + struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd); + + /* put watchdog in service state */ + writel_relaxed(WDKEY_SEQ0, wdt->base + RTIWDKEY); + /* put watchdog in active state */ + writel_relaxed(WDKEY_SEQ1, wdt->base + RTIWDKEY); + + return 0; +} + +static unsigned int rti_wdt_get_timeleft(struct watchdog_device *wdd) +{ + u64 timer_counter; + u32 val; + struct rti_wdt_device *wdt = watchdog_get_drvdata(wdd); + + /* if timeout has occurred then return 0 */ + val = readl_relaxed(wdt->base + RTIWDSTATUS); + if (val & DWDST) + return 0; + + timer_counter = readl_relaxed(wdt->base + RTIDWDCNTR); + + do_div(timer_counter, wdt->freq); + + return timer_counter; +} + +static const struct watchdog_info rti_wdt_info = { + .options = WDIOF_KEEPALIVEPING, + .identity = "K3 RTI Watchdog", +}; + +static const struct watchdog_ops rti_wdt_ops = { + .owner = THIS_MODULE, + .start = rti_wdt_start, + .ping = rti_wdt_ping, + .get_timeleft = rti_wdt_get_timeleft, +}; + +static int rti_wdt_probe(struct platform_device *pdev) +{ + int ret = 0; + struct device *dev = &pdev->dev; + struct resource *wdt_mem; + struct watchdog_device *wdd; + struct rti_wdt_device *wdt; + struct clk *clk; + + wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL); + if (!wdt) + return -ENOMEM; + + clk = clk_get(dev, NULL); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) != -EPROBE_DEFER) + dev_err(dev, "failed to get clock\n"); + return PTR_ERR(clk); + } + + wdt->freq = clk_get_rate(clk); + + clk_put(clk); + + if (!wdt->freq) { + dev_err(dev, "Failed to get fck rate.\n"); + return -EINVAL; + } + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "runtime pm failed\n"); + return ret; + } + + platform_set_drvdata(pdev, wdt); + + wdd = &wdt->wdd; + wdd->info = &rti_wdt_info; + wdd->ops = &rti_wdt_ops; + wdd->min_timeout = 1; + wdd->max_hw_heartbeat_ms = (WDT_PRELOAD_MAX << WDT_PRELOAD_SHIFT) / + wdt->freq * 1000; + wdd->timeout = DEFAULT_HEARTBEAT; + wdd->parent = dev; + + watchdog_init_timeout(wdd, heartbeat, dev); + + watchdog_set_drvdata(wdd, wdt); + watchdog_set_nowayout(wdd, 1); + watchdog_set_restart_priority(wdd, 128); + + wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + wdt->base = devm_ioremap_resource(dev, wdt_mem); + if (IS_ERR(wdt->base)) { + ret = PTR_ERR(wdt->base); + goto err_iomap; + } + + ret = watchdog_register_device(wdd); + if (ret) { + dev_err(dev, "cannot register watchdog device\n"); + goto err_iomap; + } + + return 0; + +err_iomap: + pm_runtime_put_sync(&pdev->dev); + + return ret; +} + +static int rti_wdt_remove(struct platform_device *pdev) +{ + struct rti_wdt_device *wdt = platform_get_drvdata(pdev); + + watchdog_unregister_device(&wdt->wdd); + pm_runtime_put(&pdev->dev); + + return 0; +} + +static const struct of_device_id rti_wdt_of_match[] = { + { .compatible = "ti,j7-rti-wdt", }, + {}, +}; +MODULE_DEVICE_TABLE(of, rti_wdt_of_match); + +static struct platform_driver rti_wdt_driver = { + .driver = { + .name = "rti-wdt", + .of_match_table 
= rti_wdt_of_match, + }, + .probe = rti_wdt_probe, + .remove = rti_wdt_remove, +}; + +module_platform_driver(rti_wdt_driver); + +MODULE_AUTHOR("Tero Kristo <t-kristo@ti.com>"); +MODULE_DESCRIPTION("K3 RTI Watchdog Driver"); + +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, + "Watchdog heartbeat period in seconds from 1 to " + __MODULE_STRING(MAX_HEARTBEAT) ", default " + __MODULE_STRING(DEFAULT_HEARTBEAT)); + +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rti-wdt"); diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 861daf4f37b2..423844757812 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -39,6 +39,10 @@ static DEFINE_IDA(watchdog_ida); +static int stop_on_reboot = -1; +module_param(stop_on_reboot, int, 0444); +MODULE_PARM_DESC(stop_on_reboot, "Stop watchdogs on reboot (0=keep watching, 1=stop)"); + /* * Deferred Registration infrastructure. * @@ -254,6 +258,14 @@ static int __watchdog_register_device(struct watchdog_device *wdd) } } + /* Module parameter to force watchdog policy on reboot. */ + if (stop_on_reboot != -1) { + if (stop_on_reboot) + set_bit(WDOG_STOP_ON_REBOOT, &wdd->status); + else + clear_bit(WDOG_STOP_ON_REBOOT, &wdd->status); + } + if (test_bit(WDOG_STOP_ON_REBOOT, &wdd->status)) { wdd->reboot_nb.notifier_call = watchdog_reboot_notifier; diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 8b5c742f24e8..7e4cd34a8c20 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -282,6 +282,7 @@ static int watchdog_start(struct watchdog_device *wdd) if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; + wd_data->last_hw_keepalive = started_at; watchdog_update_worker(wdd); } diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c index 030ce240620d..d96ad8f38bd2 100644 --- a/drivers/watchdog/wm831x_wdt.c +++ b/drivers/watchdog/wm831x_wdt.c @@ -13,7 +13,6 @@ #include <linux/platform_device.h> #include <linux/watchdog.h> #include <linux/uaccess.h> -#include <linux/gpio.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/pdata.h> @@ -29,7 +28,6 @@ struct wm831x_wdt_drvdata { struct watchdog_device wdt; struct wm831x *wm831x; struct mutex lock; - int update_gpio; int update_state; }; @@ -103,14 +101,6 @@ static int wm831x_wdt_ping(struct watchdog_device *wdt_dev) mutex_lock(&driver_data->lock); - if (driver_data->update_gpio) { - gpio_set_value_cansleep(driver_data->update_gpio, - driver_data->update_state); - driver_data->update_state = !driver_data->update_state; - ret = 0; - goto out; - } - reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG); if (!(reg & WM831X_WDOG_RST_SRC)) { @@ -239,23 +229,6 @@ static int wm831x_wdt_probe(struct platform_device *pdev) reg |= pdata->secondary << WM831X_WDOG_SECACT_SHIFT; reg |= pdata->software << WM831X_WDOG_RST_SRC_SHIFT; - if (pdata->update_gpio) { - ret = devm_gpio_request_one(dev, pdata->update_gpio, - GPIOF_OUT_INIT_LOW, - "Watchdog update"); - if (ret < 0) { - dev_err(wm831x->dev, - "Failed to request update GPIO: %d\n", - ret); - return ret; - } - - driver_data->update_gpio = pdata->update_gpio; - - /* Make sure the watchdog takes hardware updates */ - reg |= WM831X_WDOG_RST_SRC; - } - ret = wm831x_reg_unlock(wm831x); if (ret == 0) { ret = wm831x_reg_write(wm831x, WM831X_WATCHDOG, reg); diff --git a/drivers/watchdog/ziirave_wdt.c b/drivers/watchdog/ziirave_wdt.c index 4a363a8b2d20..cab86a08456b 100644 --- 
a/drivers/watchdog/ziirave_wdt.c +++ b/drivers/watchdog/ziirave_wdt.c @@ -422,7 +422,7 @@ static int ziirave_firm_upload(struct watchdog_device *wdd, static const struct watchdog_info ziirave_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, - .identity = "Zodiac RAVE Watchdog", + .identity = "RAVE Switch Watchdog", }; static const struct watchdog_ops ziirave_wdt_ops = {
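Back to the rti_wdt driver added above: the block comment in rti_wdt_start() compresses the whole windowed-watchdog policy into a single expression, so a worked example of the arithmetic may help. The helper below only illustrates the math already in the patch (the function name is invented), and the 60 s figure is simply the driver's DEFAULT_HEARTBEAT.

/* 55% of the timeout in milliseconds: the open window is the last 50%
 * of the period, and a further 5% is kept as a safety margin. */
static unsigned int rti_example_min_hw_heartbeat_ms(unsigned int timeout_s)
{
	return 11 * timeout_s * 1000 / 20;
}

/*
 * For the default 60 s timeout: 11 * 60 * 1000 / 20 = 33000 ms. The
 * watchdog core therefore defers any ping issued during the first 33 s
 * of a 60 s period, so the hardware only ever sees pings that fall
 * inside the 50% open window configured via RTIWWDSIZE_50P.
 */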