Diffstat (limited to 'drivers/gpu')
32 files changed, 142 insertions, 252 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index 5a1f2433632b..73f2257acc23 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -333,10 +333,6 @@ int kfd_iommu_resume(struct kfd_dev *kfd)
 	return 0;
 }
 
-extern bool amd_iommu_pc_supported(void);
-extern u8 amd_iommu_pc_get_max_banks(u16 devid);
-extern u8 amd_iommu_pc_get_max_counters(u16 devid);
-
 /** kfd_iommu_add_perf_counters - Add IOMMU performance counters to topology
  */
 int kfd_iommu_add_perf_counters(struct kfd_topology_device *kdev)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 33a93fa24eb1..73e4de3c7f49 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1290,7 +1290,8 @@ EXPORT_SYMBOL(drm_mode_prune_invalid);
  * Negative if @lh_a is better than @lh_b, zero if they're equivalent, or
  * positive if @lh_b is better than @lh_a.
  */
-static int drm_mode_compare(void *priv, struct list_head *lh_a, struct list_head *lh_b)
+static int drm_mode_compare(void *priv, const struct list_head *lh_a,
+			    const struct list_head *lh_b)
 {
 	struct drm_display_mode *a = list_entry(lh_a, struct drm_display_mode, head);
 	struct drm_display_mode *b = list_entry(lh_b, struct drm_display_mode, head);
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 1e1cb245fca7..69f57ca9c68d 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -20,6 +20,7 @@ config DRM_I915
 	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	select ACPI_BUTTON if ACPI
+	select IO_MAPPING
 	select SYNC_FILE
 	select IOSF_MBI
 	select CRC32
@@ -101,6 +102,7 @@ config DRM_I915_GVT
 	bool "Enable Intel GVT-g graphics virtualization host support"
 	depends on DRM_I915
 	depends on 64BIT
+	depends on VFIO_MDEV=y || VFIO_MDEV=DRM_I915
 	default n
 	help
 	  Choose this option if you want to enable Intel GVT-g graphics
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index a560468765c0..6a2dee8cef1f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -3474,7 +3474,18 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp)
 	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);
 
 	for (;;) {
-		u8 esi[DP_DPRX_ESI_LEN] = {};
+		/*
+		 * The +2 is because DP_DPRX_ESI_LEN is 14, but we then
+		 * pass in "esi+10" to drm_dp_channel_eq_ok(), which
+		 * takes a 6-byte array. So we actually need 16 bytes
+		 * here.
+		 *
+		 * Somebody who knows what the limits actually are
+		 * should check this, but for now this is at least
+		 * harmless and avoids a valid compiler warning about
+		 * using more of the array than we have allocated.
+		 */
+		u8 esi[DP_DPRX_ESI_LEN+2] = {};
 		bool handled;
 		int retry;
 
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 651884390137..4f8337c7fd2e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -646,7 +646,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
 		break;
 	case INTEL_BACKLIGHT_DISPLAY_DDI:
 		try_intel_interface = true;
-		try_vesa_interface = true;
 		break;
 	default:
 		return -ENODEV;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 597634e4f35f..02a003fd48fb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -882,7 +882,8 @@ void intel_dp_start_link_train(struct intel_dp *intel_dp,
 	int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp);
 
 	if (lttpr_count < 0)
-		return;
+		/* Still continue with enabling the port and link training. */
+		lttpr_count = 0;
 
 	if (!intel_dp_link_train_all_phys(intel_dp, crtc_state, lttpr_count))
 		intel_dp_schedule_fallback_link_training(intel_dp, crtc_state);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 1059a26c1f58..74a27508759d 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -993,14 +993,14 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state,
 	 * FIXME As we do with eDP, just make a note of the time here
 	 * and perform the wait before the next panel power on.
 	 */
-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+	msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static void intel_dsi_shutdown(struct intel_encoder *encoder)
 {
 	struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
 
-	intel_dsi_msleep(intel_dsi, intel_dsi->panel_pwr_cycle_delay);
+	msleep(intel_dsi->panel_pwr_cycle_delay);
 }
 
 static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 21cc40897ca8..ce6b664b10aa 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -42,7 +42,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	max_order = MAX_ORDER;
 
 #ifdef CONFIG_SWIOTLB
-	if (swiotlb_nr_tbl()) {
+	if (is_swiotlb_active()) {
 		unsigned int max_segment;
 
 		max_segment = swiotlb_max_segment();
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 2561a2f1e54f..23f6b00e08e2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -367,11 +367,10 @@ retry:
 		goto err_unpin;
 
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = io_mapping_map_user(&ggtt->iomap, area, area->vm_start +
+			(vma->ggtt_view.partial.offset << PAGE_SHIFT),
+			(ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
+			min_t(u64, vma->size, area->vm_end - area->vm_start));
 	if (ret)
 		goto err_fence;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 1cbd84eb24e4..3cca7ea2d6ea 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -48,7 +48,8 @@ static const u8 uabi_classes[] = {
 	[VIDEO_ENHANCEMENT_CLASS] = I915_ENGINE_CLASS_VIDEO_ENHANCE,
 };
 
-static int engine_cmp(void *priv, struct list_head *A, struct list_head *B)
+static int engine_cmp(void *priv, const struct list_head *A,
+		      const struct list_head *B)
 {
 	const struct intel_engine_cs *a =
 		container_of((struct rb_node *)A, typeof(*a), uabi_node);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 42c967dd8c60..ca9c9e27a43d 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -916,19 +916,26 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 
 	if (!strncmp(cmd, "srm", 3) ||
 			!strncmp(cmd, "lrm", 3)) {
-		if (offset != i915_mmio_reg_offset(GEN8_L3SQCREG4) &&
-				offset != 0x21f0) {
+		if (offset == i915_mmio_reg_offset(GEN8_L3SQCREG4) ||
+		    offset == 0x21f0 ||
+		    (IS_BROADWELL(gvt->gt->i915) &&
+		     offset == i915_mmio_reg_offset(INSTPM)))
+			return 0;
+		else {
 			gvt_vgpu_err("%s access to register (%x)\n",
 					cmd, offset);
 			return -EPERM;
-		} else
-			return 0;
+		}
 	}
 
 	if (!strncmp(cmd, "lrr-src", 7) ||
 			!strncmp(cmd, "lrr-dst", 7)) {
-		gvt_vgpu_err("not allowed cmd %s\n", cmd);
-		return -EPERM;
+		if (IS_BROADWELL(gvt->gt->i915) && offset == 0x215c)
+			return 0;
+		else {
+			gvt_vgpu_err("not allowed cmd %s reg (%x)\n", cmd, offset);
+			return -EPERM;
+		}
 	}
 
 	if (!strncmp(cmd, "pipe_ctrl", 9)) {
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 62e6a14ad58e..9f1c209d9251 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -41,7 +41,7 @@ struct diff_mmio {
 
 /* Compare two diff_mmio items. */
 static int mmio_offset_compare(void *priv,
-			       struct list_head *a, struct list_head *b)
+			       const struct list_head *a, const struct list_head *b)
 {
 	struct diff_mmio *ma;
 	struct diff_mmio *mb;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 1deb253ffe80..e7c2babcee8b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -46,32 +46,23 @@ static const char * const supported_hypervisors[] = {
 	[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
 };
 
-static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
-		const char *name)
+static struct intel_vgpu_type *
+intel_gvt_find_vgpu_type(struct intel_gvt *gvt, unsigned int type_group_id)
 {
-	const char *driver_name =
-		dev_driver_string(gvt->gt->i915->drm.dev);
-	int i;
-
-	name += strlen(driver_name) + 1;
-	for (i = 0; i < gvt->num_types; i++) {
-		struct intel_vgpu_type *t = &gvt->types[i];
-
-		if (!strncmp(t->name, name, sizeof(t->name)))
-			return t;
-	}
-
-	return NULL;
+	if (WARN_ON(type_group_id >= gvt->num_types))
+		return NULL;
+	return &gvt->types[type_group_id];
 }
 
-static ssize_t available_instances_show(struct kobject *kobj,
-					struct device *dev, char *buf)
+static ssize_t available_instances_show(struct mdev_type *mtype,
+					struct mdev_type_attribute *attr,
+					char *buf)
 {
 	struct intel_vgpu_type *type;
 	unsigned int num = 0;
-	void *gvt = kdev_to_i915(dev)->gvt;
+	void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
 
-	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
 	if (!type)
 		num = 0;
 	else
@@ -80,19 +71,19 @@ static ssize_t available_instances_show(struct kobject *kobj,
 	return sprintf(buf, "%u\n", num);
 }
 
-static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t device_api_show(struct mdev_type *mtype,
+			       struct mdev_type_attribute *attr, char *buf)
 {
 	return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
 }
 
-static ssize_t description_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t description_show(struct mdev_type *mtype,
+				struct mdev_type_attribute *attr, char *buf)
 {
 	struct intel_vgpu_type *type;
-	void *gvt = kdev_to_i915(dev)->gvt;
+	void *gvt = kdev_to_i915(mtype_get_parent_dev(mtype))->gvt;
 
-	type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	type = intel_gvt_find_vgpu_type(gvt, mtype_get_type_group_id(mtype));
 	if (!type)
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 8dc8170ba00f..88ab360fcb31 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -574,8 +574,8 @@ struct intel_gvt_ops {
 	void (*vgpu_reset)(struct intel_vgpu *);
 	void (*vgpu_activate)(struct intel_vgpu *);
 	void (*vgpu_deactivate)(struct intel_vgpu *);
-	struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
-			const char *name);
+	struct intel_vgpu_type *(*gvt_find_vgpu_type)(
+		struct intel_gvt *gvt, unsigned int type_group_id);
 	bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index d089770795b8..65ff43cfc0f7 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -689,7 +689,7 @@ static void kvmgt_put_vfio_device(void *vgpu)
 	vfio_device_put(vdev->vfio_device);
 }
 
-static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
+static int intel_vgpu_create(struct mdev_device *mdev)
 {
 	struct intel_vgpu *vgpu = NULL;
 	struct intel_vgpu_type *type;
@@ -700,10 +700,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
 	pdev = mdev_parent_dev(mdev);
 	gvt = kdev_to_i915(pdev)->gvt;
 
-	type = intel_gvt_ops->gvt_find_vgpu_type(gvt, kobject_name(kobj));
+	type = intel_gvt_ops->gvt_find_vgpu_type(gvt,
+						 mdev_get_type_group_id(mdev));
 	if (!type) {
-		gvt_vgpu_err("failed to find type %s to create\n",
-			     kobject_name(kobj));
 		ret = -EINVAL;
 		goto out;
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 69e43bf91a15..9ec9277539ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1905,9 +1905,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
 
 /* i915_mm.c */
-int remap_io_mapping(struct vm_area_struct *vma,
-		     unsigned long addr, unsigned long pfn, unsigned long size,
-		     struct io_mapping *iomap);
 int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase);
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index e622aee6e4be..440c35f1abc9 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -105,7 +105,7 @@ static inline bool tasklet_is_locked(const struct tasklet_struct *t)
 static inline void __tasklet_disable_sync_once(struct tasklet_struct *t)
 {
 	if (!atomic_fetch_inc(&t->count))
-		tasklet_unlock_wait(t);
+		tasklet_unlock_spin_wait(t);
 }
 
 static inline bool __tasklet_is_enabled(const struct tasklet_struct *t)
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 666808cb3a32..4c8cd08c672d 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -28,89 +28,9 @@
 
 #include "i915_drv.h"
 
-struct remap_pfn {
-	struct mm_struct *mm;
-	unsigned long pfn;
-	pgprot_t prot;
-
-	struct sgt_iter sgt;
-	resource_size_t iobase;
-};
-
-static int remap_pfn(pte_t *pte, unsigned long addr, void *data)
-{
-	struct remap_pfn *r = data;
-
-	/* Special PTE are not associated with any struct page */
-	set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));
-	r->pfn++;
-
-	return 0;
-}
-
-#define use_dma(io) ((io) != -1)
-
-static inline unsigned long sgt_pfn(const struct remap_pfn *r)
-{
-	if (use_dma(r->iobase))
-		return (r->sgt.dma + r->sgt.curr + r->iobase) >> PAGE_SHIFT;
-	else
-		return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);
-}
-
-static int remap_sg(pte_t *pte, unsigned long addr, void *data)
-{
-	struct remap_pfn *r = data;
-
-	if (GEM_WARN_ON(!r->sgt.sgp))
-		return -EINVAL;
-
-	/* Special PTE are not associated with any struct page */
-	set_pte_at(r->mm, addr, pte,
-		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
-	r->pfn++; /* track insertions in case we need to unwind later */
-
-	r->sgt.curr += PAGE_SIZE;
-	if (r->sgt.curr >= r->sgt.max)
-		r->sgt = __sgt_iter(__sg_next(r->sgt.sgp), use_dma(r->iobase));
-
-	return 0;
-}
-
-/**
- * remap_io_mapping - remap an IO mapping to userspace
- * @vma: user vma to map to
- * @addr: target user address to start at
- * @pfn: physical address of kernel memory
- * @size: size of map area
- * @iomap: the source io_mapping
- *
- * Note: this is only safe if the mm semaphore is held when called.
- */
-int remap_io_mapping(struct vm_area_struct *vma,
-		     unsigned long addr, unsigned long pfn, unsigned long size,
-		     struct io_mapping *iomap)
-{
-	struct remap_pfn r;
-	int err;
-
 #define EXPECTED_FLAGS (VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP)
-	GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
-
-	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
-	r.mm = vma->vm_mm;
-	r.pfn = pfn;
-	r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |
-			  (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
-	err = apply_to_page_range(r.mm, addr, size, remap_pfn, &r);
-	if (unlikely(err)) {
-		zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
-		return err;
-	}
-
-	return 0;
-}
+#define use_dma(io) ((io) != -1)
 
 /**
  * remap_io_sg - remap an IO mapping to userspace
@@ -126,12 +46,7 @@ int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
 		struct scatterlist *sgl, resource_size_t iobase)
 {
-	struct remap_pfn r = {
-		.mm = vma->vm_mm,
-		.prot = vma->vm_page_prot,
-		.sgt = __sgt_iter(sgl, use_dma(iobase)),
-		.iobase = iobase,
-	};
+	unsigned long pfn, len, remapped = 0;
 	int err;
 
 	/* We rely on prevalidation of the io-mapping to skip track_pfn(). */
@@ -140,11 +55,25 @@ int remap_io_sg(struct vm_area_struct *vma,
 	if (!use_dma(iobase))
 		flush_cache_range(vma, addr, size);
 
-	err = apply_to_page_range(r.mm, addr, size, remap_sg, &r);
-	if (unlikely(err)) {
-		zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);
-		return err;
-	}
-
-	return 0;
+	do {
+		if (use_dma(iobase)) {
+			if (!sg_dma_len(sgl))
+				break;
+			pfn = (sg_dma_address(sgl) + iobase) >> PAGE_SHIFT;
+			len = sg_dma_len(sgl);
+		} else {
+			pfn = page_to_pfn(sg_page(sgl));
+			len = sgl->length;
+		}
+
+		err = remap_pfn_range(vma, addr + remapped, pfn, len,
+				      vma->vm_page_prot);
+		if (err)
+			break;
+		remapped += len;
+	} while ((sgl = __sg_next(sgl)));
+
+	if (err)
+		zap_vma_ptes(vma, addr, remapped);
+	return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 066abaa73a06..0e2501b7fc27 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2994,7 +2994,7 @@ int ilk_wm_max_level(const struct drm_i915_private *dev_priv)
 
 static void intel_print_wm_latency(struct drm_i915_private *dev_priv,
 				   const char *name,
-				   const u16 wm[8])
+				   const u16 wm[])
 {
 	int level, max_level = ilk_wm_max_level(dev_priv);
 
@@ -5511,12 +5511,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state,
 	struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id];
 	int ret;
 
-	memset(wm, 0, sizeof(*wm));
-
 	/* Watermarks calculated in master */
 	if (plane_state->planar_slave)
 		return 0;
 
+	memset(wm, 0, sizeof(*wm));
+
 	if (plane_state->planar_linked_plane) {
 		const struct drm_framebuffer *fb = plane_state->hw.fb;
 		enum plane_id y_plane_id = plane_state->planar_linked_plane->id;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index 2e4f06eaacc1..45c6c0107c7c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -1106,7 +1106,8 @@ static int igt_ppgtt_shrink_boom(void *arg)
 	return exercise_ppgtt(arg, shrink_boom);
 }
 
-static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
+static int sort_holes(void *priv, const struct list_head *A,
+		      const struct list_head *B)
 {
 	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
 	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c
index 4f64940b9055..8989e215dfc9 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.c
+++ b/drivers/gpu/drm/lima/lima_devfreq.c
@@ -100,20 +100,12 @@ void lima_devfreq_fini(struct lima_device *ldev)
 		devm_devfreq_remove_device(ldev->dev, devfreq->devfreq);
 		devfreq->devfreq = NULL;
 	}
-
-	dev_pm_opp_of_remove_table(ldev->dev);
-
-	dev_pm_opp_put_regulators(devfreq->regulators_opp_table);
-	dev_pm_opp_put_clkname(devfreq->clkname_opp_table);
-	devfreq->regulators_opp_table = NULL;
-	devfreq->clkname_opp_table = NULL;
 }
 
 int lima_devfreq_init(struct lima_device *ldev)
 {
 	struct thermal_cooling_device *cooling;
 	struct device *dev = ldev->dev;
-	struct opp_table *opp_table;
 	struct devfreq *devfreq;
 	struct lima_devfreq *ldevfreq = &ldev->devfreq;
 	struct dev_pm_opp *opp;
@@ -126,40 +118,28 @@ int lima_devfreq_init(struct lima_device *ldev)
 
 	spin_lock_init(&ldevfreq->lock);
 
-	opp_table = dev_pm_opp_set_clkname(dev, "core");
-	if (IS_ERR(opp_table)) {
-		ret = PTR_ERR(opp_table);
-		goto err_fini;
-	}
-
-	ldevfreq->clkname_opp_table = opp_table;
-
-	opp_table = dev_pm_opp_set_regulators(dev,
-					      (const char *[]){ "mali" },
-					      1);
-	if (IS_ERR(opp_table)) {
-		ret = PTR_ERR(opp_table);
+	ret = devm_pm_opp_set_clkname(dev, "core");
+	if (ret)
+		return ret;
 
+	ret = devm_pm_opp_set_regulators(dev, (const char *[]){ "mali" }, 1);
+	if (ret) {
 		/* Continue if the optional regulator is missing */
 		if (ret != -ENODEV)
-			goto err_fini;
-	} else {
-		ldevfreq->regulators_opp_table = opp_table;
+			return ret;
 	}
 
-	ret = dev_pm_opp_of_add_table(dev);
+	ret = devm_pm_opp_of_add_table(dev);
 	if (ret)
-		goto err_fini;
+		return ret;
 
 	lima_devfreq_reset(ldevfreq);
 
 	cur_freq = clk_get_rate(ldev->clk_gpu);
 
 	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
-	if (IS_ERR(opp)) {
-		ret = PTR_ERR(opp);
-		goto err_fini;
-	}
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
 
 	lima_devfreq_profile.initial_freq = cur_freq;
 	dev_pm_opp_put(opp);
@@ -176,8 +156,7 @@ int lima_devfreq_init(struct lima_device *ldev)
 					  &ldevfreq->gov_data);
 	if (IS_ERR(devfreq)) {
 		dev_err(dev, "Couldn't initialize GPU devfreq\n");
-		ret = PTR_ERR(devfreq);
-		goto err_fini;
+		return PTR_ERR(devfreq);
 	}
 
 	ldevfreq->devfreq = devfreq;
@@ -189,10 +168,6 @@ int lima_devfreq_init(struct lima_device *ldev)
 		ldevfreq->cooling = cooling;
 
 	return 0;
-
-err_fini:
-	lima_devfreq_fini(ldev);
-	return ret;
 }
 
 void lima_devfreq_record_busy(struct lima_devfreq *devfreq)
diff --git a/drivers/gpu/drm/lima/lima_devfreq.h b/drivers/gpu/drm/lima/lima_devfreq.h
index b0c7c736e81a..b8e50feaeab6 100644
--- a/drivers/gpu/drm/lima/lima_devfreq.h
+++ b/drivers/gpu/drm/lima/lima_devfreq.h
@@ -9,15 +9,12 @@
 #include <linux/ktime.h>
 
 struct devfreq;
-struct opp_table;
 struct thermal_cooling_device;
 
 struct lima_device;
 
 struct lima_devfreq {
 	struct devfreq *devfreq;
-	struct opp_table *clkname_opp_table;
-	struct opp_table *regulators_opp_table;
 	struct thermal_cooling_device *cooling;
 
 	struct devfreq_simple_ondemand_data gov_data;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6a35a30dd281..cf897297656f 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -188,10 +188,7 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
 
 void adreno_set_llc_attributes(struct iommu_domain *iommu)
 {
-	struct io_pgtable_domain_attr pgtbl_cfg;
-
-	pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_ARM_OUTER_WBWA;
-	iommu_domain_set_attr(iommu, DOMAIN_ATTR_IO_PGTABLE_CFG, &pgtbl_cfg);
+	iommu_set_pgtable_quirks(iommu, IO_PGTABLE_QUIRK_ARM_OUTER_WBWA);
 }
 
 struct msm_gem_address_space *
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index b81ae90b8449..e8b506a6685b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	}
 
 #if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-	need_swiotlb = !!swiotlb_nr_tbl();
+	need_swiotlb = is_swiotlb_active();
 #endif
 
 	ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
index 7c5ffc81dce1..47d27e54a34f 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c
@@ -89,29 +89,25 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	unsigned long cur_freq;
 	struct device *dev = &pfdev->pdev->dev;
 	struct devfreq *devfreq;
-	struct opp_table *opp_table;
 	struct thermal_cooling_device *cooling;
 	struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
 
-	opp_table = dev_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
-					      pfdev->comp->num_supplies);
-	if (IS_ERR(opp_table)) {
-		ret = PTR_ERR(opp_table);
+	ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names,
+					 pfdev->comp->num_supplies);
+	if (ret) {
 		/* Continue if the optional regulator is missing */
 		if (ret != -ENODEV) {
 			DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
-			goto err_fini;
+			return ret;
 		}
-	} else {
-		pfdevfreq->regulators_opp_table = opp_table;
 	}
 
-	ret = dev_pm_opp_of_add_table(dev);
+	ret = devm_pm_opp_of_add_table(dev);
 	if (ret) {
 		/* Optional, continue without devfreq */
 		if (ret == -ENODEV)
 			ret = 0;
-		goto err_fini;
+		return ret;
 	}
 	pfdevfreq->opp_of_table_added = true;
 
@@ -122,10 +118,8 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	cur_freq = clk_get_rate(pfdev->clock);
 
 	opp = devfreq_recommended_opp(dev, &cur_freq, 0);
-	if (IS_ERR(opp)) {
-		ret = PTR_ERR(opp);
-		goto err_fini;
-	}
+	if (IS_ERR(opp))
+		return PTR_ERR(opp);
 
 	panfrost_devfreq_profile.initial_freq = cur_freq;
 	dev_pm_opp_put(opp);
@@ -142,8 +136,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 					  &pfdevfreq->gov_data);
 	if (IS_ERR(devfreq)) {
 		DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
-		ret = PTR_ERR(devfreq);
-		goto err_fini;
+		return PTR_ERR(devfreq);
 	}
 
 	pfdevfreq->devfreq = devfreq;
@@ -154,10 +147,6 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 	pfdevfreq->cooling = cooling;
 
 	return 0;
-
-err_fini:
-	panfrost_devfreq_fini(pfdev);
-	return ret;
 }
 
 void panfrost_devfreq_fini(struct panfrost_device *pfdev)
@@ -168,14 +157,6 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev)
 		devfreq_cooling_unregister(pfdevfreq->cooling);
 		pfdevfreq->cooling = NULL;
 	}
-
-	if (pfdevfreq->opp_of_table_added) {
-		dev_pm_opp_of_remove_table(&pfdev->pdev->dev);
-		pfdevfreq->opp_of_table_added = false;
-	}
-
-	dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table);
-	pfdevfreq->regulators_opp_table = NULL;
 }
 
 void panfrost_devfreq_resume(struct panfrost_device *pfdev)
diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
index 1e2a4de941aa..1514c1f9d91c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h
+++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h
@@ -9,14 +9,12 @@
 #include <linux/ktime.h>
 
 struct devfreq;
-struct opp_table;
 struct thermal_cooling_device;
 
 struct panfrost_device;
 
 struct panfrost_devfreq {
 	struct devfreq *devfreq;
-	struct opp_table *regulators_opp_table;
 	struct thermal_cooling_device *cooling;
 	struct devfreq_simple_ondemand_data gov_data;
 	bool opp_of_table_added;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1864467f1063..6754f578fed2 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -1,4 +1,3 @@
-/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
 /* qxl_drv.c -- QXL driver -*- linux-c -*-
  *
  * Copyright 2011 Red Hat, Inc.
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 059431689c2d..48162501c1ee 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -393,8 +393,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 	return 0;
 }
 
-static int cmp_size_smaller_first(void *priv, struct list_head *a,
-				  struct list_head *b)
+static int cmp_size_smaller_first(void *priv, const struct list_head *a,
+				  const struct list_head *b)
 {
 	struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
 	struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index 42321b9c8129..d782b49c7236 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -482,11 +482,15 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size)
 	vmw_bo_unreference(&old_buf);
 	res->id = vcotbl->type;
 
+	/* Release the pin acquired in vmw_bo_init */
+	ttm_bo_unpin(bo);
+
 	return 0;
 
 out_map_new:
 	ttm_bo_kunmap(&old_map);
 out_wait:
+	ttm_bo_unpin(bo);
 	ttm_bo_unreserve(bo);
 	vmw_bo_unreference(&buf);
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 4e41d8221f06..399f70d340eb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -706,17 +706,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
 	dev_priv->last_read_seqno = (uint32_t) -100;
 	dev_priv->drm.dev_private = dev_priv;
 
-	ret = vmw_setup_pci_resources(dev_priv, pci_id);
-	if (ret)
-		return ret;
-	ret = vmw_detect_version(dev_priv);
-	if (ret)
-		goto out_no_pci_or_version;
-
 	mutex_init(&dev_priv->cmdbuf_mutex);
-	mutex_init(&dev_priv->release_mutex);
 	mutex_init(&dev_priv->binding_mutex);
-	mutex_init(&dev_priv->global_kms_state_mutex);
 	ttm_lock_init(&dev_priv->reservation_sem);
 	spin_lock_init(&dev_priv->resource_lock);
 	spin_lock_init(&dev_priv->hw_lock);
@@ -724,6 +715,14 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
 	spin_lock_init(&dev_priv->cap_lock);
 	spin_lock_init(&dev_priv->cursor_lock);
 
+	ret = vmw_setup_pci_resources(dev_priv, pci_id);
+	if (ret)
+		return ret;
+	ret = vmw_detect_version(dev_priv);
+	if (ret)
+		goto out_no_pci_or_version;
+
+
 	for (i = vmw_res_context; i < vmw_res_max; ++i) {
 		idr_init(&dev_priv->res_idr[i]);
 		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 7e6518709e14..c6b1eb5952bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -529,7 +529,6 @@ struct vmw_private {
 	struct vmw_overlay *overlay_priv;
 	struct drm_property *hotplug_mode_update_property;
 	struct drm_property *implicit_placement_property;
-	struct mutex global_kms_state_mutex;
 	spinlock_t cursor_lock;
 
 	struct drm_atomic_state *suspend_state;
@@ -592,7 +591,6 @@ struct vmw_private {
 	bool refuse_hibernation;
 	bool suspend_locked;
 
-	struct mutex release_mutex;
 	atomic_t num_fifo_resources;
 
 	/*
@@ -1525,11 +1523,8 @@ static inline void vmw_bo_unreference(struct vmw_buffer_object **buf)
 	struct vmw_buffer_object *tmp_buf = *buf;
 
 	*buf = NULL;
-	if (tmp_buf != NULL) {
-		if (tmp_buf->base.pin_count > 0)
-			ttm_bo_unpin(&tmp_buf->base);
+	if (tmp_buf != NULL)
 		ttm_bo_put(&tmp_buf->base);
-	}
 }
 
 static inline struct vmw_buffer_object *
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index a0b53141dded..5648664f71bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -94,6 +94,16 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
 			     struct vmw_piter data_iter,
 			     unsigned long num_data_pages);
 
+
+static inline void vmw_bo_unpin_unlocked(struct ttm_buffer_object *bo)
+{
+	int ret = ttm_bo_reserve(bo, false, true, NULL);
+	BUG_ON(ret != 0);
+	ttm_bo_unpin(bo);
+	ttm_bo_unreserve(bo);
+}
+
+
 /*
  * vmw_setup_otable_base - Issue an object table base setup command to
  * the device
@@ -277,7 +287,7 @@ out_no_setup:
 					       &batch->otables[i]);
 	}
 
-	ttm_bo_unpin(batch->otable_bo);
+	vmw_bo_unpin_unlocked(batch->otable_bo);
 	ttm_bo_put(batch->otable_bo);
 	batch->otable_bo = NULL;
 	return ret;
@@ -341,6 +351,7 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv,
 	BUG_ON(ret != 0);
 
 	vmw_bo_fence_single(bo, NULL);
+	ttm_bo_unpin(bo);
 	ttm_bo_unreserve(bo);
 
 	ttm_bo_unpin(batch->otable_bo);
@@ -530,7 +541,7 @@ static void vmw_mob_pt_setup(struct vmw_mob *mob,
 void vmw_mob_destroy(struct vmw_mob *mob)
 {
 	if (mob->pt_bo) {
-		ttm_bo_unpin(mob->pt_bo);
+		vmw_bo_unpin_unlocked(mob->pt_bo);
 		ttm_bo_put(mob->pt_bo);
 		mob->pt_bo = NULL;
 	}
@@ -646,7 +657,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 out_no_cmd_space:
 	vmw_fifo_resource_dec(dev_priv);
 	if (pt_set_up) {
-		ttm_bo_unpin(mob->pt_bo);
+		vmw_bo_unpin_unlocked(mob->pt_bo);
 		ttm_bo_put(mob->pt_bo);
 		mob->pt_bo = NULL;
 	}
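A recurring change in the hunks above (drm_mode_compare, engine_cmp, mmio_offset_compare, sort_holes, cmp_size_smaller_first) is that list_sort() comparators now take const struct list_head pointers. The snippet below is a minimal, self-contained sketch of that comparator pattern in plain userspace C; it is not kernel code, and the struct list_head and item layout here are simplified stand-ins used only to show the const-qualified signature and the container lookup.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct list_head. */
struct list_head {
	struct list_head *next, *prev;
};

/* Example payload that embeds a list node, like radeon_bo_list does. */
struct item {
	int size;
	struct list_head head;
};

/*
 * Comparator with the const-qualified signature adopted above:
 * int (*cmp)(void *priv, const struct list_head *a, const struct list_head *b)
 * It recovers the containing struct from the embedded node and orders
 * smaller items first, without modifying either element.
 */
static int cmp_size_smaller_first(void *priv, const struct list_head *a,
				  const struct list_head *b)
{
	const struct item *ia = (const struct item *)
		((const char *)a - offsetof(struct item, head));
	const struct item *ib = (const struct item *)
		((const char *)b - offsetof(struct item, head));

	(void)priv;
	return ia->size - ib->size;
}

int main(void)
{
	struct item x = { .size = 4 }, y = { .size = 7 };

	/* Negative: x sorts before y. */
	printf("%d\n", cmp_size_smaller_first(NULL, &x.head, &y.head));
	return 0;
}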