Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/Makefile | 7
-rw-r--r--  drivers/gpu/drm/ati_pcigart.c | 2
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 48
-rw-r--r--  drivers/gpu/drm/drm_crtc.c | 36
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 122
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 43
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 851
-rw-r--r--  drivers/gpu/drm/drm_edid_modes.h | 380
-rw-r--r--  drivers/gpu/drm/drm_encoder_slave.c | 7
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 76
-rw-r--r--  drivers/gpu/drm/drm_fops.c | 33
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 13
-rw-r--r--  drivers/gpu/drm/drm_global.c (renamed from drivers/gpu/drm/ttm/ttm_global.c) | 30
-rw-r--r--  drivers/gpu/drm/drm_info.c | 23
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 141
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 26
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 359
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 143
-rw-r--r--  drivers/gpu/drm/drm_platform.c | 122
-rw-r--r--  drivers/gpu/drm/drm_stub.c | 92
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_trace.h | 66
-rw-r--r--  drivers/gpu/drm/drm_trace_points.c | 4
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 14
-rw-r--r--  drivers/gpu/drm/i2c/Makefile | 3
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_drv.c | 23
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_mode.c | 5
-rw-r--r--  drivers/gpu/drm/i2c/ch7006_priv.h | 3
-rw-r--r--  drivers/gpu/drm/i2c/sil164_drv.c | 462
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 125
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i810/i810_drv.h | 65
-rw-r--r--  drivers/gpu/drm/i830/i830_dma.c | 137
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.c | 2
-rw-r--r--  drivers/gpu/drm/i830/i830_drv.h | 49
-rw-r--r--  drivers/gpu/drm/i830/i830_irq.c | 10
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 24
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 66
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 29
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 198
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 6
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 58
-rw-r--r--  drivers/gpu/drm/i915/i915_opregion.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 56
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h | 36
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 703
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c | 143
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c | 10
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 9
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c | 331
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c | 14
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c | 14
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 103
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.c | 4
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 187
-rw-r--r--  drivers/gpu/drm/mga/mga_irq.c | 9
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 47
-rw-r--r--  drivers/gpu/drm/mga/mga_warp.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/Kconfig | 11
-rw-r--r--  drivers/gpu/drm/nouveau/Makefile | 12
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_acpi.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.c | 820
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bios.h | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 17
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_calc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_channel.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 423
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 7
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dma.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_dp.c | 128
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.c | 41
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h | 180
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_encoder.h | 16
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_grctx.c | 160
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hw.c | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.c | 83
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_i2c.h | 11
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_irq.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_mem.c | 341
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_notifier.c | 30
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_object.c | 105
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_reg.h | 109
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 46
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_state.c | 340
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_ttm.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_crtc.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dac.c | 50
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_dfp.c | 85
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_display.c | 90
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_fifo.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_graph.c | 5
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_instmem.c | 21
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_mc.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv04_tv.c | 133
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_fifo.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_gpio.c (renamed from drivers/gpu/drm/nouveau/nv17_gpio.c) | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nv10_graph.c | 175
-rw-r--r--  drivers/gpu/drm/nouveau/nv17_tv.c | 65
-rw-r--r--  drivers/gpu/drm/nouveau/nv20_graph.c | 100
-rw-r--r--  drivers/gpu/drm/nouveau/nv30_fb.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_fifo.c | 8
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_graph.c | 60
-rw-r--r--  drivers/gpu/drm/nouveau/nv40_mc.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_crtc.c | 76
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_dac.c | 43
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.c | 424
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_display.h | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_fifo.c | 126
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_gpio.c | 35
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_graph.c | 86
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_instmem.c | 68
-rw-r--r--  drivers/gpu/drm/nouveau/nv50_sor.c | 105
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fb.c | 38
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_fifo.c | 96
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_graph.c | 75
-rw-r--r--  drivers/gpu/drm/nouveau/nvc0_instmem.c | 232
-rw-r--r--  drivers/gpu/drm/nouveau/nvreg.h | 22
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 52
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.h | 122
-rw-r--r--  drivers/gpu/drm/r128/r128_irq.c | 4
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 121
-rw-r--r--  drivers/gpu/drm/radeon/Makefile | 1
-rw-r--r--  drivers/gpu/drm/radeon/atom.c | 9
-rw-r--r--  drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/atombios_crtc.c | 212
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/evergreend.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/r100.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/r100d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r300.c | 46
-rw-r--r--  drivers/gpu/drm/radeon/r300d.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/r420.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r500_reg.h | 5
-rw-r--r--  drivers/gpu/drm/radeon/r520.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600.c | 49
-rw-r--r--  drivers/gpu/drm/radeon/r600_audio.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/r600_blit_shaders.c | 1115
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c | 265
-rw-r--r--  drivers/gpu/drm/radeon/r600_hdmi.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/r600d.h | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_acpi.c | 67
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 255
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_clocks.c | 81
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c | 439
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 93
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c | 48
-rw-r--r--  drivers/gpu/drm/radeon/radeon_display.c | 111
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon_encoders.c | 15
-rw-r--r--  drivers/gpu/drm/radeon/radeon_fb.c | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_gem.c | 12
-rw-r--r--  drivers/gpu/drm/radeon/radeon_i2c.c | 82
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 55
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_encoders.c | 30
-rw-r--r--  drivers/gpu/drm/radeon/radeon_legacy_tv.c | 6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mode.h | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/radeon_pm.c | 128
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 20
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r300 | 13
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/r420 | 14
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rs600 | 13
-rw-r--r--  drivers/gpu/drm/radeon/reg_srcs/rv515 | 14
-rw-r--r--  drivers/gpu/drm/radeon/rs400.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/rs600.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/rs690.c | 44
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 25
-rw-r--r--  drivers/gpu/drm/radeon/rv770.c | 27
-rw-r--r--  drivers/gpu/drm/radeon/rv770d.h | 6
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 26
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/sis/sis_mm.c | 14
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 10
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 120
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 71
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.h | 8
-rw-r--r--  drivers/gpu/drm/via/via_drv.h | 22
-rw-r--r--  drivers/gpu/drm/via/via_irq.c | 13
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 4
-rw-r--r--  drivers/gpu/drm/via/via_mm.c | 7
-rw-r--r--  drivers/gpu/drm/via/via_verifier.c | 47
-rw-r--r--  drivers/gpu/drm/via/via_verifier.h | 4
-rw-r--r--  drivers/gpu/drm/via/via_video.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c | 20
212 files changed, 9321 insertions, 6073 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 88910e5a2c77..4cab0c6397e3 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -6,7 +6,7 @@
#
menuconfig DRM
tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
- depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && MMU
+ depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
select I2C
select I2C_ALGOBIT
select SLOW_WORK
@@ -17,7 +17,7 @@ menuconfig DRM
These modules provide support for synchronization, security, and
DMA transfers. Please see <http://dri.sourceforge.net/> for more
details. You should also select and configure AGP
- (/dev/agpgart) support.
+ (/dev/agpgart) support if it is available for your platform.
config DRM_KMS_HELPER
tristate
@@ -61,6 +61,7 @@ config DRM_RADEON
select DRM_KMS_HELPER
select DRM_TTM
select POWER_SUPPLY
+ select HWMON
help
Choose this option if you have an ATI Radeon graphics card. There
are both PCI and AGP versions. You don't need to choose this to
@@ -130,7 +131,7 @@ endchoice
config DRM_MGA
tristate "Matrox g200/g400"
- depends on DRM
+ depends on DRM && PCI
select FW_LOADER
help
Choose this option if you have a Matrox G200, G400 or G450 graphics
@@ -148,14 +149,14 @@ config DRM_SIS
config DRM_VIA
tristate "Via unichrome video cards"
- depends on DRM
+ depends on DRM && PCI
help
Choose this option if you have a Via unichrome or compatible video
chipset. If M is selected the module will be called via.
config DRM_SAVAGE
tristate "Savage video cards"
- depends on DRM
+ depends on DRM && PCI
help
Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
chipset. If M is selected the module will be called savage.
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index abe3f446ca48..f3a23a329f4e 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,9 +9,10 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
- drm_info.o drm_debugfs.o drm_encoder_slave.o
+ drm_info.o drm_debugfs.o drm_encoder_slave.o \
+ drm_trace_points.o drm_global.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
@@ -19,6 +20,8 @@ drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
+CFLAGS_drm_trace_points.o := -I$(src)
+
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
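The new drm_trace_points.o object gets a per-object -I$(src) flag so that the generated tracepoint code can re-include drm_trace.h: define_trace.h resolves the trace header via the include path, and that path must cover the source directory for exactly one translation unit. A minimal sketch of the usual kernel tracepoint instantiation this supports (illustrative only, not quoted from the patch):

    /* drm_trace_points.c, sketched: instantiate the tracepoints declared
     * in drm_trace.h exactly once for the whole drm module. */
    #define CREATE_TRACE_POINTS
    #include "drm_trace.h"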
diff --git a/drivers/gpu/drm/ati_pcigart.c b/drivers/gpu/drm/ati_pcigart.c
index 17be051b7aa3..1c3649242208 100644
--- a/drivers/gpu/drm/ati_pcigart.c
+++ b/drivers/gpu/drm/ati_pcigart.c
@@ -152,7 +152,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
/* we need to support large memory configurations */
entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
+ if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
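The ati_pcigart change stops treating a DMA address of 0 as the failure indicator: on some platforms 0 is a perfectly valid bus address, and a failed mapping is not guaranteed to come back as 0. The portable pattern, sketched outside the patch with a hypothetical helper:

    /* Illustrative sketch: check a pci_map_page() result the portable way. */
    static int map_one_page(struct pci_dev *pdev, struct page *page,
                            dma_addr_t *busaddr)
    {
            *busaddr = pci_map_page(pdev, page, 0, PAGE_SIZE,
                                    PCI_DMA_BIDIRECTIONAL);
            if (pci_dma_mapping_error(pdev, *busaddr))
                    return -ENOMEM;        /* do not use *busaddr on error */
            return 0;
    }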
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 2092e7bb788f..3e257a50bf56 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -39,19 +39,6 @@
#include <asm/shmparam.h>
#include "drmP.h"
-resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_start(dev->pdev, resource);
-}
-EXPORT_SYMBOL(drm_get_resource_start);
-
-resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
-{
- return pci_resource_len(dev->pdev, resource);
-}
-
-EXPORT_SYMBOL(drm_get_resource_len);
-
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
struct drm_local_map *map)
{
@@ -189,7 +176,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
if (map->offset + (map->size-1) < map->offset ||
map->offset < virt_to_phys(high_memory)) {
kfree(map);
@@ -341,14 +328,13 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
return -EINVAL;
}
- list = kmalloc(sizeof(*list), GFP_KERNEL);
+ list = kzalloc(sizeof(*list), GFP_KERNEL);
if (!list) {
if (map->type == _DRM_REGISTERS)
iounmap(map->handle);
kfree(map);
return -EINVAL;
}
- memset(list, 0, sizeof(*list));
list->map = map;
mutex_lock(&dev->struct_mutex);
@@ -691,13 +677,12 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -721,7 +706,7 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -730,7 +715,6 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
@@ -845,22 +829,20 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
- entry->seglist = kmalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
+ entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
if (!entry->seglist) {
kfree(entry->buflist);
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->seglist, 0, count * sizeof(*entry->seglist));
/* Keep the original pagelist until we know all the allocations
* have succeeded
@@ -924,8 +906,8 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size,
- GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size,
+ GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -936,7 +918,6 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n",
entry->buf_count, buf->address);
@@ -1061,14 +1042,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist),
GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -1093,7 +1073,7 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1103,8 +1083,6 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
-
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
offset += alignment;
@@ -1222,14 +1200,13 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
return -EINVAL;
}
- entry->buflist = kmalloc(count * sizeof(*entry->buflist),
+ entry->buflist = kzalloc(count * sizeof(*entry->buflist),
GFP_KERNEL);
if (!entry->buflist) {
mutex_unlock(&dev->struct_mutex);
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(entry->buflist, 0, count * sizeof(*entry->buflist));
entry->buf_size = size;
entry->page_order = page_order;
@@ -1253,7 +1230,7 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
- buf->dev_private = kmalloc(buf->dev_priv_size, GFP_KERNEL);
+ buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
if (!buf->dev_private) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -1262,7 +1239,6 @@ static int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request
atomic_dec(&dev->buf_alloc);
return -ENOMEM;
}
- memset(buf->dev_private, 0, buf->dev_priv_size);
DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 57cea01c4ffb..37e0b4fa482a 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -80,6 +80,7 @@ static struct drm_prop_enum_list drm_dithering_mode_enum_list[] =
{
{ DRM_MODE_DITHERING_OFF, "Off" },
{ DRM_MODE_DITHERING_ON, "On" },
+ { DRM_MODE_DITHERING_AUTO, "Automatic" },
};
/*
@@ -1126,7 +1127,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
list_for_each_entry(crtc, &dev->mode_config.crtc_list,
head) {
- DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (put_user(crtc->base.id, crtc_id + copied)) {
ret = -EFAULT;
goto out;
@@ -1154,8 +1155,8 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each_entry(encoder,
&dev->mode_config.encoder_list,
head) {
- DRM_DEBUG_KMS("ENCODER ID is %d\n",
- encoder->base.id);
+ DRM_DEBUG_KMS("[ENCODER:%d:%s]\n", encoder->base.id,
+ drm_get_encoder_name(encoder));
if (put_user(encoder->base.id, encoder_id +
copied)) {
ret = -EFAULT;
@@ -1185,8 +1186,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
list_for_each_entry(connector,
&dev->mode_config.connector_list,
head) {
- DRM_DEBUG_KMS("CONNECTOR ID is %d\n",
- connector->base.id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
if (put_user(connector->base.id,
connector_id + copied)) {
ret = -EFAULT;
@@ -1209,7 +1211,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
}
card_res->count_connectors = connector_count;
- DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs,
+ DRM_DEBUG_KMS("CRTC[%d] CONNECTORS[%d] ENCODERS[%d]\n", card_res->count_crtcs,
card_res->count_connectors, card_res->count_encoders);
out:
@@ -1312,7 +1314,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
- DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id);
mutex_lock(&dev->mode_config.mutex);
@@ -1493,6 +1495,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
crtc = obj_to_crtc(obj);
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
if (crtc_req->mode_valid) {
/* If we have a mode we need a framebuffer. */
@@ -1569,6 +1572,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
goto out;
}
connector = obj_to_connector(obj);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
+ connector->base.id,
+ drm_get_connector_name(connector));
connector_set[i] = connector;
}
@@ -1676,14 +1682,15 @@ int drm_mode_addfb(struct drm_device *dev,
/* TODO setup destructor callback */
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
- if (!fb) {
+ if (IS_ERR(fb)) {
DRM_ERROR("could not create framebuffer\n");
- ret = -EINVAL;
+ ret = PTR_ERR(fb);
goto out;
}
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2534,7 +2541,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
goto out;
}
- crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
+ crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
out:
mutex_unlock(&dev->mode_config.mutex);
@@ -2610,6 +2617,15 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
goto out;
crtc = obj_to_crtc(obj);
+ if (crtc->fb == NULL) {
+ /* The framebuffer is currently unbound, presumably
+ * due to a hotplug event, that userspace has not
+ * yet discovered.
+ */
+ ret = -EBUSY;
+ goto out;
+ }
+
if (crtc->funcs->page_flip == NULL)
goto out;
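The drm_mode_addfb() hunk switches the ->fb_create() hook from returning NULL on failure to returning an ERR_PTR-encoded errno, so the ioctl can propagate the real error instead of a blanket -EINVAL. The err.h convention, sketched with hypothetical names (not the DRM code itself):

    #include <linux/err.h>
    #include <linux/slab.h>

    /* Illustrative only: encode an errno in the returned pointer ... */
    static void *alloc_or_err(size_t size)
    {
            void *obj = kzalloc(size, GFP_KERNEL);

            if (!obj)
                    return ERR_PTR(-ENOMEM);
            return obj;
    }

    /* ... and recover it on the caller side with IS_ERR()/PTR_ERR(). */
    static int caller_example(void)
    {
            void *obj = alloc_or_err(128);

            if (IS_ERR(obj))
                    return PTR_ERR(obj);
            kfree(obj);
            return 0;
    }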
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 9b2a54117c91..7e31d4348340 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -86,7 +86,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
int count = 0;
int mode_flags = 0;
- DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
+ drm_get_connector_name(connector));
/* set all modes to the unverified state */
list_for_each_entry_safe(mode, t, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
@@ -102,8 +103,8 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
connector->status = connector->funcs->detect(connector);
if (connector->status == connector_status_disconnected) {
- DRM_DEBUG_KMS("%s is disconnected\n",
- drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
+ connector->base.id, drm_get_connector_name(connector));
drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
@@ -141,8 +142,8 @@ prune:
drm_mode_sort(&connector->modes);
- DRM_DEBUG_KMS("Probed modes for %s\n",
- drm_get_connector_name(connector));
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] probed modes :\n", connector->base.id,
+ drm_get_connector_name(connector));
list_for_each_entry_safe(mode, t, &connector->modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
@@ -201,6 +202,17 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_helper_crtc_in_use);
+static void
+drm_encoder_disable(struct drm_encoder *encoder)
+{
+ struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+
+ if (encoder_funcs->disable)
+ (*encoder_funcs->disable)(encoder);
+ else
+ (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+}
+
/**
* drm_helper_disable_unused_functions - disable unused objects
* @dev: DRM device
@@ -215,7 +227,6 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
{
struct drm_encoder *encoder;
struct drm_connector *connector;
- struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_crtc *crtc;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -226,12 +237,8 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- encoder_funcs = encoder->helper_private;
if (!drm_helper_encoder_in_use(encoder)) {
- if (encoder_funcs->disable)
- (*encoder_funcs->disable)(encoder);
- else
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
/* disconnector encoder from any connector */
encoder->crtc = NULL;
}
@@ -241,7 +248,10 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc->enabled = drm_helper_crtc_in_use(crtc);
if (!crtc->enabled) {
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ if (crtc_funcs->disable)
+ (*crtc_funcs->disable)(crtc);
+ else
+ (*crtc_funcs->dpms)(crtc, DRM_MODE_DPMS_OFF);
crtc->fb = NULL;
}
}
@@ -292,11 +302,11 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
encoder_funcs = encoder->helper_private;
/* Disable unused encoders */
if (encoder->crtc == NULL)
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
/* Disable encoders whose CRTC is about to change */
if (encoder_funcs->get_crtc &&
encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- (*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
+ drm_encoder_disable(encoder);
}
}
@@ -365,6 +375,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
goto done;
}
+ DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
/* Prepare the encoders and CRTCs before setting the mode. */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -392,8 +403,9 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
if (encoder->crtc != crtc)
continue;
- DRM_DEBUG("%s: set mode %s %x\n", drm_get_encoder_name(encoder),
- mode->name, mode->base.id);
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
+ encoder->base.id, drm_get_encoder_name(encoder),
+ mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
@@ -469,10 +481,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
crtc_funcs = set->crtc->helper_private;
- DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:"
- " %d (x, y) (%i, %i)\n",
- set->crtc, set->crtc->base.id, set->fb, set->connectors,
- (int)set->num_connectors, set->x, set->y);
+ if (set->fb) {
+ DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, set->fb->base.id,
+ (int)set->num_connectors, set->x, set->y);
+ } else {
+ DRM_DEBUG_KMS("[CRTC:%d] [NOFB] #connectors=%d (x y) (%i %i)\n",
+ set->crtc->base.id, (int)set->num_connectors,
+ set->x, set->y);
+ }
dev = set->crtc->dev;
@@ -601,8 +618,14 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
connector->encoder->crtc = new_crtc;
}
- DRM_DEBUG_KMS("setting connector %d crtc to %p\n",
- connector->base.id, new_crtc);
+ if (new_crtc) {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [CRTC:%d]\n",
+ connector->base.id, drm_get_connector_name(connector),
+ new_crtc->base.id);
+ } else {
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] to [NOCRTC]\n",
+ connector->base.id, drm_get_connector_name(connector));
+ }
}
/* mode_set_base is not a required function */
@@ -620,8 +643,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (!drm_crtc_helper_set_mode(set->crtc, set->mode,
set->x, set->y,
old_fb)) {
- DRM_ERROR("failed to set mode on crtc %p\n",
- set->crtc);
+ DRM_ERROR("failed to set mode on [CRTC:%d]\n",
+ set->crtc->base.id);
ret = -EINVAL;
goto fail;
}
@@ -794,12 +817,12 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
if (encoder_funcs->dpms)
(*encoder_funcs->dpms) (encoder,
drm_helper_choose_encoder_dpms(encoder));
-
- crtc_funcs = crtc->helper_private;
- if (crtc_funcs->dpms)
- (*crtc_funcs->dpms) (crtc,
- drm_helper_choose_crtc_dpms(crtc));
}
+
+ crtc_funcs = crtc->helper_private;
+ if (crtc_funcs->dpms)
+ (*crtc_funcs->dpms) (crtc,
+ drm_helper_choose_crtc_dpms(crtc));
}
}
/* disable the unused connectors while restoring the modesetting */
@@ -808,17 +831,14 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
-static struct slow_work_ops output_poll_ops;
-
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
-static void output_poll_execute(struct slow_work *work)
+static void output_poll_execute(struct work_struct *work)
{
- struct delayed_slow_work *delayed_work = container_of(work, struct delayed_slow_work, work);
- struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_slow_work);
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
enum drm_connector_status old_status, status;
bool repoll = false, changed = false;
- int ret;
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
@@ -853,18 +873,15 @@ static void output_poll_execute(struct slow_work *work)
dev->mode_config.funcs->output_poll_changed(dev);
}
- if (repoll) {
- ret = delayed_slow_work_enqueue(delayed_work, DRM_OUTPUT_POLL_PERIOD);
- if (ret)
- DRM_ERROR("delayed enqueue failed %d\n", ret);
- }
+ if (repoll)
+ queue_delayed_work(system_nrt_wq, delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
void drm_kms_helper_poll_disable(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
+ cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
@@ -872,26 +889,20 @@ void drm_kms_helper_poll_enable(struct drm_device *dev)
{
bool poll = false;
struct drm_connector *connector;
- int ret;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->polled)
poll = true;
}
- if (poll) {
- ret = delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, DRM_OUTPUT_POLL_PERIOD);
- if (ret)
- DRM_ERROR("delayed enqueue failed %d\n", ret);
- }
+ if (poll)
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
void drm_kms_helper_poll_init(struct drm_device *dev)
{
- slow_work_register_user(THIS_MODULE);
- delayed_slow_work_init(&dev->mode_config.output_poll_slow_work,
- &output_poll_ops);
+ INIT_DELAYED_WORK(&dev->mode_config.output_poll_work, output_poll_execute);
dev->mode_config.poll_enabled = true;
drm_kms_helper_poll_enable(dev);
@@ -901,7 +912,6 @@ EXPORT_SYMBOL(drm_kms_helper_poll_init);
void drm_kms_helper_poll_fini(struct drm_device *dev)
{
drm_kms_helper_poll_disable(dev);
- slow_work_unregister_user(THIS_MODULE);
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
@@ -909,12 +919,8 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
{
if (!dev->mode_config.poll_enabled)
return;
- delayed_slow_work_cancel(&dev->mode_config.output_poll_slow_work);
- /* schedule a slow work asap */
- delayed_slow_work_enqueue(&dev->mode_config.output_poll_slow_work, 0);
+ /* kill timer and schedule immediate execution, this doesn't block */
+ cancel_delayed_work(&dev->mode_config.output_poll_work);
+ queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
-
-static struct slow_work_ops output_poll_ops = {
- .execute = output_poll_execute,
-};
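The output-poll rework above drops the slow_work machinery for a plain delayed work item: INIT_DELAYED_WORK() binds the handler, queue_delayed_work() (re)arms it, cancel_delayed_work_sync() stops it, and the handler recovers its containing structure via to_delayed_work() plus container_of(). The shape of that pattern, sketched with hypothetical names rather than the DRM ones:

    #include <linux/workqueue.h>

    struct poller {
            struct delayed_work work;
            bool keep_polling;
    };

    static void poll_execute(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);
            struct poller *p = container_of(dwork, struct poller, work);

            /* ... probe hardware, decide whether to keep polling ... */
            if (p->keep_polling)
                    queue_delayed_work(system_nrt_wq, dwork, 10 * HZ);
    }

    static void poller_start(struct poller *p)
    {
            INIT_DELAYED_WORK(&p->work, poll_execute);
            queue_delayed_work(system_nrt_wq, &p->work, 10 * HZ);
    }

    static void poller_stop(struct poller *p)
    {
            cancel_delayed_work_sync(&p->work); /* waits for a running handler */
    }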
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 4a66201edaec..90288ec7c284 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -243,47 +243,20 @@ int drm_lastclose(struct drm_device * dev)
*
* Initializes an array of drm_device structures, and attempts to
* initialize all available devices, using consecutive minors, registering the
- * stubs and initializing the AGP device.
+ * stubs and initializing the device.
*
* Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
* after the initialization for driver customization.
*/
int drm_init(struct drm_driver *driver)
{
- struct pci_dev *pdev = NULL;
- const struct pci_device_id *pid;
- int i;
-
DRM_DEBUG("\n");
-
INIT_LIST_HEAD(&driver->device_list);
- if (driver->driver_features & DRIVER_MODESET)
- return pci_register_driver(&driver->pci_driver);
-
- /* If not using KMS, fall back to stealth mode manual scanning. */
- for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
- pid = &driver->pci_driver.id_table[i];
-
- /* Loop around setting up a DRM device for each PCI device
- * matching our ID and device class. If we had the internal
- * function that pci_get_subsys and pci_get_class used, we'd
- * be able to just pass pid in instead of doing a two-stage
- * thing.
- */
- pdev = NULL;
- while ((pdev =
- pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
- pid->subdevice, pdev)) != NULL) {
- if ((pdev->class & pid->class_mask) != pid->class)
- continue;
-
- /* stealth mode requires a manual probe */
- pci_dev_get(pdev);
- drm_get_dev(pdev, pid, driver);
- }
- }
- return 0;
+ if (driver->driver_features & DRIVER_USE_PLATFORM_DEVICE)
+ return drm_platform_init(driver);
+ else
+ return drm_pci_init(driver);
}
EXPORT_SYMBOL(drm_init);
@@ -315,6 +288,7 @@ static int __init drm_core_init(void)
{
int ret = -ENOMEM;
+ drm_global_init();
idr_init(&drm_minors_idr);
if (register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops))
@@ -362,6 +336,7 @@ static void __exit drm_core_exit(void)
unregister_chrdev(DRM_MAJOR, "drm");
+ idr_remove_all(&drm_minors_idr);
idr_destroy(&drm_minors_idr);
}
@@ -506,9 +481,9 @@ long drm_ioctl(struct file *filp,
if (ioctl->flags & DRM_UNLOCKED)
retcode = func(dev, kdata, file_priv);
else {
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
retcode = func(dev, kdata, file_priv);
- unlock_kernel();
+ mutex_unlock(&drm_global_mutex);
}
if (cmd & IOC_OUT) {
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f87bf104df7a..96e963108225 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -33,6 +33,11 @@
#include <linux/i2c-algo-bit.h>
#include "drmP.h"
#include "drm_edid.h"
+#include "drm_edid_modes.h"
+
+#define version_greater(edid, maj, min) \
+ (((edid)->version > (maj)) || \
+ ((edid)->version == (maj) && (edid)->revision > (min)))
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
@@ -62,6 +67,13 @@
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
+struct detailed_mode_closure {
+ struct drm_connector *connector;
+ struct edid *edid;
+ bool preferred;
+ u32 quirks;
+ int modes;
+};
#define LEVEL_DMT 0
#define LEVEL_GTF 1
@@ -282,7 +294,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
return block;
carp:
- dev_warn(&connector->dev->pdev->dev, "%s: EDID block %d invalid.\n",
+ dev_warn(connector->dev->dev, "%s: EDID block %d invalid.\n",
drm_get_connector_name(connector), j);
out:
@@ -375,7 +387,6 @@ static u32 edid_get_quirks(struct edid *edid)
#define MODE_SIZE(m) ((m)->hdisplay * (m)->vdisplay)
#define MODE_REFRESH_DIFF(m,r) (abs((m)->vrefresh - target_refresh))
-
/**
* edid_fixup_preferred - set preferred modes based on quirk list
* @connector: has mode list to fix up
@@ -422,245 +433,6 @@ static void edid_fixup_preferred(struct drm_connector *connector,
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
-/*
- * Add the Autogenerated from the DMT spec.
- * This table is copied from xfree86/modes/xf86EdidModes.c.
- * But the mode with Reduced blank feature is deleted.
- */
-static struct drm_display_mode drm_dmt_modes[] = {
- /* 640x350@85Hz */
- { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 350, 382, 385, 445, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x400@85Hz */
- { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
- 736, 832, 0, 400, 401, 404, 445, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 720x400@85Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
- 828, 936, 0, 400, 401, 404, 446, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 640x480@60Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
- 752, 800, 0, 480, 489, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 492, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 640x480@85Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
- 752, 832, 0, 480, 481, 484, 509, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 800x600@56Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@72Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 800x600@85Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
- 896, 1048, 0, 600, 601, 604, 631, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 848x480@60Hz */
- { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
- 976, 1088, 0, 480, 486, 494, 517, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@43Hz, interlace */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 772, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
- DRM_MODE_FLAG_INTERLACE) },
- /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1024x768@85Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
- 1168, 1376, 0, 768, 769, 772, 808, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1152x864@75Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@60Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
- 1472, 1664, 0, 768, 771, 778, 798, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x768@75Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
- 1488, 1696, 0, 768, 771, 778, 805, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x768@85Hz */
- { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
- 1496, 1712, 0, 768, 771, 778, 809, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@60Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
- 1480, 1680, 0, 800, 803, 809, 831, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
- /* 1280x800@75Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
- 1488, 1696, 0, 800, 803, 809, 838, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x800@85Hz */
- { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
- 1496, 1712, 0, 800, 803, 809, 843, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@60Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
- 1488, 1800, 0, 960, 961, 964, 1000, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x960@85Hz */
- { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
- 1504, 1728, 0, 960, 961, 964, 1011, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@60Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@75Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1280x1024@85Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
- 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1360x768@60Hz */
- { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
- 1536, 1792, 0, 768, 771, 777, 795, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@60Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
- 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@75Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
- 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x1050@85Hz */
- { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
- 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@60Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
- 1672, 1904, 0, 900, 903, 909, 934, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@75Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
- 1688, 1936, 0, 900, 903, 909, 942, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1440x900@85Hz */
- { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
- 1696, 1952, 0, 900, 903, 909, 948, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@60Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@65Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@70Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@75Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1600x1200@85Hz */
- { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
- 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@60Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
- 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@75Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
- 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1680x1050@85Hz */
- { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
- 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1792x1344@60Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
- 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1729x1344@75Hz */
- { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
- 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1853x1392@60Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
- 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1856x1392@75Hz */
- { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
- 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@60Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
- 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@75Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
- 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1200@85Hz */
- { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
- 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@60Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
- 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 1920x1440@75Hz */
- { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
- 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@60Hz */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
- 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@75HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
- /* 2560x1600@85HZ */
- { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
- 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-};
-static const int drm_num_dmt_modes =
- sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
-
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh)
{
@@ -685,6 +457,46 @@ EXPORT_SYMBOL(drm_mode_find_dmt);
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
+cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ int i, n = 0;
+ u8 rev = ext[0x01], d = ext[0x02];
+ u8 *det_base = ext + d;
+
+ switch (rev) {
+ case 0:
+ /* can't happen */
+ return;
+ case 1:
+ /* have to infer how many blocks we have, check pixel clock */
+ for (i = 0; i < 6; i++)
+ if (det_base[18*i] || det_base[18*i+1])
+ n++;
+ break;
+ default:
+ /* explicit count */
+ n = min(ext[0x03] & 0x0f, 6);
+ break;
+ }
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
+vtb_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
+{
+ unsigned int i, n = min((int)ext[0x02], 6);
+ u8 *det_base = ext + 5;
+
+ if (ext[0x01] != 1)
+ return; /* unknown version */
+
+ for (i = 0; i < n; i++)
+ cb((struct detailed_timing *)(det_base + 18 * i), closure);
+}
+
+static void
drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
{
int i;
@@ -696,7 +508,19 @@ drm_for_each_detailed_block(u8 *raw_edid, detailed_cb *cb, void *closure)
for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
cb(&(edid->detailed_timings[i]), closure);
- /* XXX extension block walk */
+ for (i = 1; i <= raw_edid[0x7e]; i++) {
+ u8 *ext = raw_edid + (i * EDID_LENGTH);
+ switch (*ext) {
+ case CEA_EXT:
+ cea_for_each_detailed_block(ext, cb, closure);
+ break;
+ case VTB_EXT:
+ vtb_for_each_detailed_block(ext, cb, closure);
+ break;
+ default:
+ break;
+ }
+ }
}
static void
@@ -929,13 +753,11 @@ drm_mode_do_interlace_quirk(struct drm_display_mode *mode,
{ 1440, 576 },
{ 2880, 576 },
};
- static const int n_sizes =
- sizeof(cea_interlaced)/sizeof(cea_interlaced[0]);
if (!(pt->misc & DRM_EDID_PT_INTERLACED))
return;
- for (i = 0; i < n_sizes; i++) {
+ for (i = 0; i < ARRAY_SIZE(cea_interlaced); i++) {
if ((mode->hdisplay == cea_interlaced[i].w) &&
(mode->vdisplay == cea_interlaced[i].h / 2)) {
mode->vdisplay *= 2;
@@ -1049,117 +871,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
return mode;
}
-/*
- * Detailed mode info for the EDID "established modes" data to use.
- */
-static struct drm_display_mode edid_est_modes[] = {
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
- 968, 1056, 0, 600, 601, 605, 628, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
- 896, 1024, 0, 600, 601, 603, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
- 720, 840, 0, 480, 481, 484, 500, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
- 704, 832, 0, 480, 489, 491, 520, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
- 768, 864, 0, 480, 483, 486, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
- { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
- 752, 800, 0, 480, 490, 492, 525, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
- 846, 900, 0, 400, 421, 423, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
- { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
- 846, 900, 0, 400, 412, 414, 449, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
- { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
- 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
- 1136, 1312, 0, 768, 769, 772, 800, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
- 1184, 1328, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
- 1184, 1344, 0, 768, 771, 777, 806, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
- { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
- 1208, 1264, 0, 768, 768, 776, 817, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
- { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
- 928, 1152, 0, 624, 625, 628, 667, 0,
- DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
- 896, 1056, 0, 600, 601, 604, 625, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
- { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
- 976, 1040, 0, 600, 637, 643, 666, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
- { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
- 1344, 1600, 0, 864, 865, 868, 900, 0,
- DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
-};
-
-/**
- * add_established_modes - get est. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Each EDID block contains a bitmap of the supported "established modes" list
- * (defined above). Tease them out and add them to the global modes list.
- */
-static int add_established_modes(struct drm_connector *connector, struct edid *edid)
-{
- struct drm_device *dev = connector->dev;
- unsigned long est_bits = edid->established_timings.t1 |
- (edid->established_timings.t2 << 8) |
- ((edid->established_timings.mfg_rsvd & 0x80) << 9);
- int i, modes = 0;
-
- for (i = 0; i <= EDID_EST_TIMINGS; i++)
- if (est_bits & (1<<i)) {
- struct drm_display_mode *newmode;
- newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-
-/**
- * add_standard_modes - get std. modes from EDID and add them
- * @edid: EDID block to scan
- *
- * Standard modes can be calculated using the CVT standard. Grab them from
- * @edid, calculate them, and add them to the list.
- */
-static int add_standard_modes(struct drm_connector *connector, struct edid *edid)
-{
- int i, modes = 0;
-
- for (i = 0; i < EDID_STD_TIMINGS; i++) {
- struct drm_display_mode *newmode;
-
- newmode = drm_mode_std(connector, edid,
- &edid->standard_timings[i],
- edid->revision);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
-
- return modes;
-}
-
static bool
mode_is_rb(struct drm_display_mode *mode)
{
@@ -1269,114 +980,33 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid,
return modes;
}
-static int drm_cvt_modes(struct drm_connector *connector,
- struct detailed_timing *timing)
+static void
+do_inferred_modes(struct detailed_timing *timing, void *c)
{
- int i, j, modes = 0;
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- struct cvt_timing *cvt;
- const int rates[] = { 60, 85, 75, 60, 50 };
- const u8 empty[3] = { 0, 0, 0 };
-
- for (i = 0; i < 4; i++) {
- int uninitialized_var(width), height;
- cvt = &(timing->data.other_data.data.cvt[i]);
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ int gtf = (closure->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
- if (!memcmp(cvt->code, empty, 3))
- continue;
+ if (gtf && data->type == EDID_DETAIL_MONITOR_RANGE)
+ closure->modes += drm_gtf_modes_for_range(closure->connector,
+ closure->edid,
+ timing);
+}
- height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
- switch (cvt->code[1] & 0x0c) {
- case 0x00:
- width = height * 4 / 3;
- break;
- case 0x04:
- width = height * 16 / 9;
- break;
- case 0x08:
- width = height * 16 / 10;
- break;
- case 0x0c:
- width = height * 15 / 9;
- break;
- }
+static int
+add_inferred_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- for (j = 1; j < 5; j++) {
- if (cvt->code[2] & (1 << j)) {
- newmode = drm_cvt_mode(dev, width, height,
- rates[j], j == 0,
- false, false);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- modes++;
- }
- }
- }
- }
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_inferred_modes,
+ &closure);
- return modes;
+ return closure.modes;
}
-static const struct {
- short w;
- short h;
- short r;
- short rb;
-} est3_modes[] = {
- /* byte 6 */
- { 640, 350, 85, 0 },
- { 640, 400, 85, 0 },
- { 720, 400, 85, 0 },
- { 640, 480, 85, 0 },
- { 848, 480, 60, 0 },
- { 800, 600, 85, 0 },
- { 1024, 768, 85, 0 },
- { 1152, 864, 75, 0 },
- /* byte 7 */
- { 1280, 768, 60, 1 },
- { 1280, 768, 60, 0 },
- { 1280, 768, 75, 0 },
- { 1280, 768, 85, 0 },
- { 1280, 960, 60, 0 },
- { 1280, 960, 85, 0 },
- { 1280, 1024, 60, 0 },
- { 1280, 1024, 85, 0 },
- /* byte 8 */
- { 1360, 768, 60, 0 },
- { 1440, 900, 60, 1 },
- { 1440, 900, 60, 0 },
- { 1440, 900, 75, 0 },
- { 1440, 900, 85, 0 },
- { 1400, 1050, 60, 1 },
- { 1400, 1050, 60, 0 },
- { 1400, 1050, 75, 0 },
- /* byte 9 */
- { 1400, 1050, 85, 0 },
- { 1680, 1050, 60, 1 },
- { 1680, 1050, 60, 0 },
- { 1680, 1050, 75, 0 },
- { 1680, 1050, 85, 0 },
- { 1600, 1200, 60, 0 },
- { 1600, 1200, 65, 0 },
- { 1600, 1200, 70, 0 },
- /* byte 10 */
- { 1600, 1200, 75, 0 },
- { 1600, 1200, 85, 0 },
- { 1792, 1344, 60, 0 },
- { 1792, 1344, 85, 0 },
- { 1856, 1392, 60, 0 },
- { 1856, 1392, 75, 0 },
- { 1920, 1200, 60, 1 },
- { 1920, 1200, 60, 0 },
- /* byte 11 */
- { 1920, 1200, 75, 0 },
- { 1920, 1200, 85, 0 },
- { 1920, 1440, 60, 0 },
- { 1920, 1440, 75, 0 },
-};
-static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
-
static int
drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
{
@@ -1387,7 +1017,7 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
for (i = 0; i < 6; i++) {
for (j = 7; j > 0; j--) {
m = (i * 8) + (7 - j);
- if (m >= num_est3_modes)
+ if (m >= ARRAY_SIZE(est3_modes))
break;
if (est[i] & (1 << j)) {
mode = drm_mode_find_dmt(connector->dev,
@@ -1406,37 +1036,63 @@ drm_est3_modes(struct drm_connector *connector, struct detailed_timing *timing)
return modes;
}
-static int add_detailed_modes(struct drm_connector *connector,
- struct detailed_timing *timing,
- struct edid *edid, u32 quirks, int preferred)
+static void
+do_established_modes(struct detailed_timing *timing, void *c)
{
- int i, modes = 0;
+ struct detailed_mode_closure *closure = c;
struct detailed_non_pixel *data = &timing->data.other_data;
- int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
- struct drm_display_mode *newmode;
- struct drm_device *dev = connector->dev;
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(dev, edid, timing, quirks);
- if (!newmode)
- return 0;
+ if (data->type == EDID_DETAIL_EST_TIMINGS)
+ closure->modes += drm_est3_modes(closure->connector, timing);
+}
- if (preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+/**
+ * add_established_modes - get est. modes from EDID and add them
+ * @edid: EDID block to scan
+ *
+ * Each EDID block contains a bitmap of the supported "established modes" list
+ * (defined above). Tease them out and add them to the global modes list.
+ */
+static int
+add_established_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct drm_device *dev = connector->dev;
+ unsigned long est_bits = edid->established_timings.t1 |
+ (edid->established_timings.t2 << 8) |
+ ((edid->established_timings.mfg_rsvd & 0x80) << 9);
+ int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- drm_mode_probed_add(connector, newmode);
- return 1;
+ for (i = 0; i <= EDID_EST_TIMINGS; i++) {
+ if (est_bits & (1<<i)) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev, &edid_est_modes[i]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
}
- /* other timing types */
- switch (data->type) {
- case EDID_DETAIL_MONITOR_RANGE:
- if (gtf)
- modes += drm_gtf_modes_for_range(connector, edid,
- timing);
- break;
- case EDID_DETAIL_STD_MODES:
- /* Six modes per detailed section */
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid,
+ do_established_modes, &closure);
+
+ return modes + closure.modes;
+}
+
+static void
+do_standard_modes(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
+ struct drm_connector *connector = closure->connector;
+ struct edid *edid = closure->edid;
+
+ if (data->type == EDID_DETAIL_STD_MODES) {
+ int i;
for (i = 0; i < 6; i++) {
struct std_timing *std;
struct drm_display_mode *newmode;
@@ -1446,108 +1102,169 @@ static int add_detailed_modes(struct drm_connector *connector,
edid->revision);
if (newmode) {
drm_mode_probed_add(connector, newmode);
- modes++;
+ closure->modes++;
}
}
- break;
- case EDID_DETAIL_CVT_3BYTE:
- modes += drm_cvt_modes(connector, timing);
- break;
- case EDID_DETAIL_EST_TIMINGS:
- modes += drm_est3_modes(connector, timing);
- break;
- default:
- break;
}
-
- return modes;
}
/**
- * add_detailed_info - get detailed mode info from EDID data
- * @connector: attached connector
+ * add_standard_modes - get std. modes from EDID and add them
* @edid: EDID block to scan
- * @quirks: quirks to apply
*
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
+ * Standard modes can be calculated using the appropriate standard (DMT,
+ * GTF or CVT). Grab them from @edid and add them to the list.
*/
-static int add_detailed_info(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+static int
+add_standard_modes(struct drm_connector *connector, struct edid *edid)
{
int i, modes = 0;
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
+
+ for (i = 0; i < EDID_STD_TIMINGS; i++) {
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid,
+ &edid->standard_timings[i],
+ edid->revision);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+
+ if (version_greater(edid, 1, 0))
+ drm_for_each_detailed_block((u8 *)edid, do_standard_modes,
+ &closure);
+
+ /* XXX should also look for standard codes in VTB blocks */
+
+ return modes + closure.modes;
+}
- for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
- struct detailed_timing *timing = &edid->detailed_timings[i];
- int preferred = (i == 0);
+static int drm_cvt_modes(struct drm_connector *connector,
+ struct detailed_timing *timing)
+{
+ int i, j, modes = 0;
+ struct drm_display_mode *newmode;
+ struct drm_device *dev = connector->dev;
+ struct cvt_timing *cvt;
+ const int rates[] = { 60, 85, 75, 60, 50 };
+ const u8 empty[3] = { 0, 0, 0 };
- if (preferred && edid->version == 1 && edid->revision < 4)
- preferred = (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+ for (i = 0; i < 4; i++) {
+ int uninitialized_var(width), height;
+ cvt = &(timing->data.other_data.data.cvt[i]);
- /* In 1.0, only timings are allowed */
- if (!timing->pixel_clock && edid->version == 1 &&
- edid->revision == 0)
+ if (!memcmp(cvt->code, empty, 3))
continue;
- modes += add_detailed_modes(connector, timing, edid, quirks,
- preferred);
+ height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2;
+ switch (cvt->code[1] & 0x0c) {
+ case 0x00:
+ width = height * 4 / 3;
+ break;
+ case 0x04:
+ width = height * 16 / 9;
+ break;
+ case 0x08:
+ width = height * 16 / 10;
+ break;
+ case 0x0c:
+ width = height * 15 / 9;
+ break;
+ }
+
+ for (j = 1; j < 5; j++) {
+ if (cvt->code[2] & (1 << j)) {
+ newmode = drm_cvt_mode(dev, width, height,
+ rates[j], j == 0,
+ false, false);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
}
return modes;
}
-/**
- * add_detailed_mode_eedid - get detailed mode info from addtional timing
- * EDID block
- * @connector: attached connector
- * @edid: EDID block to scan(It is only to get addtional timing EDID block)
- * @quirks: quirks to apply
- *
- * Some of the detailed timing sections may contain mode information. Grab
- * it and add it to the list.
- */
-static int add_detailed_info_eedid(struct drm_connector *connector,
- struct edid *edid, u32 quirks)
+static void
+do_cvt_mode(struct detailed_timing *timing, void *c)
{
- int i, modes = 0;
- char *edid_ext = NULL;
- struct detailed_timing *timing;
- int start_offset, end_offset;
+ struct detailed_mode_closure *closure = c;
+ struct detailed_non_pixel *data = &timing->data.other_data;
- if (edid->version == 1 && edid->revision < 3)
- return 0;
- if (!edid->extensions)
- return 0;
+ if (data->type == EDID_DETAIL_CVT_3BYTE)
+ closure->modes += drm_cvt_modes(closure->connector, timing);
+}
- /* Find CEA extension */
- for (i = 0; i < edid->extensions; i++) {
- edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
- if (edid_ext[0] == 0x02)
- break;
- }
+static int
+add_cvt_modes(struct drm_connector *connector, struct edid *edid)
+{
+ struct detailed_mode_closure closure = {
+ connector, edid, 0, 0, 0
+ };
- if (i == edid->extensions)
- return 0;
+ if (version_greater(edid, 1, 2))
+ drm_for_each_detailed_block((u8 *)edid, do_cvt_mode, &closure);
- /* Get the start offset of detailed timing block */
- start_offset = edid_ext[2];
- if (start_offset == 0) {
- /* If the start_offset is zero, it means that neither detailed
- * info nor data block exist. In such case it is also
- * unnecessary to parse the detailed timing info.
- */
- return 0;
- }
+ /* XXX should also look for CVT codes in VTB blocks */
- end_offset = EDID_LENGTH;
- end_offset -= sizeof(struct detailed_timing);
- for (i = start_offset; i < end_offset;
- i += sizeof(struct detailed_timing)) {
- timing = (struct detailed_timing *)(edid_ext + i);
- modes += add_detailed_modes(connector, timing, edid, quirks, 0);
+ return closure.modes;
+}
+
+static void
+do_detailed_mode(struct detailed_timing *timing, void *c)
+{
+ struct detailed_mode_closure *closure = c;
+ struct drm_display_mode *newmode;
+
+ if (timing->pixel_clock) {
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
+
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = 0;
}
+}
- return modes;
+/*
+ * add_detailed_modes - Add modes from detailed timings
+ * @connector: attached connector
+ * @edid: EDID block to scan
+ * @quirks: quirks to apply
+ */
+static int
+add_detailed_modes(struct drm_connector *connector, struct edid *edid,
+ u32 quirks)
+{
+ struct detailed_mode_closure closure = {
+ connector,
+ edid,
+ 1,
+ quirks,
+ 0
+ };
+
+ if (closure.preferred && !version_greater(edid, 1, 3))
+ closure.preferred =
+ (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
+
+ drm_for_each_detailed_block((u8 *)edid, do_detailed_mode, &closure);
+
+ return closure.modes;
}
#define HDMI_IDENTIFIER 0x000C03
@@ -1626,7 +1343,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
return 0;
}
if (!drm_edid_is_valid(edid)) {
- dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+ dev_warn(connector->dev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
}
@@ -1643,35 +1360,21 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
* - established timing codes
* - modes inferred from GTF or CVT range information
*
- * We don't quite implement this yet, but we're close.
+ * We get this pretty much right.
*
* XXX order for additional mode types in extension blocks?
*/
- num_modes += add_detailed_info(connector, edid, quirks);
- num_modes += add_detailed_info_eedid(connector, edid, quirks);
+ num_modes += add_detailed_modes(connector, edid, quirks);
+ num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
+ num_modes += add_inferred_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
- connector->display_info.serration_vsync = (edid->input & DRM_EDID_INPUT_SERRATION_VSYNC) ? 1 : 0;
- connector->display_info.sync_on_green = (edid->input & DRM_EDID_INPUT_SYNC_ON_GREEN) ? 1 : 0;
- connector->display_info.composite_sync = (edid->input & DRM_EDID_INPUT_COMPOSITE_SYNC) ? 1 : 0;
- connector->display_info.separate_syncs = (edid->input & DRM_EDID_INPUT_SEPARATE_SYNCS) ? 1 : 0;
- connector->display_info.blank_to_black = (edid->input & DRM_EDID_INPUT_BLANK_TO_BLACK) ? 1 : 0;
- connector->display_info.video_level = (edid->input & DRM_EDID_INPUT_VIDEO_LEVEL) >> 5;
- connector->display_info.digital = (edid->input & DRM_EDID_INPUT_DIGITAL) ? 1 : 0;
connector->display_info.width_mm = edid->width_cm * 10;
connector->display_info.height_mm = edid->height_cm * 10;
- connector->display_info.gamma = edid->gamma;
- connector->display_info.gtf_supported = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) ? 1 : 0;
- connector->display_info.standard_color = (edid->features & DRM_EDID_FEATURE_STANDARD_COLOR) ? 1 : 0;
- connector->display_info.display_type = (edid->features & DRM_EDID_FEATURE_DISPLAY_TYPE) >> 3;
- connector->display_info.active_off_supported = (edid->features & DRM_EDID_FEATURE_PM_ACTIVE_OFF) ? 1 : 0;
- connector->display_info.suspend_supported = (edid->features & DRM_EDID_FEATURE_PM_SUSPEND) ? 1 : 0;
- connector->display_info.standby_supported = (edid->features & DRM_EDID_FEATURE_PM_STANDBY) ? 1 : 0;
- connector->display_info.gamma = edid->gamma;
return num_modes;
}
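
For readers following the refactor above: the closure object threaded through the new do_*() callbacks is not defined in this hunk. Pieced together from the positional initializers ({ connector, edid, preferred, quirks, modes }) and the field accesses in the callbacks, it presumably looks like the sketch below; treat the exact definition as an inference, not part of the patch.

        struct detailed_mode_closure {
                struct drm_connector *connector;
                struct edid *edid;
                int preferred;   /* consumed by do_detailed_mode() for the first DTD */
                u32 quirks;      /* EDID_QUIRK_* flags passed to drm_mode_detailed() */
                int modes;       /* running count returned by each add_*_modes() helper */
        };
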
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
new file mode 100644
index 000000000000..6eb7592e152f
--- /dev/null
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -0,0 +1,380 @@
+/*
+ * Copyright (c) 2007-2008 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ * Copyright 2010 Red Hat, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include "drmP.h"
+#include "drm_edid.h"
+
+/*
+ * Autogenerated from the DMT spec.
+ * This table is copied from xfree86/modes/xf86EdidModes.c.
+ * But the mode with Reduced blank feature is deleted.
+ */
+static struct drm_display_mode drm_dmt_modes[] = {
+ /* 640x350@85Hz */
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 350, 382, 385, 445, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x400@85Hz */
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
+ 736, 832, 0, 400, 401, 404, 445, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x400@85Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
+ 828, 936, 0, 400, 401, 404, 446, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 489, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 492, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 640x480@85Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
+ 752, 832, 0, 480, 481, 484, 509, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 800x600@56Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@72Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 800x600@85Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
+ 896, 1048, 0, 600, 601, 604, 631, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 848x480@60Hz */
+ { DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
+ 976, 1088, 0, 480, 486, 494, 517, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@43Hz, interlace */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 772, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1024x768@85Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
+ 1168, 1376, 0, 768, 769, 772, 808, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1152x864@75Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@60Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+ 1472, 1664, 0, 768, 771, 778, 798, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x768@75Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
+ 1488, 1696, 0, 768, 771, 778, 805, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x768@85Hz */
+ { DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
+ 1496, 1712, 0, 768, 771, 778, 809, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@60Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+ 1480, 1680, 0, 800, 803, 809, 831, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x800@75Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
+ 1488, 1696, 0, 800, 803, 809, 838, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x800@85Hz */
+ { DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
+ 1496, 1712, 0, 800, 803, 809, 843, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@60Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+ 1488, 1800, 0, 960, 961, 964, 1000, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x960@85Hz */
+ { DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
+ 1504, 1728, 0, 960, 961, 964, 1011, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@60Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@75Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x1024@85Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
+ 1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1360x768@60Hz */
+ { DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+ 1536, 1792, 0, 768, 771, 777, 795, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@60Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+ 1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@75Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
+ 1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1400x1050@85Hz */
+ { DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
+ 1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@60Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+ 1672, 1904, 0, 900, 903, 909, 934, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@75Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
+ 1688, 1936, 0, 900, 903, 909, 942, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1440x900@85Hz */
+ { DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
+ 1696, 1952, 0, 900, 903, 909, 948, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@60Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@65Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@70Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@75Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1600x1200@85Hz */
+ { DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
+ 1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@60Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+ 1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@75Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
+ 1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1680x1050@85Hz */
+ { DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
+ 1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@60Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+ 2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1792x1344@75Hz */
+ { DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
+ 2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@60Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+ 2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1856x1392@75Hz */
+ { DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
+ 2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@60Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+ 2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@75Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
+ 2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1200@85Hz */
+ { DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
+ 2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@60Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+ 2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1440@75Hz */
+ { DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
+ 2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@60Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+ 3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@75Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2560x1600@85Hz */
+ { DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
+ 3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_dmt_modes =
+ sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
+
+static struct drm_display_mode edid_est_modes[] = {
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+ 968, 1056, 0, 600, 601, 605, 628, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
+ 896, 1024, 0, 600, 601, 603, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
+ 720, 840, 0, 480, 481, 484, 500, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
+ 704, 832, 0, 480, 489, 491, 520, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
+ 768, 864, 0, 480, 483, 486, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
+ 846, 900, 0, 400, 421, 423, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
+ 846, 900, 0, 400, 412, 414, 449, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
+ 1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
+ 1136, 1312, 0, 768, 769, 772, 800, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
+ 1184, 1328, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+ 1184, 1344, 0, 768, 771, 777, 806, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
+ 1208, 1264, 0, 768, 768, 776, 817, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
+ 928, 1152, 0, 624, 625, 628, 667, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
+ 896, 1056, 0, 600, 601, 604, 625, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
+ 976, 1040, 0, 600, 637, 643, 666, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
+ { DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+ 1344, 1600, 0, 864, 865, 868, 900, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
+};
+
+static const struct {
+ short w;
+ short h;
+ short r;
+ short rb;
+} est3_modes[] = {
+ /* byte 6 */
+ { 640, 350, 85, 0 },
+ { 640, 400, 85, 0 },
+ { 720, 400, 85, 0 },
+ { 640, 480, 85, 0 },
+ { 848, 480, 60, 0 },
+ { 800, 600, 85, 0 },
+ { 1024, 768, 85, 0 },
+ { 1152, 864, 75, 0 },
+ /* byte 7 */
+ { 1280, 768, 60, 1 },
+ { 1280, 768, 60, 0 },
+ { 1280, 768, 75, 0 },
+ { 1280, 768, 85, 0 },
+ { 1280, 960, 60, 0 },
+ { 1280, 960, 85, 0 },
+ { 1280, 1024, 60, 0 },
+ { 1280, 1024, 85, 0 },
+ /* byte 8 */
+ { 1360, 768, 60, 0 },
+ { 1440, 900, 60, 1 },
+ { 1440, 900, 60, 0 },
+ { 1440, 900, 75, 0 },
+ { 1440, 900, 85, 0 },
+ { 1400, 1050, 60, 1 },
+ { 1400, 1050, 60, 0 },
+ { 1400, 1050, 75, 0 },
+ /* byte 9 */
+ { 1400, 1050, 85, 0 },
+ { 1680, 1050, 60, 1 },
+ { 1680, 1050, 60, 0 },
+ { 1680, 1050, 75, 0 },
+ { 1680, 1050, 85, 0 },
+ { 1600, 1200, 60, 0 },
+ { 1600, 1200, 65, 0 },
+ { 1600, 1200, 70, 0 },
+ /* byte 10 */
+ { 1600, 1200, 75, 0 },
+ { 1600, 1200, 85, 0 },
+ { 1792, 1344, 60, 0 },
+ { 1792, 1344, 85, 0 },
+ { 1856, 1392, 60, 0 },
+ { 1856, 1392, 75, 0 },
+ { 1920, 1200, 60, 1 },
+ { 1920, 1200, 60, 0 },
+ /* byte 11 */
+ { 1920, 1200, 75, 0 },
+ { 1920, 1200, 85, 0 },
+ { 1920, 1440, 60, 0 },
+ { 1920, 1440, 75, 0 },
+};
+static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
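
As a worked example of how drm_est3_modes() above indexes this table (following its m = (i * 8) + (7 - j) expression): byte 6 of the descriptor is est[0], so its bit 7 gives m = 0, i.e. est3_modes[0] = 640x350@85Hz; byte 7 is est[1], so its bit 3 gives m = 8 + (7 - 3) = 12, i.e. est3_modes[12] = 1280x960@60Hz.
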
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index f0184696edf3..d62c064fbaa0 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -41,6 +41,9 @@
* &drm_encoder_slave. The @slave_funcs field will be initialized with
* the hooks provided by the slave driver.
*
+ * If @info->platform_data is non-NULL, it will be used as the initial
+ * slave config.
+ *
* Returns 0 on success or a negative errno on failure, in particular,
* -ENODEV is returned when no matching driver is found.
*/
@@ -85,6 +88,10 @@ int drm_i2c_encoder_init(struct drm_device *dev,
if (err)
goto fail_unregister;
+ if (info->platform_data)
+ encoder->slave_funcs->set_config(&encoder->base,
+ info->platform_data);
+
return 0;
fail_unregister:
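
A minimal sketch of how a driver might hand a config blob through the new platform_data path; the config structure, variable names and the I2C address are illustrative, and the parameter list should be checked against drm_encoder_slave.h — only the i2c_board_info fields and the set_config() hand-off come from the hunk above.

        static struct my_tv_encoder_config cfg = {
                /* whatever blob the slave driver's set_config() expects */
        };

        struct i2c_board_info info = {
                I2C_BOARD_INFO("ch7006", 0x75),   /* illustrative chip/address */
                .platform_data = &cfg,            /* applied via set_config() at init */
        };

        ret = drm_i2c_encoder_init(dev, slave_encoder, i2c_adap, &info);
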
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 719662034bbf..de82e201d682 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -241,6 +241,80 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
return 0;
}
+int drm_fb_helper_debug_enter(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc_helper_funcs *funcs;
+ int i;
+
+ if (list_empty(&kernel_fb_helper_list))
+ return false;
+
+ list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set =
+ &helper->crtc_info[i].mode_set;
+
+ if (!mode_set->crtc->enabled)
+ continue;
+
+ funcs = mode_set->crtc->helper_private;
+ funcs->mode_set_base_atomic(mode_set->crtc,
+ mode_set->fb,
+ mode_set->x,
+ mode_set->y);
+
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_enter);
+
+/* Find the real fb for a given fb helper CRTC */
+static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_crtc *c;
+
+ list_for_each_entry(c, &dev->mode_config.crtc_list, head) {
+ if (crtc->base.id == c->base.id)
+ return c->fb;
+ }
+
+ return NULL;
+}
+
+int drm_fb_helper_debug_leave(struct fb_info *info)
+{
+ struct drm_fb_helper *helper = info->par;
+ struct drm_crtc *crtc;
+ struct drm_crtc_helper_funcs *funcs;
+ struct drm_framebuffer *fb;
+ int i;
+
+ for (i = 0; i < helper->crtc_count; i++) {
+ struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
+ crtc = mode_set->crtc;
+ funcs = crtc->helper_private;
+ fb = drm_mode_config_fb(crtc);
+
+ if (!crtc->enabled)
+ continue;
+
+ if (!fb) {
+ DRM_ERROR("no fb to restore??\n");
+ continue;
+ }
+
+ funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
+ crtc->y);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_fb_helper_debug_leave);
+
bool drm_fb_helper_force_kernel_mode(void)
{
int i = 0;
@@ -611,7 +685,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
- if (var->pixclock != 0)
+ if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
/* Need to resize the fb object !!! */
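
These enter/leave helpers are intended for the fbdev debugger path (kdb/kgdb); a driver typically just points the matching fb_ops hooks at them. A minimal sketch, assuming struct fb_ops exposes fb_debug_enter/fb_debug_leave callbacks as the in-kernel debugger expects; the rest of the struct is elided.

        static struct fb_ops mydrv_fb_ops = {
                .owner          = THIS_MODULE,
                /* ... the usual fb_check_var/fb_set_par/... hooks ... */
                .fb_debug_enter = drm_fb_helper_debug_enter,
                .fb_debug_leave = drm_fb_helper_debug_leave,
        };

Note that drm_fb_helper_check_var() now also bails out while in_dbg_master() is true, so ordinary fbdev mode changes cannot race with the debugger's atomic base swaps.
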
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index e7aace20981f..3a652a65546f 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -39,6 +39,9 @@
#include <linux/slab.h>
#include <linux/smp_lock.h>
+/* from BKL pushdown: note that nothing else serializes idr_find() */
+DEFINE_MUTEX(drm_global_mutex);
+
static int drm_open_helper(struct inode *inode, struct file *filp,
struct drm_device * dev);
@@ -132,15 +135,9 @@ int drm_open(struct inode *inode, struct file *filp)
retcode = drm_open_helper(inode, filp, dev);
if (!retcode) {
atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
- spin_lock(&dev->count_lock);
- if (!dev->open_count++) {
- spin_unlock(&dev->count_lock);
+ if (!dev->open_count++)
retcode = drm_setup(dev);
- goto out;
- }
- spin_unlock(&dev->count_lock);
}
-out:
if (!retcode) {
mutex_lock(&dev->struct_mutex);
if (minor->type == DRM_MINOR_LEGACY) {
@@ -175,8 +172,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- /* BKL pushdown: note that nothing else serializes idr_find() */
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
goto out;
@@ -197,7 +193,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
fops_put(old_fops);
out:
- unlock_kernel();
+ mutex_unlock(&drm_global_mutex);
return err;
}
@@ -472,7 +468,7 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_device *dev = file_priv->minor->dev;
int retcode = 0;
- lock_kernel();
+ mutex_lock(&drm_global_mutex);
DRM_DEBUG("open_count = %d\n", dev->open_count);
@@ -568,22 +564,15 @@ int drm_release(struct inode *inode, struct file *filp)
*/
atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
- spin_lock(&dev->count_lock);
if (!--dev->open_count) {
if (atomic_read(&dev->ioctl_count)) {
DRM_ERROR("Device busy: %d\n",
atomic_read(&dev->ioctl_count));
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return -EBUSY;
- }
- spin_unlock(&dev->count_lock);
- unlock_kernel();
- return drm_lastclose(dev);
+ retcode = -EBUSY;
+ } else
+ retcode = drm_lastclose(dev);
}
- spin_unlock(&dev->count_lock);
-
- unlock_kernel();
+ mutex_unlock(&drm_global_mutex);
return retcode;
}
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 33dad3fa6043..bf92d07510df 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -68,8 +68,18 @@
* We make up offsets for buffer objects so we can recognize them at
* mmap time.
*/
+
+/* pgoff in mmap is an unsigned long, so we need to make sure that
+ * the faked up offset will fit
+ */
+
+#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+#else
+#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
+#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
+#endif
/**
* Initialize the GEM device fields
@@ -312,7 +322,7 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
@@ -419,6 +429,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
+ idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
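
For concreteness, assuming 4 KiB pages (PAGE_SHIFT == 12): the 64-bit constants place the faked mmap offsets at pgoff 0x100000 and up (a 4 GiB byte offset) with a window of about 0xFFFFF0 pages, while the new 32-bit variant starts them at pgoff 0x10000 (a 256 MiB byte offset) with a window of 0xFFFF0 pages — the same layout scaled down by a factor of 16, as an order-of-magnitude illustration of what the split changes.
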
diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/drm_global.c
index b17007178a36..c87dc96444de 100644
--- a/drivers/gpu/drm/ttm/ttm_global.c
+++ b/drivers/gpu/drm/drm_global.c
@@ -28,45 +28,45 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include "ttm/ttm_module.h"
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include "drm_global.h"
-struct ttm_global_item {
+struct drm_global_item {
struct mutex mutex;
void *object;
int refcount;
};
-static struct ttm_global_item glob[TTM_GLOBAL_NUM];
+static struct drm_global_item glob[DRM_GLOBAL_NUM];
-void ttm_global_init(void)
+void drm_global_init(void)
{
int i;
- for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
- struct ttm_global_item *item = &glob[i];
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
mutex_init(&item->mutex);
item->object = NULL;
item->refcount = 0;
}
}
-void ttm_global_release(void)
+void drm_global_release(void)
{
int i;
- for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
- struct ttm_global_item *item = &glob[i];
+ for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
+ struct drm_global_item *item = &glob[i];
BUG_ON(item->object != NULL);
BUG_ON(item->refcount != 0);
}
}
-int ttm_global_item_ref(struct ttm_global_reference *ref)
+int drm_global_item_ref(struct drm_global_reference *ref)
{
int ret;
- struct ttm_global_item *item = &glob[ref->global_type];
+ struct drm_global_item *item = &glob[ref->global_type];
void *object;
mutex_lock(&item->mutex);
@@ -93,11 +93,11 @@ out_err:
item->object = NULL;
return ret;
}
-EXPORT_SYMBOL(ttm_global_item_ref);
+EXPORT_SYMBOL(drm_global_item_ref);
-void ttm_global_item_unref(struct ttm_global_reference *ref)
+void drm_global_item_unref(struct drm_global_reference *ref)
{
- struct ttm_global_item *item = &glob[ref->global_type];
+ struct drm_global_item *item = &glob[ref->global_type];
mutex_lock(&item->mutex);
BUG_ON(item->refcount == 0);
@@ -108,5 +108,5 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
}
mutex_unlock(&item->mutex);
}
-EXPORT_SYMBOL(ttm_global_item_unref);
+EXPORT_SYMBOL(drm_global_item_unref);
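
A minimal sketch of using the renamed interface from a driver; the enum value, object type and callbacks mirror how the TTM-based drivers used the old ttm_global_* API and are assumptions here, not part of this hunk.

        static struct drm_global_reference mem_ref;

        mem_ref.global_type = DRM_GLOBAL_TTM_MEM;    /* assumed enum name */
        mem_ref.size = sizeof(struct ttm_mem_global);
        mem_ref.object = NULL;
        mem_ref.init = &my_mem_global_init;          /* hypothetical callbacks */
        mem_ref.release = &my_mem_global_release;

        ret = drm_global_item_ref(&mem_ref);         /* first caller constructs the object */
        /* ... */
        drm_global_item_unref(&mem_ref);             /* last caller destroys it */
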
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index f0f6c6b93f3a..2ef2c7827243 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -51,13 +51,24 @@ int drm_name_info(struct seq_file *m, void *data)
if (!master)
return 0;
- if (master->unique) {
- seq_printf(m, "%s %s %s\n",
- dev->driver->pci_driver.name,
- pci_name(dev->pdev), master->unique);
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ dev->driver->platform_device->name,
+ dev_name(dev->dev), master->unique);
+ } else {
+ seq_printf(m, "%s\n",
+ dev->driver->platform_device->name);
+ }
} else {
- seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
- pci_name(dev->pdev));
+ if (master->unique) {
+ seq_printf(m, "%s %s %s\n",
+ dev->driver->pci_driver.name,
+ dev_name(dev->dev), master->unique);
+ } else {
+ seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
+ dev_name(dev->dev));
+ }
}
return 0;
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 9b9ff46c2378..47db4df37a69 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -64,6 +64,19 @@ int drm_getunique(struct drm_device *dev, void *data,
return 0;
}
+static void
+drm_unset_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ kfree(dev->devname);
+ dev->devname = NULL;
+
+ kfree(master->unique);
+ master->unique = NULL;
+ master->unique_len = 0;
+ master->unique_size = 0;
+}
+
/**
* Set the bus id.
*
@@ -94,17 +107,24 @@ int drm_setunique(struct drm_device *dev, void *data,
master->unique_len = u->unique_len;
master->unique_size = u->unique_len + 1;
master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (!master->unique)
- return -ENOMEM;
- if (copy_from_user(master->unique, u->unique, master->unique_len))
- return -EFAULT;
+ if (!master->unique) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ if (copy_from_user(master->unique, u->unique, master->unique_len)) {
+ ret = -EFAULT;
+ goto err;
+ }
master->unique[master->unique_len] = '\0';
dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
strlen(master->unique) + 2, GFP_KERNEL);
- if (!dev->devname)
- return -ENOMEM;
+ if (!dev->devname) {
+ ret = -ENOMEM;
+ goto err;
+ }
sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
master->unique);
@@ -113,53 +133,103 @@ int drm_setunique(struct drm_device *dev, void *data,
* busid.
*/
ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
- if (ret != 3)
- return -EINVAL;
+ if (ret != 3) {
+ ret = -EINVAL;
+ goto err;
+ }
+
domain = bus >> 8;
bus &= 0xff;
if ((domain != drm_get_pci_domain(dev)) ||
(bus != dev->pdev->bus->number) ||
(slot != PCI_SLOT(dev->pdev->devfn)) ||
- (func != PCI_FUNC(dev->pdev->devfn)))
- return -EINVAL;
+ (func != PCI_FUNC(dev->pdev->devfn))) {
+ ret = -EINVAL;
+ goto err;
+ }
return 0;
+
+err:
+ drm_unset_busid(dev, master);
+ return ret;
}
static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_master *master = file_priv->master;
- int len;
+ int len, ret;
if (master->unique != NULL)
- return -EBUSY;
+ drm_unset_busid(dev, master);
- master->unique_len = 40;
- master->unique_size = master->unique_len;
- master->unique = kmalloc(master->unique_size, GFP_KERNEL);
- if (master->unique == NULL)
- return -ENOMEM;
-
- len = snprintf(master->unique, master->unique_len, "pci:%04x:%02x:%02x.%d",
- drm_get_pci_domain(dev),
- dev->pdev->bus->number,
- PCI_SLOT(dev->pdev->devfn),
- PCI_FUNC(dev->pdev->devfn));
- if (len >= master->unique_len)
- DRM_ERROR("buffer overflow");
- else
- master->unique_len = len;
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE)) {
+ master->unique_len = 10 + strlen(dev->platformdev->name);
+ master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
- dev->devname = kmalloc(strlen(dev->driver->pci_driver.name) +
- master->unique_len + 2, GFP_KERNEL);
- if (dev->devname == NULL)
- return -ENOMEM;
+ if (master->unique == NULL)
+ return -ENOMEM;
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
- master->unique);
+ len = snprintf(master->unique, master->unique_len,
+ "platform:%s", dev->platformdev->name);
+
+ if (len > master->unique_len) {
+ DRM_ERROR("Unique buffer overflowed\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ dev->devname =
+ kmalloc(strlen(dev->platformdev->name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->platformdev->name,
+ master->unique);
+
+ } else {
+ master->unique_len = 40;
+ master->unique_size = master->unique_len;
+ master->unique = kmalloc(master->unique_size, GFP_KERNEL);
+ if (master->unique == NULL)
+ return -ENOMEM;
+
+ len = snprintf(master->unique, master->unique_len,
+ "pci:%04x:%02x:%02x.%d",
+ drm_get_pci_domain(dev),
+ dev->pdev->bus->number,
+ PCI_SLOT(dev->pdev->devfn),
+ PCI_FUNC(dev->pdev->devfn));
+ if (len >= master->unique_len) {
+ DRM_ERROR("buffer overflow");
+ ret = -EINVAL;
+ goto err;
+ } else
+ master->unique_len = len;
+
+ dev->devname =
+ kmalloc(strlen(dev->driver->pci_driver.name) +
+ master->unique_len + 2, GFP_KERNEL);
+
+ if (dev->devname == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+ master->unique);
+ }
return 0;
+
+err:
+ drm_unset_busid(dev, master);
+ return ret;
}
/**
@@ -322,8 +392,11 @@ int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_pri
if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
+ * Version 1.4 has proper PCI domain support
*/
- drm_set_busid(dev, file_priv);
+ retcode = drm_set_busid(dev, file_priv);
+ if (retcode)
+ goto done;
}
}
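
To make the two branches concrete (device names are illustrative): a PCI device at 0000:00:02.0 bound to the i915 driver ends up with master->unique = "pci:0000:00:02.0" and dev->devname = "i915@pci:0000:00:02.0", while a platform device registered as "foo-drm" gets "platform:foo-drm" and "foo-drm@platform:foo-drm"; the 10 + strlen(name) sizing covers the nine characters of "platform:" plus the trailing NUL.
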
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index a263b7070fc6..9d3a5030b6e1 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -34,6 +34,7 @@
*/
#include "drmP.h"
+#include "drm_trace.h"
#include <linux/interrupt.h> /* For task queue support */
#include <linux/slab.h>
@@ -57,6 +58,9 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
{
struct drm_irq_busid *p = data;
+ if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
+ return -EINVAL;
+
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
@@ -211,7 +215,7 @@ int drm_irq_install(struct drm_device *dev)
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (dev->pdev->irq == 0)
+ if (drm_dev_to_irq(dev) == 0)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
@@ -229,7 +233,7 @@ int drm_irq_install(struct drm_device *dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+ DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -302,14 +306,14 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("irq=%d\n", dev->pdev->irq);
+ DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
if (!drm_core_check_feature(dev, DRIVER_MODESET))
vga_client_register(dev->pdev, NULL, NULL, NULL);
dev->driver->irq_uninstall(dev);
- free_irq(dev->pdev->irq, dev);
+ free_irq(drm_dev_to_irq(dev), dev);
return 0;
}
@@ -341,7 +345,7 @@ int drm_control(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl->irq != dev->pdev->irq)
+ ctl->irq != drm_dev_to_irq(dev))
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -587,6 +591,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
return -ENOMEM;
e->pipe = pipe;
+ e->base.pid = current->pid;
e->event.base.type = DRM_EVENT_VBLANK;
e->event.base.length = sizeof e->event;
e->event.user_data = vblwait->request.signal;
@@ -614,6 +619,9 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
vblwait->request.sequence, seq, pipe);
+ trace_drm_vblank_event_queued(current->pid, pipe,
+ vblwait->request.sequence);
+
e->event.sequence = vblwait->request.sequence;
if ((seq - vblwait->request.sequence) <= (1 << 23)) {
e->event.tv_sec = now.tv_sec;
@@ -621,6 +629,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
drm_vblank_put(dev, e->pipe);
list_add_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(current->pid, pipe,
+ vblwait->request.sequence);
} else {
list_add_tail(&e->base.link, &dev->vblank_event_list);
}
@@ -651,7 +661,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
int ret = 0;
unsigned int flags, seq, crtc;
- if ((!dev->pdev->irq) || (!dev->irq_enabled))
+ if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
return -EINVAL;
if (vblwait->request.type & _DRM_VBLANK_SIGNAL)
@@ -751,9 +761,13 @@ void drm_handle_vblank_events(struct drm_device *dev, int crtc)
drm_vblank_put(dev, e->pipe);
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
+ trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
+ e->event.sequence);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ trace_drm_vblank_event(crtc, seq);
}
/**
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2ac074c8f5d2..da99edc50888 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -48,44 +48,14 @@
#define MM_UNUSED_TARGET 4
-unsigned long drm_mm_tail_space(struct drm_mm *mm)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return 0;
-
- return entry->size;
-}
-
-int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free)
- return -ENOMEM;
-
- if (entry->size <= size)
- return -ENOMEM;
-
- entry->size -= size;
- return 0;
-}
-
static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
{
struct drm_mm_node *child;
if (atomic)
- child = kmalloc(sizeof(*child), GFP_ATOMIC);
+ child = kzalloc(sizeof(*child), GFP_ATOMIC);
else
- child = kmalloc(sizeof(*child), GFP_KERNEL);
+ child = kzalloc(sizeof(*child), GFP_KERNEL);
if (unlikely(child == NULL)) {
spin_lock(&mm->unused_lock);
@@ -94,8 +64,8 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
else {
child =
list_entry(mm->unused_nodes.next,
- struct drm_mm_node, fl_entry);
- list_del(&child->fl_entry);
+ struct drm_mm_node, free_stack);
+ list_del(&child->free_stack);
--mm->num_unused;
}
spin_unlock(&mm->unused_lock);
@@ -115,7 +85,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
spin_lock(&mm->unused_lock);
while (mm->num_unused < MM_UNUSED_TARGET) {
spin_unlock(&mm->unused_lock);
- node = kmalloc(sizeof(*node), GFP_KERNEL);
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
spin_lock(&mm->unused_lock);
if (unlikely(node == NULL)) {
@@ -124,7 +94,7 @@ int drm_mm_pre_get(struct drm_mm *mm)
return ret;
}
++mm->num_unused;
- list_add_tail(&node->fl_entry, &mm->unused_nodes);
+ list_add_tail(&node->free_stack, &mm->unused_nodes);
}
spin_unlock(&mm->unused_lock);
return 0;
@@ -146,27 +116,12 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
child->start = start;
child->mm = mm;
- list_add_tail(&child->ml_entry, &mm->ml_entry);
- list_add_tail(&child->fl_entry, &mm->fl_entry);
+ list_add_tail(&child->node_list, &mm->node_list);
+ list_add_tail(&child->free_stack, &mm->free_stack);
return 0;
}
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
-{
- struct list_head *tail_node;
- struct drm_mm_node *entry;
-
- tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
- if (!entry->free) {
- return drm_mm_create_tail_node(mm, entry->start + entry->size,
- size, atomic);
- }
- entry->size += size;
- return 0;
-}
-
static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size,
int atomic)
@@ -177,15 +132,14 @@ static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
if (unlikely(child == NULL))
return NULL;
- INIT_LIST_HEAD(&child->fl_entry);
+ INIT_LIST_HEAD(&child->free_stack);
- child->free = 0;
child->size = size;
child->start = parent->start;
child->mm = parent->mm;
- list_add_tail(&child->ml_entry, &parent->ml_entry);
- INIT_LIST_HEAD(&child->fl_entry);
+ list_add_tail(&child->node_list, &parent->node_list);
+ INIT_LIST_HEAD(&child->free_stack);
parent->size -= size;
parent->start += size;
@@ -213,7 +167,7 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
}
if (node->size == size) {
- list_del_init(&node->fl_entry);
+ list_del_init(&node->free_stack);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
@@ -251,7 +205,7 @@ struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
}
if (node->size == size) {
- list_del_init(&node->fl_entry);
+ list_del_init(&node->free_stack);
node->free = 0;
} else {
node = drm_mm_split_at_start(node, size, atomic);
@@ -273,16 +227,19 @@ void drm_mm_put_block(struct drm_mm_node *cur)
{
struct drm_mm *mm = cur->mm;
- struct list_head *cur_head = &cur->ml_entry;
- struct list_head *root_head = &mm->ml_entry;
+ struct list_head *cur_head = &cur->node_list;
+ struct list_head *root_head = &mm->node_list;
struct drm_mm_node *prev_node = NULL;
struct drm_mm_node *next_node;
int merged = 0;
+ BUG_ON(cur->scanned_block || cur->scanned_prev_free
+ || cur->scanned_next_free);
+
if (cur_head->prev != root_head) {
prev_node =
- list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+ list_entry(cur_head->prev, struct drm_mm_node, node_list);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
@@ -290,15 +247,15 @@ void drm_mm_put_block(struct drm_mm_node *cur)
}
if (cur_head->next != root_head) {
next_node =
- list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+ list_entry(cur_head->next, struct drm_mm_node, node_list);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
- list_del(&next_node->ml_entry);
- list_del(&next_node->fl_entry);
+ list_del(&next_node->node_list);
+ list_del(&next_node->free_stack);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&next_node->fl_entry,
+ list_add(&next_node->free_stack,
&mm->unused_nodes);
++mm->num_unused;
} else
@@ -313,12 +270,12 @@ void drm_mm_put_block(struct drm_mm_node *cur)
}
if (!merged) {
cur->free = 1;
- list_add(&cur->fl_entry, &mm->fl_entry);
+ list_add(&cur->free_stack, &mm->free_stack);
} else {
- list_del(&cur->ml_entry);
+ list_del(&cur->node_list);
spin_lock(&mm->unused_lock);
if (mm->num_unused < MM_UNUSED_TARGET) {
- list_add(&cur->fl_entry, &mm->unused_nodes);
+ list_add(&cur->free_stack, &mm->unused_nodes);
++mm->num_unused;
} else
kfree(cur);
@@ -328,40 +285,50 @@ void drm_mm_put_block(struct drm_mm_node *cur)
EXPORT_SYMBOL(drm_mm_put_block);
+static int check_free_mm_node(struct drm_mm_node *entry, unsigned long size,
+ unsigned alignment)
+{
+ unsigned wasted = 0;
+
+ if (entry->size < size)
+ return 0;
+
+ if (alignment) {
+ register unsigned tmp = entry->start % alignment;
+ if (tmp)
+ wasted = alignment - tmp;
+ }
+
+ if (entry->size >= size + wasted) {
+ return 1;
+ }
+
+ return 0;
+}
+
struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
unsigned long size,
unsigned alignment, int best_match)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
+ list_for_each_entry(entry, &mm->free_stack, free_stack) {
+ if (!check_free_mm_node(entry, size, alignment))
continue;
- if (alignment) {
- register unsigned tmp = entry->start % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
+ if (!best_match)
+ return entry;
- if (entry->size >= size + wasted) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
}
}
@@ -376,43 +343,28 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
unsigned long end,
int best_match)
{
- struct list_head *list;
- const struct list_head *free_stack = &mm->fl_entry;
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long best_size;
- unsigned wasted;
+
+ BUG_ON(mm->scanned_blocks);
best = NULL;
best_size = ~0UL;
- list_for_each(list, free_stack) {
- entry = list_entry(list, struct drm_mm_node, fl_entry);
- wasted = 0;
-
- if (entry->size < size)
- continue;
-
+ list_for_each_entry(entry, &mm->free_stack, free_stack) {
if (entry->start > end || (entry->start+entry->size) < start)
continue;
- if (entry->start < start)
- wasted += start - entry->start;
+ if (!check_free_mm_node(entry, size, alignment))
+ continue;
- if (alignment) {
- register unsigned tmp = (entry->start + wasted) % alignment;
- if (tmp)
- wasted += alignment - tmp;
- }
+ if (!best_match)
+ return entry;
- if (entry->size >= size + wasted &&
- (entry->start + wasted + size) <= end) {
- if (!best_match)
- return entry;
- if (entry->size < best_size) {
- best = entry;
- best_size = entry->size;
- }
+ if (entry->size < best_size) {
+ best = entry;
+ best_size = entry->size;
}
}
@@ -420,9 +372,161 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
}
EXPORT_SYMBOL(drm_mm_search_free_in_range);
+/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole.
+ *
+ * Warning: As long as the scan list is non-empty, no operations other than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
+ unsigned alignment)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+}
+EXPORT_SYMBOL(drm_mm_init_scan);
+
+/**
+ * Add a node to the scan list that might be freed to make space for the desired
+ * hole.
+ *
+ * Returns non-zero if a hole has been found, zero otherwise.
+ */
+int drm_mm_scan_add_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct list_head *prev_free, *next_free;
+ struct drm_mm_node *prev_node, *next_node;
+
+ mm->scanned_blocks++;
+
+ prev_free = next_free = NULL;
+
+ BUG_ON(node->free);
+ node->scanned_block = 1;
+ node->free = 1;
+
+ if (node->node_list.prev != &mm->node_list) {
+ prev_node = list_entry(node->node_list.prev, struct drm_mm_node,
+ node_list);
+
+ if (prev_node->free) {
+ list_del(&prev_node->node_list);
+
+ node->start = prev_node->start;
+ node->size += prev_node->size;
+
+ prev_node->scanned_prev_free = 1;
+
+ prev_free = &prev_node->free_stack;
+ }
+ }
+
+ if (node->node_list.next != &mm->node_list) {
+ next_node = list_entry(node->node_list.next, struct drm_mm_node,
+ node_list);
+
+ if (next_node->free) {
+ list_del(&next_node->node_list);
+
+ node->size += next_node->size;
+
+ next_node->scanned_next_free = 1;
+
+ next_free = &next_node->free_stack;
+ }
+ }
+
+ /* The free_stack list is not used for allocated objects, so these two
+ * pointers can be abused (as long as no allocation in this memory
+ * manager happens). */
+ node->free_stack.prev = prev_free;
+ node->free_stack.next = next_free;
+
+ if (check_free_mm_node(node, mm->scan_size, mm->scan_alignment)) {
+ mm->scan_hit_start = node->start;
+ mm->scan_hit_size = node->size;
+
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_add_block);
+
+/**
+ * Remove a node from the scan list.
+ *
+ * Nodes _must_ be removed in the exact same order from the scan list as they
+ * have been added, otherwise the internal state of the memory manager will be
+ * corrupted.
+ *
+ * When the scan list is empty, the selected memory nodes can be freed. An
+ * immediately following drm_mm_search_free with best_match = 0 will then return
+ * the just freed block (because it's at the top of the free_stack list).
+ *
+ * Returns one if this block should be evicted, zero otherwise. Will always
+ * return zero when no hole has been found.
+ */
+int drm_mm_scan_remove_block(struct drm_mm_node *node)
+{
+ struct drm_mm *mm = node->mm;
+ struct drm_mm_node *prev_node, *next_node;
+
+ mm->scanned_blocks--;
+
+ BUG_ON(!node->scanned_block);
+ node->scanned_block = 0;
+ node->free = 0;
+
+ prev_node = list_entry(node->free_stack.prev, struct drm_mm_node,
+ free_stack);
+ next_node = list_entry(node->free_stack.next, struct drm_mm_node,
+ free_stack);
+
+ if (prev_node) {
+ BUG_ON(!prev_node->scanned_prev_free);
+ prev_node->scanned_prev_free = 0;
+
+ list_add_tail(&prev_node->node_list, &node->node_list);
+
+ node->start = prev_node->start + prev_node->size;
+ node->size -= prev_node->size;
+ }
+
+ if (next_node) {
+ BUG_ON(!next_node->scanned_next_free);
+ next_node->scanned_next_free = 0;
+
+ list_add(&next_node->node_list, &node->node_list);
+
+ node->size -= next_node->size;
+ }
+
+ INIT_LIST_HEAD(&node->free_stack);
+
+ /* Only need to check for containment because start & size for the
+ * complete resulting free block (not just the desired part) is
+ * stored. */
+ if (node->start >= mm->scan_hit_start &&
+ node->start + node->size
+ <= mm->scan_hit_start + mm->scan_hit_size) {
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_mm_scan_remove_block);
+
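For reference, here is a minimal sketch of how a driver-side eviction path might drive this scan API, following the invariants spelled out in the comments above: feed LRU entries into the scan until drm_mm_scan_add_block() reports a hole, unwind every block with drm_mm_scan_remove_block() in the same order it was added, free the flagged blocks only once the scan list is empty, and then re-run drm_mm_search_free(). The struct my_obj layout, my_evict() and the list names are invented for illustration; only the drm_mm_* calls come from this patch.

struct my_obj {
	struct drm_mm_node *node;	/* allocation backed by this manager */
	struct list_head lru_link;	/* position on the driver's LRU */
	struct list_head scan_link;	/* temporary scan/evict membership */
};

static struct drm_mm_node *
my_find_space(struct drm_mm *mm, struct list_head *lru,
	      unsigned long size, unsigned alignment)
{
	struct my_obj *obj, *tmp;
	LIST_HEAD(scan_list);
	LIST_HEAD(evict_list);
	int found = 0;

	drm_mm_init_scan(mm, size, alignment);

	/* Feed LRU entries into the scan until a large enough hole shows up. */
	list_for_each_entry(obj, lru, lru_link) {
		list_add_tail(&obj->scan_link, &scan_list);
		if (drm_mm_scan_add_block(obj->node)) {
			found = 1;
			break;
		}
	}

	/* Every scanned block must be removed again, in the same order it
	 * was added; blocks flagged here lie inside the found hole. */
	list_for_each_entry_safe(obj, tmp, &scan_list, scan_link) {
		if (drm_mm_scan_remove_block(obj->node))
			list_move(&obj->scan_link, &evict_list);
		else
			list_del_init(&obj->scan_link);
	}

	if (!found)
		return NULL;

	/* Only once the scan list is empty may the chosen nodes be freed. */
	list_for_each_entry_safe(obj, tmp, &evict_list, scan_link) {
		list_del_init(&obj->scan_link);
		my_evict(obj);		/* hypothetical: frees obj->node */
	}

	/* The freshly freed hole now sits at the top of the free stack. */
	return drm_mm_search_free(mm, size, alignment, 0);
}

This mirrors how a GEM driver's evict path is expected to consume the API; the correctness of the unwind hinges entirely on the ordering rules documented above.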
int drm_mm_clean(struct drm_mm * mm)
{
- struct list_head *head = &mm->ml_entry;
+ struct list_head *head = &mm->node_list;
return (head->next->next == head);
}
@@ -430,10 +534,11 @@ EXPORT_SYMBOL(drm_mm_clean);
int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
- INIT_LIST_HEAD(&mm->ml_entry);
- INIT_LIST_HEAD(&mm->fl_entry);
+ INIT_LIST_HEAD(&mm->node_list);
+ INIT_LIST_HEAD(&mm->free_stack);
INIT_LIST_HEAD(&mm->unused_nodes);
mm->num_unused = 0;
+ mm->scanned_blocks = 0;
spin_lock_init(&mm->unused_lock);
return drm_mm_create_tail_node(mm, start, size, 0);
@@ -442,25 +547,25 @@ EXPORT_SYMBOL(drm_mm_init);
void drm_mm_takedown(struct drm_mm * mm)
{
- struct list_head *bnode = mm->fl_entry.next;
+ struct list_head *bnode = mm->free_stack.next;
struct drm_mm_node *entry;
struct drm_mm_node *next;
- entry = list_entry(bnode, struct drm_mm_node, fl_entry);
+ entry = list_entry(bnode, struct drm_mm_node, free_stack);
- if (entry->ml_entry.next != &mm->ml_entry ||
- entry->fl_entry.next != &mm->fl_entry) {
+ if (entry->node_list.next != &mm->node_list ||
+ entry->free_stack.next != &mm->free_stack) {
DRM_ERROR("Memory manager not clean. Delaying takedown\n");
return;
}
- list_del(&entry->fl_entry);
- list_del(&entry->ml_entry);
+ list_del(&entry->free_stack);
+ list_del(&entry->node_list);
kfree(entry);
spin_lock(&mm->unused_lock);
- list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
- list_del(&entry->fl_entry);
+ list_for_each_entry_safe(entry, next, &mm->unused_nodes, free_stack) {
+ list_del(&entry->free_stack);
kfree(entry);
--mm->num_unused;
}
@@ -475,7 +580,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ list_for_each_entry(entry, &mm->node_list, node_list) {
printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
prefix, entry->start, entry->start + entry->size,
entry->size, entry->free ? "free" : "used");
@@ -496,7 +601,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
struct drm_mm_node *entry;
int total_used = 0, total_free = 0, total = 0;
- list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+ list_for_each_entry(entry, &mm->node_list, node_list) {
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: %s\n", entry->start, entry->start + entry->size, entry->size, entry->free ? "free" : "used");
total += entry->size;
if (entry->free)
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 2ea9ad4a8d69..e20f78b542a7 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -124,4 +124,147 @@ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
EXPORT_SYMBOL(drm_pci_free);
+#ifdef CONFIG_PCI
+/**
+ * Register.
+ *
+ * \param pdev - PCI device structure
+ * \param ent entry from the PCI ID table with device type flags
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * then register the character device and inter-module information.
+ * Try to register; if we fail to register, back out the previous work.
+ */
+int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_g1;
+
+ pci_set_master(pdev);
+
+ dev->pdev = pdev;
+ dev->dev = &pdev->dev;
+
+ dev->pci_device = pdev->device;
+ dev->pci_vendor = pdev->vendor;
+
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ if ((ret = drm_fill_in_dev(dev, ent, driver))) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g2;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ pci_set_drvdata(pdev, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g2;
+ }
+
+ if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
+ goto err_g3;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, ent->driver_data);
+ if (ret)
+ goto err_g4;
+ }
+
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g4;
+ }
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, pci_name(pdev), dev->primary->index);
+
+ return 0;
+
+err_g4:
+ drm_put_minor(&dev->primary);
+err_g3:
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_put_minor(&dev->control);
+err_g2:
+ pci_disable_device(pdev);
+err_g1:
+ kfree(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_get_pci_dev);
+
+/**
+ * PCI device initialization. Called via drm_init at module load time.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structure, registering the
+ * stubs and initializing the AGP device.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+int drm_pci_init(struct drm_driver *driver)
+{
+ struct pci_dev *pdev = NULL;
+ const struct pci_device_id *pid;
+ int i;
+
+ if (driver->driver_features & DRIVER_MODESET)
+ return pci_register_driver(&driver->pci_driver);
+
+ /* If not using KMS, fall back to stealth mode manual scanning. */
+ for (i = 0; driver->pci_driver.id_table[i].vendor != 0; i++) {
+ pid = &driver->pci_driver.id_table[i];
+
+ /* Loop around setting up a DRM device for each PCI device
+ * matching our ID and device class. If we had the internal
+ * function that pci_get_subsys and pci_get_class used, we'd
+ * be able to just pass pid in instead of doing a two-stage
+ * thing.
+ */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
+ pid->subdevice, pdev)) != NULL) {
+ if ((pdev->class & pid->class_mask) != pid->class)
+ continue;
+
+ /* stealth mode requires a manual probe */
+ pci_dev_get(pdev);
+ drm_get_pci_dev(pdev, pid, driver);
+ }
+ }
+ return 0;
+}
+
+#else
+
+int drm_pci_init(struct drm_driver *driver)
+{
+ return -1;
+}
+
+#endif
/*@}*/
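As a rough illustration of the intended call flow, a hedged sketch of a hypothetical "mydrv" PCI driver (none of these names exist in the tree): a KMS driver registers a real pci_driver whose probe hands the device to drm_get_pci_dev(), while a non-KMS driver simply relies on the stealth-mode scan inside drm_pci_init(). Only the drm_* entry points are taken from this patch.

static struct pci_device_id mydrv_pciidlist[] = {
	{ 0x8086, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ 0, 0, 0 }
};

static int mydrv_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent);

static struct drm_driver mydrv_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	/* .load, .fops, .name, .date, version numbers, ... */
	.pci_driver = {
		.name = "mydrv",
		.id_table = mydrv_pciidlist,
		.probe = mydrv_pci_probe,
	},
};

static int mydrv_pci_probe(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	return drm_get_pci_dev(pdev, ent, &mydrv_driver);
}

static int __init mydrv_init(void)
{
	/* With DRIVER_MODESET set this registers the pci_driver above;
	 * without it, drm_pci_init() walks the id_table itself. */
	return drm_pci_init(&mydrv_driver);
}
module_init(mydrv_init);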
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
new file mode 100644
index 000000000000..460e9a3afa8d
--- /dev/null
+++ b/drivers/gpu/drm/drm_platform.c
@@ -0,0 +1,122 @@
+/*
+ * Derived from drm_pci.c
+ *
+ * Copyright 2003 José Fonseca.
+ * Copyright 2003 Leif Delgass.
+ * Copyright (c) 2009, Code Aurora Forum.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+
+/**
+ * Register.
+ *
+ * \param platdev - Platform device structure
+ * \return zero on success or a negative number on failure.
+ *
+ * Attempt to get inter-module "drm" information. If we are first,
+ * then register the character device and inter-module information.
+ * Try to register; if we fail to register, back out the previous work.
+ */
+
+int drm_get_platform_dev(struct platform_device *platdev,
+ struct drm_driver *driver)
+{
+ struct drm_device *dev;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->platformdev = platdev;
+ dev->dev = &platdev->dev;
+
+ ret = drm_fill_in_dev(dev, NULL, driver);
+
+ if (ret) {
+ printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
+ goto err_g1;
+ }
+
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ dev_set_drvdata(&platdev->dev, dev);
+ ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
+ if (ret)
+ goto err_g1;
+ }
+
+ ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY);
+ if (ret)
+ goto err_g2;
+
+ if (dev->driver->load) {
+ ret = dev->driver->load(dev, 0);
+ if (ret)
+ goto err_g3;
+ }
+
+ /* setup the grouping for the legacy output */
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = drm_mode_group_init_legacy_group(dev,
+ &dev->primary->mode_group);
+ if (ret)
+ goto err_g3;
+ }
+
+ list_add_tail(&dev->driver_item, &driver->device_list);
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ driver->name, driver->major, driver->minor, driver->patchlevel,
+ driver->date, dev->primary->index);
+
+ return 0;
+
+err_g3:
+ drm_put_minor(&dev->primary);
+err_g2:
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_put_minor(&dev->control);
+err_g1:
+ kfree(dev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_get_platform_dev);
+
+/**
+ * Platform device initialization. Called via drm_init at module load time.
+ *
+ * \return zero on success or a negative number on failure.
+ *
+ * Initializes a drm_device structure, registering the
+ * stubs.
+ *
+ * Expands the \c DRIVER_PREINIT and \c DRIVER_POST_INIT macros before and
+ * after the initialization for driver customization.
+ */
+
+int drm_platform_init(struct drm_driver *driver)
+{
+ return drm_get_platform_dev(driver->platform_device, driver);
+}
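A hedged sketch of how a platform driver might plug into this path; everything named "mydrm" is invented for illustration. In practice the driver points driver->platform_device at its device (the field read by drm_platform_init() above) before handing control to the DRM core.

static struct drm_driver mydrm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM,
	/* .load, .fops, .name, .date, version numbers, ... */
};

static int mydrm_probe(struct platform_device *pdev)
{
	/* drm_platform_init() only looks at driver->platform_device and
	 * then registers the minors via drm_get_platform_dev(). */
	mydrm_driver.platform_device = pdev;
	return drm_platform_init(&mydrm_driver);
}

static struct platform_driver mydrm_platform_driver = {
	.probe = mydrm_probe,
	.driver = {
		.name = "mydrm",
	},
};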
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index a0c365f2e521..d1ad57450df1 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -156,6 +156,9 @@ static void drm_master_destroy(struct kref *kref)
master->unique_len = 0;
}
+ kfree(dev->devname);
+ dev->devname = NULL;
+
list_for_each_entry_safe(pt, next, &master->magicfree, head) {
list_del(&pt->head);
drm_ht_remove_item(&master->magiclist, &pt->hash_item);
@@ -224,7 +227,7 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
return 0;
}
-static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
+int drm_fill_in_dev(struct drm_device *dev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
@@ -245,14 +248,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
idr_init(&dev->drw_idr);
- dev->pdev = pdev;
- dev->pci_device = pdev->device;
- dev->pci_vendor = pdev->vendor;
-
-#ifdef __alpha__
- dev->hose = pdev->sysdata;
-#endif
-
if (drm_ht_create(&dev->map_hash, 12)) {
return -ENOMEM;
}
@@ -321,7 +316,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
* create the proc init entry via proc_init(). This routines assigns
* minor numbers to secondary heads of multi-headed cards
*/
-static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type)
{
struct drm_minor *new_minor;
int ret;
@@ -388,83 +383,6 @@ err_idr:
}
/**
- * Register.
- *
- * \param pdev - PCI device structure
- * \param ent entry from the PCI ID table with device type flags
- * \return zero on success or a negative number on failure.
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- */
-int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- struct drm_device *dev;
- int ret;
-
- DRM_DEBUG("\n");
-
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev)
- return -ENOMEM;
-
- ret = pci_enable_device(pdev);
- if (ret)
- goto err_g1;
-
- pci_set_master(pdev);
- if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
- printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
- goto err_g2;
- }
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- pci_set_drvdata(pdev, dev);
- ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
- if (ret)
- goto err_g2;
- }
-
- if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
- goto err_g3;
-
- if (dev->driver->load) {
- ret = dev->driver->load(dev, ent->driver_data);
- if (ret)
- goto err_g4;
- }
-
- /* setup the grouping for the legacy output */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = drm_mode_group_init_legacy_group(dev, &dev->primary->mode_group);
- if (ret)
- goto err_g4;
- }
-
- list_add_tail(&dev->driver_item, &driver->device_list);
-
- DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
- driver->name, driver->major, driver->minor, driver->patchlevel,
- driver->date, pci_name(pdev), dev->primary->index);
-
- return 0;
-
-err_g4:
- drm_put_minor(&dev->primary);
-err_g3:
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_put_minor(&dev->control);
-err_g2:
- pci_disable_device(pdev);
-err_g1:
- kfree(dev);
- return ret;
-}
-EXPORT_SYMBOL(drm_get_dev);
-
-/**
* Put a secondary minor number.
*
* \param sec_minor - structure to be released
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 101d381e9d86..86118a742231 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -489,7 +489,8 @@ int drm_sysfs_device_add(struct drm_minor *minor)
int err;
char *minor_str;
- minor->kdev.parent = &minor->dev->pdev->dev;
+ minor->kdev.parent = minor->dev->dev;
+
minor->kdev.class = drm_class;
minor->kdev.release = drm_sysfs_device_release;
minor->kdev.devt = minor->device;
diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h
new file mode 100644
index 000000000000..03ea964aa604
--- /dev/null
+++ b/drivers/gpu/drm/drm_trace.h
@@ -0,0 +1,66 @@
+#if !defined(_DRM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DRM_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM drm
+#define TRACE_SYSTEM_STRING __stringify(TRACE_SYSTEM)
+#define TRACE_INCLUDE_FILE drm_trace
+
+TRACE_EVENT(drm_vblank_event,
+ TP_PROTO(int crtc, unsigned int seq),
+ TP_ARGS(crtc, seq),
+ TP_STRUCT__entry(
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("crtc=%d, seq=%d", __entry->crtc, __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_queued,
+ TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+ TP_ARGS(pid, crtc, seq),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+ __entry->seq)
+);
+
+TRACE_EVENT(drm_vblank_event_delivered,
+ TP_PROTO(pid_t pid, int crtc, unsigned int seq),
+ TP_ARGS(pid, crtc, seq),
+ TP_STRUCT__entry(
+ __field(pid_t, pid)
+ __field(int, crtc)
+ __field(unsigned int, seq)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __entry->crtc = crtc;
+ __entry->seq = seq;
+ ),
+ TP_printk("pid=%d, crtc=%d, seq=%d", __entry->pid, __entry->crtc, \
+ __entry->seq)
+);
+
+#endif /* _DRM_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/drm_trace_points.c b/drivers/gpu/drm/drm_trace_points.c
new file mode 100644
index 000000000000..0d0eb90864ae
--- /dev/null
+++ b/drivers/gpu/drm/drm_trace_points.c
@@ -0,0 +1,4 @@
+#include "drmP.h"
+
+#define CREATE_TRACE_POINTS
+#include "drm_trace.h"
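Once one translation unit defines CREATE_TRACE_POINTS (as drm_trace_points.c above does), the events can be fired from the vblank paths through the generated trace_*() helpers. A hedged sketch follows; the surrounding mydrm_* helpers are invented, only the tracepoint names come from the header above.

#include "drm_trace.h"

/* Somewhere on the "queue a vblank event" path. */
static void mydrm_queue_vblank_event(int crtc, unsigned int seq)
{
	trace_drm_vblank_event_queued(current->pid, crtc, seq);
	/* ... put the event on the per-CRTC wait list ... */
}

/* Somewhere on the delivery path, typically from the vblank handler. */
static void mydrm_deliver_vblank_event(pid_t pid, int crtc, unsigned int seq)
{
	/* ... complete the event towards userspace ... */
	trace_drm_vblank_event_delivered(pid, crtc, seq);
	trace_drm_vblank_event(crtc, seq);
}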
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index c3b13fb41d0c..3778360eceea 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -61,7 +61,7 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
tmp = pgprot_writecombine(tmp);
else
tmp = pgprot_noncached(tmp);
-#elif defined(__sparc__)
+#elif defined(__sparc__) || defined(__arm__)
tmp = pgprot_noncached(tmp);
#endif
return tmp;
@@ -601,6 +601,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
}
switch (map->type) {
+#if !defined(__arm__)
case _DRM_AGP:
if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
/*
@@ -615,20 +616,31 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
break;
}
/* fall through to _DRM_FRAME_BUFFER... */
+#endif
case _DRM_FRAME_BUFFER:
case _DRM_REGISTERS:
offset = dev->driver->get_reg_ofs(dev);
vma->vm_flags |= VM_IO; /* not in core dump */
vma->vm_page_prot = drm_io_prot(map->type, vma);
+#if !defined(__arm__)
if (io_remap_pfn_range(vma, vma->vm_start,
(map->offset + offset) >> PAGE_SHIFT,
vma->vm_end - vma->vm_start,
vma->vm_page_prot))
return -EAGAIN;
+#else
+ if (remap_pfn_range(vma, vma->vm_start,
+ (map->offset + offset) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+#endif
+
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%llx\n",
map->type,
vma->vm_start, vma->vm_end, (unsigned long long)(map->offset + offset));
+
vma->vm_ops = &drm_vm_ops;
break;
case _DRM_CONSISTENT:
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 6d2abaf35ba2..92862563e7ee 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -2,3 +2,6 @@ ccflags-y := -Iinclude/drm
ch7006-y := ch7006_drv.o ch7006_mode.o
obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
+
+sil164-y := sil164_drv.o
+obj-$(CONFIG_DRM_I2C_SIL164) += sil164.o
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 81681a07a806..08792a740f18 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -33,7 +33,7 @@ static void ch7006_encoder_set_config(struct drm_encoder *encoder,
{
struct ch7006_priv *priv = to_ch7006_priv(encoder);
- priv->params = params;
+ priv->params = *(struct ch7006_encoder_params *)params;
}
static void ch7006_encoder_destroy(struct drm_encoder *encoder)
@@ -114,7 +114,7 @@ static void ch7006_encoder_mode_set(struct drm_encoder *encoder,
{
struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
struct ch7006_priv *priv = to_ch7006_priv(encoder);
- struct ch7006_encoder_params *params = priv->params;
+ struct ch7006_encoder_params *params = &priv->params;
struct ch7006_state *state = &priv->state;
uint8_t *regs = state->regs;
struct ch7006_mode *mode = priv->mode;
@@ -428,6 +428,22 @@ static int ch7006_remove(struct i2c_client *client)
return 0;
}
+static int ch7006_suspend(struct i2c_client *client, pm_message_t mesg)
+{
+ ch7006_dbg(client, "\n");
+
+ return 0;
+}
+
+static int ch7006_resume(struct i2c_client *client)
+{
+ ch7006_dbg(client, "\n");
+
+ ch7006_write(client, 0x3d, 0x0);
+
+ return 0;
+}
+
static int ch7006_encoder_init(struct i2c_client *client,
struct drm_device *dev,
struct drm_encoder_slave *encoder)
@@ -454,6 +470,7 @@ static int ch7006_encoder_init(struct i2c_client *client,
priv->hmargin = 50;
priv->vmargin = 50;
priv->last_dpms = -1;
+ priv->chip_version = ch7006_read(client, CH7006_VERSION_ID);
if (ch7006_tv_norm) {
for (i = 0; i < NUM_TV_NORMS; i++) {
@@ -487,6 +504,8 @@ static struct drm_i2c_encoder_driver ch7006_driver = {
.i2c_driver = {
.probe = ch7006_probe,
.remove = ch7006_remove,
+ .suspend = ch7006_suspend,
+ .resume = ch7006_resume,
.driver = {
.name = "ch7006",
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c
index e447dfb63890..c860f24a5afc 100644
--- a/drivers/gpu/drm/i2c/ch7006_mode.c
+++ b/drivers/gpu/drm/i2c/ch7006_mode.c
@@ -316,7 +316,10 @@ void ch7006_setup_power_state(struct drm_encoder *encoder)
}
} else {
- *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ if (priv->chip_version >= 0x20)
+ *power |= bitfs(CH7006_POWER_LEVEL, FULL_POWER_OFF);
+ else
+ *power |= bitfs(CH7006_POWER_LEVEL, POWER_OFF);
}
}
diff --git a/drivers/gpu/drm/i2c/ch7006_priv.h b/drivers/gpu/drm/i2c/ch7006_priv.h
index b06d3d93d8ac..17667b7d57e7 100644
--- a/drivers/gpu/drm/i2c/ch7006_priv.h
+++ b/drivers/gpu/drm/i2c/ch7006_priv.h
@@ -77,7 +77,7 @@ struct ch7006_state {
};
struct ch7006_priv {
- struct ch7006_encoder_params *params;
+ struct ch7006_encoder_params params;
struct ch7006_mode *mode;
struct ch7006_state state;
@@ -95,6 +95,7 @@ struct ch7006_priv {
int flicker;
int scale;
+ int chip_version;
int last_dpms;
};
diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c
new file mode 100644
index 000000000000..0b6773290c08
--- /dev/null
+++ b/drivers/gpu/drm/i2c/sil164_drv.c
@@ -0,0 +1,462 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+#include "drm_encoder_slave.h"
+#include "i2c/sil164.h"
+
+struct sil164_priv {
+ struct sil164_encoder_params config;
+ struct i2c_client *duallink_slave;
+
+ uint8_t saved_state[0x10];
+ uint8_t saved_slave_state[0x10];
+};
+
+#define to_sil164_priv(x) \
+ ((struct sil164_priv *)to_encoder_slave(x)->slave_priv)
+
+#define sil164_dbg(client, format, ...) do { \
+ if (drm_debug & DRM_UT_KMS) \
+ dev_printk(KERN_DEBUG, &client->dev, \
+ "%s: " format, __func__, ## __VA_ARGS__); \
+ } while (0)
+#define sil164_info(client, format, ...) \
+ dev_info(&client->dev, format, __VA_ARGS__)
+#define sil164_err(client, format, ...) \
+ dev_err(&client->dev, format, __VA_ARGS__)
+
+#define SIL164_I2C_ADDR_MASTER 0x38
+#define SIL164_I2C_ADDR_SLAVE 0x39
+
+/* HW register definitions */
+
+#define SIL164_VENDOR_LO 0x0
+#define SIL164_VENDOR_HI 0x1
+#define SIL164_DEVICE_LO 0x2
+#define SIL164_DEVICE_HI 0x3
+#define SIL164_REVISION 0x4
+#define SIL164_FREQ_MIN 0x6
+#define SIL164_FREQ_MAX 0x7
+#define SIL164_CONTROL0 0x8
+# define SIL164_CONTROL0_POWER_ON 0x01
+# define SIL164_CONTROL0_EDGE_RISING 0x02
+# define SIL164_CONTROL0_INPUT_24BIT 0x04
+# define SIL164_CONTROL0_DUAL_EDGE 0x08
+# define SIL164_CONTROL0_HSYNC_ON 0x10
+# define SIL164_CONTROL0_VSYNC_ON 0x20
+#define SIL164_DETECT 0x9
+# define SIL164_DETECT_INTR_STAT 0x01
+# define SIL164_DETECT_HOTPLUG_STAT 0x02
+# define SIL164_DETECT_RECEIVER_STAT 0x04
+# define SIL164_DETECT_INTR_MODE_RECEIVER 0x00
+# define SIL164_DETECT_INTR_MODE_HOTPLUG 0x08
+# define SIL164_DETECT_OUT_MODE_HIGH 0x00
+# define SIL164_DETECT_OUT_MODE_INTR 0x10
+# define SIL164_DETECT_OUT_MODE_RECEIVER 0x20
+# define SIL164_DETECT_OUT_MODE_HOTPLUG 0x30
+# define SIL164_DETECT_VSWING_STAT 0x80
+#define SIL164_CONTROL1 0xa
+# define SIL164_CONTROL1_DESKEW_ENABLE 0x10
+# define SIL164_CONTROL1_DESKEW_INCR_SHIFT 5
+#define SIL164_GPIO 0xb
+#define SIL164_CONTROL2 0xc
+# define SIL164_CONTROL2_FILTER_ENABLE 0x01
+# define SIL164_CONTROL2_FILTER_SETTING_SHIFT 1
+# define SIL164_CONTROL2_DUALLINK_MASTER 0x40
+# define SIL164_CONTROL2_SYNC_CONT 0x80
+#define SIL164_DUALLINK 0xd
+# define SIL164_DUALLINK_ENABLE 0x10
+# define SIL164_DUALLINK_SKEW_SHIFT 5
+#define SIL164_PLLZONE 0xe
+# define SIL164_PLLZONE_STAT 0x08
+# define SIL164_PLLZONE_FORCE_ON 0x10
+# define SIL164_PLLZONE_FORCE_HIGH 0x20
+
+/* HW access functions */
+
+static void
+sil164_write(struct i2c_client *client, uint8_t addr, uint8_t val)
+{
+ uint8_t buf[] = {addr, val};
+ int ret;
+
+ ret = i2c_master_send(client, buf, ARRAY_SIZE(buf));
+ if (ret < 0)
+ sil164_err(client, "Error %d writing to subaddress 0x%x\n",
+ ret, addr);
+}
+
+static uint8_t
+sil164_read(struct i2c_client *client, uint8_t addr)
+{
+ uint8_t val;
+ int ret;
+
+ ret = i2c_master_send(client, &addr, sizeof(addr));
+ if (ret < 0)
+ goto fail;
+
+ ret = i2c_master_recv(client, &val, sizeof(val));
+ if (ret < 0)
+ goto fail;
+
+ return val;
+
+fail:
+ sil164_err(client, "Error %d reading from subaddress 0x%x\n",
+ ret, addr);
+ return 0;
+}
+
+static void
+sil164_save_state(struct i2c_client *client, uint8_t *state)
+{
+ int i;
+
+ for (i = 0x8; i <= 0xe; i++)
+ state[i] = sil164_read(client, i);
+}
+
+static void
+sil164_restore_state(struct i2c_client *client, uint8_t *state)
+{
+ int i;
+
+ for (i = 0x8; i <= 0xe; i++)
+ sil164_write(client, i, state[i]);
+}
+
+static void
+sil164_set_power_state(struct i2c_client *client, bool on)
+{
+ uint8_t control0 = sil164_read(client, SIL164_CONTROL0);
+
+ if (on)
+ control0 |= SIL164_CONTROL0_POWER_ON;
+ else
+ control0 &= ~SIL164_CONTROL0_POWER_ON;
+
+ sil164_write(client, SIL164_CONTROL0, control0);
+}
+
+static void
+sil164_init_state(struct i2c_client *client,
+ struct sil164_encoder_params *config,
+ bool duallink)
+{
+ sil164_write(client, SIL164_CONTROL0,
+ SIL164_CONTROL0_HSYNC_ON |
+ SIL164_CONTROL0_VSYNC_ON |
+ (config->input_edge ? SIL164_CONTROL0_EDGE_RISING : 0) |
+ (config->input_width ? SIL164_CONTROL0_INPUT_24BIT : 0) |
+ (config->input_dual ? SIL164_CONTROL0_DUAL_EDGE : 0));
+
+ sil164_write(client, SIL164_DETECT,
+ SIL164_DETECT_INTR_STAT |
+ SIL164_DETECT_OUT_MODE_RECEIVER);
+
+ sil164_write(client, SIL164_CONTROL1,
+ (config->input_skew ? SIL164_CONTROL1_DESKEW_ENABLE : 0) |
+ (((config->input_skew + 4) & 0x7)
+ << SIL164_CONTROL1_DESKEW_INCR_SHIFT));
+
+ sil164_write(client, SIL164_CONTROL2,
+ SIL164_CONTROL2_SYNC_CONT |
+ (config->pll_filter ? 0 : SIL164_CONTROL2_FILTER_ENABLE) |
+ (4 << SIL164_CONTROL2_FILTER_SETTING_SHIFT));
+
+ sil164_write(client, SIL164_PLLZONE, 0);
+
+ if (duallink)
+ sil164_write(client, SIL164_DUALLINK,
+ SIL164_DUALLINK_ENABLE |
+ (((config->duallink_skew + 4) & 0x7)
+ << SIL164_DUALLINK_SKEW_SHIFT));
+ else
+ sil164_write(client, SIL164_DUALLINK, 0);
+}
+
+/* DRM encoder functions */
+
+static void
+sil164_encoder_set_config(struct drm_encoder *encoder, void *params)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ priv->config = *(struct sil164_encoder_params *)params;
+}
+
+static void
+sil164_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+ bool on = (mode == DRM_MODE_DPMS_ON);
+ bool duallink = (on && encoder->crtc->mode.clock > 165000);
+
+ sil164_set_power_state(drm_i2c_encoder_get_client(encoder), on);
+
+ if (priv->duallink_slave)
+ sil164_set_power_state(priv->duallink_slave, duallink);
+}
+
+static void
+sil164_encoder_save(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ sil164_save_state(drm_i2c_encoder_get_client(encoder),
+ priv->saved_state);
+
+ if (priv->duallink_slave)
+ sil164_save_state(priv->duallink_slave,
+ priv->saved_slave_state);
+}
+
+static void
+sil164_encoder_restore(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ sil164_restore_state(drm_i2c_encoder_get_client(encoder),
+ priv->saved_state);
+
+ if (priv->duallink_slave)
+ sil164_restore_state(priv->duallink_slave,
+ priv->saved_slave_state);
+}
+
+static bool
+sil164_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int
+sil164_encoder_mode_valid(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ if (mode->clock < 32000)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > 330000 ||
+ (mode->clock > 165000 && !priv->duallink_slave))
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static void
+sil164_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+ bool duallink = adjusted_mode->clock > 165000;
+
+ sil164_init_state(drm_i2c_encoder_get_client(encoder),
+ &priv->config, duallink);
+
+ if (priv->duallink_slave)
+ sil164_init_state(priv->duallink_slave,
+ &priv->config, duallink);
+
+ sil164_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static enum drm_connector_status
+sil164_encoder_detect(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ struct i2c_client *client = drm_i2c_encoder_get_client(encoder);
+
+ if (sil164_read(client, SIL164_DETECT) & SIL164_DETECT_HOTPLUG_STAT)
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static int
+sil164_encoder_get_modes(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static int
+sil164_encoder_create_resources(struct drm_encoder *encoder,
+ struct drm_connector *connector)
+{
+ return 0;
+}
+
+static int
+sil164_encoder_set_property(struct drm_encoder *encoder,
+ struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ return 0;
+}
+
+static void
+sil164_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct sil164_priv *priv = to_sil164_priv(encoder);
+
+ if (priv->duallink_slave)
+ i2c_unregister_device(priv->duallink_slave);
+
+ kfree(priv);
+ drm_i2c_encoder_destroy(encoder);
+}
+
+static struct drm_encoder_slave_funcs sil164_encoder_funcs = {
+ .set_config = sil164_encoder_set_config,
+ .destroy = sil164_encoder_destroy,
+ .dpms = sil164_encoder_dpms,
+ .save = sil164_encoder_save,
+ .restore = sil164_encoder_restore,
+ .mode_fixup = sil164_encoder_mode_fixup,
+ .mode_valid = sil164_encoder_mode_valid,
+ .mode_set = sil164_encoder_mode_set,
+ .detect = sil164_encoder_detect,
+ .get_modes = sil164_encoder_get_modes,
+ .create_resources = sil164_encoder_create_resources,
+ .set_property = sil164_encoder_set_property,
+};
+
+/* I2C driver functions */
+
+static int
+sil164_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ int vendor = sil164_read(client, SIL164_VENDOR_HI) << 8 |
+ sil164_read(client, SIL164_VENDOR_LO);
+ int device = sil164_read(client, SIL164_DEVICE_HI) << 8 |
+ sil164_read(client, SIL164_DEVICE_LO);
+ int rev = sil164_read(client, SIL164_REVISION);
+
+ if (vendor != 0x1 || device != 0x6) {
+ sil164_dbg(client, "Unknown device %x:%x.%x\n",
+ vendor, device, rev);
+ return -ENODEV;
+ }
+
+ sil164_info(client, "Detected device %x:%x.%x\n",
+ vendor, device, rev);
+
+ return 0;
+}
+
+static int
+sil164_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static struct i2c_client *
+sil164_detect_slave(struct i2c_client *client)
+{
+ struct i2c_adapter *adap = client->adapter;
+ struct i2c_msg msg = {
+ .addr = SIL164_I2C_ADDR_SLAVE,
+ .len = 0,
+ };
+ const struct i2c_board_info info = {
+ I2C_BOARD_INFO("sil164", SIL164_I2C_ADDR_SLAVE)
+ };
+
+ if (i2c_transfer(adap, &msg, 1) != 1) {
+ sil164_dbg(adap, "No dual-link slave found.");
+ return NULL;
+ }
+
+ return i2c_new_device(adap, &info);
+}
+
+static int
+sil164_encoder_init(struct i2c_client *client,
+ struct drm_device *dev,
+ struct drm_encoder_slave *encoder)
+{
+ struct sil164_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ encoder->slave_priv = priv;
+ encoder->slave_funcs = &sil164_encoder_funcs;
+
+ priv->duallink_slave = sil164_detect_slave(client);
+
+ return 0;
+}
+
+static struct i2c_device_id sil164_ids[] = {
+ { "sil164", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sil164_ids);
+
+static struct drm_i2c_encoder_driver sil164_driver = {
+ .i2c_driver = {
+ .probe = sil164_probe,
+ .remove = sil164_remove,
+ .driver = {
+ .name = "sil164",
+ },
+ .id_table = sil164_ids,
+ },
+ .encoder_init = sil164_encoder_init,
+};
+
+/* Module initialization */
+
+static int __init
+sil164_init(void)
+{
+ return drm_i2c_encoder_register(THIS_MODULE, &sil164_driver);
+}
+
+static void __exit
+sil164_exit(void)
+{
+ drm_i2c_encoder_unregister(&sil164_driver);
+}
+
+MODULE_AUTHOR("Francisco Jerez <currojerez@riseup.net>");
+MODULE_DESCRIPTION("Silicon Image sil164 TMDS transmitter driver");
+MODULE_LICENSE("GPL and additional rights");
+
+module_init(sil164_init);
+module_exit(sil164_exit);
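For context, a hedged sketch of how a KMS driver could bind this slave encoder and hand it a link configuration. The mydrm_attach_sil164() helper and the parameter values are made up; the sil164_encoder_params fields are the ones consumed by sil164_init_state() above, and 0x38 is the master transmitter address used by this driver.

static int mydrm_attach_sil164(struct drm_device *dev,
			       struct drm_encoder_slave *slave,
			       struct i2c_adapter *adap)
{
	const struct i2c_board_info info = {
		I2C_BOARD_INFO("sil164", 0x38)	/* master address above */
	};
	struct sil164_encoder_params params = {
		.input_edge    = 1,	/* latch data on the rising edge */
		.input_width   = 1,	/* 24-bit single-pixel input bus */
		.input_dual    = 0,	/* one pixel per clock */
		.input_skew    = 0,
		.pll_filter    = 0,
		.duallink_skew = 0,
	};
	int ret;

	/* Probes the transmitter and hooks up sil164_encoder_funcs. */
	ret = drm_i2c_encoder_init(dev, slave, adap, &info);
	if (ret)
		return ret;

	/* Push the board-specific configuration used by mode_set(). */
	slave->slave_funcs->set_config(&slave->base, &params);

	return 0;
}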
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 997d91707ad2..0e6c131313d9 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -37,6 +37,7 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#define I810_BUF_FREE 2
@@ -60,9 +61,8 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
I810_BUF_CLIENT);
- if (used == I810_BUF_FREE) {
+ if (used == I810_BUF_FREE)
return buf;
- }
}
return NULL;
}
@@ -71,7 +71,7 @@ static struct drm_buf *i810_freelist_get(struct drm_device * dev)
* yet, the hardware updates in use for us once its on the ring buffer.
*/
-static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+static int i810_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -121,7 +121,7 @@ static const struct file_operations i810_buffer_fops = {
.fasync = drm_fasync,
};
-static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -152,7 +152,7 @@ static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
return retcode;
}
-static int i810_unmap_buffer(struct drm_buf * buf)
+static int i810_unmap_buffer(struct drm_buf *buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -172,7 +172,7 @@ static int i810_unmap_buffer(struct drm_buf * buf)
return retcode;
}
-static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
+static int i810_dma_get_buffer(struct drm_device *dev, drm_i810_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
@@ -202,7 +202,7 @@ static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
return retcode;
}
-static int i810_dma_cleanup(struct drm_device * dev)
+static int i810_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
@@ -218,9 +218,8 @@ static int i810_dma_cleanup(struct drm_device * dev)
drm_i810_private_t *dev_priv =
(drm_i810_private_t *) dev->dev_private;
- if (dev_priv->ring.virtual_start) {
+ if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
@@ -242,7 +241,7 @@ static int i810_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int i810_wait_ring(struct drm_device * dev, int n)
+static int i810_wait_ring(struct drm_device *dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -271,11 +270,11 @@ static int i810_wait_ring(struct drm_device * dev, int n)
udelay(1);
}
- out_wait_ring:
+out_wait_ring:
return iters;
}
-static void i810_kernel_lost_context(struct drm_device * dev)
+static void i810_kernel_lost_context(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -287,7 +286,7 @@ static void i810_kernel_lost_context(struct drm_device * dev)
ring->space += ring->Size;
}
-static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
+static int i810_freelist_init(struct drm_device *dev, drm_i810_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
@@ -322,9 +321,9 @@ static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_
return 0;
}
-static int i810_dma_initialize(struct drm_device * dev,
- drm_i810_private_t * dev_priv,
- drm_i810_init_t * init)
+static int i810_dma_initialize(struct drm_device *dev,
+ drm_i810_private_t *dev_priv,
+ drm_i810_init_t *init)
{
struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
@@ -462,7 +461,7 @@ static int i810_dma_init(struct drm_device *dev, void *data,
* Use 'volatile' & local var tmp to force the emitted values to be
* identical to the verified ones.
*/
-static void i810EmitContextVerified(struct drm_device * dev,
+static void i810EmitContextVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -495,7 +494,7 @@ static void i810EmitContextVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
+static void i810EmitTexVerified(struct drm_device *dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -528,7 +527,7 @@ static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i810EmitDestVerified(struct drm_device * dev,
+static void i810EmitDestVerified(struct drm_device *dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -563,7 +562,7 @@ static void i810EmitDestVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i810EmitState(struct drm_device * dev)
+static void i810EmitState(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -594,7 +593,7 @@ static void i810EmitState(struct drm_device * dev)
/* need to verify
*/
-static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
+static void i810_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
@@ -669,7 +668,7 @@ static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
}
}
-static void i810_dma_dispatch_swap(struct drm_device * dev)
+static void i810_dma_dispatch_swap(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -715,8 +714,8 @@ static void i810_dma_dispatch_swap(struct drm_device * dev)
}
}
-static void i810_dma_dispatch_vertex(struct drm_device * dev,
- struct drm_buf * buf, int discard, int used)
+static void i810_dma_dispatch_vertex(struct drm_device *dev,
+ struct drm_buf *buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
@@ -795,7 +794,7 @@ static void i810_dma_dispatch_vertex(struct drm_device * dev,
}
}
-static void i810_dma_dispatch_flip(struct drm_device * dev)
+static void i810_dma_dispatch_flip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
@@ -841,7 +840,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
}
-static void i810_dma_quiescent(struct drm_device * dev)
+static void i810_dma_quiescent(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -858,7 +857,7 @@ static void i810_dma_quiescent(struct drm_device * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
-static int i810_flush_queue(struct drm_device * dev)
+static int i810_flush_queue(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -891,7 +890,7 @@ static int i810_flush_queue(struct drm_device * dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(struct drm_device * dev,
+static void i810_reclaim_buffers(struct drm_device *dev,
struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
@@ -969,9 +968,8 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
- if (!dev->dev_private) {
+ if (!dev->dev_private)
return -EINVAL;
- }
i810_dma_dispatch_clear(dev, clear->flags,
clear->clear_color, clear->clear_depth);
@@ -1039,7 +1037,7 @@ static int i810_docopy(struct drm_device *dev, void *data,
return 0;
}
-static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
+static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1053,9 +1051,8 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
i810_kernel_lost_context(dev);
u = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_HARDWARE);
- if (u != I810_BUF_CLIENT) {
+ if (u != I810_BUF_CLIENT)
DRM_DEBUG("MC found buffer that isn't mine!\n");
- }
if (used > 4 * 1024)
used = 0;
@@ -1160,7 +1157,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- //Tell the overlay to update
+ /* Tell the overlay to update */
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
return 0;
@@ -1168,7 +1165,7 @@ static int i810_ov0_flip(struct drm_device *dev, void *data,
/* Not sure why this isn't set all the time:
*/
-static void i810_do_init_pageflip(struct drm_device * dev)
+static void i810_do_init_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1178,7 +1175,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i810_do_cleanup_pageflip(struct drm_device * dev)
+static int i810_do_cleanup_pageflip(struct drm_device *dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1218,49 +1215,61 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
}
-void i810_driver_lastclose(struct drm_device * dev)
+void i810_driver_lastclose(struct drm_device *dev)
{
i810_dma_cleanup(dev);
}
-void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
i810_do_cleanup_pageflip(dev);
- }
}
}
-void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv)
{
i810_reclaim_buffers(dev, file_priv);
}
-int i810_driver_dma_quiescent(struct drm_device * dev)
+int i810_driver_dma_quiescent(struct drm_device *dev)
{
i810_dma_quiescent(dev);
return 0;
}
+/*
+ * Call drm_ioctl under the big kernel lock in order to lock against
+ * the i810_mmap_buffers function.
+ */
+long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ lock_kernel();
+ ret = drm_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
+ DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
@@ -1276,7 +1285,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
* \returns
 * A value of 1 is always returned to indicate every i810 is AGP.
*/
-int i810_driver_device_is_agp(struct drm_device * dev)
+int i810_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index c1e02752e023..b4250b2cac1f 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -59,7 +59,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i810_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
index 21e2691f28f9..c9339f481795 100644
--- a/drivers/gpu/drm/i810/i810_drv.h
+++ b/drivers/gpu/drm/i810/i810_drv.h
@@ -115,56 +115,59 @@ typedef struct drm_i810_private {
} drm_i810_private_t;
/* i810_dma.c */
-extern int i810_driver_dma_quiescent(struct drm_device * dev);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+extern int i810_driver_dma_quiescent(struct drm_device *dev);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(struct drm_device * dev);
-extern void i810_driver_preclose(struct drm_device * dev,
+extern void i810_driver_lastclose(struct drm_device *dev);
+extern void i810_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
-extern int i810_driver_device_is_agp(struct drm_device * dev);
+extern int i810_driver_device_is_agp(struct drm_device *dev);
+extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
dev_priv->mmio_map->handle)
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
-#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
+#define I810_DEREF(reg) (*(__volatile__ int *)I810_ADDR(reg))
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
-#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
+#define I810_WRITE(reg, val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_DEREF16(reg) (*(__volatile__ u16 *)I810_ADDR(reg))
#define I810_READ16(reg) I810_DEREF16(reg)
-#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
+#define I810_WRITE16(reg, val) do { I810_DEREF16(reg) = val; } while (0)
#define I810_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask; \
- volatile char *virt;
-
-#define BEGIN_LP_RING(n) do { \
- if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
- if (dev_priv->ring.space < n*4) \
- i810_wait_ring(dev, n*4); \
- dev_priv->ring.space -= n*4; \
- outring = dev_priv->ring.tail; \
- ringmask = dev_priv->ring.tail_mask; \
- virt = dev_priv->ring.virtual_start; \
+ volatile char *virt;
+
+#define BEGIN_LP_RING(n) do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
+ if (dev_priv->ring.space < n*4) \
+ i810_wait_ring(dev, n*4); \
+ dev_priv->ring.space -= n*4; \
+ outring = dev_priv->ring.tail; \
+ ringmask = dev_priv->ring.tail_mask; \
+ virt = dev_priv->ring.virtual_start; \
} while (0)
-#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+#define ADVANCE_LP_RING() do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
- I810_WRITE(LP_RING + RING_TAIL, outring); \
-} while(0)
-
-#define OUT_RING(n) do { \
- if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
- *(volatile unsigned int *)(virt + outring) = n; \
- outring += 4; \
- outring &= ringmask; \
+ I810_WRITE(LP_RING + RING_TAIL, outring); \
+} while (0)
+
+#define OUT_RING(n) do { \
+ if (I810_VERBOSE) \
+ DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
+ *(volatile unsigned int *)(virt + outring) = n; \
+ outring += 4; \
+ outring &= ringmask; \
} while (0)
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c
index 65759a9a85c8..5168862c9227 100644
--- a/drivers/gpu/drm/i830/i830_dma.c
+++ b/drivers/gpu/drm/i830/i830_dma.c
@@ -36,6 +36,7 @@
#include "i830_drm.h"
#include "i830_drv.h"
#include <linux/interrupt.h> /* For task queue support */
+#include <linux/smp_lock.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/slab.h>
@@ -62,9 +63,8 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev)
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I830_BUF_FREE,
I830_BUF_CLIENT);
- if (used == I830_BUF_FREE) {
+ if (used == I830_BUF_FREE)
return buf;
- }
}
return NULL;
}
@@ -73,7 +73,7 @@ static struct drm_buf *i830_freelist_get(struct drm_device * dev)
* yet, the hardware updates in use for us once its on the ring buffer.
*/
-static int i830_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+static int i830_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -123,7 +123,7 @@ static const struct file_operations i830_buffer_fops = {
.fasync = drm_fasync,
};
-static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
+static int i830_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
{
struct drm_device *dev = file_priv->minor->dev;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
@@ -156,7 +156,7 @@ static int i830_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
return retcode;
}
-static int i830_unmap_buffer(struct drm_buf * buf)
+static int i830_unmap_buffer(struct drm_buf *buf)
{
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -176,7 +176,7 @@ static int i830_unmap_buffer(struct drm_buf * buf)
return retcode;
}
-static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
+static int i830_dma_get_buffer(struct drm_device *dev, drm_i830_dma_t *d,
struct drm_file *file_priv)
{
struct drm_buf *buf;
@@ -206,7 +206,7 @@ static int i830_dma_get_buffer(struct drm_device * dev, drm_i830_dma_t * d,
return retcode;
}
-static int i830_dma_cleanup(struct drm_device * dev)
+static int i830_dma_cleanup(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
@@ -222,9 +222,8 @@ static int i830_dma_cleanup(struct drm_device * dev)
drm_i830_private_t *dev_priv =
(drm_i830_private_t *) dev->dev_private;
- if (dev_priv->ring.virtual_start) {
+ if (dev_priv->ring.virtual_start)
drm_core_ioremapfree(&dev_priv->ring.map, dev);
- }
if (dev_priv->hw_status_page) {
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
@@ -246,7 +245,7 @@ static int i830_dma_cleanup(struct drm_device * dev)
return 0;
}
-int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
+int i830_wait_ring(struct drm_device *dev, int n, const char *caller)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -276,11 +275,11 @@ int i830_wait_ring(struct drm_device * dev, int n, const char *caller)
dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
}
- out_wait_ring:
+out_wait_ring:
return iters;
}
-static void i830_kernel_lost_context(struct drm_device * dev)
+static void i830_kernel_lost_context(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_ring_buffer_t *ring = &(dev_priv->ring);
@@ -295,7 +294,7 @@ static void i830_kernel_lost_context(struct drm_device * dev)
dev_priv->sarea_priv->perf_boxes |= I830_BOX_RING_EMPTY;
}
-static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_priv)
+static int i830_freelist_init(struct drm_device *dev, drm_i830_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
int my_idx = 36;
@@ -329,9 +328,9 @@ static int i830_freelist_init(struct drm_device * dev, drm_i830_private_t * dev_
return 0;
}
-static int i830_dma_initialize(struct drm_device * dev,
- drm_i830_private_t * dev_priv,
- drm_i830_init_t * init)
+static int i830_dma_initialize(struct drm_device *dev,
+ drm_i830_private_t *dev_priv,
+ drm_i830_init_t *init)
{
struct drm_map_list *r_list;
@@ -482,7 +481,7 @@ static int i830_dma_init(struct drm_device *dev, void *data,
/* Most efficient way to verify state for the i830 is as it is
* emitted. Non-conformant state is silently dropped.
*/
-static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitContextVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -527,7 +526,7 @@ static void i830EmitContextVerified(struct drm_device * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitTexVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -561,7 +560,7 @@ static void i830EmitTexVerified(struct drm_device * dev, unsigned int *code)
printk("rejected packet %x\n", code[0]);
}
-static void i830EmitTexBlendVerified(struct drm_device * dev,
+static void i830EmitTexBlendVerified(struct drm_device *dev,
unsigned int *code, unsigned int num)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -586,7 +585,7 @@ static void i830EmitTexBlendVerified(struct drm_device * dev,
ADVANCE_LP_RING();
}
-static void i830EmitTexPalette(struct drm_device * dev,
+static void i830EmitTexPalette(struct drm_device *dev,
unsigned int *palette, int number, int is_shared)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -603,9 +602,8 @@ static void i830EmitTexPalette(struct drm_device * dev,
} else {
OUT_RING(CMD_OP_MAP_PALETTE_LOAD | MAP_PALETTE_NUM(number));
}
- for (i = 0; i < 256; i++) {
+ for (i = 0; i < 256; i++)
OUT_RING(palette[i]);
- }
OUT_RING(0);
/* KW: WHERE IS THE ADVANCE_LP_RING? This is effectively a noop!
*/
@@ -613,7 +611,7 @@ static void i830EmitTexPalette(struct drm_device * dev,
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitDestVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
unsigned int tmp;
@@ -674,7 +672,7 @@ static void i830EmitDestVerified(struct drm_device * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
+static void i830EmitStippleVerified(struct drm_device *dev, unsigned int *code)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -685,7 +683,7 @@ static void i830EmitStippleVerified(struct drm_device * dev, unsigned int *code)
ADVANCE_LP_RING();
}
-static void i830EmitState(struct drm_device * dev)
+static void i830EmitState(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -788,7 +786,7 @@ static void i830EmitState(struct drm_device * dev)
* Performance monitoring functions
*/
-static void i830_fill_box(struct drm_device * dev,
+static void i830_fill_box(struct drm_device *dev,
int x, int y, int w, int h, int r, int g, int b)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -816,17 +814,16 @@ static void i830_fill_box(struct drm_device * dev,
OUT_RING((y << 16) | x);
OUT_RING(((y + h) << 16) | (x + w));
- if (dev_priv->current_page == 1) {
+ if (dev_priv->current_page == 1)
OUT_RING(dev_priv->front_offset);
- } else {
+ else
OUT_RING(dev_priv->back_offset);
- }
OUT_RING(color);
ADVANCE_LP_RING();
}
-static void i830_cp_performance_boxes(struct drm_device * dev)
+static void i830_cp_performance_boxes(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -871,7 +868,7 @@ static void i830_cp_performance_boxes(struct drm_device * dev)
dev_priv->sarea_priv->perf_boxes = 0;
}
-static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
+static void i830_dma_dispatch_clear(struct drm_device *dev, int flags,
unsigned int clear_color,
unsigned int clear_zval,
unsigned int clear_depthmask)
@@ -966,7 +963,7 @@ static void i830_dma_dispatch_clear(struct drm_device * dev, int flags,
}
}
-static void i830_dma_dispatch_swap(struct drm_device * dev)
+static void i830_dma_dispatch_swap(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -1036,7 +1033,7 @@ static void i830_dma_dispatch_swap(struct drm_device * dev)
}
}
-static void i830_dma_dispatch_flip(struct drm_device * dev)
+static void i830_dma_dispatch_flip(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1079,8 +1076,8 @@ static void i830_dma_dispatch_flip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static void i830_dma_dispatch_vertex(struct drm_device * dev,
- struct drm_buf * buf, int discard, int used)
+static void i830_dma_dispatch_vertex(struct drm_device *dev,
+ struct drm_buf *buf, int discard, int used)
{
drm_i830_private_t *dev_priv = dev->dev_private;
drm_i830_buf_priv_t *buf_priv = buf->dev_private;
@@ -1100,9 +1097,8 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev,
if (discard) {
u = cmpxchg(buf_priv->in_use, I830_BUF_CLIENT,
I830_BUF_HARDWARE);
- if (u != I830_BUF_CLIENT) {
+ if (u != I830_BUF_CLIENT)
DRM_DEBUG("xxxx 2\n");
- }
}
if (used > 4 * 1023)
@@ -1191,7 +1187,7 @@ static void i830_dma_dispatch_vertex(struct drm_device * dev,
}
}
-static void i830_dma_quiescent(struct drm_device * dev)
+static void i830_dma_quiescent(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1208,7 +1204,7 @@ static void i830_dma_quiescent(struct drm_device * dev)
i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}
-static int i830_flush_queue(struct drm_device * dev)
+static int i830_flush_queue(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -1241,7 +1237,7 @@ static int i830_flush_queue(struct drm_device * dev)
}
/* Must be called with the lock held */
-static void i830_reclaim_buffers(struct drm_device * dev, struct drm_file *file_priv)
+static void i830_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
struct drm_device_dma *dma = dev->dma;
int i;
@@ -1316,9 +1312,8 @@ static int i830_clear_bufs(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
- if (!dev->dev_private) {
+ if (!dev->dev_private)
return -EINVAL;
- }
i830_dma_dispatch_clear(dev, clear->flags,
clear->clear_color,
@@ -1339,7 +1334,7 @@ static int i830_swap_bufs(struct drm_device *dev, void *data,
/* Not sure why this isn't set all the time:
*/
-static void i830_do_init_pageflip(struct drm_device * dev)
+static void i830_do_init_pageflip(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1349,7 +1344,7 @@ static void i830_do_init_pageflip(struct drm_device * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i830_do_cleanup_pageflip(struct drm_device * dev)
+static int i830_do_cleanup_pageflip(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1490,47 +1485,59 @@ int i830_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
}
-void i830_driver_lastclose(struct drm_device * dev)
+void i830_driver_lastclose(struct drm_device *dev)
{
i830_dma_cleanup(dev);
}
-void i830_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void i830_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i830_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
i830_do_cleanup_pageflip(dev);
- }
}
}
-void i830_driver_reclaim_buffers_locked(struct drm_device * dev, struct drm_file *file_priv)
+void i830_driver_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv)
{
i830_reclaim_buffers(dev, file_priv);
}
-int i830_driver_dma_quiescent(struct drm_device * dev)
+int i830_driver_dma_quiescent(struct drm_device *dev)
{
i830_dma_quiescent(dev);
return 0;
}
+/*
+ * Call drm_ioctl under the big kernel lock in order
+ * to lock against the i830_mmap_buffers function.
+ */
+long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ lock_kernel();
+ ret = drm_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
struct drm_ioctl_desc i830_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH)
+ DRM_IOCTL_DEF(DRM_I830_INIT, i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_VERTEX, i830_dma_vertex, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_CLEAR, i830_clear_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_FLUSH, i830_flush_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_GETAGE, i830_getage, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_GETBUF, i830_getbuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_SWAP, i830_swap_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_COPY, i830_copybuf, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_DOCOPY, i830_docopy, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_FLIP, i830_flip_bufs, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_IRQ_EMIT, i830_irq_emit, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_IRQ_WAIT, i830_irq_wait, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_GETPARAM, i830_getparam, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_I830_SETPARAM, i830_setparam, DRM_AUTH|DRM_UNLOCKED),
};
int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
@@ -1546,7 +1553,7 @@ int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
* \returns
 * A value of 1 is always returned to indicate every i8xx is AGP.
*/
-int i830_driver_device_is_agp(struct drm_device * dev)
+int i830_driver_device_is_agp(struct drm_device *dev)
{
return 1;
}
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c
index 44f990bed8f4..a5c66aa82f0c 100644
--- a/drivers/gpu/drm/i830/i830_drv.c
+++ b/drivers/gpu/drm/i830/i830_drv.c
@@ -70,7 +70,7 @@ static struct drm_driver driver = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
- .unlocked_ioctl = drm_ioctl,
+ .unlocked_ioctl = i830_ioctl,
.mmap = drm_mmap,
.poll = drm_poll,
.fasync = drm_fasync,
diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
index da82afe4ded5..0df1c720560b 100644
--- a/drivers/gpu/drm/i830/i830_drv.h
+++ b/drivers/gpu/drm/i830/i830_drv.h
@@ -122,6 +122,7 @@ typedef struct drm_i830_private {
} drm_i830_private_t;
+long i830_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern struct drm_ioctl_desc i830_ioctls[];
extern int i830_max_ioctl;
@@ -132,33 +133,33 @@ extern int i830_irq_wait(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i830_driver_irq_preinstall(struct drm_device * dev);
-extern void i830_driver_irq_postinstall(struct drm_device * dev);
-extern void i830_driver_irq_uninstall(struct drm_device * dev);
+extern void i830_driver_irq_preinstall(struct drm_device *dev);
+extern void i830_driver_irq_postinstall(struct drm_device *dev);
+extern void i830_driver_irq_uninstall(struct drm_device *dev);
extern int i830_driver_load(struct drm_device *, unsigned long flags);
-extern void i830_driver_preclose(struct drm_device * dev,
+extern void i830_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
-extern void i830_driver_lastclose(struct drm_device * dev);
-extern void i830_driver_reclaim_buffers_locked(struct drm_device * dev,
+extern void i830_driver_lastclose(struct drm_device *dev);
+extern void i830_driver_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv);
-extern int i830_driver_dma_quiescent(struct drm_device * dev);
-extern int i830_driver_device_is_agp(struct drm_device * dev);
+extern int i830_driver_dma_quiescent(struct drm_device *dev);
+extern int i830_driver_device_is_agp(struct drm_device *dev);
-#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
-#define I830_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
-#define I830_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
+#define I830_READ(reg) DRM_READ32(dev_priv->mmio_map, reg)
+#define I830_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio_map, reg, val)
+#define I830_READ16(reg) DRM_READ16(dev_priv->mmio_map, reg)
+#define I830_WRITE16(reg, val) DRM_WRITE16(dev_priv->mmio_map, reg, val)
#define I830_VERBOSE 0
#define RING_LOCALS unsigned int outring, ringmask, outcount; \
- volatile char *virt;
+ volatile char *virt;
#define BEGIN_LP_RING(n) do { \
if (I830_VERBOSE) \
printk("BEGIN_LP_RING(%d)\n", (n)); \
if (dev_priv->ring.space < n*4) \
- i830_wait_ring(dev, n*4, __func__); \
+ i830_wait_ring(dev, n*4, __func__); \
outcount = 0; \
outring = dev_priv->ring.tail; \
ringmask = dev_priv->ring.tail_mask; \
@@ -166,21 +167,23 @@ extern int i830_driver_device_is_agp(struct drm_device * dev);
} while (0)
#define OUT_RING(n) do { \
- if (I830_VERBOSE) printk(" OUT_RING %x\n", (int)(n)); \
+ if (I830_VERBOSE) \
+ printk(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
- outcount++; \
+ outcount++; \
outring += 4; \
outring &= ringmask; \
} while (0)
-#define ADVANCE_LP_RING() do { \
- if (I830_VERBOSE) printk("ADVANCE_LP_RING %x\n", outring); \
- dev_priv->ring.tail = outring; \
- dev_priv->ring.space -= outcount * 4; \
- I830_WRITE(LP_RING + RING_TAIL, outring); \
-} while(0)
+#define ADVANCE_LP_RING() do { \
+ if (I830_VERBOSE) \
+ printk("ADVANCE_LP_RING %x\n", outring); \
+ dev_priv->ring.tail = outring; \
+ dev_priv->ring.space -= outcount * 4; \
+ I830_WRITE(LP_RING + RING_TAIL, outring); \
+} while (0)
-extern int i830_wait_ring(struct drm_device * dev, int n, const char *caller);
+extern int i830_wait_ring(struct drm_device *dev, int n, const char *caller);
#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
index 91ec2bb497e9..d1a6b95d631d 100644
--- a/drivers/gpu/drm/i830/i830_irq.c
+++ b/drivers/gpu/drm/i830/i830_irq.c
@@ -53,7 +53,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int i830_emit_irq(struct drm_device * dev)
+static int i830_emit_irq(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -70,7 +70,7 @@ static int i830_emit_irq(struct drm_device * dev)
return atomic_read(&dev_priv->irq_emitted);
}
-static int i830_wait_irq(struct drm_device * dev, int irq_nr)
+static int i830_wait_irq(struct drm_device *dev, int irq_nr)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
DECLARE_WAITQUEUE(entry, current);
@@ -156,7 +156,7 @@ int i830_irq_wait(struct drm_device *dev, void *data,
/* drm_dma.h hooks
*/
-void i830_driver_irq_preinstall(struct drm_device * dev)
+void i830_driver_irq_preinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
@@ -168,14 +168,14 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
init_waitqueue_head(&dev_priv->irq_queue);
}
-void i830_driver_irq_postinstall(struct drm_device * dev)
+void i830_driver_irq_postinstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
I830_WRITE16(I830REG_INT_ENABLE_R, 0x2);
}
-void i830_driver_irq_uninstall(struct drm_device * dev)
+void i830_driver_irq_uninstall(struct drm_device *dev)
{
drm_i830_private_t *dev_priv = (drm_i830_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2305a1234f1e..f19ffe87af3c 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,12 +34,15 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
+#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
+extern int intel_max_stolen; /* from AGP driver */
+
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
@@ -1256,7 +1259,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
drm_mm_put_block(compressed_fb);
}
- if (!IS_GM45(dev)) {
+ if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
4096, 0);
if (!compressed_llb) {
@@ -1282,8 +1285,9 @@ static void i915_setup_compression(struct drm_device *dev, int size)
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
-
- if (IS_GM45(dev)) {
+ if (IS_IRONLAKE_M(dev))
+ I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
+ else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
@@ -1291,7 +1295,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->compressed_llb = compressed_llb;
}
- DRM_DEBUG("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
ll_base, size >> 20);
}
@@ -1354,7 +1358,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
int fb_bar = IS_I9XX(dev) ? 2 : 0;
int ret = 0;
- dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, fb_bar) &
0xff000000;
/* Basic memrange allocator for stolen space (aka vram) */
@@ -2063,8 +2067,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Add register map (needed for suspend/resume) */
mmio_bar = IS_I9XX(dev) ? 0 : 1;
- base = drm_get_resource_start(dev, mmio_bar);
- size = drm_get_resource_len(dev, mmio_bar);
+ base = pci_resource_start(dev->pdev, mmio_bar);
+ size = pci_resource_len(dev->pdev, mmio_bar);
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
@@ -2104,6 +2108,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto out_iomapfree;
+ if (prealloc_size > intel_max_stolen) {
+ DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
+ prealloc_size >> 20, intel_max_stolen >> 20);
+ prealloc_size = intel_max_stolen;
+ }
+
dev_priv->wq = create_singlethread_workqueue("i915");
if (dev_priv->wq == NULL) {
DRM_ERROR("Failed to create our workqueue.\n");
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 423dc90c1e20..5044f653e8ea 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -93,11 +93,11 @@ static const struct intel_device_info intel_i945gm_info = {
};
static const struct intel_device_info intel_i965g_info = {
- .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+ .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
};
static const struct intel_device_info intel_i965gm_info = {
- .is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+ .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
@@ -114,7 +114,7 @@ static const struct intel_device_info intel_g45_info = {
};
static const struct intel_device_info intel_gm45_info = {
- .is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+ .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_pipe_cxsr = 1,
.has_hotplug = 1,
@@ -134,7 +134,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
- .need_gfx_hws = 1, .has_rc6 = 1,
+ .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
.has_hotplug = 1,
};
@@ -148,33 +148,33 @@ static const struct intel_device_info intel_sandybridge_m_info = {
.has_hotplug = 1, .is_gen6 = 1,
};
-static const struct pci_device_id pciidlist[] = {
- INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
- INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
- INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+static const struct pci_device_id pciidlist[] = { /* aka */
+ INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */
+ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */
+ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */
INTEL_VGA_DEVICE(0x358e, &intel_i85x_info),
- INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
- INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
- INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
- INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
- INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
- INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
- INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
- INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
- INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
- INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
- INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
- INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
- INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
- INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+ INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */
+ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */
+ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */
+ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */
+ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */
+ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */
+ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */
+ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */
+ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */
+ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */
+ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */
+ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */
+ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */
+ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */
+ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */
+ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */
+ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */
+ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */
+ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */
+ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */
+ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */
+ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */
INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
/*
* Clear request list
*/
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
+ i915_gem_retire_requests(dev);
if (need_display)
i915_save_display(dev);
@@ -413,7 +413,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
static int __devinit
i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ return drm_get_pci_dev(pdev, ent, &driver);
}
static void
@@ -482,7 +482,7 @@ static int i915_pm_poweroff(struct device *dev)
return i915_drm_freeze(drm_dev);
}
-const struct dev_pm_ops i915_pm_ops = {
+static const struct dev_pm_ops i915_pm_ops = {
.suspend = i915_pm_suspend,
.resume = i915_pm_resume,
.freeze = i915_pm_freeze,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2e1744d37ad5..906663b9929e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -176,7 +176,8 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size);
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@@ -200,6 +201,8 @@ struct intel_device_info {
u8 need_gfx_hws : 1;
u8 is_g4x : 1;
u8 is_pineview : 1;
+ u8 is_broadwater : 1;
+ u8 is_crestline : 1;
u8 is_ironlake : 1;
u8 is_gen6 : 1;
u8 has_fbc : 1;
@@ -288,6 +291,8 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
+ uint32_t last_instdone;
+ uint32_t last_instdone1;
struct drm_mm vram;
@@ -547,6 +552,14 @@ typedef struct drm_i915_private {
struct list_head fence_list;
/**
+ * List of objects currently pending being freed.
+ *
+ * These objects are no longer in use, but due to a signal
+ * we were prevented from freeing them at the appointed time.
+ */
+ struct list_head deferred_free_list;
+
+ /**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
@@ -677,7 +690,7 @@ struct drm_i915_gem_object {
*
* Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
*/
- int fence_reg : 5;
+ signed int fence_reg : 5;
/**
* Used for checking the object doesn't appear more than once
@@ -713,7 +726,7 @@ struct drm_i915_gem_object {
*
* In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
* bits with absolutely no headroom. So use 4 bits. */
- int pin_count : 4;
+ unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
/** AGP memory structure for our GTT binding. */
@@ -743,7 +756,7 @@ struct drm_i915_gem_object {
uint32_t stride;
/** Record of address bit 17 of each page at last unbind. */
- long *bit_17;
+ unsigned long *bit_17;
/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
uint32_t agp_type;
@@ -955,8 +968,7 @@ uint32_t i915_get_gem_seqno(struct drm_device *dev,
bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_work_handler(struct work_struct *work);
void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -986,7 +998,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
void i915_gem_object_put_pages(struct drm_gem_object *obj);
void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
+int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
void i915_gem_shrinker_init(void);
void i915_gem_shrinker_exit(void);
@@ -1046,6 +1058,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void i8xx_disable_fbc(struct drm_device *dev);
extern void g4x_disable_fbc(struct drm_device *dev);
+extern void ironlake_disable_fbc(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
@@ -1135,6 +1148,8 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
#define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5aa747fc25a9..0758c7802e6b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,7 +35,7 @@
#include <linux/swap.h>
#include <linux/pci.h>
-static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -53,6 +53,7 @@ static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv);
+static void i915_gem_free_object_tail(struct drm_gem_object *obj);
static LIST_HEAD(shrink_list);
static DEFINE_SPINLOCK(shrink_list_lock);
@@ -127,8 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- drm_gem_object_handle_unreference_unlocked(obj);
-
+ drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -456,7 +456,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
/* Bounds check source.
@@ -496,10 +496,10 @@ fast_user_write(struct io_mapping *mapping,
char *vaddr_atomic;
unsigned long unwritten;
- vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
+ vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base, KM_USER0);
unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
user_data, length);
- io_mapping_unmap_atomic(vaddr_atomic);
+ io_mapping_unmap_atomic(vaddr_atomic, KM_USER0);
if (unwritten)
return -EFAULT;
return 0;
@@ -919,7 +919,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
/* Bounds check destination.
@@ -1002,7 +1002,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
@@ -1060,7 +1060,7 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ return -ENOENT;
}
#if WATCH_BUF
@@ -1099,7 +1099,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
offset = args->offset;
@@ -1373,7 +1373,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EBADF;
+ return -ENOENT;
mutex_lock(&dev->struct_mutex);
@@ -1709,9 +1709,9 @@ i915_get_gem_seqno(struct drm_device *dev,
/**
* This function clears the request list as sequence numbers are passed.
*/
-void
-i915_gem_retire_requests(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void
+i915_gem_retire_requests_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
@@ -1751,6 +1751,30 @@ i915_gem_retire_requests(struct drm_device *dev,
}
void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ if (!list_empty(&dev_priv->mm.deferred_free_list)) {
+ struct drm_i915_gem_object *obj_priv, *tmp;
+
+ /* We must be careful that during unbind() we do not
+ * accidentally infinitely recurse into retire requests.
+ * Currently:
+ * retire -> free -> unbind -> wait -> retire_ring
+ */
+ list_for_each_entry_safe(obj_priv, tmp,
+ &dev_priv->mm.deferred_free_list,
+ list)
+ i915_gem_free_object_tail(&obj_priv->base);
+ }
+
+ i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
+ if (HAS_BSD(dev))
+ i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
+}
+
+void
i915_gem_retire_work_handler(struct work_struct *work)
{
drm_i915_private_t *dev_priv;
@@ -1761,10 +1785,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
dev = dev_priv->dev;
mutex_lock(&dev->struct_mutex);
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
(!list_empty(&dev_priv->render_ring.request_list) ||
@@ -1832,7 +1853,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
* a separate wait queue to handle that.
*/
if (ret == 0)
- i915_gem_retire_requests(dev, ring);
+ i915_gem_retire_requests_ring(dev, ring);
return ret;
}
@@ -1945,11 +1966,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
* before we unbind.
*/
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
- if (ret) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("set_domain failed: %d\n", ret);
+ if (ret == -ERESTARTSYS)
return ret;
- }
+ /* Continue on if we fail due to EIO; the GPU is hung, so we
+ * should be safe and we need to clean up, or else we might
+ * cause memory corruption through use-after-free.
+ */
BUG_ON(obj_priv->active);
@@ -1985,7 +2007,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
trace_i915_gem_object_unbind(obj);
- return 0;
+ return ret;
}
static struct drm_gem_object *
@@ -2107,10 +2129,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
for (;;) {
- i915_gem_retire_requests(dev, render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, bsd_ring);
+ i915_gem_retire_requests(dev);
/* If there's an inactive buffer available now, grab it
* and be done.
@@ -2583,7 +2602,10 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
if (!IS_I965G(dev)) {
int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return ret;
+
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
@@ -2634,10 +2656,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
if (free_space != NULL) {
obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
alignment);
- if (obj_priv->gtt_space != NULL) {
- obj_priv->gtt_space->private = obj;
+ if (obj_priv->gtt_space != NULL)
obj_priv->gtt_offset = obj_priv->gtt_space->start;
- }
}
if (obj_priv->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
@@ -2733,7 +2753,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
}
/** Flushes any GPU write domain for the object if it's dirty. */
-static void
+static int
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
@@ -2741,17 +2761,18 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return;
+ return 0;
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- (void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
- BUG_ON(obj->write_domain);
+ if (i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring) == 0)
+ return -ENOMEM;
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
+ return 0;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2795,9 +2816,11 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
old_write_domain);
}
-void
+int
i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
{
+ int ret = 0;
+
switch (obj->write_domain) {
case I915_GEM_DOMAIN_GTT:
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2806,9 +2829,11 @@ i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
i915_gem_object_flush_cpu_write_domain(obj);
break;
default:
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
break;
}
+
+ return ret;
}
/**
@@ -2828,7 +2853,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret != 0)
+ return ret;
+
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
@@ -2878,7 +2906,9 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
/* Wait on any GPU rendering and flushing to occur. */
if (obj_priv->active) {
@@ -2926,7 +2956,10 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
@@ -3216,7 +3249,10 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
+ if (ret)
+ return ret;
+
/* Wait on any GPU rendering and flushing to occur. */
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
@@ -3328,7 +3364,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc->target_handle);
if (target_obj == NULL) {
i915_gem_object_unpin(obj);
- return -EBADF;
+ return -ENOENT;
}
target_obj_priv = to_intel_bo(target_obj);
@@ -3451,7 +3487,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
reloc_offset = obj_priv->gtt_offset + reloc->offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
(reloc_offset &
- ~(PAGE_SIZE - 1)));
+ ~(PAGE_SIZE - 1)),
+ KM_USER0);
reloc_entry = (uint32_t __iomem *)(reloc_page +
(reloc_offset & (PAGE_SIZE - 1)));
reloc_val = target_obj_priv->gtt_offset + reloc->delta;
@@ -3462,7 +3499,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
readl(reloc_entry), reloc_val);
#endif
writel(reloc_val, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
+ io_mapping_unmap_atomic(reloc_page, KM_USER0);
/* The updated presumed offset for this entry will be
* copied back out to the user.
@@ -3744,7 +3781,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
exec_list[i].handle, i);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
- ret = -EBADF;
+ ret = -ENOENT;
goto err;
}
@@ -3754,7 +3791,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
object_list[i]);
/* prevent error path from reading uninitialized data */
args->buffer_count = i + 1;
- ret = -EBADF;
+ ret = -EINVAL;
goto err;
}
obj_priv->in_execbuffer = true;
@@ -4228,7 +4265,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ return -ENOENT;
}
obj_priv = to_intel_bo(obj);
@@ -4284,7 +4321,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
args->handle);
mutex_unlock(&dev->struct_mutex);
- return -EBADF;
+ return -ENOENT;
}
obj_priv = to_intel_bo(obj);
@@ -4313,13 +4350,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_busy *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- drm_i915_private_t *dev_priv = dev->dev_private;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
args->handle);
- return -EBADF;
+ return -ENOENT;
}
mutex_lock(&dev->struct_mutex);
@@ -4328,10 +4364,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* actually unmasked, and our working set ends up being larger than
* required.
*/
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests(dev);
obj_priv = to_intel_bo(obj);
/* Don't count being on the flushing list against the object being
@@ -4375,7 +4408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (obj == NULL) {
DRM_ERROR("Bad handle in i915_gem_madvise_ioctl(): %d\n",
args->handle);
- return -EBADF;
+ return -ENOENT;
}
mutex_lock(&dev->struct_mutex);
@@ -4441,20 +4474,19 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-void i915_gem_free_object(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ int ret;
- trace_i915_gem_object_destroy(obj);
-
- while (obj_priv->pin_count > 0)
- i915_gem_object_unpin(obj);
-
- if (obj_priv->phys_obj)
- i915_gem_detach_phys_object(dev, obj);
-
- i915_gem_object_unbind(obj);
+ ret = i915_gem_object_unbind(obj);
+ if (ret == -ERESTARTSYS) {
+ list_move(&obj_priv->list,
+ &dev_priv->mm.deferred_free_list);
+ return;
+ }
if (obj_priv->mmap_offset)
i915_gem_free_mmap_offset(obj);
@@ -4466,6 +4498,22 @@ void i915_gem_free_object(struct drm_gem_object *obj)
kfree(obj_priv);
}
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+
+ trace_i915_gem_object_destroy(obj);
+
+ while (obj_priv->pin_count > 0)
+ i915_gem_object_unpin(obj);
+
+ if (obj_priv->phys_obj)
+ i915_gem_detach_phys_object(dev, obj);
+
+ i915_gem_free_object_tail(obj);
+}
+
/** Unbinds all inactive objects. */
static int
i915_gem_evict_from_inactive_list(struct drm_device *dev)
@@ -4689,9 +4737,19 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
mutex_unlock(&dev->struct_mutex);
- drm_irq_install(dev);
+ ret = drm_irq_install(dev);
+ if (ret)
+ goto cleanup_ringbuffer;
return 0;
+
+cleanup_ringbuffer:
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_cleanup_ringbuffer(dev);
+ dev_priv->mm.suspended = 1;
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
int
@@ -4729,6 +4787,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+ INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
if (HAS_BSD(dev)) {
@@ -5027,10 +5086,7 @@ rescan:
continue;
spin_unlock(&shrink_list_lock);
- i915_gem_retire_requests(dev, &dev_priv->render_ring);
-
- if (HAS_BSD(dev))
- i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+ i915_gem_retire_requests(dev);
list_for_each_entry_safe(obj_priv, next_obj,
&dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4b7c49d4257d..710eca70b323 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -275,7 +275,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EINVAL;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
@@ -333,8 +333,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
i915_gem_release_mmap(obj);
if (ret != 0) {
- WARN(ret != -ERESTARTSYS,
- "failed to reset object for tiling switch");
args->tiling_mode = obj_priv->tiling_mode;
args->stride = obj_priv->stride;
goto err;
@@ -364,7 +362,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL)
- return -EINVAL;
+ return -ENOENT;
obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index dba53d4b9fb3..85785a8844ed 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -171,10 +171,10 @@ void intel_enable_asle (struct drm_device *dev)
ironlake_enable_display_irq(dev_priv, DE_GSE);
else {
i915_enable_pipestat(dev_priv, 1,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
if (IS_I965G(dev))
i915_enable_pipestat(dev_priv, 0,
- I915_LEGACY_BLC_EVENT_ENABLE);
+ PIPE_LEGACY_BLC_EVENT_ENABLE);
}
}
@@ -842,7 +842,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
u32 iir, new_iir;
u32 pipea_stats, pipeb_stats;
u32 vblank_status;
- u32 vblank_enable;
int vblank = 0;
unsigned long irqflags;
int irq_received;
@@ -856,13 +855,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
iir = I915_READ(IIR);
- if (IS_I965G(dev)) {
- vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
- vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
- } else {
- vblank_status = I915_VBLANK_INTERRUPT_STATUS;
- vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
- }
+ if (IS_I965G(dev))
+ vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
+ else
+ vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
for (;;) {
irq_received = iir != 0;
@@ -966,8 +962,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
intel_finish_page_flip(dev, 1);
}
- if ((pipea_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
- (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
+ if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
+ (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
(iir & I915_ASLE_INTERRUPT))
opregion_asle_intr(dev);
@@ -1233,16 +1229,21 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd;
+ uint32_t acthd, instdone, instdone1;
/* No reset support on this chip yet. */
if (IS_GEN6(dev))
return;
- if (!IS_I965G(dev))
+ if (!IS_I965G(dev)) {
acthd = I915_READ(ACTHD);
- else
+ instdone = I915_READ(INSTDONE);
+ instdone1 = 0;
+ } else {
acthd = I915_READ(ACTHD_I965);
+ instdone = I915_READ(INSTDONE_I965);
+ instdone1 = I915_READ(INSTDONE1);
+ }
/* If all work is done then ACTHD clearly hasn't advanced. */
if (list_empty(&dev_priv->render_ring.request_list) ||
@@ -1253,21 +1254,24 @@ void i915_hangcheck_elapsed(unsigned long data)
return;
}
- if (dev_priv->last_acthd == acthd && dev_priv->hangcheck_count > 0) {
- DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
- i915_handle_error(dev, true);
- return;
- }
+ if (dev_priv->last_acthd == acthd &&
+ dev_priv->last_instdone == instdone &&
+ dev_priv->last_instdone1 == instdone1) {
+ if (dev_priv->hangcheck_count++ > 1) {
+ DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+ i915_handle_error(dev, true);
+ return;
+ }
+ } else {
+ dev_priv->hangcheck_count = 0;
+
+ dev_priv->last_acthd = acthd;
+ dev_priv->last_instdone = instdone;
+ dev_priv->last_instdone1 = instdone1;
+ }
/* Reset timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-
- if (acthd != dev_priv->last_acthd)
- dev_priv->hangcheck_count = 0;
- else
- dev_priv->hangcheck_count++;
-
- dev_priv->last_acthd = acthd;
}
/* drm_dma.h hooks
diff --git a/drivers/gpu/drm/i915/i915_opregion.c b/drivers/gpu/drm/i915/i915_opregion.c
index 8fcc75c1aa28..d1bf92b99788 100644
--- a/drivers/gpu/drm/i915/i915_opregion.c
+++ b/drivers/gpu/drm/i915/i915_opregion.c
@@ -535,6 +535,7 @@ int intel_opregion_init(struct drm_device *dev, int resume)
err_out:
iounmap(opregion->header);
opregion->header = NULL;
+ acpi_video_register();
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index cf41c672defe..281db6e5403a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,7 +442,7 @@
#define GEN6_RENDER_IMR 0x20a8
#define GEN6_RENDER_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GEN6_RENDER_PPGTT_PAGE_FAULT (1 << 7)
-#define GEN6_RENDER TIMEOUT_COUNTER_EXPIRED (1 << 6)
+#define GEN6_RENDER_TIMEOUT_COUNTER_EXPIRED (1 << 6)
#define GEN6_RENDER_L3_PARITY_ERROR (1 << 5)
#define GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 4)
#define GEN6_RENDER_COMMAND_PARSER_MASTER_ERROR (1 << 3)
@@ -530,6 +530,21 @@
#define DPFC_CHICKEN 0x3224
#define DPFC_HT_MODIFY (1<<31)
+/* Framebuffer compression for Ironlake */
+#define ILK_DPFC_CB_BASE 0x43200
+#define ILK_DPFC_CONTROL 0x43208
+/* Bits 28-8 are reserved */
+#define DPFC_RESERVED (0x1FFFFF00)
+#define ILK_DPFC_RECOMP_CTL 0x4320c
+#define ILK_DPFC_STATUS 0x43210
+#define ILK_DPFC_FENCE_YOFF 0x43218
+#define ILK_DPFC_CHICKEN 0x43224
+#define ILK_FBC_RT_BASE 0x2128
+#define ILK_FBC_RT_VALID (1<<0)
+
+#define ILK_DISPLAY_CHICKEN1 0x42000
+#define ILK_FBCQ_DIS (1<<22)
+
/*
* GPIO regs
*/
@@ -595,32 +610,6 @@
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW 0x00ff8000 /* Pineview */
-#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
-#define I915_CRC_ERROR_ENABLE (1UL<<29)
-#define I915_CRC_DONE_ENABLE (1UL<<28)
-#define I915_GMBUS_EVENT_ENABLE (1UL<<27)
-#define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25)
-#define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
-#define I915_DPST_EVENT_ENABLE (1UL<<23)
-#define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
-#define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
-#define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
-#define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17)
-#define I915_OVERLAY_UPDATED_ENABLE (1UL<<16)
-#define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
-#define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
-#define I915_GMBUS_INTERRUPT_STATUS (1UL<<11)
-#define I915_VSYNC_INTERRUPT_STATUS (1UL<<9)
-#define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8)
-#define I915_DPST_EVENT_STATUS (1UL<<7)
-#define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6)
-#define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5)
-#define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4)
-#define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
-#define I915_VBLANK_INTERRUPT_STATUS (1UL<<1)
-#define I915_OVERLAY_UPDATED_STATUS (1UL<<0)
-
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
@@ -2166,7 +2155,8 @@
#define I830_FIFO_LINE_SIZE 32
#define G4X_FIFO_SIZE 127
-#define I945_FIFO_SIZE 127 /* 945 & 965 */
+#define I965_FIFO_SIZE 512
+#define I945_FIFO_SIZE 127
#define I915_FIFO_SIZE 95
#define I855GM_FIFO_SIZE 127 /* In cachelines */
#define I830_FIFO_SIZE 95
@@ -2185,6 +2175,9 @@
#define PINEVIEW_CURSOR_DFT_WM 0
#define PINEVIEW_CURSOR_GUARD_WM 5
+#define I965_CURSOR_FIFO 64
+#define I965_CURSOR_MAX_WM 32
+#define I965_CURSOR_DFT_WM 8
/* define the Watermark register on Ironlake */
#define WM0_PIPEA_ILK 0x45100
@@ -2212,6 +2205,9 @@
#define ILK_DISPLAY_FIFO 128
#define ILK_DISPLAY_MAXWM 64
#define ILK_DISPLAY_DFTWM 8
+#define ILK_CURSOR_FIFO 32
+#define ILK_CURSOR_MAXWM 16
+#define ILK_CURSOR_DFTWM 8
#define ILK_DISPLAY_SR_FIFO 512
#define ILK_DISPLAY_MAX_SRWM 0x1ff
@@ -2510,6 +2506,10 @@
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
+/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
+#define ILK_CLK_FBC (1<<7)
+#define ILK_DPFC_DIS1 (1<<8)
+#define ILK_DPFC_DIS2 (1<<9)
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 60a5800fba6e..6e2025274db5 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -602,7 +602,9 @@ void i915_save_display(struct drm_device *dev)
/* Only save FBC state on the platform that supports FBC */
if (I915_HAS_FBC(dev)) {
- if (IS_GM45(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
+ dev_priv->saveDPFC_CB_BASE = I915_READ(ILK_DPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
dev_priv->saveDPFC_CB_BASE = I915_READ(DPFC_CB_BASE);
} else {
dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
@@ -706,7 +708,10 @@ void i915_restore_display(struct drm_device *dev)
/* only restore FBC info on the platform that supports FBC*/
if (I915_HAS_FBC(dev)) {
- if (IS_GM45(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
+ ironlake_disable_fbc(dev);
+ I915_WRITE(ILK_DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
+ } else if (IS_GM45(dev)) {
g4x_disable_fbc(dev);
I915_WRITE(DPFC_CB_BASE, dev_priv->saveDPFC_CB_BASE);
} else {
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fab21760dd57..fea97a21cc14 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -262,6 +262,42 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
TP_ARGS(dev)
);
+TRACE_EVENT(i915_flip_request,
+ TP_PROTO(int plane, struct drm_gem_object *obj),
+
+ TP_ARGS(plane, obj),
+
+ TP_STRUCT__entry(
+ __field(int, plane)
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
+TRACE_EVENT(i915_flip_complete,
+ TP_PROTO(int plane, struct drm_gem_object *obj),
+
+ TP_ARGS(plane, obj),
+
+ TP_STRUCT__entry(
+ __field(int, plane)
+ __field(struct drm_gem_object *, obj)
+ ),
+
+ TP_fast_assign(
+ __entry->plane = plane;
+ __entry->obj = obj;
+ ),
+
+ TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5e21b3119824..5ec10e02341b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -33,6 +33,7 @@
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
+#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
@@ -42,6 +43,7 @@
bool intel_pipe_has_type (struct drm_crtc *crtc, int type);
static void intel_update_watermarks(struct drm_device *dev);
static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule);
+static void intel_crtc_update_cursor(struct drm_crtc *crtc);
typedef struct {
/* given values */
@@ -322,6 +324,9 @@ struct intel_limit {
#define IRONLAKE_DP_P1_MIN 1
#define IRONLAKE_DP_P1_MAX 2
+/* FDI */
+#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
+
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
@@ -975,7 +980,10 @@ void
intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
- msleep(20);
+ if (in_dbg_master())
+ mdelay(20); /* The kernel debugger cannot call msleep() */
+ else
+ msleep(20);
}
/* Parameters have changed, update FBC info */
@@ -1122,6 +1130,67 @@ static bool g4x_fbc_enabled(struct drm_device *dev)
return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
+static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_framebuffer *fb = crtc->fb;
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+ struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int plane = (intel_crtc->plane == 0) ? DPFC_CTL_PLANEA :
+ DPFC_CTL_PLANEB;
+ unsigned long stall_watermark = 200;
+ u32 dpfc_ctl;
+
+ dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
+ dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_plane = intel_crtc->plane;
+
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= DPFC_RESERVED;
+ dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
+ if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
+ I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
+ } else {
+ I915_WRITE(ILK_DPFC_CHICKEN, ~DPFC_HT_MODIFY);
+ }
+
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
+ (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
+ (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
+ I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
+ I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ /* enable it... */
+ I915_WRITE(ILK_DPFC_CONTROL, I915_READ(ILK_DPFC_CONTROL) |
+ DPFC_CTL_EN);
+
+ DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
+}
+
+void ironlake_disable_fbc(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 dpfc_ctl;
+
+ /* Disable compression */
+ dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl &= ~DPFC_CTL_EN;
+ I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ intel_wait_for_vblank(dev);
+
+ DRM_DEBUG_KMS("disabled FBC\n");
+}
+
+static bool ironlake_fbc_enabled(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+}
+
bool intel_fbc_enabled(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1248,6 +1317,10 @@ static void intel_update_fbc(struct drm_crtc *crtc,
goto out_disable;
}
+ /* If the kernel debugger is active, always disable compression */
+ if (in_dbg_master())
+ goto out_disable;
+
if (intel_fbc_enabled(dev)) {
/* We can re-enable it in this case, but need to update pitch */
if ((fb->pitch > dev_priv->cfb_pitch) ||
@@ -1279,7 +1352,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
switch (obj_priv->tiling_mode) {
case I915_TILING_NONE:
- alignment = 64 * 1024;
+ if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
+ alignment = 128 * 1024;
+ else if (IS_I965G(dev))
+ alignment = 4 * 1024;
+ else
+ alignment = 64 * 1024;
break;
case I915_TILING_X:
/* pin() will align the object as required by fence */
@@ -1314,6 +1392,98 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, struct drm_gem_object *obj)
return 0;
}
+/* Assume fb object is pinned & idle & fenced and just update base pointers */
+static int
+intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int x, int y)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj_priv;
+ struct drm_gem_object *obj;
+ int plane = intel_crtc->plane;
+ unsigned long Start, Offset;
+ int dspbase = (plane == 0 ? DSPAADDR : DSPBADDR);
+ int dspsurf = (plane == 0 ? DSPASURF : DSPBSURF);
+ int dspstride = (plane == 0) ? DSPASTRIDE : DSPBSTRIDE;
+ int dsptileoff = (plane == 0 ? DSPATILEOFF : DSPBTILEOFF);
+ int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
+ u32 dspcntr;
+
+ switch (plane) {
+ case 0:
+ case 1:
+ break;
+ default:
+ DRM_ERROR("Can't update plane %d in SAREA\n", plane);
+ return -EINVAL;
+ }
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+ obj_priv = to_intel_bo(obj);
+
+ dspcntr = I915_READ(dspcntr_reg);
+ /* Mask out pixel format bits in case we change it */
+ dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
+ switch (fb->bits_per_pixel) {
+ case 8:
+ dspcntr |= DISPPLANE_8BPP;
+ break;
+ case 16:
+ if (fb->depth == 15)
+ dspcntr |= DISPPLANE_15_16BPP;
+ else
+ dspcntr |= DISPPLANE_16BPP;
+ break;
+ case 24:
+ case 32:
+ dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
+ break;
+ default:
+ DRM_ERROR("Unknown color depth\n");
+ return -EINVAL;
+ }
+ if (IS_I965G(dev)) {
+ if (obj_priv->tiling_mode != I915_TILING_NONE)
+ dspcntr |= DISPPLANE_TILED;
+ else
+ dspcntr &= ~DISPPLANE_TILED;
+ }
+
+ if (IS_IRONLAKE(dev))
+ /* must disable */
+ dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
+
+ I915_WRITE(dspcntr_reg, dspcntr);
+
+ Start = obj_priv->gtt_offset;
+ Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+
+ DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
+ I915_WRITE(dspstride, fb->pitch);
+ if (IS_I965G(dev)) {
+ I915_WRITE(dspbase, Offset);
+ I915_READ(dspbase);
+ I915_WRITE(dspsurf, Start);
+ I915_READ(dspsurf);
+ I915_WRITE(dsptileoff, (y << 16) | x);
+ } else {
+ I915_WRITE(dspbase, Start + Offset);
+ I915_READ(dspbase);
+ }
+
+ if ((IS_I965G(dev) || plane == 0))
+ intel_update_fbc(crtc, &crtc->mode);
+
+ intel_wait_for_vblank(dev);
+ intel_increase_pllclock(crtc, true);
+
+ return 0;
+}
+
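As an inference from the rest of this patch (not stated in the code itself), this lock-free, non-sleeping entry point appears to exist for the kernel-debugger console path: later hunks register it as the mode_set_base_atomic helper and hook drm_fb_helper_debug_enter/leave into intelfb_ops. A minimal sketch of that wiring, which the actual hunks below perform:

/* Sketch only -- the real assignment appears later in this patch. */
static const struct drm_crtc_helper_funcs example_helper_funcs = {
	.mode_set_base        = intel_pipe_set_base,
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	/* ... */
};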
static int
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
@@ -1554,6 +1724,15 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
u32 temp, tries = 0;
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
/* enable CPU FDI TX and PCH FDI RX */
temp = I915_READ(fdi_tx_reg);
temp |= FDI_TX_ENABLE;
@@ -1571,16 +1750,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
I915_READ(fdi_rx_reg);
udelay(150);
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
-
- for (;;) {
+ for (tries = 0; tries < 5; tries++) {
temp = I915_READ(fdi_rx_iir_reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
@@ -1590,14 +1760,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
temp | FDI_RX_BIT_LOCK);
break;
}
-
- tries++;
-
- if (tries > 5) {
- DRM_DEBUG_KMS("FDI train 1 fail!\n");
- break;
- }
}
+ if (tries == 5)
+ DRM_DEBUG_KMS("FDI train 1 fail!\n");
/* Train 2 */
temp = I915_READ(fdi_tx_reg);
@@ -1613,7 +1778,7 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
tries = 0;
- for (;;) {
+ for (tries = 0; tries < 5; tries++) {
temp = I915_READ(fdi_rx_iir_reg);
DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
@@ -1623,14 +1788,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train 2 done.\n");
break;
}
-
- tries++;
-
- if (tries > 5) {
- DRM_DEBUG_KMS("FDI train 2 fail!\n");
- break;
- }
}
+ if (tries == 5)
+ DRM_DEBUG_KMS("FDI train 2 fail!\n");
DRM_DEBUG_KMS("FDI train done\n");
}
@@ -1655,6 +1815,15 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
u32 temp, i;
+ /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
+ for train result */
+ temp = I915_READ(fdi_rx_imr_reg);
+ temp &= ~FDI_RX_SYMBOL_LOCK;
+ temp &= ~FDI_RX_BIT_LOCK;
+ I915_WRITE(fdi_rx_imr_reg, temp);
+ I915_READ(fdi_rx_imr_reg);
+ udelay(150);
+
/* enable CPU FDI TX and PCH FDI RX */
temp = I915_READ(fdi_tx_reg);
temp |= FDI_TX_ENABLE;
@@ -1680,15 +1849,6 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
I915_READ(fdi_rx_reg);
udelay(150);
- /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
- for train result */
- temp = I915_READ(fdi_rx_imr_reg);
- temp &= ~FDI_RX_SYMBOL_LOCK;
- temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(fdi_rx_imr_reg, temp);
- I915_READ(fdi_rx_imr_reg);
- udelay(150);
-
for (i = 0; i < 4; i++ ) {
temp = I915_READ(fdi_tx_reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
@@ -1843,7 +2003,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
}
/* Enable panel fitting for LVDS */
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
+ || HAS_eDP || intel_pch_has_edp(crtc)) {
temp = I915_READ(pf_ctl_reg);
I915_WRITE(pf_ctl_reg, temp | PF_ENABLE | PF_FILTER_MED_3x3);
@@ -1938,9 +2099,12 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
reg = I915_READ(trans_dp_ctl);
reg &= ~TRANS_DP_PORT_SEL_MASK;
reg = TRANS_DP_OUTPUT_ENABLE |
- TRANS_DP_ENH_FRAMING |
- TRANS_DP_VSYNC_ACTIVE_HIGH |
- TRANS_DP_HSYNC_ACTIVE_HIGH;
+ TRANS_DP_ENH_FRAMING;
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
+ reg |= TRANS_DP_HSYNC_ACTIVE_HIGH;
+ if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
+ reg |= TRANS_DP_VSYNC_ACTIVE_HIGH;
switch (intel_trans_dp_port_sel(crtc)) {
case PCH_DP_B:
@@ -1980,6 +2144,8 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
intel_crtc_load_lut(crtc);
+ intel_update_fbc(crtc, &crtc->mode);
+
break;
case DRM_MODE_DPMS_OFF:
DRM_DEBUG_KMS("crtc %d dpms off\n", pipe);
@@ -1994,6 +2160,10 @@ static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
I915_READ(dspbase_reg);
}
+ if (dev_priv->cfb_plane == plane &&
+ dev_priv->display.disable_fbc)
+ dev_priv->display.disable_fbc(dev);
+
i915_disable_vga(dev);
/* disable cpu pipe, disable after all planes disabled */
@@ -2373,8 +2543,8 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
- if (mode->clock * 3 > 27000 * 4)
- return MODE_CLOCK_HIGH;
+ if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
+ return false;
}
return true;
}
@@ -2556,6 +2726,20 @@ static struct intel_watermark_params g4x_wm_info = {
2,
G4X_FIFO_LINE_SIZE,
};
+static struct intel_watermark_params g4x_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ G4X_FIFO_LINE_SIZE,
+};
+static struct intel_watermark_params i965_cursor_wm_info = {
+ I965_CURSOR_FIFO,
+ I965_CURSOR_MAX_WM,
+ I965_CURSOR_DFT_WM,
+ 2,
+ I915_FIFO_LINE_SIZE,
+};
static struct intel_watermark_params i945_wm_info = {
I945_FIFO_SIZE,
I915_MAX_WM,
@@ -2593,6 +2777,14 @@ static struct intel_watermark_params ironlake_display_wm_info = {
ILK_FIFO_LINE_SIZE
};
+static struct intel_watermark_params ironlake_cursor_wm_info = {
+ ILK_CURSOR_FIFO,
+ ILK_CURSOR_MAXWM,
+ ILK_CURSOR_DFTWM,
+ 2,
+ ILK_FIFO_LINE_SIZE
+};
+
static struct intel_watermark_params ironlake_display_srwm_info = {
ILK_DISPLAY_SR_FIFO,
ILK_DISPLAY_MAX_SRWM,
@@ -2642,7 +2834,7 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
*/
entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
1000;
- entries_required /= wm->cacheline_size;
+ entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
DRM_DEBUG_KMS("FIFO entries required for mode: %d\n", entries_required);
@@ -2653,8 +2845,14 @@ static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
/* Don't promote wm_size to unsigned... */
if (wm_size > (long)wm->max_wm)
wm_size = wm->max_wm;
- if (wm_size <= 0)
+ if (wm_size <= 0) {
wm_size = wm->default_wm;
+ DRM_ERROR("Insufficient FIFO for plane, expect flickering:"
+ " entries required = %ld, available = %lu.\n",
+ entries_required + wm->guard_size,
+ wm->fifo_size);
+ }
+
return wm_size;
}
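A worked example of the rounding change above, with purely hypothetical numbers (a 108 MHz dot clock, 4 bytes per pixel, 6000 ns of latency and a 64-byte FIFO line):

/*
 * bytes needed per latency window:
 *     (108000 kHz / 1000) * 4 B/px * 6000 ns / 1000 = 2592 bytes
 *
 * FIFO entries:
 *     old: 2592 / 64              = 40   (truncates the partial line)
 *     new: DIV_ROUND_UP(2592, 64) = 41   (covers the partial line)
 *
 * Rounding up ensures the watermark accounts for a partially filled
 * FIFO line instead of under-estimating it.
 */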
@@ -2763,11 +2961,9 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (plane == 0)
- size = dsparb & 0x7f;
- else
- size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) -
- (dsparb & 0x7f);
+ size = dsparb & 0x7f;
+ if (plane)
+ size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
plane ? "B" : "A", size);
@@ -2781,11 +2977,9 @@ static int i85x_get_fifo_size(struct drm_device *dev, int plane)
uint32_t dsparb = I915_READ(DSPARB);
int size;
- if (plane == 0)
- size = dsparb & 0x1ff;
- else
- size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) -
- (dsparb & 0x1ff);
+ size = dsparb & 0x1ff;
+ if (plane)
+ size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
size >>= 1; /* Convert to cachelines */
DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
@@ -2826,7 +3020,8 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
}
static void pineview_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int unused,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
@@ -2891,7 +3086,8 @@ static void pineview_update_wm(struct drm_device *dev, int planea_clock,
}
static void g4x_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int total_size, cacheline_size;
@@ -2915,12 +3111,12 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
*/
entries_required = ((planea_clock / 1000) * pixel_size * latency_ns) /
1000;
- entries_required /= G4X_FIFO_LINE_SIZE;
+ entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
planea_wm = entries_required + planea_params.guard_size;
entries_required = ((planeb_clock / 1000) * pixel_size * latency_ns) /
1000;
- entries_required /= G4X_FIFO_LINE_SIZE;
+ entries_required = DIV_ROUND_UP(entries_required, G4X_FIFO_LINE_SIZE);
planeb_wm = entries_required + planeb_params.guard_size;
cursora_wm = cursorb_wm = 16;
@@ -2934,13 +3130,24 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / cacheline_size, 1);
- DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
+
+ entries_required = (((sr_latency_ns / line_time_us) +
+ 1000) / 1000) * pixel_size * 64;
+ entries_required = DIV_ROUND_UP(entries_required,
+ g4x_cursor_wm_info.cacheline_size);
+ cursor_sr = entries_required + g4x_cursor_wm_info.guard_size;
+
+ if (cursor_sr > g4x_cursor_wm_info.max_wm)
+ cursor_sr = g4x_cursor_wm_info.max_wm;
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", sr_entries, cursor_sr);
+
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
/* Turn off self refresh if both pipes are enabled */
@@ -2965,11 +3172,13 @@ static void g4x_update_wm(struct drm_device *dev, int planea_clock,
}
static void i965_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long line_time_us;
int sr_clock, sr_entries, srwm = 1;
+ int cursor_sr = 16;
/* Calc sr entries for one plane configs */
if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
@@ -2977,17 +3186,31 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
static const int sr_latency_ns = 12000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / I915_FIFO_LINE_SIZE, 1);
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, I915_FIFO_LINE_SIZE);
DRM_DEBUG("self-refresh entries: %d\n", sr_entries);
- srwm = I945_FIFO_SIZE - sr_entries;
+ srwm = I965_FIFO_SIZE - sr_entries;
if (srwm < 0)
srwm = 1;
- srwm &= 0x3f;
+ srwm &= 0x1ff;
+
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * 64;
+ sr_entries = DIV_ROUND_UP(sr_entries,
+ i965_cursor_wm_info.cacheline_size);
+ cursor_sr = i965_cursor_wm_info.fifo_size -
+ (sr_entries + i965_cursor_wm_info.guard_size);
+
+ if (cursor_sr > i965_cursor_wm_info.max_wm)
+ cursor_sr = i965_cursor_wm_info.max_wm;
+
+ DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
+ "cursor %d\n", srwm, cursor_sr);
+
if (IS_I965GM(dev))
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
} else {
@@ -3004,10 +3227,13 @@ static void i965_update_wm(struct drm_device *dev, int planea_clock,
I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | (8 << 16) | (8 << 8) |
(8 << 0));
I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
+ /* update cursor SR watermark */
+ I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo;
@@ -3052,12 +3278,12 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
static const int sr_latency_ns = 6000;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
- sr_entries = (((sr_latency_ns / line_time_us) + 1) *
- pixel_size * sr_hdisplay) / 1000;
- sr_entries = roundup(sr_entries / cacheline_size, 1);
+ sr_entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
+ pixel_size * sr_hdisplay;
+ sr_entries = DIV_ROUND_UP(sr_entries, cacheline_size);
DRM_DEBUG_KMS("self-refresh entries: %d\n", sr_entries);
srwm = total_size - sr_entries;
if (srwm < 0)
@@ -3095,7 +3321,7 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
}
static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
- int unused2, int pixel_size)
+ int unused2, int unused3, int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t fwater_lo = I915_READ(FW_BLC) & ~0xfff;
@@ -3113,9 +3339,11 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
}
#define ILK_LP0_PLANE_LATENCY 700
+#define ILK_LP0_CURSOR_LATENCY 1300
static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
- int planeb_clock, int sr_hdisplay, int pixel_size)
+ int planeb_clock, int sr_hdisplay, int sr_htotal,
+ int pixel_size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
@@ -3123,20 +3351,48 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
unsigned long line_time_us;
int sr_clock, entries_required;
u32 reg_value;
+ int line_count;
+ int planea_htotal = 0, planeb_htotal = 0;
+ struct drm_crtc *crtc;
+ struct intel_crtc *intel_crtc;
+
+ /* Need htotal for all active display plane */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ intel_crtc = to_intel_crtc(crtc);
+ if (crtc->enabled) {
+ if (intel_crtc->plane == 0)
+ planea_htotal = crtc->mode.htotal;
+ else
+ planeb_htotal = crtc->mode.htotal;
+ }
+ }
/* Calculate and update the watermark for plane A */
if (planea_clock) {
entries_required = ((planea_clock / 1000) * pixel_size *
ILK_LP0_PLANE_LATENCY) / 1000;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
+ ironlake_display_wm_info.cacheline_size);
planea_wm = entries_required +
ironlake_display_wm_info.guard_size;
if (planea_wm > (int)ironlake_display_wm_info.max_wm)
planea_wm = ironlake_display_wm_info.max_wm;
- cursora_wm = 16;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = (planea_htotal * 1000) / planea_clock;
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+ /* calculate the cursor watermark for cursor A */
+ entries_required = line_count * 64 * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_wm_info.cacheline_size);
+ cursora_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+ if (cursora_wm > ironlake_cursor_wm_info.max_wm)
+ cursora_wm = ironlake_cursor_wm_info.max_wm;
+
reg_value = I915_READ(WM0_PIPEA_ILK);
reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
reg_value |= (planea_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3150,14 +3406,27 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
entries_required = ((planeb_clock / 1000) * pixel_size *
ILK_LP0_PLANE_LATENCY) / 1000;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_wm_info.cacheline_size);
+ ironlake_display_wm_info.cacheline_size);
planeb_wm = entries_required +
ironlake_display_wm_info.guard_size;
if (planeb_wm > (int)ironlake_display_wm_info.max_wm)
planeb_wm = ironlake_display_wm_info.max_wm;
- cursorb_wm = 16;
+ /* Use the large buffer method to calculate cursor watermark */
+ line_time_us = (planeb_htotal * 1000) / planeb_clock;
+
+ /* Use ns/us then divide to preserve precision */
+ line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+
+ /* calculate the cursor watermark for cursor B */
+ entries_required = line_count * 64 * pixel_size;
+ entries_required = DIV_ROUND_UP(entries_required,
+ ironlake_cursor_wm_info.cacheline_size);
+ cursorb_wm = entries_required + ironlake_cursor_wm_info.guard_size;
+ if (cursorb_wm > ironlake_cursor_wm_info.max_wm)
+ cursorb_wm = ironlake_cursor_wm_info.max_wm;
+
reg_value = I915_READ(WM0_PIPEB_ILK);
reg_value &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
reg_value |= (planeb_wm << WM0_PIPE_PLANE_SHIFT) |
@@ -3172,12 +3441,12 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
* display plane is used.
*/
if (!planea_clock || !planeb_clock) {
- int line_count;
+
/* Read the self-refresh latency. The unit is 0.5us */
int ilk_sr_latency = I915_READ(MLTR_ILK) & ILK_SRLT_MASK;
sr_clock = planea_clock ? planea_clock : planeb_clock;
- line_time_us = ((sr_hdisplay * 1000) / sr_clock);
+ line_time_us = ((sr_htotal * 1000) / sr_clock);
/* Use ns/us then divide to preserve precision */
line_count = ((ilk_sr_latency * 500) / line_time_us + 1000)
@@ -3186,14 +3455,14 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
/* calculate the self-refresh watermark for display plane */
entries_required = line_count * sr_hdisplay * pixel_size;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_display_srwm_info.cacheline_size);
+ ironlake_display_srwm_info.cacheline_size);
sr_wm = entries_required +
ironlake_display_srwm_info.guard_size;
/* calculate the self-refresh watermark for display cursor */
entries_required = line_count * pixel_size * 64;
entries_required = DIV_ROUND_UP(entries_required,
- ironlake_cursor_srwm_info.cacheline_size);
+ ironlake_cursor_srwm_info.cacheline_size);
cursor_wm = entries_required +
ironlake_cursor_srwm_info.guard_size;
@@ -3237,6 +3506,7 @@ static void ironlake_update_wm(struct drm_device *dev, int planea_clock,
* bytes per pixel
* where
* line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
* and latency is assumed to be high, as above.
*
* The final value programmed to the register should always be rounded up,
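A hypothetical worked example of the cursor method documented in the comment above (all numbers invented for illustration; the guard size of 2 and the 64-pixel cursor width come from the watermark tables added earlier in this patch):

/*
 * mode: htotal = 2200, dotclock = 148500 kHz, 4 B/px cursor,
 * latency = 1300 ns (ILK_LP0_CURSOR_LATENCY), 64-byte FIFO line:
 *
 *     line_time  = 2200 * 1000 / 148500 ~= 14 us
 *     line_count = roundup(1.3 us / 14 us)  = 1 line
 *     bytes      = 1 line * 64 px * 4 B/px  = 256 bytes
 *     entries    = DIV_ROUND_UP(256, 64)    = 4
 *     cursor_wm  = 4 + guard(2)             = 6
 *
 * compared with the fixed cursora_wm/cursorb_wm = 16 that the old
 * code used regardless of the mode.
 */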
@@ -3253,6 +3523,7 @@ static void intel_update_watermarks(struct drm_device *dev)
int sr_hdisplay = 0;
unsigned long planea_clock = 0, planeb_clock = 0, sr_clock = 0;
int enabled = 0, pixel_size = 0;
+ int sr_htotal = 0;
if (!dev_priv->display.update_wm)
return;
@@ -3273,6 +3544,7 @@ static void intel_update_watermarks(struct drm_device *dev)
}
sr_hdisplay = crtc->mode.hdisplay;
sr_clock = crtc->mode.clock;
+ sr_htotal = crtc->mode.htotal;
if (crtc->fb)
pixel_size = crtc->fb->bits_per_pixel / 8;
else
@@ -3284,7 +3556,7 @@ static void intel_update_watermarks(struct drm_device *dev)
return;
dev_priv->display.update_wm(dev, planea_clock, planeb_clock,
- sr_hdisplay, pixel_size);
+ sr_hdisplay, sr_htotal, pixel_size);
}
static int intel_crtc_mode_set(struct drm_crtc *crtc,
@@ -3403,6 +3675,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
}
+ /* Ensure that the cursor is valid for the new mode before changing... */
+ intel_crtc_update_cursor(crtc);
+
if (is_lvds && dev_priv->lvds_downclock_avail) {
has_reduced_clock = limit->find_pll(limit, crtc,
dev_priv->lvds_downclock,
@@ -3469,7 +3744,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
temp |= PIPE_8BPC;
else
temp |= PIPE_6BPC;
- } else if (is_edp) {
+ } else if (is_edp || (is_dp && intel_pch_has_edp(crtc))) {
switch (dev_priv->edp_bpp/3) {
case 8:
temp |= PIPE_8BPC;
@@ -3712,6 +3987,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
udelay(150);
}
+ if (HAS_PCH_SPLIT(dev)) {
+ pipeconf &= ~PIPE_ENABLE_DITHER;
+ pipeconf &= ~PIPE_DITHER_TYPE_MASK;
+ }
+
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
@@ -3754,16 +4034,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (dev_priv->lvds_dither) {
if (HAS_PCH_SPLIT(dev)) {
pipeconf |= PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
pipeconf |= PIPE_DITHER_TYPE_ST01;
} else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (HAS_PCH_SPLIT(dev)) {
- pipeconf &= ~PIPE_ENABLE_DITHER;
- pipeconf &= ~PIPE_DITHER_TYPE_MASK;
- } else
+ if (!HAS_PCH_SPLIT(dev)) {
lvds &= ~LVDS_ENABLE_DITHER;
+ }
}
}
I915_WRITE(lvds_reg, lvds);
@@ -3939,6 +4216,85 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
}
}
+/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
+static void intel_crtc_update_cursor(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ int x = intel_crtc->cursor_x;
+ int y = intel_crtc->cursor_y;
+ uint32_t base, pos;
+ bool visible;
+
+ pos = 0;
+
+ if (crtc->fb) {
+ base = intel_crtc->cursor_addr;
+ if (x > (int) crtc->fb->width)
+ base = 0;
+
+ if (y > (int) crtc->fb->height)
+ base = 0;
+ } else
+ base = 0;
+
+ if (x < 0) {
+ if (x + intel_crtc->cursor_width < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
+ x = -x;
+ }
+ pos |= x << CURSOR_X_SHIFT;
+
+ if (y < 0) {
+ if (y + intel_crtc->cursor_height < 0)
+ base = 0;
+
+ pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
+ y = -y;
+ }
+ pos |= y << CURSOR_Y_SHIFT;
+
+ visible = base != 0;
+ if (!visible && !intel_crtc->cursor_visble)
+ return;
+
+ I915_WRITE(pipe == 0 ? CURAPOS : CURBPOS, pos);
+ if (intel_crtc->cursor_visble != visible) {
+ uint32_t cntl = I915_READ(pipe == 0 ? CURACNTR : CURBCNTR);
+ if (base) {
+ /* Hooray for CUR*CNTR differences */
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
+ cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
+ cntl |= pipe << 28; /* Connect to correct pipe */
+ } else {
+ cntl &= ~(CURSOR_FORMAT_MASK);
+ cntl |= CURSOR_ENABLE;
+ cntl |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
+ }
+ } else {
+ if (IS_MOBILE(dev) || IS_I9XX(dev)) {
+ cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
+ cntl |= CURSOR_MODE_DISABLE;
+ } else {
+ cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
+ }
+ }
+ I915_WRITE(pipe == 0 ? CURACNTR : CURBCNTR, cntl);
+
+ intel_crtc->cursor_visble = visible;
+ }
+ /* and commit changes on next vblank */
+ I915_WRITE(pipe == 0 ? CURABASE : CURBBASE, base);
+
+ if (visible)
+ intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj);
+}
+
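For clarity, a hypothetical example of the visibility and position handling implemented above (the exact bit layout behind CURSOR_POS_SIGN and the X/Y shifts is defined in i915_reg.h and is not restated here):

/*
 * 64x64 cursor, base = cursor_addr:
 *
 *   x = -10, y = 20  ->  x + width >= 0, cursor partly on screen:
 *                        X field = SIGN | 10, Y field = 20,
 *                        base stays at cursor_addr.
 *
 *   x = -80, y = 20  ->  x + width < 0, nothing visible:
 *                        base is forced to 0, the cursor plane is
 *                        disabled, and the hang mentioned in the
 *                        comment above cannot occur.
 */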
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_file *file_priv,
uint32_t handle,
@@ -3949,11 +4305,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_gem_object *bo;
struct drm_i915_gem_object *obj_priv;
- int pipe = intel_crtc->pipe;
- uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
- uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
- uint32_t temp = I915_READ(control);
- size_t addr;
+ uint32_t addr;
int ret;
DRM_DEBUG_KMS("\n");
@@ -3961,12 +4313,6 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* if we want to turn off the cursor ignore width and height */
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
- temp |= CURSOR_MODE_DISABLE;
- } else {
- temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
- }
addr = 0;
bo = NULL;
mutex_lock(&dev->struct_mutex);
@@ -4008,7 +4354,8 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
addr = obj_priv->gtt_offset;
} else {
- ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
+ ret = i915_gem_attach_phys_object(dev, bo,
+ (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
@@ -4019,21 +4366,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!IS_I9XX(dev))
I915_WRITE(CURSIZE, (height << 12) | width);
- /* Hooray for CUR*CNTR differences */
- if (IS_MOBILE(dev) || IS_I9XX(dev)) {
- temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
- temp |= (pipe << 28); /* Connect to correct pipe */
- } else {
- temp &= ~(CURSOR_FORMAT_MASK);
- temp |= CURSOR_ENABLE;
- temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
- }
-
finish:
- I915_WRITE(control, temp);
- I915_WRITE(base, addr);
-
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
if (intel_crtc->cursor_bo != bo)
@@ -4047,6 +4380,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_addr = addr;
intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_width = width;
+ intel_crtc->cursor_height = height;
+
+ intel_crtc_update_cursor(crtc);
return 0;
fail_unpin:
@@ -4060,34 +4397,12 @@ fail:
static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
- struct drm_device *dev = crtc->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct intel_framebuffer *intel_fb;
- int pipe = intel_crtc->pipe;
- uint32_t temp = 0;
- uint32_t adder;
- if (crtc->fb) {
- intel_fb = to_intel_framebuffer(crtc->fb);
- intel_mark_busy(dev, intel_fb->obj);
- }
+ intel_crtc->cursor_x = x;
+ intel_crtc->cursor_y = y;
- if (x < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
- x = -x;
- }
- if (y < 0) {
- temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
- y = -y;
- }
-
- temp |= x << CURSOR_X_SHIFT;
- temp |= y << CURSOR_Y_SHIFT;
-
- adder = intel_crtc->cursor_addr;
- I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
- I915_WRITE((pipe == 0) ? CURABASE : CURBBASE, adder);
+ intel_crtc_update_cursor(crtc);
return 0;
}
@@ -4114,15 +4429,12 @@ void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
}
static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t start, uint32_t size)
{
+ int end = (start + size > 256) ? 256 : start + size, i;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int i;
-
- if (size != 256)
- return;
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
intel_crtc->lut_r[i] = red[i] >> 8;
intel_crtc->lut_g[i] = green[i] >> 8;
intel_crtc->lut_b[i] = blue[i] >> 8;
@@ -4671,6 +4983,8 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
atomic_dec_and_test(&obj_priv->pending_flip))
DRM_WAKEUP(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
+
+ trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
void intel_finish_page_flip(struct drm_device *dev, int pipe)
@@ -4748,27 +5062,22 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev, obj);
- if (ret != 0) {
- mutex_unlock(&dev->struct_mutex);
-
- spin_lock_irqsave(&dev->event_lock, flags);
- intel_crtc->unpin_work = NULL;
- spin_unlock_irqrestore(&dev->event_lock, flags);
-
- kfree(work);
-
- DRM_DEBUG_DRIVER("flip queue: %p pin & fence failed\n",
- to_intel_bo(obj));
- return ret;
- }
+ if (ret)
+ goto cleanup_work;
/* Reference the objects for the scheduled work. */
drm_gem_object_reference(work->old_fb_obj);
drm_gem_object_reference(obj);
crtc->fb = fb;
- i915_gem_object_flush_write_domain(obj);
- drm_vblank_get(dev, intel_crtc->pipe);
+ ret = i915_gem_object_flush_write_domain(obj);
+ if (ret)
+ goto cleanup_objs;
+
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto cleanup_objs;
+
obj_priv = to_intel_bo(obj);
atomic_inc(&obj_priv->pending_flip);
work->pending_flip_obj = obj;
@@ -4806,7 +5115,23 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
mutex_unlock(&dev->struct_mutex);
+ trace_i915_flip_request(intel_crtc->plane, obj);
+
return 0;
+
+cleanup_objs:
+ drm_gem_object_unreference(work->old_fb_obj);
+ drm_gem_object_unreference(obj);
+cleanup_work:
+ mutex_unlock(&dev->struct_mutex);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ intel_crtc->unpin_work = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ kfree(work);
+
+ return ret;
}
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -4814,6 +5139,7 @@ static const struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_fixup = intel_crtc_mode_fixup,
.mode_set = intel_crtc_mode_set,
.mode_set_base = intel_pipe_set_base,
+ .mode_set_base_atomic = intel_pipe_set_base_atomic,
.prepare = intel_crtc_prepare,
.commit = intel_crtc_commit,
.load_lut = intel_crtc_load_lut,
@@ -4932,19 +5258,26 @@ static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder;
+ bool dpd_is_edp = false;
- intel_crt_init(dev);
-
- /* Set up integrated LVDS */
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
if (HAS_PCH_SPLIT(dev)) {
- int found;
+ dpd_is_edp = intel_dpd_is_edp(dev);
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
intel_dp_init(dev, DP_A);
+ if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
+ intel_dp_init(dev, PCH_DP_D);
+ }
+
+ intel_crt_init(dev);
+
+ if (HAS_PCH_SPLIT(dev)) {
+ int found;
+
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB);
@@ -4963,7 +5296,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C);
- if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
@@ -5076,18 +5409,18 @@ intel_user_framebuffer_create(struct drm_device *dev,
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
if (!obj)
- return NULL;
+ return ERR_PTR(-ENOENT);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = intel_framebuffer_init(dev, intel_fb,
mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
kfree(intel_fb);
- return NULL;
+ return ERR_PTR(ret);
}
return &intel_fb->base;
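With the change above, the framebuffer-creation hook now reports why it failed. A minimal sketch of how a caller is expected to consume that (the caller shown is hypothetical; in practice the DRM core does this):

struct drm_framebuffer *fb;

fb = intel_user_framebuffer_create(dev, filp, &mode_cmd);
if (IS_ERR(fb))
	return PTR_ERR(fb);	/* -ENOENT, -ENOMEM or the init error */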
@@ -5372,6 +5705,26 @@ void intel_init_clock_gating(struct drm_device *dev)
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
}
+ /*
+ * According to the hardware documentation, the following bits
+ * must be set unconditionally in order to enable FBC:
+ * bit 22 of 0x42000,
+ * bit 22 of 0x42004,
+ * bits 7, 8 and 9 of 0x42020.
+ */
+ if (IS_IRONLAKE_M(dev)) {
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPFC_DIS1 |
+ ILK_DPFC_DIS2 |
+ ILK_CLK_FBC);
+ }
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -5450,7 +5803,11 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.dpms = i9xx_crtc_dpms;
if (I915_HAS_FBC(dev)) {
- if (IS_GM45(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
+ dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
+ dev_priv->display.enable_fbc = ironlake_enable_fbc;
+ dev_priv->display.disable_fbc = ironlake_disable_fbc;
+ } else if (IS_GM45(dev)) {
dev_priv->display.fbc_enabled = g4x_fbc_enabled;
dev_priv->display.enable_fbc = g4x_enable_fbc;
dev_priv->display.disable_fbc = g4x_disable_fbc;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5dde80f9e652..40be1fa65be1 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -43,6 +43,7 @@
#define DP_LINK_CONFIGURATION_SIZE 9
#define IS_eDP(i) ((i)->type == INTEL_OUTPUT_EDP)
+#define IS_PCH_eDP(dp_priv) ((dp_priv)->is_pch_edp)
struct intel_dp_priv {
uint32_t output_reg;
@@ -56,6 +57,7 @@ struct intel_dp_priv {
struct intel_encoder *intel_encoder;
struct i2c_adapter adapter;
struct i2c_algo_dp_aux_data algo;
+ bool is_pch_edp;
};
static void
@@ -128,8 +130,9 @@ intel_dp_link_required(struct drm_device *dev,
struct intel_encoder *intel_encoder, int pixel_clock)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
- if (IS_eDP(intel_encoder))
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv))
return (pixel_clock * dev_priv->edp_bpp) / 8;
else
return pixel_clock * 3;
@@ -147,9 +150,21 @@ intel_dp_mode_valid(struct drm_connector *connector,
{
struct drm_encoder *encoder = intel_attached_encoder(connector);
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_device *dev = connector->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_encoder));
int max_lanes = intel_dp_max_lane_count(intel_encoder);
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+ dev_priv->panel_fixed_mode) {
+ if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
/* only refuse the mode on non-eDP since we have seen some weird eDP panels
which are outside spec tolerances but somehow work by magic */
if (!IS_eDP(intel_encoder) &&
@@ -508,11 +523,37 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
{
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
+ struct drm_device *dev = encoder->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_encoder);
int max_clock = intel_dp_max_link_bw(intel_encoder) == DP_LINK_BW_2_7 ? 1 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+ dev_priv->panel_fixed_mode) {
+ struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+
+ adjusted_mode->hdisplay = fixed_mode->hdisplay;
+ adjusted_mode->hsync_start = fixed_mode->hsync_start;
+ adjusted_mode->hsync_end = fixed_mode->hsync_end;
+ adjusted_mode->htotal = fixed_mode->htotal;
+
+ adjusted_mode->vdisplay = fixed_mode->vdisplay;
+ adjusted_mode->vsync_start = fixed_mode->vsync_start;
+ adjusted_mode->vsync_end = fixed_mode->vsync_end;
+ adjusted_mode->vtotal = fixed_mode->vtotal;
+
+ adjusted_mode->clock = fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+
+ /*
+ * the mode->clock is used to calculate the Data&Link M/N
+ * of the pipe. For the eDP the fixed clock should be used.
+ */
+ mode->clock = dev_priv->panel_fixed_mode->clock;
+ }
+
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -531,7 +572,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
/* okay we failed just pick the highest */
dp_priv->lane_count = max_lane_count;
dp_priv->link_bw = bws[max_clock];
@@ -563,14 +604,14 @@ intel_reduce_ratio(uint32_t *num, uint32_t *den)
}
static void
-intel_dp_compute_m_n(int bytes_per_pixel,
+intel_dp_compute_m_n(int bpp,
int nlanes,
int pixel_clock,
int link_clock,
struct intel_dp_m_n *m_n)
{
m_n->tu = 64;
- m_n->gmch_m = pixel_clock * bytes_per_pixel;
+ m_n->gmch_m = (pixel_clock * bpp) >> 3;
m_n->gmch_n = link_clock * nlanes;
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
@@ -578,6 +619,28 @@ intel_dp_compute_m_n(int bytes_per_pixel,
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
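A worked example of why the helper now takes bits per pixel rather than bytes (numbers chosen purely for illustration; an 18 bpp panel cannot be expressed as a whole number of bytes):

/*
 * pixel_clock = 148500 kHz, link_clock = 270000 kHz, 4 lanes:
 *
 *   24 bpp: gmch_m = 148500 * 24 >> 3 = 445500   (same as the old
 *                                                 "3 bytes" path)
 *   18 bpp: gmch_m = 148500 * 18 >> 3 = 334125   (not representable
 *                                                 with whole bytes)
 *   gmch_n = 270000 * 4 = 1080000
 *
 * Both ratios are then reduced by intel_reduce_ratio() before being
 * programmed.
 */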
+bool intel_pch_has_edp(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_encoder *encoder;
+
+ list_for_each_entry(encoder, &mode_config->encoder_list, head) {
+ struct intel_encoder *intel_encoder;
+ struct intel_dp_priv *dp_priv;
+
+ if (!encoder || encoder->crtc != crtc)
+ continue;
+
+ intel_encoder = enc_to_intel_encoder(encoder);
+ dp_priv = intel_encoder->dev_priv;
+
+ if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT)
+ return dp_priv->is_pch_edp;
+ }
+ return false;
+}
+
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -587,7 +650,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_encoder *encoder;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int lane_count = 4;
+ int lane_count = 4, bpp = 24;
struct intel_dp_m_n m_n;
/*
@@ -605,6 +668,8 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
lane_count = dp_priv->lane_count;
+ if (IS_PCH_eDP(dp_priv))
+ bpp = dev_priv->edp_bpp;
break;
}
}
@@ -614,7 +679,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
- intel_dp_compute_m_n(3, lane_count,
+ intel_dp_compute_m_n(bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
if (HAS_PCH_SPLIT(dev)) {
@@ -796,7 +861,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
if (mode != DRM_MODE_DPMS_ON) {
if (dp_reg & DP_PORT_EN) {
intel_dp_link_down(intel_encoder, dp_priv->DP);
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
ironlake_edp_backlight_off(dev);
ironlake_edp_panel_off(dev);
}
@@ -804,7 +869,7 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
} else {
if (!(dp_reg & DP_PORT_EN)) {
intel_dp_link_train(intel_encoder, dp_priv->DP, dp_priv->link_configuration);
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
ironlake_edp_panel_on(dev);
ironlake_edp_backlight_on(dev);
}
@@ -1340,17 +1405,32 @@ static int intel_dp_get_modes(struct drm_connector *connector)
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct drm_device *dev = intel_encoder->enc.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_dp_priv *dp_priv = intel_encoder->dev_priv;
int ret;
/* We should parse the EDID data and find out if it has an audio sink
*/
ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
- if (ret)
+ if (ret) {
+ if ((IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) &&
+ !dev_priv->panel_fixed_mode) {
+ struct drm_display_mode *newmode;
+ list_for_each_entry(newmode, &connector->probed_modes,
+ head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ dev_priv->panel_fixed_mode =
+ drm_mode_duplicate(dev, newmode);
+ break;
+ }
+ }
+ }
+
return ret;
+ }
/* if eDP has no EDID, try to use fixed panel mode from VBT */
- if (IS_eDP(intel_encoder)) {
+ if (IS_eDP(intel_encoder) || IS_PCH_eDP(dp_priv)) {
if (dev_priv->panel_fixed_mode != NULL) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
@@ -1435,6 +1515,26 @@ intel_trans_dp_port_sel (struct drm_crtc *crtc)
return -1;
}
+/* check the VBT to see whether the eDP is on DP-D port */
+bool intel_dpd_is_edp(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct child_device_config *p_child;
+ int i;
+
+ if (!dev_priv->child_dev_num)
+ return false;
+
+ for (i = 0; i < dev_priv->child_dev_num; i++) {
+ p_child = dev_priv->child_dev + i;
+
+ if (p_child->dvo_port == PORT_IDPD &&
+ p_child->device_type == DEVICE_TYPE_eDP)
+ return true;
+ }
+ return false;
+}
+
void
intel_dp_init(struct drm_device *dev, int output_reg)
{
@@ -1444,6 +1544,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
struct intel_connector *intel_connector;
struct intel_dp_priv *dp_priv;
const char *name = NULL;
+ int type;
intel_encoder = kcalloc(sizeof(struct intel_encoder) +
sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
@@ -1458,18 +1559,24 @@ intel_dp_init(struct drm_device *dev, int output_reg)
dp_priv = (struct intel_dp_priv *)(intel_encoder + 1);
+ if (HAS_PCH_SPLIT(dev) && (output_reg == PCH_DP_D))
+ if (intel_dpd_is_edp(dev))
+ dp_priv->is_pch_edp = true;
+
+ if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
+ type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+ } else {
+ type = DRM_MODE_CONNECTOR_DisplayPort;
+ intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
+ }
+
connector = &intel_connector->base;
- drm_connector_init(dev, connector, &intel_dp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD;
- if (output_reg == DP_A)
- intel_encoder->type = INTEL_OUTPUT_EDP;
- else
- intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
-
if (output_reg == DP_B || output_reg == PCH_DP_B)
intel_encoder->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
else if (output_reg == DP_C || output_reg == PCH_DP_C)
@@ -1528,7 +1635,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
intel_encoder->ddc_bus = &dp_priv->adapter;
intel_encoder->hot_plug = intel_dp_hot_plug;
- if (output_reg == DP_A) {
+ if (output_reg == DP_A || IS_PCH_eDP(dp_priv)) {
/* initialize panel mode from VBT if available for eDP */
if (dev_priv->lfp_lvds_vbt_mode) {
dev_priv->panel_fixed_mode =
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 2f7970be9051..b2190148703a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -143,8 +143,6 @@ struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
- struct drm_gem_object *cursor_bo;
- uint32_t cursor_addr;
u8 lut_r[256], lut_g[256], lut_b[256];
int dpms_mode;
bool busy; /* is scanout buffer being updated frequently? */
@@ -153,6 +151,12 @@ struct intel_crtc {
struct intel_overlay *overlay;
struct intel_unpin_work *unpin_work;
int fdi_lanes;
+
+ struct drm_gem_object *cursor_bo;
+ uint32_t cursor_addr;
+ int16_t cursor_x, cursor_y;
+ int16_t cursor_width, cursor_height;
+ bool cursor_visble;
};
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -179,6 +183,8 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
+extern bool intel_pch_has_edp(struct drm_crtc *crtc);
+extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 3e18c9e7729b..7bdc96256bf5 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -61,6 +61,8 @@ static struct fb_ops intelfb_ops = {
.fb_pan_display = drm_fb_helper_pan_display,
.fb_blank = drm_fb_helper_blank,
.fb_setcmap = drm_fb_helper_setcmap,
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
};
static int intelfb_create(struct intel_fbdev *ifbdev,
@@ -119,7 +121,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev;
- intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ if (ret)
+ goto out_unpin;
fb = &ifbdev->ifb.base;
@@ -128,7 +132,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
strcpy(info->fix.id, "inteldrmfb");
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &intelfb_ops;
/* setup aperture base/size for vesafb takeover */
@@ -146,8 +150,6 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
info->fix.smem_len = size;
- info->flags = FBINFO_DEFAULT;
-
info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
size);
if (!info->screen_base) {
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 83bd764b000e..197887ed1823 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -54,10 +54,11 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
struct intel_hdmi_priv *hdmi_priv = intel_encoder->dev_priv;
u32 sdvox;
- sdvox = SDVO_ENCODING_HDMI |
- SDVO_BORDER_ENABLE |
- SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ sdvox = SDVO_ENCODING_HDMI | SDVO_BORDER_ENABLE;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
if (hdmi_priv->has_hdmi_sink) {
sdvox |= SDVO_AUDIO_ENABLE;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0eab8df5bf7e..0a2e60059fb3 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -156,31 +156,73 @@ static int intel_lvds_mode_valid(struct drm_connector *connector,
return MODE_OK;
}
+static void
+centre_horizontally(struct drm_display_mode *mode,
+ int width)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the hsync and hblank widths constant */
+ sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start;
+ blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->hdisplay - width + 1) / 2;
+ border += border & 1; /* make the border even */
+
+ mode->crtc_hdisplay = width;
+ mode->crtc_hblank_start = width + border;
+ mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width;
+
+ mode->crtc_hsync_start = mode->crtc_hblank_start + sync_pos;
+ mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width;
+}
+
+static void
+centre_vertically(struct drm_display_mode *mode,
+ int height)
+{
+ u32 border, sync_pos, blank_width, sync_width;
+
+ /* keep the vsync and vblank widths constant */
+ sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ sync_pos = (blank_width - sync_width + 1) / 2;
+
+ border = (mode->vdisplay - height + 1) / 2;
+
+ mode->crtc_vdisplay = height;
+ mode->crtc_vblank_start = height + border;
+ mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width;
+
+ mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos;
+ mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width;
+}
+
+static inline u32 panel_fitter_scaling(u32 source, u32 target)
+{
+ /*
+ * Floating point is not available in the kernel, so FACTOR is
+ * used to carry out the panel ratio calculation in fixed-point
+ * arithmetic.
+ */
+#define ACCURACY 12
+#define FACTOR (1 << ACCURACY)
+ u32 ratio = source * FACTOR / target;
+ return (FACTOR * ratio + FACTOR/2) / FACTOR;
+}
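A worked example of the fixed-point helper above, using hypothetical panel and source sizes:

/*
 * source = 1024, target = 1280, ACCURACY = 12, FACTOR = 4096:
 *
 *   ratio  = 1024 * 4096 / 1280          = 3276  (~0.8 in fixed point)
 *   result = (4096 * 3276 + 2048) / 4096 = 3276
 *
 * i.e. the scaling factor is kept in 1/4096ths so that a ratio such
 * as ~0.8x can be programmed without floating point.
 */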
+
static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- /*
- * float point operation is not supported . So the PANEL_RATIO_FACTOR
- * is defined, which can avoid the float point computation when
- * calculating the panel ratio.
- */
-#define PANEL_RATIO_FACTOR 8192
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
struct intel_lvds_priv *lvds_priv = intel_encoder->dev_priv;
- u32 pfit_control = 0, pfit_pgm_ratios = 0;
- int left_border = 0, right_border = 0, top_border = 0;
- int bottom_border = 0;
- bool border = 0;
- int panel_ratio, desired_ratio, vert_scale, horiz_scale;
- int horiz_ratio, vert_ratio;
- u32 hsync_width, vsync_width;
- u32 hblank_width, vblank_width;
- u32 hsync_pos, vsync_pos;
+ u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
/* Should never happen!! */
if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
@@ -200,27 +242,25 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
if (dev_priv->panel_fixed_mode == NULL)
return true;
/*
- * If we have timings from the BIOS for the panel, put them in
+ * We have timings from the BIOS for the panel, put them in
* to the adjusted mode. The CRTC will be set up for this mode,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (dev_priv->panel_fixed_mode != NULL) {
- adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
- adjusted_mode->hsync_start =
- dev_priv->panel_fixed_mode->hsync_start;
- adjusted_mode->hsync_end =
- dev_priv->panel_fixed_mode->hsync_end;
- adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
- adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
- adjusted_mode->vsync_start =
- dev_priv->panel_fixed_mode->vsync_start;
- adjusted_mode->vsync_end =
- dev_priv->panel_fixed_mode->vsync_end;
- adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
- adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- }
+ adjusted_mode->hdisplay = dev_priv->panel_fixed_mode->hdisplay;
+ adjusted_mode->hsync_start =
+ dev_priv->panel_fixed_mode->hsync_start;
+ adjusted_mode->hsync_end =
+ dev_priv->panel_fixed_mode->hsync_end;
+ adjusted_mode->htotal = dev_priv->panel_fixed_mode->htotal;
+ adjusted_mode->vdisplay = dev_priv->panel_fixed_mode->vdisplay;
+ adjusted_mode->vsync_start =
+ dev_priv->panel_fixed_mode->vsync_start;
+ adjusted_mode->vsync_end =
+ dev_priv->panel_fixed_mode->vsync_end;
+ adjusted_mode->vtotal = dev_priv->panel_fixed_mode->vtotal;
+ adjusted_mode->clock = dev_priv->panel_fixed_mode->clock;
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
/* Make sure pre-965s set dither correctly */
if (!IS_I965G(dev)) {
@@ -230,11 +270,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* Native modes don't need fitting */
if (adjusted_mode->hdisplay == mode->hdisplay &&
- adjusted_mode->vdisplay == mode->vdisplay) {
- pfit_pgm_ratios = 0;
- border = 0;
+ adjusted_mode->vdisplay == mode->vdisplay)
goto out;
- }
/* full screen scale for now */
if (HAS_PCH_SPLIT(dev))
@@ -242,25 +279,9 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
/* 965+ wants fuzzy fitting */
if (IS_I965G(dev))
- pfit_control |= (intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY;
-
- hsync_width = adjusted_mode->crtc_hsync_end -
- adjusted_mode->crtc_hsync_start;
- vsync_width = adjusted_mode->crtc_vsync_end -
- adjusted_mode->crtc_vsync_start;
- hblank_width = adjusted_mode->crtc_hblank_end -
- adjusted_mode->crtc_hblank_start;
- vblank_width = adjusted_mode->crtc_vblank_end -
- adjusted_mode->crtc_vblank_start;
- /*
- * Deal with panel fitting options. Figure out how to stretch the
- * image based on its aspect ratio & the current panel fitting mode.
- */
- panel_ratio = adjusted_mode->hdisplay * PANEL_RATIO_FACTOR /
- adjusted_mode->vdisplay;
- desired_ratio = mode->hdisplay * PANEL_RATIO_FACTOR /
- mode->vdisplay;
+ pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
+ PFIT_FILTER_FUZZY);
+
/*
* Enable automatic panel scaling for non-native modes so that they fill
* the screen. Should be enabled before the pipe is enabled, according
@@ -278,170 +299,63 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* For centered modes, we have to calculate border widths &
* heights and modify the values programmed into the CRTC.
*/
- left_border = (adjusted_mode->hdisplay - mode->hdisplay) / 2;
- right_border = left_border;
- if (mode->hdisplay & 1)
- right_border++;
- top_border = (adjusted_mode->vdisplay - mode->vdisplay) / 2;
- bottom_border = top_border;
- if (mode->vdisplay & 1)
- bottom_border++;
- /* Set active & border values */
- adjusted_mode->crtc_hdisplay = mode->hdisplay;
- /* Keep the boder be even */
- if (right_border & 1)
- right_border++;
- /* use the border directly instead of border minuse one */
- adjusted_mode->crtc_hblank_start = mode->hdisplay +
- right_border;
- /* keep the blank width constant */
- adjusted_mode->crtc_hblank_end =
- adjusted_mode->crtc_hblank_start + hblank_width;
- /* get the hsync pos relative to hblank start */
- hsync_pos = (hblank_width - hsync_width) / 2;
- /* keep the hsync pos be even */
- if (hsync_pos & 1)
- hsync_pos++;
- adjusted_mode->crtc_hsync_start =
- adjusted_mode->crtc_hblank_start + hsync_pos;
- /* keep the hsync width constant */
- adjusted_mode->crtc_hsync_end =
- adjusted_mode->crtc_hsync_start + hsync_width;
- adjusted_mode->crtc_vdisplay = mode->vdisplay;
- /* use the border instead of border minus one */
- adjusted_mode->crtc_vblank_start = mode->vdisplay +
- bottom_border;
- /* keep the vblank width constant */
- adjusted_mode->crtc_vblank_end =
- adjusted_mode->crtc_vblank_start + vblank_width;
- /* get the vsync start postion relative to vblank start */
- vsync_pos = (vblank_width - vsync_width) / 2;
- adjusted_mode->crtc_vsync_start =
- adjusted_mode->crtc_vblank_start + vsync_pos;
- /* keep the vsync width constant */
- adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vsync_start + vsync_width;
- border = 1;
+ centre_horizontally(adjusted_mode, mode->hdisplay);
+ centre_vertically(adjusted_mode, mode->vdisplay);
+ border = LVDS_BORDER_ENABLE;
break;
+
case DRM_MODE_SCALE_ASPECT:
- /* Scale but preserve the spect ratio */
- pfit_control |= PFIT_ENABLE;
+ /* Scale but preserve the aspect ratio */
if (IS_I965G(dev)) {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
+
+ pfit_control |= PFIT_ENABLE;
/* 965+ is easy, it does everything in hw */
- if (panel_ratio > desired_ratio)
+ if (scaled_width > scaled_height)
pfit_control |= PFIT_SCALING_PILLAR;
- else if (panel_ratio < desired_ratio)
+ else if (scaled_width < scaled_height)
pfit_control |= PFIT_SCALING_LETTER;
else
pfit_control |= PFIT_SCALING_AUTO;
} else {
+ u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
+ u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
/*
* For earlier chips we have to calculate the scaling
* ratio by hand and program it into the
* PFIT_PGM_RATIO register
*/
- u32 horiz_bits, vert_bits, bits = 12;
- horiz_ratio = mode->hdisplay * PANEL_RATIO_FACTOR/
- adjusted_mode->hdisplay;
- vert_ratio = mode->vdisplay * PANEL_RATIO_FACTOR/
- adjusted_mode->vdisplay;
- horiz_scale = adjusted_mode->hdisplay *
- PANEL_RATIO_FACTOR / mode->hdisplay;
- vert_scale = adjusted_mode->vdisplay *
- PANEL_RATIO_FACTOR / mode->vdisplay;
-
- /* retain aspect ratio */
- if (panel_ratio > desired_ratio) { /* Pillar */
- u32 scaled_width;
- scaled_width = mode->hdisplay * vert_scale /
- PANEL_RATIO_FACTOR;
- horiz_ratio = vert_ratio;
- pfit_control |= (VERT_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- /* Pillar will have left/right borders */
- left_border = (adjusted_mode->hdisplay -
- scaled_width) / 2;
- right_border = left_border;
- if (mode->hdisplay & 1) /* odd resolutions */
- right_border++;
- /* keep the border be even */
- if (right_border & 1)
- right_border++;
- adjusted_mode->crtc_hdisplay = scaled_width;
- /* use border instead of border minus one */
- adjusted_mode->crtc_hblank_start =
- scaled_width + right_border;
- /* keep the hblank width constant */
- adjusted_mode->crtc_hblank_end =
- adjusted_mode->crtc_hblank_start +
- hblank_width;
- /*
- * get the hsync start pos relative to
- * hblank start
- */
- hsync_pos = (hblank_width - hsync_width) / 2;
- /* keep the hsync_pos be even */
- if (hsync_pos & 1)
- hsync_pos++;
- adjusted_mode->crtc_hsync_start =
- adjusted_mode->crtc_hblank_start +
- hsync_pos;
- /* keept hsync width constant */
- adjusted_mode->crtc_hsync_end =
- adjusted_mode->crtc_hsync_start +
- hsync_width;
- border = 1;
- } else if (panel_ratio < desired_ratio) { /* letter */
- u32 scaled_height = mode->vdisplay *
- horiz_scale / PANEL_RATIO_FACTOR;
- vert_ratio = horiz_ratio;
- pfit_control |= (HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
- /* Letterbox will have top/bottom border */
- top_border = (adjusted_mode->vdisplay -
- scaled_height) / 2;
- bottom_border = top_border;
- if (mode->vdisplay & 1)
- bottom_border++;
- adjusted_mode->crtc_vdisplay = scaled_height;
- /* use border instead of border minus one */
- adjusted_mode->crtc_vblank_start =
- scaled_height + bottom_border;
- /* keep the vblank width constant */
- adjusted_mode->crtc_vblank_end =
- adjusted_mode->crtc_vblank_start +
- vblank_width;
- /*
- * get the vsync start pos relative to
- * vblank start
- */
- vsync_pos = (vblank_width - vsync_width) / 2;
- adjusted_mode->crtc_vsync_start =
- adjusted_mode->crtc_vblank_start +
- vsync_pos;
- /* keep the vsync width constant */
- adjusted_mode->crtc_vsync_end =
- adjusted_mode->crtc_vsync_start +
- vsync_width;
- border = 1;
- } else {
- /* Aspects match, Let hw scale both directions */
- pfit_control |= (VERT_AUTO_SCALE |
- HORIZ_AUTO_SCALE |
+ if (scaled_width > scaled_height) { /* pillar */
+ centre_horizontally(adjusted_mode, scaled_height / mode->vdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->vdisplay != adjusted_mode->vdisplay) {
+ u32 bits = panel_fitter_scaling(mode->vdisplay, adjusted_mode->vdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else if (scaled_width < scaled_height) { /* letter */
+ centre_vertically(adjusted_mode, scaled_width / mode->hdisplay);
+
+ border = LVDS_BORDER_ENABLE;
+ if (mode->hdisplay != adjusted_mode->hdisplay) {
+ u32 bits = panel_fitter_scaling(mode->hdisplay, adjusted_mode->hdisplay);
+ pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT |
+ bits << PFIT_VERT_SCALE_SHIFT);
+ pfit_control |= (PFIT_ENABLE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_INTERP_BILINEAR);
+ }
+ } else
+ /* Aspects match, let hw scale both directions */
+ pfit_control |= (PFIT_ENABLE |
+ VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
- }
- horiz_bits = (1 << bits) * horiz_ratio /
- PANEL_RATIO_FACTOR;
- vert_bits = (1 << bits) * vert_ratio /
- PANEL_RATIO_FACTOR;
- pfit_pgm_ratios =
- ((vert_bits << PFIT_VERT_SCALE_SHIFT) &
- PFIT_VERT_SCALE_MASK) |
- ((horiz_bits << PFIT_HORIZ_SCALE_SHIFT) &
- PFIT_HORIZ_SCALE_MASK);
}
break;
@@ -458,6 +372,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
VERT_INTERP_BILINEAR |
HORIZ_INTERP_BILINEAR);
break;
+
default:
break;
}
@@ -465,14 +380,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
out:
lvds_priv->pfit_control = pfit_control;
lvds_priv->pfit_pgm_ratios = pfit_pgm_ratios;
- /*
- * When there exists the border, it means that the LVDS_BORDR
- * should be enabled.
- */
- if (border)
- dev_priv->lvds_border_bits |= LVDS_BORDER_ENABLE;
- else
- dev_priv->lvds_border_bits &= ~(LVDS_BORDER_ENABLE);
+ dev_priv->lvds_border_bits = border;
+
/*
* XXX: It would be nice to support lower refresh rates on the
* panels to reduce power consumption, and perhaps match the
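[Editor's note: the DRM_MODE_SCALE_ASPECT branch above now decides between pillarbox and letterbox by cross-multiplying (adjusted_mode->hdisplay * mode->vdisplay versus mode->hdisplay * adjusted_mode->vdisplay) instead of computing two fixed-point ratios. A runnable sketch of the same comparison; fit_mode() and the example resolutions are made up for illustration and are not in the patch.]

    #include <stdio.h>
    #include <stdint.h>

    /* Compare panel and requested aspect ratios without division:
     * panel_w/panel_h > mode_w/mode_h  <=>  panel_w*mode_h > mode_w*panel_h */
    static const char *fit_mode(uint32_t panel_w, uint32_t panel_h,
                                uint32_t mode_w, uint32_t mode_h)
    {
            uint32_t scaled_width = panel_w * mode_h;
            uint32_t scaled_height = mode_w * panel_h;

            if (scaled_width > scaled_height)
                    return "pillarbox (borders left/right)";
            if (scaled_width < scaled_height)
                    return "letterbox (borders top/bottom)";
            return "aspects match, scale both directions";
    }

    int main(void)
    {
            /* a 1280x800 panel showing a 1024x768 mode */
            printf("%s\n", fit_mode(1280, 800, 1024, 768));
            return 0;
    }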
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index d7ad5139d17c..d39aea24eabe 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -65,7 +65,7 @@
#define OCMD_YUV_410_PLANAR (0xe<<10) /* also 411 */
#define OCMD_TVSYNCFLIP_PARITY (0x1<<9)
#define OCMD_TVSYNCFLIP_ENABLE (0x1<<7)
-#define OCMD_BUF_TYPE_MASK (Ox1<<5)
+#define OCMD_BUF_TYPE_MASK (0x1<<5)
#define OCMD_BUF_TYPE_FRAME (0x0<<5)
#define OCMD_BUF_TYPE_FIELD (0x1<<5)
#define OCMD_TEST_MODE (0x1<<4)
@@ -185,7 +185,8 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
if (OVERLAY_NONPHYSICAL(overlay->dev)) {
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- overlay->reg_bo->gtt_offset);
+ overlay->reg_bo->gtt_offset,
+ KM_USER0);
if (!regs) {
DRM_ERROR("failed to map overlay regs in GTT\n");
@@ -200,7 +201,7 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{
if (OVERLAY_NONPHYSICAL(overlay->dev))
- io_mapping_unmap_atomic(overlay->virt_addr);
+ io_mapping_unmap_atomic(overlay->virt_addr, KM_USER0);
overlay->virt_addr = NULL;
@@ -958,7 +959,7 @@ static int check_overlay_src(struct drm_device *dev,
|| rec->src_width < N_HORIZ_Y_TAPS*4)
return -EINVAL;
- /* check alingment constrains */
+ /* check alignment constraints */
switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
case I915_OVERLAY_RGB:
/* not implemented */
@@ -990,7 +991,10 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
/* stride checking */
- stride_mask = 63;
+ if (IS_I830(dev) || IS_845G(dev))
+ stride_mask = 255;
+ else
+ stride_mask = 63;
if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
return -EINVAL;
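[Editor's note: the stride check above now masks with 255 on i830/845G (256-byte alignment) and 63 elsewhere (64-byte alignment). Testing a power-of-two alignment with `value & (align - 1)` is the idiom in play; the helper stride_ok() and the sample strides below are invented for this sketch.]

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* A stride is acceptable when none of the low bits are set,
     * i.e. it is a multiple of (mask + 1). */
    static bool stride_ok(uint32_t stride, uint32_t mask)
    {
            return (stride & mask) == 0;
    }

    int main(void)
    {
            printf("1024 vs 63  -> %d\n", stride_ok(1024, 63));  /* 1: 64-byte aligned */
            printf("1024 vs 255 -> %d\n", stride_ok(1024, 255)); /* 1: 256-byte aligned */
            printf("1056 vs 255 -> %d\n", stride_ok(1056, 255)); /* 0: rejected on i830/845G */
            return 0;
    }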
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 76993ac16cc1..d9d4d51aa89e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -392,13 +392,13 @@ static void intel_sdvo_debug_write(struct intel_encoder *intel_encoder, u8 cmd,
DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
for (; i < 8; i++)
DRM_LOG_KMS(" ");
- for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
if (cmd == sdvo_cmd_names[i].cmd) {
DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
break;
}
}
- if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
DRM_LOG_KMS("(%02X)", cmd);
DRM_LOG_KMS("\n");
}
@@ -1237,9 +1237,11 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (IS_I965G(dev)) {
- sdvox |= SDVO_BORDER_ENABLE |
- SDVO_VSYNC_ACTIVE_HIGH |
- SDVO_HSYNC_ACTIVE_HIGH;
+ sdvox |= SDVO_BORDER_ENABLE;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
sdvox |= I915_READ(sdvo_priv->sdvo_reg);
switch (sdvo_priv->sdvo_reg) {
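[Editor's note: the SDVO change above stops hard-coding active-high sync and instead derives the polarity bits from the adjusted mode's flags. The mapping is a conditional OR of register bits; the sketch below uses made-up flag and bit definitions rather than the real i915/DRM ones.]

    #include <stdio.h>
    #include <stdint.h>

    /* Stand-ins for the real definitions; values chosen only for the demo. */
    #define MODE_FLAG_PHSYNC        (1u << 0)
    #define MODE_FLAG_PVSYNC        (1u << 1)
    #define REG_HSYNC_ACTIVE_HIGH   (1u << 3)
    #define REG_VSYNC_ACTIVE_HIGH   (1u << 4)

    static uint32_t sync_bits(uint32_t mode_flags)
    {
            uint32_t val = 0;

            /* Only assert active-high when the mode actually asks for it. */
            if (mode_flags & MODE_FLAG_PHSYNC)
                    val |= REG_HSYNC_ACTIVE_HIGH;
            if (mode_flags & MODE_FLAG_PVSYNC)
                    val |= REG_VSYNC_ACTIVE_HIGH;
            return val;
    }

    int main(void)
    {
            printf("0x%x\n", sync_bits(MODE_FLAG_PHSYNC));                    /* hsync only */
            printf("0x%x\n", sync_bits(MODE_FLAG_PHSYNC | MODE_FLAG_PVSYNC)); /* both */
            return 0;
    }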
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 6d553c29d106..cc3726a4a1cb 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -476,7 +476,7 @@ static const struct tv_mode tv_modes[] = {
.vi_end_f1 = 20, .vi_end_f2 = 21,
.nbr_end = 240,
- .burst_ena = 8,
+ .burst_ena = true,
.hburst_start = 72, .hburst_len = 34,
.vburst_start_f1 = 9, .vburst_end_f1 = 240,
.vburst_start_f2 = 10, .vburst_end_f2 = 240,
@@ -896,8 +896,6 @@ static const struct tv_mode tv_modes[] = {
},
};
-#define NUM_TV_MODES sizeof(tv_modes) / sizeof (tv_modes[0])
-
static void
intel_tv_dpms(struct drm_encoder *encoder, int mode)
{
@@ -1424,7 +1422,7 @@ intel_tv_get_modes(struct drm_connector *connector)
int j, count = 0;
u64 tmp;
- for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
+ for (j = 0; j < ARRAY_SIZE(input_res_table);
j++) {
struct input_res *input = &input_res_table[j];
unsigned int hactive_s = input->w;
@@ -1512,7 +1510,7 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
tv_priv->margin[TV_MARGIN_BOTTOM] = val;
changed = true;
} else if (property == dev->mode_config.tv_mode_property) {
- if (val >= NUM_TV_MODES) {
+ if (val >= ARRAY_SIZE(tv_modes)) {
ret = -EINVAL;
goto out;
}
@@ -1693,13 +1691,13 @@ intel_tv_init(struct drm_device *dev)
connector->doublescan_allowed = false;
/* Create TV properties then attach current values */
- tv_format_names = kmalloc(sizeof(char *) * NUM_TV_MODES,
+ tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
GFP_KERNEL);
if (!tv_format_names)
goto out;
- for (i = 0; i < NUM_TV_MODES; i++)
+ for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
tv_format_names[i] = tv_modes[i].name;
- drm_mode_create_tv_properties(dev, NUM_TV_MODES, tv_format_names);
+ drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
initial_mode);
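[Editor's note: the sizeof(x)/sizeof(x[0]) expressions replaced in intel_sdvo.c and intel_tv.c above are what the kernel's ARRAY_SIZE() macro expands to (minus its compile-time pointer check). A user-space equivalent, shown only to illustrate the idiom; the sample table entries are not the real tv_modes data.]

    #include <stdio.h>

    /* Plain-C version of the idiom; the kernel macro additionally
     * rejects plain pointers at compile time. */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const char *tv_format_names[] = {
            "NTSC-M", "PAL", "SECAM",       /* sample entries only */
    };

    int main(void)
    {
            for (size_t i = 0; i < ARRAY_SIZE(tv_format_names); i++)
                    printf("%zu: %s\n", i, tv_format_names[i]);
            return 0;
    }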
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 3c917fb3a60b..08868ac3048a 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -52,7 +52,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);
* Engine control
*/
-int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
+int mga_do_wait_for_idle(drm_mga_private_t *dev_priv)
{
u32 status = 0;
int i;
@@ -74,7 +74,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
return -EBUSY;
}
-static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
+static int mga_do_dma_reset(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
@@ -102,7 +102,7 @@ static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
* Primary DMA stream
*/
-void mga_do_dma_flush(drm_mga_private_t * dev_priv)
+void mga_do_dma_flush(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
@@ -142,11 +142,10 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
head = MGA_READ(MGA_PRIMADDRESS);
- if (head <= tail) {
+ if (head <= tail)
primary->space = primary->size - primary->tail;
- } else {
+ else
primary->space = head - tail;
- }
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset));
@@ -158,7 +157,7 @@ void mga_do_dma_flush(drm_mga_private_t * dev_priv)
DRM_DEBUG("done.\n");
}
-void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
+void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
u32 head, tail;
@@ -181,11 +180,10 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
head = MGA_READ(MGA_PRIMADDRESS);
- if (head == dev_priv->primary->offset) {
+ if (head == dev_priv->primary->offset)
primary->space = primary->size;
- } else {
+ else
primary->space = head - dev_priv->primary->offset;
- }
DRM_DEBUG(" head = 0x%06lx\n", (unsigned long)(head - dev_priv->primary->offset));
DRM_DEBUG(" tail = 0x%06x\n", primary->tail);
@@ -199,7 +197,7 @@ void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv)
DRM_DEBUG("done.\n");
}
-void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
+void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv)
{
drm_mga_primary_buffer_t *primary = &dev_priv->prim;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -220,11 +218,11 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
* Freelist management
*/
-#define MGA_BUFFER_USED ~0
+#define MGA_BUFFER_USED (~0)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
-static void mga_freelist_print(struct drm_device * dev)
+static void mga_freelist_print(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -245,7 +243,7 @@ static void mga_freelist_print(struct drm_device * dev)
}
#endif
-static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
+static int mga_freelist_init(struct drm_device *dev, drm_mga_private_t *dev_priv)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
@@ -288,7 +286,7 @@ static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_pr
return 0;
}
-static void mga_freelist_cleanup(struct drm_device * dev)
+static void mga_freelist_cleanup(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -308,7 +306,7 @@ static void mga_freelist_cleanup(struct drm_device * dev)
#if 0
/* FIXME: Still needed?
*/
-static void mga_freelist_reset(struct drm_device * dev)
+static void mga_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf;
@@ -356,7 +354,7 @@ static struct drm_buf *mga_freelist_get(struct drm_device * dev)
return NULL;
}
-int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
+int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -391,7 +389,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
* DMA initialization, cleanup
*/
-int mga_driver_load(struct drm_device * dev, unsigned long flags)
+int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -405,8 +403,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
- dev_priv->mmio_base = drm_get_resource_start(dev, 1);
- dev_priv->mmio_size = drm_get_resource_len(dev, 1);
+ dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
+ dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
dev->counters += 3;
dev->types[6] = _DRM_STAT_IRQ;
@@ -439,8 +437,8 @@ int mga_driver_load(struct drm_device * dev, unsigned long flags)
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
-static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -481,11 +479,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
*/
if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
- if (mode.mode & 0x02) {
+ if (mode.mode & 0x02)
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
- } else {
+ else
MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
- }
}
/* Allocate and bind AGP memory. */
@@ -593,8 +590,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
return 0;
}
#else
-static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
return -EINVAL;
}
@@ -614,8 +611,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device * dev,
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
-static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_pci_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
drm_mga_private_t *const dev_priv =
(drm_mga_private_t *) dev->dev_private;
@@ -678,9 +675,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
req.size = dma_bs->secondary_bin_size;
err = drm_addbufs_pci(dev, &req);
- if (!err) {
+ if (!err)
break;
- }
}
if (bin_count == 0) {
@@ -704,8 +700,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
return 0;
}
-static int mga_do_dma_bootstrap(struct drm_device * dev,
- drm_mga_dma_bootstrap_t * dma_bs)
+static int mga_do_dma_bootstrap(struct drm_device *dev,
+ drm_mga_dma_bootstrap_t *dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
int err;
@@ -737,17 +733,15 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
* carve off portions of it for internal uses. The remaining memory
* is returned to user-mode to be used for AGP textures.
*/
- if (is_agp) {
+ if (is_agp)
err = mga_do_agp_dma_bootstrap(dev, dma_bs);
- }
/* If we attempted to initialize the card for AGP DMA but failed,
* clean-up any mess that may have been created.
*/
- if (err) {
+ if (err)
mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
- }
/* Not only do we want to try to initialize PCI cards for PCI DMA,
* but we also try to initialize AGP cards that could not be
@@ -757,9 +751,8 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,
* AGP memory, etc.
*/
- if (!is_agp || err) {
+ if (!is_agp || err)
err = mga_do_pci_dma_bootstrap(dev, dma_bs);
- }
return err;
}
@@ -792,7 +785,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,
return err;
}
-static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
+static int mga_do_init_dma(struct drm_device *dev, drm_mga_init_t *init)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -800,11 +793,10 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
dev_priv = dev->dev_private;
- if (init->sgram) {
+ if (init->sgram)
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
- } else {
+ else
dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
- }
dev_priv->maccess = init->maccess;
dev_priv->fb_cpp = init->fb_cpp;
@@ -975,9 +967,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
dev_priv->agp_handle = 0;
}
- if ((dev->agp != NULL) && dev->agp->acquired) {
+ if ((dev->agp != NULL) && dev->agp->acquired)
err = drm_agp_release(dev);
- }
#endif
}
@@ -998,9 +989,8 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
memset(dev_priv->warp_pipe_phys, 0,
sizeof(dev_priv->warp_pipe_phys));
- if (dev_priv->head != NULL) {
+ if (dev_priv->head != NULL)
mga_freelist_cleanup(dev);
- }
}
return err;
@@ -1017,9 +1007,8 @@ int mga_dma_init(struct drm_device *dev, void *data,
switch (init->func) {
case MGA_INIT_DMA:
err = mga_do_init_dma(dev, init);
- if (err) {
+ if (err)
(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
- }
return err;
case MGA_CLEANUP_DMA:
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
@@ -1047,9 +1036,8 @@ int mga_dma_flush(struct drm_device *dev, void *data,
WRAP_WAIT_WITH_RETURN(dev_priv);
- if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
+ if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL))
mga_do_dma_flush(dev_priv);
- }
if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
@@ -1079,8 +1067,8 @@ int mga_dma_reset(struct drm_device *dev, void *data,
* DMA buffer management
*/
-static int mga_dma_get_buffers(struct drm_device * dev,
- struct drm_file *file_priv, struct drm_dma * d)
+static int mga_dma_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv, struct drm_dma *d)
{
struct drm_buf *buf;
int i;
@@ -1134,9 +1122,8 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
d->granted_count = 0;
- if (d->request_count) {
+ if (d->request_count)
ret = mga_dma_get_buffers(dev, file_priv, d);
- }
return ret;
}
@@ -1144,7 +1131,7 @@ int mga_dma_buffers(struct drm_device *dev, void *data,
/**
* Called just before the module is unloaded.
*/
-int mga_driver_unload(struct drm_device * dev)
+int mga_driver_unload(struct drm_device *dev)
{
kfree(dev->dev_private);
dev->dev_private = NULL;
@@ -1155,12 +1142,12 @@ int mga_driver_unload(struct drm_device * dev)
/**
* Called when the last opener of the device is closed.
*/
-void mga_driver_lastclose(struct drm_device * dev)
+void mga_driver_lastclose(struct drm_device *dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
-int mga_driver_dma_quiescent(struct drm_device * dev)
+int mga_driver_dma_quiescent(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);
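[Editor's note: mga_do_dma_flush() above computes the free space in the primary DMA ring from the hardware head pointer: if the head has not yet passed the tail, everything from the tail up to the wrap point is free, otherwise only the gap between tail and head is. The same bookkeeping in standalone form; ring_space() and the pointer values are arbitrary examples, not driver code.]

    #include <stdio.h>
    #include <stdint.h>

    /* Free space in a ring where 'head' is the hardware read pointer and
     * 'tail' is the next software write offset, both relative to the start. */
    static uint32_t ring_space(uint32_t head, uint32_t tail, uint32_t size)
    {
            if (head <= tail)
                    return size - tail;   /* free run up to the wrap point */
            return head - tail;           /* free gap before the read pointer */
    }

    int main(void)
    {
            printf("%u\n", ring_space(0x100, 0x800, 0x1000)); /* 2048: up to the wrap */
            printf("%u\n", ring_space(0xc00, 0x800, 0x1000)); /* 1024: up to the head */
            return 0;
    }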
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index ddfe16197b59..26d0d8ced80d 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -36,7 +36,7 @@
#include "drm_pciids.h"
-static int mga_driver_device_is_agp(struct drm_device * dev);
+static int mga_driver_device_is_agp(struct drm_device *dev);
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
@@ -119,7 +119,7 @@ MODULE_LICENSE("GPL and additional rights");
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
-static int mga_driver_device_is_agp(struct drm_device * dev)
+static int mga_driver_device_is_agp(struct drm_device *dev)
{
const struct pci_dev *const pdev = dev->pdev;
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index be6c6b9b0e89..1084fa4d261b 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -164,59 +164,59 @@ extern int mga_dma_reset(struct drm_device *dev, void *data,
extern int mga_dma_buffers(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
-extern int mga_driver_unload(struct drm_device * dev);
-extern void mga_driver_lastclose(struct drm_device * dev);
-extern int mga_driver_dma_quiescent(struct drm_device * dev);
+extern int mga_driver_unload(struct drm_device *dev);
+extern void mga_driver_lastclose(struct drm_device *dev);
+extern int mga_driver_dma_quiescent(struct drm_device *dev);
-extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
+extern int mga_do_wait_for_idle(drm_mga_private_t *dev_priv);
-extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
-extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
-extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_flush(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_start(drm_mga_private_t *dev_priv);
+extern void mga_do_dma_wrap_end(drm_mga_private_t *dev_priv);
-extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
+extern int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf);
/* mga_warp.c */
-extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
-extern int mga_warp_init(drm_mga_private_t * dev_priv);
+extern int mga_warp_install_microcode(drm_mga_private_t *dev_priv);
+extern int mga_warp_init(drm_mga_private_t *dev_priv);
/* mga_irq.c */
extern int mga_enable_vblank(struct drm_device *dev, int crtc);
extern void mga_disable_vblank(struct drm_device *dev, int crtc);
extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern void mga_driver_irq_preinstall(struct drm_device *dev);
extern int mga_driver_irq_postinstall(struct drm_device *dev);
-extern void mga_driver_irq_uninstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device *dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
#if defined(__linux__) && defined(__alpha__)
-#define MGA_BASE( reg ) ((unsigned long)(dev_priv->mmio->handle))
-#define MGA_ADDR( reg ) (MGA_BASE(reg) + reg)
+#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
+#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
-#define MGA_DEREF( reg ) *(volatile u32 *)MGA_ADDR( reg )
-#define MGA_DEREF8( reg ) *(volatile u8 *)MGA_ADDR( reg )
+#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
+#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
-#define MGA_READ( reg ) (_MGA_READ((u32 *)MGA_ADDR(reg)))
-#define MGA_READ8( reg ) (_MGA_READ((u8 *)MGA_ADDR(reg)))
-#define MGA_WRITE( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF( reg ) = val; } while (0)
-#define MGA_WRITE8( reg, val ) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8( reg ) = val; } while (0)
+#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
+#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
+#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
+#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
-static inline u32 _MGA_READ(u32 * addr)
+static inline u32 _MGA_READ(u32 *addr)
{
DRM_MEMORYBARRIER();
return *(volatile u32 *)addr;
}
#else
-#define MGA_READ8( reg ) DRM_READ8(dev_priv->mmio, (reg))
-#define MGA_READ( reg ) DRM_READ32(dev_priv->mmio, (reg))
-#define MGA_WRITE8( reg, val ) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define MGA_WRITE( reg, val ) DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
+#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
+#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
+#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
#endif
#define DWGREG0 0x1c00
@@ -233,40 +233,39 @@ static inline u32 _MGA_READ(u32 * addr)
* Helper macros...
*/
-#define MGA_EMIT_STATE( dev_priv, dirty ) \
+#define MGA_EMIT_STATE(dev_priv, dirty) \
do { \
- if ( (dirty) & ~MGA_UPLOAD_CLIPRECTS ) { \
- if ( dev_priv->chipset >= MGA_CARD_TYPE_G400 ) { \
- mga_g400_emit_state( dev_priv ); \
- } else { \
- mga_g200_emit_state( dev_priv ); \
- } \
+ if ((dirty) & ~MGA_UPLOAD_CLIPRECTS) { \
+ if (dev_priv->chipset >= MGA_CARD_TYPE_G400) \
+ mga_g400_emit_state(dev_priv); \
+ else \
+ mga_g200_emit_state(dev_priv); \
} \
} while (0)
-#define WRAP_TEST_WITH_RETURN( dev_priv ) \
+#define WRAP_TEST_WITH_RETURN(dev_priv) \
do { \
- if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( mga_is_idle( dev_priv ) ) { \
- mga_do_dma_wrap_end( dev_priv ); \
- } else if ( dev_priv->prim.space < \
- dev_priv->prim.high_mark ) { \
- if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "wrap...\n"); \
- return -EBUSY; \
+ if (test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (mga_is_idle(dev_priv)) { \
+ mga_do_dma_wrap_end(dev_priv); \
+ } else if (dev_priv->prim.space < \
+ dev_priv->prim.high_mark) { \
+ if (MGA_DMA_DEBUG) \
+ DRM_INFO("wrap...\n"); \
+ return -EBUSY; \
} \
} \
} while (0)
-#define WRAP_WAIT_WITH_RETURN( dev_priv ) \
+#define WRAP_WAIT_WITH_RETURN(dev_priv) \
do { \
- if ( test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
- if ( MGA_DMA_DEBUG ) \
- DRM_INFO( "wrap...\n"); \
- return -EBUSY; \
+ if (test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (mga_do_wait_for_idle(dev_priv) < 0) { \
+ if (MGA_DMA_DEBUG) \
+ DRM_INFO("wrap...\n"); \
+ return -EBUSY; \
} \
- mga_do_dma_wrap_end( dev_priv ); \
+ mga_do_dma_wrap_end(dev_priv); \
} \
} while (0)
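[Editor's note: the cleaned-up WRAP_* and DMA macros above keep the do { ... } while (0) wrapper, which is what lets a multi-statement macro behave as a single statement after an unbraced if. A minimal illustration; the macro LOG_AND_COUNT() is invented for the example.]

    #include <stdio.h>

    /* Without the do/while(0) wrapper, only the first statement would be
     * governed by the if below and the second would always run. */
    #define LOG_AND_COUNT(msg, counter)        \
            do {                               \
                    printf("%s\n", (msg));     \
                    (counter)++;               \
            } while (0)

    int main(void)
    {
            int errors = 0;
            int failed = 0;

            if (failed)
                    LOG_AND_COUNT("operation failed", errors);

            printf("errors = %d\n", errors);   /* stays 0 */
            return 0;
    }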
@@ -280,12 +279,12 @@ do { \
#define DMA_BLOCK_SIZE (5 * sizeof(u32))
-#define BEGIN_DMA( n ) \
+#define BEGIN_DMA(n) \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA( %d )\n", (n) ); \
- DRM_INFO( " space=0x%x req=0x%Zx\n", \
- dev_priv->prim.space, (n) * DMA_BLOCK_SIZE ); \
+ if (MGA_VERBOSE) { \
+ DRM_INFO("BEGIN_DMA(%d)\n", (n)); \
+ DRM_INFO(" space=0x%x req=0x%Zx\n", \
+ dev_priv->prim.space, (n) * DMA_BLOCK_SIZE); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
@@ -293,9 +292,9 @@ do { \
#define BEGIN_DMA_WRAP() \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "BEGIN_DMA()\n" ); \
- DRM_INFO( " space=0x%x\n", dev_priv->prim.space ); \
+ if (MGA_VERBOSE) { \
+ DRM_INFO("BEGIN_DMA()\n"); \
+ DRM_INFO(" space=0x%x\n", dev_priv->prim.space); \
} \
prim = dev_priv->prim.start; \
write = dev_priv->prim.tail; \
@@ -304,72 +303,68 @@ do { \
#define ADVANCE_DMA() \
do { \
dev_priv->prim.tail = write; \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( "ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
- write, dev_priv->prim.space ); \
- } \
+ if (MGA_VERBOSE) \
+ DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
+ write, dev_priv->prim.space); \
} while (0)
#define FLUSH_DMA() \
do { \
- if ( 0 ) { \
- DRM_INFO( "\n" ); \
- DRM_INFO( " tail=0x%06x head=0x%06lx\n", \
- dev_priv->prim.tail, \
- (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
- dev_priv->primary->offset)); \
+ if (0) { \
+ DRM_INFO("\n"); \
+ DRM_INFO(" tail=0x%06x head=0x%06lx\n", \
+ dev_priv->prim.tail, \
+ (unsigned long)(MGA_READ(MGA_PRIMADDRESS) - \
+ dev_priv->primary->offset)); \
} \
- if ( !test_bit( 0, &dev_priv->prim.wrapped ) ) { \
- if ( dev_priv->prim.space < \
- dev_priv->prim.high_mark ) { \
- mga_do_dma_wrap_start( dev_priv ); \
- } else { \
- mga_do_dma_flush( dev_priv ); \
- } \
+ if (!test_bit(0, &dev_priv->prim.wrapped)) { \
+ if (dev_priv->prim.space < dev_priv->prim.high_mark) \
+ mga_do_dma_wrap_start(dev_priv); \
+ else \
+ mga_do_dma_flush(dev_priv); \
} \
} while (0)
/* Never use this, always use DMA_BLOCK(...) for primary DMA output.
*/
-#define DMA_WRITE( offset, val ) \
+#define DMA_WRITE(offset, val) \
do { \
- if ( MGA_VERBOSE ) { \
- DRM_INFO( " DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
- (u32)(val), write + (offset) * sizeof(u32) ); \
- } \
+ if (MGA_VERBOSE) \
+ DRM_INFO(" DMA_WRITE( 0x%08x ) at 0x%04Zx\n", \
+ (u32)(val), write + (offset) * sizeof(u32)); \
*(volatile u32 *)(prim + write + (offset) * sizeof(u32)) = val; \
} while (0)
-#define DMA_BLOCK( reg0, val0, reg1, val1, reg2, val2, reg3, val3 ) \
+#define DMA_BLOCK(reg0, val0, reg1, val1, reg2, val2, reg3, val3) \
do { \
- DMA_WRITE( 0, ((DMAREG( reg0 ) << 0) | \
- (DMAREG( reg1 ) << 8) | \
- (DMAREG( reg2 ) << 16) | \
- (DMAREG( reg3 ) << 24)) ); \
- DMA_WRITE( 1, val0 ); \
- DMA_WRITE( 2, val1 ); \
- DMA_WRITE( 3, val2 ); \
- DMA_WRITE( 4, val3 ); \
+ DMA_WRITE(0, ((DMAREG(reg0) << 0) | \
+ (DMAREG(reg1) << 8) | \
+ (DMAREG(reg2) << 16) | \
+ (DMAREG(reg3) << 24))); \
+ DMA_WRITE(1, val0); \
+ DMA_WRITE(2, val1); \
+ DMA_WRITE(3, val2); \
+ DMA_WRITE(4, val3); \
write += DMA_BLOCK_SIZE; \
} while (0)
/* Buffer aging via primary DMA stream head pointer.
*/
-#define SET_AGE( age, h, w ) \
+#define SET_AGE(age, h, w) \
do { \
(age)->head = h; \
(age)->wrap = w; \
} while (0)
-#define TEST_AGE( age, h, w ) ( (age)->wrap < w || \
- ( (age)->wrap == w && \
- (age)->head < h ) )
+#define TEST_AGE(age, h, w) ((age)->wrap < w || \
+ ((age)->wrap == w && \
+ (age)->head < h))
-#define AGE_BUFFER( buf_priv ) \
+#define AGE_BUFFER(buf_priv) \
do { \
drm_mga_freelist_t *entry = (buf_priv)->list_entry; \
- if ( (buf_priv)->dispatched ) { \
+ if ((buf_priv)->dispatched) { \
entry->age.head = (dev_priv->prim.tail + \
dev_priv->primary->offset); \
entry->age.wrap = dev_priv->sarea_priv->last_wrap; \
@@ -681,7 +676,7 @@ do { \
/* Simple idle test.
*/
-static __inline__ int mga_is_idle(drm_mga_private_t * dev_priv)
+static __inline__ int mga_is_idle(drm_mga_private_t *dev_priv)
{
u32 status = MGA_READ(MGA_STATUS) & MGA_ENGINE_IDLE_MASK;
return (status == MGA_ENDPRDMASTS);
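[Editor's note: the TEST_AGE()/SET_AGE() macros above age buffers against the primary DMA head pointer plus a wrap counter: a buffer is idle once the stream has wrapped past the recorded wrap count or, within the same wrap, advanced past the recorded head. A plain-C sketch of that comparison; the struct, function name and values are invented for the example.]

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    struct age {
            uint32_t head;  /* stream offset recorded when the buffer was dispatched */
            uint32_t wrap;  /* wrap counter recorded at the same time */
    };

    /* True when the recorded position is older than (h, w), i.e. the
     * hardware has consumed everything up to the buffer's commands. */
    static bool age_passed(const struct age *age, uint32_t h, uint32_t w)
    {
            return age->wrap < w || (age->wrap == w && age->head < h);
    }

    int main(void)
    {
            struct age a = { .head = 0x4000, .wrap = 2 };

            printf("%d\n", age_passed(&a, 0x5000, 2)); /* 1: same wrap, head moved past */
            printf("%d\n", age_passed(&a, 0x1000, 2)); /* 0: still pending */
            printf("%d\n", age_passed(&a, 0x1000, 3)); /* 1: a full wrap has happened */
            return 0;
    }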
diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
index daa6041a483a..2581202297e4 100644
--- a/drivers/gpu/drm/mga/mga_irq.c
+++ b/drivers/gpu/drm/mga/mga_irq.c
@@ -76,9 +76,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
/* In addition to clearing the interrupt-pending bit, we
* have to write to MGA_PRIMEND to re-start the DMA operation.
*/
- if ((prim_start & ~0x03) != (prim_end & ~0x03)) {
+ if ((prim_start & ~0x03) != (prim_end & ~0x03))
MGA_WRITE(MGA_PRIMEND, prim_end);
- }
atomic_inc(&dev_priv->last_fence_retired);
DRM_WAKEUP(&dev_priv->fence_queue);
@@ -120,7 +119,7 @@ void mga_disable_vblank(struct drm_device *dev, int crtc)
/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
}
-int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
@@ -139,7 +138,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
return ret;
}
-void mga_driver_irq_preinstall(struct drm_device * dev)
+void mga_driver_irq_preinstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -162,7 +161,7 @@ int mga_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void mga_driver_irq_uninstall(struct drm_device * dev)
+void mga_driver_irq_uninstall(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index a53b848e0f17..fff82045c427 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -41,8 +41,8 @@
* DMA hardware state programming functions
*/
-static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
- struct drm_clip_rect * box)
+static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
+ struct drm_clip_rect *box)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -66,7 +66,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -89,7 +89,7 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -116,7 +116,7 @@ static __inline__ void mga_g400_emit_context(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -144,7 +144,7 @@ static __inline__ void mga_g200_emit_tex0(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
@@ -184,7 +184,7 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -223,7 +223,7 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->warp_pipe;
@@ -250,7 +250,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
+static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int pipe = sarea_priv->warp_pipe;
@@ -327,7 +327,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)
ADVANCE_DMA();
}
-static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
+static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -348,7 +348,7 @@ static void mga_g200_emit_state(drm_mga_private_t * dev_priv)
}
}
-static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
+static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -381,7 +381,7 @@ static void mga_g400_emit_state(drm_mga_private_t * dev_priv)
/* Disallow all write destinations except the front and backbuffer.
*/
-static int mga_verify_context(drm_mga_private_t * dev_priv)
+static int mga_verify_context(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -400,7 +400,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
/* Disallow texture reads from PCI space.
*/
-static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
+static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
@@ -417,7 +417,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
return 0;
}
-static int mga_verify_state(drm_mga_private_t * dev_priv)
+static int mga_verify_state(drm_mga_private_t *dev_priv)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -446,7 +446,7 @@ static int mga_verify_state(drm_mga_private_t * dev_priv)
return (ret == 0);
}
-static int mga_verify_iload(drm_mga_private_t * dev_priv,
+static int mga_verify_iload(drm_mga_private_t *dev_priv,
unsigned int dstorg, unsigned int length)
{
if (dstorg < dev_priv->texture_offset ||
@@ -465,7 +465,7 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
return 0;
}
-static int mga_verify_blit(drm_mga_private_t * dev_priv,
+static int mga_verify_blit(drm_mga_private_t *dev_priv,
unsigned int srcorg, unsigned int dstorg)
{
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
@@ -480,7 +480,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
*
*/
-static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
+static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -568,7 +568,7 @@ static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * cl
FLUSH_DMA();
}
-static void mga_dma_dispatch_swap(struct drm_device * dev)
+static void mga_dma_dispatch_swap(struct drm_device *dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -622,7 +622,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)
DRM_DEBUG("... done.\n");
}
-static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -669,7 +669,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu
FLUSH_DMA();
}
-static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
+static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
unsigned int start, unsigned int end)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -718,7 +718,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b
/* This copies a 64 byte aligned agp region to the framebuffer with a
* standard blit, the ioctl needs to do checking.
*/
-static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
+static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
unsigned int dstorg, unsigned int length)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -766,7 +766,7 @@ static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf
FLUSH_DMA();
}
-static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
+static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -801,9 +801,8 @@ static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit
int w = pbox[i].x2 - pbox[i].x1 - 1;
int start;
- if (blit->ydir == -1) {
+ if (blit->ydir == -1)
srcy = blit->height - srcy - 1;
- }
start = srcy * blit->src_pitch + srcx;
diff --git a/drivers/gpu/drm/mga/mga_warp.c b/drivers/gpu/drm/mga/mga_warp.c
index 9aad4847afdf..f172bd5c257f 100644
--- a/drivers/gpu/drm/mga/mga_warp.c
+++ b/drivers/gpu/drm/mga/mga_warp.c
@@ -46,7 +46,7 @@ MODULE_FIRMWARE(FIRMWARE_G400);
#define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN)
-int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
+int mga_warp_install_microcode(drm_mga_private_t *dev_priv)
{
unsigned char *vcbase = dev_priv->warp->handle;
unsigned long pcbase = dev_priv->warp->offset;
@@ -133,7 +133,7 @@ out:
#define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE)
-int mga_warp_init(drm_mga_private_t * dev_priv)
+int mga_warp_init(drm_mga_private_t *dev_priv)
{
u32 wmisc;
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 1175429da102..d2d28048efb2 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -1,6 +1,6 @@
config DRM_NOUVEAU
tristate "Nouveau (nVidia) cards"
- depends on DRM
+ depends on DRM && PCI
select FW_LOADER
select DRM_KMS_HELPER
select DRM_TTM
@@ -41,4 +41,13 @@ config DRM_I2C_CH7006
This driver is currently only useful if you're also using
the nouveau driver.
+
+config DRM_I2C_SIL164
+ tristate "Silicon Image sil164 TMDS transmitter"
+ default m if DRM_NOUVEAU
+ help
+ Support for sil164 and similar single-link (or dual-link
+ when used in pairs) TMDS transmitters, used in some nVidia
+ video cards.
+
endmenu
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index acd31ed861ef..e9b06e4ef2a2 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,20 +9,20 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o nouveau_grctx.o \
+ nouveau_dp.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
- nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
+ nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o nvc0_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o \
+ nv40_graph.o nv50_graph.o nvc0_graph.o \
nv40_grctx.o nv50_grctx.o \
- nv04_instmem.o nv50_instmem.o \
+ nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
- nv17_gpio.o nv50_gpio.o \
+ nv10_gpio.o nv50_gpio.o \
nv50_calc.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index d4bcca8a5133..c17a055ee3e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -3,6 +3,7 @@
#include <linux/slab.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
+#include <acpi/video.h>
#include "drmP.h"
#include "drm.h"
@@ -11,6 +12,7 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nv50_display.h"
+#include "nouveau_connector.h"
#include <linux/vga_switcheroo.h>
@@ -42,7 +44,7 @@ static const char nouveau_dsm_muid[] = {
0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
};
-static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
+static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
@@ -259,3 +261,37 @@ int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
{
return nouveau_rom_call(nouveau_dsm_priv.rom_handle, bios, offset, len);
}
+
+int
+nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
+{
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct acpi_device *acpidev;
+ acpi_handle handle;
+ int type, ret;
+ void *edid;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ type = ACPI_VIDEO_DISPLAY_LCD;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ ret = acpi_bus_get_device(handle, &acpidev);
+ if (ret)
+ return -ENODEV;
+
+ ret = acpi_video_get_edid(acpidev, type, -1, &edid);
+ if (ret < 0)
+ return ret;
+
+ nv_connector->edid = edid;
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index e492919faf44..0b69a9628c95 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -28,6 +28,8 @@
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
+#include <linux/io-mapping.h>
+
/* these defines are made up */
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
@@ -209,20 +211,20 @@ static struct methods shadow_methods[] = {
{ "PCIROM", load_vbios_pci, true },
{ "ACPI", load_vbios_acpi, true },
};
+#define NUM_SHADOW_METHODS ARRAY_SIZE(shadow_methods)
static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
{
- const int nr_methods = ARRAY_SIZE(shadow_methods);
struct methods *methods = shadow_methods;
int testscore = 3;
- int scores[nr_methods], i;
+ int scores[NUM_SHADOW_METHODS], i;
if (nouveau_vbios) {
- for (i = 0; i < nr_methods; i++)
+ for (i = 0; i < NUM_SHADOW_METHODS; i++)
if (!strcasecmp(nouveau_vbios, methods[i].desc))
break;
- if (i < nr_methods) {
+ if (i < NUM_SHADOW_METHODS) {
NV_INFO(dev, "Attempting to use BIOS image from %s\n",
methods[i].desc);
@@ -234,7 +236,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
NV_ERROR(dev, "VBIOS source \'%s\' invalid\n", nouveau_vbios);
}
- for (i = 0; i < nr_methods; i++) {
+ for (i = 0; i < NUM_SHADOW_METHODS; i++) {
NV_TRACE(dev, "Attempting to load BIOS image from %s\n",
methods[i].desc);
data[0] = data[1] = 0; /* avoid reuse of previous image */
@@ -245,7 +247,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data)
}
while (--testscore > 0) {
- for (i = 0; i < nr_methods; i++) {
+ for (i = 0; i < NUM_SHADOW_METHODS; i++) {
if (scores[i] == testscore) {
NV_TRACE(dev, "Using BIOS image from %s\n",
methods[i].desc);
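[Editor's note: NVShadowVBIOS() above probes every shadow method, records a score for each, and then takes the best-scoring one, preferring a perfect score and falling back to progressively weaker results. A compact standalone version of that selection loop; the method list and scores are fabricated for the sketch.]

    #include <stdio.h>

    struct method {
            const char *desc;
            int score;      /* filled in by an earlier probing pass */
    };

    int main(void)
    {
            struct method methods[] = {
                    { "PRAMIN", 2 },
                    { "PROM",   0 },
                    { "PCIROM", 3 },
                    { "ACPI",   1 },
            };
            const int nr = sizeof(methods) / sizeof(methods[0]);

            /* Prefer a perfect score, then progressively weaker results. */
            for (int testscore = 3; testscore > 0; testscore--) {
                    for (int i = 0; i < nr; i++) {
                            if (methods[i].score == testscore) {
                                    printf("using image from %s\n",
                                           methods[i].desc);
                                    return 0;
                            }
                    }
            }
            printf("no usable image\n");
            return 1;
    }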
@@ -920,7 +922,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return -EINVAL;
+ return len;
}
configval = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1022,7 +1024,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return -EINVAL;
+ return len;
}
freq = ROM16(bios->data[offset + 12 + config * 2]);
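[Editor's note: several init-table handlers above were changed to return the opcode length instead of a negative errno when a lookup fails, so the parser logs the problem and skips just that opcode rather than aborting the whole script. A toy parser showing the pattern; the handler, the table bytes and the 0x71 end sentinel are illustrative only.]

    #include <stdio.h>
    #include <stdint.h>

    /* Each handler returns how many bytes its opcode occupied; returning the
     * length even on a soft failure lets the caller continue with the next
     * opcode instead of giving up on the whole table. */
    static int handle_sleep(const uint8_t *data, unsigned offset)
    {
            unsigned ms = data[offset + 1] | (data[offset + 2] << 8);

            printf("0x%04X: would sleep %u ms\n", offset, ms);
            return 3;
    }

    int main(void)
    {
            static const uint8_t table[] = {
                    0x57, 0x0a, 0x00,   /* sleep 10 ms */
                    0x57, 0xf4, 0x01,   /* sleep 500 ms */
                    0x71,               /* end-of-table sentinel for this demo */
            };
            unsigned offset = 0;

            while (table[offset] != 0x71) {
                    int len = handle_sleep(table, offset);

                    if (len <= 0)
                            break;      /* only hard errors stop the parser */
                    offset += len;
            }
            return 0;
    }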
@@ -1194,7 +1196,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
if (!dpe) {
NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
- return -EINVAL;
+ return 3;
}
switch (cond) {
@@ -1218,12 +1220,16 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
int ret;
auxch = nouveau_i2c_find(dev, bios->display.output->i2c_index);
- if (!auxch)
- return -ENODEV;
+ if (!auxch) {
+ NV_ERROR(dev, "0x%04X: couldn't get auxch\n", offset);
+ return 3;
+ }
ret = nouveau_dp_auxch(auxch, 9, 0xd, &cond, 1);
- if (ret)
- return ret;
+ if (ret) {
+ NV_ERROR(dev, "0x%04X: auxch rd fail: %d\n", offset, ret);
+ return 3;
+ }
if (cond & 1)
iexec->execute = false;
@@ -1392,7 +1398,7 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset,
NV_ERROR(bios->dev,
"0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n",
offset, config, count);
- return -EINVAL;
+ return len;
}
freq = ROM32(bios->data[offset + 11 + config * 4]);
@@ -1452,6 +1458,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* "mask n" and OR it with "data n" before writing it back to the device
*/
+ struct drm_device *dev = bios->dev;
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
@@ -1466,9 +1473,11 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
"Count: 0x%02X\n",
offset, i2c_index, i2c_address, count);
- chan = init_i2c_device_find(bios->dev, i2c_index);
- if (!chan)
- return -ENODEV;
+ chan = init_i2c_device_find(dev, i2c_index);
+ if (!chan) {
+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
uint8_t reg = bios->data[offset + 4 + i * 3];
@@ -1479,8 +1488,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_READ, reg,
I2C_SMBUS_BYTE_DATA, &val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ NV_ERROR(dev, "0x%04X: i2c rd fail: %d\n", offset, ret);
+ return len;
+ }
BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
"Mask: 0x%02X, Data: 0x%02X\n",
@@ -1494,8 +1505,10 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_WRITE, reg,
I2C_SMBUS_BYTE_DATA, &val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
+ return len;
+ }
}
return len;
@@ -1520,6 +1533,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* "DCB I2C table entry index", set the register to "data n"
*/
+ struct drm_device *dev = bios->dev;
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
@@ -1534,9 +1548,11 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
"Count: 0x%02X\n",
offset, i2c_index, i2c_address, count);
- chan = init_i2c_device_find(bios->dev, i2c_index);
- if (!chan)
- return -ENODEV;
+ chan = init_i2c_device_find(dev, i2c_index);
+ if (!chan) {
+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
uint8_t reg = bios->data[offset + 4 + i * 2];
@@ -1553,8 +1569,10 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
I2C_SMBUS_WRITE, reg,
I2C_SMBUS_BYTE_DATA, &val);
- if (ret < 0)
- return ret;
+ if (ret < 0) {
+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
+ return len;
+ }
}
return len;
@@ -1577,6 +1595,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* address" on the I2C bus given by "DCB I2C table entry index"
*/
+ struct drm_device *dev = bios->dev;
uint8_t i2c_index = bios->data[offset + 1];
uint8_t i2c_address = bios->data[offset + 2] >> 1;
uint8_t count = bios->data[offset + 3];
@@ -1584,7 +1603,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
struct nouveau_i2c_chan *chan;
struct i2c_msg msg;
uint8_t data[256];
- int i;
+ int ret, i;
if (!iexec->execute)
return len;
@@ -1593,9 +1612,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
"Count: 0x%02X\n",
offset, i2c_index, i2c_address, count);
- chan = init_i2c_device_find(bios->dev, i2c_index);
- if (!chan)
- return -ENODEV;
+ chan = init_i2c_device_find(dev, i2c_index);
+ if (!chan) {
+ NV_ERROR(dev, "0x%04X: i2c bus not found\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
data[i] = bios->data[offset + 4 + i];
@@ -1608,8 +1629,11 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
msg.flags = 0;
msg.len = count;
msg.buf = data;
- if (i2c_transfer(&chan->adapter, &msg, 1) != 1)
- return -EIO;
+ ret = i2c_transfer(&chan->adapter, &msg, 1);
+ if (ret != 1) {
+ NV_ERROR(dev, "0x%04X: i2c wr fail: %d\n", offset, ret);
+ return len;
+ }
}
return len;
@@ -1633,6 +1657,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* used -- see get_tmds_index_reg()
*/
+ struct drm_device *dev = bios->dev;
uint8_t mlv = bios->data[offset + 1];
uint32_t tmdsaddr = bios->data[offset + 2];
uint8_t mask = bios->data[offset + 3];
@@ -1647,8 +1672,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
offset, mlv, tmdsaddr, mask, data);
reg = get_tmds_index_reg(bios->dev, mlv);
- if (!reg)
- return -EINVAL;
+ if (!reg) {
+ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
+ return 5;
+ }
bios_wr32(bios, reg,
tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE);
@@ -1678,6 +1705,7 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
* register is used -- see get_tmds_index_reg()
*/
+ struct drm_device *dev = bios->dev;
uint8_t mlv = bios->data[offset + 1];
uint8_t count = bios->data[offset + 2];
int len = 3 + count * 2;
@@ -1691,8 +1719,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset,
offset, mlv, count);
reg = get_tmds_index_reg(bios->dev, mlv);
- if (!reg)
- return -EINVAL;
+ if (!reg) {
+ NV_ERROR(dev, "0x%04X: no tmds_index_reg\n", offset);
+ return len;
+ }
for (i = 0; i < count; i++) {
uint8_t tmdsaddr = bios->data[offset + 3 + i * 2];
@@ -1898,6 +1928,31 @@ init_condition_time(struct nvbios *bios, uint16_t offset,
}
static int
+init_ltime(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_LTIME opcode: 0x57 ('W')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (16 bit): time
+ *
+ * Sleep for "time" milliseconds.
+ */
+
+ unsigned time = ROM16(bios->data[offset + 1]);
+
+ if (!iexec->execute)
+ return 3;
+
+ BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X milliseconds\n",
+ offset, time);
+
+ msleep(time);
+
+ return 3;
+}
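A hypothetical byte sequence makes the INIT_LTIME layout above concrete (illustrative values, not taken from a real VBIOS):

	/* 0x57 0xf4 0x01: opcode 0x57, time = ROM16() of the little-endian
	 * bytes f4 01 = 0x01f4, so the handler sleeps for 500 ms and
	 * returns 3 to advance the parser past the opcode. */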
+
+static int
init_zm_reg_sequence(struct nvbios *bios, uint16_t offset,
struct init_exec *iexec)
{
@@ -1965,6 +2020,64 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
}
static int
+init_i2c_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_I2C_IF opcode: 0x5E ('^')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (8 bit): I2C register
+ * offset + 4 (8 bit): mask
+ * offset + 5 (8 bit): data
+ *
+ * Read the register given by "I2C register" on the device addressed
+ * by "I2C slave address" on the I2C bus given by "DCB I2C table
+ * entry index". Compare the result AND "mask" to "data".
+ * If they're not equal, skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
+ uint8_t reg = bios->data[offset + 3];
+ uint8_t mask = bios->data[offset + 4];
+ uint8_t data = bios->data[offset + 5];
+ struct nouveau_i2c_chan *chan;
+ union i2c_smbus_data val;
+ int ret;
+
+ /* no execute check by design */
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
+ offset, i2c_index, i2c_address);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return -ENODEV;
+
+ ret = i2c_smbus_xfer(&chan->adapter, i2c_address, 0,
+ I2C_SMBUS_READ, reg,
+ I2C_SMBUS_BYTE_DATA, &val);
+ if (ret < 0) {
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: [no device], "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reg, mask, data);
+ iexec->execute = 0;
+ return 6;
+ }
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reg, val.byte, mask, data);
+
+ iexec->execute = ((val.byte & mask) == data);
+
+ return 6;
+}
+
+static int
init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
/*
@@ -2039,6 +2152,325 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 5;
}
+static inline void
+bios_md32(struct nvbios *bios, uint32_t reg,
+ uint32_t mask, uint32_t val)
+{
+ bios_wr32(bios, reg, (bios_rd32(bios, reg) & ~mask) | val);
+}
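bios_md32() above is a plain read-modify-write helper: it clears the bits set in "mask" and ORs in "val". A minimal usage sketch, with register/field names taken from this patch and the value chosen purely for illustration:

	/* Force the RAM_AMOUNT field of NV04_PFB_BOOT_0 to the 8MB
	 * encoding, leaving every other bit of the register untouched. */
	bios_md32(bios, NV04_PFB_BOOT_0,
		  NV04_PFB_BOOT_0_RAM_AMOUNT,
		  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);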
+
+static uint32_t
+peek_fb(struct drm_device *dev, struct io_mapping *fb,
+ uint32_t off)
+{
+ uint32_t val = 0;
+
+ if (off < pci_resource_len(dev->pdev, 1)) {
+ uint32_t __iomem *p =
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+
+ val = ioread32(p + (off & ~PAGE_MASK));
+
+ io_mapping_unmap_atomic(p, KM_USER0);
+ }
+
+ return val;
+}
+
+static void
+poke_fb(struct drm_device *dev, struct io_mapping *fb,
+ uint32_t off, uint32_t val)
+{
+ if (off < pci_resource_len(dev->pdev, 1)) {
+ uint32_t __iomem *p =
+ io_mapping_map_atomic_wc(fb, off & PAGE_MASK, KM_USER0);
+
+ iowrite32(val, p + (off & ~PAGE_MASK));
+ wmb();
+
+ io_mapping_unmap_atomic(p, KM_USER0);
+ }
+}
+
+static inline bool
+read_back_fb(struct drm_device *dev, struct io_mapping *fb,
+ uint32_t off, uint32_t val)
+{
+ poke_fb(dev, fb, off, val);
+ return val == peek_fb(dev, fb, off);
+}
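peek_fb()/poke_fb()/read_back_fb() are the probing primitive the nvxx_init_compute_mem() routines below are built on: write a test pattern at some offset and check whether it survives a read-back. A hedged sketch of the idea, with the 8MB boundary picked only for illustration:

	/* If no RAM responds at offset 0x800000 the pattern doesn't come
	 * back, so the advertised amount can be capped at 8MB. */
	if (!read_back_fb(dev, fb, 0x800000, 0xdeadbeef))
		bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
			  NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);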
+
+static int
+nv04_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ uint32_t patt = 0xdeadbeef;
+ struct io_mapping *fb;
+ int i;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ /* Sequencer and refresh off */
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
+ bios_md32(bios, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF);
+
+ bios_md32(bios, NV04_PFB_BOOT_0, ~0,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_16MB |
+ NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+ NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT);
+
+ for (i = 0; i < 4; i++)
+ poke_fb(dev, fb, 4 * i, patt);
+
+ poke_fb(dev, fb, 0x400000, patt + 1);
+
+ if (peek_fb(dev, fb, 0) == patt + 1) {
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+ NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT);
+ bios_md32(bios, NV04_PFB_DEBUG_0,
+ NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+ for (i = 0; i < 4; i++)
+ poke_fb(dev, fb, 4 * i, patt);
+
+ if ((peek_fb(dev, fb, 0xc) & 0xffff) != (patt & 0xffff))
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+ } else if ((peek_fb(dev, fb, 0xc) & 0xffff0000) !=
+ (patt & 0xffff0000)) {
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_WIDTH_128 |
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+ } else if (peek_fb(dev, fb, 0) != patt) {
+ if (read_back_fb(dev, fb, 0x800000, patt))
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+ else
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE,
+ NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT);
+
+ } else if (!read_back_fb(dev, fb, 0x800000, patt)) {
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+ }
+
+ /* Refresh on, sequencer on */
+ bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
+
+ io_mapping_free(fb);
+ return 0;
+}
+
+static const uint8_t *
+nv05_memory_config(struct nvbios *bios)
+{
+ /* Defaults for BIOSes lacking a memory config table */
+ static const uint8_t default_config_tab[][2] = {
+ { 0x24, 0x00 },
+ { 0x28, 0x00 },
+ { 0x24, 0x01 },
+ { 0x1f, 0x00 },
+ { 0x0f, 0x00 },
+ { 0x17, 0x00 },
+ { 0x06, 0x00 },
+ { 0x00, 0x00 }
+ };
+ int i = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) &
+ NV_PEXTDEV_BOOT_0_RAMCFG) >> 2;
+
+ if (bios->legacy.mem_init_tbl_ptr)
+ return &bios->data[bios->legacy.mem_init_tbl_ptr + 2 * i];
+ else
+ return default_config_tab[i];
+}
+
+static int
+nv05_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ const uint8_t *ramcfg = nv05_memory_config(bios);
+ uint32_t patt = 0xdeadbeef;
+ struct io_mapping *fb;
+ int i, v;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ /* Sequencer off */
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) | 0x20);
+
+ if (bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_UMA_ENABLE)
+ goto out;
+
+ bios_md32(bios, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0);
+
+ /* If present load the hardcoded scrambling table */
+ if (bios->legacy.mem_init_tbl_ptr) {
+ uint32_t *scramble_tab = (uint32_t *)&bios->data[
+ bios->legacy.mem_init_tbl_ptr + 0x10];
+
+ for (i = 0; i < 8; i++)
+ bios_wr32(bios, NV04_PFB_SCRAMBLE(i),
+ ROM32(scramble_tab[i]));
+ }
+
+ /* Set memory type/width/length defaults depending on the straps */
+ bios_md32(bios, NV04_PFB_BOOT_0, 0x3f, ramcfg[0]);
+
+ if (ramcfg[1] & 0x80)
+ bios_md32(bios, NV04_PFB_CFG0, 0, NV04_PFB_CFG0_SCRAMBLE);
+
+ bios_md32(bios, NV04_PFB_CFG1, 0x700001, (ramcfg[1] & 1) << 20);
+ bios_md32(bios, NV04_PFB_CFG1, 0, 1);
+
+ /* Probe memory bus width */
+ for (i = 0; i < 4; i++)
+ poke_fb(dev, fb, 4 * i, patt);
+
+ if (peek_fb(dev, fb, 0xc) != patt)
+ bios_md32(bios, NV04_PFB_BOOT_0,
+ NV04_PFB_BOOT_0_RAM_WIDTH_128, 0);
+
+ /* Probe memory length */
+ v = bios_rd32(bios, NV04_PFB_BOOT_0) & NV04_PFB_BOOT_0_RAM_AMOUNT;
+
+ if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_32MB &&
+ (!read_back_fb(dev, fb, 0x1000000, ++patt) ||
+ !read_back_fb(dev, fb, 0, ++patt)))
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_16MB);
+
+ if (v == NV04_PFB_BOOT_0_RAM_AMOUNT_16MB &&
+ !read_back_fb(dev, fb, 0x800000, ++patt))
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_8MB);
+
+ if (!read_back_fb(dev, fb, 0x400000, ++patt))
+ bios_md32(bios, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT,
+ NV04_PFB_BOOT_0_RAM_AMOUNT_4MB);
+
+out:
+ /* Sequencer on */
+ NVWriteVgaSeq(dev, 0, 1, NVReadVgaSeq(dev, 0, 1) & ~0x20);
+
+ io_mapping_free(fb);
+ return 0;
+}
+
+static int
+nv10_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ const int mem_width[] = { 0x10, 0x00, 0x20 };
+ const int mem_width_count = (dev_priv->chipset >= 0x17 ? 3 : 2);
+ uint32_t patt = 0xdeadbeef;
+ struct io_mapping *fb;
+ int i, j, k;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+ /* Probe memory bus width */
+ for (i = 0; i < mem_width_count; i++) {
+ bios_md32(bios, NV04_PFB_CFG0, 0x30, mem_width[i]);
+
+ for (j = 0; j < 4; j++) {
+ for (k = 0; k < 4; k++)
+ poke_fb(dev, fb, 0x1c, 0);
+
+ poke_fb(dev, fb, 0x1c, patt);
+ poke_fb(dev, fb, 0x3c, 0);
+
+ if (peek_fb(dev, fb, 0x1c) == patt)
+ goto mem_width_found;
+ }
+ }
+
+mem_width_found:
+ patt <<= 1;
+
+ /* Probe amount of installed memory */
+ for (i = 0; i < 4; i++) {
+ int off = bios_rd32(bios, NV04_PFB_FIFO_DATA) - 0x100000;
+
+ poke_fb(dev, fb, off, patt);
+ poke_fb(dev, fb, 0, 0);
+
+ peek_fb(dev, fb, 0);
+ peek_fb(dev, fb, 0);
+ peek_fb(dev, fb, 0);
+ peek_fb(dev, fb, 0);
+
+ if (peek_fb(dev, fb, off) == patt)
+ goto amount_found;
+ }
+
+ /* IC missing - disable the upper half of the memory space. */
+ bios_md32(bios, NV04_PFB_CFG0, 0x1000, 0);
+
+amount_found:
+ io_mapping_free(fb);
+ return 0;
+}
+
+static int
+nv20_init_compute_mem(struct nvbios *bios)
+{
+ struct drm_device *dev = bios->dev;
+ struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ uint32_t mask = (dev_priv->chipset >= 0x25 ? 0x300 : 0x900);
+ uint32_t amount, off;
+ struct io_mapping *fb;
+
+ /* Map the framebuffer aperture */
+ fb = io_mapping_create_wc(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1));
+ if (!fb)
+ return -ENOMEM;
+
+ bios_wr32(bios, NV10_PFB_REFCTRL, NV10_PFB_REFCTRL_VALID_1);
+
+ /* Allow full addressing */
+ bios_md32(bios, NV04_PFB_CFG0, 0, mask);
+
+ amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
+ for (off = amount; off > 0x2000000; off -= 0x2000000)
+ poke_fb(dev, fb, off - 4, off);
+
+ amount = bios_rd32(bios, NV04_PFB_FIFO_DATA);
+ if (amount != peek_fb(dev, fb, amount - 4))
+ /* IC missing - disable the upper half of the memory space. */
+ bios_md32(bios, NV04_PFB_CFG0, mask, 0);
+
+ io_mapping_free(fb);
+ return 0;
+}
+
static int
init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@@ -2047,64 +2479,57 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*
* offset (8 bit): opcode
*
- * This opcode is meant to set NV_PFB_CFG0 (0x100200) appropriately so
- * that the hardware can correctly calculate how much VRAM it has
- * (and subsequently report that value in NV_PFB_CSTATUS (0x10020C))
+ * This opcode is meant to set the PFB memory config registers
+ * appropriately so that we can correctly calculate how much VRAM it
+ * has (on nv10 and better chipsets the amount of installed VRAM is
+ * subsequently reported in NV_PFB_CSTATUS (0x10020C)).
*
- * The implementation of this opcode in general consists of two parts:
- * 1) determination of the memory bus width
- * 2) determination of how many of the card's RAM pads have ICs attached
+ * The implementation of this opcode in general consists of several
+ * parts:
*
- * 1) is done by a cunning combination of writes to offsets 0x1c and
- * 0x3c in the framebuffer, and seeing whether the written values are
- * read back correctly. This then affects bits 4-7 of NV_PFB_CFG0
+ * 1) Determination of memory type and density. Only necessary for
+ * really old chipsets, the memory type reported by the strap bits
+ * (0x101000) is assumed to be accurate on nv05 and newer.
*
- * 2) is done by a cunning combination of writes to an offset slightly
- * less than the maximum memory reported by NV_PFB_CSTATUS, then seeing
- * if the test pattern can be read back. This then affects bits 12-15 of
- * NV_PFB_CFG0
+ * 2) Determination of the memory bus width. Usually done by a cunning
+ * combination of writes to offsets 0x1c and 0x3c in the fb, and
+ * seeing whether the written values are read back correctly.
*
- * In this context a "cunning combination" may include multiple reads
- * and writes to varying locations, often alternating the test pattern
- * and 0, doubtless to make sure buffers are filled, residual charges
- * on tracks are removed etc.
+ * Only necessary on nv0x-nv1x and nv34, on the other cards we can
+ * trust the straps.
*
- * Unfortunately, the "cunning combination"s mentioned above, and the
- * changes to the bits in NV_PFB_CFG0 differ with nearly every bios
- * trace I have.
+ * 3) Determination of how many of the card's RAM pads have ICs
+ * attached, usually done by a cunning combination of writes to an
+ * offset slightly less than the maximum memory reported by
+ * NV_PFB_CSTATUS, then seeing if the test pattern can be read back.
*
- * Therefore, we cheat and assume the value of NV_PFB_CFG0 with which
- * we started was correct, and use that instead
+ * This appears to be a NOP on IGPs and NV4x or newer chipsets, both io
+ * logs of the VBIOS and kmmio traces of the binary driver POSTing the
+ * card show nothing being done for this opcode. Why is it still listed
+ * in the table?!
*/
/* no iexec->execute check by design */
- /*
- * This appears to be a NOP on G8x chipsets, both io logs of the VBIOS
- * and kmmio traces of the binary driver POSTing the card show nothing
- * being done for this opcode. why is it still listed in the table?!
- */
-
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ int ret;
- if (dev_priv->card_type >= NV_40)
- return 1;
-
- /*
- * On every card I've seen, this step gets done for us earlier in
- * the init scripts
- uint8_t crdata = bios_idxprt_rd(dev, NV_VIO_SRX, 0x01);
- bios_idxprt_wr(dev, NV_VIO_SRX, 0x01, crdata | 0x20);
- */
-
- /*
- * This also has probably been done in the scripts, but an mmio trace of
- * s3 resume shows nvidia doing it anyway (unlike the NV_VIO_SRX write)
- */
- bios_wr32(bios, NV_PFB_REFCTRL, NV_PFB_REFCTRL_VALID_1);
+ if (dev_priv->chipset >= 0x40 ||
+ dev_priv->chipset == 0x1a ||
+ dev_priv->chipset == 0x1f)
+ ret = 0;
+ else if (dev_priv->chipset >= 0x20 &&
+ dev_priv->chipset != 0x34)
+ ret = nv20_init_compute_mem(bios);
+ else if (dev_priv->chipset >= 0x10)
+ ret = nv10_init_compute_mem(bios);
+ else if (dev_priv->chipset >= 0x5)
+ ret = nv05_init_compute_mem(bios);
+ else
+ ret = nv04_init_compute_mem(bios);
- /* write back the saved configuration value */
- bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0);
+ if (ret)
+ return ret;
return 1;
}
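For reference, the chipset dispatch above boils down to the following (a restatement of the code, no new behaviour implied):

	0x40 and up, plus the 0x1a/0x1f IGPs    -> nothing to do
	0x20 - 0x3f, except 0x34                -> nv20_init_compute_mem()
	0x10 - 0x1f (minus the IGPs), and 0x34  -> nv10_init_compute_mem()
	0x05 - 0x0f                             -> nv05_init_compute_mem()
	0x04                                    -> nv04_init_compute_mem()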
@@ -2131,7 +2556,8 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
/* no iexec->execute check by design */
pci_nv_19 = bios_rd32(bios, NV_PBUS_PCI_NV_19);
- bios_wr32(bios, NV_PBUS_PCI_NV_19, 0);
+ bios_wr32(bios, NV_PBUS_PCI_NV_19, pci_nv_19 & ~0xf00);
+
bios_wr32(bios, reg, value1);
udelay(10);
@@ -2167,7 +2593,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
uint32_t reg, data;
if (bios->major_version > 2)
- return -ENODEV;
+ return 0;
bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd(
bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20);
@@ -2180,14 +2606,14 @@ init_configure_mem(struct nvbios *bios, uint16_t offset,
reg = ROM32(bios->data[seqtbloffs += 4])) {
switch (reg) {
- case NV_PFB_PRE:
- data = NV_PFB_PRE_CMD_PRECHARGE;
+ case NV04_PFB_PRE:
+ data = NV04_PFB_PRE_CMD_PRECHARGE;
break;
- case NV_PFB_PAD:
- data = NV_PFB_PAD_CKE_NORMAL;
+ case NV04_PFB_PAD:
+ data = NV04_PFB_PAD_CKE_NORMAL;
break;
- case NV_PFB_REF:
- data = NV_PFB_REF_CMD_REFRESH;
+ case NV04_PFB_REF:
+ data = NV04_PFB_REF_CMD_REFRESH;
break;
default:
data = ROM32(bios->data[meminitdata]);
@@ -2222,7 +2648,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset,
int clock;
if (bios->major_version > 2)
- return -ENODEV;
+ return 0;
clock = ROM16(bios->data[meminitoffs + 4]) * 10;
setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock);
@@ -2252,10 +2678,10 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset,
/* no iexec->execute check by design */
uint32_t straps = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
- uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6));
+ uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & 0x40) >> 6;
if (bios->major_version > 2)
- return -ENODEV;
+ return 0;
bios_idxprt_wr(bios, NV_CIO_CRX__COLOR,
NV_CIO_CRE_SCRATCH4__INDEX, cr3c);
@@ -2389,7 +2815,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
* offset + 1 (8 bit): mask
* offset + 2 (8 bit): cmpval
*
- * Test if (NV_PFB_BOOT_0 & "mask") equals "cmpval".
+ * Test if (NV04_PFB_BOOT_0 & "mask") equals "cmpval".
* If condition not met skip subsequent opcodes until condition is
* inverted (INIT_NOT), or we hit INIT_RESUME
*/
@@ -2401,7 +2827,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset,
if (!iexec->execute)
return 3;
- data = bios_rd32(bios, NV_PFB_BOOT_0) & mask;
+ data = bios_rd32(bios, NV04_PFB_BOOT_0) & mask;
BIOSLOG(bios, "0x%04X: Checking if 0x%08X equals 0x%08X\n",
offset, data, cmpval);
@@ -2795,12 +3221,13 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
*/
struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
int i;
- if (dev_priv->card_type != NV_50) {
+ if (dev_priv->card_type < NV_50) {
NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
- return -ENODEV;
+ return 1;
}
if (!iexec->execute)
@@ -2815,7 +3242,7 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
offset, gpio->tag, gpio->state_default);
if (bios->execute)
- nv50_gpio_set(bios->dev, gpio->tag, gpio->state_default);
+ pgpio->set(bios->dev, gpio->tag, gpio->state_default);
/* The NVIDIA binary driver doesn't appear to actually do
* any of this, my VBIOS does however.
@@ -2872,10 +3299,7 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
uint8_t index;
int i;
-
- if (!iexec->execute)
- return len;
-
+ /* critical! we need the opcode's length even when not executing */
if (!blocklen) {
NV_ERROR(bios->dev,
"0x%04X: Zero block length - has the M table "
@@ -2883,6 +3307,9 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset,
return -EINVAL;
}
+ if (!iexec->execute)
+ return len;
+
strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf;
index = bios->data[bios->ram_restrict_tbl_ptr + strap_ramcfg];
@@ -3064,14 +3491,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_AUXCH: no active output\n");
- return -EINVAL;
+ return len;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return -ENODEV;
+ return len;
}
if (!iexec->execute)
@@ -3084,7 +3511,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret);
- return ret;
+ return len;
}
data &= bios->data[offset + 0];
@@ -3093,7 +3520,7 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1);
if (ret) {
NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret);
- return ret;
+ return len;
}
}
@@ -3123,14 +3550,14 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
if (!bios->display.output) {
NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n");
- return -EINVAL;
+ return len;
}
auxch = init_i2c_device_find(dev, bios->display.output->i2c_index);
if (!auxch) {
NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n",
bios->display.output->i2c_index);
- return -ENODEV;
+ return len;
}
if (!iexec->execute)
@@ -3141,13 +3568,76 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1);
if (ret) {
NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret);
- return ret;
+ return len;
}
}
return len;
}
+static int
+init_i2c_long_if(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
+{
+ /*
+ * INIT_I2C_LONG_IF opcode: 0x9A ('')
+ *
+ * offset (8 bit): opcode
+ * offset + 1 (8 bit): DCB I2C table entry index
+ * offset + 2 (8 bit): I2C slave address
+ * offset + 3 (16 bit): I2C register
+ * offset + 5 (8 bit): mask
+ * offset + 6 (8 bit): data
+ *
+ * Read the register given by "I2C register" on the device addressed
+ * by "I2C slave address" on the I2C bus given by "DCB I2C table
+ * entry index". Compare the result AND "mask" to "data".
+ * If they're not equal, skip subsequent opcodes until condition is
+ * inverted (INIT_NOT), or we hit INIT_RESUME
+ */
+
+ uint8_t i2c_index = bios->data[offset + 1];
+ uint8_t i2c_address = bios->data[offset + 2] >> 1;
+ uint8_t reglo = bios->data[offset + 3];
+ uint8_t reghi = bios->data[offset + 4];
+ uint8_t mask = bios->data[offset + 5];
+ uint8_t data = bios->data[offset + 6];
+ struct nouveau_i2c_chan *chan;
+ uint8_t buf0[2] = { reghi, reglo };
+ uint8_t buf1[1];
+ struct i2c_msg msg[2] = {
+ { i2c_address, 0, 1, buf0 },
+ { i2c_address, I2C_M_RD, 1, buf1 },
+ };
+ int ret;
+
+ /* no execute check by design */
+
+ BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X\n",
+ offset, i2c_index, i2c_address);
+
+ chan = init_i2c_device_find(bios->dev, i2c_index);
+ if (!chan)
+ return -ENODEV;
+
+ ret = i2c_transfer(&chan->adapter, msg, 2);
+ if (ret < 0) {
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: [no device], "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reghi, reglo, mask, data);
+ iexec->execute = 0;
+ return 7;
+ }
+
+ BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X:0x%02X, Value: 0x%02X, "
+ "Mask: 0x%02X, Data: 0x%02X\n",
+ offset, reghi, reglo, buf1[0], mask, data);
+
+ iexec->execute = ((buf1[0] & mask) == data);
+
+ return 7;
+}
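A made-up byte sequence to illustrate the INIT_I2C_LONG_IF field layout (hypothetical values):

	/* 0x9a 0x02 0xa0 0x34 0x12 0x0f 0x03: DCB I2C bus 2, 7-bit slave
	 * address 0xa0 >> 1 = 0x50, 16-bit register 0x1234 (reglo 0x34,
	 * reghi 0x12), then keep executing only if (value & 0x0f) == 0x03. */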
+
static struct init_tbl_entry itbl_entry[] = {
/* command name , id , length , offset , mult , command handler */
/* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */
@@ -3174,9 +3664,11 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_ZM_CR" , 0x53, init_zm_cr },
{ "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group },
{ "INIT_CONDITION_TIME" , 0x56, init_condition_time },
+ { "INIT_LTIME" , 0x57, init_ltime },
{ "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence },
/* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */
{ "INIT_SUB_DIRECT" , 0x5B, init_sub_direct },
+ { "INIT_I2C_IF" , 0x5E, init_i2c_if },
{ "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg },
{ "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io },
{ "INIT_COMPUTE_MEM" , 0x63, init_compute_mem },
@@ -3210,6 +3702,7 @@ static struct init_tbl_entry itbl_entry[] = {
{ "INIT_97" , 0x97, init_97 },
{ "INIT_AUXCH" , 0x98, init_auxch },
{ "INIT_ZM_AUXCH" , 0x99, init_zm_auxch },
+ { "INIT_I2C_LONG_IF" , 0x9A, init_i2c_long_if },
{ NULL , 0 , NULL }
};
@@ -4068,7 +4561,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
bios->display.script_table_ptr,
table[2], table[3], table[0] >= 0x21);
if (!otable) {
- NV_ERROR(dev, "Couldn't find matching output script table\n");
+ NV_DEBUG_KMS(dev, "failed to match any output table\n");
return 1;
}
@@ -4125,7 +4618,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
if (script)
script = clkcmptable(bios, script, pxclk);
if (!script) {
- NV_ERROR(dev, "clock script 0 not found\n");
+ NV_DEBUG_KMS(dev, "clock script 0 not found\n");
return 1;
}
@@ -4484,7 +4977,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
pll_lim->min_p = record[12];
pll_lim->max_p = record[13];
/* where did this go to?? */
- if (limit_match == 0x00614100 || limit_match == 0x00614900)
+ if ((entry[0] & 0xf0) == 0x80)
pll_lim->refclk = 27000;
else
pll_lim->refclk = 100000;
@@ -5151,10 +5644,14 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+ if (bios->data[legacy_i2c_offset + 4])
+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ if (bios->data[legacy_i2c_offset + 5])
+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ if (bios->data[legacy_i2c_offset + 6])
+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ if (bios->data[legacy_i2c_offset + 7])
+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -5506,7 +6003,7 @@ static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
entry->i2c_index = i2c;
entry->heads = heads;
entry->location = DCB_LOC_ON_CHIP;
- /* "or" mostly unused in early gen crt modesetting, 0 is fine */
+ entry->or = 1;
}
static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
@@ -5589,9 +6086,12 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
if (conf & 0x4 || conf & 0x8)
entry->lvdsconf.use_power_scripts = true;
} else {
- mask = ~0x5;
+ mask = ~0x7;
+ if (conf & 0x2)
+ entry->lvdsconf.use_acpi_for_edid = true;
if (conf & 0x4)
entry->lvdsconf.use_power_scripts = true;
+ entry->lvdsconf.sor.link = (conf & 0x00000030) >> 4;
}
if (conf & mask) {
/*
@@ -5631,7 +6131,13 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
}
break;
case OUTPUT_TMDS:
- entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ if (dcb->version >= 0x40)
+ entry->tmdsconf.sor.link = (conf & 0x00000030) >> 4;
+ else if (dcb->version >= 0x30)
+ entry->tmdsconf.slave_addr = (conf & 0x00000700) >> 8;
+ else if (dcb->version >= 0x22)
+ entry->tmdsconf.slave_addr = (conf & 0x00000070) >> 4;
+
break;
case 0xe:
/* weird g80 mobile type that "nv" treats as a terminator */
@@ -5706,13 +6212,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
case OUTPUT_TV:
entry->tvconf.has_component_output = false;
break;
- case OUTPUT_TMDS:
- /*
- * Invent a DVI-A output, by copying the fields of the DVI-D
- * output; reported to work by math_b on an NV20(!).
- */
- fabricate_vga_output(dcb, entry->i2c_index, entry->heads);
- break;
case OUTPUT_LVDS:
if ((conn & 0x00003f00) != 0x10)
entry->lvdsconf.use_straps_for_mode = true;
@@ -5793,6 +6292,31 @@ void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
dcb->entries = newentries;
}
+static bool
+apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
+{
+ /* Dell Precision M6300
+ * DCB entry 2: 02025312 00000010
+ * DCB entry 3: 02026312 00000020
+ *
+ * Identical, except apparently a different connector on a
+ * different SOR link. Not a clue how we're supposed to know
+ * which one is in use if it even shares an i2c line...
+ *
+ * Ignore the connector on the second SOR link to prevent
+ * nasty problems until this is sorted (assuming it's not a
+ * VBIOS bug).
+ */
+ if ((dev->pdev->device == 0x040d) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x019b)) {
+ if (*conn == 0x02026312 && *conf == 0x00000020)
+ return false;
+ }
+
+ return true;
+}
+
static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
@@ -5903,6 +6427,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
dcb->i2c_table = &bios->data[i2ctabptr];
if (dcb->version >= 0x30)
dcb->i2c_default_indices = dcb->i2c_table[4];
+
+ /*
+ * Parse the "management" I2C bus, used for hardware
+ * monitoring and some external TMDS transmitters.
+ */
+ if (dcb->version >= 0x22) {
+ int idx = (dcb->version >= 0x40 ?
+ dcb->i2c_default_indices & 0xf :
+ 2);
+
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ idx, &dcb->i2c[idx]);
+ }
}
if (entries > DCB_MAX_NUM_ENTRIES)
@@ -5926,6 +6463,9 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
if ((connection & 0x0000000f) == 0x0000000f)
continue;
+ if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
+ continue;
+
NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
dcb->entries, connection, config);
@@ -6181,9 +6721,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
struct nvbios *bios = &dev_priv->vbios;
int i, ret = 0;
- NVLockVgaCrtcs(dev, false);
- if (nv_two_heads(dev))
- NVSetOwner(dev, bios->state.crtchead);
+ /* Reset the BIOS head to 0. */
+ bios->state.crtchead = 0;
if (bios->major_version < 5) /* BMP only */
load_nv17_hw_sequencer_ucode(dev, bios);
@@ -6216,8 +6755,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
}
}
- NVLockVgaCrtcs(dev, true);
-
return ret;
}
@@ -6238,7 +6775,6 @@ static bool
nouveau_bios_posted(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- bool was_locked;
unsigned htotal;
if (dev_priv->chipset >= NV_50) {
@@ -6248,13 +6784,12 @@ nouveau_bios_posted(struct drm_device *dev)
return true;
}
- was_locked = NVLockVgaCrtcs(dev, false);
htotal = NVReadVgaCrtc(dev, 0, 0x06);
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x01) << 8;
htotal |= (NVReadVgaCrtc(dev, 0, 0x07) & 0x20) << 4;
htotal |= (NVReadVgaCrtc(dev, 0, 0x25) & 0x01) << 10;
htotal |= (NVReadVgaCrtc(dev, 0, 0x41) & 0x01) << 11;
- NVLockVgaCrtcs(dev, was_locked);
+
return (htotal != 0);
}
@@ -6263,8 +6798,6 @@ nouveau_bios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvbios *bios = &dev_priv->vbios;
- uint32_t saved_nv_pextdev_boot_0;
- bool was_locked;
int ret;
if (!NVInitVBIOS(dev))
@@ -6284,40 +6817,27 @@ nouveau_bios_init(struct drm_device *dev)
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
- /* these will need remembering across a suspend */
- saved_nv_pextdev_boot_0 = bios_rd32(bios, NV_PEXTDEV_BOOT_0);
- bios->state.saved_nv_pfb_cfg0 = bios_rd32(bios, NV_PFB_CFG0);
-
/* init script execution disabled */
bios->execute = false;
/* ... unless card isn't POSTed already */
if (!nouveau_bios_posted(dev)) {
- NV_INFO(dev, "Adaptor not initialised\n");
- if (dev_priv->card_type < NV_40) {
- NV_ERROR(dev, "Unable to POST this chipset\n");
- return -ENODEV;
- }
-
- NV_INFO(dev, "Running VBIOS init tables\n");
+ NV_INFO(dev, "Adaptor not initialised, "
+ "running VBIOS init tables.\n");
bios->execute = true;
}
- bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
-
ret = nouveau_run_vbios_init(dev);
if (ret)
return ret;
/* feature_byte on BMP is poor, but init always sets CR4B */
- was_locked = NVLockVgaCrtcs(dev, false);
if (bios->major_version < 5)
bios->is_mobile = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_4B) & 0x40;
/* all BIT systems need p_f_m_t for digital_min_front_porch */
if (bios->is_mobile || bios->major_version >= 5)
ret = parse_fp_mode_table(dev, bios);
- NVLockVgaCrtcs(dev, was_locked);
/* allow subsequent scripts to execute */
bios->execute = true;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index adf4ec2d06c0..fd14dfd3d780 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -81,6 +81,7 @@ struct dcb_connector_table_entry {
enum dcb_connector_type type;
uint8_t index2;
uint8_t gpio_tag;
+ void *drm;
};
struct dcb_connector_table {
@@ -117,6 +118,7 @@ struct dcb_entry {
struct {
struct sor_conf sor;
bool use_straps_for_mode;
+ bool use_acpi_for_edid;
bool use_power_scripts;
} lvdsconf;
struct {
@@ -129,6 +131,7 @@ struct dcb_entry {
} dpconf;
struct {
struct sor_conf sor;
+ int slave_addr;
} tmdsconf;
};
bool i2c_upper_default;
@@ -249,8 +252,6 @@ struct nvbios {
struct {
int crtchead;
- /* these need remembering across suspend */
- uint32_t saved_nv_pfb_cfg0;
} state;
struct {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 6f3c19522377..84f85183d041 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -51,9 +51,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
if (nvbo->tile)
nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
- spin_lock(&dev_priv->ttm.bo_list_lock);
- list_del(&nvbo->head);
- spin_unlock(&dev_priv->ttm.bo_list_lock);
kfree(nvbo);
}
@@ -166,9 +163,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
}
nvbo->channel = NULL;
- spin_lock(&dev_priv->ttm.bo_list_lock);
- list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
- spin_unlock(&dev_priv->ttm.bo_list_lock);
*pnvbo = nvbo;
return 0;
}
@@ -461,9 +455,9 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
return ret;
ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
- evict, no_wait_reserve, no_wait_gpu, new_mem);
- if (nvbo->channel && nvbo->channel != chan)
- ret = nouveau_fence_wait(fence, NULL, false, false);
+ evict || (nvbo->channel &&
+ nvbo->channel != chan),
+ no_wait_reserve, no_wait_gpu, new_mem);
nouveau_fence_unref((void *)&fence);
return ret;
}
@@ -711,8 +705,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
return ret;
/* Software copy if the card isn't up and running yet. */
- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
- !dev_priv->channel) {
+ if (!dev_priv->channel) {
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
goto out;
}
@@ -783,7 +776,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
- mem->bus.base = drm_get_resource_start(dev, 1);
+ mem->bus.base = pci_resource_start(dev->pdev, 1);
mem->bus.is_iomem = true;
break;
default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index 88f9bc0941eb..ca85da784846 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -200,7 +200,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
struct nv_sim_state sim_data;
int MClk = nouveau_hw_get_clock(dev, MPLL);
int NVClk = nouveau_hw_get_clock(dev, NVPLL);
- uint32_t cfg1 = nvReadFB(dev, NV_PFB_CFG1);
+ uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
sim_data.pclk_khz = VClk;
sim_data.mclk_khz = MClk;
@@ -218,7 +218,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
sim_data.mem_latency = 3;
sim_data.mem_page_miss = 10;
} else {
- sim_data.memory_type = nvReadFB(dev, NV_PFB_CFG0) & 0x1;
+ sim_data.memory_type = nvReadFB(dev, NV04_PFB_CFG0) & 0x1;
sim_data.memory_width = (nvReadEXTDEV(dev, NV_PEXTDEV_BOOT_0) & 0x10) ? 128 : 64;
sim_data.mem_latency = cfg1 & 0xf;
sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 1fc57ef58295..90fdcda332be 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -62,7 +62,8 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
* VRAM.
*/
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
- drm_get_resource_start(dev, 1),
+ pci_resource_start(dev->pdev,
+ 1),
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
@@ -257,9 +258,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
nouveau_debugfs_channel_fini(chan);
/* Give outstanding push buffers a chance to complete */
- spin_lock_irqsave(&chan->fence.lock, flags);
nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
if (chan->fence.sequence != chan->fence.sequence_ack) {
struct nouveau_fence *fence = NULL;
@@ -368,8 +367,6 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
if (dev_priv->engine.graph.accel_blocked)
return -ENODEV;
@@ -418,7 +415,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
struct drm_nouveau_channel_free *cfree = data;
struct nouveau_channel *chan;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
nouveau_channel_free(chan);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 149ed224c3cb..b1b22baf1428 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -37,12 +37,6 @@
#include "nouveau_connector.h"
#include "nouveau_hw.h"
-static inline struct drm_encoder_slave_funcs *
-get_slave_funcs(struct nouveau_encoder *enc)
-{
- return to_encoder_slave(to_drm_encoder(enc))->slave_funcs;
-}
-
static struct nouveau_encoder *
find_encoder_by_type(struct drm_connector *connector, int type)
{
@@ -102,63 +96,15 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
kfree(drm_connector);
}
-static void
-nouveau_connector_ddc_prepare(struct drm_connector *connector, int *flags)
-{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
-
- if (dev_priv->card_type >= NV_50)
- return;
-
- *flags = 0;
- if (NVLockVgaCrtcs(dev_priv->dev, false))
- *flags |= 1;
- if (nv_heads_tied(dev_priv->dev))
- *flags |= 2;
-
- if (*flags & 2)
- NVSetOwner(dev_priv->dev, 0); /* necessary? */
-}
-
-static void
-nouveau_connector_ddc_finish(struct drm_connector *connector, int flags)
-{
- struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
-
- if (dev_priv->card_type >= NV_50)
- return;
-
- if (flags & 2)
- NVSetOwner(dev_priv->dev, 4);
- if (flags & 1)
- NVLockVgaCrtcs(dev_priv->dev, true);
-}
-
static struct nouveau_i2c_chan *
nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
- uint8_t out_buf[] = { 0x0, 0x0}, buf[2];
- int ret, flags, i;
-
- struct i2c_msg msgs[] = {
- {
- .addr = 0x50,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = 0x50,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
+ int i;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
- struct nouveau_i2c_chan *i2c = NULL;
+ struct nouveau_i2c_chan *i2c;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
@@ -171,17 +117,9 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
if (!obj)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
+ i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (nv_encoder->dcb->i2c_index < 0xf)
- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
- if (!i2c)
- continue;
-
- nouveau_connector_ddc_prepare(connector, &flags);
- ret = i2c_transfer(&i2c->adapter, msgs, 2);
- nouveau_connector_ddc_finish(connector, flags);
-
- if (ret == 2) {
+ if (i2c && nouveau_probe_i2c_addr(i2c, 0x50)) {
*pnv_encoder = nv_encoder;
return i2c;
}
@@ -234,21 +172,7 @@ nouveau_connector_detect(struct drm_connector *connector)
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_i2c_chan *i2c;
- int type, flags;
-
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
- if (nv_encoder && nv_connector->native_mode) {
- unsigned status = connector_status_connected;
-
-#if defined(CONFIG_ACPI_BUTTON) || \
- (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
- if (!nouveau_ignorelid && !acpi_lid_open())
- status = connector_status_unknown;
-#endif
- nouveau_connector_set_encoder(connector, nv_encoder);
- return status;
- }
+ int type;
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
@@ -259,9 +183,7 @@ nouveau_connector_detect(struct drm_connector *connector)
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
- nouveau_connector_ddc_prepare(connector, &flags);
nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
- nouveau_connector_ddc_finish(connector, flags);
drm_mode_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
@@ -321,6 +243,85 @@ detect_analog:
return connector_status_disconnected;
}
+static enum drm_connector_status
+nouveau_connector_detect_lvds(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = NULL;
+ enum drm_connector_status status = connector_status_disconnected;
+
+ /* Cleanup the previous EDID block. */
+ if (nv_connector->edid) {
+ drm_mode_connector_update_edid_property(connector, NULL);
+ kfree(nv_connector->edid);
+ nv_connector->edid = NULL;
+ }
+
+ nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
+ if (!nv_encoder)
+ return connector_status_disconnected;
+
+ /* Try retrieving EDID via DDC */
+ if (!dev_priv->vbios.fp_no_ddc) {
+ status = nouveau_connector_detect(connector);
+ if (status == connector_status_connected)
+ goto out;
+ }
+
+ /* On some laptops (Sony, I'm looking at you) there appears to
+ * be no direct way of accessing the panel's EDID. The only
+ * option available to us appears to be to ask ACPI for help..
+ *
+ * It's important this check comes before trying the straps: one
+ * of said manufacturer's laptops is configured in such a way
+ * that nouveau decides an entry in the VBIOS FP mode table is
+ * valid - it's not (rh#613284)
+ */
+ if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
+ if (!nouveau_acpi_edid(dev, connector)) {
+ status = connector_status_connected;
+ goto out;
+ }
+ }
+
+ /* If no EDID found above, and the VBIOS indicates a hardcoded
+ * modeline is available for the panel, set it as the panel's
+ * native mode and exit.
+ */
+ if (nouveau_bios_fp_mode(dev, NULL) && (dev_priv->vbios.fp_no_ddc ||
+ nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
+ status = connector_status_connected;
+ goto out;
+ }
+
+ /* Still nothing; some VBIOS images have a hardcoded EDID block
+ * for the panel stored in them.
+ */
+ if (!dev_priv->vbios.fp_no_ddc) {
+ struct edid *edid =
+ (struct edid *)nouveau_bios_embedded_edid(dev);
+ if (edid) {
+ nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ *(nv_connector->edid) = *edid;
+ status = connector_status_connected;
+ }
+ }
+
+out:
+#if defined(CONFIG_ACPI_BUTTON) || \
+ (defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
+ if (status == connector_status_connected &&
+ !nouveau_ignorelid && !acpi_lid_open())
+ status = connector_status_unknown;
+#endif
+
+ drm_mode_connector_update_edid_property(connector, nv_connector->edid);
+ nouveau_connector_set_encoder(connector, nv_encoder);
+ return status;
+}
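Restating the fallback order of the new LVDS detect path (a summary of the code above, nothing added): 1) EDID over DDC through the generic detect, unless the VBIOS sets fp_no_ddc; 2) EDID via ACPI when the DCB entry asks for it; 3) the hardcoded VBIOS modeline when the straps (or fp_no_ddc) say to trust it; 4) an EDID block embedded in the VBIOS. A successful result may still be downgraded to "unknown" by the lid-switch check at the end.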
+
static void
nouveau_connector_force(struct drm_connector *connector)
{
@@ -353,6 +354,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
int ret;
@@ -425,8 +427,8 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
- return get_slave_funcs(nv_encoder)->
- set_property(to_drm_encoder(nv_encoder), connector, property, value);
+ return get_slave_funcs(encoder)->set_property(
+ encoder, connector, property, value);
return -EINVAL;
}
@@ -441,7 +443,8 @@ nouveau_connector_native_mode(struct drm_connector *connector)
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
- if (helper->mode_valid(connector, mode) != MODE_OK)
+ if (helper->mode_valid(connector, mode) != MODE_OK ||
+ (mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
/* Use preferred mode if there is one.. */
@@ -534,21 +537,28 @@ static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret = 0;
- /* If we're not LVDS, destroy the previous native mode, the attached
- * monitor could have changed.
+ /* destroy the native mode, the attached monitor could have changed.
*/
- if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
- nv_connector->native_mode) {
+ if (nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
}
if (nv_connector->edid)
ret = drm_add_edid_modes(connector, nv_connector->edid);
+ else
+ if (nv_encoder->dcb->type == OUTPUT_LVDS &&
+ (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
+ dev_priv->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
+ nv_connector->native_mode = drm_mode_create(dev);
+ nouveau_bios_fp_mode(dev, nv_connector->native_mode);
+ }
/* Find the native mode if this is a digital panel, if we didn't
* find any modes through DDC previously add the native mode to
@@ -566,10 +576,10 @@ nouveau_connector_get_modes(struct drm_connector *connector)
}
if (nv_encoder->dcb->type == OUTPUT_TV)
- ret = get_slave_funcs(nv_encoder)->
- get_modes(to_drm_encoder(nv_encoder), connector);
+ ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
- if (nv_encoder->dcb->type == OUTPUT_LVDS)
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
+ nv_connector->dcb->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -582,6 +592,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
unsigned clock = mode->clock;
@@ -608,8 +619,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
max_clock = 350000;
break;
case OUTPUT_TV:
- return get_slave_funcs(nv_encoder)->
- mode_valid(to_drm_encoder(nv_encoder), mode);
+ return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case OUTPUT_DP:
if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
max_clock = nv_encoder->dp.link_nr * 270000;
@@ -643,6 +653,44 @@ nouveau_connector_best_encoder(struct drm_connector *connector)
return NULL;
}
+void
+nouveau_connector_set_polling(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_crtc *crtc;
+ bool spare_crtc = false;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+ spare_crtc |= !crtc->enabled;
+
+ connector->polled = 0;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_TV:
+ if (dev_priv->card_type >= NV_50 ||
+ (nv_gf4_disp_arch(dev) && spare_crtc))
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ break;
+
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (dev_priv->card_type >= NV_50)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (connector->connector_type == DRM_MODE_CONNECTOR_DVID ||
+ spare_crtc)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ break;
+
+ default:
+ break;
+ }
+}
+
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
@@ -662,148 +710,74 @@ nouveau_connector_funcs = {
.force = nouveau_connector_force
};
-static int
-nouveau_connector_create_lvds(struct drm_device *dev,
- struct drm_connector *connector)
-{
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *i2c = NULL;
- struct nouveau_encoder *nv_encoder;
- struct drm_display_mode native, *mode, *temp;
- bool dummy, if_is_24bit = false;
- int ret, flags;
-
- nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
- if (!nv_encoder)
- return -ENODEV;
-
- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &if_is_24bit);
- if (ret) {
- NV_ERROR(dev, "Error parsing LVDS table, disabling LVDS\n");
- return ret;
- }
- nv_connector->use_dithering = !if_is_24bit;
-
- /* Firstly try getting EDID over DDC, if allowed and I2C channel
- * is available.
- */
- if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
- i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
-
- if (i2c) {
- nouveau_connector_ddc_prepare(connector, &flags);
- nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
- nouveau_connector_ddc_finish(connector, flags);
- }
-
- /* If no EDID found above, and the VBIOS indicates a hardcoded
- * modeline is avalilable for the panel, set it as the panel's
- * native mode and exit.
- */
- if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
- (nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
- dev_priv->vbios.fp_no_ddc)) {
- nv_connector->native_mode = drm_mode_duplicate(dev, &native);
- goto out;
- }
-
- /* Still nothing, some VBIOS images have a hardcoded EDID block
- * stored for the panel stored in them.
- */
- if (!nv_connector->edid && !nv_connector->native_mode &&
- !dev_priv->vbios.fp_no_ddc) {
- struct edid *edid =
- (struct edid *)nouveau_bios_embedded_edid(dev);
- if (edid) {
- nv_connector->edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
- *(nv_connector->edid) = *edid;
- }
- }
-
- if (!nv_connector->edid)
- goto out;
-
- /* We didn't find/use a panel mode from the VBIOS, so parse the EDID
- * block and look for the preferred mode there.
- */
- ret = drm_add_edid_modes(connector, nv_connector->edid);
- if (ret == 0)
- goto out;
- nv_connector->detected_encoder = nv_encoder;
- nv_connector->native_mode = nouveau_connector_native_mode(connector);
- list_for_each_entry_safe(mode, temp, &connector->probed_modes, head)
- drm_mode_remove(connector, mode);
-
-out:
- if (!nv_connector->native_mode) {
- NV_ERROR(dev, "LVDS present in DCB table, but couldn't "
- "determine its native mode. Disabling.\n");
- return -ENODEV;
- }
-
- drm_mode_connector_update_edid_property(connector, nv_connector->edid);
- return 0;
-}
+static const struct drm_connector_funcs
+nouveau_connector_funcs_lvds = {
+ .dpms = drm_helper_connector_dpms,
+ .save = NULL,
+ .restore = NULL,
+ .detect = nouveau_connector_detect_lvds,
+ .destroy = nouveau_connector_destroy,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = nouveau_connector_set_property,
+ .force = nouveau_connector_force
+};
-int
-nouveau_connector_create(struct drm_device *dev,
- struct dcb_connector_table_entry *dcb)
+struct drm_connector *
+nouveau_connector_create(struct drm_device *dev, int index)
{
+ const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
+ struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
- struct drm_encoder *encoder;
- int ret, type;
+ int type, ret = 0;
NV_DEBUG_KMS(dev, "\n");
+ if (index >= dev_priv->vbios.dcb.connector.entries)
+ return ERR_PTR(-EINVAL);
+
+ dcb = &dev_priv->vbios.dcb.connector.entry[index];
+ if (dcb->drm)
+ return dcb->drm;
+
switch (dcb->type) {
- case DCB_CONNECTOR_NONE:
- return 0;
case DCB_CONNECTOR_VGA:
- NV_INFO(dev, "Detected a VGA connector\n");
type = DRM_MODE_CONNECTOR_VGA;
break;
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
case DCB_CONNECTOR_TV_3:
- NV_INFO(dev, "Detected a TV connector\n");
type = DRM_MODE_CONNECTOR_TV;
break;
case DCB_CONNECTOR_DVI_I:
- NV_INFO(dev, "Detected a DVI-I connector\n");
type = DRM_MODE_CONNECTOR_DVII;
break;
case DCB_CONNECTOR_DVI_D:
- NV_INFO(dev, "Detected a DVI-D connector\n");
type = DRM_MODE_CONNECTOR_DVID;
break;
case DCB_CONNECTOR_HDMI_0:
case DCB_CONNECTOR_HDMI_1:
- NV_INFO(dev, "Detected a HDMI connector\n");
type = DRM_MODE_CONNECTOR_HDMIA;
break;
case DCB_CONNECTOR_LVDS:
- NV_INFO(dev, "Detected a LVDS connector\n");
type = DRM_MODE_CONNECTOR_LVDS;
+ funcs = &nouveau_connector_funcs_lvds;
break;
case DCB_CONNECTOR_DP:
- NV_INFO(dev, "Detected a DisplayPort connector\n");
type = DRM_MODE_CONNECTOR_DisplayPort;
break;
case DCB_CONNECTOR_eDP:
- NV_INFO(dev, "Detected an eDP connector\n");
type = DRM_MODE_CONNECTOR_eDP;
break;
default:
NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
nv_connector->dcb = dcb;
connector = &nv_connector->base;
@@ -811,27 +785,21 @@ nouveau_connector_create(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
+ drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
- /* attach encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->connector != dcb->index)
- continue;
-
- if (get_slave_funcs(nv_encoder))
- get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+ /* Check if we need dithering enabled */
+ if (dcb->type == DCB_CONNECTOR_LVDS) {
+ bool dummy, is_24bit = false;
- drm_mode_connector_attach_encoder(connector, encoder);
- }
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+ if (ret) {
+ NV_ERROR(dev, "Error parsing LVDS table, disabling "
+ "LVDS\n");
+ goto fail;
+ }
- if (!connector->encoder_ids[0]) {
- NV_WARN(dev, " no encoders, ignoring\n");
- drm_connector_cleanup(connector);
- kfree(connector);
- return 0;
+ nv_connector->use_dithering = !is_24bit;
}
/* Init DVI-I specific properties */
@@ -841,12 +809,8 @@ nouveau_connector_create(struct drm_device *dev,
drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
}
- if (dcb->type != DCB_CONNECTOR_LVDS)
- nv_connector->use_dithering = false;
-
switch (dcb->type) {
case DCB_CONNECTOR_VGA:
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
@@ -858,17 +822,6 @@ nouveau_connector_create(struct drm_device *dev,
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
- case DCB_CONNECTOR_DP:
- case DCB_CONNECTOR_eDP:
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- case DCB_CONNECTOR_DVI_I:
- case DCB_CONNECTOR_DVI_D:
- if (dev_priv->card_type >= NV_50)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- else
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
- /* fall-through */
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
@@ -882,15 +835,15 @@ nouveau_connector_create(struct drm_device *dev,
break;
}
+ nouveau_connector_set_polling(connector);
+
drm_sysfs_connector_add(connector);
+ dcb->drm = connector;
+ return dcb->drm;
- if (dcb->type == DCB_CONNECTOR_LVDS) {
- ret = nouveau_connector_create_lvds(dev, connector);
- if (ret) {
- connector->funcs->destroy(connector);
- return ret;
- }
- }
+fail:
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ return ERR_PTR(ret);
- return 0;
}
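
[Editor's note, not part of the patch: with this change nouveau_connector_create() returns a connector pointer or an ERR_PTR-encoded errno instead of an int, so callers are expected to check the result with the usual IS_ERR()/PTR_ERR() helpers. A minimal, hypothetical caller (the surrounding loop and index variable are illustrative, not taken from the patch) would look roughly like:

	/* Illustrative caller only; assumes a loop over DCB connector indices. */
	struct drm_connector *conn;

	conn = nouveau_connector_create(dev, i);
	if (IS_ERR(conn))
		return PTR_ERR(conn);	/* -EINVAL for a bad index, -ENOMEM on alloc failure */
]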
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 4ef38abc2d9c..0d2e668ccfe5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,7 +49,10 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
-int nouveau_connector_create(struct drm_device *,
- struct dcb_connector_table_entry *);
+struct drm_connector *
+nouveau_connector_create(struct drm_device *, int index);
+
+void
+nouveau_connector_set_polling(struct drm_connector *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 74e6b4ed12c0..2e11fd65b4dd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -84,16 +84,16 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
if (!gem)
- return NULL;
+ return ERR_PTR(-ENOENT);
nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
if (ret) {
drm_gem_object_unreference(gem);
- return NULL;
+ return ERR_PTR(ret);
}
return &nouveau_fb->base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65c441a1999f..2e3c6caa97ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -92,11 +92,9 @@ nouveau_dma_init(struct nouveau_channel *chan)
return ret;
/* Map M2MF notifier object - fbcon. */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = nouveau_bo_map(chan->notifier_bo);
- if (ret)
- return ret;
- }
+ ret = nouveau_bo_map(chan->notifier_bo);
+ if (ret)
+ return ret;
/* Insert NOPS for NOUVEAU_DMA_SKIPS */
ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index deeb21c6865c..8a1b188b4cd1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -23,8 +23,10 @@
*/
#include "drmP.h"
+
#include "nouveau_drv.h"
#include "nouveau_i2c.h"
+#include "nouveau_connector.h"
#include "nouveau_encoder.h"
static int
@@ -270,13 +272,39 @@ bool
nouveau_dp_link_train(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- uint8_t config[4];
- uint8_t status[3];
+ struct nouveau_connector *nv_connector;
+ struct bit_displayport_encoder_table *dpe;
+ int dpe_headerlen;
+ uint8_t config[4], status[3];
bool cr_done, cr_max_vs, eq_done;
int ret = 0, i, tries, voltage;
NV_DEBUG_KMS(dev, "link training!!\n");
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!nv_connector)
+ return false;
+
+ dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
+ if (!dpe) {
+ NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
+ return false;
+ }
+
+ /* disable hotplug detect, this flips around on some panels during
+ * link training.
+ */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+
+ if (dpe->script0) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
+ nv_encoder->dcb);
+ }
+
train:
cr_done = eq_done = false;
@@ -403,6 +431,15 @@ stop:
}
}
+ if (dpe->script1) {
+ NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
+ nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
+ nv_encoder->dcb);
+ }
+
+ /* re-enable hotplug detect */
+ pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+
return eq_done;
}
@@ -535,47 +572,64 @@ out:
return ret ? ret : (stat & NV50_AUXCH_STAT_REPLY);
}
-int
-nouveau_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
- uint8_t write_byte, uint8_t *read_byte)
+static int
+nouveau_dp_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
- struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adapter;
+ struct nouveau_i2c_chan *auxch = (struct nouveau_i2c_chan *)adap;
struct drm_device *dev = auxch->dev;
- int ret = 0, cmd, addr = algo_data->address;
- uint8_t *buf;
-
- if (mode == MODE_I2C_READ) {
- cmd = AUX_I2C_READ;
- buf = read_byte;
- } else {
- cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;
- buf = &write_byte;
- }
+ struct i2c_msg *msg = msgs;
+ int ret, mcnt = num;
- if (!(mode & MODE_I2C_STOP))
- cmd |= AUX_I2C_MOT;
+ while (mcnt--) {
+ u8 remaining = msg->len;
+ u8 *ptr = msg->buf;
- if (mode & MODE_I2C_START)
- return 1;
+ while (remaining) {
+ u8 cnt = (remaining > 16) ? 16 : remaining;
+ u8 cmd;
- for (;;) {
- ret = nouveau_dp_auxch(auxch, cmd, addr, buf, 1);
- if (ret < 0)
- return ret;
-
- switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
- case NV50_AUXCH_STAT_REPLY_I2C_ACK:
- return 1;
- case NV50_AUXCH_STAT_REPLY_I2C_NACK:
- return -EREMOTEIO;
- case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
- udelay(100);
- break;
- default:
- NV_ERROR(dev, "invalid auxch status: 0x%08x\n", ret);
- return -EREMOTEIO;
+ if (msg->flags & I2C_M_RD)
+ cmd = AUX_I2C_READ;
+ else
+ cmd = AUX_I2C_WRITE;
+
+ if (mcnt || remaining > 16)
+ cmd |= AUX_I2C_MOT;
+
+ ret = nouveau_dp_auxch(auxch, cmd, msg->addr, ptr, cnt);
+ if (ret < 0)
+ return ret;
+
+ switch (ret & NV50_AUXCH_STAT_REPLY_I2C) {
+ case NV50_AUXCH_STAT_REPLY_I2C_ACK:
+ break;
+ case NV50_AUXCH_STAT_REPLY_I2C_NACK:
+ return -EREMOTEIO;
+ case NV50_AUXCH_STAT_REPLY_I2C_DEFER:
+ udelay(100);
+ continue;
+ default:
+ NV_ERROR(dev, "bad auxch reply: 0x%08x\n", ret);
+ return -EREMOTEIO;
+ }
+
+ ptr += cnt;
+ remaining -= cnt;
}
+
+ msg++;
}
+
+ return num;
+}
+
+static u32
+nouveau_dp_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
+const struct i2c_algorithm nouveau_dp_i2c_algo = {
+ .master_xfer = nouveau_dp_i2c_xfer,
+ .functionality = nouveau_dp_i2c_func
+};
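
[Editor's note, not part of the patch: the per-byte i2c-over-AUX hook is replaced here by a full struct i2c_algorithm, so the DP AUX channel now behaves like an ordinary i2c_adapter and standard transfers can be issued against it. A sketch of a typical consumer (assuming 'auxch' is the nouveau_i2c_chan backing the connector's DDC bus; this caller is not part of the patch):

	/* Fetch an EDID through the AUX channel via the generic helper. */
	struct edid *edid = drm_get_edid(connector, &auxch->adapter);
]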
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 273770432298..1de5eb53e016 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -35,10 +35,6 @@
#include "drm_pciids.h"
-MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
-int nouveau_ctxfw = 0;
-module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
-
MODULE_PARM_DESC(noagp, "Disable AGP");
int nouveau_noagp;
module_param_named(noagp, nouveau_noagp, int, 0400);
@@ -56,7 +52,7 @@ int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);
MODULE_PARM_DESC(vram_notify, "Force DMA notifiers to be in VRAM");
-int nouveau_vram_notify = 1;
+int nouveau_vram_notify = 0;
module_param_named(vram_notify, nouveau_vram_notify, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (>=GeForce 8)");
@@ -132,7 +128,7 @@ static struct drm_driver driver;
static int __devinit
nouveau_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ return drm_get_pci_dev(pdev, ent, &driver);
}
static void
@@ -155,9 +151,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
struct drm_crtc *crtc;
int ret, i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
@@ -257,9 +250,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
struct drm_crtc *crtc;
int ret, i;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return -ENODEV;
-
nouveau_fbcon_save_disable_accel(dev);
NV_INFO(dev, "We're back, enabling device...\n");
@@ -269,6 +259,13 @@ nouveau_pci_resume(struct pci_dev *pdev)
return -1;
pci_set_master(dev->pdev);
+ /* Make sure the AGP controller is in a consistent state */
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
+ nouveau_mem_reset_agp(dev);
+
+ /* Make the CRTCs accessible */
+ engine->display.early_init(dev);
+
NV_INFO(dev, "POSTing device...\n");
ret = nouveau_run_vbios_init(dev);
if (ret)
@@ -323,7 +320,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int ret;
ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
if (!ret)
@@ -332,11 +328,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
- if (dev_priv->card_type < NV_50) {
- nv04_display_restore(dev);
- NVLockVgaCrtcs(dev, false);
- } else
- nv50_display_init(dev);
+ engine->display.init(dev);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
@@ -371,7 +363,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
+ DRIVER_MODESET,
.load = nouveau_load,
.firstopen = nouveau_firstopen,
.lastclose = nouveau_lastclose,
@@ -438,16 +431,18 @@ static int __init nouveau_init(void)
nouveau_modeset = 1;
}
- if (nouveau_modeset == 1) {
- driver.driver_features |= DRIVER_MODESET;
- nouveau_register_dsm_handler();
- }
+ if (!nouveau_modeset)
+ return 0;
+ nouveau_register_dsm_handler();
return drm_init(&driver);
}
static void __exit nouveau_exit(void)
{
+ if (!nouveau_modeset)
+ return;
+
drm_exit(&driver);
nouveau_unregister_dsm_handler();
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index c69719106489..e424bf74d706 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -123,14 +123,6 @@ nvbo_kmap_obj_iovirtual(struct nouveau_bo *nvbo)
return ioptr;
}
-struct mem_block {
- struct mem_block *next;
- struct mem_block *prev;
- uint64_t start;
- uint64_t size;
- struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
-};
-
enum nouveau_flags {
NV_NFORCE = 0x10000000,
NV_NFORCE2 = 0x20000000
@@ -149,7 +141,7 @@ struct nouveau_gpuobj {
struct list_head list;
struct nouveau_channel *im_channel;
- struct mem_block *im_pramin;
+ struct drm_mm_node *im_pramin;
struct nouveau_bo *im_backing;
uint32_t im_backing_start;
uint32_t *im_backing_suspend;
@@ -196,7 +188,7 @@ struct nouveau_channel {
struct list_head pending;
uint32_t sequence;
uint32_t sequence_ack;
- uint32_t last_sequence_irq;
+ atomic_t last_sequence_irq;
} fence;
/* DMA push buffer */
@@ -206,7 +198,7 @@ struct nouveau_channel {
/* Notifier memory */
struct nouveau_bo *notifier_bo;
- struct mem_block *notifier_heap;
+ struct drm_mm notifier_heap;
/* PFIFO context */
struct nouveau_gpuobj_ref *ramfc;
@@ -224,7 +216,7 @@ struct nouveau_channel {
/* Objects */
struct nouveau_gpuobj_ref *ramin; /* Private instmem */
- struct mem_block *ramin_heap; /* Private PRAMIN heap */
+ struct drm_mm ramin_heap; /* Private PRAMIN heap */
struct nouveau_gpuobj_ref *ramht; /* Hash table */
struct list_head ramht_refs; /* Objects referenced by RAMHT */
@@ -277,8 +269,7 @@ struct nouveau_instmem_engine {
void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
- void (*prepare_access)(struct drm_device *, bool write);
- void (*finish_access)(struct drm_device *);
+ void (*flush)(struct drm_device *);
};
struct nouveau_mc_engine {
@@ -303,10 +294,11 @@ struct nouveau_fb_engine {
};
struct nouveau_fifo_engine {
- void *priv;
-
int channels;
+ struct nouveau_gpuobj_ref *playlist[2];
+ int cur_playlist;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -339,10 +331,11 @@ struct nouveau_pgraph_object_class {
struct nouveau_pgraph_engine {
struct nouveau_pgraph_object_class *grclass;
bool accel_blocked;
- void *ctxprog;
- void *ctxvals;
int grctx_size;
+ /* NV2x/NV3x context table (0x400780) */
+ struct nouveau_gpuobj_ref *ctx_table;
+
int (*init)(struct drm_device *);
void (*takedown)(struct drm_device *);
@@ -358,6 +351,24 @@ struct nouveau_pgraph_engine {
uint32_t size, uint32_t pitch);
};
+struct nouveau_display_engine {
+ int (*early_init)(struct drm_device *);
+ void (*late_takedown)(struct drm_device *);
+ int (*create)(struct drm_device *);
+ int (*init)(struct drm_device *);
+ void (*destroy)(struct drm_device *);
+};
+
+struct nouveau_gpio_engine {
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*get)(struct drm_device *, enum dcb_gpio_tag);
+ int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
+
+ void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+};
+
struct nouveau_engine {
struct nouveau_instmem_engine instmem;
struct nouveau_mc_engine mc;
@@ -365,6 +376,8 @@ struct nouveau_engine {
struct nouveau_fb_engine fb;
struct nouveau_pgraph_engine graph;
struct nouveau_fifo_engine fifo;
+ struct nouveau_display_engine display;
+ struct nouveau_gpio_engine gpio;
};
struct nouveau_pll_vals {
@@ -397,7 +410,7 @@ enum nv04_fp_display_regs {
struct nv04_crtc_reg {
unsigned char MiscOutReg; /* */
- uint8_t CRTC[0x9f];
+ uint8_t CRTC[0xa0];
uint8_t CR58[0x10];
uint8_t Sequencer[5];
uint8_t Graphics[9];
@@ -496,15 +509,11 @@ enum nouveau_card_type {
NV_30 = 0x30,
NV_40 = 0x40,
NV_50 = 0x50,
+ NV_C0 = 0xc0,
};
struct drm_nouveau_private {
struct drm_device *dev;
- enum {
- NOUVEAU_CARD_INIT_DOWN,
- NOUVEAU_CARD_INIT_DONE,
- NOUVEAU_CARD_INIT_FAILED
- } init_state;
/* the card type, takes NV_* as values */
enum nouveau_card_type card_type;
@@ -525,16 +534,12 @@ struct drm_nouveau_private {
struct list_head vbl_waiting;
struct {
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct ttm_bo_global_ref bo_global_ref;
struct ttm_bo_device bdev;
- spinlock_t bo_list_lock;
- struct list_head bo_list;
atomic_t validate_sequence;
} ttm;
- struct fb_info *fbdev_info;
-
int fifo_alloc_count;
struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
@@ -595,11 +600,7 @@ struct drm_nouveau_private {
struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
int vm_vram_pt_nr;
- struct mem_block *ramin_heap;
-
- /* context table pointed to be NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
- uint32_t ctx_table_size;
- struct nouveau_gpuobj_ref *ctx_table;
+ struct drm_mm ramin_heap;
struct list_head gpuobj_list;
@@ -618,6 +619,11 @@ struct drm_nouveau_private {
struct backlight_device *backlight;
struct nouveau_channel *evo;
+ struct {
+ struct dcb_entry *dcb;
+ u16 script;
+ u32 pclk;
+ } evo_irq;
struct {
struct dentry *channel_root;
@@ -652,14 +658,6 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
- struct drm_nouveau_private *nv = dev->dev_private; \
- if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
- NV_ERROR(dev, "called without init\n"); \
- return -EINVAL; \
- } \
-} while (0)
-
#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id, cl, ch) do { \
struct drm_nouveau_private *nv = dev->dev_private; \
if (!nouveau_channel_owner(dev, (cl), (id))) { \
@@ -682,7 +680,6 @@ extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
-extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
@@ -707,17 +704,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
/* nouveau_mem.c */
-extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
- uint64_t size);
-extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
- uint64_t size, int align2,
- struct drm_file *, int tail);
-extern void nouveau_mem_takedown(struct mem_block **heap);
-extern void nouveau_mem_free_block(struct mem_block *);
extern int nouveau_mem_detect(struct drm_device *dev);
-extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
+extern int nouveau_mem_reset_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
uint32_t addr,
@@ -857,11 +847,13 @@ void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
+int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
+static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
#endif
/* nouveau_backlight.c */
@@ -924,6 +916,10 @@ extern void nv10_fb_takedown(struct drm_device *);
extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
+/* nv30_fb.c */
+extern int nv30_fb_init(struct drm_device *);
+extern void nv30_fb_takedown(struct drm_device *);
+
/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
@@ -934,6 +930,10 @@ extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
extern int nv50_fb_init(struct drm_device *);
extern void nv50_fb_takedown(struct drm_device *);
+/* nvc0_fb.c */
+extern int nvc0_fb_init(struct drm_device *);
+extern void nvc0_fb_takedown(struct drm_device *);
+
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
@@ -971,6 +971,20 @@ extern void nv50_fifo_destroy_context(struct nouveau_channel *);
extern int nv50_fifo_load_context(struct nouveau_channel *);
extern int nv50_fifo_unload_context(struct drm_device *);
+/* nvc0_fifo.c */
+extern int nvc0_fifo_init(struct drm_device *);
+extern void nvc0_fifo_takedown(struct drm_device *);
+extern void nvc0_fifo_disable(struct drm_device *);
+extern void nvc0_fifo_enable(struct drm_device *);
+extern bool nvc0_fifo_reassign(struct drm_device *, bool);
+extern bool nvc0_fifo_cache_flush(struct drm_device *);
+extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
+extern int nvc0_fifo_channel_id(struct drm_device *);
+extern int nvc0_fifo_create_context(struct nouveau_channel *);
+extern void nvc0_fifo_destroy_context(struct nouveau_channel *);
+extern int nvc0_fifo_load_context(struct nouveau_channel *);
+extern int nvc0_fifo_unload_context(struct drm_device *);
+
/* nv04_graph.c */
extern struct nouveau_pgraph_object_class nv04_graph_grclass[];
extern int nv04_graph_init(struct drm_device *);
@@ -1035,11 +1049,15 @@ extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
extern int nv50_grctx_init(struct nouveau_grctx *);
-/* nouveau_grctx.c */
-extern int nouveau_grctx_prog_load(struct drm_device *);
-extern void nouveau_grctx_vals_load(struct drm_device *,
- struct nouveau_gpuobj *);
-extern void nouveau_grctx_fini(struct drm_device *);
+/* nvc0_graph.c */
+extern int nvc0_graph_init(struct drm_device *);
+extern void nvc0_graph_takedown(struct drm_device *);
+extern void nvc0_graph_fifo_access(struct drm_device *, bool);
+extern struct nouveau_channel *nvc0_graph_channel(struct drm_device *);
+extern int nvc0_graph_create_context(struct nouveau_channel *);
+extern void nvc0_graph_destroy_context(struct nouveau_channel *);
+extern int nvc0_graph_load_context(struct nouveau_channel *);
+extern int nvc0_graph_unload_context(struct drm_device *);
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
@@ -1051,8 +1069,7 @@ extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv04_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv04_instmem_finish_access(struct drm_device *);
+extern void nv04_instmem_flush(struct drm_device *);
/* nv50_instmem.c */
extern int nv50_instmem_init(struct drm_device *);
@@ -1064,8 +1081,21 @@ extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
-extern void nv50_instmem_prepare_access(struct drm_device *, bool write);
-extern void nv50_instmem_finish_access(struct drm_device *);
+extern void nv50_instmem_flush(struct drm_device *);
+extern void nv84_instmem_flush(struct drm_device *);
+extern void nv50_vm_flush(struct drm_device *, int engine);
+
+/* nvc0_instmem.c */
+extern int nvc0_instmem_init(struct drm_device *);
+extern void nvc0_instmem_takedown(struct drm_device *);
+extern int nvc0_instmem_suspend(struct drm_device *);
+extern void nvc0_instmem_resume(struct drm_device *);
+extern int nvc0_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nvc0_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nvc0_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+extern void nvc0_instmem_flush(struct drm_device *);
/* nv04_mc.c */
extern int nv04_mc_init(struct drm_device *);
@@ -1088,13 +1118,14 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
/* nv04_dac.c */
-extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dac_create(struct drm_connector *, struct dcb_entry *);
extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
extern int nv04_dac_output_offset(struct drm_encoder *encoder);
extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
+extern bool nv04_dac_in_use(struct drm_encoder *encoder);
/* nv04_dfp.c */
-extern int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_dfp_create(struct drm_connector *, struct dcb_entry *);
extern int nv04_dfp_get_bound_head(struct drm_device *dev, struct dcb_entry *dcbent);
extern void nv04_dfp_bind_head(struct drm_device *dev, struct dcb_entry *dcbent,
int head, bool dl);
@@ -1103,15 +1134,17 @@ extern void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode);
/* nv04_tv.c */
extern int nv04_tv_identify(struct drm_device *dev, int i2c_index);
-extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv04_tv_create(struct drm_connector *, struct dcb_entry *);
/* nv17_tv.c */
-extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
+extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
/* nv04_display.c */
+extern int nv04_display_early_init(struct drm_device *);
+extern void nv04_display_late_takedown(struct drm_device *);
extern int nv04_display_create(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
extern void nv04_display_destroy(struct drm_device *);
-extern void nv04_display_restore(struct drm_device *);
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1147,7 +1180,6 @@ extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
extern int nouveau_fence_flush(void *obj, void *arg);
extern void nouveau_fence_unref(void **obj);
extern void *nouveau_fence_ref(void *obj);
-extern void nouveau_fence_handler(struct drm_device *dev, int channel);
/* nouveau_gem.c */
extern int nouveau_gem_new(struct drm_device *, struct nouveau_channel *,
@@ -1167,13 +1199,15 @@ extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
-/* nv17_gpio.c */
-int nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+/* nv10_gpio.c */
+int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
/* nv50_gpio.c */
+int nv50_gpio_init(struct drm_device *dev);
int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+void nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
/* nv50_calc. */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
@@ -1220,6 +1254,14 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
iowrite32_native(val, dev_priv->mmio + reg);
}
+static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
+{
+ u32 tmp = nv_rd32(dev, reg);
+ tmp &= ~mask;
+ tmp |= val;
+ nv_wr32(dev, reg, tmp);
+}
+
static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
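
[Editor's note, not part of the patch: nv_mask() added above is a read-modify-write helper; a register update that previously needed an explicit nv_rd32()/nv_wr32() pair collapses into one call. A minimal example with an invented register offset and bit values:

	/* Clear bit 0 and set bit 4 of a hypothetical register 0x1234. */
	nv_mask(dev, 0x1234, 0x00000011, 0x00000010);

	/* equivalent open-coded form */
	u32 tmp = nv_rd32(dev, 0x1234);
	tmp &= ~0x00000011;
	tmp |= 0x00000010;
	nv_wr32(dev, 0x1234, tmp);
]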
diff --git a/drivers/gpu/drm/nouveau/nouveau_encoder.h b/drivers/gpu/drm/nouveau/nouveau_encoder.h
index e1df8209cd0f..7c82d68bc155 100644
--- a/drivers/gpu/drm/nouveau/nouveau_encoder.h
+++ b/drivers/gpu/drm/nouveau/nouveau_encoder.h
@@ -38,13 +38,15 @@ struct nouveau_encoder {
struct dcb_entry *dcb;
int or;
+ /* different to drm_encoder.crtc, this reflects what's
+ * actually programmed on the hw, not the proposed crtc */
+ struct drm_crtc *crtc;
+
struct drm_display_mode mode;
int last_dpms;
struct nv04_output_reg restore;
- void (*disconnect)(struct nouveau_encoder *encoder);
-
union {
struct {
int mc_unknown;
@@ -69,10 +71,16 @@ static inline struct drm_encoder *to_drm_encoder(struct nouveau_encoder *enc)
return &enc->base.base;
}
+static inline struct drm_encoder_slave_funcs *
+get_slave_funcs(struct drm_encoder *enc)
+{
+ return to_encoder_slave(enc)->slave_funcs;
+}
+
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
-int nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry);
-int nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry);
+int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
+int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
struct bit_displayport_encoder_table {
uint32_t match;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 257ea130ae13..dbd30b2e43fd 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -250,6 +250,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
FBINFO_HWACCEL_FILLRECT |
FBINFO_HWACCEL_IMAGEBLIT;
+ info->flags |= FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &nouveau_fbcon_ops;
info->fix.smem_start = dev->mode_config.fb_base + nvbo->bo.offset -
dev_priv->vm_vram_base;
@@ -280,6 +281,8 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
if (dev_priv->channel && !nouveau_nofbaccel) {
switch (dev_priv->card_type) {
+ case NV_C0:
+ break;
case NV_50:
nv50_fbcon_accel_init(info);
info->fbops = &nv50_fbcon_ops;
@@ -333,7 +336,7 @@ nouveau_fbcon_output_poll_changed(struct drm_device *dev)
drm_fb_helper_hotplug_event(&dev_priv->nfbdev->helper);
}
-int
+static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
{
struct nouveau_framebuffer *nouveau_fb = &nfbdev->nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index faddf53ff9ed..6b208ffafa8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -67,12 +67,13 @@ nouveau_fence_update(struct nouveau_channel *chan)
if (USE_REFCNT)
sequence = nvchan_rd32(chan, 0x48);
else
- sequence = chan->fence.last_sequence_irq;
+ sequence = atomic_read(&chan->fence.last_sequence_irq);
if (chan->fence.sequence_ack == sequence)
return;
chan->fence.sequence_ack = sequence;
+ spin_lock(&chan->fence.lock);
list_for_each_safe(entry, tmp, &chan->fence.pending) {
fence = list_entry(entry, struct nouveau_fence, entry);
@@ -84,6 +85,7 @@ nouveau_fence_update(struct nouveau_channel *chan)
if (sequence == chan->fence.sequence_ack)
break;
}
+ spin_unlock(&chan->fence.lock);
}
int
@@ -119,7 +121,6 @@ nouveau_fence_emit(struct nouveau_fence *fence)
{
struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
struct nouveau_channel *chan = fence->channel;
- unsigned long flags;
int ret;
ret = RING_SPACE(chan, 2);
@@ -127,9 +128,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
return ret;
if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
- spin_lock_irqsave(&chan->fence.lock, flags);
nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
BUG_ON(chan->fence.sequence ==
chan->fence.sequence_ack - 1);
@@ -138,9 +137,9 @@ nouveau_fence_emit(struct nouveau_fence *fence)
fence->sequence = ++chan->fence.sequence;
kref_get(&fence->refcount);
- spin_lock_irqsave(&chan->fence.lock, flags);
+ spin_lock(&chan->fence.lock);
list_add_tail(&fence->entry, &chan->fence.pending);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
+ spin_unlock(&chan->fence.lock);
BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
OUT_RING(chan, fence->sequence);
@@ -173,14 +172,11 @@ nouveau_fence_signalled(void *sync_obj, void *sync_arg)
{
struct nouveau_fence *fence = nouveau_fence(sync_obj);
struct nouveau_channel *chan = fence->channel;
- unsigned long flags;
if (fence->signalled)
return true;
- spin_lock_irqsave(&chan->fence.lock, flags);
nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
return fence->signalled;
}
@@ -190,8 +186,6 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
unsigned long timeout = jiffies + (3 * DRM_HZ);
int ret = 0;
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
while (1) {
if (nouveau_fence_signalled(sync_obj, sync_arg))
break;
@@ -201,6 +195,8 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
break;
}
+ __set_current_state(intr ? TASK_INTERRUPTIBLE
+ : TASK_UNINTERRUPTIBLE);
if (lazy)
schedule_timeout(1);
@@ -221,27 +217,12 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
return 0;
}
-void
-nouveau_fence_handler(struct drm_device *dev, int channel)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
-
- if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
- chan = dev_priv->fifos[channel];
-
- if (chan) {
- spin_lock_irq(&chan->fence.lock);
- nouveau_fence_update(chan);
- spin_unlock_irq(&chan->fence.lock);
- }
-}
-
int
nouveau_fence_init(struct nouveau_channel *chan)
{
INIT_LIST_HEAD(&chan->fence.pending);
spin_lock_init(&chan->fence.lock);
+ atomic_set(&chan->fence.last_sequence_irq, 0);
return 0;
}
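
[Editor's note, not part of the patch: turning last_sequence_irq into an atomic_t lets the interrupt path publish the latest fence sequence without holding fence.lock, while nouveau_fence_update() now takes the lock itself around the pending-list walk. The resulting access pattern, roughly (the producer lives elsewhere in the driver and is not shown in these hunks):

	/* producer side (IRQ context), sketch */
	atomic_set(&chan->fence.last_sequence_irq, sequence);

	/* consumer side, as in nouveau_fence_update() above */
	sequence = atomic_read(&chan->fence.last_sequence_irq);
]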
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 69c76cf93407..0f417ac1b696 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -137,8 +137,6 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
uint32_t flags = 0;
int ret = 0;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
@@ -286,7 +284,7 @@ retry:
if (!gem) {
NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
validate_fini(op, NULL);
- return -EINVAL;
+ return -ENOENT;
}
nvbo = gem->driver_private;
@@ -577,10 +575,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan;
struct validate_op op;
- struct nouveau_fence *fence = 0;
+ struct nouveau_fence *fence = NULL;
int i, j, ret = 0, do_reloc = 0;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
req->vram_available = dev_priv->fb_aper_free;
@@ -760,11 +757,9 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
int ret = -EINVAL;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
- return ret;
+ return -ENOENT;
nvbo = nouveau_gem_object(gem);
if (nvbo->cpu_filp) {
@@ -800,11 +795,9 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
struct nouveau_bo *nvbo;
int ret = -EINVAL;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
- return ret;
+ return -ENOENT;
nvbo = nouveau_gem_object(gem);
if (nvbo->cpu_filp != file_priv)
@@ -827,11 +820,9 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
struct drm_gem_object *gem;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
gem = drm_gem_object_lookup(dev, file_priv, req->handle);
if (!gem)
- return -EINVAL;
+ return -ENOENT;
ret = nouveau_gem_info(gem, req);
drm_gem_object_unreference_unlocked(gem);
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c
deleted file mode 100644
index f731c5f60536..000000000000
--- a/drivers/gpu/drm/nouveau/nouveau_grctx.c
+++ /dev/null
@@ -1,160 +0,0 @@
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <linux/slab.h>
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-
-struct nouveau_ctxprog {
- uint32_t signature;
- uint8_t version;
- uint16_t length;
- uint32_t data[];
-} __attribute__ ((packed));
-
-struct nouveau_ctxvals {
- uint32_t signature;
- uint8_t version;
- uint32_t length;
- struct {
- uint32_t offset;
- uint32_t value;
- } data[];
-} __attribute__ ((packed));
-
-int
-nouveau_grctx_prog_load(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- const int chipset = dev_priv->chipset;
- const struct firmware *fw;
- const struct nouveau_ctxprog *cp;
- const struct nouveau_ctxvals *cv;
- char name[32];
- int ret, i;
-
- if (pgraph->accel_blocked)
- return -ENODEV;
-
- if (!pgraph->ctxprog) {
- sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
- return ret;
- }
-
- pgraph->ctxprog = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!pgraph->ctxprog) {
- NV_ERROR(dev, "OOM copying ctxprog\n");
- release_firmware(fw);
- return -ENOMEM;
- }
-
- cp = pgraph->ctxprog;
- if (le32_to_cpu(cp->signature) != 0x5043564e ||
- cp->version != 0 ||
- le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
- NV_ERROR(dev, "ctxprog invalid\n");
- release_firmware(fw);
- nouveau_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- if (!pgraph->ctxvals) {
- sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
- ret = request_firmware(&fw, name, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
- nouveau_grctx_fini(dev);
- return ret;
- }
-
- pgraph->ctxvals = kmemdup(fw->data, fw->size, GFP_KERNEL);
- if (!pgraph->ctxvals) {
- NV_ERROR(dev, "OOM copying ctxvals\n");
- release_firmware(fw);
- nouveau_grctx_fini(dev);
- return -ENOMEM;
- }
-
- cv = (void *)pgraph->ctxvals;
- if (le32_to_cpu(cv->signature) != 0x5643564e ||
- cv->version != 0 ||
- le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
- NV_ERROR(dev, "ctxvals invalid\n");
- release_firmware(fw);
- nouveau_grctx_fini(dev);
- return -EINVAL;
- }
- release_firmware(fw);
- }
-
- cp = pgraph->ctxprog;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < le16_to_cpu(cp->length); i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
- le32_to_cpu(cp->data[i]));
-
- return 0;
-}
-
-void
-nouveau_grctx_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-
- if (pgraph->ctxprog) {
- kfree(pgraph->ctxprog);
- pgraph->ctxprog = NULL;
- }
-
- if (pgraph->ctxvals) {
- kfree(pgraph->ctxprog);
- pgraph->ctxvals = NULL;
- }
-}
-
-void
-nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_ctxvals *cv = pgraph->ctxvals;
- int i;
-
- if (!cv)
- return;
-
- for (i = 0; i < le32_to_cpu(cv->length); i++)
- nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
- le32_to_cpu(cv->data[i].value));
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 7855b35effc3..7b613682e400 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -865,8 +865,12 @@ nv_save_state_ext(struct drm_device *dev, int head,
rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
- if (dev_priv->card_type >= NV_30)
+
+ if (dev_priv->card_type >= NV_30) {
rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+ rd_cio_state(dev, head, regp, 0x9f);
+ }
+
rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
@@ -971,8 +975,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
- if (dev_priv->card_type >= NV_30)
+
+ if (dev_priv->card_type >= NV_30) {
wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+ wr_cio_state(dev, head, regp, 0x9f);
+ }
wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 316a3c7e6eb4..0bd407ca3d42 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -163,7 +163,7 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
if (entry->chan)
return -EEXIST;
- if (dev_priv->card_type == NV_50 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
+ if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
return -EINVAL;
}
@@ -174,26 +174,26 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
switch (entry->port_type) {
case 0:
- i2c->algo.bit.setsda = nv04_i2c_setsda;
- i2c->algo.bit.setscl = nv04_i2c_setscl;
- i2c->algo.bit.getsda = nv04_i2c_getsda;
- i2c->algo.bit.getscl = nv04_i2c_getscl;
+ i2c->bit.setsda = nv04_i2c_setsda;
+ i2c->bit.setscl = nv04_i2c_setscl;
+ i2c->bit.getsda = nv04_i2c_getsda;
+ i2c->bit.getscl = nv04_i2c_getscl;
i2c->rd = entry->read;
i2c->wr = entry->write;
break;
case 4:
- i2c->algo.bit.setsda = nv4e_i2c_setsda;
- i2c->algo.bit.setscl = nv4e_i2c_setscl;
- i2c->algo.bit.getsda = nv4e_i2c_getsda;
- i2c->algo.bit.getscl = nv4e_i2c_getscl;
+ i2c->bit.setsda = nv4e_i2c_setsda;
+ i2c->bit.setscl = nv4e_i2c_setscl;
+ i2c->bit.getsda = nv4e_i2c_getsda;
+ i2c->bit.getscl = nv4e_i2c_getscl;
i2c->rd = 0x600800 + entry->read;
i2c->wr = 0x600800 + entry->write;
break;
case 5:
- i2c->algo.bit.setsda = nv50_i2c_setsda;
- i2c->algo.bit.setscl = nv50_i2c_setscl;
- i2c->algo.bit.getsda = nv50_i2c_getsda;
- i2c->algo.bit.getscl = nv50_i2c_getscl;
+ i2c->bit.setsda = nv50_i2c_setsda;
+ i2c->bit.setscl = nv50_i2c_setscl;
+ i2c->bit.getsda = nv50_i2c_getsda;
+ i2c->bit.getscl = nv50_i2c_getscl;
i2c->rd = nv50_i2c_port[entry->read];
i2c->wr = i2c->rd;
break;
@@ -216,17 +216,14 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
i2c_set_adapdata(&i2c->adapter, i2c);
if (entry->port_type < 6) {
- i2c->adapter.algo_data = &i2c->algo.bit;
- i2c->algo.bit.udelay = 40;
- i2c->algo.bit.timeout = usecs_to_jiffies(5000);
- i2c->algo.bit.data = i2c;
+ i2c->adapter.algo_data = &i2c->bit;
+ i2c->bit.udelay = 40;
+ i2c->bit.timeout = usecs_to_jiffies(5000);
+ i2c->bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
} else {
- i2c->adapter.algo_data = &i2c->algo.dp;
- i2c->algo.dp.running = false;
- i2c->algo.dp.address = 0;
- i2c->algo.dp.aux_ch = nouveau_dp_i2c_aux_ch;
- ret = i2c_dp_aux_add_bus(&i2c->adapter);
+ i2c->adapter.algo = &nouveau_dp_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
}
if (ret) {
@@ -278,3 +275,45 @@ nouveau_i2c_find(struct drm_device *dev, int index)
return i2c->chan;
}
+bool
+nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
+{
+ uint8_t buf[] = { 0 };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+int
+nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ struct i2c_board_info *info, int index)
+{
+ struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
+ int i;
+
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+
+ for (i = 0; info[i].addr; i++) {
+ if (nouveau_probe_i2c_addr(i2c, info[i].addr)) {
+ NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
+ return i;
+ }
+ }
+
+ NV_DEBUG(dev, "No devices found.\n");
+
+ return -ENODEV;
+}
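
[Editor's note, not part of the patch: nouveau_i2c_identify() scans a zero-terminated i2c_board_info table and returns the index of the first address that ACKs on the given bus. A hypothetical caller (table contents chosen for illustration only):

	static struct i2c_board_info tv_encoders[] = {
		{ I2C_BOARD_INFO("ch7006", 0x75) },	/* example entry */
		{ }					/* terminator: .addr == 0 */
	};

	int idx = nouveau_i2c_identify(dev, "TV encoder", tv_encoders, i2c_index);
	if (idx < 0)
		return idx;	/* -ENODEV if nothing responded */
]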
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index c8eaf7a9fcbb..f71cb32f7571 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -33,10 +33,7 @@ struct dcb_i2c_entry;
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- union {
- struct i2c_algo_bit_data bit;
- struct i2c_algo_dp_aux_data dp;
- } algo;
+ struct i2c_algo_bit_data bit;
unsigned rd;
unsigned wr;
unsigned data;
@@ -45,8 +42,10 @@ struct nouveau_i2c_chan {
int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
+int nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ struct i2c_board_info *info, int index);
-int nouveau_dp_i2c_aux_ch(struct i2c_adapter *, int mode, uint8_t write_byte,
- uint8_t *read_byte);
+extern const struct i2c_algorithm nouveau_dp_i2c_algo;
#endif /* __NOUVEAU_I2C_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 53360f156063..794b0ee30cf6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -49,7 +49,7 @@ nouveau_irq_preinstall(struct drm_device *dev)
/* Master disable */
nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
- if (dev_priv->card_type == NV_50) {
+ if (dev_priv->card_type >= NV_50) {
INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
INIT_LIST_HEAD(&dev_priv->vbl_waiting);
@@ -586,11 +586,11 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
}
if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nouveau_pgraph_intr_context_switch(dev);
-
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
nv_wr32(dev, NV03_PGRAPH_INTR,
NV_PGRAPH_INTR_CONTEXT_SWITCH);
+
+ nouveau_pgraph_intr_context_switch(dev);
}
if (status) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index c1fd42b0dad1..9689d4147686 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,162 +35,6 @@
#include "drm_sarea.h"
#include "nouveau_drv.h"
-static struct mem_block *
-split_block(struct mem_block *p, uint64_t start, uint64_t size,
- struct drm_file *file_priv)
-{
- /* Maybe cut off the start of an existing block */
- if (start > p->start) {
- struct mem_block *newblock =
- kmalloc(sizeof(*newblock), GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start;
- newblock->size = p->size - (start - p->start);
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size -= newblock->size;
- p = newblock;
- }
-
- /* Maybe cut off the end of an existing block */
- if (size < p->size) {
- struct mem_block *newblock =
- kmalloc(sizeof(*newblock), GFP_KERNEL);
- if (!newblock)
- goto out;
- newblock->start = start + size;
- newblock->size = p->size - size;
- newblock->file_priv = NULL;
- newblock->next = p->next;
- newblock->prev = p;
- p->next->prev = newblock;
- p->next = newblock;
- p->size = size;
- }
-
-out:
- /* Our block is in the middle */
- p->file_priv = file_priv;
- return p;
-}
-
-struct mem_block *
-nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
- int align2, struct drm_file *file_priv, int tail)
-{
- struct mem_block *p;
- uint64_t mask = (1 << align2) - 1;
-
- if (!heap)
- return NULL;
-
- if (tail) {
- list_for_each_prev(p, heap) {
- uint64_t start = ((p->start + p->size) - size) & ~mask;
-
- if (p->file_priv == NULL && start >= p->start &&
- start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
- } else {
- list_for_each(p, heap) {
- uint64_t start = (p->start + mask) & ~mask;
-
- if (p->file_priv == NULL &&
- start + size <= p->start + p->size)
- return split_block(p, start, size, file_priv);
- }
- }
-
- return NULL;
-}
-
-void nouveau_mem_free_block(struct mem_block *p)
-{
- p->file_priv = NULL;
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- if (p->next->file_priv == NULL) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
-
- if (p->prev->file_priv == NULL) {
- struct mem_block *q = p->prev;
- q->size += p->size;
- q->next = p->next;
- q->next->prev = q;
- kfree(p);
- }
-}
-
-/* Initialize. How to check for an uninitialized heap?
- */
-int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
- uint64_t size)
-{
- struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
-
- if (!blocks)
- return -ENOMEM;
-
- *heap = kmalloc(sizeof(**heap), GFP_KERNEL);
- if (!*heap) {
- kfree(blocks);
- return -ENOMEM;
- }
-
- blocks->start = start;
- blocks->size = size;
- blocks->file_priv = NULL;
- blocks->next = blocks->prev = *heap;
-
- memset(*heap, 0, sizeof(**heap));
- (*heap)->file_priv = (struct drm_file *) -1;
- (*heap)->next = (*heap)->prev = blocks;
- return 0;
-}
-
-/*
- * Free all blocks associated with the releasing file_priv
- */
-void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
-{
- struct mem_block *p;
-
- if (!heap || !heap->next)
- return;
-
- list_for_each(p, heap) {
- if (p->file_priv == file_priv)
- p->file_priv = NULL;
- }
-
- /* Assumes a single contiguous range. Needs a special file_priv in
- * 'heap' to stop it being subsumed.
- */
- list_for_each(p, heap) {
- while ((p->file_priv == NULL) &&
- (p->next->file_priv == NULL) &&
- (p->next != heap)) {
- struct mem_block *q = p->next;
- p->size += q->size;
- p->next = q->next;
- p->next->prev = p;
- kfree(q);
- }
- }
-}
-
/*
* NV10-NV40 tiling helpers
*/
@@ -299,7 +143,6 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
phys |= 0x30;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
while (size) {
unsigned offset_h = upper_32_bits(phys);
unsigned offset_l = lower_32_bits(phys);
@@ -331,36 +174,12 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
}
}
}
- dev_priv->engine.instmem.finish_access(dev);
-
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00040001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00060001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ dev_priv->engine.instmem.flush(dev);
+ nv50_vm_flush(dev, 5);
+ nv50_vm_flush(dev, 0);
+ nv50_vm_flush(dev, 4);
+ nv50_vm_flush(dev, 6);
return 0;
}
@@ -374,7 +193,6 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
virt -= dev_priv->vm_vram_base;
pages = (size >> 16) << 1;
- dev_priv->engine.instmem.prepare_access(dev, true);
while (pages) {
pgt = dev_priv->vm_vram_pt[virt >> 29];
pte = (virt & 0x1ffe0000ULL) >> 15;
@@ -388,57 +206,19 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
while (pte < end)
nv_wo32(dev, pgt, pte++, 0);
}
- dev_priv->engine.instmem.finish_access(dev);
-
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return;
- }
-
- nv_wr32(dev, 0x100c80, 0x00040001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return;
- }
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, 0x100c80, 0x00060001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- }
+ nv50_vm_flush(dev, 5);
+ nv50_vm_flush(dev, 0);
+ nv50_vm_flush(dev, 4);
+ nv50_vm_flush(dev, 6);
}
/*
* Cleanup everything
*/
-void nouveau_mem_takedown(struct mem_block **heap)
-{
- struct mem_block *p;
-
- if (!*heap)
- return;
-
- for (p = (*heap)->next; p != *heap;) {
- struct mem_block *q = p;
- p = p->next;
- kfree(q);
- }
-
- kfree(*heap);
- *heap = NULL;
-}
-
-void nouveau_mem_close(struct drm_device *dev)
+void
+nouveau_mem_close(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -449,8 +229,7 @@ void nouveau_mem_close(struct drm_device *dev)
nouveau_ttm_global_release(dev_priv);
- if (drm_core_has_AGP(dev) && dev->agp &&
- drm_core_check_feature(dev, DRIVER_MODESET)) {
+ if (drm_core_has_AGP(dev) && dev->agp) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
@@ -471,28 +250,29 @@ void nouveau_mem_close(struct drm_device *dev)
}
if (dev_priv->fb_mtrr) {
- drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),
- drm_get_resource_len(dev, 1), DRM_MTRR_WC);
- dev_priv->fb_mtrr = 0;
+ drm_mtrr_del(dev_priv->fb_mtrr,
+ pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = -1;
}
}
static uint32_t
nouveau_mem_detect_nv04(struct drm_device *dev)
{
- uint32_t boot0 = nv_rd32(dev, NV03_BOOT_0);
+ uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
if (boot0 & 0x00000100)
return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;
- switch (boot0 & NV03_BOOT_0_RAM_AMOUNT) {
- case NV04_BOOT_0_RAM_AMOUNT_32MB:
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
return 32 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_16MB:
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
return 16 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_8MB:
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
return 8 * 1024 * 1024;
- case NV04_BOOT_0_RAM_AMOUNT_4MB:
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
return 4 * 1024 * 1024;
}
@@ -536,12 +316,22 @@ nouveau_mem_detect(struct drm_device *dev)
} else
if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
- } else {
- dev_priv->vram_size = nv_rd32(dev, NV04_FIFO_DATA);
- dev_priv->vram_size &= NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK;
- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+ } else
+ if (dev_priv->card_type < NV_C0) {
+ dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ dev_priv->vram_size &= 0xffffffff00ll;
+ if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
+ }
+ } else {
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
}
NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
@@ -555,18 +345,36 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
-#if __OS_HAS_AGP
-static void nouveau_mem_reset_agp(struct drm_device *dev)
+int
+nouveau_mem_reset_agp(struct drm_device *dev)
{
- uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;
+#if __OS_HAS_AGP
+ uint32_t saved_pci_nv_1, pmc_enable;
+ int ret;
+
+ /* First of all, disable fast writes, otherwise if it's
+ * already enabled in the AGP bridge and we disable the card's
+ * AGP controller we might be locking ourselves out of it. */
+ if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+
+ ret = drm_agp_info(dev, &info);
+ if (ret)
+ return ret;
+
+ mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+ ret = drm_agp_enable(dev, mode);
+ if (ret)
+ return ret;
+ }
saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);
- saved_pci_nv_19 = nv_rd32(dev, NV04_PBUS_PCI_NV_19);
/* clear busmaster bit */
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
- /* clear SBA and AGP bits */
- nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);
+ /* disable AGP */
+ nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);
/* power cycle pgraph, if enabled */
pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
@@ -578,11 +386,12 @@ static void nouveau_mem_reset_agp(struct drm_device *dev)
}
/* and restore (gives effect of resetting AGP) */
- nv_wr32(dev, NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
-}
#endif
+ return 0;
+}
+
int
nouveau_mem_init_agp(struct drm_device *dev)
{
@@ -592,11 +401,6 @@ nouveau_mem_init_agp(struct drm_device *dev)
struct drm_agp_mode mode;
int ret;
- if (nouveau_noagp)
- return 0;
-
- nouveau_mem_reset_agp(dev);
-
if (!dev->agp->acquired) {
ret = drm_agp_acquire(dev);
if (ret) {
@@ -605,6 +409,8 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
}
+ nouveau_mem_reset_agp(dev);
+
ret = drm_agp_info(dev, &info);
if (ret) {
NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
@@ -633,7 +439,7 @@ nouveau_mem_init(struct drm_device *dev)
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
int ret, dma_bits = 32;
- dev_priv->fb_phys = drm_get_resource_start(dev, 1);
+ dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
dev_priv->gart_info.type = NOUVEAU_GART_NONE;
if (dev_priv->card_type >= NV_50 &&
@@ -659,14 +465,13 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
- spin_lock_init(&dev_priv->ttm.bo_list_lock);
spin_lock_init(&dev_priv->tile.lock);
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
- if (dev_priv->fb_mappable_pages > drm_get_resource_len(dev, 1))
- dev_priv->fb_mappable_pages = drm_get_resource_len(dev, 1);
+ if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
+ dev_priv->fb_mappable_pages =
+ pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
/* remove reserved space at end of vram from available amount */
@@ -692,7 +497,7 @@ nouveau_mem_init(struct drm_device *dev)
/* GART */
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp) {
+ if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -718,8 +523,8 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
- drm_get_resource_len(dev, 1),
+ dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1),
DRM_MTRR_WC);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 9537f3e30115..3ec181ff50ce 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -55,7 +55,7 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
if (ret)
goto out_err;
- ret = nouveau_mem_init_heap(&chan->notifier_heap, 0, ntfy->bo.mem.size);
+ ret = drm_mm_init(&chan->notifier_heap, 0, ntfy->bo.mem.size);
if (ret)
goto out_err;
@@ -80,7 +80,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
nouveau_bo_unpin(chan->notifier_bo);
mutex_unlock(&dev->struct_mutex);
drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
- nouveau_mem_takedown(&chan->notifier_heap);
+ drm_mm_takedown(&chan->notifier_heap);
}
static void
@@ -90,7 +90,7 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
NV_DEBUG(dev, "\n");
if (gpuobj->priv)
- nouveau_mem_free_block(gpuobj->priv);
+ drm_mm_put_block(gpuobj->priv);
}
int
@@ -100,18 +100,13 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
- struct mem_block *mem;
+ struct drm_mm_node *mem;
uint32_t offset;
int target, ret;
- if (!chan->notifier_heap) {
- NV_ERROR(dev, "Channel %d doesn't have a notifier heap!\n",
- chan->id);
- return -EINVAL;
- }
-
- mem = nouveau_mem_alloc_block(chan->notifier_heap, size, 0,
- (struct drm_file *)-2, 0);
+ mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block(mem, size, 0);
if (!mem) {
NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
return -ENOMEM;
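chan->notifier_heap is now a struct drm_mm managed by the core range allocator instead of the driver-private mem_block list: an allocation is a search for a suitable hole followed by drm_mm_get_block(), and frees go back through drm_mm_put_block(). A minimal sketch of that lifecycle (sizes made up), assuming the drm_mm declarations pulled in via drmP.h:

	/* Sketch of the drm_mm pattern this patch adopts for the heaps. */
	static int notifier_heap_example(void)
	{
		struct drm_mm heap;
		struct drm_mm_node *node;
		int ret;

		ret = drm_mm_init(&heap, 0, 64 * 1024);  /* manage a 64 KiB range */
		if (ret)
			return ret;

		node = drm_mm_search_free(&heap, 4096, 0, 0);  /* find a hole    */
		if (node)
			node = drm_mm_get_block(node, 4096, 0);    /* claim it    */
		if (!node) {
			drm_mm_takedown(&heap);
			return -ENOMEM;
		}

		/* ... node->start / node->size describe the allocation ... */

		drm_mm_put_block(node);   /* return the block to the heap         */
		drm_mm_takedown(&heap);   /* heap must be empty at takedown       */
		return 0;
	}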
@@ -144,17 +139,17 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
mem->size, NV_DMA_ACCESS_RW, target,
&nobj);
if (ret) {
- nouveau_mem_free_block(mem);
+ drm_mm_put_block(mem);
NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
return ret;
}
- nobj->dtor = nouveau_notifier_gpuobj_dtor;
- nobj->priv = mem;
+ nobj->dtor = nouveau_notifier_gpuobj_dtor;
+ nobj->priv = mem;
ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
if (ret) {
nouveau_gpuobj_del(dev, &nobj);
- nouveau_mem_free_block(mem);
+ drm_mm_put_block(mem);
NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
return ret;
}
@@ -170,7 +165,7 @@ nouveau_notifier_offset(struct nouveau_gpuobj *nobj, uint32_t *poffset)
return -EINVAL;
if (poffset) {
- struct mem_block *mem = nobj->priv;
+ struct drm_mm_node *mem = nobj->priv;
if (*poffset >= mem->size)
return false;
@@ -189,7 +184,6 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e7c100ba63a1..b6bcb254f4ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -132,7 +132,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
}
}
- instmem->prepare_access(dev, true);
co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
do {
if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
@@ -143,7 +142,7 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
nv_wo32(dev, ramht, (co + 4)/4, ctx);
list_add_tail(&ref->list, &chan->ramht_refs);
- instmem->finish_access(dev);
+ instmem->flush(dev);
return 0;
}
NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
@@ -153,7 +152,6 @@ nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
if (co >= dev_priv->ramht_size)
co = 0;
} while (co != ho);
- instmem->finish_access(dev);
NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
return -ENOMEM;
@@ -173,7 +171,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
return;
}
- instmem->prepare_access(dev, true);
co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
do {
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
@@ -186,7 +183,7 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);
list_del(&ref->list);
- instmem->finish_access(dev);
+ instmem->flush(dev);
return;
}
@@ -195,7 +192,6 @@ nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
co = 0;
} while (co != ho);
list_del(&ref->list);
- instmem->finish_access(dev);
NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
chan->id, ref->handle);
@@ -209,7 +205,7 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
struct nouveau_gpuobj *gpuobj;
- struct mem_block *pramin = NULL;
+ struct drm_mm *pramin = NULL;
int ret;
NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
@@ -233,25 +229,12 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
* available.
*/
if (chan) {
- if (chan->ramin_heap) {
- NV_DEBUG(dev, "private heap\n");
- pramin = chan->ramin_heap;
- } else
- if (dev_priv->card_type < NV_50) {
- NV_DEBUG(dev, "global heap fallback\n");
- pramin = dev_priv->ramin_heap;
- }
+ NV_DEBUG(dev, "channel heap\n");
+ pramin = &chan->ramin_heap;
} else {
NV_DEBUG(dev, "global heap\n");
- pramin = dev_priv->ramin_heap;
- }
-
- if (!pramin) {
- NV_ERROR(dev, "No PRAMIN heap!\n");
- return -EINVAL;
- }
+ pramin = &dev_priv->ramin_heap;
- if (!chan) {
ret = engine->instmem.populate(dev, gpuobj, &size);
if (ret) {
nouveau_gpuobj_del(dev, &gpuobj);
@@ -260,9 +243,10 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
}
/* Allocate a chunk of the PRAMIN aperture */
- gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
- drm_order(align),
- (struct drm_file *)-2, 0);
+ gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
+ if (gpuobj->im_pramin)
+ gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);
+
if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
return -ENOMEM;
@@ -279,10 +263,9 @@ nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
int i;
- engine->instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
nv_wo32(dev, gpuobj, i/4, 0);
- engine->instmem.finish_access(dev);
+ engine->instmem.flush(dev);
}
*gpuobj_ret = gpuobj;
@@ -370,10 +353,9 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
}
if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
- engine->instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
nv_wo32(dev, gpuobj, i/4, 0);
- engine->instmem.finish_access(dev);
+ engine->instmem.flush(dev);
}
if (gpuobj->dtor)
@@ -386,7 +368,7 @@ nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
if (gpuobj->flags & NVOBJ_FLAG_FAKE)
kfree(gpuobj->im_pramin);
else
- nouveau_mem_free_block(gpuobj->im_pramin);
+ drm_mm_put_block(gpuobj->im_pramin);
}
list_del(&gpuobj->list);
@@ -589,7 +571,7 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
if (p_offset != ~0) {
- gpuobj->im_pramin = kzalloc(sizeof(struct mem_block),
+ gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
GFP_KERNEL);
if (!gpuobj->im_pramin) {
nouveau_gpuobj_del(dev, &gpuobj);
@@ -605,10 +587,9 @@ nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
}
if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size; i += 4)
nv_wo32(dev, gpuobj, i/4, 0);
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
}
if (pref) {
@@ -696,8 +677,6 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
return ret;
}
- instmem->prepare_access(dev, true);
-
if (dev_priv->card_type < NV_50) {
uint32_t frame, adjust, pte_flags = 0;
@@ -734,7 +713,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
nv_wo32(dev, *gpuobj, 5, flags5);
}
- instmem->finish_access(dev);
+ instmem->flush(dev);
(*gpuobj)->engine = NVOBJ_ENGINE_SW;
(*gpuobj)->class = class;
@@ -849,7 +828,6 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
return ret;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
if (dev_priv->card_type >= NV_50) {
nv_wo32(dev, *gpuobj, 0, class);
nv_wo32(dev, *gpuobj, 5, 0x00010000);
@@ -874,7 +852,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
}
}
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
(*gpuobj)->engine = NVOBJ_ENGINE_GR;
(*gpuobj)->class = class;
@@ -920,6 +898,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
base = 0;
/* PGRAPH context */
+ size += dev_priv->engine.graph.grctx_size;
if (dev_priv->card_type == NV_50) {
/* Various fixed table thingos */
@@ -930,12 +909,8 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
size += 0x8000;
/* RAMFC */
size += 0x1000;
- /* PGRAPH context */
- size += 0x70000;
}
- NV_DEBUG(dev, "ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
- chan->id, size, base);
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
&chan->ramin);
if (ret) {
@@ -944,8 +919,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
}
pramin = chan->ramin->gpuobj;
- ret = nouveau_mem_init_heap(&chan->ramin_heap,
- pramin->im_pramin->start + base, size);
+ ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref_del(dev, &chan->ramin);
@@ -969,15 +943,11 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
- /* Reserve a block of PRAMIN for the channel
- *XXX: maybe on <NV50 too at some point
- */
- if (0 || dev_priv->card_type == NV_50) {
- ret = nouveau_gpuobj_channel_init_pramin(chan);
- if (ret) {
- NV_ERROR(dev, "init pramin\n");
- return ret;
- }
+ /* Allocate a chunk of memory for per-channel object storage */
+ ret = nouveau_gpuobj_channel_init_pramin(chan);
+ if (ret) {
+ NV_ERROR(dev, "init pramin\n");
+ return ret;
}
/* NV50 VM
@@ -988,17 +958,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
if (dev_priv->card_type >= NV_50) {
uint32_t vm_offset, pde;
- instmem->prepare_access(dev, true);
-
vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
vm_offset += chan->ramin->gpuobj->im_pramin->start;
ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
0, &chan->vm_pd, NULL);
- if (ret) {
- instmem->finish_access(dev);
+ if (ret)
return ret;
- }
for (i = 0; i < 0x4000; i += 8) {
nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
@@ -1008,10 +974,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->gart_info.sg_ctxdma,
&chan->vm_gart_pt);
- if (ret) {
- instmem->finish_access(dev);
+ if (ret)
return ret;
- }
nv_wo32(dev, chan->vm_pd, pde++,
chan->vm_gart_pt->instance | 0x03);
nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
@@ -1021,17 +985,15 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
dev_priv->vm_vram_pt[i],
&chan->vm_vram_pt[i]);
- if (ret) {
- instmem->finish_access(dev);
+ if (ret)
return ret;
- }
nv_wo32(dev, chan->vm_pd, pde++,
chan->vm_vram_pt[i]->instance | 0x61);
nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
}
- instmem->finish_access(dev);
+ instmem->flush(dev);
}
/* RAMHT */
@@ -1130,8 +1092,8 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
- if (chan->ramin_heap)
- nouveau_mem_takedown(&chan->ramin_heap);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
if (chan->ramin)
nouveau_gpuobj_ref_del(dev, &chan->ramin);
@@ -1164,10 +1126,8 @@ nouveau_gpuobj_suspend(struct drm_device *dev)
return -ENOMEM;
}
- dev_priv->engine.instmem.prepare_access(dev, false);
for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
- dev_priv->engine.instmem.finish_access(dev);
}
return 0;
@@ -1212,10 +1172,9 @@ nouveau_gpuobj_resume(struct drm_device *dev)
if (!gpuobj->im_backing_suspend)
continue;
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
}
nouveau_gpuobj_suspend_cleanup(dev);
@@ -1232,7 +1191,6 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
if (init->handle == ~0)
@@ -1283,7 +1241,6 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
struct nouveau_channel *chan;
int ret;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index 6ca80a3fe70d..21a6e453b975 100644
--- a/drivers/gpu/drm/nouveau/nouveau_reg.h
+++ b/drivers/gpu/drm/nouveau/nouveau_reg.h
@@ -1,19 +1,64 @@
+#define NV04_PFB_BOOT_0 0x00100000
+# define NV04_PFB_BOOT_0_RAM_AMOUNT 0x00000003
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_32MB 0x00000000
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_4MB 0x00000001
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_8MB 0x00000002
+# define NV04_PFB_BOOT_0_RAM_AMOUNT_16MB 0x00000003
+# define NV04_PFB_BOOT_0_RAM_WIDTH_128 0x00000004
+# define NV04_PFB_BOOT_0_RAM_TYPE 0x00000028
+# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT 0x00000000
+# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT 0x00000008
+# define NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT_4BANK 0x00000010
+# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT 0x00000018
+# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBIT 0x00000020
+# define NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_64MBITX16 0x00000028
+# define NV04_PFB_BOOT_0_UMA_ENABLE 0x00000100
+# define NV04_PFB_BOOT_0_UMA_SIZE 0x0000f000
+#define NV04_PFB_DEBUG_0 0x00100080
+# define NV04_PFB_DEBUG_0_PAGE_MODE 0x00000001
+# define NV04_PFB_DEBUG_0_REFRESH_OFF 0x00000010
+# define NV04_PFB_DEBUG_0_REFRESH_COUNTX64 0x00003f00
+# define NV04_PFB_DEBUG_0_REFRESH_SLOW_CLK 0x00004000
+# define NV04_PFB_DEBUG_0_SAFE_MODE 0x00008000
+# define NV04_PFB_DEBUG_0_ALOM_ENABLE 0x00010000
+# define NV04_PFB_DEBUG_0_CASOE 0x00100000
+# define NV04_PFB_DEBUG_0_CKE_INVERT 0x10000000
+# define NV04_PFB_DEBUG_0_REFINC 0x20000000
+# define NV04_PFB_DEBUG_0_SAVE_POWER_OFF 0x40000000
+#define NV04_PFB_CFG0 0x00100200
+# define NV04_PFB_CFG0_SCRAMBLE 0x20000000
+#define NV04_PFB_CFG1 0x00100204
+#define NV04_PFB_FIFO_DATA 0x0010020c
+# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
+# define NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+#define NV10_PFB_REFCTRL 0x00100210
+# define NV10_PFB_REFCTRL_VALID_1 (1 << 31)
+#define NV04_PFB_PAD 0x0010021c
+# define NV04_PFB_PAD_CKE_NORMAL (1 << 0)
+#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
+#define NV10_PFB_TILE__SIZE 8
+#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
+#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
+#define NV10_PFB_TSTATUS(i) (0x0010024c + (i*16))
+#define NV04_PFB_REF 0x001002d0
+# define NV04_PFB_REF_CMD_REFRESH (1 << 0)
+#define NV04_PFB_PRE 0x001002d4
+# define NV04_PFB_PRE_CMD_PRECHARGE (1 << 0)
+#define NV10_PFB_CLOSE_PAGE2 0x0010033c
+#define NV04_PFB_SCRAMBLE(i) (0x00100400 + 4 * (i))
+#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
+#define NV40_PFB_TILE__SIZE_0 12
+#define NV40_PFB_TILE__SIZE_1 15
+#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
+#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
+#define NV40_PFB_TSTATUS(i) (0x0010060c + (i*16))
+#define NV40_PFB_UNK_800 0x00100800
-#define NV03_BOOT_0 0x00100000
-# define NV03_BOOT_0_RAM_AMOUNT 0x00000003
-# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000
-# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001
-# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002
-# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003
-# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000
-# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001
-# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002
-# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003
-
-#define NV04_FIFO_DATA 0x0010020c
-# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
-# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
+#define NV_PEXTDEV_BOOT_0 0x00101000
+#define NV_PEXTDEV_BOOT_0_RAMCFG 0x0000003c
+# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
+#define NV_PEXTDEV_BOOT_3 0x0010100c
#define NV_RAMIN 0x00700000
@@ -131,23 +176,6 @@
#define NV04_PTIMER_TIME_1 0x00009410
#define NV04_PTIMER_ALARM_0 0x00009420
-#define NV04_PFB_CFG0 0x00100200
-#define NV04_PFB_CFG1 0x00100204
-#define NV40_PFB_020C 0x0010020C
-#define NV10_PFB_TILE(i) (0x00100240 + (i*16))
-#define NV10_PFB_TILE__SIZE 8
-#define NV10_PFB_TLIMIT(i) (0x00100244 + (i*16))
-#define NV10_PFB_TSIZE(i) (0x00100248 + (i*16))
-#define NV10_PFB_TSTATUS(i) (0x0010024C + (i*16))
-#define NV10_PFB_CLOSE_PAGE2 0x0010033C
-#define NV40_PFB_TILE(i) (0x00100600 + (i*16))
-#define NV40_PFB_TILE__SIZE_0 12
-#define NV40_PFB_TILE__SIZE_1 15
-#define NV40_PFB_TLIMIT(i) (0x00100604 + (i*16))
-#define NV40_PFB_TSIZE(i) (0x00100608 + (i*16))
-#define NV40_PFB_TSTATUS(i) (0x0010060C + (i*16))
-#define NV40_PFB_UNK_800 0x00100800
-
#define NV04_PGRAPH_DEBUG_0 0x00400080
#define NV04_PGRAPH_DEBUG_1 0x00400084
#define NV04_PGRAPH_DEBUG_2 0x00400088
@@ -192,28 +220,21 @@
# define NV_PGRAPH_INTR_ERROR (1<<20)
#define NV10_PGRAPH_CTX_CONTROL 0x00400144
#define NV10_PGRAPH_CTX_USER 0x00400148
-#define NV10_PGRAPH_CTX_SWITCH1 0x0040014C
-#define NV10_PGRAPH_CTX_SWITCH2 0x00400150
-#define NV10_PGRAPH_CTX_SWITCH3 0x00400154
-#define NV10_PGRAPH_CTX_SWITCH4 0x00400158
-#define NV10_PGRAPH_CTX_SWITCH5 0x0040015C
+#define NV10_PGRAPH_CTX_SWITCH(i) (0x0040014C + 0x4*(i))
#define NV04_PGRAPH_CTX_SWITCH1 0x00400160
-#define NV10_PGRAPH_CTX_CACHE1 0x00400160
+#define NV10_PGRAPH_CTX_CACHE(i, j) (0x00400160 \
+ + 0x4*(i) + 0x20*(j))
#define NV04_PGRAPH_CTX_SWITCH2 0x00400164
#define NV04_PGRAPH_CTX_SWITCH3 0x00400168
#define NV04_PGRAPH_CTX_SWITCH4 0x0040016C
#define NV04_PGRAPH_CTX_CONTROL 0x00400170
#define NV04_PGRAPH_CTX_USER 0x00400174
#define NV04_PGRAPH_CTX_CACHE1 0x00400180
-#define NV10_PGRAPH_CTX_CACHE2 0x00400180
#define NV03_PGRAPH_CTX_CONTROL 0x00400190
#define NV03_PGRAPH_CTX_USER 0x00400194
#define NV04_PGRAPH_CTX_CACHE2 0x004001A0
-#define NV10_PGRAPH_CTX_CACHE3 0x004001A0
#define NV04_PGRAPH_CTX_CACHE3 0x004001C0
-#define NV10_PGRAPH_CTX_CACHE4 0x004001C0
#define NV04_PGRAPH_CTX_CACHE4 0x004001E0
-#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
#define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
@@ -328,9 +349,12 @@
#define NV04_PGRAPH_FFINTFC_ST2 0x00400754
#define NV10_PGRAPH_RDI_DATA 0x00400754
#define NV04_PGRAPH_DMA_PITCH 0x00400760
-#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
+#define NV10_PGRAPH_FFINTFC_FIFO_PTR 0x00400760
#define NV04_PGRAPH_DVD_COLORFMT 0x00400764
+#define NV10_PGRAPH_FFINTFC_ST2 0x00400764
#define NV04_PGRAPH_SCALED_FORMAT 0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DL 0x00400768
+#define NV10_PGRAPH_FFINTFC_ST2_DH 0x0040076c
#define NV10_PGRAPH_DMA_PITCH 0x00400770
#define NV10_PGRAPH_DVD_COLORFMT 0x00400774
#define NV10_PGRAPH_SCALED_FORMAT 0x00400778
@@ -814,6 +838,7 @@
#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE 0x80000000
#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL 0x00000fff
#define NV50_SOR_DP_CTRL(i,l) (0x0061c10c + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_CTRL_ENABLED 0x00000001
#define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED 0x00004000
#define NV50_SOR_DP_CTRL_LANE_MASK 0x001f0000
#define NV50_SOR_DP_CTRL_LANE_0_ENABLED 0x00010000
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1d6ee8b55154..491767fe4fcf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
- dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
nvbe->pte_start = pte;
for (i = 0; i < nvbe->nr_pages; i++) {
@@ -116,24 +115,11 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.finish_access(nvbe->dev);
+ dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ nv50_vm_flush(dev, 5); /* PGRAPH */
+ nv50_vm_flush(dev, 0); /* PFIFO */
}
nvbe->bound = true;
@@ -154,7 +140,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
if (!nvbe->bound)
return 0;
- dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
pte = nvbe->pte_start;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
@@ -170,24 +155,11 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dma_offset += NV_CTXDMA_PAGE_SIZE;
}
}
- dev_priv->engine.instmem.finish_access(nvbe->dev);
+ dev_priv->engine.instmem.flush(nvbe->dev);
if (dev_priv->card_type == NV_50) {
- nv_wr32(dev, 0x100c80, 0x00050001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x100c80, 0x00000001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n",
- nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ nv50_vm_flush(dev, 5);
+ nv50_vm_flush(dev, 0);
}
nvbe->bound = false;
@@ -272,7 +244,6 @@ nouveau_sgdma_init(struct drm_device *dev)
pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- dev_priv->engine.instmem.prepare_access(dev, true);
if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
@@ -294,7 +265,7 @@ nouveau_sgdma_init(struct drm_device *dev)
nv_wo32(dev, gpuobj, (i+4)/4, 0);
}
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
dev_priv->gart_info.aper_base = 0;
@@ -325,14 +296,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
int pte;
pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
if (dev_priv->card_type < NV_50) {
- instmem->prepare_access(dev, false);
*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
- instmem->finish_access(dev);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index b02a231d6937..989322be3728 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -38,6 +38,7 @@
#include "nv50_display.h"
static void nouveau_stub_takedown(struct drm_device *dev) {}
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
@@ -54,8 +55,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -85,6 +85,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv04_fifo_destroy_context;
engine->fifo.load_context = nv04_fifo_load_context;
engine->fifo.unload_context = nv04_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = NULL;
+ engine->gpio.set = NULL;
+ engine->gpio.irq_enable = NULL;
break;
case 0x10:
engine->instmem.init = nv04_instmem_init;
@@ -95,8 +105,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -128,6 +137,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x20:
engine->instmem.init = nv04_instmem_init;
@@ -138,8 +157,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -171,6 +189,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x30:
engine->instmem.init = nv04_instmem_init;
@@ -181,15 +209,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nv10_fb_init;
- engine->fb.takedown = nv10_fb_takedown;
+ engine->fb.init = nv30_fb_init;
+ engine->fb.takedown = nv30_fb_takedown;
engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
engine->graph.grclass = nv30_graph_grclass;
engine->graph.init = nv30_graph_init;
@@ -214,6 +241,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv10_fifo_destroy_context;
engine->fifo.load_context = nv10_fifo_load_context;
engine->fifo.unload_context = nv10_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x40:
case 0x60:
@@ -225,8 +262,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv04_instmem_clear;
engine->instmem.bind = nv04_instmem_bind;
engine->instmem.unbind = nv04_instmem_unbind;
- engine->instmem.prepare_access = nv04_instmem_prepare_access;
- engine->instmem.finish_access = nv04_instmem_finish_access;
+ engine->instmem.flush = nv04_instmem_flush;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -258,6 +294,16 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv40_fifo_destroy_context;
engine->fifo.load_context = nv40_fifo_load_context;
engine->fifo.unload_context = nv40_fifo_unload_context;
+ engine->display.early_init = nv04_display_early_init;
+ engine->display.late_takedown = nv04_display_late_takedown;
+ engine->display.create = nv04_display_create;
+ engine->display.init = nv04_display_init;
+ engine->display.destroy = nv04_display_destroy;
+ engine->gpio.init = nouveau_stub_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv10_gpio_get;
+ engine->gpio.set = nv10_gpio_set;
+ engine->gpio.irq_enable = NULL;
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
@@ -271,8 +317,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->instmem.clear = nv50_instmem_clear;
engine->instmem.bind = nv50_instmem_bind;
engine->instmem.unbind = nv50_instmem_unbind;
- engine->instmem.prepare_access = nv50_instmem_prepare_access;
- engine->instmem.finish_access = nv50_instmem_finish_access;
+ if (dev_priv->chipset == 0x50)
+ engine->instmem.flush = nv50_instmem_flush;
+ else
+ engine->instmem.flush = nv84_instmem_flush;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nv04_timer_init;
@@ -300,6 +348,64 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->fifo.destroy_context = nv50_fifo_destroy_context;
engine->fifo.load_context = nv50_fifo_load_context;
engine->fifo.unload_context = nv50_fifo_unload_context;
+ engine->display.early_init = nv50_display_early_init;
+ engine->display.late_takedown = nv50_display_late_takedown;
+ engine->display.create = nv50_display_create;
+ engine->display.init = nv50_display_init;
+ engine->display.destroy = nv50_display_destroy;
+ engine->gpio.init = nv50_gpio_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv50_gpio_get;
+ engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_enable = nv50_gpio_irq_enable;
+ break;
+ case 0xC0:
+ engine->instmem.init = nvc0_instmem_init;
+ engine->instmem.takedown = nvc0_instmem_takedown;
+ engine->instmem.suspend = nvc0_instmem_suspend;
+ engine->instmem.resume = nvc0_instmem_resume;
+ engine->instmem.populate = nvc0_instmem_populate;
+ engine->instmem.clear = nvc0_instmem_clear;
+ engine->instmem.bind = nvc0_instmem_bind;
+ engine->instmem.unbind = nvc0_instmem_unbind;
+ engine->instmem.flush = nvc0_instmem_flush;
+ engine->mc.init = nv50_mc_init;
+ engine->mc.takedown = nv50_mc_takedown;
+ engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
+ engine->timer.takedown = nv04_timer_takedown;
+ engine->fb.init = nvc0_fb_init;
+ engine->fb.takedown = nvc0_fb_takedown;
+ engine->graph.grclass = NULL; //nvc0_graph_grclass;
+ engine->graph.init = nvc0_graph_init;
+ engine->graph.takedown = nvc0_graph_takedown;
+ engine->graph.fifo_access = nvc0_graph_fifo_access;
+ engine->graph.channel = nvc0_graph_channel;
+ engine->graph.create_context = nvc0_graph_create_context;
+ engine->graph.destroy_context = nvc0_graph_destroy_context;
+ engine->graph.load_context = nvc0_graph_load_context;
+ engine->graph.unload_context = nvc0_graph_unload_context;
+ engine->fifo.channels = 128;
+ engine->fifo.init = nvc0_fifo_init;
+ engine->fifo.takedown = nvc0_fifo_takedown;
+ engine->fifo.disable = nvc0_fifo_disable;
+ engine->fifo.enable = nvc0_fifo_enable;
+ engine->fifo.reassign = nvc0_fifo_reassign;
+ engine->fifo.channel_id = nvc0_fifo_channel_id;
+ engine->fifo.create_context = nvc0_fifo_create_context;
+ engine->fifo.destroy_context = nvc0_fifo_destroy_context;
+ engine->fifo.load_context = nvc0_fifo_load_context;
+ engine->fifo.unload_context = nvc0_fifo_unload_context;
+ engine->display.early_init = nv50_display_early_init;
+ engine->display.late_takedown = nv50_display_late_takedown;
+ engine->display.create = nv50_display_create;
+ engine->display.init = nv50_display_init;
+ engine->display.destroy = nv50_display_destroy;
+ engine->gpio.init = nv50_gpio_init;
+ engine->gpio.takedown = nouveau_stub_takedown;
+ engine->gpio.get = nv50_gpio_get;
+ engine->gpio.set = nv50_gpio_set;
+ engine->gpio.irq_enable = nv50_gpio_irq_enable;
break;
default:
NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
@@ -407,11 +513,6 @@ nouveau_card_init(struct drm_device *dev)
struct nouveau_engine *engine;
int ret;
- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
-
- if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
- return 0;
-
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
nouveau_switcheroo_can_switch);
@@ -421,15 +522,17 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out;
engine = &dev_priv->engine;
- dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
spin_lock_init(&dev_priv->context_switch_lock);
+ /* Make the CRTCs and I2C buses accessible */
+ ret = engine->display.early_init(dev);
+ if (ret)
+ goto out;
+
/* Parse BIOS tables / Run init tables if card not POSTed */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = nouveau_bios_init(dev);
- if (ret)
- goto out;
- }
+ ret = nouveau_bios_init(dev);
+ if (ret)
+ goto out_display_early;
ret = nouveau_mem_detect(dev);
if (ret)
@@ -461,10 +564,15 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_gpuobj;
+ /* PGPIO */
+ ret = engine->gpio.init(dev);
+ if (ret)
+ goto out_mc;
+
/* PTIMER */
ret = engine->timer.init(dev);
if (ret)
- goto out_mc;
+ goto out_gpio;
/* PFB */
ret = engine->fb.init(dev);
@@ -485,12 +593,16 @@ nouveau_card_init(struct drm_device *dev)
goto out_graph;
}
+ ret = engine->display.create(dev);
+ if (ret)
+ goto out_fifo;
+
/* this call irq_preinstall, register irq handler and
* call irq_postinstall
*/
ret = drm_irq_install(dev);
if (ret)
- goto out_fifo;
+ goto out_display;
ret = drm_vblank_init(dev, 0);
if (ret)
@@ -504,35 +616,18 @@ nouveau_card_init(struct drm_device *dev)
goto out_irq;
}
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- if (dev_priv->card_type >= NV_50)
- ret = nv50_display_create(dev);
- else
- ret = nv04_display_create(dev);
- if (ret)
- goto out_channel;
- }
-
ret = nouveau_backlight_init(dev);
if (ret)
NV_ERROR(dev, "Error %d registering backlight\n", ret);
- dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- nouveau_fbcon_init(dev);
- drm_kms_helper_poll_init(dev);
- }
-
+ nouveau_fbcon_init(dev);
+ drm_kms_helper_poll_init(dev);
return 0;
-out_channel:
- if (dev_priv->channel) {
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
- }
out_irq:
drm_irq_uninstall(dev);
+out_display:
+ engine->display.destroy(dev);
out_fifo:
if (!nouveau_noaccel)
engine->fifo.takedown(dev);
@@ -543,6 +638,8 @@ out_fb:
engine->fb.takedown(dev);
out_timer:
engine->timer.takedown(dev);
+out_gpio:
+ engine->gpio.takedown(dev);
out_mc:
engine->mc.takedown(dev);
out_gpuobj:
@@ -556,6 +653,8 @@ out_gpuobj_early:
nouveau_gpuobj_late_takedown(dev);
out_bios:
nouveau_bios_takedown(dev);
+out_display_early:
+ engine->display.late_takedown(dev);
out:
vga_client_register(dev->pdev, NULL, NULL, NULL);
return ret;
@@ -566,45 +665,39 @@ static void nouveau_card_takedown(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
- NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
-
- if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
-
- nouveau_backlight_exit(dev);
-
- if (dev_priv->channel) {
- nouveau_channel_free(dev_priv->channel);
- dev_priv->channel = NULL;
- }
+ nouveau_backlight_exit(dev);
- if (!nouveau_noaccel) {
- engine->fifo.takedown(dev);
- engine->graph.takedown(dev);
- }
- engine->fb.takedown(dev);
- engine->timer.takedown(dev);
- engine->mc.takedown(dev);
+ if (dev_priv->channel) {
+ nouveau_channel_free(dev_priv->channel);
+ dev_priv->channel = NULL;
+ }
- mutex_lock(&dev->struct_mutex);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
- ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
- mutex_unlock(&dev->struct_mutex);
- nouveau_sgdma_takedown(dev);
+ if (!nouveau_noaccel) {
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ }
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->gpio.takedown(dev);
+ engine->mc.takedown(dev);
+ engine->display.late_takedown(dev);
- nouveau_gpuobj_takedown(dev);
- nouveau_mem_close(dev);
- engine->instmem.takedown(dev);
+ mutex_lock(&dev->struct_mutex);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+ ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);
+ mutex_unlock(&dev->struct_mutex);
+ nouveau_sgdma_takedown(dev);
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_uninstall(dev);
+ nouveau_gpuobj_takedown(dev);
+ nouveau_mem_close(dev);
+ engine->instmem.takedown(dev);
- nouveau_gpuobj_late_takedown(dev);
- nouveau_bios_takedown(dev);
+ drm_irq_uninstall(dev);
- vga_client_register(dev->pdev, NULL, NULL, NULL);
+ nouveau_gpuobj_late_takedown(dev);
+ nouveau_bios_takedown(dev);
- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
- }
+ vga_client_register(dev->pdev, NULL, NULL, NULL);
}
/* here a client dies, release the stuff that was allocated for its
@@ -691,22 +784,26 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
struct drm_nouveau_private *dev_priv;
uint32_t reg0;
resource_size_t mmio_start_offs;
+ int ret;
dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (!dev_priv)
- return -ENOMEM;
+ if (!dev_priv) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
dev->dev_private = dev_priv;
dev_priv->dev = dev;
dev_priv->flags = flags & NOUVEAU_FLAGS;
- dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
dev_priv->wq = create_workqueue("nouveau");
- if (!dev_priv->wq)
- return -EINVAL;
+ if (!dev_priv->wq) {
+ ret = -EINVAL;
+ goto err_priv;
+ }
/* resource 0 is mmio regs */
/* resource 1 is linear FB */
@@ -719,7 +816,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
if (!dev_priv->mmio) {
NV_ERROR(dev, "Unable to initialize the mmio mapping. "
"Please report your setup to " DRIVER_EMAIL "\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_wq;
}
NV_DEBUG(dev, "regs mapped ok at 0x%llx\n",
(unsigned long long)mmio_start_offs);
@@ -765,19 +863,21 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
case 0xa0:
dev_priv->card_type = NV_50;
break;
+ case 0xc0:
+ dev_priv->card_type = NV_C0;
+ break;
default:
NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
- return -EINVAL;
+ ret = -EINVAL;
+ goto err_mmio;
}
NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
dev_priv->card_type, reg0);
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int ret = nouveau_remove_conflicting_drivers(dev);
- if (ret)
- return ret;
- }
+ ret = nouveau_remove_conflicting_drivers(dev);
+ if (ret)
+ goto err_mmio;
/* Map PRAMIN BAR, or on older cards, the aperture within BAR0 */
if (dev_priv->card_type >= NV_40) {
@@ -791,7 +891,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to PRAMIN BAR");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mmio;
}
} else {
dev_priv->ramin_size = 1 * 1024 * 1024;
@@ -799,7 +900,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->ramin_size);
if (!dev_priv->ramin) {
NV_ERROR(dev, "Failed to map BAR0 PRAMIN.\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_mmio;
}
}
@@ -812,46 +914,38 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->flags |= NV_NFORCE2;
/* For kernel modesetting, init card now and bring up fbcon */
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- int ret = nouveau_card_init(dev);
- if (ret)
- return ret;
- }
+ ret = nouveau_card_init(dev);
+ if (ret)
+ goto err_ramin;
return 0;
-}
-
-static void nouveau_close(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- /* In the case of an error dev_priv may not be allocated yet */
- if (dev_priv)
- nouveau_card_takedown(dev);
+err_ramin:
+ iounmap(dev_priv->ramin);
+err_mmio:
+ iounmap(dev_priv->mmio);
+err_wq:
+ destroy_workqueue(dev_priv->wq);
+err_priv:
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+err_out:
+ return ret;
}
-/* KMS: we need mmio at load time, not when the first drm client opens. */
void nouveau_lastclose(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return;
-
- nouveau_close(dev);
}
int nouveau_unload(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->engine;
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- drm_kms_helper_poll_fini(dev);
- nouveau_fbcon_fini(dev);
- if (dev_priv->card_type >= NV_50)
- nv50_display_destroy(dev);
- else
- nv04_display_destroy(dev);
- nouveau_close(dev);
- }
+ drm_kms_helper_poll_fini(dev);
+ nouveau_fbcon_fini(dev);
+ engine->display.destroy(dev);
+ nouveau_card_takedown(dev);
iounmap(dev_priv->mmio);
iounmap(dev_priv->ramin);
@@ -867,8 +961,6 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_getparam *getparam = data;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
switch (getparam->param) {
case NOUVEAU_GETPARAM_CHIPSET_ID:
getparam->value = dev_priv->chipset;
@@ -937,8 +1029,6 @@ nouveau_ioctl_setparam(struct drm_device *dev, void *data,
{
struct drm_nouveau_setparam *setparam = data;
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
switch (setparam->param) {
default:
NV_ERROR(dev, "unknown parameter %lld\n", setparam->param);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index c385d50f041b..bd35f930568c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -42,13 +42,13 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
}
static int
-nouveau_ttm_mem_global_init(struct ttm_global_reference *ref)
+nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
static void
-nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
+nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
@@ -56,16 +56,16 @@ nouveau_ttm_mem_global_release(struct ttm_global_reference *ref)
int
nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
{
- struct ttm_global_reference *global_ref;
+ struct drm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->ttm.mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &nouveau_ttm_mem_global_init;
global_ref->release = &nouveau_ttm_mem_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting\n");
dev_priv->ttm.mem_global_ref.release = NULL;
@@ -74,15 +74,15 @@ nouveau_ttm_global_init(struct drm_nouveau_private *dev_priv)
dev_priv->ttm.bo_global_ref.mem_glob = global_ref->object;
global_ref = &dev_priv->ttm.bo_global_ref.ref;
- global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM BO subsystem\n");
- ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
dev_priv->ttm.mem_global_ref.release = NULL;
return ret;
}
@@ -96,8 +96,8 @@ nouveau_ttm_global_release(struct drm_nouveau_private *dev_priv)
if (dev_priv->ttm.mem_global_ref.release == NULL)
return;
- ttm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
- ttm_global_item_unref(&dev_priv->ttm.mem_global_ref);
+ drm_global_item_unref(&dev_priv->ttm.bo_global_ref.ref);
+ drm_global_item_unref(&dev_priv->ttm.mem_global_ref);
dev_priv->ttm.mem_global_ref.release = NULL;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index eba687f1099e..497df8765f28 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -157,6 +157,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
unsigned char seq1 = 0, crtc17 = 0;
unsigned char crtc1A;
@@ -211,6 +212,10 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
NVVgaSeqReset(dev, nv_crtc->index, false);
NVWriteVgaCrtc(dev, nv_crtc->index, NV_CIO_CRE_RPC1_INDEX, crtc1A);
+
+ /* Update connector polling modes */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ nouveau_connector_set_polling(connector);
}
static bool
@@ -537,6 +542,9 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
* 1 << 30 on 0x60.830), for no apparent reason */
regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
+ if (dev_priv->card_type >= NV_30)
+ regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
+
regp->crtc_830 = mode->crtc_vdisplay - 3;
regp->crtc_834 = mode->crtc_vdisplay - 1;
@@ -734,15 +742,13 @@ nv_crtc_gamma_load(struct drm_crtc *crtc)
}
static void
-nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size)
+nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
+ uint32_t size)
{
+ int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int i;
- if (size != 256)
- return;
-
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -909,7 +915,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
if (!gem)
- return -EINVAL;
+ return -ENOENT;
cursor = nouveau_gem_object(gem);
ret = nouveau_bo_map(cursor);
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1cb19e3acb55..ea3627041ecf 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -220,6 +220,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -251,22 +252,21 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
- saved_gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
- saved_gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
msleep(4);
saved_routput = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (saved_routput & 0x100) >> 8;
-#if 0
- /* if there's a spare crtc, using it will minimise flicker for the case
- * where the in-use crtc is in use by an off-chip tmds encoder */
- if (xf86_config->crtc[head]->enabled && !xf86_config->crtc[head ^ 1]->enabled)
+
+ /* if there's a spare crtc, using it will minimise flicker */
+ if (!(NVReadVgaCrtc(dev, head, NV_CIO_CRE_RPC1_INDEX) & 0xC0))
head ^= 1;
-#endif
+
/* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
routput = (saved_routput & 0xfffffece) | head << 8;
@@ -304,8 +304,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
return sample;
}
@@ -315,9 +315,12 @@ nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
- uint32_t sample = nv17_dac_sample_load(encoder);
- if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+ if (nv04_dac_in_use(encoder))
+ return connector_status_disconnected;
+
+ if (nv17_dac_sample_load(encoder) &
+ NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
NV_INFO(dev, "Load detected on output %c\n",
'@' + ffs(dcb->or));
return connector_status_connected;
@@ -330,6 +333,9 @@ static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
+ if (nv04_dac_in_use(encoder))
+ return false;
+
return true;
}
@@ -428,6 +434,17 @@ void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable)
}
}
+/* Check if the DAC corresponding to 'encoder' is being used by
+ * someone else. */
+bool nv04_dac_in_use(struct drm_encoder *encoder)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+
+ return nv_gf4_disp_arch(encoder->dev) &&
+ (dev_priv->dac_users[ffs(dcb->or) - 1] & ~(1 << dcb->index));
+}
+
static void nv04_dac_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -501,11 +518,13 @@ static const struct drm_encoder_funcs nv04_dac_funcs = {
.destroy = nv04_dac_destroy,
};
-int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+int
+nv04_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
{
const struct drm_encoder_helper_funcs *helper;
- struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_device *dev = connector->dev;
+ struct drm_encoder *encoder;
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
@@ -527,5 +546,6 @@ int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 41634d4752fe..a5dcf7685800 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -34,6 +34,8 @@
#include "nouveau_hw.h"
#include "nvreg.h"
+#include "i2c/sil164.h"
+
#define FP_TG_CONTROL_ON (NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | \
NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | \
NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS)
@@ -144,6 +146,36 @@ void nv04_dfp_update_fp_control(struct drm_encoder *encoder, int mode)
}
}
+static struct drm_encoder *get_tmds_slave(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ struct drm_encoder *slave;
+
+ if (dcb->type != OUTPUT_TMDS || dcb->location == DCB_LOC_ON_CHIP)
+ return NULL;
+
+ /* Some BIOSes (e.g. the one in a Quadro FX1000) report several
+ * TMDS transmitters at the same I2C address, in the same I2C
+ * bus. This can still work because in that case one of them is
+ * always hard-wired to a reasonable configuration using straps,
+ * and the other one needs to be programmed.
+ *
+ * I don't think there's a way to know which is which, even the
+ * blob programs the one exposed via I2C for *both* heads, so
+ * let's do the same.
+ */
+ list_for_each_entry(slave, &dev->mode_config.encoder_list, head) {
+ struct dcb_entry *slave_dcb = nouveau_encoder(slave)->dcb;
+
+ if (slave_dcb->type == OUTPUT_TMDS && get_slave_funcs(slave) &&
+ slave_dcb->tmdsconf.slave_addr == dcb->tmdsconf.slave_addr)
+ return slave;
+ }
+
+ return NULL;
+}
+
static bool nv04_dfp_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -413,10 +445,6 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
struct dcb_entry *dcbe = nv_encoder->dcb;
int head = nouveau_crtc(encoder->crtc)->index;
- NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
- drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
- nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
-
if (dcbe->type == OUTPUT_TMDS)
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
else if (dcbe->type == OUTPUT_LVDS)
@@ -433,6 +461,11 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
else
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
+ /* Init external transmitters */
+ if (get_tmds_slave(encoder))
+ get_slave_funcs(get_tmds_slave(encoder))->mode_set(
+ encoder, &nv_encoder->mode, &nv_encoder->mode);
+
helper->dpms(encoder, DRM_MODE_DPMS_ON);
NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
@@ -554,10 +587,42 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
NV_DEBUG_KMS(encoder->dev, "\n");
+ if (get_slave_funcs(encoder))
+ get_slave_funcs(encoder)->destroy(encoder);
+
drm_encoder_cleanup(encoder);
kfree(nv_encoder);
}
+static void nv04_tmds_slave_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+ struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, 2);
+ struct i2c_board_info info[] = {
+ {
+ .type = "sil164",
+ .addr = (dcb->tmdsconf.slave_addr == 0x7 ? 0x3a : 0x38),
+ .platform_data = &(struct sil164_encoder_params) {
+ SIL164_INPUT_EDGE_RISING
+ }
+ },
+ { }
+ };
+ int type;
+
+ if (!nv_gf4_disp_arch(dev) || !i2c ||
+ get_tmds_slave(encoder))
+ return;
+
+ type = nouveau_i2c_identify(dev, "TMDS transmitter", info, 2);
+ if (type < 0)
+ return;
+
+ drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
+ &i2c->adapter, &info[type]);
+}
+
static const struct drm_encoder_helper_funcs nv04_lvds_helper_funcs = {
.dpms = nv04_lvds_dpms,
.save = nv04_dfp_save,
@@ -584,11 +649,12 @@ static const struct drm_encoder_funcs nv04_dfp_funcs = {
.destroy = nv04_dfp_destroy,
};
-int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
+int
+nv04_dfp_create(struct drm_connector *connector, struct dcb_entry *entry)
{
const struct drm_encoder_helper_funcs *helper;
- struct drm_encoder *encoder;
struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_encoder *encoder;
int type;
switch (entry->type) {
@@ -613,11 +679,16 @@ int nv04_dfp_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
- drm_encoder_init(dev, encoder, &nv04_dfp_funcs, type);
+ drm_encoder_init(connector->dev, encoder, &nv04_dfp_funcs, type);
drm_encoder_helper_add(encoder, helper);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ if (entry->type == OUTPUT_TMDS &&
+ entry->location != DCB_LOC_ON_CHIP)
+ nv04_tmds_slave_init(encoder);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
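
[Editor's note] nv04_tmds_slave_init() above hands a static i2c_board_info table to nouveau_i2c_identify() and, on a hit, binds the sil164 module through drm_i2c_encoder_init(). Below is a rough userspace sketch of that probe-by-table idea: probe_addr() stands in for a zero-length I2C transfer, and returning the table index mirrors how the result is used as info[type] in the patch.

        /* Probe a table of candidate I2C devices and return the index of the
         * first address that answers; -1 if none do.  Everything here is a
         * stand-in for the real nouveau_i2c_identify() machinery. */
        #include <stdio.h>

        struct board_info {
                const char *type;
                int addr;
        };

        static const struct board_info tmds_chips[] = {
                { "sil164", 0x38 },
                { "sil164", 0x3a },
                { NULL, 0 }                     /* sentinel, like the empty { } entry */
        };

        static int probe_addr(int addr)
        {
                return addr == 0x3a;            /* pretend only 0x3a answers */
        }

        static int identify(const struct board_info *info)
        {
                int i;

                for (i = 0; info[i].type; i++) {
                        if (probe_addr(info[i].addr))
                                return i;       /* index into the table */
                }
                return -1;
        }

        int main(void)
        {
                int type = identify(tmds_chips);

                if (type >= 0)
                        printf("found %s at 0x%02x\n",
                               tmds_chips[type].type, tmds_chips[type].addr);
                return 0;
        }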
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index c7898b4f6dfb..9e28cf772e3c 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -32,8 +32,6 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
-#define MULTIPLE_ENCODERS(e) (e & (e - 1))
-
static void
nv04_display_store_initial_head_owner(struct drm_device *dev)
{
@@ -41,7 +39,7 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
if (dev_priv->chipset != 0x11) {
dev_priv->crtc_owner = NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44);
- goto ownerknown;
+ return;
}
/* reading CR44 is broken on nv11, so we attempt to infer it */
@@ -52,8 +50,6 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
bool tvA = false;
bool tvB = false;
- NVLockVgaCrtcs(dev, false);
-
slaved_on_B = NVReadVgaCrtc(dev, 1, NV_CIO_CRE_PIXEL_INDEX) &
0x80;
if (slaved_on_B)
@@ -66,8 +62,6 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
tvA = !(NVReadVgaCrtc(dev, 0, NV_CIO_CRE_LCD__INDEX) &
MASK(NV_CIO_CRE_LCD_LCD_SELECT));
- NVLockVgaCrtcs(dev, true);
-
if (slaved_on_A && !tvA)
dev_priv->crtc_owner = 0x0;
else if (slaved_on_B && !tvB)
@@ -79,14 +73,40 @@ nv04_display_store_initial_head_owner(struct drm_device *dev)
else
dev_priv->crtc_owner = 0x0;
}
+}
+
+int
+nv04_display_early_init(struct drm_device *dev)
+{
+ /* Make the I2C buses accessible. */
+ if (!nv_gf4_disp_arch(dev)) {
+ uint32_t pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
+
+ if (!(pmc_enable & 1))
+ nv_wr32(dev, NV03_PMC_ENABLE, pmc_enable | 1);
+ }
-ownerknown:
- NV_INFO(dev, "Initial CRTC_OWNER is %d\n", dev_priv->crtc_owner);
+ /* Unlock the VGA CRTCs. */
+ NVLockVgaCrtcs(dev, false);
+
+ /* Make sure the CRTCs aren't in slaved mode. */
+ if (nv_two_heads(dev)) {
+ nv04_display_store_initial_head_owner(dev);
+ NVSetOwner(dev, 0);
+ }
+
+ return 0;
+}
+
+void
+nv04_display_late_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (nv_two_heads(dev))
+ NVSetOwner(dev, dev_priv->crtc_owner);
- /* we need to ensure the heads are not tied henceforth, or reading any
- * 8 bit reg on head B will fail
- * setting a single arbitrary head solves that */
- NVSetOwner(dev, 0);
+ NVLockVgaCrtcs(dev, true);
}
int
@@ -94,14 +114,13 @@ nv04_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct drm_connector *connector, *ct;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
int i, ret;
NV_DEBUG_KMS(dev, "\n");
- if (nv_two_heads(dev))
- nv04_display_store_initial_head_owner(dev);
nouveau_hw_save_vga_fonts(dev, 1);
drm_mode_config_init(dev);
@@ -132,19 +151,23 @@ nv04_display_create(struct drm_device *dev)
for (i = 0; i < dcb->entries; i++) {
struct dcb_entry *dcbent = &dcb->entry[i];
+ connector = nouveau_connector_create(dev, dcbent->connector);
+ if (IS_ERR(connector))
+ continue;
+
switch (dcbent->type) {
case OUTPUT_ANALOG:
- ret = nv04_dac_create(dev, dcbent);
+ ret = nv04_dac_create(connector, dcbent);
break;
case OUTPUT_LVDS:
case OUTPUT_TMDS:
- ret = nv04_dfp_create(dev, dcbent);
+ ret = nv04_dfp_create(connector, dcbent);
break;
case OUTPUT_TV:
if (dcbent->location == DCB_LOC_ON_CHIP)
- ret = nv17_tv_create(dev, dcbent);
+ ret = nv17_tv_create(connector, dcbent);
else
- ret = nv04_tv_create(dev, dcbent);
+ ret = nv04_tv_create(connector, dcbent);
break;
default:
NV_WARN(dev, "DCB type %d not known\n", dcbent->type);
@@ -155,12 +178,16 @@ nv04_display_create(struct drm_device *dev)
continue;
}
- for (i = 0; i < dcb->connector.entries; i++)
- nouveau_connector_create(dev, &dcb->connector.entry[i]);
+ list_for_each_entry_safe(connector, ct,
+ &dev->mode_config.connector_list, head) {
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
+ }
/* Save previous state */
- NVLockVgaCrtcs(dev, false);
-
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->save(crtc);
@@ -191,8 +218,6 @@ nv04_display_destroy(struct drm_device *dev)
}
/* Restore state */
- NVLockVgaCrtcs(dev, false);
-
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct drm_encoder_helper_funcs *func = encoder->helper_private;
@@ -207,15 +232,12 @@ nv04_display_destroy(struct drm_device *dev)
nouveau_hw_save_vga_fonts(dev, 0);
}
-void
-nv04_display_restore(struct drm_device *dev)
+int
+nv04_display_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- NVLockVgaCrtcs(dev, false);
-
/* meh.. modeset apparently doesn't setup all the regs and depends
* on pre-existing state, for now load the state of the card *before*
* nouveau was loaded, and then do a modeset.
@@ -233,12 +255,6 @@ nv04_display_restore(struct drm_device *dev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
crtc->funcs->restore(crtc);
- if (nv_two_heads(dev)) {
- NV_INFO(dev, "Restoring CRTC_OWNER to %d.\n",
- dev_priv->crtc_owner);
- NVSetOwner(dev, dev_priv->crtc_owner);
- }
-
- NVLockVgaCrtcs(dev, true);
+ return 0;
}
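
[Editor's note] nv04_display_create() now builds a connector per DCB entry first, lets each encoder constructor attach itself, and afterwards destroys any connector that never gained an encoder. A plain-C sketch of that pruning pass is below, with the kernel's list_for_each_entry_safe() and drm structures replaced by a hand-rolled singly linked list.

        /* Walk the connector list and drop entries with no attached encoder,
         * unlinking before freeing (the reason the driver uses the _safe iterator). */
        #include <stdio.h>
        #include <stdlib.h>

        struct connector {
                const char *name;
                int encoder_count;
                struct connector *next;
        };

        static struct connector *prune_unused(struct connector *head)
        {
                struct connector **link = &head, *c;

                while ((c = *link) != NULL) {
                        if (c->encoder_count == 0) {
                                printf("%s has no encoders, removing\n", c->name);
                                *link = c->next;
                                free(c);
                        } else {
                                link = &c->next;
                        }
                }
                return head;
        }

        int main(void)
        {
                struct connector *vga = malloc(sizeof(*vga));
                struct connector *tv = malloc(sizeof(*tv));

                *vga = (struct connector){ "VGA-1", 1, tv };
                *tv = (struct connector){ "TV-1", 0, NULL };

                prune_unused(vga);
                return 0;
        }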
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index 66fe55983b6e..06cedd99c26a 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -112,6 +112,12 @@ nv04_fifo_channel_id(struct drm_device *dev)
NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
+#ifdef __BIG_ENDIAN
+#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
+#else
+#define DMA_FETCH_ENDIANNESS 0
+#endif
+
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
@@ -131,18 +137,13 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
- dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- 0));
- dev_priv->engine.instmem.finish_access(dev);
+ DMA_FETCH_ENDIANNESS));
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
@@ -169,8 +170,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV04_RAMFC(chid), tmp;
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
tmp = nv_ri32(dev, fc + 8);
@@ -181,8 +180,6 @@ nv04_fifo_do_load_context(struct drm_device *dev, int chid)
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
@@ -223,7 +220,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
return -EINVAL;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
@@ -233,7 +229,6 @@ nv04_fifo_unload_context(struct drm_device *dev)
RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
- dev_priv->engine.instmem.finish_access(dev);
nv04_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
@@ -297,6 +292,7 @@ nv04_fifo_init(struct drm_device *dev)
nv04_fifo_init_intr(dev);
pfifo->enable(dev);
+ pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
if (dev_priv->fifos[i]) {
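
[Editor's note] The DMA_FETCH_ENDIANNESS macro introduced above replaces an #ifdef spliced into the middle of the register expression: the endian-dependent bit is always defined (possibly as 0), so the DMA_FETCH value becomes one flat OR. A compilable toy version of the same pattern follows; the flag values are placeholders, and note that in the kernel __BIG_ENDIAN is only defined on big-endian builds (userspace code would normally test __BYTE_ORDER__ instead).

        #include <stdio.h>
        #include <stdint.h>

        #define FETCH_TRIG_128          0x00000000      /* placeholder flag values */
        #define FETCH_SIZE_128          0x00000010
        #define FETCH_MAX_REQS_8        0x00000300

        #ifdef __BIG_ENDIAN
        #define DMA_FETCH_ENDIANNESS    0x80000000      /* the BIG_ENDIAN bit */
        #else
        #define DMA_FETCH_ENDIANNESS    0               /* little-endian: nothing extra */
        #endif

        int main(void)
        {
                uint32_t val = FETCH_TRIG_128 | FETCH_SIZE_128 | FETCH_MAX_REQS_8 |
                               DMA_FETCH_ENDIANNESS;

                printf("DMA_FETCH = 0x%08x\n", (unsigned)val);
                return 0;
        }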
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c
index 618355e9cdd5..c8973421b635 100644
--- a/drivers/gpu/drm/nouveau/nv04_graph.c
+++ b/drivers/gpu/drm/nouveau/nv04_graph.c
@@ -342,7 +342,7 @@ static uint32_t nv04_graph_ctx_regs[] = {
};
struct graph_state {
- int nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+ uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
};
struct nouveau_channel *
@@ -527,8 +527,7 @@ static int
nv04_graph_mthd_set_ref(struct nouveau_channel *chan, int grclass,
int mthd, uint32_t data)
{
- chan->fence.last_sequence_irq = data;
- nouveau_fence_handler(chan->dev, chan->id);
+ atomic_set(&chan->fence.last_sequence_irq, data);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_instmem.c b/drivers/gpu/drm/nouveau/nv04_instmem.c
index a3b9563a6f60..4408232d33f1 100644
--- a/drivers/gpu/drm/nouveau/nv04_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv04_instmem.c
@@ -49,10 +49,8 @@ nv04_instmem_determine_amount(struct drm_device *dev)
NV_DEBUG(dev, "RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram >> 10);
/* Clear all of it, except the BIOS image that's in the first 64KiB */
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 64 * 1024; i < dev_priv->ramin_rsvd_vram; i += 4)
nv_wi32(dev, i, 0x00000000);
- dev_priv->engine.instmem.finish_access(dev);
}
static void
@@ -106,7 +104,7 @@ int nv04_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t offset;
- int ret = 0;
+ int ret;
nv04_instmem_determine_amount(dev);
nv04_instmem_configure_fixed_tables(dev);
@@ -129,14 +127,14 @@ int nv04_instmem_init(struct drm_device *dev)
offset = 0x40000;
}
- ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
- offset, dev_priv->ramin_rsvd_vram - offset);
+ ret = drm_mm_init(&dev_priv->ramin_heap, offset,
+ dev_priv->ramin_rsvd_vram - offset);
if (ret) {
- dev_priv->ramin_heap = NULL;
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
+ return ret;
}
- return ret;
+ return 0;
}
void
@@ -186,12 +184,7 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
}
void
-nv04_instmem_prepare_access(struct drm_device *dev, bool write)
-{
-}
-
-void
-nv04_instmem_finish_access(struct drm_device *dev)
+nv04_instmem_flush(struct drm_device *dev)
{
}
diff --git a/drivers/gpu/drm/nouveau/nv04_mc.c b/drivers/gpu/drm/nouveau/nv04_mc.c
index 617ed1e05269..2af43a1cb2ec 100644
--- a/drivers/gpu/drm/nouveau/nv04_mc.c
+++ b/drivers/gpu/drm/nouveau/nv04_mc.c
@@ -11,6 +11,10 @@ nv04_mc_init(struct drm_device *dev)
*/
nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ /* Disable PROM access. */
+ nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index c4e3404337d4..0b5d012d7c28 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -34,69 +34,26 @@
#include "i2c/ch7006.h"
-static struct {
- struct i2c_board_info board_info;
- struct drm_encoder_funcs funcs;
- struct drm_encoder_helper_funcs hfuncs;
- void *params;
-
-} nv04_tv_encoder_info[] = {
+static struct i2c_board_info nv04_tv_encoder_info[] = {
{
- .board_info = { I2C_BOARD_INFO("ch7006", 0x75) },
- .params = &(struct ch7006_encoder_params) {
+ I2C_BOARD_INFO("ch7006", 0x75),
+ .platform_data = &(struct ch7006_encoder_params) {
CH7006_FORMAT_RGB24m12I, CH7006_CLOCK_MASTER,
0, 0, 0,
CH7006_SYNC_SLAVE, CH7006_SYNC_SEPARATED,
CH7006_POUT_3_3V, CH7006_ACTIVE_HSYNC
- },
+ }
},
+ { }
};
-static bool probe_i2c_addr(struct i2c_adapter *adapter, int addr)
-{
- struct i2c_msg msg = {
- .addr = addr,
- .len = 0,
- };
-
- return i2c_transfer(adapter, &msg, 1) == 1;
-}
-
int nv04_tv_identify(struct drm_device *dev, int i2c_index)
{
- struct nouveau_i2c_chan *i2c;
- bool was_locked;
- int i, ret;
-
- NV_TRACE(dev, "Probing TV encoders on I2C bus: %d\n", i2c_index);
-
- i2c = nouveau_i2c_find(dev, i2c_index);
- if (!i2c)
- return -ENODEV;
-
- was_locked = NVLockVgaCrtcs(dev, false);
-
- for (i = 0; i < ARRAY_SIZE(nv04_tv_encoder_info); i++) {
- if (probe_i2c_addr(&i2c->adapter,
- nv04_tv_encoder_info[i].board_info.addr)) {
- ret = i;
- break;
- }
- }
-
- if (i < ARRAY_SIZE(nv04_tv_encoder_info)) {
- NV_TRACE(dev, "Detected TV encoder: %s\n",
- nv04_tv_encoder_info[i].board_info.type);
-
- } else {
- NV_TRACE(dev, "No TV encoders found.\n");
- i = -ENODEV;
- }
-
- NVLockVgaCrtcs(dev, was_locked);
- return i;
+ return nouveau_i2c_identify(dev, "TV encoder",
+ nv04_tv_encoder_info, i2c_index);
}
+
#define PLLSEL_TV_CRTC1_MASK \
(NV_PRAMDAC_PLL_COEFF_SELECT_TV_VSCLK1 \
| NV_PRAMDAC_PLL_COEFF_SELECT_TV_PCLK1)
@@ -132,7 +89,7 @@ static void nv04_tv_dpms(struct drm_encoder *encoder, int mode)
NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
- to_encoder_slave(encoder)->slave_funcs->dpms(encoder, mode);
+ get_slave_funcs(encoder)->dpms(encoder, mode);
}
static void nv04_tv_bind(struct drm_device *dev, int head, bool bind)
@@ -195,7 +152,7 @@ static void nv04_tv_mode_set(struct drm_encoder *encoder,
regp->tv_vskew = 1;
regp->tv_vsync_delay = 1;
- to_encoder_slave(encoder)->slave_funcs->mode_set(encoder, mode, adjusted_mode);
+ get_slave_funcs(encoder)->mode_set(encoder, mode, adjusted_mode);
}
static void nv04_tv_commit(struct drm_encoder *encoder)
@@ -214,30 +171,31 @@ static void nv04_tv_commit(struct drm_encoder *encoder)
static void nv04_tv_destroy(struct drm_encoder *encoder)
{
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- to_encoder_slave(encoder)->slave_funcs->destroy(encoder);
-
+ get_slave_funcs(encoder)->destroy(encoder);
drm_encoder_cleanup(encoder);
- kfree(nv_encoder);
+ kfree(encoder->helper_private);
+ kfree(nouveau_encoder(encoder));
}
-int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+static const struct drm_encoder_funcs nv04_tv_funcs = {
+ .destroy = nv04_tv_destroy,
+};
+
+int
+nv04_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct i2c_adapter *adap;
- struct drm_encoder_funcs *funcs = NULL;
- struct drm_encoder_helper_funcs *hfuncs = NULL;
- struct drm_encoder_slave_funcs *sfuncs = NULL;
- int i2c_index = entry->i2c_index;
+ struct drm_device *dev = connector->dev;
+ struct drm_encoder_helper_funcs *hfuncs;
+ struct drm_encoder_slave_funcs *sfuncs;
+ struct nouveau_i2c_chan *i2c =
+ nouveau_i2c_find(dev, entry->i2c_index);
int type, ret;
- bool was_locked;
/* Ensure that we can talk to this encoder */
- type = nv04_tv_identify(dev, i2c_index);
+ type = nv04_tv_identify(dev, entry->i2c_index);
if (type < 0)
return type;
@@ -246,40 +204,31 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
if (!nv_encoder)
return -ENOMEM;
+ hfuncs = kzalloc(sizeof(*hfuncs), GFP_KERNEL);
+ if (!hfuncs) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
/* Initialize the common members */
encoder = to_drm_encoder(nv_encoder);
- funcs = &nv04_tv_encoder_info[type].funcs;
- hfuncs = &nv04_tv_encoder_info[type].hfuncs;
-
- drm_encoder_init(dev, encoder, funcs, DRM_MODE_ENCODER_TVDAC);
+ drm_encoder_init(dev, encoder, &nv04_tv_funcs, DRM_MODE_ENCODER_TVDAC);
drm_encoder_helper_add(encoder, hfuncs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
-
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
-
- was_locked = NVLockVgaCrtcs(dev, false);
-
- ret = drm_i2c_encoder_init(encoder->dev, to_encoder_slave(encoder), adap,
- &nv04_tv_encoder_info[type].board_info);
-
- NVLockVgaCrtcs(dev, was_locked);
-
+ ret = drm_i2c_encoder_init(dev, to_encoder_slave(encoder),
+ &i2c->adapter, &nv04_tv_encoder_info[type]);
if (ret < 0)
- goto fail;
+ goto fail_cleanup;
/* Fill the function pointers */
- sfuncs = to_encoder_slave(encoder)->slave_funcs;
-
- *funcs = (struct drm_encoder_funcs) {
- .destroy = nv04_tv_destroy,
- };
+ sfuncs = get_slave_funcs(encoder);
*hfuncs = (struct drm_encoder_helper_funcs) {
.dpms = nv04_tv_dpms,
@@ -292,14 +241,16 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
.detect = sfuncs->detect,
};
- /* Set the slave encoder configuration */
- sfuncs->set_config(encoder, nv04_tv_encoder_info[type].params);
+ /* Attach it to the specified connector. */
+ sfuncs->create_resources(encoder, connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
-fail:
+fail_cleanup:
drm_encoder_cleanup(encoder);
-
+ kfree(hfuncs);
+fail_free:
kfree(nv_encoder);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv10_fifo.c b/drivers/gpu/drm/nouveau/nv10_fifo.c
index 7aeabf262bc0..7a4069cf5d0b 100644
--- a/drivers/gpu/drm/nouveau/nv10_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv10_fifo.c
@@ -55,7 +55,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
/* Fill entries that are seen filled in dumps of nvidia driver just
* after channel's is put into DMA mode
*/
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -66,7 +65,6 @@ nv10_fifo_create_context(struct nouveau_channel *chan)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0);
- dev_priv->engine.instmem.finish_access(dev);
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
@@ -91,8 +89,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV10_RAMFC(chid), tmp;
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -117,8 +113,6 @@ nv10_fifo_do_load_context(struct drm_device *dev, int chid)
nv_wr32(dev, NV10_PFIFO_CACHE1_DMA_SUBROUTINE, nv_ri32(dev, fc + 48));
out:
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
@@ -155,8 +149,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
return 0;
fc = NV10_RAMFC(chid);
- dev_priv->engine.instmem.prepare_access(dev, true);
-
nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -179,8 +171,6 @@ nv10_fifo_unload_context(struct drm_device *dev)
nv_wi32(dev, fc + 48, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
out:
- dev_priv->engine.instmem.finish_access(dev);
-
nv10_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv17_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 2e58c331e9b7..007fc29e2f86 100644
--- a/drivers/gpu/drm/nouveau/nv17_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -55,7 +55,7 @@ get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
}
int
-nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
{
struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
uint32_t reg, shift, mask, value;
@@ -72,7 +72,7 @@ nv17_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
}
int
-nv17_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
{
struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
uint32_t reg, shift, mask, value;
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c
index fcf2cdd19493..b2f6a57c0cc5 100644
--- a/drivers/gpu/drm/nouveau/nv10_graph.c
+++ b/drivers/gpu/drm/nouveau/nv10_graph.c
@@ -43,51 +43,51 @@ struct pipe_state {
};
static int nv10_graph_ctx_regs[] = {
- NV10_PGRAPH_CTX_SWITCH1,
- NV10_PGRAPH_CTX_SWITCH2,
- NV10_PGRAPH_CTX_SWITCH3,
- NV10_PGRAPH_CTX_SWITCH4,
- NV10_PGRAPH_CTX_SWITCH5,
- NV10_PGRAPH_CTX_CACHE1, /* 8 values from 0x400160 to 0x40017c */
- NV10_PGRAPH_CTX_CACHE2, /* 8 values from 0x400180 to 0x40019c */
- NV10_PGRAPH_CTX_CACHE3, /* 8 values from 0x4001a0 to 0x4001bc */
- NV10_PGRAPH_CTX_CACHE4, /* 8 values from 0x4001c0 to 0x4001dc */
- NV10_PGRAPH_CTX_CACHE5, /* 8 values from 0x4001e0 to 0x4001fc */
- 0x00400164,
- 0x00400184,
- 0x004001a4,
- 0x004001c4,
- 0x004001e4,
- 0x00400168,
- 0x00400188,
- 0x004001a8,
- 0x004001c8,
- 0x004001e8,
- 0x0040016c,
- 0x0040018c,
- 0x004001ac,
- 0x004001cc,
- 0x004001ec,
- 0x00400170,
- 0x00400190,
- 0x004001b0,
- 0x004001d0,
- 0x004001f0,
- 0x00400174,
- 0x00400194,
- 0x004001b4,
- 0x004001d4,
- 0x004001f4,
- 0x00400178,
- 0x00400198,
- 0x004001b8,
- 0x004001d8,
- 0x004001f8,
- 0x0040017c,
- 0x0040019c,
- 0x004001bc,
- 0x004001dc,
- 0x004001fc,
+ NV10_PGRAPH_CTX_SWITCH(0),
+ NV10_PGRAPH_CTX_SWITCH(1),
+ NV10_PGRAPH_CTX_SWITCH(2),
+ NV10_PGRAPH_CTX_SWITCH(3),
+ NV10_PGRAPH_CTX_SWITCH(4),
+ NV10_PGRAPH_CTX_CACHE(0, 0),
+ NV10_PGRAPH_CTX_CACHE(0, 1),
+ NV10_PGRAPH_CTX_CACHE(0, 2),
+ NV10_PGRAPH_CTX_CACHE(0, 3),
+ NV10_PGRAPH_CTX_CACHE(0, 4),
+ NV10_PGRAPH_CTX_CACHE(1, 0),
+ NV10_PGRAPH_CTX_CACHE(1, 1),
+ NV10_PGRAPH_CTX_CACHE(1, 2),
+ NV10_PGRAPH_CTX_CACHE(1, 3),
+ NV10_PGRAPH_CTX_CACHE(1, 4),
+ NV10_PGRAPH_CTX_CACHE(2, 0),
+ NV10_PGRAPH_CTX_CACHE(2, 1),
+ NV10_PGRAPH_CTX_CACHE(2, 2),
+ NV10_PGRAPH_CTX_CACHE(2, 3),
+ NV10_PGRAPH_CTX_CACHE(2, 4),
+ NV10_PGRAPH_CTX_CACHE(3, 0),
+ NV10_PGRAPH_CTX_CACHE(3, 1),
+ NV10_PGRAPH_CTX_CACHE(3, 2),
+ NV10_PGRAPH_CTX_CACHE(3, 3),
+ NV10_PGRAPH_CTX_CACHE(3, 4),
+ NV10_PGRAPH_CTX_CACHE(4, 0),
+ NV10_PGRAPH_CTX_CACHE(4, 1),
+ NV10_PGRAPH_CTX_CACHE(4, 2),
+ NV10_PGRAPH_CTX_CACHE(4, 3),
+ NV10_PGRAPH_CTX_CACHE(4, 4),
+ NV10_PGRAPH_CTX_CACHE(5, 0),
+ NV10_PGRAPH_CTX_CACHE(5, 1),
+ NV10_PGRAPH_CTX_CACHE(5, 2),
+ NV10_PGRAPH_CTX_CACHE(5, 3),
+ NV10_PGRAPH_CTX_CACHE(5, 4),
+ NV10_PGRAPH_CTX_CACHE(6, 0),
+ NV10_PGRAPH_CTX_CACHE(6, 1),
+ NV10_PGRAPH_CTX_CACHE(6, 2),
+ NV10_PGRAPH_CTX_CACHE(6, 3),
+ NV10_PGRAPH_CTX_CACHE(6, 4),
+ NV10_PGRAPH_CTX_CACHE(7, 0),
+ NV10_PGRAPH_CTX_CACHE(7, 1),
+ NV10_PGRAPH_CTX_CACHE(7, 2),
+ NV10_PGRAPH_CTX_CACHE(7, 3),
+ NV10_PGRAPH_CTX_CACHE(7, 4),
NV10_PGRAPH_CTX_USER,
NV04_PGRAPH_DMA_START_0,
NV04_PGRAPH_DMA_START_1,
@@ -653,6 +653,78 @@ static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
return -1;
}
+static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
+ uint32_t inst)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+ uint32_t ctx_user, ctx_switch[5];
+ int i, subchan = -1;
+
+ /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
+ * that cannot be restored via MMIO. Do it through the FIFO
+ * instead.
+ */
+
+ /* Look for a celsius object */
+ for (i = 0; i < 8; i++) {
+ int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+
+ if (class == 0x56 || class == 0x96 || class == 0x99) {
+ subchan = i;
+ break;
+ }
+ }
+
+ if (subchan < 0 || !inst)
+ return;
+
+ /* Save the current ctx object */
+ ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ for (i = 0; i < 5; i++)
+ ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
+
+ /* Save the FIFO state */
+ st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+ st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
+ st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
+ fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+
+ for (i = 0; i < ARRAY_SIZE(fifo); i++)
+ fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
+
+ /* Switch to the celsius subchannel */
+ for (i = 0; i < 5; i++)
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
+ nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+ nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+
+ /* Inject NV10TCL_DMA_VTXBUF */
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
+ 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+ nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+ pgraph->fifo_access(dev, true);
+ pgraph->fifo_access(dev, false);
+
+ /* Restore the FIFO state */
+ for (i = 0; i < ARRAY_SIZE(fifo); i++)
+ nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
+
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+
+ /* Restore the current ctx object */
+ for (i = 0; i < 5; i++)
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
+}
+
int nv10_graph_load_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
@@ -670,6 +742,8 @@ int nv10_graph_load_context(struct nouveau_channel *chan)
}
nv10_graph_load_pipe(chan);
+ nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
+ & 0xffff));
nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
@@ -856,11 +930,12 @@ int nv10_graph_init(struct drm_device *dev)
for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
nv10_graph_set_region_tiling(dev, i, 0, 0, 0);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
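
[Editor's note] The NV10_PGRAPH_CTX_CACHE(i, j) macro collapses the forty raw constants the old nv10_graph_ctx_regs[] table spelled out. Judging from those constants (0x400160 through 0x4001fc, five banks of eight registers), the layout appears to be base 0x400160 with a 4-byte entry stride and a 0x20-byte bank stride; the check below recomputes the addresses under that assumption. The real macro definition lives in the register headers and may be spelled differently.

        #include <stdio.h>

        /* Inferred from the raw constants replaced above, not copied from nvreg.h. */
        #define CTX_CACHE(i, j) (0x400160 + 4 * (i) + 0x20 * (j))

        int main(void)
        {
                int i, j;

                for (i = 0; i < 8; i++)
                        for (j = 0; j < 5; j++)
                                printf("CTX_CACHE(%d, %d) = 0x%06x\n",
                                       i, j, (unsigned)CTX_CACHE(i, j));
                /* e.g. CTX_CACHE(7, 4) = 0x4001fc, the last constant in the old table */
                return 0;
        }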
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 74c880374fb9..44fefb0c7083 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -37,6 +37,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -52,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
- gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+ gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -64,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+ gpio->set(dev, DCB_GPIO_TVDAC1, true);
+ gpio->set(dev, DCB_GPIO_TVDAC0, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -110,12 +111,27 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+ gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
+ gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
return sample;
}
+static bool
+get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
+{
+ /* Zotac FX5200 */
+ if (dev->pdev->device == 0x0322 &&
+ dev->pdev->subsystem_vendor == 0x19da &&
+ (dev->pdev->subsystem_device == 0x1035 ||
+ dev->pdev->subsystem_device == 0x2035)) {
+ *pin_mask = 0xc;
+ return false;
+ }
+
+ return true;
+}
+
static enum drm_connector_status
nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
@@ -124,12 +140,20 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
struct drm_mode_config *conf = &dev->mode_config;
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
struct dcb_entry *dcb = tv_enc->base.dcb;
+ bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask);
- if (dev_priv->chipset == 0x42 ||
- dev_priv->chipset == 0x43)
- tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
- else
- tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;
+ if (nv04_dac_in_use(encoder))
+ return connector_status_disconnected;
+
+ if (reliable) {
+ if (dev_priv->chipset == 0x42 ||
+ dev_priv->chipset == 0x43)
+ tv_enc->pin_mask =
+ nv42_tv_sample_load(encoder) >> 28 & 0xe;
+ else
+ tv_enc->pin_mask =
+ nv17_dac_sample_load(encoder) >> 28 & 0xe;
+ }
switch (tv_enc->pin_mask) {
case 0x2:
@@ -154,7 +178,9 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
conf->tv_subconnector_property,
tv_enc->subconnector);
- if (tv_enc->subconnector) {
+ if (!reliable) {
+ return connector_status_unknown;
+ } else if (tv_enc->subconnector) {
NV_INFO(dev, "Load detected on output %c\n",
'@' + ffs(dcb->or));
return connector_status_connected;
@@ -296,6 +322,9 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
{
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
+ if (nv04_dac_in_use(encoder))
+ return false;
+
if (tv_norm->kind == CTV_ENC_MODE)
adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock;
else
@@ -307,6 +336,8 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -331,8 +362,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
- nv17_gpio_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
@@ -744,8 +775,10 @@ static struct drm_encoder_funcs nv17_tv_funcs = {
.destroy = nv17_tv_destroy,
};
-int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
+int
+nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry)
{
+ struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
struct nv17_tv_encoder *tv_enc = NULL;
@@ -774,5 +807,7 @@ int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry)
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+ nv17_tv_create_resources(encoder, connector);
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
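
[Editor's note] get_tv_detect_quirks() added above marks boards whose TV load detection is known to be bogus by matching PCI device and subsystem IDs and forcing the pin mask instead; nv17_tv_detect() then reports connector_status_unknown rather than trusting the sample. A standalone illustration of that lookup follows; the struct layout and helper name are invented, only the Zotac FX5200 IDs and the 0xc mask come from the patch.

        #include <stdbool.h>
        #include <stdint.h>
        #include <stdio.h>

        struct tv_quirk {
                uint16_t device, subvendor, subdevice;
                uint32_t pin_mask;
        };

        static const struct tv_quirk quirks[] = {
                { 0x0322, 0x19da, 0x1035, 0xc },        /* Zotac FX5200 */
                { 0x0322, 0x19da, 0x2035, 0xc },        /* Zotac FX5200 variant */
        };

        /* Returns true when sampled load detection can be trusted. */
        static bool tv_detect_reliable(uint16_t dev, uint16_t subv, uint16_t subd,
                                       uint32_t *pin_mask)
        {
                unsigned int i;

                for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
                        if (quirks[i].device == dev &&
                            quirks[i].subvendor == subv &&
                            quirks[i].subdevice == subd) {
                                *pin_mask = quirks[i].pin_mask;
                                return false;
                        }
                }
                return true;
        }

        int main(void)
        {
                uint32_t pins = 0;

                if (!tv_detect_reliable(0x0322, 0x19da, 0x1035, &pins))
                        printf("quirk hit, forced pin mask 0x%x\n", (unsigned)pins);
                return 0;
        }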
diff --git a/drivers/gpu/drm/nouveau/nv20_graph.c b/drivers/gpu/drm/nouveau/nv20_graph.c
index d6fc0a82f03d..17f309b36c91 100644
--- a/drivers/gpu/drm/nouveau/nv20_graph.c
+++ b/drivers/gpu/drm/nouveau/nv20_graph.c
@@ -370,68 +370,54 @@ nv20_graph_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
- unsigned int ctx_size;
unsigned int idoffs = 0x28/4;
int ret;
switch (dev_priv->chipset) {
case 0x20:
- ctx_size = NV20_GRCTX_SIZE;
ctx_init = nv20_graph_context_init;
idoffs = 0;
break;
case 0x25:
case 0x28:
- ctx_size = NV25_GRCTX_SIZE;
ctx_init = nv25_graph_context_init;
break;
case 0x2a:
- ctx_size = NV2A_GRCTX_SIZE;
ctx_init = nv2a_graph_context_init;
idoffs = 0;
break;
case 0x30:
case 0x31:
- ctx_size = NV30_31_GRCTX_SIZE;
ctx_init = nv30_31_graph_context_init;
break;
case 0x34:
- ctx_size = NV34_GRCTX_SIZE;
ctx_init = nv34_graph_context_init;
break;
case 0x35:
case 0x36:
- ctx_size = NV35_36_GRCTX_SIZE;
ctx_init = nv35_36_graph_context_init;
break;
default:
- ctx_size = 0;
- ctx_init = nv35_36_graph_context_init;
- NV_ERROR(dev, "Please contact the devs if you want your NV%x"
- " card to work\n", dev_priv->chipset);
- return -ENOSYS;
- break;
+ BUG_ON(1);
}
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &chan->ramin_grctx);
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx);
if (ret)
return ret;
/* Initialise default context values */
- dev_priv->engine.instmem.prepare_access(dev, true);
ctx_init(dev, chan->ramin_grctx->gpuobj);
/* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
nv_wo32(dev, chan->ramin_grctx->gpuobj, idoffs,
(chan->id << 24) | 0x1); /* CTX_USER */
- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id,
- chan->ramin_grctx->instance >> 4);
-
- dev_priv->engine.instmem.finish_access(dev);
+ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
return 0;
}
@@ -440,13 +426,12 @@ nv20_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
if (chan->ramin_grctx)
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
- dev_priv->engine.instmem.prepare_access(dev, true);
- nv_wo32(dev, dev_priv->ctx_table->gpuobj, chan->id, 0);
- dev_priv->engine.instmem.finish_access(dev);
+ nv_wo32(dev, pgraph->ctx_table->gpuobj, chan->id, 0);
}
int
@@ -538,29 +523,44 @@ nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
int
nv20_graph_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv =
- (struct drm_nouveau_private *)dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
uint32_t tmp, vramsz;
int ret, i;
+ switch (dev_priv->chipset) {
+ case 0x20:
+ pgraph->grctx_size = NV20_GRCTX_SIZE;
+ break;
+ case 0x25:
+ case 0x28:
+ pgraph->grctx_size = NV25_GRCTX_SIZE;
+ break;
+ case 0x2a:
+ pgraph->grctx_size = NV2A_GRCTX_SIZE;
+ break;
+ default:
+ NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
+ pgraph->accel_blocked = true;
+ return 0;
+ }
+
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
- if (!dev_priv->ctx_table) {
+ if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- dev_priv->ctx_table_size = 32 * 4;
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
- dev_priv->ctx_table_size, 16,
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
- &dev_priv->ctx_table);
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- dev_priv->ctx_table->instance >> 4);
+ pgraph->ctx_table->instance >> 4);
nv20_graph_rdi(dev);
@@ -616,7 +616,7 @@ nv20_graph_init(struct drm_device *dev)
nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
/* begin RAM config */
- vramsz = drm_get_resource_len(dev, 0) - 1;
+ vramsz = pci_resource_len(dev->pdev, 0) - 1;
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
@@ -644,34 +644,52 @@ void
nv20_graph_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
+ nouveau_gpuobj_ref_del(dev, &pgraph->ctx_table);
}
int
nv30_graph_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int ret, i;
+ switch (dev_priv->chipset) {
+ case 0x30:
+ case 0x31:
+ pgraph->grctx_size = NV30_31_GRCTX_SIZE;
+ break;
+ case 0x34:
+ pgraph->grctx_size = NV34_GRCTX_SIZE;
+ break;
+ case 0x35:
+ case 0x36:
+ pgraph->grctx_size = NV35_36_GRCTX_SIZE;
+ break;
+ default:
+ NV_ERROR(dev, "unknown chipset, disabling acceleration\n");
+ pgraph->accel_blocked = true;
+ return 0;
+ }
+
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
- if (!dev_priv->ctx_table) {
+ if (!pgraph->ctx_table) {
/* Create Context Pointer Table */
- dev_priv->ctx_table_size = 32 * 4;
- ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
- dev_priv->ctx_table_size, 16,
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32 * 4, 16,
NVOBJ_FLAG_ZERO_ALLOC,
- &dev_priv->ctx_table);
+ &pgraph->ctx_table);
if (ret)
return ret;
}
nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE,
- dev_priv->ctx_table->instance >> 4);
+ pgraph->ctx_table->instance >> 4);
nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
@@ -717,7 +735,7 @@ nv30_graph_init(struct drm_device *dev)
nv_wr32(dev, 0x0040075c , 0x00000001);
/* begin RAM config */
- /* vramsz = drm_get_resource_len(dev, 0) - 1; */
+ /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
if (dev_priv->chipset != 0x34) {
diff --git a/drivers/gpu/drm/nouveau/nv30_fb.c b/drivers/gpu/drm/nouveau/nv30_fb.c
new file mode 100644
index 000000000000..4a3f2f095128
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv30_fb.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+static int
+calc_bias(struct drm_device *dev, int k, int i, int j)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int b = (dev_priv->chipset > 0x30 ?
+ nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+ 0) & 0xf;
+
+ return 2 * (b & 0x8 ? b - 0x10 : b);
+}
+
+static int
+calc_ref(struct drm_device *dev, int l, int k, int i)
+{
+ int j, x = 0;
+
+ for (j = 0; j < 4; j++) {
+ int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);
+
+ x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
+ }
+
+ return x;
+}
+
+int
+nv30_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i, j;
+
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_region_tiling(dev, i, 0, 0, 0);
+
+ /* Init the memory timing regs at 0x10037c/0x1003ac */
+ if (dev_priv->chipset == 0x30 ||
+ dev_priv->chipset == 0x31 ||
+ dev_priv->chipset == 0x35) {
+ /* Related to ROP count */
+ int n = (dev_priv->chipset == 0x31 ? 2 : 4);
+ int l = nv_rd32(dev, 0x1003d0);
+
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < 3; j++)
+ nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
+ calc_ref(dev, l, 0, j));
+
+ for (j = 0; j < 2; j++)
+ nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
+ calc_ref(dev, l, 1, j));
+ }
+ }
+
+ return 0;
+}
+
+void
+nv30_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index 500ccfd3a0b8..2b67f1835c39 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -48,7 +48,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
nv_wi32(dev, fc + 12, chan->pushbuf->instance >> 4);
@@ -61,7 +60,6 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
0x30000000 /* no idea.. */);
nv_wi32(dev, fc + 56, chan->ramin_grctx->instance >> 4);
nv_wi32(dev, fc + 60, 0x0001FFFF);
- dev_priv->engine.instmem.finish_access(dev);
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
@@ -89,8 +87,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV40_RAMFC(chid), tmp, tmp2;
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
nv_wr32(dev, NV10_PFIFO_CACHE1_REF_CNT, nv_ri32(dev, fc + 8));
@@ -127,8 +123,6 @@ nv40_fifo_do_load_context(struct drm_device *dev, int chid)
nv_wr32(dev, 0x2088, nv_ri32(dev, fc + 76));
nv_wr32(dev, 0x3300, nv_ri32(dev, fc + 80));
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
@@ -166,7 +160,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
return 0;
fc = NV40_RAMFC(chid);
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
nv_wi32(dev, fc + 4, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
nv_wi32(dev, fc + 8, nv_rd32(dev, NV10_PFIFO_CACHE1_REF_CNT));
@@ -200,7 +193,6 @@ nv40_fifo_unload_context(struct drm_device *dev)
tmp |= (nv_rd32(dev, NV04_PFIFO_CACHE1_PUT) << 16);
nv_wi32(dev, fc + 72, tmp);
#endif
- dev_priv->engine.instmem.finish_access(dev);
nv40_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1,
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 704a25d04ac9..fd7d2b501316 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -58,6 +58,7 @@ nv40_graph_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_grctx ctx = {};
int ret;
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
@@ -67,20 +68,13 @@ nv40_graph_create_context(struct nouveau_channel *chan)
return ret;
/* Initialise default context values */
- dev_priv->engine.instmem.prepare_access(dev, true);
- if (!pgraph->ctxprog) {
- struct nouveau_grctx ctx = {};
-
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = chan->ramin_grctx->gpuobj;
- nv40_grctx_init(&ctx);
- } else {
- nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
- }
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv40_grctx_init(&ctx);
+
nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
chan->ramin_grctx->gpuobj->im_pramin->start);
- dev_priv->engine.instmem.finish_access(dev);
return 0;
}
@@ -238,7 +232,8 @@ nv40_graph_init(struct drm_device *dev)
struct drm_nouveau_private *dev_priv =
(struct drm_nouveau_private *)dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- uint32_t vramsz;
+ struct nouveau_grctx ctx = {};
+ uint32_t vramsz, *cp;
int i, j;
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -246,32 +241,22 @@ nv40_graph_init(struct drm_device *dev)
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- if (nouveau_ctxfw) {
- nouveau_grctx_prog_load(dev);
- dev_priv->engine.graph.grctx_size = 175 * 1024;
- }
+ cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
- if (!dev_priv->engine.graph.ctxprog) {
- struct nouveau_grctx ctx = {};
- uint32_t *cp;
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 256;
+ nv40_grctx_init(&ctx);
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
- cp = kmalloc(sizeof(*cp) * 256, GFP_KERNEL);
- if (!cp)
- return -ENOMEM;
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 256;
- nv40_grctx_init(&ctx);
- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
-
- kfree(cp);
- }
+ kfree(cp);
/* No context present currently */
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
@@ -367,7 +352,7 @@ nv40_graph_init(struct drm_device *dev)
nv40_graph_set_region_tiling(dev, i, 0, 0, 0);
/* begin RAM config */
- vramsz = drm_get_resource_len(dev, 0) - 1;
+ vramsz = pci_resource_len(dev->pdev, 0) - 1;
switch (dev_priv->chipset) {
case 0x40:
nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
@@ -407,7 +392,6 @@ nv40_graph_init(struct drm_device *dev)
void nv40_graph_takedown(struct drm_device *dev)
{
- nouveau_grctx_fini(dev);
}
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
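
[Editor's note] nv40_graph_init() above now always generates the context program with nv40_grctx_init() into a 256-word buffer and pushes it out through the CTXCTL microcode index/data register pair; the single index write followed by a loop of data writes suggests the data register auto-increments. The toy below mimics that upload pattern; the fake MMIO array and the generated words are placeholders, the real writes go to NV40_PGRAPH_CTXCTL_UCODE_INDEX/_DATA.

        #include <stdint.h>
        #include <stdio.h>

        static uint32_t ucode_store[256];
        static unsigned int ucode_index;

        static void wr_index(uint32_t idx)  { ucode_index = idx; }
        static void wr_data(uint32_t word)  { ucode_store[ucode_index++ % 256] = word; }

        int main(void)
        {
                uint32_t cp[256];
                unsigned int i, len = 16;       /* pretend ctxprog_len came back as 16 */

                for (i = 0; i < len; i++)       /* stand-in for nv40_grctx_init() output */
                        cp[i] = 0x00400000 | i;

                wr_index(0);                    /* select word 0 of the microcode store */
                for (i = 0; i < len; i++)
                        wr_data(cp[i]);         /* data writes advance the index */

                printf("uploaded %u words, last = 0x%08x\n",
                       len, (unsigned)ucode_store[len - 1]);
                return 0;
        }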
diff --git a/drivers/gpu/drm/nouveau/nv40_mc.c b/drivers/gpu/drm/nouveau/nv40_mc.c
index 2a3495e848e9..e4e72c12ab6a 100644
--- a/drivers/gpu/drm/nouveau/nv40_mc.c
+++ b/drivers/gpu/drm/nouveau/nv40_mc.c
@@ -19,7 +19,7 @@ nv40_mc_init(struct drm_device *dev)
case 0x46: /* G72 */
case 0x4e:
case 0x4c: /* C51_G7X */
- tmp = nv_rd32(dev, NV40_PFB_020C);
+ tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
nv_wr32(dev, NV40_PMC_1700, tmp);
nv_wr32(dev, NV40_PMC_1704, 0);
nv_wr32(dev, NV40_PMC_1708, 0);
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index b4e4a3b05eae..bfd4ca2fe7ef 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -264,11 +264,16 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
int
nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
{
- uint32_t reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct pll_lims pll;
- uint32_t reg1, reg2;
+ uint32_t reg, reg1, reg2;
int ret, N1, M1, N2, M2, P;
+ if (dev_priv->chipset < NV_C0)
+ reg = NV50_PDISPLAY_CRTC_CLK_CTRL1(head);
+ else
+ reg = 0x614140 + (head * 0x800);
+
ret = get_pll_limits(dev, reg, &pll);
if (ret)
return ret;
@@ -286,7 +291,8 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
nv_wr32(dev, reg, 0x10000611);
nv_wr32(dev, reg + 4, reg1 | (M1 << 16) | N1);
nv_wr32(dev, reg + 8, reg2 | (P << 28) | (M2 << 16) | N2);
- } else {
+ } else
+ if (dev_priv->chipset < NV_C0) {
ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
if (ret <= 0)
return 0;
@@ -298,6 +304,17 @@ nv50_crtc_set_clock(struct drm_device *dev, int head, int pclk)
nv_wr32(dev, reg, 0x50000610);
nv_wr32(dev, reg + 4, reg1 | (P << 16) | (M1 << 8) | N1);
nv_wr32(dev, reg + 8, N2);
+ } else {
+ ret = nv50_calc_pll2(dev, &pll, pclk, &N1, &N2, &M1, &P);
+ if (ret <= 0)
+ return 0;
+
+ NV_DEBUG(dev, "pclk %d out %d N %d fN 0x%04x M %d P %d\n",
+ pclk, ret, N1, N2, M1, P);
+
+ nv_mask(dev, reg + 0x0c, 0x00000000, 0x00000100);
+ nv_wr32(dev, reg + 0x04, (P << 16) | (N1 << 8) | M1);
+ nv_wr32(dev, reg + 0x10, N2 << 16);
}
return 0;
@@ -348,7 +365,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, buffer_handle);
if (!gem)
- return -EINVAL;
+ return -ENOENT;
cursor = nouveau_gem_object(gem);
ret = nouveau_bo_map(cursor);
@@ -381,15 +398,12 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
static void
nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t start, uint32_t size)
{
+ int end = (start + size > 256) ? 256 : start + size, i;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- int i;
-
- if (size != 256)
- return;
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
nv_crtc->lut.r[i] = r[i];
nv_crtc->lut.g[i] = g[i];
nv_crtc->lut.b[i] = b[i];
@@ -440,47 +454,15 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
- uint32_t dac = 0, sor = 0;
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
- /* Disconnect all unused encoders. */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (!drm_helper_encoder_in_use(encoder))
- continue;
-
- if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
- nv_encoder->dcb->type == OUTPUT_TV)
- dac |= (1 << nv_encoder->or);
- else
- sor |= (1 << nv_encoder->or);
- }
-
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->type == OUTPUT_ANALOG ||
- nv_encoder->dcb->type == OUTPUT_TV) {
- if (dac & (1 << nv_encoder->or))
- continue;
- } else {
- if (sor & (1 << nv_encoder->or))
- continue;
- }
-
- nv_encoder->disconnect(nv_encoder);
- }
-
nv50_crtc_blank(nv_crtc, true);
}
static void
nv50_crtc_commit(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc2;
struct drm_device *dev = crtc->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
@@ -491,20 +473,14 @@ nv50_crtc_commit(struct drm_crtc *crtc)
nv50_crtc_blank(nv_crtc, false);
- /* Explicitly blank all unused crtc's. */
- list_for_each_entry(crtc2, &dev->mode_config.crtc_list, head) {
- if (!drm_helper_crtc_in_use(crtc2))
- nv50_crtc_blank(nouveau_crtc(crtc2), true);
- }
-
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while committing crtc\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ OUT_RING (evo, 0);
+ FIRE_RING (evo);
}
static bool
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 1fd9537beff6..1bc085962945 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -37,22 +37,31 @@
#include "nv50_display.h"
static void
-nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
+nv50_dac_disconnect(struct drm_encoder *encoder)
{
- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
int ret;
+ if (!nv_encoder->crtc)
+ return;
+ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
+
NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
- ret = RING_SPACE(evo, 2);
+ ret = RING_SPACE(evo, 4);
if (ret) {
NV_ERROR(dev, "no space while disconnecting DAC\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, 0);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
+
+ nv_encoder->crtc = NULL;
}
static enum drm_connector_status
@@ -213,7 +222,8 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0, mode_ctl2 = 0;
int ret;
- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d type %d crtc %d\n",
+ nv_encoder->or, nv_encoder->dcb->type, crtc->index);
nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -243,6 +253,14 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
BEGIN_RING(evo, 0, NV50_EVO_DAC(nv_encoder->or, MODE_CTRL), 2);
OUT_RING(evo, mode_ctl);
OUT_RING(evo, mode_ctl2);
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static struct drm_crtc *
+nv50_dac_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
}
static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
@@ -253,7 +271,9 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
.prepare = nv50_dac_prepare,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
- .detect = nv50_dac_detect
+ .get_crtc = nv50_dac_crtc_get,
+ .detect = nv50_dac_detect,
+ .disable = nv50_dac_disconnect
};
static void
@@ -275,14 +295,11 @@ static const struct drm_encoder_funcs nv50_dac_encoder_funcs = {
};
int
-nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
+nv50_dac_create(struct drm_connector *connector, struct dcb_entry *entry)
{
struct nouveau_encoder *nv_encoder;
struct drm_encoder *encoder;
- NV_DEBUG_KMS(dev, "\n");
- NV_INFO(dev, "Detected a DAC output\n");
-
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
if (!nv_encoder)
return -ENOMEM;
@@ -291,14 +308,14 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
- nv_encoder->disconnect = nv50_dac_disconnect;
-
- drm_encoder_init(dev, encoder, &nv50_dac_encoder_funcs,
+ drm_encoder_init(connector->dev, encoder, &nv50_dac_encoder_funcs,
DRM_MODE_ENCODER_DAC);
drm_encoder_helper_add(encoder, &nv50_dac_helper_funcs);
encoder->possible_crtcs = entry->heads;
encoder->possible_clones = 0;
+
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 580a5d10be93..612fa6d6a0cb 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -71,14 +71,16 @@ nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
return ret;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
nv_wo32(dev, obj, 1, limit);
nv_wo32(dev, obj, 2, offset);
nv_wo32(dev, obj, 3, 0x00000000);
nv_wo32(dev, obj, 4, 0x00000000);
- nv_wo32(dev, obj, 5, 0x00010000);
- dev_priv->engine.instmem.finish_access(dev);
+ if (dev_priv->card_type < NV_C0)
+ nv_wo32(dev, obj, 5, 0x00010000);
+ else
+ nv_wo32(dev, obj, 5, 0x00020000);
+ dev_priv->engine.instmem.flush(dev);
return 0;
}
@@ -110,8 +112,8 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
return ret;
}
- ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
- im_pramin->start, 32768);
+ ret = drm_mm_init(&chan->ramin_heap,
+ chan->ramin->gpuobj->im_pramin->start, 32768);
if (ret) {
NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
nv50_evo_channel_del(pchan);
@@ -179,13 +181,25 @@ nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
}
int
+nv50_display_early_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nv50_display_late_takedown(struct drm_device *dev)
+{
+}
+
+int
nv50_display_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_channel *evo = dev_priv->evo;
struct drm_connector *connector;
- uint32_t val, ram_amount, hpd_en[2];
+ uint32_t val, ram_amount;
uint64_t start;
int ret, i;
@@ -366,26 +380,13 @@ nv50_display_init(struct drm_device *dev)
NV50_PDISPLAY_INTR_EN_CLK_UNK40));
/* enable hotplug interrupts */
- hpd_en[0] = hpd_en[1] = 0;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
- struct dcb_gpio_entry *gpio;
if (conn->dcb->gpio_tag == 0xff)
continue;
- gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
- if (!gpio)
- continue;
-
- hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
- }
-
- nv_wr32(dev, 0xe054, 0xffffffff);
- nv_wr32(dev, 0xe050, hpd_en[0]);
- if (dev_priv->chipset >= 0x90) {
- nv_wr32(dev, 0xe074, 0xffffffff);
- nv_wr32(dev, 0xe070, hpd_en[1]);
+ pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
}
return 0;
@@ -465,6 +466,7 @@ int nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ struct drm_connector *connector, *ct;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
@@ -507,14 +509,18 @@ int nv50_display_create(struct drm_device *dev)
continue;
}
+ connector = nouveau_connector_create(dev, entry->connector);
+ if (IS_ERR(connector))
+ continue;
+
switch (entry->type) {
case OUTPUT_TMDS:
case OUTPUT_LVDS:
case OUTPUT_DP:
- nv50_sor_create(dev, entry);
+ nv50_sor_create(connector, entry);
break;
case OUTPUT_ANALOG:
- nv50_dac_create(dev, entry);
+ nv50_dac_create(connector, entry);
break;
default:
NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
@@ -522,11 +528,13 @@ int nv50_display_create(struct drm_device *dev)
}
}
- for (i = 0 ; i < dcb->connector.entries; i++) {
- if (i != 0 && dcb->connector.entry[i].index2 ==
- dcb->connector.entry[i - 1].index2)
- continue;
- nouveau_connector_create(dev, &dcb->connector.entry[i]);
+ list_for_each_entry_safe(connector, ct,
+ &dev->mode_config.connector_list, head) {
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, "%s has no encoders, removing\n",
+ drm_get_connector_name(connector));
+ connector->funcs->destroy(connector);
+ }
}
ret = nv50_display_init(dev);
@@ -538,7 +546,8 @@ int nv50_display_create(struct drm_device *dev)
return 0;
}
-int nv50_display_destroy(struct drm_device *dev)
+void
+nv50_display_destroy(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -548,135 +557,30 @@ int nv50_display_destroy(struct drm_device *dev)
nv50_display_disable(dev);
nv50_evo_channel_del(&dev_priv->evo);
-
- return 0;
-}
-
-static inline uint32_t
-nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t mc;
-
- if (sor) {
- if (dev_priv->chipset < 0x90 ||
- dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
- mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
- else
- mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
- } else {
- mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
- }
-
- return mc;
-}
-
-static int
-nv50_display_irq_head(struct drm_device *dev, int *phead,
- struct dcb_entry **pdcbent)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
- uint32_t dac = 0, sor = 0;
- int head, i, or = 0, type = OUTPUT_ANY;
-
- /* We're assuming that head 0 *or* head 1 will be active here,
- * and not both. I'm not sure if the hw will even signal both
- * ever, but it definitely shouldn't for us as we commit each
- * CRTC separately, and submission will be blocked by the GPU
- * until we handle each in turn.
- */
- NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
- head = ffs((unk30 >> 9) & 3) - 1;
- if (head < 0)
- return -EINVAL;
-
- /* This assumes CRTCs are never bound to multiple encoders, which
- * should be the case.
- */
- for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
- uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
- if (!(mc & (1 << head)))
- continue;
-
- switch ((mc >> 8) & 0xf) {
- case 0: type = OUTPUT_ANALOG; break;
- case 1: type = OUTPUT_TV; break;
- default:
- NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
- return -1;
- }
-
- or = i;
- }
-
- for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
- uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
- if (!(mc & (1 << head)))
- continue;
-
- switch ((mc >> 8) & 0xf) {
- case 0: type = OUTPUT_LVDS; break;
- case 1: type = OUTPUT_TMDS; break;
- case 2: type = OUTPUT_TMDS; break;
- case 5: type = OUTPUT_TMDS; break;
- case 8: type = OUTPUT_DP; break;
- case 9: type = OUTPUT_DP; break;
- default:
- NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
- return -1;
- }
-
- or = i;
- }
-
- NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
- if (type == OUTPUT_ANY) {
- NV_ERROR(dev, "unknown encoder!!\n");
- return -1;
- }
-
- for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
- struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
-
- if (dcbent->type != type)
- continue;
-
- if (!(dcbent->or & (1 << or)))
- continue;
-
- *phead = head;
- *pdcbent = dcbent;
- return 0;
- }
-
- NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
- return 0;
}
-static uint32_t
-nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
- int pxclk)
+static u16
+nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
+ u32 mc, int pxclk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_encoder *encoder;
struct nvbios *bios = &dev_priv->vbios;
- uint32_t mc, script = 0, or;
+ u32 script = 0, or;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- if (nv_encoder->dcb != dcbent)
+ if (nv_encoder->dcb != dcb)
continue;
nv_connector = nouveau_encoder_connector_get(nv_encoder);
break;
}
- or = ffs(dcbent->or) - 1;
- mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
- switch (dcbent->type) {
+ or = ffs(dcb->or) - 1;
+ switch (dcb->type) {
case OUTPUT_LVDS:
script = (mc >> 8) & 0xf;
if (bios->fp_no_ddc) {
@@ -767,17 +671,88 @@ nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
static void
nv50_display_unk10_handler(struct drm_device *dev)
{
- struct dcb_entry *dcbent;
- int head, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 unk30 = nv_rd32(dev, 0x610030), mc;
+ int i, crtc, or, type = OUTPUT_ANY;
- ret = nv50_display_irq_head(dev, &head, &dcbent);
- if (ret)
- goto ack;
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ dev_priv->evo_irq.dcb = NULL;
nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);
- nouveau_bios_run_display_table(dev, dcbent, 0, -1);
+ /* Determine which CRTC we're dealing with, only 1 ever will be
+ * signalled at the same time with the current nouveau code.
+ */
+ crtc = ffs((unk30 & 0x00000060) >> 5) - 1;
+ if (crtc < 0)
+ goto ack;
+
+ /* Nothing needs to be done for the encoder */
+ crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
+ if (crtc < 0)
+ goto ack;
+ /* Find which encoder was connected to the CRTC */
+ for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_C(i));
+ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
+
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_ANALOG; break;
+ case 1: type = OUTPUT_TV; break;
+ default:
+ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
+
+ or = i;
+ }
+
+ for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0)
+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_C(i));
+ else
+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_C(i));
+
+ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
+
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_LVDS; break;
+ case 1: type = OUTPUT_TMDS; break;
+ case 2: type = OUTPUT_TMDS; break;
+ case 5: type = OUTPUT_TMDS; break;
+ case 8: type = OUTPUT_DP; break;
+ case 9: type = OUTPUT_DP; break;
+ default:
+ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
+
+ or = i;
+ }
+
+ /* There was no encoder to disable */
+ if (type == OUTPUT_ANY)
+ goto ack;
+
+ /* Disable the encoder */
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
+
+ if (dcb->type == type && (dcb->or & (1 << or))) {
+ nouveau_bios_run_display_table(dev, dcb, 0, -1);
+ dev_priv->evo_irq.dcb = dcb;
+ goto ack;
+ }
+ }
+
+ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
nv_wr32(dev, 0x610030, 0x80000000);
@@ -817,33 +792,103 @@ nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
static void
nv50_display_unk20_handler(struct drm_device *dev)
{
- struct dcb_entry *dcbent;
- uint32_t tmp, pclk, script;
- int head, or, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc;
+ struct dcb_entry *dcb;
+ int i, crtc, or, type = OUTPUT_ANY;
- ret = nv50_display_irq_head(dev, &head, &dcbent);
- if (ret)
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ dcb = dev_priv->evo_irq.dcb;
+ if (dcb) {
+ nouveau_bios_run_display_table(dev, dcb, 0, -2);
+ dev_priv->evo_irq.dcb = NULL;
+ }
+
+ /* CRTC clock change requested? */
+ crtc = ffs((unk30 & 0x00000600) >> 9) - 1;
+ if (crtc >= 0) {
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
+ pclk &= 0x003fffff;
+
+ nv50_crtc_set_clock(dev, crtc, pclk);
+
+ tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
+ tmp &= ~0x000000f;
+ nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc), tmp);
+ }
+
+ /* Nothing needs to be done for the encoder */
+ crtc = ffs((unk30 & 0x00000180) >> 7) - 1;
+ if (crtc < 0)
goto ack;
- or = ffs(dcbent->or) - 1;
- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
- script = nv50_display_script_select(dev, dcbent, pclk);
+ pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK)) & 0x003fffff;
- NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
+ /* Find which encoder is connected to the CRTC */
+ for (i = 0; type == OUTPUT_ANY && i < 3; i++) {
+ mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(i));
+ NV_DEBUG_KMS(dev, "DAC-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
- if (dcbent->type != OUTPUT_DP)
- nouveau_bios_run_display_table(dev, dcbent, 0, -2);
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_ANALOG; break;
+ case 1: type = OUTPUT_TV; break;
+ default:
+ NV_ERROR(dev, "invalid mc, DAC-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
- nv50_crtc_set_clock(dev, head, pclk);
+ or = i;
+ }
- nouveau_bios_run_display_table(dev, dcbent, script, pclk);
+ for (i = 0; type == OUTPUT_ANY && i < 4; i++) {
+ if (dev_priv->chipset < 0x90 ||
+ dev_priv->chipset == 0x92 ||
+ dev_priv->chipset == 0xa0)
+ mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(i));
+ else
+ mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(i));
- nv50_display_unk20_dp_hack(dev, dcbent);
+ NV_DEBUG_KMS(dev, "SOR-%d mc: 0x%08x\n", i, mc);
+ if (!(mc & (1 << crtc)))
+ continue;
+
+ switch ((mc & 0x00000f00) >> 8) {
+ case 0: type = OUTPUT_LVDS; break;
+ case 1: type = OUTPUT_TMDS; break;
+ case 2: type = OUTPUT_TMDS; break;
+ case 5: type = OUTPUT_TMDS; break;
+ case 8: type = OUTPUT_DP; break;
+ case 9: type = OUTPUT_DP; break;
+ default:
+ NV_ERROR(dev, "invalid mc, SOR-%d: 0x%08x\n", i, mc);
+ goto ack;
+ }
+
+ or = i;
+ }
+
+ if (type == OUTPUT_ANY)
+ goto ack;
+
+ /* Enable the encoder */
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ dcb = &dev_priv->vbios.dcb.entry[i];
+ if (dcb->type == type && (dcb->or & (1 << or)))
+ break;
+ }
+
+ if (i == dev_priv->vbios.dcb.entries) {
+ NV_ERROR(dev, "no dcb for %d %d 0x%08x\n", or, type, mc);
+ goto ack;
+ }
- tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
- tmp &= ~0x000000f;
- nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);
+ script = nv50_display_script_select(dev, dcb, mc, pclk);
+ nouveau_bios_run_display_table(dev, dcb, script, pclk);
- if (dcbent->type != OUTPUT_ANALOG) {
+ nv50_display_unk20_dp_hack(dev, dcb);
+
+ if (dcb->type != OUTPUT_ANALOG) {
tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
tmp &= ~0x00000f0f;
if (script & 0x0100)
@@ -853,24 +898,61 @@ nv50_display_unk20_handler(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
}
+ dev_priv->evo_irq.dcb = dcb;
+ dev_priv->evo_irq.pclk = pclk;
+ dev_priv->evo_irq.script = script;
+
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
nv_wr32(dev, 0x610030, 0x80000000);
}
+/* If programming a TMDS output on a SOR that can also be configured for
+ * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
+ *
+ * It looks like the VBIOS TMDS scripts make an attempt at this, however,
+ * the VBIOS scripts on at least one board I have only switch it off on
+ * link 0, causing a blank display if the output has previously been
+ * programmed for DisplayPort.
+ */
+static void
+nv50_display_unk40_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
+{
+ int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
+ struct drm_encoder *encoder;
+ u32 tmp;
+
+ if (dcb->type != OUTPUT_TMDS)
+ return;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->type == OUTPUT_DP &&
+ nv_encoder->dcb->or & (1 << or)) {
+ tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
+ tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
+ nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
+ break;
+ }
+ }
+}
+
static void
nv50_display_unk40_handler(struct drm_device *dev)
{
- struct dcb_entry *dcbent;
- int head, pclk, script, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_entry *dcb = dev_priv->evo_irq.dcb;
+ u16 script = dev_priv->evo_irq.script;
+ u32 unk30 = nv_rd32(dev, 0x610030), pclk = dev_priv->evo_irq.pclk;
- ret = nv50_display_irq_head(dev, &head, &dcbent);
- if (ret)
+ NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
+ dev_priv->evo_irq.dcb = NULL;
+ if (!dcb)
goto ack;
- pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
- script = nv50_display_script_select(dev, dcbent, pclk);
- nouveau_bios_run_display_table(dev, dcbent, script, -pclk);
+ nouveau_bios_run_display_table(dev, dcb, script, -pclk);
+ nv50_display_unk40_dp_set_tmds(dev, dcb);
ack:
nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
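
(Editorial note, not part of the patch: both the unk10 and unk20 handlers above decode the same MODE_CTRL nibble, bits 8-11, to work out which output type an encoder is driving: on a DAC, 0 means analog and 1 means TV; on a SOR, 0 is LVDS, 1/2/5 are TMDS and 8/9 are DP. A small stand-alone decoder for that nibble is sketched below; the enum and helper names are hypothetical, only the case values come from the code above.)

#include <stdint.h>
#include <stdio.h>

/* Illustrative decoder for the MODE_CTRL type nibble read in the
 * unk10/unk20 handlers; mirrors the switch statements in the diff. */
enum output_type { OUT_NONE, OUT_ANALOG, OUT_TV, OUT_LVDS, OUT_TMDS, OUT_DP };

static enum output_type decode_dac_mc(uint32_t mc)
{
	switch ((mc & 0x00000f00) >> 8) {
	case 0: return OUT_ANALOG;
	case 1: return OUT_TV;
	default: return OUT_NONE;	/* invalid mode_ctrl value */
	}
}

static enum output_type decode_sor_mc(uint32_t mc)
{
	switch ((mc & 0x00000f00) >> 8) {
	case 0: return OUT_LVDS;
	case 1:
	case 2:
	case 5: return OUT_TMDS;
	case 8:
	case 9: return OUT_DP;
	default: return OUT_NONE;
	}
}

int main(void)
{
	printf("DAC mc 0x00000101 -> %d\n", decode_dac_mc(0x00000101));
	printf("SOR mc 0x00000902 -> %d\n", decode_sor_mc(0x00000902));
	return 0;
}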
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index 581d405ac014..c551f0b85ee0 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -38,9 +38,11 @@
void nv50_display_irq_handler(struct drm_device *dev);
void nv50_display_irq_handler_bh(struct work_struct *work);
void nv50_display_irq_hotplug_bh(struct work_struct *work);
-int nv50_display_init(struct drm_device *dev);
+int nv50_display_early_init(struct drm_device *dev);
+void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
-int nv50_display_destroy(struct drm_device *dev);
+int nv50_display_init(struct drm_device *dev);
+void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index e20c0e2474f3..fb0281ae8f90 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -28,41 +28,33 @@
#include "drm.h"
#include "nouveau_drv.h"
-struct nv50_fifo_priv {
- struct nouveau_gpuobj_ref *thingo[2];
- int cur_thingo;
-};
-
-#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
-
static void
-nv50_fifo_init_thingo(struct drm_device *dev)
+nv50_fifo_playlist_update(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_gpuobj_ref *cur;
int i, nr;
NV_DEBUG(dev, "\n");
- cur = priv->thingo[priv->cur_thingo];
- priv->cur_thingo = !priv->cur_thingo;
+ cur = pfifo->playlist[pfifo->cur_playlist];
+ pfifo->cur_playlist = !pfifo->cur_playlist;
/* We never schedule channel 0 or 127 */
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = 1, nr = 0; i < 127; i++) {
if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
nv_wo32(dev, cur->gpuobj, nr++, i);
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
nv_wr32(dev, 0x32f4, cur->instance >> 12);
nv_wr32(dev, 0x32ec, nr);
nv_wr32(dev, 0x2500, 0x101);
}
-static int
-nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
+static void
+nv50_fifo_channel_enable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = dev_priv->fifos[channel];
@@ -70,37 +62,28 @@ nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
NV_DEBUG(dev, "ch%d\n", channel);
- if (!chan->ramfc)
- return -EINVAL;
-
- if (IS_G80)
+ if (dev_priv->chipset == 0x50)
inst = chan->ramfc->instance >> 12;
else
inst = chan->ramfc->instance >> 8;
- nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
- inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
- if (!nt)
- nv50_fifo_init_thingo(dev);
- return 0;
+ nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst |
+ NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
}
static void
-nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
+nv50_fifo_channel_disable(struct drm_device *dev, int channel)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
- NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);
+ NV_DEBUG(dev, "ch%d\n", channel);
- if (IS_G80)
+ if (dev_priv->chipset == 0x50)
inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
else
inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);
-
- if (!nt)
- nv50_fifo_init_thingo(dev);
}
static void
@@ -133,12 +116,12 @@ nv50_fifo_init_context_table(struct drm_device *dev)
for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++) {
if (dev_priv->fifos[i])
- nv50_fifo_channel_enable(dev, i, true);
+ nv50_fifo_channel_enable(dev, i);
else
- nv50_fifo_channel_disable(dev, i, true);
+ nv50_fifo_channel_disable(dev, i);
}
- nv50_fifo_init_thingo(dev);
+ nv50_fifo_playlist_update(dev);
}
static void
@@ -162,41 +145,38 @@ nv50_fifo_init_regs(struct drm_device *dev)
nv_wr32(dev, 0x3270, 0);
/* Enable dummy channels setup by nv50_instmem.c */
- nv50_fifo_channel_enable(dev, 0, true);
- nv50_fifo_channel_enable(dev, 127, true);
+ nv50_fifo_channel_enable(dev, 0);
+ nv50_fifo_channel_enable(dev, 127);
}
int
nv50_fifo_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int ret;
NV_DEBUG(dev, "\n");
- priv = dev_priv->engine.fifo.priv;
- if (priv) {
- priv->cur_thingo = !priv->cur_thingo;
+ if (pfifo->playlist[0]) {
+ pfifo->cur_playlist = !pfifo->cur_playlist;
goto just_reset;
}
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- dev_priv->engine.fifo.priv = priv;
-
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[0]);
if (ret) {
- NV_ERROR(dev, "error creating thingo0: %d\n", ret);
+ NV_ERROR(dev, "error creating playlist 0: %d\n", ret);
return ret;
}
ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &pfifo->playlist[1]);
if (ret) {
- NV_ERROR(dev, "error creating thingo1: %d\n", ret);
+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+ NV_ERROR(dev, "error creating playlist 1: %d\n", ret);
return ret;
}
@@ -216,18 +196,15 @@ void
nv50_fifo_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
+ struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
NV_DEBUG(dev, "\n");
- if (!priv)
+ if (!pfifo->playlist[0])
return;
- nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
- nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);
-
- dev_priv->engine.fifo.priv = NULL;
- kfree(priv);
+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[0]);
+ nouveau_gpuobj_ref_del(dev, &pfifo->playlist[1]);
}
int
@@ -248,7 +225,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (IS_G80) {
+ if (dev_priv->chipset == 0x50) {
uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;
@@ -281,10 +258,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- dev_priv->engine.instmem.prepare_access(dev, true);
-
nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
- nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+ nv_wo32(dev, ramfc, 0x80/4, (0 << 27) /* 4KiB */ |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->instance >> 4));
nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
@@ -295,7 +272,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
chan->dma.ib_base * 4);
nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
- if (!IS_G80) {
+ if (dev_priv->chipset != 0x50) {
nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
nv_wo32(dev, chan->ramin->gpuobj, 1,
chan->ramfc->instance >> 8);
@@ -304,16 +281,10 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
}
- dev_priv->engine.instmem.finish_access(dev);
-
- ret = nv50_fifo_channel_enable(dev, chan->id, false);
- if (ret) {
- NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
- nouveau_gpuobj_ref_del(dev, &chan->ramfc);
- return ret;
- }
+ dev_priv->engine.instmem.flush(dev);
+ nv50_fifo_channel_enable(dev, chan->id);
+ nv50_fifo_playlist_update(dev);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
@@ -328,11 +299,12 @@ nv50_fifo_destroy_context(struct nouveau_channel *chan)
/* This will ensure the channel is seen as disabled. */
chan->ramfc = NULL;
- nv50_fifo_channel_disable(dev, chan->id, false);
+ nv50_fifo_channel_disable(dev, chan->id);
/* Dummy channel, also used on ch 127 */
if (chan->id == 0)
- nv50_fifo_channel_disable(dev, 127, false);
+ nv50_fifo_channel_disable(dev, 127);
+ nv50_fifo_playlist_update(dev);
nouveau_gpuobj_ref_del(dev, &ramfc);
nouveau_gpuobj_ref_del(dev, &chan->cache);
@@ -349,8 +321,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- dev_priv->engine.instmem.prepare_access(dev, false);
-
nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
@@ -396,7 +366,7 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
/* guessing that all the 0x34xx regs aren't on NV50 */
- if (!IS_G80) {
+ if (dev_priv->chipset != 0x50) {
nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
@@ -404,8 +374,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
}
- dev_priv->engine.instmem.finish_access(dev);
-
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0;
}
@@ -434,8 +402,6 @@ nv50_fifo_unload_context(struct drm_device *dev)
ramfc = chan->ramfc->gpuobj;
cache = chan->cache->gpuobj;
- dev_priv->engine.instmem.prepare_access(dev, true);
-
nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
@@ -482,7 +448,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
}
/* guessing that all the 0x34xx regs aren't on NV50 */
- if (!IS_G80) {
+ if (dev_priv->chipset != 0x50) {
nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
@@ -491,7 +457,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
/*XXX: probably reload ch127 (NULL) state back too */
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
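
(Editorial note, not part of the patch: nv50_fifo_playlist_update() above keeps two playlist objects and alternates between them on every update — it picks one buffer, flips the index for next time, rebuilds that buffer with the IDs of every channel that currently has a RAMFC, and hands it to PFIFO. A stand-alone sketch of that double-buffering pattern follows; the channel table and submit_playlist() are hypothetical, the real driver walks dev_priv->fifos[] and writes into a GPU object before poking the PFIFO registers.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CHANNELS 128

/* Illustrative sketch of the double-buffered playlist rebuild. */
static bool channel_active[NR_CHANNELS];
static uint32_t playlist[2][NR_CHANNELS];
static int cur_playlist;

static void submit_playlist(const uint32_t *list, int nr)
{
	printf("submitting playlist with %d channel(s)\n", nr);
}

static void playlist_update(void)
{
	uint32_t *cur = playlist[cur_playlist];
	int i, nr = 0;

	cur_playlist = !cur_playlist;

	/* channels 0 and 127 are reserved and never scheduled */
	for (i = 1; i < 127; i++) {
		if (channel_active[i])
			cur[nr++] = i;
	}

	submit_playlist(cur, nr);
}

int main(void)
{
	channel_active[3] = channel_active[42] = true;
	playlist_update();
	channel_active[42] = false;
	playlist_update();
	return 0;
}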
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index bb47ad737267..b2fab2bf3d61 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -74,3 +74,38 @@ nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
nv_wr32(dev, r, v);
return 0;
}
+
+void
+nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
+{
+ struct dcb_gpio_entry *gpio;
+ u32 reg, mask;
+
+ gpio = nouveau_bios_gpio_entry(dev, tag);
+ if (!gpio) {
+ NV_ERROR(dev, "gpio tag 0x%02x not found\n", tag);
+ return;
+ }
+
+ reg = gpio->line < 16 ? 0xe050 : 0xe070;
+ mask = 0x00010001 << (gpio->line & 0xf);
+
+ nv_wr32(dev, reg + 4, mask);
+ nv_mask(dev, reg + 0, mask, on ? mask : 0);
+}
+
+int
+nv50_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* disable, and ack any pending gpio interrupts */
+ nv_wr32(dev, 0xe050, 0x00000000);
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ }
+
+ return 0;
+}
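
(Editorial note, not part of the patch: the new nv50_gpio_irq_enable() above derives both the register bank and the interrupt mask from the GPIO line number — lines 0-15 use the 0xe050/0xe054 pair, lines 16-31 use 0xe070/0xe074, and the 0x00010001 pattern sets both edge bits for that line in a single write. A minimal user-space sketch of that derivation follows; gpio_irq_regs() is a hypothetical helper and nothing here touches hardware.)

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch of the bank/mask math in nv50_gpio_irq_enable(). */
static void gpio_irq_regs(unsigned int line, uint32_t *enable_reg,
			  uint32_t *ack_reg, uint32_t *mask)
{
	*enable_reg = (line < 16) ? 0xe050 : 0xe070;	/* enable bank */
	*ack_reg    = *enable_reg + 4;			/* 0xe054 / 0xe074: ack */
	*mask       = 0x00010001u << (line & 0xf);	/* both edge bits */
}

int main(void)
{
	uint32_t en, ack, mask;

	for (unsigned int line = 0; line < 32; line += 9) {
		gpio_irq_regs(line, &en, &ack, &mask);
		printf("line %2u: enable 0x%04x ack 0x%04x mask 0x%08x\n",
		       line, en, ack, mask);
	}
	return 0;
}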
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index b203d06f601f..1413028e1580 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -30,8 +30,6 @@
#include "nouveau_grctx.h"
-#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
-
static void
nv50_graph_init_reset(struct drm_device *dev)
{
@@ -103,37 +101,33 @@ static int
nv50_graph_init_ctxctl(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_grctx ctx = {};
+ uint32_t *cp;
+ int i;
NV_DEBUG(dev, "\n");
- if (nouveau_ctxfw) {
- nouveau_grctx_prog_load(dev);
- dev_priv->engine.graph.grctx_size = 0x70000;
+ cp = kmalloc(512 * 4, GFP_KERNEL);
+ if (!cp) {
+ NV_ERROR(dev, "failed to allocate ctxprog\n");
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
}
- if (!dev_priv->engine.graph.ctxprog) {
- struct nouveau_grctx ctx = {};
- uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
- int i;
- if (!cp) {
- NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
- dev_priv->engine.graph.accel_blocked = true;
- return 0;
- }
- ctx.dev = dev;
- ctx.mode = NOUVEAU_GRCTX_PROG;
- ctx.data = cp;
- ctx.ctxprog_max = 512;
- if (!nv50_grctx_init(&ctx)) {
- dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
- } else {
- dev_priv->engine.graph.accel_blocked = true;
- }
- kfree(cp);
+
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 512;
+ if (!nv50_grctx_init(&ctx)) {
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ } else {
+ dev_priv->engine.graph.accel_blocked = true;
}
+ kfree(cp);
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -164,7 +158,6 @@ void
nv50_graph_takedown(struct drm_device *dev)
{
NV_DEBUG(dev, "\n");
- nouveau_grctx_fini(dev);
}
void
@@ -212,8 +205,9 @@ nv50_graph_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
- struct nouveau_gpuobj *ctx;
+ struct nouveau_gpuobj *obj;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+ struct nouveau_grctx ctx = {};
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -223,10 +217,9 @@ nv50_graph_create_context(struct nouveau_channel *chan)
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
- ctx = chan->ramin_grctx->gpuobj;
+ obj = chan->ramin_grctx->gpuobj;
- hdr = IS_G80 ? 0x200 : 0x20;
- dev_priv->engine.instmem.prepare_access(dev, true);
+ hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
pgraph->grctx_size - 1);
@@ -234,21 +227,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x14)/4, 0x00010000);
- dev_priv->engine.instmem.finish_access(dev);
-
- dev_priv->engine.instmem.prepare_access(dev, true);
- if (!pgraph->ctxprog) {
- struct nouveau_grctx ctx = {};
- ctx.dev = chan->dev;
- ctx.mode = NOUVEAU_GRCTX_VALS;
- ctx.data = chan->ramin_grctx->gpuobj;
- nv50_grctx_init(&ctx);
- } else {
- nouveau_grctx_vals_load(dev, ctx);
- }
- nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
- dev_priv->engine.instmem.finish_access(dev);
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = obj;
+ nv50_grctx_init(&ctx);
+
+ nv_wo32(dev, obj, 0x00000/4, chan->ramin->instance >> 12);
+
+ dev_priv->engine.instmem.flush(dev);
return 0;
}
@@ -257,17 +244,16 @@ nv50_graph_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, hdr = IS_G80 ? 0x200 : 0x20;
+ int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
NV_DEBUG(dev, "ch%d\n", chan->id);
if (!chan->ramin || !chan->ramin->gpuobj)
return;
- dev_priv->engine.instmem.prepare_access(dev, true);
for (i = hdr; i < hdr + 24; i += 4)
nv_wo32(dev, chan->ramin->gpuobj, i/4, 0);
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index 5f21df31f3aa..37c7b48ab24a 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -35,8 +35,6 @@ struct nv50_instmem_priv {
struct nouveau_gpuobj_ref *pramin_pt;
struct nouveau_gpuobj_ref *pramin_bar;
struct nouveau_gpuobj_ref *fb_bar;
-
- bool last_access_wr;
};
#define NV50_INSTMEM_PAGE_SHIFT 12
@@ -147,7 +145,7 @@ nv50_instmem_init(struct drm_device *dev)
if (ret)
return ret;
- if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+ if (drm_mm_init(&chan->ramin_heap, c_base, c_size - c_base))
return -ENOMEM;
/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
@@ -241,7 +239,7 @@ nv50_instmem_init(struct drm_device *dev)
return ret;
BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
- drm_get_resource_len(dev, 1) - 1);
+ pci_resource_len(dev->pdev, 1) - 1);
BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
@@ -262,23 +260,18 @@ nv50_instmem_init(struct drm_device *dev)
/* Assume that praying isn't enough, check that we can re-read the
* entire fake channel back from the PRAMIN BAR */
- dev_priv->engine.instmem.prepare_access(dev, false);
for (i = 0; i < c_size; i += 4) {
if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
i);
- dev_priv->engine.instmem.finish_access(dev);
return -EINVAL;
}
}
- dev_priv->engine.instmem.finish_access(dev);
nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);
/* Global PRAMIN heap */
- if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
- c_size, dev_priv->ramin_size - c_size)) {
- dev_priv->ramin_heap = NULL;
+ if (drm_mm_init(&dev_priv->ramin_heap, c_size, dev_priv->ramin_size - c_size)) {
NV_ERROR(dev, "Failed to init RAMIN heap\n");
}
@@ -321,7 +314,7 @@ nv50_instmem_takedown(struct drm_device *dev)
nouveau_gpuobj_del(dev, &chan->vm_pd);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
nouveau_gpuobj_ref_del(dev, &chan->ramin);
- nouveau_mem_takedown(&chan->ramin_heap);
+ drm_mm_takedown(&chan->ramin_heap);
dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
kfree(chan);
@@ -436,14 +429,14 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
return -EINVAL;
- NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
+ NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
gpuobj->im_pramin->start, gpuobj->im_pramin->size);
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
vram = gpuobj->im_backing_start;
- NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
+ NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
gpuobj->im_pramin->start, pte, pte_end);
NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
@@ -453,27 +446,15 @@ nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
vram |= 0x30;
}
- dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
vram += NV50_INSTMEM_PAGE_SIZE;
}
- dev_priv->engine.instmem.finish_access(dev);
-
- nv_wr32(dev, 0x100c80, 0x00040001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ dev_priv->engine.instmem.flush(dev);
- nv_wr32(dev, 0x100c80, 0x00060001);
- if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
- NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
- return -EBUSY;
- }
+ nv50_vm_flush(dev, 4);
+ nv50_vm_flush(dev, 6);
gpuobj->im_bound = 1;
return 0;
@@ -492,36 +473,37 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
pte = (gpuobj->im_pramin->start >> 12) << 1;
pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
- dev_priv->engine.instmem.prepare_access(dev, true);
while (pte < pte_end) {
nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
}
- dev_priv->engine.instmem.finish_access(dev);
+ dev_priv->engine.instmem.flush(dev);
gpuobj->im_bound = 0;
return 0;
}
void
-nv50_instmem_prepare_access(struct drm_device *dev, bool write)
+nv50_instmem_flush(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
- priv->last_access_wr = write;
+ nv_wr32(dev, 0x00330c, 0x00000001);
+ if (!nv_wait(0x00330c, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
}
void
-nv50_instmem_finish_access(struct drm_device *dev)
+nv84_instmem_flush(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ nv_wr32(dev, 0x070000, 0x00000001);
+ if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+}
- if (priv->last_access_wr) {
- nv_wr32(dev, 0x070000, 0x00000001);
- if (!nv_wait(0x070000, 0x00000001, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
- }
+void
+nv50_vm_flush(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
}
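
(Editorial note, not part of the patch: the prepare_access/finish_access pair is replaced above by explicit flushes, and the two hard-coded writes of 0x00040001 and 0x00060001 that were removed from nv50_instmem_bind() are exactly nv50_vm_flush(dev, 4) and nv50_vm_flush(dev, 6), since the helper writes (engine << 16) | 1 to 0x100c80 and then polls for completion. A tiny sketch of that value computation follows; it only reproduces the arithmetic, not the register access.)

#include <stdint.h>
#include <stdio.h>

/* Illustrative sketch: the value nv50_vm_flush() writes to 0x100c80. */
static uint32_t vm_flush_value(int engine)
{
	return ((uint32_t)engine << 16) | 1;
}

int main(void)
{
	printf("engine 4 -> 0x%08x\n", vm_flush_value(4));	/* 0x00040001 */
	printf("engine 6 -> 0x%08x\n", vm_flush_value(6));	/* 0x00060001 */
	return 0;
}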
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 812778db76ac..bcd4cf84a7e6 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -37,52 +37,32 @@
#include "nv50_display.h"
static void
-nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
+nv50_sor_disconnect(struct drm_encoder *encoder)
{
- struct drm_device *dev = to_drm_encoder(nv_encoder)->dev;
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *evo = dev_priv->evo;
int ret;
+ if (!nv_encoder->crtc)
+ return;
+ nv50_crtc_blank(nouveau_crtc(nv_encoder->crtc), true);
+
NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
- ret = RING_SPACE(evo, 2);
+ ret = RING_SPACE(evo, 4);
if (ret) {
NV_ERROR(dev, "no space while disconnecting SOR\n");
return;
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
- OUT_RING(evo, 0);
-}
-
-static void
-nv50_sor_dp_link_train(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
- struct bit_displayport_encoder_table *dpe;
- int dpe_headerlen;
-
- dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
- if (!dpe) {
- NV_ERROR(dev, "SOR-%d: no DP encoder table!\n", nv_encoder->or);
- return;
- }
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
- if (dpe->script0) {
- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
- nv_encoder->dcb);
- }
-
- if (!nouveau_dp_link_train(encoder))
- NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
-
- if (dpe->script1) {
- NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
- nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
- nv_encoder->dcb);
- }
+ nv_encoder->crtc = NULL;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
static void
@@ -94,14 +74,16 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
uint32_t val;
int or = nv_encoder->or;
- NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
+ NV_DEBUG_KMS(dev, "or %d type %d mode %d\n", or, nv_encoder->dcb->type, mode);
nv_encoder->last_dpms = mode;
list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
struct nouveau_encoder *nvenc = nouveau_encoder(enc);
if (nvenc == nv_encoder ||
- nvenc->disconnect != nv50_sor_disconnect ||
+ (nvenc->dcb->type != OUTPUT_TMDS &&
+ nvenc->dcb->type != OUTPUT_LVDS &&
+ nvenc->dcb->type != OUTPUT_DP) ||
nvenc->dcb->or != nv_encoder->dcb->or)
continue;
@@ -133,8 +115,22 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(or)));
}
- if (nv_encoder->dcb->type == OUTPUT_DP && mode == DRM_MODE_DPMS_ON)
- nv50_sor_dp_link_train(encoder);
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ struct nouveau_i2c_chan *auxch;
+
+ auxch = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
+ if (!auxch)
+ return;
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ u8 status = DP_SET_POWER_D0;
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ nouveau_dp_link_train(encoder);
+ } else {
+ u8 status = DP_SET_POWER_D3;
+ nouveau_dp_auxch(auxch, 8, DP_SET_POWER, &status, 1);
+ }
+ }
}
static void
@@ -196,7 +192,8 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
uint32_t mode_ctl = 0;
int ret;
- NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
+ NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
+ nv_encoder->or, nv_encoder->dcb->type, crtc->index);
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,6 +236,14 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
+
+ nv_encoder->crtc = encoder->crtc;
+}
+
+static struct drm_crtc *
+nv50_sor_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
}
static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
@@ -249,7 +254,9 @@ static const struct drm_encoder_helper_funcs nv50_sor_helper_funcs = {
.prepare = nv50_sor_prepare,
.commit = nv50_sor_commit,
.mode_set = nv50_sor_mode_set,
- .detect = NULL
+ .get_crtc = nv50_sor_crtc_get,
+ .detect = NULL,
+ .disable = nv50_sor_disconnect
};
static void
@@ -272,32 +279,22 @@ static const struct drm_encoder_funcs nv50_sor_encoder_funcs = {
};
int
-nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
+nv50_sor_create(struct drm_connector *connector, struct dcb_entry *entry)
{
struct nouveau_encoder *nv_encoder = NULL;
+ struct drm_device *dev = connector->dev;
struct drm_encoder *encoder;
- bool dum;
int type;
NV_DEBUG_KMS(dev, "\n");
switch (entry->type) {
case OUTPUT_TMDS:
- NV_INFO(dev, "Detected a TMDS output\n");
+ case OUTPUT_DP:
type = DRM_MODE_ENCODER_TMDS;
break;
case OUTPUT_LVDS:
- NV_INFO(dev, "Detected a LVDS output\n");
type = DRM_MODE_ENCODER_LVDS;
-
- if (nouveau_bios_parse_lvds_table(dev, 0, &dum, &dum)) {
- NV_ERROR(dev, "Failed parsing LVDS table\n");
- return -EINVAL;
- }
- break;
- case OUTPUT_DP:
- NV_INFO(dev, "Detected a DP output\n");
- type = DRM_MODE_ENCODER_TMDS;
break;
default:
return -EINVAL;
@@ -310,8 +307,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dcb = entry;
nv_encoder->or = ffs(entry->or) - 1;
-
- nv_encoder->disconnect = nv50_sor_disconnect;
+ nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
drm_encoder_init(dev, encoder, &nv50_sor_encoder_funcs, type);
drm_encoder_helper_add(encoder, &nv50_sor_helper_funcs);
@@ -342,5 +338,6 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->dp.mc_unknown = 5;
}
+ drm_mode_connector_attach_encoder(connector, encoder);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
new file mode 100644
index 000000000000..26a996025dd2
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fb.c
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+int
+nvc0_fb_init(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nvc0_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
new file mode 100644
index 000000000000..d64375871979
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_fifo.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+void
+nvc0_fifo_disable(struct drm_device *dev)
+{
+}
+
+void
+nvc0_fifo_enable(struct drm_device *dev)
+{
+}
+
+bool
+nvc0_fifo_reassign(struct drm_device *dev, bool enable)
+{
+ return false;
+}
+
+bool
+nvc0_fifo_cache_flush(struct drm_device *dev)
+{
+ return true;
+}
+
+bool
+nvc0_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+ return false;
+}
+
+int
+nvc0_fifo_channel_id(struct drm_device *dev)
+{
+ return 127;
+}
+
+int
+nvc0_fifo_create_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+void
+nvc0_fifo_destroy_context(struct nouveau_channel *chan)
+{
+}
+
+int
+nvc0_fifo_load_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+int
+nvc0_fifo_unload_context(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nvc0_fifo_takedown(struct drm_device *dev)
+{
+}
+
+int
+nvc0_fifo_init(struct drm_device *dev)
+{
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
new file mode 100644
index 000000000000..717a5177a8d8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+void
+nvc0_graph_fifo_access(struct drm_device *dev, bool enabled)
+{
+}
+
+struct nouveau_channel *
+nvc0_graph_channel(struct drm_device *dev)
+{
+ return NULL;
+}
+
+int
+nvc0_graph_create_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+void
+nvc0_graph_destroy_context(struct nouveau_channel *chan)
+{
+}
+
+int
+nvc0_graph_load_context(struct nouveau_channel *chan)
+{
+ return 0;
+}
+
+int
+nvc0_graph_unload_context(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nvc0_graph_takedown(struct drm_device *dev)
+{
+}
+
+int
+nvc0_graph_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvc0_instmem.c b/drivers/gpu/drm/nouveau/nvc0_instmem.c
new file mode 100644
index 000000000000..3ab3cdc42173
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_instmem.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+
+int
+nvc0_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ uint32_t *size)
+{
+ int ret;
+
+ *size = ALIGN(*size, 4096);
+ if (*size == 0)
+ return -EINVAL;
+
+ ret = nouveau_bo_new(dev, NULL, *size, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
+ true, false, &gpuobj->im_backing);
+ if (ret) {
+ NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
+ return ret;
+ }
+
+ ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
+ if (ret) {
+ NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ return ret;
+ }
+
+ gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
+ gpuobj->im_backing_start <<= PAGE_SHIFT;
+ return 0;
+}
+
+void
+nvc0_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->engine.instmem.unbind(dev, gpuobj);
+ nouveau_bo_unpin(gpuobj->im_backing);
+ nouveau_bo_ref(NULL, &gpuobj->im_backing);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nvc0_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pte, pte_end;
+ uint64_t vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ NV_DEBUG(dev, "st=0x%lx sz=0x%lx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+ pte = gpuobj->im_pramin->start >> 12;
+ pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+ vram = gpuobj->im_backing_start;
+
+ NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);
+
+ while (pte < pte_end) {
+ nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
+ nv_wr32(dev, 0x702004 + (pte * 8), 0);
+ vram += 4096;
+ pte++;
+ }
+ dev_priv->engine.instmem.flush(dev);
+
+ if (1) {
+ u32 chan = nv_rd32(dev, 0x1700) << 16;
+ nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
+ nv_wr32(dev, 0x100cbc, 0x80000005);
+ }
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nvc0_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t pte, pte_end;
+
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ pte = gpuobj->im_pramin->start >> 12;
+ pte_end = (gpuobj->im_pramin->size >> 12) + pte;
+ while (pte < pte_end) {
+ nv_wr32(dev, 0x702000 + (pte * 8), 0);
+ nv_wr32(dev, 0x702004 + (pte * 8), 0);
+ pte++;
+ }
+ dev_priv->engine.instmem.flush(dev);
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
+void
+nvc0_instmem_flush(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x070000, 1);
+ if (!nv_wait(0x070000, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+}
+
+int
+nvc0_instmem_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ dev_priv->susres.ramin_copy = vmalloc(65536);
+ if (!dev_priv->susres.ramin_copy)
+ return -ENOMEM;
+
+ for (i = 0x700000; i < 0x710000; i += 4)
+ dev_priv->susres.ramin_copy[i/4] = nv_rd32(dev, i);
+ return 0;
+}
+
+void
+nvc0_instmem_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 chan;
+ int i;
+
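+ /* re-point the PRAMIN window at the reserved block at the top of VRAM before replaying the saved contents */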
+ chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
+ nv_wr32(dev, 0x001700, chan >> 16);
+
+ for (i = 0x700000; i < 0x710000; i += 4)
+ nv_wr32(dev, i, dev_priv->susres.ramin_copy[i/4]);
+ vfree(dev_priv->susres.ramin_copy);
+ dev_priv->susres.ramin_copy = NULL;
+
+ nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
+}
+
+int
+nvc0_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
+ int ret, i;
+
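+ /* reserve 1MiB at the top of VRAM for instance memory; the first 40KiB (imem = channel block + page directory + page tables) is kept out of the PRAMIN heap */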
+ dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
+ chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
+ imem = 4096 + 4096 + 32768;
+
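+ /* 0x1700 selects the 64KiB-aligned VRAM block that the PRAMIN window at 0x700000 maps (hence the >> 16) */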
+ nv_wr32(dev, 0x001700, chan >> 16);
+
+ /* channel setup */
+ nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
+ nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
+ nv_wr32(dev, 0x700208, lower_32_bits(lim3));
+ nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
+
+ /* point pgd -> pgt */
+ nv_wr32(dev, 0x701000, 0);
+ nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
+
+ /* point pgt -> physical vram for channel */
+ pgt3 = 0x2000;
+ for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
+ nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
+ nv_wr32(dev, 0x700004 + pgt3, 0);
+ }
+
+ /* clear rest of pgt */
+ for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
+ nv_wr32(dev, 0x700000 + pgt3, 0);
+ nv_wr32(dev, 0x700004 + pgt3, 0);
+ }
+
+ /* point bar3 at the channel */
+ nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
+
+ /* Global PRAMIN heap */
+ ret = drm_mm_init(&dev_priv->ramin_heap, imem,
+ dev_priv->ramin_size - imem);
+ if (ret) {
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ return -ENOMEM;
+ }
+
+ /*XXX: incorrect, but needed to make hash func "work" */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ return 0;
+}
+
+void
+nvc0_instmem_takedown(struct drm_device *dev)
+{
+}
+
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index 5998c35237b0..ad64673ace1f 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -147,28 +147,6 @@
# define NV_VIO_GX_DONT_CARE_INDEX 0x07
# define NV_VIO_GX_BIT_MASK_INDEX 0x08
-#define NV_PFB_BOOT_0 0x00100000
-#define NV_PFB_CFG0 0x00100200
-#define NV_PFB_CFG1 0x00100204
-#define NV_PFB_CSTATUS 0x0010020C
-#define NV_PFB_REFCTRL 0x00100210
-# define NV_PFB_REFCTRL_VALID_1 (1 << 31)
-#define NV_PFB_PAD 0x0010021C
-# define NV_PFB_PAD_CKE_NORMAL (1 << 0)
-#define NV_PFB_TILE_NV10 0x00100240
-#define NV_PFB_TILE_SIZE_NV10 0x00100244
-#define NV_PFB_REF 0x001002D0
-# define NV_PFB_REF_CMD_REFRESH (1 << 0)
-#define NV_PFB_PRE 0x001002D4
-# define NV_PFB_PRE_CMD_PRECHARGE (1 << 0)
-#define NV_PFB_CLOSE_PAGE2 0x0010033C
-#define NV_PFB_TILE_NV40 0x00100600
-#define NV_PFB_TILE_SIZE_NV40 0x00100604
-
-#define NV_PEXTDEV_BOOT_0 0x00101000
-# define NV_PEXTDEV_BOOT_0_STRAP_FP_IFACE_12BIT (8 << 12)
-#define NV_PEXTDEV_BOOT_3 0x0010100c
-
#define NV_PCRTC_INTR_0 0x00600100
# define NV_PCRTC_INTR_0_VBLANK (1 << 0)
#define NV_PCRTC_INTR_EN_0 0x00600140
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index e671d0e74d4c..570e190710bd 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -44,7 +44,7 @@
MODULE_FIRMWARE(FIRMWARE_NAME);
-static int R128_READ_PLL(struct drm_device * dev, int addr)
+static int R128_READ_PLL(struct drm_device *dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -53,7 +53,7 @@ static int R128_READ_PLL(struct drm_device * dev, int addr)
}
#if R128_FIFO_DEBUG
-static void r128_status(drm_r128_private_t * dev_priv)
+static void r128_status(drm_r128_private_t *dev_priv)
{
printk("GUI_STAT = 0x%08x\n",
(unsigned int)R128_READ(R128_GUI_STAT));
@@ -74,7 +74,7 @@ static void r128_status(drm_r128_private_t * dev_priv)
* Engine, FIFO control
*/
-static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
+static int r128_do_pixcache_flush(drm_r128_private_t *dev_priv)
{
u32 tmp;
int i;
@@ -83,9 +83,8 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
R128_WRITE(R128_PC_NGUI_CTLSTAT, tmp);
for (i = 0; i < dev_priv->usec_timeout; i++) {
- if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY)) {
+ if (!(R128_READ(R128_PC_NGUI_CTLSTAT) & R128_PC_BUSY))
return 0;
- }
DRM_UDELAY(1);
}
@@ -95,7 +94,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
return -EBUSY;
}
-static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
+static int r128_do_wait_for_fifo(drm_r128_private_t *dev_priv, int entries)
{
int i;
@@ -112,7 +111,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
return -EBUSY;
}
-static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
+static int r128_do_wait_for_idle(drm_r128_private_t *dev_priv)
{
int i, ret;
@@ -189,7 +188,7 @@ out_release:
* prior to a wait for idle, as it informs the engine that the command
* stream is ending.
*/
-static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
+static void r128_do_cce_flush(drm_r128_private_t *dev_priv)
{
u32 tmp;
@@ -199,7 +198,7 @@ static void r128_do_cce_flush(drm_r128_private_t * dev_priv)
/* Wait for the CCE to go idle.
*/
-int r128_do_cce_idle(drm_r128_private_t * dev_priv)
+int r128_do_cce_idle(drm_r128_private_t *dev_priv)
{
int i;
@@ -225,7 +224,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
/* Start the Concurrent Command Engine.
*/
-static void r128_do_cce_start(drm_r128_private_t * dev_priv)
+static void r128_do_cce_start(drm_r128_private_t *dev_priv)
{
r128_do_wait_for_idle(dev_priv);
@@ -242,7 +241,7 @@ static void r128_do_cce_start(drm_r128_private_t * dev_priv)
* commands, so you must wait for the CCE command stream to complete
* before calling this routine.
*/
-static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
+static void r128_do_cce_reset(drm_r128_private_t *dev_priv)
{
R128_WRITE(R128_PM4_BUFFER_DL_WPTR, 0);
R128_WRITE(R128_PM4_BUFFER_DL_RPTR, 0);
@@ -253,7 +252,7 @@ static void r128_do_cce_reset(drm_r128_private_t * dev_priv)
* commands, so you must flush the command stream and wait for the CCE
* to go idle before calling this routine.
*/
-static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
+static void r128_do_cce_stop(drm_r128_private_t *dev_priv)
{
R128_WRITE(R128_PM4_MICRO_CNTL, 0);
R128_WRITE(R128_PM4_BUFFER_CNTL,
@@ -264,7 +263,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
/* Reset the engine. This will stop the CCE if it is running.
*/
-static int r128_do_engine_reset(struct drm_device * dev)
+static int r128_do_engine_reset(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@@ -301,8 +300,8 @@ static int r128_do_engine_reset(struct drm_device * dev)
return 0;
}
-static void r128_cce_init_ring_buffer(struct drm_device * dev,
- drm_r128_private_t * dev_priv)
+static void r128_cce_init_ring_buffer(struct drm_device *dev,
+ drm_r128_private_t *dev_priv)
{
u32 ring_start;
u32 tmp;
@@ -340,7 +339,7 @@ static void r128_cce_init_ring_buffer(struct drm_device * dev,
R128_WRITE(R128_BUS_CNTL, tmp);
}
-static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
+static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
{
drm_r128_private_t *dev_priv;
int rc;
@@ -588,7 +587,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
return rc;
}
-int r128_do_cleanup_cce(struct drm_device * dev)
+int r128_do_cleanup_cce(struct drm_device *dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -682,9 +681,8 @@ int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv
/* Flush any pending CCE commands. This ensures any outstanding
 * commands are executed by the engine before we turn it off.
*/
- if (stop->flush) {
+ if (stop->flush)
r128_do_cce_flush(dev_priv);
- }
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
@@ -735,9 +733,8 @@ int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv
DEV_INIT_TEST_WITH_RETURN(dev_priv);
- if (dev_priv->cce_running) {
+ if (dev_priv->cce_running)
r128_do_cce_flush(dev_priv);
- }
return r128_do_cce_idle(dev_priv);
}
@@ -765,7 +762,7 @@ int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_pr
#define R128_BUFFER_FREE 0
#if 0
-static int r128_freelist_init(struct drm_device * dev)
+static int r128_freelist_init(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -848,7 +845,7 @@ static struct drm_buf *r128_freelist_get(struct drm_device * dev)
return NULL;
}
-void r128_freelist_reset(struct drm_device * dev)
+void r128_freelist_reset(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i;
@@ -864,7 +861,7 @@ void r128_freelist_reset(struct drm_device * dev)
* CCE command submission
*/
-int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
+int r128_wait_ring(drm_r128_private_t *dev_priv, int n)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
int i;
@@ -881,9 +878,9 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
return -EBUSY;
}
-static int r128_cce_get_buffers(struct drm_device * dev,
+static int r128_cce_get_buffers(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_dma * d)
+ struct drm_dma *d)
{
int i;
struct drm_buf *buf;
@@ -933,9 +930,8 @@ int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_p
d->granted_count = 0;
- if (d->request_count) {
+ if (d->request_count)
ret = r128_cce_get_buffers(dev, file_priv, d);
- }
return ret;
}
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index b806fdcc7170..1e2971f13aa1 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -85,7 +85,7 @@ static struct drm_driver driver = {
.patchlevel = DRIVER_PATCHLEVEL,
};
-int r128_driver_load(struct drm_device * dev, unsigned long flags)
+int r128_driver_load(struct drm_device *dev, unsigned long flags)
{
return drm_vblank_init(dev, 1);
}
diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
index 3c60829d82e9..930c71b2fb5e 100644
--- a/drivers/gpu/drm/r128/r128_drv.h
+++ b/drivers/gpu/drm/r128/r128_drv.h
@@ -53,7 +53,7 @@
#define DRIVER_MINOR 5
#define DRIVER_PATCHLEVEL 0
-#define GET_RING_HEAD(dev_priv) R128_READ( R128_PM4_BUFFER_DL_RPTR )
+#define GET_RING_HEAD(dev_priv) R128_READ(R128_PM4_BUFFER_DL_RPTR)
typedef struct drm_r128_freelist {
unsigned int age;
@@ -144,23 +144,23 @@ extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file
extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern void r128_freelist_reset(struct drm_device * dev);
+extern void r128_freelist_reset(struct drm_device *dev);
-extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
+extern int r128_wait_ring(drm_r128_private_t *dev_priv, int n);
-extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
-extern int r128_do_cleanup_cce(struct drm_device * dev);
+extern int r128_do_cce_idle(drm_r128_private_t *dev_priv);
+extern int r128_do_cleanup_cce(struct drm_device *dev);
extern int r128_enable_vblank(struct drm_device *dev, int crtc);
extern void r128_disable_vblank(struct drm_device *dev, int crtc);
extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
-extern void r128_driver_irq_preinstall(struct drm_device * dev);
+extern void r128_driver_irq_preinstall(struct drm_device *dev);
extern int r128_driver_irq_postinstall(struct drm_device *dev);
-extern void r128_driver_irq_uninstall(struct drm_device * dev);
-extern void r128_driver_lastclose(struct drm_device * dev);
-extern int r128_driver_load(struct drm_device * dev, unsigned long flags);
-extern void r128_driver_preclose(struct drm_device * dev,
+extern void r128_driver_irq_uninstall(struct drm_device *dev);
+extern void r128_driver_lastclose(struct drm_device *dev);
+extern int r128_driver_load(struct drm_device *dev, unsigned long flags);
+extern void r128_driver_preclose(struct drm_device *dev,
struct drm_file *file_priv);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
@@ -390,27 +390,27 @@ extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
#define R128_PCIGART_TABLE_SIZE 32768
-#define R128_READ(reg) DRM_READ32( dev_priv->mmio, (reg) )
-#define R128_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) )
-#define R128_READ8(reg) DRM_READ8( dev_priv->mmio, (reg) )
-#define R128_WRITE8(reg,val) DRM_WRITE8( dev_priv->mmio, (reg), (val) )
+#define R128_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
+#define R128_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
+#define R128_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
+#define R128_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
-#define R128_WRITE_PLL(addr,val) \
+#define R128_WRITE_PLL(addr, val) \
do { \
R128_WRITE8(R128_CLOCK_CNTL_INDEX, \
((addr) & 0x1f) | R128_PLL_WR_EN); \
R128_WRITE(R128_CLOCK_CNTL_DATA, (val)); \
} while (0)
-#define CCE_PACKET0( reg, n ) (R128_CCE_PACKET0 | \
+#define CCE_PACKET0(reg, n) (R128_CCE_PACKET0 | \
((n) << 16) | ((reg) >> 2))
-#define CCE_PACKET1( reg0, reg1 ) (R128_CCE_PACKET1 | \
+#define CCE_PACKET1(reg0, reg1) (R128_CCE_PACKET1 | \
(((reg1) >> 2) << 11) | ((reg0) >> 2))
#define CCE_PACKET2() (R128_CCE_PACKET2)
-#define CCE_PACKET3( pkt, n ) (R128_CCE_PACKET3 | \
+#define CCE_PACKET3(pkt, n) (R128_CCE_PACKET3 | \
(pkt) | ((n) << 16))
-static __inline__ void r128_update_ring_snapshot(drm_r128_private_t * dev_priv)
+static __inline__ void r128_update_ring_snapshot(drm_r128_private_t *dev_priv)
{
drm_r128_ring_buffer_t *ring = &dev_priv->ring;
ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32);
@@ -430,37 +430,38 @@ do { \
} \
} while (0)
-#define RING_SPACE_TEST_WITH_RETURN( dev_priv ) \
+#define RING_SPACE_TEST_WITH_RETURN(dev_priv) \
do { \
drm_r128_ring_buffer_t *ring = &dev_priv->ring; int i; \
- if ( ring->space < ring->high_mark ) { \
- for ( i = 0 ; i < dev_priv->usec_timeout ; i++ ) { \
- r128_update_ring_snapshot( dev_priv ); \
- if ( ring->space >= ring->high_mark ) \
+ if (ring->space < ring->high_mark) { \
+ for (i = 0 ; i < dev_priv->usec_timeout ; i++) { \
+ r128_update_ring_snapshot(dev_priv); \
+ if (ring->space >= ring->high_mark) \
goto __ring_space_done; \
- DRM_UDELAY(1); \
+ DRM_UDELAY(1); \
} \
- DRM_ERROR( "ring space check failed!\n" ); \
- return -EBUSY; \
+ DRM_ERROR("ring space check failed!\n"); \
+ return -EBUSY; \
} \
__ring_space_done: \
; \
} while (0)
-#define VB_AGE_TEST_WITH_RETURN( dev_priv ) \
+#define VB_AGE_TEST_WITH_RETURN(dev_priv) \
do { \
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; \
- if ( sarea_priv->last_dispatch >= R128_MAX_VB_AGE ) { \
- int __ret = r128_do_cce_idle( dev_priv ); \
- if ( __ret ) return __ret; \
+ if (sarea_priv->last_dispatch >= R128_MAX_VB_AGE) { \
+ int __ret = r128_do_cce_idle(dev_priv); \
+ if (__ret) \
+ return __ret; \
sarea_priv->last_dispatch = 0; \
- r128_freelist_reset( dev ); \
+ r128_freelist_reset(dev); \
} \
} while (0)
#define R128_WAIT_UNTIL_PAGE_FLIPPED() do { \
- OUT_RING( CCE_PACKET0( R128_WAIT_UNTIL, 0 ) ); \
- OUT_RING( R128_EVENT_CRTC_OFFSET ); \
+ OUT_RING(CCE_PACKET0(R128_WAIT_UNTIL, 0)); \
+ OUT_RING(R128_EVENT_CRTC_OFFSET); \
} while (0)
/* ================================================================
@@ -472,13 +473,12 @@ do { \
#define RING_LOCALS \
int write, _nr; unsigned int tail_mask; volatile u32 *ring;
-#define BEGIN_RING( n ) do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
- } \
- if ( dev_priv->ring.space <= (n) * sizeof(u32) ) { \
+#define BEGIN_RING(n) do { \
+ if (R128_VERBOSE) \
+ DRM_INFO("BEGIN_RING(%d)\n", (n)); \
+ if (dev_priv->ring.space <= (n) * sizeof(u32)) { \
COMMIT_RING(); \
- r128_wait_ring( dev_priv, (n) * sizeof(u32) ); \
+ r128_wait_ring(dev_priv, (n) * sizeof(u32)); \
} \
_nr = n; dev_priv->ring.space -= (n) * sizeof(u32); \
ring = dev_priv->ring.start; \
@@ -494,40 +494,36 @@ do { \
#define R128_BROKEN_CCE 1
#define ADVANCE_RING() do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
- write, dev_priv->ring.tail ); \
- } \
- if ( R128_BROKEN_CCE && write < 32 ) { \
- memcpy( dev_priv->ring.end, \
- dev_priv->ring.start, \
- write * sizeof(u32) ); \
- } \
- if (((dev_priv->ring.tail + _nr) & tail_mask) != write) { \
+ if (R128_VERBOSE) \
+ DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
+ write, dev_priv->ring.tail); \
+ if (R128_BROKEN_CCE && write < 32) \
+ memcpy(dev_priv->ring.end, \
+ dev_priv->ring.start, \
+ write * sizeof(u32)); \
+ if (((dev_priv->ring.tail + _nr) & tail_mask) != write) \
DRM_ERROR( \
"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n", \
((dev_priv->ring.tail + _nr) & tail_mask), \
write, __LINE__); \
- } else \
+ else \
dev_priv->ring.tail = write; \
} while (0)
#define COMMIT_RING() do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( "COMMIT_RING() tail=0x%06x\n", \
- dev_priv->ring.tail ); \
- } \
+ if (R128_VERBOSE) \
+ DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
+ dev_priv->ring.tail); \
DRM_MEMORYBARRIER(); \
- R128_WRITE( R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail ); \
- R128_READ( R128_PM4_BUFFER_DL_WPTR ); \
+ R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
+ R128_READ(R128_PM4_BUFFER_DL_WPTR); \
} while (0)
-#define OUT_RING( x ) do { \
- if ( R128_VERBOSE ) { \
- DRM_INFO( " OUT_RING( 0x%08x ) at 0x%x\n", \
- (unsigned int)(x), write ); \
- } \
- ring[write++] = cpu_to_le32( x ); \
+#define OUT_RING(x) do { \
+ if (R128_VERBOSE) \
+ DRM_INFO(" OUT_RING( 0x%08x ) at 0x%x\n", \
+ (unsigned int)(x), write); \
+ ring[write++] = cpu_to_le32(x); \
write &= tail_mask; \
} while (0)
diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
index 69810fb8ac49..429d5a02695f 100644
--- a/drivers/gpu/drm/r128/r128_irq.c
+++ b/drivers/gpu/drm/r128/r128_irq.c
@@ -90,7 +90,7 @@ void r128_disable_vblank(struct drm_device *dev, int crtc)
*/
}
-void r128_driver_irq_preinstall(struct drm_device * dev)
+void r128_driver_irq_preinstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@@ -105,7 +105,7 @@ int r128_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void r128_driver_irq_uninstall(struct drm_device * dev)
+void r128_driver_irq_uninstall(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index af2665cf4718..077af1f2f9b4 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -37,8 +37,8 @@
* CCE hardware state programming functions
*/
-static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
- struct drm_clip_rect * boxes, int count)
+static void r128_emit_clip_rects(drm_r128_private_t *dev_priv,
+ struct drm_clip_rect *boxes, int count)
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
@@ -80,7 +80,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
ADVANCE_RING();
}
-static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_core(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -95,7 +95,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_context(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -121,7 +121,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_setup(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -137,7 +137,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_masks(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -156,7 +156,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_window(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -171,7 +171,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_tex0(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_context_regs_t *ctx = &sarea_priv->context_state;
@@ -187,9 +187,8 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
OUT_RING(tex->tex_cntl);
OUT_RING(tex->tex_combine_cntl);
OUT_RING(ctx->tex_size_pitch_c);
- for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
+ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
OUT_RING(tex->tex_offset[i]);
- }
OUT_RING(CCE_PACKET0(R128_CONSTANT_COLOR_C, 1));
OUT_RING(ctx->constant_color_c);
@@ -198,7 +197,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
+static __inline__ void r128_emit_tex1(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];
@@ -211,9 +210,8 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
OUT_RING(CCE_PACKET0(R128_SEC_TEX_CNTL_C, 1 + R128_MAX_TEXTURE_LEVELS));
OUT_RING(tex->tex_cntl);
OUT_RING(tex->tex_combine_cntl);
- for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++) {
+ for (i = 0; i < R128_MAX_TEXTURE_LEVELS; i++)
OUT_RING(tex->tex_offset[i]);
- }
OUT_RING(CCE_PACKET0(R128_SEC_TEXTURE_BORDER_COLOR_C, 0));
OUT_RING(tex->tex_border_color);
@@ -221,7 +219,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)
ADVANCE_RING();
}
-static void r128_emit_state(drm_r128_private_t * dev_priv)
+static void r128_emit_state(drm_r128_private_t *dev_priv)
{
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
@@ -274,7 +272,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)
* Performance monitoring functions
*/
-static void r128_clear_box(drm_r128_private_t * dev_priv,
+static void r128_clear_box(drm_r128_private_t *dev_priv,
int x, int y, int w, int h, int r, int g, int b)
{
u32 pitch, offset;
@@ -321,13 +319,12 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
ADVANCE_RING();
}
-static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
+static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
{
- if (atomic_read(&dev_priv->idle_count) == 0) {
+ if (atomic_read(&dev_priv->idle_count) == 0)
r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
- } else {
+ else
atomic_set(&dev_priv->idle_count, 0);
- }
}
#endif
@@ -352,8 +349,8 @@ static void r128_print_dirty(const char *msg, unsigned int flags)
(flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
-static void r128_cce_dispatch_clear(struct drm_device * dev,
- drm_r128_clear_t * clear)
+static void r128_cce_dispatch_clear(struct drm_device *dev,
+ drm_r128_clear_t *clear)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -458,7 +455,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,
}
}
-static void r128_cce_dispatch_swap(struct drm_device * dev)
+static void r128_cce_dispatch_swap(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -524,7 +521,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_flip(struct drm_device * dev)
+static void r128_cce_dispatch_flip(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -542,11 +539,10 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
R128_WAIT_UNTIL_PAGE_FLIPPED();
OUT_RING(CCE_PACKET0(R128_CRTC_OFFSET, 0));
- if (dev_priv->current_page == 0) {
+ if (dev_priv->current_page == 0)
OUT_RING(dev_priv->back_offset);
- } else {
+ else
OUT_RING(dev_priv->front_offset);
- }
ADVANCE_RING();
@@ -566,7 +562,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
+static void r128_cce_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -585,9 +581,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b
if (buf->used) {
buf_priv->dispatched = 1;
- if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
+ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
r128_emit_state(dev_priv);
- }
do {
/* Emit the next set of up to three cliprects */
@@ -636,8 +631,8 @@ static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * b
sarea_priv->nbox = 0;
}
-static void r128_cce_dispatch_indirect(struct drm_device * dev,
- struct drm_buf * buf, int start, int end)
+static void r128_cce_dispatch_indirect(struct drm_device *dev,
+ struct drm_buf *buf, int start, int end)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -691,8 +686,8 @@ static void r128_cce_dispatch_indirect(struct drm_device * dev,
dev_priv->sarea_priv->last_dispatch++;
}
-static void r128_cce_dispatch_indices(struct drm_device * dev,
- struct drm_buf * buf,
+static void r128_cce_dispatch_indices(struct drm_device *dev,
+ struct drm_buf *buf,
int start, int end, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -713,9 +708,8 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
if (start != end) {
buf_priv->dispatched = 1;
- if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS) {
+ if (sarea_priv->dirty & ~R128_UPLOAD_CLIPRECTS)
r128_emit_state(dev_priv);
- }
dwords = (end - start + 3) / sizeof(u32);
@@ -775,9 +769,9 @@ static void r128_cce_dispatch_indices(struct drm_device * dev,
sarea_priv->nbox = 0;
}
-static int r128_cce_dispatch_blit(struct drm_device * dev,
+static int r128_cce_dispatch_blit(struct drm_device *dev,
struct drm_file *file_priv,
- drm_r128_blit_t * blit)
+ drm_r128_blit_t *blit)
{
drm_r128_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
@@ -887,8 +881,8 @@ static int r128_cce_dispatch_blit(struct drm_device * dev,
* have hardware stencil support.
*/
-static int r128_cce_dispatch_write_span(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_write_span(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, x, y;
@@ -902,12 +896,10 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
+ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
return -EFAULT;
- }
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
+ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
return -EFAULT;
- }
buffer_size = depth->n * sizeof(u32);
buffer = kmalloc(buffer_size, GFP_KERNEL);
@@ -983,8 +975,8 @@ static int r128_cce_dispatch_write_span(struct drm_device * dev,
return 0;
}
-static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, *x, *y;
@@ -1001,9 +993,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL) {
+ if (x == NULL)
return -ENOMEM;
- }
y = kmalloc(ybuf_size, GFP_KERNEL);
if (y == NULL) {
kfree(x);
@@ -1105,8 +1096,8 @@ static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
return 0;
}
-static int r128_cce_dispatch_read_span(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_read_span(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, x, y;
@@ -1117,12 +1108,10 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
+ if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x)))
return -EFAULT;
- }
- if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
+ if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y)))
return -EFAULT;
- }
BEGIN_RING(7);
@@ -1148,8 +1137,8 @@ static int r128_cce_dispatch_read_span(struct drm_device * dev,
return 0;
}
-static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
- drm_r128_depth_t * depth)
+static int r128_cce_dispatch_read_pixels(struct drm_device *dev,
+ drm_r128_depth_t *depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int count, *x, *y;
@@ -1161,16 +1150,14 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
if (count > 4096 || count <= 0)
return -EMSGSIZE;
- if (count > dev_priv->depth_pitch) {
+ if (count > dev_priv->depth_pitch)
count = dev_priv->depth_pitch;
- }
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = kmalloc(xbuf_size, GFP_KERNEL);
- if (x == NULL) {
+ if (x == NULL)
return -ENOMEM;
- }
y = kmalloc(ybuf_size, GFP_KERNEL);
if (y == NULL) {
kfree(x);
@@ -1220,7 +1207,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
* Polygon stipple
*/
-static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
+static void r128_cce_dispatch_stipple(struct drm_device *dev, u32 *stipple)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
@@ -1230,9 +1217,8 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
BEGIN_RING(33);
OUT_RING(CCE_PACKET0(R128_BRUSH_DATA0, 31));
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < 32; i++)
OUT_RING(stipple[i]);
- }
ADVANCE_RING();
}
@@ -1269,7 +1255,7 @@ static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *f
return 0;
}
-static int r128_do_init_pageflip(struct drm_device * dev)
+static int r128_do_init_pageflip(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1288,7 +1274,7 @@ static int r128_do_init_pageflip(struct drm_device * dev)
return 0;
}
-static int r128_do_cleanup_pageflip(struct drm_device * dev)
+static int r128_do_cleanup_pageflip(struct drm_device *dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1645,17 +1631,16 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
return 0;
}
-void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
+void r128_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
- if (dev_priv->page_flipping) {
+ if (dev_priv->page_flipping)
r128_do_cleanup_pageflip(dev);
- }
}
}
-void r128_driver_lastclose(struct drm_device * dev)
+void r128_driver_lastclose(struct drm_device *dev)
{
r128_do_cleanup_cce(dev);
}
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 84b1f2729d43..aebe00875041 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -69,5 +69,6 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
+radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 1d569830ed99..8e421f644a54 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -108,12 +108,11 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base++;
break;
case ATOM_IIO_READ:
- temp = ctx->card->reg_read(ctx->card, CU16(base + 1));
+ temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1));
base += 3;
break;
case ATOM_IIO_WRITE:
- (void)ctx->card->reg_read(ctx->card, CU16(base + 1));
- ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
+ ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
case ATOM_IIO_CLEAR:
@@ -715,8 +714,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 1000)) {
- DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 5000)) {
+ DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
ctx->abort = true;
}
} else {
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index cd1b64ab5ca7..a589a55b223e 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -113,6 +113,8 @@ struct card_info {
struct drm_device *dev;
void (* reg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* reg_read)(struct card_info *, uint32_t); /* filled by driver */
+ void (* ioreg_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
+ uint32_t (* ioreg_read)(struct card_info *, uint32_t); /* filled by driver */
void (* mc_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
uint32_t (* mc_read)(struct card_info *, uint32_t); /* filled by driver */
void (* pll_write)(struct card_info *, uint32_t, uint32_t); /* filled by driver */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 8c2d6478a221..12ad512bd3d3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -44,10 +44,6 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
memset(&args, 0, sizeof(args));
- args.usOverscanRight = 0;
- args.usOverscanLeft = 0;
- args.usOverscanBottom = 0;
- args.usOverscanTop = 0;
args.ucCRTC = radeon_crtc->crtc_id;
switch (radeon_crtc->rmx_type) {
@@ -56,7 +52,6 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_ASPECT:
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
@@ -69,17 +64,16 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2;
}
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
break;
case RMX_FULL:
default:
- args.usOverscanRight = 0;
- args.usOverscanLeft = 0;
- args.usOverscanBottom = 0;
- args.usOverscanTop = 0;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.usOverscanRight = radeon_crtc->h_border;
+ args.usOverscanLeft = radeon_crtc->h_border;
+ args.usOverscanBottom = radeon_crtc->v_border;
+ args.usOverscanTop = radeon_crtc->v_border;
break;
}
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
static void atombios_scaler_setup(struct drm_crtc *crtc)
@@ -282,22 +276,22 @@ atombios_set_crtc_dtd_timing(struct drm_crtc *crtc,
u16 misc = 0;
memset(&args, 0, sizeof(args));
- args.usH_Size = cpu_to_le16(mode->crtc_hdisplay);
+ args.usH_Size = cpu_to_le16(mode->crtc_hdisplay - (radeon_crtc->h_border * 2));
args.usH_Blanking_Time =
- cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay);
- args.usV_Size = cpu_to_le16(mode->crtc_vdisplay);
+ cpu_to_le16(mode->crtc_hblank_end - mode->crtc_hdisplay + (radeon_crtc->h_border * 2));
+ args.usV_Size = cpu_to_le16(mode->crtc_vdisplay - (radeon_crtc->v_border * 2));
args.usV_Blanking_Time =
- cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay);
+ cpu_to_le16(mode->crtc_vblank_end - mode->crtc_vdisplay + (radeon_crtc->v_border * 2));
args.usH_SyncOffset =
- cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay);
+ cpu_to_le16(mode->crtc_hsync_start - mode->crtc_hdisplay + radeon_crtc->h_border);
args.usH_SyncWidth =
cpu_to_le16(mode->crtc_hsync_end - mode->crtc_hsync_start);
args.usV_SyncOffset =
- cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay);
+ cpu_to_le16(mode->crtc_vsync_start - mode->crtc_vdisplay + radeon_crtc->v_border);
args.usV_SyncWidth =
cpu_to_le16(mode->crtc_vsync_end - mode->crtc_vsync_start);
- /*args.ucH_Border = mode->hborder;*/
- /*args.ucV_Border = mode->vborder;*/
+ args.ucH_Border = radeon_crtc->h_border;
+ args.ucV_Border = radeon_crtc->v_border;
if (mode->flags & DRM_MODE_FLAG_NVSYNC)
misc |= ATOM_VSYNC_POLARITY;
@@ -669,56 +663,25 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static void atombios_crtc_program_pll(struct drm_crtc *crtc,
+ int crtc_id,
+ int pll_id,
+ u32 encoder_mode,
+ u32 encoder_id,
+ u32 clock,
+ u32 ref_div,
+ u32 fb_div,
+ u32 frac_fb_div,
+ u32 post_div)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- struct drm_encoder *encoder = NULL;
- struct radeon_encoder *radeon_encoder = NULL;
u8 frev, crev;
- int index;
+ int index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
union set_pixel_clock args;
- u32 pll_clock = mode->clock;
- u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
- struct radeon_pll *pll;
- u32 adjusted_clock;
- int encoder_mode = 0;
memset(&args, 0, sizeof(args));
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (encoder->crtc == crtc) {
- radeon_encoder = to_radeon_encoder(encoder);
- encoder_mode = atombios_get_encoder_mode(encoder);
- break;
- }
- }
-
- if (!radeon_encoder)
- return;
-
- switch (radeon_crtc->pll_id) {
- case ATOM_PPLL1:
- pll = &rdev->clock.p1pll;
- break;
- case ATOM_PPLL2:
- pll = &rdev->clock.p2pll;
- break;
- case ATOM_DCPLL:
- case ATOM_PPLL_INVALID:
- default:
- pll = &rdev->clock.dcpll;
- break;
- }
-
- /* adjust pixel clock as needed */
- adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
-
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
-
- index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev))
return;
@@ -727,47 +690,49 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
case 1:
switch (crev) {
case 1:
- args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
+ if (clock == ATOM_DISABLE)
+ return;
+ args.v1.usPixelClock = cpu_to_le16(clock / 10);
args.v1.usRefDiv = cpu_to_le16(ref_div);
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
- args.v1.ucPpll = radeon_crtc->pll_id;
- args.v1.ucCRTC = radeon_crtc->crtc_id;
+ args.v1.ucPpll = pll_id;
+ args.v1.ucCRTC = crtc_id;
args.v1.ucRefDivSrc = 1;
break;
case 2:
- args.v2.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v2.usPixelClock = cpu_to_le16(clock / 10);
args.v2.usRefDiv = cpu_to_le16(ref_div);
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
- args.v2.ucPpll = radeon_crtc->pll_id;
- args.v2.ucCRTC = radeon_crtc->crtc_id;
+ args.v2.ucPpll = pll_id;
+ args.v2.ucCRTC = crtc_id;
args.v2.ucRefDivSrc = 1;
break;
case 3:
- args.v3.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.usPixelClock = cpu_to_le16(clock / 10);
args.v3.usRefDiv = cpu_to_le16(ref_div);
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
- args.v3.ucPpll = radeon_crtc->pll_id;
- args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
- args.v3.ucTransmitterId = radeon_encoder->encoder_id;
+ args.v3.ucPpll = pll_id;
+ args.v3.ucMiscInfo = (pll_id << 2);
+ args.v3.ucTransmitterId = encoder_id;
args.v3.ucEncoderMode = encoder_mode;
break;
case 5:
- args.v5.ucCRTC = radeon_crtc->crtc_id;
- args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v5.ucCRTC = crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(clock / 10);
args.v5.ucRefDiv = ref_div;
args.v5.usFbDiv = cpu_to_le16(fb_div);
args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
args.v5.ucPostDiv = post_div;
args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
- args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v5.ucTransmitterID = encoder_id;
args.v5.ucEncoderMode = encoder_mode;
- args.v5.ucPpll = radeon_crtc->pll_id;
+ args.v5.ucPpll = pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -782,6 +747,56 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder = NULL;
+ struct radeon_encoder *radeon_encoder = NULL;
+ u32 pll_clock = mode->clock;
+ u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
+ struct radeon_pll *pll;
+ u32 adjusted_clock;
+ int encoder_mode = 0;
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc == crtc) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
+ break;
+ }
+ }
+
+ if (!radeon_encoder)
+ return;
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ pll = &rdev->clock.p1pll;
+ break;
+ case ATOM_PPLL2:
+ pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ default:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
+
+ /* adjust pixel clock as needed */
+ adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
+
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
+
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ encoder_mode, radeon_encoder->encoder_id, mode->clock,
+ ref_div, fb_div, frac_fb_div, post_div);
+
+}
+
static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -797,7 +812,7 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -841,6 +856,11 @@ static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
+
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
@@ -931,7 +951,7 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -979,11 +999,18 @@ static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
- fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
+ if (rdev->family >= CHIP_R600) {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1;
+ } else {
+ if (tiling_flags & RADEON_TILING_MACRO)
+ fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
- if (tiling_flags & RADEON_TILING_MICRO)
- fb_format |= AVIVO_D1GRPH_TILED;
+ if (tiling_flags & RADEON_TILING_MICRO)
+ fb_format |= AVIVO_D1GRPH_TILED;
+ }
if (radeon_crtc->crtc_id == 0)
WREG32(AVIVO_D1VGA_CONTROL, 0);
@@ -1143,10 +1170,8 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_enable_ss(crtc);
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_AVIVO(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- else if (ASIC_IS_AVIVO(rdev))
- atombios_crtc_set_timing(crtc, adjusted_mode);
else {
atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
@@ -1191,6 +1216,24 @@ static void atombios_crtc_commit(struct drm_crtc *crtc)
atombios_lock_crtc(crtc, ATOM_DISABLE);
}
+static void atombios_crtc_disable(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ case ATOM_PPLL2:
+ /* disable the ppll */
+ atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
+ 0, 0, ATOM_DISABLE, 0, 0, 0, 0);
+ break;
+ default:
+ break;
+ }
+ radeon_crtc->pll_id = -1;
+}
+
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.dpms = atombios_crtc_dpms,
.mode_fixup = atombios_crtc_mode_fixup,
@@ -1199,6 +1242,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.load_lut = radeon_crtc_load_lut,
+ .disable = atombios_crtc_disable,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index abffb1499e22..36e0d4b545e6 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -296,7 +296,7 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
- DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+ DRM_DEBUG_KMS("requested signal parameters: lane %d voltage %s pre_emph %s\n",
lane,
voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
@@ -313,7 +313,7 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
if (p >= dp_pre_emphasis_max(v))
p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
- DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+ DRM_DEBUG_KMS("using signal parameters: voltage %s pre_emph %s\n",
voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
@@ -358,7 +358,7 @@ retry:
if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
- DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
+ DRM_DEBUG_KMS("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
@@ -461,10 +461,10 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
memcpy(dig_connector->dpcd, msg, 8);
{
int i;
- DRM_DEBUG("DPCD: ");
+ DRM_DEBUG_KMS("DPCD: ");
for (i = 0; i < 8; i++)
- DRM_DEBUG("%02x ", msg[i]);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("%02x ", msg[i]);
+ DRM_DEBUG_KMS("\n");
}
return true;
}
@@ -512,7 +512,7 @@ static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
return false;
}
- DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+ DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
return true;
@@ -695,7 +695,7 @@ void dp_link_train(struct drm_encoder *encoder,
if (!clock_recovery)
DRM_ERROR("clock recovery failed\n");
else
- DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+ DRM_DEBUG_KMS("clock recovery at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT);
@@ -739,7 +739,7 @@ void dp_link_train(struct drm_encoder *encoder,
if (!channel_eq)
DRM_ERROR("channel eq failed\n");
else
- DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+ DRM_DEBUG_KMS("channel eq at voltage %d pre-emphasis %d\n",
train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
(train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 1caf625e472b..957d5067ad9c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -39,6 +39,23 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
+/* get temperature in millidegrees */
+u32 evergreen_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ u32 actual_temp = 0;
+
+ if ((temp >> 10) & 1)
+ actual_temp = 0;
+ else if ((temp >> 9) & 1)
+ actual_temp = 255;
+ else
+ actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1115,6 +1132,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
+ rdev->config.evergreen.tile_config = gb_addr_config;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
@@ -1334,8 +1352,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
/* size in MB on evergreen */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index e028c1cd9d9b..2330f3a36fd5 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -61,6 +61,11 @@
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
+# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
+# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
+# define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1 2
+# define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1 4
#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
# define EVERGREEN_GRPH_ENDIAN_NONE 0
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index a1cd621780e2..9b7532dd30f7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -165,6 +165,11 @@
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
+#define CG_MULT_THERMAL_STATUS 0x740
+#define ASIC_T(x) ((x) << 16)
+#define ASIC_T_MASK 0x7FF0000
+#define ASIC_T_SHIFT 16
+
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index a89a15ab524d..e817a0bb5eb4 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -141,7 +141,7 @@ void r100_pm_get_dynpm_state(struct radeon_device *rdev)
/* only one clock mode per power state */
rdev->pm.requested_clock_mode_index = 0;
- DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
@@ -276,7 +276,7 @@ void r100_pm_misc(struct radeon_device *rdev)
rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
radeon_set_pcie_lanes(rdev,
ps->pcie_lanes);
- DRM_DEBUG("Setting: p: %d\n", ps->pcie_lanes);
+ DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
}
}
@@ -849,7 +849,7 @@ static int r100_cp_init_microcode(struct radeon_device *rdev)
const char *fw_name = NULL;
int err;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
@@ -1803,6 +1803,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
return r;
break;
/* triggers drawing using indices to vertex buffer */
+ case PACKET3_3D_CLEAR_HIZ:
+ case PACKET3_3D_CLEAR_ZMASK:
+ if (p->rdev->hyperz_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
@@ -2295,8 +2300,8 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
u64 config_aper_size;
/* work out accessible VRAM */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
/* FIXME we don't use the second aperture yet when we could use it */
if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
@@ -2364,11 +2369,10 @@ void r100_mc_init(struct radeon_device *rdev)
*/
void r100_pll_errata_after_index(struct radeon_device *rdev)
{
- if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
- return;
+ if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
+ (void)RREG32(RADEON_CLOCK_CNTL_DATA);
+ (void)RREG32(RADEON_CRTC_GEN_CNTL);
}
- (void)RREG32(RADEON_CLOCK_CNTL_DATA);
- (void)RREG32(RADEON_CRTC_GEN_CNTL);
}
static void r100_pll_errata_after_data(struct radeon_device *rdev)
@@ -2643,7 +2647,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
flags |= pitch / 8;
- DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
+ DRM_DEBUG_KMS("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
@@ -3039,7 +3043,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
}
#endif
- DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
+ DRM_DEBUG_KMS("GRPH_BUFFER_CNTL from to %x\n",
/* (unsigned int)info->SavedReg->grph_buffer_cntl, */
(unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
}
@@ -3135,7 +3139,7 @@ void r100_bandwidth_update(struct radeon_device *rdev)
WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
}
- DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
+ DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
}
}
@@ -3809,6 +3813,31 @@ void r100_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/*
+ * Due to how kexec works, it can leave the hw fully initialised when it
+ * boots the new kernel. However, doing our init sequence with the CP and
+ * WB setup in place causes GPU hangs, on the RN50 at least. So at startup
+ * do some quick sanity checks and restore sane values to avoid this
+ * problem.
+ */
+void r100_restore_sanity(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(RADEON_CP_CSQ_CNTL);
+ if (tmp) {
+ WREG32(RADEON_CP_CSQ_CNTL, 0);
+ }
+ tmp = RREG32(RADEON_CP_RB_CNTL);
+ if (tmp) {
+ WREG32(RADEON_CP_RB_CNTL, 0);
+ }
+ tmp = RREG32(RADEON_SCRATCH_UMSK);
+ if (tmp) {
+ WREG32(RADEON_SCRATCH_UMSK, 0);
+ }
+}
+
int r100_init(struct radeon_device *rdev)
{
int r;
@@ -3821,6 +3850,8 @@ int r100_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+	/* sanity check some registers to avoid hangs like after kexec */
+ r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
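Note: r100_restore_sanity() from the hunk above is hooked into the ASIC init paths right next to radeon_surface_init(); besides the r100_init() call shown here, the r300, r420 and r520 hunks below add the identical call, so the same kexec quiesce logic is shared by the families that reuse the r100-style CP setup.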
diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h
index d016b16fa116..b121b6c678d4 100644
--- a/drivers/gpu/drm/radeon/r100d.h
+++ b/drivers/gpu/drm/radeon/r100d.h
@@ -48,10 +48,12 @@
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_3D_CLEAR_ZMASK 0x32
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_3D_CLEAR_HIZ 0x37
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 19a7ef7ee344..c827738ad7dd 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1048,14 +1048,47 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLOR_CHANNEL_MASK */
track->color_channel_mask = idx_value;
break;
- case 0x4d1c:
+ case 0x43a4:
+ /* SC_HYPERZ_EN */
+ /* r300c emits this register - we need to disable hyperz for it
+ * without complaining */
+ if (p->rdev->hyperz_filp != p->filp) {
+ if (idx_value & 0x1)
+ ib[idx] = idx_value & ~1;
+ }
+ break;
+ case 0x4f1c:
/* ZB_BW_CNTL */
track->zb_cb_clear = !!(idx_value & (1 << 5));
+ if (p->rdev->hyperz_filp != p->filp) {
+ if (idx_value & (R300_HIZ_ENABLE |
+ R300_RD_COMP_ENABLE |
+ R300_WR_COMP_ENABLE |
+ R300_FAST_FILL_ENABLE))
+ goto fail;
+ }
break;
case 0x4e04:
/* RB3D_BLENDCNTL */
track->blend_read_enable = !!(idx_value & (1 << 2));
break;
+ case 0x4f28: /* ZB_DEPTHCLEARVALUE */
+ break;
+ case 0x4f30: /* ZB_MASK_OFFSET */
+ case 0x4f34: /* ZB_ZMASK_PITCH */
+ case 0x4f44: /* ZB_HIZ_OFFSET */
+ case 0x4f54: /* ZB_HIZ_PITCH */
+ if (idx_value && (p->rdev->hyperz_filp != p->filp))
+ goto fail;
+ break;
+ case 0x4028:
+ if (idx_value && (p->rdev->hyperz_filp != p->filp))
+ goto fail;
+ /* GB_Z_PEQ_CONFIG */
+ if (p->rdev->family >= CHIP_RV350)
+ break;
+ goto fail;
+ break;
case 0x4be8:
/* valid register only on RV530 */
if (p->rdev->family == CHIP_RV530)
@@ -1066,8 +1099,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
}
return 0;
fail:
- printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
- reg, idx);
+ printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
+ reg, idx, idx_value);
return -EINVAL;
}
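All of the hyperz handling added above follows one gating rule: HiZ/Z-mask state (SC_HYPERZ_EN, the compression bits in ZB_BW_CNTL, the ZB_ZMASK/ZB_HIZ offset and pitch registers, and the CLEAR_HIZ/CLEAR_ZMASK packets) is only accepted from the DRM file stored in rdev->hyperz_filp; any other client either has the enable bit silently cleared or gets its command stream rejected. A minimal sketch of that idea, assuming a claim helper that is not part of this diff (the real ownership assignment happens elsewhere):

static bool hyperz_allowed(struct radeon_device *rdev, struct drm_file *filp)
{
	/* only the file that currently owns hyperz may emit hyperz state */
	return rdev->hyperz_filp == filp;
}

/* hypothetical claim path; name and ioctl plumbing are illustrative only */
static int hyperz_claim(struct radeon_device *rdev, struct drm_file *filp)
{
	if (rdev->hyperz_filp && rdev->hyperz_filp != filp)
		return -EBUSY;	/* another client already owns it */
	rdev->hyperz_filp = filp;
	return 0;
}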
@@ -1161,6 +1194,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
return r;
}
break;
+ case PACKET3_3D_CLEAR_HIZ:
+ case PACKET3_3D_CLEAR_ZMASK:
+ if (p->rdev->hyperz_filp != p->filp)
+ return -EINVAL;
+ break;
case PACKET3_NOP:
break;
default:
@@ -1380,6 +1418,8 @@ int r300_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+	/* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h
index 968a33317fbf..0c036c60d9df 100644
--- a/drivers/gpu/drm/radeon/r300d.h
+++ b/drivers/gpu/drm/radeon/r300d.h
@@ -48,10 +48,12 @@
#define PACKET3_3D_DRAW_IMMD 0x29
#define PACKET3_3D_DRAW_INDX 0x2A
#define PACKET3_3D_LOAD_VBPNTR 0x2F
+#define PACKET3_3D_CLEAR_ZMASK 0x32
#define PACKET3_INDX_BUFFER 0x33
#define PACKET3_3D_DRAW_VBUF_2 0x34
#define PACKET3_3D_DRAW_IMMD_2 0x35
#define PACKET3_3D_DRAW_INDX_2 0x36
+#define PACKET3_3D_CLEAR_HIZ 0x37
#define PACKET3_BITBLT_MULTI 0x9B
#define PACKET0(reg, n) (CP_PACKET0 | \
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index e6c89142bb4d..59f7bccc5be0 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -343,6 +343,8 @@ int r420_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+	/* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 93c9a2bbccf8..6ac1f604e29b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -386,6 +386,11 @@
# define AVIVO_D1GRPH_TILED (1 << 20)
# define AVIVO_D1GRPH_MACRO_ADDRESS_MODE (1 << 21)
+# define R600_D1GRPH_ARRAY_MODE_LINEAR_GENERAL (0 << 20)
+# define R600_D1GRPH_ARRAY_MODE_LINEAR_ALIGNED (1 << 20)
+# define R600_D1GRPH_ARRAY_MODE_1D_TILED_THIN1 (2 << 20)
+# define R600_D1GRPH_ARRAY_MODE_2D_TILED_THIN1 (4 << 20)
+
/* The R7xx *_HIGH surface regs are backwards; the D1 regs are in the D2
* block and vice versa. This applies to GRPH, CUR, etc.
*/
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 694af7cc23ac..1458dee902dd 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -231,6 +231,8 @@ int r520_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+	/* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e100f69faeec..d0ebae9dde25 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -92,6 +92,21 @@ void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
+/* get temperature in millidegrees */
+u32 rv6xx_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ u32 actual_temp = 0;
+
+ if ((temp >> 7) & 1)
+ actual_temp = 0;
+ else
+ actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
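A rough worked example of the decode above, using the ASIC_T field layout added to r600d.h later in this diff (ASIC_T_MASK 0x1FF, shift 0), with a made-up raw reading:

	u32 raw  = 0x64;			/* hypothetical ASIC_T value, bit 7 clear */
	u32 mdeg = ((raw >> 1) & 0xff) * 1000;	/* 0x64 >> 1 = 50, i.e. 50000 millidegrees (50 C) */

A reading with bit 7 set is treated as invalid and reported as 0.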
void r600_pm_get_dynpm_state(struct radeon_device *rdev)
{
int i;
@@ -256,7 +271,7 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
}
}
- DRM_DEBUG("Requested: e: %d m: %d p: %d\n",
+ DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].sclk,
rdev->pm.power_state[rdev->pm.requested_power_state_index].
@@ -571,7 +586,7 @@ void r600_pm_misc(struct radeon_device *rdev)
if (voltage->voltage != rdev->pm.current_vddc) {
radeon_atom_set_voltage(rdev, voltage->voltage);
rdev->pm.current_vddc = voltage->voltage;
- DRM_DEBUG("Setting: v: %d\n", voltage->voltage);
+ DRM_DEBUG_DRIVER("Setting: v: %d\n", voltage->voltage);
}
}
}
@@ -869,7 +884,17 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
u32 tmp;
/* flush hdp cache so updates hit vram */
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ u32 tmp;
+
+ /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ WREG32(HDP_DEBUG1, 0);
+ tmp = readl((void __iomem *)ptr);
+ } else
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
@@ -1217,8 +1242,8 @@ int r600_mc_init(struct radeon_device *rdev)
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
@@ -1609,7 +1634,7 @@ void r600_gpu_init(struct radeon_device *rdev)
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
-
+ rdev->config.r600.tile_config = tiling_config;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
@@ -3512,5 +3537,15 @@ int r600_debugfs_mc_info_init(struct radeon_device *rdev)
*/
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
- WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
+ * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+ void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+ u32 tmp;
+
+ WREG32(HDP_DEBUG1, 0);
+ tmp = readl((void __iomem *)ptr);
+ } else
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
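The RV770..RV740 workaround (write HDP_DEBUG1 and then read back a framebuffer word through the GART table mapping, instead of poking HDP_MEM_COHERENCY_FLUSH_CNTL) now appears verbatim in both r600_pcie_gart_tlb_flush() and r600_ioctl_wait_idle(). A sketch of a shared helper built only from the code above; the helper name is made up here and is not part of the patch:

static void r600_hdp_flush_example(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
		void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

		/* r7xx hw bug: HDP_DEBUG1 write followed by an fb read flushes HDP */
		WREG32(HDP_DEBUG1, 0);
		(void)readl((void __iomem *)ptr);
	} else
		WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}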
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 2b26553c352c..b5443fe1c1d1 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -63,7 +63,8 @@ int r600_audio_bits_per_sample(struct radeon_device *rdev)
case 0x4: return 32;
}
- DRM_ERROR("Unknown bits per sample 0x%x using 16 instead.\n", (int)value);
+ dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
+ (int)value);
return 16;
}
@@ -150,7 +151,8 @@ static void r600_audio_update_hdmi(unsigned long param)
r600_hdmi_update_audio_settings(encoder);
}
- if(still_going) r600_audio_schedule_polling(rdev);
+ if (still_going)
+ r600_audio_schedule_polling(rdev);
}
/*
@@ -158,8 +160,9 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
- DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
+ DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+ rdev->audio_enabled = enable;
}
/*
@@ -195,12 +198,14 @@ void r600_audio_enable_polling(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_enable_polling: %d", radeon_encoder->audio_polling_active);
+ DRM_DEBUG("r600_audio_enable_polling: %d\n",
+ radeon_encoder->audio_polling_active);
if (radeon_encoder->audio_polling_active)
return;
radeon_encoder->audio_polling_active = 1;
- mod_timer(&rdev->audio_timer, jiffies + 1);
+ if (rdev->audio_enabled)
+ mod_timer(&rdev->audio_timer, jiffies + 1);
}
/*
@@ -209,7 +214,8 @@ void r600_audio_enable_polling(struct drm_encoder *encoder)
void r600_audio_disable_polling(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DRM_DEBUG("r600_audio_disable_polling: %d", radeon_encoder->audio_polling_active);
+ DRM_DEBUG("r600_audio_disable_polling: %d\n",
+ radeon_encoder->audio_polling_active);
radeon_encoder->audio_polling_active = 0;
}
@@ -236,7 +242,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
default:
- DRM_ERROR("Unsupported encoder type 0x%02X\n",
+ dev_err(rdev->dev, "Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
@@ -266,7 +272,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!radeon_audio || !r600_audio_chipset_supported(rdev))
+ if (!rdev->audio_enabled)
return;
del_timer(&rdev->audio_timer);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 0271b53fa2dd..e8151c1d55b2 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -39,37 +39,45 @@
const u32 r6xx_default_state[] =
{
- 0xc0002400,
+ 0xc0002400, /* START_3D_CMDBUF */
0x00000000,
- 0xc0012800,
+
+ 0xc0012800, /* CONTEXT_CONTROL */
0x80000000,
0x80000000,
+
0xc0016800,
0x00000010,
- 0x00008000,
+ 0x00008000, /* WAIT_UNTIL */
+
0xc0016800,
0x00000542,
- 0x07000003,
+ 0x07000003, /* TA_CNTL_AUX */
+
0xc0016800,
0x000005c5,
- 0x00000000,
+ 0x00000000, /* VC_ENHANCE */
+
0xc0016800,
0x00000363,
- 0x00000000,
+ 0x00000000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
0xc0016800,
0x0000060c,
- 0x82000000,
+ 0x82000000, /* DB_DEBUG */
+
0xc0016800,
0x0000060e,
- 0x01020204,
- 0xc0016f00,
- 0x00000000,
- 0x00000000,
- 0xc0016f00,
- 0x00000001,
+ 0x01020204, /* DB_WATERMARKS */
+
+ 0xc0026f00,
0x00000000,
+ 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+ 0x00000000, /* SQ_VTX_START_INST_LOC */
+
0xc0096900,
0x0000022a,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
@@ -78,515 +86,317 @@ const u32 r6xx_default_state[] =
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
+
0xc0016900,
0x00000004,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_INFO */
+
+ 0xc0026900,
0x0000000a,
- 0x00000000,
- 0xc0016900,
- 0x0000000b,
- 0x00000000,
- 0xc0016900,
- 0x0000010c,
- 0x00000000,
- 0xc0016900,
- 0x0000010d,
- 0x00000000,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
0xc0016900,
0x00000200,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0026900,
0x00000343,
- 0x00000060,
- 0xc0016900,
- 0x00000344,
- 0x00000040,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000040, /* DB_RENDER_OVERRIDE */
+
0xc0016900,
0x00000351,
- 0x0000aa00,
- 0xc0016900,
- 0x00000104,
- 0x00000000,
- 0xc0016900,
- 0x0000010e,
- 0x00000000,
- 0xc0046900,
- 0x00000105,
- 0x00000000,
- 0x00000000,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc00f6900,
+ 0x00000100,
+ 0x00000800, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
0x00000000,
0x00000000,
- 0xc0036900,
- 0x00000109,
0x00000000,
+ 0x00000000, /* CB_FOG_RED */
0x00000000,
0x00000000,
+ 0x00000000, /* DB_STENCILREFMASK */
+ 0x00000000, /* DB_STENCILREFMASK_BF */
+ 0x00000000, /* SX_ALPHA_REF */
+
0xc0046900,
0x0000030c,
- 0x01000000,
+ 0x01000000, /* CB_CLRCMP_CNTL */
0x00000000,
0x00000000,
0x00000000,
+
0xc0046900,
0x00000048,
- 0x3f800000,
+ 0x3f800000, /* CB_CLEAR_RED */
0x00000000,
0x3f800000,
0x3f800000,
- 0xc0016900,
- 0x0000008e,
- 0x0000000f,
+
0xc0016900,
0x00000080,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00a6900,
0x00000083,
- 0x0000ffff,
- 0xc0016900,
- 0x00000084,
- 0x00000000,
- 0xc0016900,
- 0x00000085,
+ 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000,
- 0xc0016900,
- 0x00000086,
0x00000000,
- 0xc0016900,
- 0x00000087,
0x20002000,
- 0xc0016900,
- 0x00000088,
0x00000000,
- 0xc0016900,
- 0x00000089,
0x20002000,
- 0xc0016900,
- 0x0000008a,
0x00000000,
- 0xc0016900,
- 0x0000008b,
0x20002000,
- 0xc0016900,
- 0x0000008c,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SC_EDGERULE */
+
+ 0xc0406900,
0x00000094,
- 0x80000000,
- 0xc0016900,
- 0x00000095,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
0x20002000,
- 0xc0026900,
- 0x000000b4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000096,
0x80000000,
- 0xc0016900,
- 0x00000097,
0x20002000,
- 0xc0026900,
- 0x000000b6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000098,
0x80000000,
- 0xc0016900,
- 0x00000099,
0x20002000,
- 0xc0026900,
- 0x000000b8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009a,
0x80000000,
- 0xc0016900,
- 0x0000009b,
0x20002000,
- 0xc0026900,
- 0x000000ba,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009c,
0x80000000,
- 0xc0016900,
- 0x0000009d,
0x20002000,
- 0xc0026900,
- 0x000000bc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009e,
0x80000000,
- 0xc0016900,
- 0x0000009f,
0x20002000,
- 0xc0026900,
- 0x000000be,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a0,
0x80000000,
- 0xc0016900,
- 0x000000a1,
0x20002000,
- 0xc0026900,
- 0x000000c0,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a2,
0x80000000,
- 0xc0016900,
- 0x000000a3,
0x20002000,
- 0xc0026900,
- 0x000000c2,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a4,
0x80000000,
- 0xc0016900,
- 0x000000a5,
0x20002000,
- 0xc0026900,
- 0x000000c4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a6,
0x80000000,
- 0xc0016900,
- 0x000000a7,
0x20002000,
- 0xc0026900,
- 0x000000c6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a8,
0x80000000,
- 0xc0016900,
- 0x000000a9,
0x20002000,
- 0xc0026900,
- 0x000000c8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000aa,
0x80000000,
- 0xc0016900,
- 0x000000ab,
0x20002000,
- 0xc0026900,
- 0x000000ca,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ac,
0x80000000,
- 0xc0016900,
- 0x000000ad,
0x20002000,
- 0xc0026900,
- 0x000000cc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ae,
0x80000000,
- 0xc0016900,
- 0x000000af,
0x20002000,
- 0xc0026900,
- 0x000000ce,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000b0,
0x80000000,
- 0xc0016900,
- 0x000000b1,
0x20002000,
- 0xc0026900,
- 0x000000d0,
- 0x00000000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000,
- 0xc0016900,
- 0x000000b2,
- 0x80000000,
- 0xc0016900,
- 0x000000b3,
- 0x20002000,
- 0xc0026900,
- 0x000000d2,
0x00000000,
0x3f800000,
- 0xc0016900,
- 0x00000293,
- 0x00004010,
- 0xc0016900,
- 0x00000300,
0x00000000,
- 0xc0016900,
- 0x00000301,
- 0x00000000,
- 0xc0016900,
- 0x00000312,
- 0xffffffff,
- 0xc0016900,
- 0x00000307,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000308,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000283,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000292,
+ 0x3f800000,
0x00000000,
- 0xc0066900,
- 0x0000010f,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000206,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000207,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000208,
+ 0x3f800000,
0x00000000,
- 0xc0046900,
- 0x00000303,
0x3f800000,
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MPASS_PS_CNTL */
+ 0x00004010, /* PA_SC_MODE_CNTL */
+
+ 0xc0096900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x0000002d, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000,
0x3f800000,
0x3f800000,
- 0xc0016900,
- 0x00000205,
- 0x00000004,
- 0xc0016900,
- 0x00000280,
- 0x00000000,
- 0xc0016900,
- 0x00000281,
+ 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
0x00000000,
+
0xc0016900,
+ 0x00000312,
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc0066900,
0x0000037e,
- 0x00000000,
- 0xc0016900,
- 0x00000382,
- 0x00000000,
- 0xc0016900,
- 0x00000380,
- 0x00000000,
- 0xc0016900,
- 0x00000383,
- 0x00000000,
- 0xc0016900,
- 0x00000381,
- 0x00000000,
- 0xc0016900,
- 0x00000282,
- 0x00000008,
- 0xc0016900,
- 0x00000302,
- 0x0000002d,
- 0xc0016900,
- 0x0000037f,
- 0x00000000,
- 0xc0016900,
- 0x000001b2,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+ 0xc0046900,
0x000001b6,
- 0x00000000,
- 0xc0016900,
- 0x000001b7,
- 0x00000000,
- 0xc0016900,
- 0x000001b8,
- 0x00000000,
- 0xc0016900,
- 0x000001b9,
- 0x00000000,
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00000000, /* SPI_FOG_FUNC_SCALE */
+ 0x00000000, /* SPI_FOG_FUNC_BIAS */
+
0xc0016900,
0x00000225,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_START_FS */
+
0xc0016900,
0x00000229,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
0xc0016900,
0x00000237,
- 0x00000000,
- 0xc0016900,
- 0x00000100,
- 0x00000800,
- 0xc0016900,
- 0x00000101,
- 0x00000000,
- 0xc0016900,
- 0x00000102,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+ 0xc0026900,
0x000002a8,
- 0x00000000,
- 0xc0016900,
- 0x000002a9,
- 0x00000000,
- 0xc0016900,
- 0x00000103,
- 0x00000000,
- 0xc0016900,
- 0x00000284,
- 0x00000000,
- 0xc0016900,
- 0x00000290,
- 0x00000000,
- 0xc0016900,
- 0x00000285,
- 0x00000000,
- 0xc0016900,
- 0x00000286,
- 0x00000000,
- 0xc0016900,
- 0x00000287,
- 0x00000000,
- 0xc0016900,
- 0x00000288,
- 0x00000000,
- 0xc0016900,
- 0x00000289,
- 0x00000000,
- 0xc0016900,
- 0x0000028a,
- 0x00000000,
- 0xc0016900,
- 0x0000028b,
- 0x00000000,
- 0xc0016900,
- 0x0000028c,
- 0x00000000,
- 0xc0016900,
- 0x0000028d,
- 0x00000000,
- 0xc0016900,
- 0x0000028e,
- 0x00000000,
- 0xc0016900,
- 0x0000028f,
- 0x00000000,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_REUSE_DEPTH */
+ 0x00000000, /* VGT_GROUP_PRIM_TYPE */
+ 0x00000000, /* VGT_GROUP_FIRST_DECR */
+ 0x00000000, /* VGT_GROUP_DECR */
+ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+ 0x00000000, /* VGT_GS_MODE */
+
0xc0016900,
0x000002a1,
- 0x00000000,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
0xc0016900,
0x000002a5,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+ 0xc0036900,
0x000002ac,
- 0x00000000,
- 0xc0016900,
- 0x000002ad,
- 0x00000000,
- 0xc0016900,
- 0x000002ae,
- 0x00000000,
+ 0x00000000, /* VGT_STRMOUT_EN */
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
0xc0016900,
0x000002c8,
- 0x00000000,
- 0xc0016900,
- 0x00000206,
- 0x00000100,
- 0xc0016900,
- 0x00000204,
- 0x00010000,
- 0xc0036e00,
- 0x00000000,
- 0x00000012,
- 0x00000000,
- 0x00000000,
- 0xc0016900,
- 0x0000008f,
- 0x0000000f,
- 0xc0016900,
- 0x000001e8,
- 0x00000001,
- 0xc0016900,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+ 0xc0076900,
0x00000202,
- 0x00cc0000,
+ 0x00cc0000, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CNTL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000244, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+
+ 0xc0026900,
+ 0x0000008e,
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
0xc0016900,
- 0x00000205,
- 0x00000244,
+ 0x000001e8,
+ 0x00000001, /* CB_SHADER_CONTROL */
+
0xc0016900,
- 0x00000203,
- 0x00000210,
+ 0x00000185,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
0xc0016900,
+ 0x00000191,
+ 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc0056900,
0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+ 0x00000000, /* SPI_THREAD_GROUPING */
+ 0x00000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+ 0xc0036e00, /* SET_SAMPLER */
0x00000000,
- 0xc0016900,
- 0x00000185,
- 0x00000000,
- 0xc0016900,
- 0x000001b3,
- 0x00000001,
- 0xc0016900,
- 0x000001b4,
+ 0x00000012,
0x00000000,
- 0xc0016900,
- 0x00000191,
- 0x00000b00,
- 0xc0016900,
- 0x000001b5,
0x00000000,
};
const u32 r7xx_default_state[] =
{
- 0xc0012800,
+ 0xc0012800, /* CONTEXT_CONTROL */
0x80000000,
0x80000000,
+
0xc0016800,
0x00000010,
- 0x00008000,
+ 0x00008000, /* WAIT_UNTIL */
+
0xc0016800,
0x00000542,
- 0x07000002,
+ 0x07000002, /* TA_CNTL_AUX */
+
0xc0016800,
0x000005c5,
- 0x00000000,
+ 0x00000000, /* VC_ENHANCE */
+
0xc0016800,
0x00000363,
- 0x00004000,
+ 0x00004000, /* SQ_DYN_GPR_CNTL_PS_FLUSH_REQ */
+
0xc0016800,
0x0000060c,
- 0x00000000,
+ 0x00000000, /* DB_DEBUG */
+
0xc0016800,
0x0000060e,
- 0x00420204,
- 0xc0016f00,
- 0x00000000,
- 0x00000000,
- 0xc0016f00,
- 0x00000001,
+ 0x00420204, /* DB_WATERMARKS */
+
+ 0xc0026f00,
0x00000000,
+ 0x00000000, /* SQ_VTX_BASE_VTX_LOC */
+ 0x00000000, /* SQ_VTX_START_INST_LOC */
+
0xc0096900,
0x0000022a,
+ 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
0x00000000,
0x00000000,
0x00000000,
@@ -595,470 +405,269 @@ const u32 r7xx_default_state[] =
0x00000000,
0x00000000,
0x00000000,
- 0x00000000,
+
0xc0016900,
0x00000004,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_INFO */
+
+ 0xc0026900,
0x0000000a,
- 0x00000000,
- 0xc0016900,
- 0x0000000b,
- 0x00000000,
- 0xc0016900,
- 0x0000010c,
- 0x00000000,
- 0xc0016900,
- 0x0000010d,
- 0x00000000,
+ 0x00000000, /* DB_STENCIL_CLEAR */
+ 0x00000000, /* DB_DEPTH_CLEAR */
+
0xc0016900,
0x00000200,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* DB_DEPTH_CONTROL */
+
+ 0xc0026900,
0x00000343,
- 0x00000060,
- 0xc0016900,
- 0x00000344,
- 0x00000000,
+ 0x00000060, /* DB_RENDER_CONTROL */
+ 0x00000000, /* DB_RENDER_OVERRIDE */
+
0xc0016900,
0x00000351,
- 0x0000aa00,
- 0xc0016900,
- 0x00000104,
- 0x00000000,
- 0xc0016900,
- 0x0000010e,
- 0x00000000,
- 0xc0046900,
- 0x00000105,
- 0x00000000,
+ 0x0000aa00, /* DB_ALPHA_TO_MASK */
+
+ 0xc0096900,
+ 0x00000100,
+ 0x00000800, /* VGT_MAX_VTX_INDX */
+ 0x00000000, /* VGT_MIN_VTX_INDX */
+ 0x00000000, /* VGT_INDX_OFFSET */
+ 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
+ 0x00000000, /* SX_ALPHA_TEST_CONTROL */
+ 0x00000000, /* CB_BLEND_RED */
0x00000000,
0x00000000,
0x00000000,
+
+ 0xc0036900,
+ 0x0000010c,
+ 0x00000000, /* DB_STENCILREFMASK */
+ 0x00000000, /* DB_STENCILREFMASK_BF */
+ 0x00000000, /* SX_ALPHA_REF */
+
0xc0046900,
- 0x0000030c,
+ 0x0000030c, /* CB_CLRCMP_CNTL */
0x01000000,
0x00000000,
0x00000000,
0x00000000,
- 0xc0016900,
- 0x0000008e,
- 0x0000000f,
+
0xc0016900,
0x00000080,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* PA_SC_WINDOW_OFFSET */
+
+ 0xc00a6900,
0x00000083,
- 0x0000ffff,
- 0xc0016900,
- 0x00000084,
- 0x00000000,
- 0xc0016900,
- 0x00000085,
+ 0x0000ffff, /* PA_SC_CLIP_RECT_RULE */
+ 0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000,
- 0xc0016900,
- 0x00000086,
0x00000000,
- 0xc0016900,
- 0x00000087,
0x20002000,
- 0xc0016900,
- 0x00000088,
0x00000000,
- 0xc0016900,
- 0x00000089,
0x20002000,
- 0xc0016900,
- 0x0000008a,
0x00000000,
- 0xc0016900,
- 0x0000008b,
0x20002000,
- 0xc0016900,
- 0x0000008c,
- 0xaaaaaaaa,
- 0xc0016900,
+ 0xaaaaaaaa, /* PA_SC_EDGERULE */
+
+ 0xc0406900,
0x00000094,
- 0x80000000,
- 0xc0016900,
- 0x00000095,
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
+ 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
+ 0x80000000, /* PA_SC_VPORT_SCISSOR_1_TL */
0x20002000,
- 0xc0026900,
- 0x000000b4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000096,
0x80000000,
- 0xc0016900,
- 0x00000097,
0x20002000,
- 0xc0026900,
- 0x000000b6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x00000098,
0x80000000,
- 0xc0016900,
- 0x00000099,
0x20002000,
- 0xc0026900,
- 0x000000b8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009a,
0x80000000,
- 0xc0016900,
- 0x0000009b,
0x20002000,
- 0xc0026900,
- 0x000000ba,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009c,
0x80000000,
- 0xc0016900,
- 0x0000009d,
0x20002000,
- 0xc0026900,
- 0x000000bc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x0000009e,
0x80000000,
- 0xc0016900,
- 0x0000009f,
0x20002000,
- 0xc0026900,
- 0x000000be,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a0,
0x80000000,
- 0xc0016900,
- 0x000000a1,
0x20002000,
- 0xc0026900,
- 0x000000c0,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a2,
0x80000000,
- 0xc0016900,
- 0x000000a3,
0x20002000,
- 0xc0026900,
- 0x000000c2,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a4,
0x80000000,
- 0xc0016900,
- 0x000000a5,
0x20002000,
- 0xc0026900,
- 0x000000c4,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a6,
0x80000000,
- 0xc0016900,
- 0x000000a7,
0x20002000,
- 0xc0026900,
- 0x000000c6,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000a8,
0x80000000,
- 0xc0016900,
- 0x000000a9,
0x20002000,
- 0xc0026900,
- 0x000000c8,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000aa,
0x80000000,
- 0xc0016900,
- 0x000000ab,
0x20002000,
- 0xc0026900,
- 0x000000ca,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ac,
0x80000000,
- 0xc0016900,
- 0x000000ad,
0x20002000,
- 0xc0026900,
- 0x000000cc,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000ae,
0x80000000,
- 0xc0016900,
- 0x000000af,
0x20002000,
- 0xc0026900,
- 0x000000ce,
- 0x00000000,
- 0x3f800000,
- 0xc0016900,
- 0x000000b0,
0x80000000,
- 0xc0016900,
- 0x000000b1,
0x20002000,
- 0xc0026900,
- 0x000000d0,
- 0x00000000,
+ 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000,
- 0xc0016900,
- 0x000000b2,
- 0x80000000,
- 0xc0016900,
- 0x000000b3,
- 0x20002000,
- 0xc0026900,
- 0x000000d2,
0x00000000,
0x3f800000,
- 0xc0016900,
- 0x00000293,
- 0x00514000,
- 0xc0016900,
- 0x00000300,
- 0x00000000,
- 0xc0016900,
- 0x00000301,
0x00000000,
- 0xc0016900,
- 0x00000312,
- 0xffffffff,
- 0xc0016900,
- 0x00000307,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000308,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000283,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000292,
+ 0x3f800000,
0x00000000,
- 0xc0066900,
- 0x0000010f,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000206,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000207,
+ 0x3f800000,
0x00000000,
- 0xc0016900,
- 0x00000208,
+ 0x3f800000,
0x00000000,
- 0xc0046900,
- 0x00000303,
0x3f800000,
+
+ 0xc0026900,
+ 0x00000292,
+ 0x00000000, /* PA_SC_MPASS_PS_CNTL */
+ 0x00514000, /* PA_SC_MODE_CNTL */
+
+ 0xc0096900,
+ 0x00000300,
+ 0x00000000, /* PA_SC_LINE_CNTL */
+ 0x00000000, /* PA_SC_AA_CONFIG */
+ 0x0000002d, /* PA_SU_VTX_CNTL */
+ 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000,
0x3f800000,
0x3f800000,
- 0xc0016900,
- 0x00000205,
- 0x00000004,
- 0xc0016900,
- 0x00000280,
- 0x00000000,
- 0xc0016900,
- 0x00000281,
+ 0x00000000, /* PA_SC_SAMPLE_LOCS_MCTX */
0x00000000,
+
0xc0016900,
+ 0x00000312,
+ 0xffffffff, /* PA_SC_AA_MASK */
+
+ 0xc0066900,
0x0000037e,
- 0x00000000,
- 0xc0016900,
- 0x00000382,
- 0x00000000,
- 0xc0016900,
- 0x00000380,
- 0x00000000,
- 0xc0016900,
- 0x00000383,
- 0x00000000,
- 0xc0016900,
- 0x00000381,
- 0x00000000,
- 0xc0016900,
- 0x00000282,
- 0x00000008,
- 0xc0016900,
- 0x00000302,
- 0x0000002d,
- 0xc0016900,
- 0x0000037f,
- 0x00000000,
- 0xc0016900,
- 0x000001b2,
- 0x00000001,
- 0xc0016900,
+ 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
+ 0x00000000, /* PA_SU_POLY_OFFSET_CLAMP */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_FRONT_OFFSET */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_SCALE */
+ 0x00000000, /* PA_SU_POLY_OFFSET_BACK_OFFSET */
+
+ 0xc0046900,
0x000001b6,
- 0x00000000,
- 0xc0016900,
- 0x000001b7,
- 0x00000000,
- 0xc0016900,
- 0x000001b8,
- 0x00000000,
- 0xc0016900,
- 0x000001b9,
- 0x00000000,
+ 0x00000000, /* SPI_INPUT_Z */
+ 0x00000000, /* SPI_FOG_CNTL */
+ 0x00000000, /* SPI_FOG_FUNC_SCALE */
+ 0x00000000, /* SPI_FOG_FUNC_BIAS */
+
0xc0016900,
0x00000225,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_START_FS */
+
0xc0016900,
0x00000229,
- 0x00000000,
+ 0x00000000, /* SQ_PGM_RESOURCES_FS */
+
0xc0016900,
0x00000237,
- 0x00000000,
- 0xc0016900,
- 0x00000100,
- 0x00000800,
- 0xc0016900,
- 0x00000101,
- 0x00000000,
- 0xc0016900,
- 0x00000102,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* SQ_PGM_CF_OFFSET_FS */
+
+ 0xc0026900,
0x000002a8,
- 0x00000000,
- 0xc0016900,
- 0x000002a9,
- 0x00000000,
- 0xc0016900,
- 0x00000103,
- 0x00000000,
- 0xc0016900,
- 0x00000284,
- 0x00000000,
- 0xc0016900,
- 0x00000290,
- 0x00000000,
- 0xc0016900,
- 0x00000285,
- 0x00000000,
- 0xc0016900,
- 0x00000286,
- 0x00000000,
- 0xc0016900,
- 0x00000287,
- 0x00000000,
- 0xc0016900,
- 0x00000288,
- 0x00000000,
- 0xc0016900,
- 0x00000289,
- 0x00000000,
- 0xc0016900,
- 0x0000028a,
- 0x00000000,
- 0xc0016900,
- 0x0000028b,
- 0x00000000,
- 0xc0016900,
- 0x0000028c,
- 0x00000000,
- 0xc0016900,
- 0x0000028d,
- 0x00000000,
- 0xc0016900,
- 0x0000028e,
- 0x00000000,
- 0xc0016900,
- 0x0000028f,
- 0x00000000,
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
+ 0x00000000, /* VGT_INSTANCE_STEP_RATE_1 */
+
+ 0xc0116900,
+ 0x00000280,
+ 0x00000000, /* PA_SU_POINT_SIZE */
+ 0x00000000, /* PA_SU_POINT_MINMAX */
+ 0x00000008, /* PA_SU_LINE_CNTL */
+ 0x00000000, /* PA_SC_LINE_STIPPLE */
+ 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
+ 0x00000000, /* VGT_HOS_CNTL */
+ 0x00000000, /* VGT_HOS_MAX_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_MIN_TESS_LEVEL */
+ 0x00000000, /* VGT_HOS_REUSE_DEPTH */
+ 0x00000000, /* VGT_GROUP_PRIM_TYPE */
+ 0x00000000, /* VGT_GROUP_FIRST_DECR */
+ 0x00000000, /* VGT_GROUP_DECR */
+ 0x00000000, /* VGT_GROUP_VECT_0_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_0_FMT_CNTL */
+ 0x00000000, /* VGT_GROUP_VECT_1_FMT_CNTL */
+ 0x00000000, /* VGT_GS_MODE */
+
0xc0016900,
0x000002a1,
- 0x00000000,
+ 0x00000000, /* VGT_PRIMITIVEID_EN */
+
0xc0016900,
0x000002a5,
- 0x00000000,
- 0xc0016900,
+ 0x00000000, /* VGT_MULTI_PRIM_ID_RESET_EN */
+
+ 0xc0036900,
0x000002ac,
- 0x00000000,
- 0xc0016900,
- 0x000002ad,
- 0x00000000,
- 0xc0016900,
- 0x000002ae,
- 0x00000000,
+ 0x00000000, /* VGT_STRMOUT_EN */
+ 0x00000000, /* VGT_REUSE_OFF */
+ 0x00000000, /* VGT_VTX_CNT_EN */
+
0xc0016900,
0x000002c8,
- 0x00000000,
- 0xc0016900,
- 0x00000206,
- 0x00000100,
- 0xc0016900,
- 0x00000204,
- 0x00010000,
- 0xc0036e00,
- 0x00000000,
- 0x00000012,
- 0x00000000,
- 0x00000000,
- 0xc0016900,
- 0x0000008f,
- 0x0000000f,
- 0xc0016900,
- 0x000001e8,
- 0x00000001,
- 0xc0016900,
+ 0x00000000, /* VGT_STRMOUT_BUFFER_EN */
+
+ 0xc0076900,
0x00000202,
- 0x00cc0000,
+ 0x00cc0000, /* CB_COLOR_CONTROL */
+ 0x00000210, /* DB_SHADER_CNTL */
+ 0x00010000, /* PA_CL_CLIP_CNTL */
+ 0x00000244, /* PA_SU_SC_MODE_CNTL */
+ 0x00000100, /* PA_CL_VTE_CNTL */
+ 0x00000000, /* PA_CL_VS_OUT_CNTL */
+ 0x00000000, /* PA_CL_NANINF_CNTL */
+
+ 0xc0026900,
+ 0x0000008e,
+ 0x0000000f, /* CB_TARGET_MASK */
+ 0x0000000f, /* CB_SHADER_MASK */
+
0xc0016900,
- 0x00000205,
- 0x00000244,
+ 0x000001e8,
+ 0x00000001, /* CB_SHADER_CONTROL */
+
0xc0016900,
- 0x00000203,
- 0x00000210,
+ 0x00000185,
+ 0x00000000, /* SPI_VS_OUT_ID_0 */
+
0xc0016900,
+ 0x00000191,
+ 0x00000b00, /* SPI_PS_INPUT_CNTL_0 */
+
+ 0xc0056900,
0x000001b1,
+ 0x00000000, /* SPI_VS_OUT_CONFIG */
+ 0x00000001, /* SPI_THREAD_GROUPING */
+ 0x00000001, /* SPI_PS_IN_CONTROL_0 */
+ 0x00000000, /* SPI_PS_IN_CONTROL_1 */
+ 0x00000000, /* SPI_INTERP_CONTROL_0 */
+
+ 0xc0036e00, /* SET_SAMPLER */
0x00000000,
- 0xc0016900,
- 0x00000185,
- 0x00000000,
- 0xc0016900,
- 0x000001b3,
- 0x00000001,
- 0xc0016900,
- 0x000001b4,
+ 0x00000012,
0x00000000,
- 0xc0016900,
- 0x00000191,
- 0x00000b00,
- 0xc0016900,
- 0x000001b5,
0x00000000,
};
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 68e6f4349309..4f4cd8b286d5 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -200,7 +200,7 @@ int r600_page_table_init(struct drm_device *dev)
entry->pagelist[i], 0,
PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
- if (entry->busaddr[i] == 0) {
+ if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
r600_page_table_cleanup(dev, gart_info);
goto done;
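A bus address of 0 is not a reliable failure indicator on every platform; mapping failures are reported through pci_dma_mapping_error(), which is what the hunk switches to. Restated as a compact fragment of the pattern (the leading arguments of the call are cut off above; dev->pdev is assumed to be the device handle used there):

	entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i], 0,
					 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
		DRM_ERROR("unable to map PCIGART pages!\n");
		/* unwind the page table and fail, as the hunk above does */
	}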
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 144c32d37136..d8864949e387 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/kernel.h>
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
@@ -132,6 +133,7 @@ static inline int r600_bpe_from_format(u32 *bpe, u32 format)
case V_038004_FMT_GB_GR:
case V_038004_FMT_BG_RG:
case V_038004_COLOR_INVALID:
+ default:
*bpe = 16;
return -EINVAL;
}
@@ -166,70 +168,71 @@ static void r600_cs_track_init(struct r600_cs_track *track)
static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
struct r600_cs_track *track = p->track;
- u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
+ u32 bpe = 0, pitch, slice_tile_max, size, tmp, height, pitch_align;
volatile u32 *ib = p->ib->ptr;
if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
return -EINVAL;
}
- size = radeon_bo_size(track->cb_color_bo[i]);
+ size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i];
if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
__func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
i, track->cb_color_info[i]);
return -EINVAL;
}
- pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
+ /* pitch is the number of 8x8 tiles per row */
+ pitch = G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1;
slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
- if (!pitch) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
- __func__, __LINE__, pitch, i, track->cb_color_size[i]);
- return -EINVAL;
- }
- height = size / (pitch * bpe);
+ height = size / (pitch * 8 * bpe);
if (height > 8192)
height = 8192;
+ if (height > 7)
+ height &= ~0x7;
switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
case V_0280A0_ARRAY_LINEAR_GENERAL:
+ /* technically height & 0x7 */
+ break;
case V_0280A0_ARRAY_LINEAR_ALIGNED:
- if (pitch & 0x3f) {
- dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
- __func__, __LINE__, pitch, bpe, pitch * bpe);
+ pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
return -EINVAL;
}
- if ((pitch * bpe) & (track->group_size - 1)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ if (!IS_ALIGNED(height, 8)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
break;
case V_0280A0_ARRAY_1D_TILED_THIN1:
- if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
+ pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe * track->nsamples))) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, 8)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
- height &= ~0x7;
- if (!height)
- height = 8;
break;
case V_0280A0_ARRAY_2D_TILED_THIN1:
- if (pitch & ((8 * track->nbanks) - 1)) {
+ pitch_align = max((u32)track->nbanks,
+ (u32)(((track->group_size / 8) / (bpe * track->nsamples)) * track->nbanks));
+ if (!IS_ALIGNED(pitch, pitch_align)) {
dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
__func__, __LINE__, pitch);
return -EINVAL;
}
- tmp = pitch * 8 * bpe * track->nsamples;
- tmp = tmp / track->nbanks;
- if (tmp & (track->group_size - 1)) {
- dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
- __func__, __LINE__, pitch);
+ if (!IS_ALIGNED((height / 8), track->nbanks)) {
+ dev_warn(p->dev, "%s:%d cb height (%d) invalid\n",
+ __func__, __LINE__, height);
return -EINVAL;
}
- height &= ~((16 * track->npipes) - 1);
- if (!height)
- height = 16 * track->npipes;
break;
default:
dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
@@ -238,16 +241,20 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
return -EINVAL;
}
/* check offset */
- tmp = height * pitch;
+ tmp = height * pitch * 8 * bpe;
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
- dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]);
+ dev_warn(p->dev, "%s offset[%d] %d too big\n", __func__, i, track->cb_color_bo_offset[i]);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(track->cb_color_bo_offset[i], track->group_size)) {
+ dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->cb_color_bo_offset[i]);
return -EINVAL;
}
/* limit max tile */
- tmp = (height * pitch) >> 6;
+ tmp = (height * pitch * 8) >> 6;
if (tmp < slice_tile_max)
slice_tile_max = tmp;
- tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
+ tmp = S_028060_PITCH_TILE_MAX(pitch - 1) |
S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
ib[track->cb_color_size_idx[i]] = tmp;
return 0;
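A worked example of the new alignment rules, with numbers picked purely for illustration: for track->group_size = 256 bytes and bpe = 4, the LINEAR_ALIGNED case needs pitch_align = max(64, 256 / 4) / 8 = 8, so the pitch (counted in 8x8 tiles) must be a multiple of 8, i.e. 64 pixels; the 1D_TILED case with one sample needs pitch_align = max(8, 256 / (8 * 4)) / 8 = 1, so any tile pitch passes that check, while the height must still be a multiple of 8 in both cases.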
@@ -289,7 +296,7 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
/* Check depth buffer */
if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
G_028800_Z_ENABLE(track->db_depth_control)) {
- u32 nviews, bpe, ntiles;
+ u32 nviews, bpe, ntiles, pitch, pitch_align, height, size;
if (track->db_bo == NULL) {
dev_warn(p->dev, "z/stencil with no depth buffer\n");
return -EINVAL;
@@ -321,7 +328,6 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
dev_warn(p->dev, "z/stencil buffer size not set\n");
return -EINVAL;
}
- printk_once(KERN_WARNING "You have old & broken userspace please consider updating mesa\n");
tmp = radeon_bo_size(track->db_bo) - track->db_offset;
tmp = (tmp / bpe) >> 6;
if (!tmp) {
@@ -332,6 +338,51 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
}
ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
} else {
+ size = radeon_bo_size(track->db_bo);
+ pitch = G_028000_PITCH_TILE_MAX(track->db_depth_size) + 1;
+ height = size / (pitch * 8 * bpe);
+ height &= ~0x7;
+ if (!height)
+ height = 8;
+
+ switch (G_028010_ARRAY_MODE(track->db_depth_info)) {
+ case V_028010_ARRAY_1D_TILED_THIN1:
+ pitch_align = (max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8);
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(height, 8)) {
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
+ return -EINVAL;
+ }
+ break;
+ case V_028010_ARRAY_2D_TILED_THIN1:
+ pitch_align = max((u32)track->nbanks,
+ (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ if ((height / 8) & (track->nbanks - 1)) {
+ dev_warn(p->dev, "%s:%d db height (%d) invalid\n",
+ __func__, __LINE__, height);
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_028010_ARRAY_MODE(track->db_depth_info),
+ track->db_depth_info);
+ return -EINVAL;
+ }
+ if (!IS_ALIGNED(track->db_offset, track->group_size)) {
+ dev_warn(p->dev, "%s offset[%d] %d not aligned\n", __func__, i, track->db_offset);
+ return -EINVAL;
+ }
ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
tmp = ntiles * bpe * 64 * nviews;
@@ -724,7 +775,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- track->db_depth_info = radeon_get_ib_value(p, idx);
+ if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ ib[idx] &= C_028010_ARRAY_MODE;
+ track->db_depth_info &= C_028010_ARRAY_MODE;
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+ track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_2D_TILED_THIN1);
+ } else {
+ ib[idx] |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ track->db_depth_info |= S_028010_ARRAY_MODE(V_028010_ARRAY_1D_TILED_THIN1);
+ }
+ } else
+ track->db_depth_info = radeon_get_ib_value(p, idx);
break;
case R_028004_DB_DEPTH_VIEW:
track->db_depth_view = radeon_get_ib_value(p, idx);
@@ -757,8 +826,25 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
- track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (r600_cs_packet_next_is_pkt3_nop(p)) {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+ track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_2D_TILED_THIN1);
+ } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+ ib[idx] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+ track->cb_color_info[tmp] |= S_0280A0_ARRAY_MODE(V_0280A0_ARRAY_1D_TILED_THIN1);
+ }
+ } else {
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ }
break;
case R_028060_CB_COLOR0_SIZE:
case R_028064_CB_COLOR1_SIZE:
@@ -796,8 +882,6 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return -EINVAL;
}
ib[idx] = track->cb_color_base_last[tmp];
- printk_once(KERN_WARNING "You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -824,8 +908,6 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return -EINVAL;
}
ib[idx] = track->cb_color_base_last[tmp];
- printk_once(KERN_WARNING "You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
} else {
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -852,7 +934,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
return -EINVAL;
}
tmp = (reg - CB_COLOR0_BASE) / 4;
- track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->cb_color_base_last[tmp] = ib[idx];
track->cb_color_bo[tmp] = reloc->robj;
@@ -864,7 +946,7 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
"0x%04X\n", reg);
return -EINVAL;
}
- track->db_offset = radeon_get_ib_value(p, idx);
+ track->db_offset = radeon_get_ib_value(p, idx) << 8;
ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
track->db_bo = reloc->robj;
break;
@@ -946,8 +1028,9 @@ static inline unsigned minify(unsigned size, unsigned levels)
}
static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
- unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
- unsigned *l0_size, unsigned *mipmap_size)
+ unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
+ unsigned pitch_align,
+ unsigned *l0_size, unsigned *mipmap_size)
{
unsigned offset, i, level, face;
unsigned width, height, depth, rowstride, size;
@@ -960,18 +1043,18 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
height = minify(h0, i);
depth = minify(d0, i);
for(face = 0; face < nfaces; face++) {
- rowstride = ((width * bpe) + 255) & ~255;
+ rowstride = ALIGN((width * bpe), pitch_align);
size = height * rowstride * depth;
offset += size;
offset = (offset + 0x1f) & ~0x1f;
}
}
- *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
+ *l0_size = ALIGN((w0 * bpe), pitch_align) * h0 * d0;
*mipmap_size = offset;
- if (!blevel)
- *mipmap_size -= *l0_size;
if (!nlevels)
*mipmap_size = *l0_size;
+ if (!blevel)
+ *mipmap_size -= *l0_size;
}
/**
@@ -985,16 +1068,23 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels
* the texture and mipmap bo object are big enough to cover this resource.
*/
static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
- struct radeon_bo *texture,
- struct radeon_bo *mipmap)
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap,
+ u32 tiling_flags)
{
+ struct r600_cs_track *track = p->track;
u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
- u32 word0, word1, l0_size, mipmap_size;
+ u32 word0, word1, l0_size, mipmap_size, pitch, pitch_align;
/* on legacy kernel we don't perform advanced check */
if (p->rdev == NULL)
return 0;
+
word0 = radeon_get_ib_value(p, idx + 0);
+ if (tiling_flags & RADEON_TILING_MACRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
word1 = radeon_get_ib_value(p, idx + 1);
w0 = G_038000_TEX_WIDTH(word0) + 1;
h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1021,20 +1111,64 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
__func__, __LINE__, G_038004_DATA_FORMAT(word1));
return -EINVAL;
}
+
+ pitch = G_038000_PITCH(word0) + 1;
+ switch (G_038000_TILE_MODE(word0)) {
+ case V_038000_ARRAY_LINEAR_GENERAL:
+ pitch_align = 1;
+ /* XXX check height align */
+ break;
+ case V_038000_ARRAY_LINEAR_ALIGNED:
+ pitch_align = max((u32)64, (u32)(track->group_size / bpe)) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ /* XXX check height align */
+ break;
+ case V_038000_ARRAY_1D_TILED_THIN1:
+ pitch_align = max((u32)8, (u32)(track->group_size / (8 * bpe))) / 8;
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ /* XXX check height align */
+ break;
+ case V_038000_ARRAY_2D_TILED_THIN1:
+ pitch_align = max((u32)track->nbanks,
+ (u32)(((track->group_size / 8) / bpe) * track->nbanks));
+ if (!IS_ALIGNED(pitch, pitch_align)) {
+ dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ /* XXX check height align */
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d (0x%08X)\n", __func__,
+ G_038000_TILE_MODE(word0), word0);
+ return -EINVAL;
+ }
+ /* XXX check offset align */
+
word0 = radeon_get_ib_value(p, idx + 4);
word1 = radeon_get_ib_value(p, idx + 5);
blevel = G_038010_BASE_LEVEL(word0);
nlevels = G_038014_LAST_LEVEL(word1);
- r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
+ r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe,
+ (pitch_align * bpe),
+ &l0_size, &mipmap_size);
/* using get ib will give us the offset into the texture bo */
- word0 = radeon_get_ib_value(p, idx + 2);
+ word0 = radeon_get_ib_value(p, idx + 2) << 8;
if ((l0_size + word0) > radeon_bo_size(texture)) {
dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
return -EINVAL;
}
/* using get ib will give us the offset into the mipmap bo */
- word0 = radeon_get_ib_value(p, idx + 3);
+ word0 = radeon_get_ib_value(p, idx + 3) << 8;
if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(texture));
@@ -1228,7 +1362,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < (pkt->count / 7); i++) {
struct radeon_bo *texture, *mipmap;
- u32 size, offset;
+ u32 size, offset, base_offset, mip_offset;
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
@@ -1238,7 +1372,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+ ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
@@ -1246,12 +1384,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
- ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mip_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
mipmap = reloc->robj;
r = r600_check_texture_resource(p, idx+(i*7)+1,
- texture, mipmap);
+ texture, mipmap, reloc->lobj.tiling_flags);
if (r)
return r;
+ ib[idx+1+(i*7)+2] += base_offset;
+ ib[idx+1+(i*7)+3] += mip_offset;
break;
case SQ_TEX_VTX_VALID_BUFFER:
/* vtx base */
@@ -1261,10 +1401,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
- size = radeon_get_ib_value(p, idx+1+(i*7)+1);
+ size = radeon_get_ib_value(p, idx+1+(i*7)+1) + 1;
if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
/* force size to size of the buffer */
- dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ dev_warn(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n",
+ size + offset, radeon_bo_size(reloc->robj));
ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
}
ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 26b4bc9d89a5..e6a58ed48dcf 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -435,7 +435,8 @@ static int r600_hdmi_find_free_block(struct drm_device *dev)
}
}
- if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
+ if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
+ rdev->family == CHIP_RS740) {
return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
} else if (rdev->family >= CHIP_R600) {
if (free_blocks[0])
@@ -466,7 +467,8 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
if (ASIC_IS_DCE32(rdev))
radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
- } else if (rdev->family >= CHIP_R600) {
+ } else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
+ rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 59c1f8793e60..858a1920c0d7 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -239,12 +239,18 @@
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1<<0)
+#define CG_THERMAL_STATUS 0x7F4
+#define ASIC_T(x) ((x) << 0)
+#define ASIC_T_MASK 0x1FF
+#define ASIC_T_SHIFT 0
+
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
+#define HDP_DEBUG1 0x2F34
#define MC_VM_AGP_TOP 0x2184
#define MC_VM_AGP_BOT 0x2188
@@ -1154,6 +1160,10 @@
#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
#define C_038000_TILE_MODE 0xFFFFFF87
+#define V_038000_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_038000_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_038000_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_038000_ARRAY_2D_TILED_THIN1 0x00000004
#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
#define C_038000_TILE_TYPE 0xFFFFFF7F
@@ -1357,6 +1367,8 @@
#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
#define C_028010_ARRAY_MODE 0xFFF87FFF
+#define V_028010_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_028010_ARRAY_2D_TILED_THIN1 0x00000004
#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f94dc66c183..3dfcfa3ca425 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -178,6 +178,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 level);
void rs690_pm_info(struct radeon_device *rdev);
+extern u32 rv6xx_get_temp(struct radeon_device *rdev);
+extern u32 rv770_get_temp(struct radeon_device *rdev);
+extern u32 evergreen_get_temp(struct radeon_device *rdev);
/*
* Fences.
@@ -232,7 +235,7 @@ struct radeon_surface_reg {
*/
struct radeon_mman {
struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct ttm_bo_device bdev;
bool mem_global_referenced;
bool initialized;
@@ -671,6 +674,13 @@ struct radeon_pm_profile {
int dpms_on_cm_idx;
};
+enum radeon_int_thermal_type {
+ THERMAL_TYPE_NONE,
+ THERMAL_TYPE_RV6XX,
+ THERMAL_TYPE_RV770,
+ THERMAL_TYPE_EVERGREEN,
+};
+
struct radeon_voltage {
enum radeon_voltage_type type;
/* gpio voltage */
@@ -766,6 +776,9 @@ struct radeon_pm {
enum radeon_pm_profile_type profile;
int profile_index;
struct radeon_pm_profile profiles[PM_PROFILE_MAX];
+ /* internal thermal controller on rv6xx+ */
+ enum radeon_int_thermal_type int_thermal_type;
+ struct device *int_hwmon_dev;
};
@@ -902,6 +915,7 @@ struct r600_asic {
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
+ unsigned tile_config;
struct r100_gpu_lockup lockup;
};
@@ -926,6 +940,7 @@ struct rv770_asic {
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
+ unsigned tile_config;
struct r100_gpu_lockup lockup;
};
@@ -951,6 +966,7 @@ struct evergreen_asic {
unsigned tiling_nbanks;
unsigned tiling_npipes;
unsigned tiling_group_size;
+ unsigned tile_config;
};
union radeon_asic_config {
@@ -1033,6 +1049,9 @@ struct radeon_device {
uint32_t pcie_reg_mask;
radeon_rreg_t pciep_rreg;
radeon_wreg_t pciep_wreg;
+ /* io port */
+ void __iomem *rio_mem;
+ resource_size_t rio_mem_size;
struct radeon_clock clock;
struct radeon_mc mc;
struct radeon_gart gart;
@@ -1069,6 +1088,7 @@ struct radeon_device {
struct mutex vram_mutex;
/* audio stuff */
+ bool audio_enabled;
struct timer_list audio_timer;
int audio_channels;
int audio_rate;
@@ -1078,6 +1098,10 @@ struct radeon_device {
bool powered_down;
struct notifier_block acpi_nb;
+ /* only one userspace can use Hyperz features at a time */
+ struct drm_file *hyperz_filp;
+ /* i2c buses */
+ struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1114,6 +1138,26 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
}
}
+static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
+{
+ if (reg < rdev->rio_mem_size)
+ return ioread32(rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ return ioread32(rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
+static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+ if (reg < rdev->rio_mem_size)
+ iowrite32(v, rdev->rio_mem + reg);
+ else {
+ iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
+ iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
+ }
+}
+
/*
* Cast helper
*/
@@ -1152,6 +1196,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
WREG32_PLL(reg, tmp_); \
} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg)))
+#define RREG32_IO(reg) r100_io_rreg(rdev, (reg))
+#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v))
/*
* Indirect registers accessor
@@ -1415,6 +1461,13 @@ extern void r700_cp_fini(struct radeon_device *rdev);
extern void evergreen_disable_interrupt_state(struct radeon_device *rdev);
extern int evergreen_irq_set(struct radeon_device *rdev);
+/* radeon_acpi.c */
+#if defined(CONFIG_ACPI)
+extern int radeon_acpi_init(struct radeon_device *rdev);
+#else
+static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
+#endif
+
/* evergreen */
struct evergreen_mc_save {
u32 vga_control[6];
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
new file mode 100644
index 000000000000..3f6636bb2d7f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -0,0 +1,67 @@
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/slab.h>
+#include <acpi/acpi_drivers.h>
+#include <acpi/acpi_bus.h>
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sarea.h"
+#include "drm_crtc_helper.h"
+#include "radeon.h"
+
+#include <linux/vga_switcheroo.h>
+
+/* Call the ATIF method
+ *
+ * Note: currently we discard the output
+ */
+static int radeon_atif_call(acpi_handle handle)
+{
+ acpi_status status;
+ union acpi_object atif_arg_elements[2];
+ struct acpi_object_list atif_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atif_arg.count = 2;
+ atif_arg.pointer = &atif_arg_elements[0];
+
+ atif_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[0].integer.value = 0;
+ atif_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atif_arg_elements[1].integer.value = 0;
+
+ status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+
+ /* Fail only if calling the method fails and ATIF is supported */
+ if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
+ printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ kfree(buffer.pointer);
+ return 1;
+ }
+
+ kfree(buffer.pointer);
+ return 0;
+}
+
+/* Call all ACPI methods here */
+int radeon_acpi_init(struct radeon_device *rdev)
+{
+ acpi_handle handle;
+ int ret;
+
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
+ return 0;
+
+ /* Get the device handle */
+ handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+
+ /* Call the ATIF method */
+ ret = radeon_atif_call(handle);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index c0bbaa64157a..a5aff755f0d2 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -113,6 +113,7 @@ void r100_wb_fini(struct radeon_device *rdev);
int r100_wb_init(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
void r100_vga_render_disable(struct radeon_device *rdev);
+void r100_restore_sanity(struct radeon_device *rdev);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
struct radeon_bo *robj);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 10673ae59cfa..6d30868744ee 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -48,7 +48,8 @@ radeon_add_atom_connector(struct drm_device *dev,
struct radeon_i2c_bus_rec *i2c_bus,
bool linkb, uint32_t igp_lane_info,
uint16_t connector_object_id,
- struct radeon_hpd *hpd);
+ struct radeon_hpd *hpd,
+ struct radeon_router *router);
/* from radeon_legacy_encoder.c */
extern void
@@ -114,7 +115,8 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
i2c.i2c_id = gpio->sucI2cId.ucAccess;
- i2c.valid = true;
+ if (i2c.mask_clk_reg)
+ i2c.valid = true;
break;
}
}
@@ -123,6 +125,66 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
return i2c;
}
+void radeon_atombios_i2c_init(struct radeon_device *rdev)
+{
+ struct atom_context *ctx = rdev->mode_info.atom_context;
+ ATOM_GPIO_I2C_ASSIGMENT *gpio;
+ struct radeon_i2c_bus_rec i2c;
+ int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
+ struct _ATOM_GPIO_I2C_INFO *i2c_info;
+ uint16_t data_offset, size;
+ int i, num_indices;
+ char stmp[32];
+
+ memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_I2C_ASSIGMENT);
+
+ for (i = 0; i < num_indices; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+ i2c.valid = false;
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ if (i2c.mask_clk_reg) {
+ i2c.valid = true;
+ sprintf(stmp, "0x%x", i2c.i2c_id);
+ rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
+ }
+ }
+ }
+}
+
static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
@@ -206,6 +268,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
uint16_t *line_mux,
struct radeon_hpd *hpd)
{
+ struct radeon_device *rdev = dev->dev_private;
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
if ((dev->pdev->device == 0x791e) &&
@@ -308,13 +371,22 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
- /* Acer laptop reports DVI-D as DVI-I */
+ /* Acer laptop reports DVI-D as DVI-I and hpd pins reversed */
if ((dev->pdev->device == 0x95c4) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
+ struct radeon_gpio_rec gpio;
+
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
- (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+ gpio = radeon_lookup_gpio(rdev, 6);
+ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
*connector_type = DRM_MODE_CONNECTOR_DVID;
+ } else if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
+ (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
+ gpio = radeon_lookup_gpio(rdev, 7);
+ *hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+ }
}
/* XFX Pine Group device rv730 reports no VGA DDC lines
@@ -399,13 +471,15 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
u16 size, data_offset;
u8 frev, crev;
ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
+ ATOM_OBJECT_TABLE *router_obj;
ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
ATOM_OBJECT_HEADER *obj_header;
- int i, j, path_size, device_support;
+ int i, j, k, path_size, device_support;
int connector_type;
u16 igp_lane_info, conn_id, connector_object_id;
bool linkb;
struct radeon_i2c_bus_rec ddc_bus;
+ struct radeon_router router;
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;
@@ -415,6 +489,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (crev < 2)
return false;
+ router.valid = false;
+
obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
(ctx->bios + data_offset +
@@ -422,6 +498,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
(ctx->bios + data_offset +
le16_to_cpu(obj_header->usConnectorObjectTableOffset));
+ router_obj = (ATOM_OBJECT_TABLE *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(obj_header->usRouterObjectTableOffset));
device_support = le16_to_cpu(obj_header->usDeviceSupport);
path_size = 0;
@@ -508,33 +587,86 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
if (connector_type == DRM_MODE_CONNECTOR_Unknown)
continue;
- for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2);
- j++) {
- uint8_t enc_obj_id, enc_obj_num, enc_obj_type;
+ for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
+ uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
- enc_obj_id =
+ grph_obj_id =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
- enc_obj_num =
+ grph_obj_num =
(le16_to_cpu(path->usGraphicObjIds[j]) &
ENUM_ID_MASK) >> ENUM_ID_SHIFT;
- enc_obj_type =
+ grph_obj_type =
(le16_to_cpu(path->usGraphicObjIds[j]) &
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
- /* FIXME: add support for router objects */
- if (enc_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
- if (enc_obj_num == 2)
+ if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
+ if (grph_obj_num == 2)
linkb = true;
else
linkb = false;
radeon_add_atom_encoder(dev,
- enc_obj_id,
+ grph_obj_id,
le16_to_cpu
(path->
usDeviceTag));
+ } else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
+ router.valid = false;
+ for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
+ u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+ if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
+ ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
+ ATOM_I2C_RECORD *i2c_record;
+ ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+ ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+ ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
+ (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
+ (ctx->bios + data_offset +
+ le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
+ int enum_id;
+
+ router.router_id = router_obj_id;
+ for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
+ enum_id++) {
+ if (le16_to_cpu(path->usConnObjectId) ==
+ le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
+ break;
+ }
+
+ while (record->ucRecordType > 0 &&
+ record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
+ switch (record->ucRecordType) {
+ case ATOM_I2C_RECORD_TYPE:
+ i2c_record =
+ (ATOM_I2C_RECORD *)
+ record;
+ i2c_config =
+ (ATOM_I2C_ID_CONFIG_ACCESS *)
+ &i2c_record->sucI2cId;
+ router.i2c_info =
+ radeon_lookup_i2c_gpio(rdev,
+ i2c_config->
+ ucAccess);
+ router.i2c_addr = i2c_record->ucI2CAddr >> 1;
+ break;
+ case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
+ ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
+ record;
+ router.valid = true;
+ router.mux_type = ddc_path->ucMuxType;
+ router.mux_control_pin = ddc_path->ucMuxControlPin;
+ router.mux_state = ddc_path->ucMuxState[enum_id];
+ break;
+ }
+ record = (ATOM_COMMON_RECORD_HEADER *)
+ ((char *)record + record->ucRecordSize);
+ }
+ }
+ }
}
}
@@ -614,7 +746,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
connector_type, &ddc_bus,
linkb, igp_lane_info,
connector_object_id,
- &hpd);
+ &hpd,
+ &router);
}
}
@@ -691,6 +824,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
int i, j, max_device;
struct bios_connector *bios_connectors;
size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
+ struct radeon_router router;
+
+ router.valid = false;
bios_connectors = kzalloc(bc_size, GFP_KERNEL);
if (!bios_connectors)
@@ -723,7 +859,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
}
if (i == ATOM_DEVICE_CV_INDEX) {
- DRM_DEBUG("Skipping Component Video\n");
+ DRM_DEBUG_KMS("Skipping Component Video\n");
continue;
}
@@ -862,7 +998,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
&bios_connectors[i].ddc_bus,
false, 0,
connector_object_id,
- &bios_connectors[i].hpd);
+ &bios_connectors[i].hpd,
+ &router);
}
}
@@ -1032,21 +1169,18 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
u8 frev, crev;
u16 data_offset;
+ /* sideport is AMD only */
+ if (rdev->family == CHIP_RS600)
+ return false;
+
if (atom_parse_data_header(mode_info->atom_context, index, NULL,
&frev, &crev, &data_offset)) {
igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
switch (crev) {
case 1:
- /* AMD IGPS */
- if ((rdev->family == CHIP_RS690) ||
- (rdev->family == CHIP_RS740)) {
- if (igp_info->info.ulBootUpMemoryClock)
- return true;
- } else {
- if (igp_info->info.ucMemoryType & 0xf0)
- return true;
- }
+ if (igp_info->info.ulBootUpMemoryClock)
+ return true;
break;
case 2:
if (igp_info->info_2.ucMemoryType & 0x0f)
@@ -1095,7 +1229,7 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
(tmds_info->asMiscInfo[i].
ucPLL_VoltageSwing & 0xf) << 16;
- DRM_DEBUG("TMDS PLL From ATOMBIOS %u %x\n",
+ DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
@@ -1524,7 +1658,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
thermal_controller_names[power_info->info.ucOverdriveThermalController],
power_info->info.ucOverdriveControllerAddress >> 1);
i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = thermal_controller_names[power_info->info.
@@ -1789,14 +1923,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
}
/* add the i2c bus for thermal/fan chip */
- /* no support for internal controller yet */
if (controller->ucType > 0) {
- if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
- (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) ||
- (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN)) {
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
@@ -1809,7 +1951,7 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine);
- rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = pp_lib_thermal_controller_names[controller->ucType];
@@ -1922,6 +2064,11 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_PERFORMANCE;
break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
}
rdev->pm.power_state[state_index].flags = 0;
if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
@@ -2179,11 +2326,11 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("TV1 connected\n");
+ DRM_DEBUG_KMS("TV1 connected\n");
bios_3_scratch |= ATOM_S3_TV1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_TV1;
} else {
- DRM_DEBUG("TV1 disconnected\n");
+ DRM_DEBUG_KMS("TV1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_TV1_MASK;
bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1;
@@ -2192,11 +2339,11 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CV connected\n");
+ DRM_DEBUG_KMS("CV connected\n");
bios_3_scratch |= ATOM_S3_CV_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CV;
} else {
- DRM_DEBUG("CV disconnected\n");
+ DRM_DEBUG_KMS("CV disconnected\n");
bios_0_scratch &= ~ATOM_S0_CV_MASK;
bios_3_scratch &= ~ATOM_S3_CV_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV;
@@ -2205,12 +2352,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("LCD1 connected\n");
+ DRM_DEBUG_KMS("LCD1 connected\n");
bios_0_scratch |= ATOM_S0_LCD1;
bios_3_scratch |= ATOM_S3_LCD1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_LCD1;
} else {
- DRM_DEBUG("LCD1 disconnected\n");
+ DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_LCD1;
bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1;
@@ -2219,12 +2366,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT1 connected\n");
+ DRM_DEBUG_KMS("CRT1 connected\n");
bios_0_scratch |= ATOM_S0_CRT1_COLOR;
bios_3_scratch |= ATOM_S3_CRT1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1;
} else {
- DRM_DEBUG("CRT1 disconnected\n");
+ DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT1_MASK;
bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1;
@@ -2233,12 +2380,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT2 connected\n");
+ DRM_DEBUG_KMS("CRT2 connected\n");
bios_0_scratch |= ATOM_S0_CRT2_COLOR;
bios_3_scratch |= ATOM_S3_CRT2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2;
} else {
- DRM_DEBUG("CRT2 disconnected\n");
+ DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_CRT2_MASK;
bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2;
@@ -2247,12 +2394,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP1 connected\n");
+ DRM_DEBUG_KMS("DFP1 connected\n");
bios_0_scratch |= ATOM_S0_DFP1;
bios_3_scratch |= ATOM_S3_DFP1_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1;
} else {
- DRM_DEBUG("DFP1 disconnected\n");
+ DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP1;
bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1;
@@ -2261,12 +2408,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP2 connected\n");
+ DRM_DEBUG_KMS("DFP2 connected\n");
bios_0_scratch |= ATOM_S0_DFP2;
bios_3_scratch |= ATOM_S3_DFP2_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2;
} else {
- DRM_DEBUG("DFP2 disconnected\n");
+ DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP2;
bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2;
@@ -2275,12 +2422,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP3 connected\n");
+ DRM_DEBUG_KMS("DFP3 connected\n");
bios_0_scratch |= ATOM_S0_DFP3;
bios_3_scratch |= ATOM_S3_DFP3_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3;
} else {
- DRM_DEBUG("DFP3 disconnected\n");
+ DRM_DEBUG_KMS("DFP3 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP3;
bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3;
@@ -2289,12 +2436,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP4 connected\n");
+ DRM_DEBUG_KMS("DFP4 connected\n");
bios_0_scratch |= ATOM_S0_DFP4;
bios_3_scratch |= ATOM_S3_DFP4_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4;
} else {
- DRM_DEBUG("DFP4 disconnected\n");
+ DRM_DEBUG_KMS("DFP4 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP4;
bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4;
@@ -2303,12 +2450,12 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP5 connected\n");
+ DRM_DEBUG_KMS("DFP5 connected\n");
bios_0_scratch |= ATOM_S0_DFP5;
bios_3_scratch |= ATOM_S3_DFP5_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5;
} else {
- DRM_DEBUG("DFP5 disconnected\n");
+ DRM_DEBUG_KMS("DFP5 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP5;
bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 2c9213739999..654787ec43f4 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -53,7 +53,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
return false;
rdev->bios = NULL;
- vram_base = drm_get_resource_start(rdev->ddev, 0);
+ vram_base = pci_resource_start(rdev->pdev, 0);
bios = ioremap(vram_base, size);
if (!bios) {
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index f64936cc4dd9..14448a740ba6 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -91,6 +91,85 @@ uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
return mclk;
}
+#ifdef CONFIG_OF
+/*
+ * Read XTAL (ref clock), SCLK and MCLK from Open Firmware device
+ * tree. Hopefully, the ATI OF driver is kind enough to fill these in.
+ */
+static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct device_node *dp = rdev->pdev->dev.of_node;
+ const u32 *val;
+ struct radeon_pll *p1pll = &rdev->clock.p1pll;
+ struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *spll = &rdev->clock.spll;
+ struct radeon_pll *mpll = &rdev->clock.mpll;
+
+ if (dp == NULL)
+ return false;
+ val = of_get_property(dp, "ATY,RefCLK", NULL);
+ if (!val || !*val) {
+ printk(KERN_WARNING "radeonfb: No ATY,RefCLK property !\n");
+ return false;
+ }
+ p1pll->reference_freq = p2pll->reference_freq = (*val) / 10;
+ p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
+ if (p1pll->reference_div < 2)
+ p1pll->reference_div = 12;
+ p2pll->reference_div = p1pll->reference_div;
+
+ /* These aren't in the device-tree */
+ if (rdev->family >= CHIP_R420) {
+ p1pll->pll_in_min = 100;
+ p1pll->pll_in_max = 1350;
+ p1pll->pll_out_min = 20000;
+ p1pll->pll_out_max = 50000;
+ p2pll->pll_in_min = 100;
+ p2pll->pll_in_max = 1350;
+ p2pll->pll_out_min = 20000;
+ p2pll->pll_out_max = 50000;
+ } else {
+ p1pll->pll_in_min = 40;
+ p1pll->pll_in_max = 500;
+ p1pll->pll_out_min = 12500;
+ p1pll->pll_out_max = 35000;
+ p2pll->pll_in_min = 40;
+ p2pll->pll_in_max = 500;
+ p2pll->pll_out_min = 12500;
+ p2pll->pll_out_max = 35000;
+ }
+
+ spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
+ spll->reference_div = mpll->reference_div =
+ RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) &
+ RADEON_M_SPLL_REF_DIV_MASK;
+
+ val = of_get_property(dp, "ATY,SCLK", NULL);
+ if (val && *val)
+ rdev->clock.default_sclk = (*val) / 10;
+ else
+ rdev->clock.default_sclk =
+ radeon_legacy_get_engine_clock(rdev);
+
+ val = of_get_property(dp, "ATY,MCLK", NULL);
+ if (val && *val)
+ rdev->clock.default_mclk = (*val) / 10;
+ else
+ rdev->clock.default_mclk =
+ radeon_legacy_get_memory_clock(rdev);
+
+ DRM_INFO("Using device-tree clock info\n");
+
+ return true;
+}
+#else
+static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
+{
+ return false;
+}
+#endif /* CONFIG_OF */
+
void radeon_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -105,6 +184,8 @@ void radeon_get_clock_info(struct drm_device *dev)
ret = radeon_atom_get_clock_info(dev);
else
ret = radeon_combios_get_clock_info(dev);
+ if (!ret)
+ ret = radeon_read_clocks_OF(dev);
if (ret) {
if (p1pll->reference_div < 2) {
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 2417d7b06fdb..885dcfac1838 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -480,9 +480,66 @@ radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
}
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
- int ddc_line)
+ enum radeon_combios_ddc ddc,
+ u32 clk_mask,
+ u32 data_mask)
{
struct radeon_i2c_bus_rec i2c;
+ int ddc_line = 0;
+
+ /* ddc id = mask reg
+ * DDC_NONE_DETECTED = none
+ * DDC_DVI = RADEON_GPIO_DVI_DDC
+ * DDC_VGA = RADEON_GPIO_VGA_DDC
+ * DDC_LCD = RADEON_GPIOPAD_MASK
+ * DDC_GPIO = RADEON_MDGPIO_MASK
+ * r1xx/r2xx
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_CRT2_DDC
+ * r3xx
+ * DDC_MONID = RADEON_GPIO_MONID
+ * DDC_CRT2 = RADEON_GPIO_DVI_DDC
+ * rs3xx/rs4xx
+ * DDC_MONID = RADEON_GPIOPAD_MASK
+ * DDC_CRT2 = RADEON_GPIO_MONID
+ */
+ switch (ddc) {
+ case DDC_NONE_DETECTED:
+ default:
+ ddc_line = 0;
+ break;
+ case DDC_DVI:
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ break;
+ case DDC_VGA:
+ ddc_line = RADEON_GPIO_VGA_DDC;
+ break;
+ case DDC_LCD:
+ ddc_line = RADEON_GPIOPAD_MASK;
+ break;
+ case DDC_GPIO:
+ ddc_line = RADEON_MDGPIO_MASK;
+ break;
+ case DDC_MONID:
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480)
+ ddc_line = RADEON_GPIOPAD_MASK;
+ else
+ ddc_line = RADEON_GPIO_MONID;
+ break;
+ case DDC_CRT2:
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480)
+ ddc_line = RADEON_GPIO_MONID;
+ else if (rdev->family >= CHIP_R300) {
+ ddc_line = RADEON_GPIO_DVI_DDC;
+ ddc = DDC_DVI;
+ } else
+ ddc_line = RADEON_GPIO_CRT2_DDC;
+ break;
+ }
if (ddc_line == RADEON_GPIOPAD_MASK) {
i2c.mask_clk_reg = RADEON_GPIOPAD_MASK;
@@ -503,15 +560,6 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_clk_reg = RADEON_MDGPIO_Y;
i2c.y_data_reg = RADEON_MDGPIO_Y;
} else {
- i2c.mask_clk_mask = RADEON_GPIO_EN_1;
- i2c.mask_data_mask = RADEON_GPIO_EN_0;
- i2c.a_clk_mask = RADEON_GPIO_A_1;
- i2c.a_data_mask = RADEON_GPIO_A_0;
- i2c.en_clk_mask = RADEON_GPIO_EN_1;
- i2c.en_data_mask = RADEON_GPIO_EN_0;
- i2c.y_clk_mask = RADEON_GPIO_Y_1;
- i2c.y_data_mask = RADEON_GPIO_Y_0;
-
i2c.mask_clk_reg = ddc_line;
i2c.mask_data_reg = ddc_line;
i2c.a_clk_reg = ddc_line;
@@ -522,6 +570,26 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_reg = ddc_line;
}
+ if (clk_mask && data_mask) {
+ i2c.mask_clk_mask = clk_mask;
+ i2c.mask_data_mask = data_mask;
+ i2c.a_clk_mask = clk_mask;
+ i2c.a_data_mask = data_mask;
+ i2c.en_clk_mask = clk_mask;
+ i2c.en_data_mask = data_mask;
+ i2c.y_clk_mask = clk_mask;
+ i2c.y_data_mask = data_mask;
+ } else {
+ i2c.mask_clk_mask = RADEON_GPIO_EN_1;
+ i2c.mask_data_mask = RADEON_GPIO_EN_0;
+ i2c.a_clk_mask = RADEON_GPIO_A_1;
+ i2c.a_data_mask = RADEON_GPIO_A_0;
+ i2c.en_clk_mask = RADEON_GPIO_EN_1;
+ i2c.en_data_mask = RADEON_GPIO_EN_0;
+ i2c.y_clk_mask = RADEON_GPIO_Y_1;
+ i2c.y_data_mask = RADEON_GPIO_Y_0;
+ }
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -599,7 +667,8 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
break;
}
i2c.mm_i2c = false;
- i2c.i2c_id = 0;
+
+ i2c.i2c_id = ddc;
i2c.hpd = RADEON_HPD_NONE;
if (ddc_line)
@@ -610,6 +679,62 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
return i2c;
}
+void radeon_combios_i2c_init(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct radeon_i2c_bus_rec i2c;
+
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
+ rdev->i2c_bus[0] = radeon_i2c_create(dev, &i2c, "DVI_DDC");
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
+ rdev->i2c_bus[1] = radeon_i2c_create(dev, &i2c, "VGA_DDC");
+
+ i2c.valid = true;
+ i2c.hw_capable = true;
+ i2c.mm_i2c = true;
+ i2c.i2c_id = 0xa0;
+ rdev->i2c_bus[2] = radeon_i2c_create(dev, &i2c, "MM_I2C");
+
+ if (rdev->family == CHIP_RS300 ||
+ rdev->family == CHIP_RS400 ||
+ rdev->family == CHIP_RS480) {
+ u16 offset;
+ u8 id, blocks, clk, data;
+ int i;
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+ offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
+ if (offset) {
+ blocks = RBIOS8(offset + 2);
+ for (i = 0; i < blocks; i++) {
+ id = RBIOS8(offset + 3 + (i * 5) + 0);
+ if (id == 136) {
+ clk = RBIOS8(offset + 3 + (i * 5) + 3);
+ data = RBIOS8(offset + 3 + (i * 5) + 4);
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
+ clk, data);
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
+ break;
+ }
+ }
+ }
+
+ } else if (rdev->family >= CHIP_R300) {
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+ } else {
+ i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
+
+ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
+ rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "CRT2_DDC");
+ }
+}
+
bool radeon_combios_get_clock_info(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -693,6 +818,10 @@ bool radeon_combios_sideport_present(struct radeon_device *rdev)
struct drm_device *dev = rdev->ddev;
u16 igp_info;
+ /* sideport is AMD only */
+ if (rdev->family == CHIP_RS400)
+ return false;
+
igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
if (igp_info) {
@@ -1205,7 +1334,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
RBIOS32(tmds_info + i * 10 + 0x08);
tmds->tmds_pll[i].freq =
RBIOS16(tmds_info + i * 10 + 0x10);
- DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+ DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
}
@@ -1223,7 +1352,7 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
stride += 10;
else
stride += 6;
- DRM_DEBUG("TMDS PLL From COMBIOS %u %x\n",
+ DRM_DEBUG_KMS("TMDS PLL From COMBIOS %u %x\n",
tmds->tmds_pll[i].freq,
tmds->tmds_pll[i].value);
}
@@ -1243,8 +1372,8 @@ bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
struct radeon_i2c_bus_rec i2c_bus;
/* default for macs */
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
/* XXX some macs have duallink chips */
switch (rdev->mode_info.connector_table) {
@@ -1265,47 +1394,16 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
struct drm_device *dev = encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
uint16_t offset;
- uint8_t ver, id, blocks, clk, data;
- int i;
+ uint8_t ver;
enum radeon_combios_ddc gpio;
struct radeon_i2c_bus_rec i2c_bus;
tmds->i2c_bus = NULL;
if (rdev->flags & RADEON_IS_IGP) {
- offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
- if (offset) {
- ver = RBIOS8(offset);
- DRM_INFO("GPIO Table revision: %d\n", ver);
- blocks = RBIOS8(offset + 2);
- for (i = 0; i < blocks; i++) {
- id = RBIOS8(offset + 3 + (i * 5) + 0);
- if (id == 136) {
- clk = RBIOS8(offset + 3 + (i * 5) + 3);
- data = RBIOS8(offset + 3 + (i * 5) + 4);
- i2c_bus.valid = true;
- i2c_bus.mask_clk_mask = (1 << clk);
- i2c_bus.mask_data_mask = (1 << data);
- i2c_bus.a_clk_mask = (1 << clk);
- i2c_bus.a_data_mask = (1 << data);
- i2c_bus.en_clk_mask = (1 << clk);
- i2c_bus.en_data_mask = (1 << data);
- i2c_bus.y_clk_mask = (1 << clk);
- i2c_bus.y_data_mask = (1 << data);
- i2c_bus.mask_clk_reg = RADEON_GPIOPAD_MASK;
- i2c_bus.mask_data_reg = RADEON_GPIOPAD_MASK;
- i2c_bus.a_clk_reg = RADEON_GPIOPAD_A;
- i2c_bus.a_data_reg = RADEON_GPIOPAD_A;
- i2c_bus.en_clk_reg = RADEON_GPIOPAD_EN;
- i2c_bus.en_data_reg = RADEON_GPIOPAD_EN;
- i2c_bus.y_clk_reg = RADEON_GPIOPAD_Y;
- i2c_bus.y_data_reg = RADEON_GPIOPAD_Y;
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- tmds->dvo_chip = DVO_SIL164;
- tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
- break;
- }
- }
- }
+ i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
+ tmds->dvo_chip = DVO_SIL164;
+ tmds->slave_addr = 0x70 >> 1; /* 7 bit addressing */
} else {
offset = combios_get_table_offset(dev, COMBIOS_EXT_TMDS_INFO_TABLE);
if (offset) {
@@ -1314,37 +1412,15 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
tmds->slave_addr = RBIOS8(offset + 4 + 2);
tmds->slave_addr >>= 1; /* 7 bit addressing */
gpio = RBIOS8(offset + 4 + 3);
- switch (gpio) {
- case DDC_MONID:
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_DVI:
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_VGA:
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_CRT2:
- /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
- if (rdev->family >= CHIP_R300)
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- else
- i2c_bus = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- case DDC_LCD: /* MM i2c */
+ if (gpio == DDC_LCD) {
+ /* MM i2c */
i2c_bus.valid = true;
i2c_bus.hw_capable = true;
i2c_bus.mm_i2c = true;
- tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
- break;
- default:
- DRM_ERROR("Unsupported gpio %d\n", gpio);
- break;
- }
+ i2c_bus.i2c_id = 0xa0;
+ } else
+ i2c_bus = combios_setup_i2c_bus(rdev, gpio, 0, 0);
+ tmds->i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
}
}
@@ -1426,7 +1502,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
/* these are the most common settings */
if (rdev->flags & RADEON_SINGLE_CRTC) {
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1441,7 +1517,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
} else if (rdev->flags & RADEON_IS_MOBILITY) {
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, 0);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1456,7 +1532,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1471,7 +1547,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
} else {
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1492,7 +1568,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
&hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1528,7 +1604,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (ibook)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1540,7 +1616,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* VGA - TV DAC */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1569,7 +1645,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (powerbook external tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1581,7 +1657,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - primary dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1618,7 +1694,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (powerbook internal tmds)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1630,7 +1706,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - primary dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1666,7 +1742,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (powerbook vga)\n",
rdev->mode_info.connector_table);
/* LVDS */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1678,7 +1754,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1707,7 +1783,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (mini external tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, ext tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_2; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1744,7 +1820,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (mini internal tmds)\n",
rdev->mode_info.connector_table);
/* DVI-I - tv dac, int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1780,7 +1856,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (imac g5 isight)\n",
rdev->mode_info.connector_table);
/* DVI-D - int tmds */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1792,7 +1868,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D,
&hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1821,7 +1897,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (emac)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1833,7 +1909,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
CONNECTOR_OBJECT_ID_VGA,
&hpd);
/* VGA - tv dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1862,7 +1938,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_INFO("Connector Table: %d (rn50-power)\n",
rdev->mode_info.connector_table);
/* VGA - primary dac */
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1873,7 +1949,7 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -1903,31 +1979,6 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev,
struct radeon_i2c_bus_rec *ddc_i2c,
struct radeon_hpd *hpd)
{
- struct radeon_device *rdev = dev->dev_private;
-
- /* XPRESS DDC quirks */
- if ((rdev->family == CHIP_RS400 ||
- rdev->family == CHIP_RS480) &&
- ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- else if ((rdev->family == CHIP_RS400 ||
- rdev->family == CHIP_RS480) &&
- ddc_i2c->mask_clk_reg == RADEON_GPIO_MONID) {
- *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIOPAD_MASK);
- ddc_i2c->mask_clk_mask = (0x20 << 8);
- ddc_i2c->mask_data_mask = 0x80;
- ddc_i2c->a_clk_mask = (0x20 << 8);
- ddc_i2c->a_data_mask = 0x80;
- ddc_i2c->en_clk_mask = (0x20 << 8);
- ddc_i2c->en_data_mask = 0x80;
- ddc_i2c->y_clk_mask = (0x20 << 8);
- ddc_i2c->y_data_mask = 0x80;
- }
-
- /* R3xx+ chips don't have GPIO_CRT2_DDC gpio pad */
- if ((rdev->family >= CHIP_R300) &&
- ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC)
- *ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
/* Certain IBM chipset RN50s have a BIOS reporting two VGAs,
one with VGA DDC and one with CRT2 DDC. - kill the CRT2 DDC one */
@@ -2031,27 +2082,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
connector = (tmp >> 12) & 0xf;
ddc_type = (tmp >> 8) & 0xf;
- switch (ddc_type) {
- case DDC_MONID:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_MONID);
- break;
- case DDC_DVI:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
- break;
- case DDC_VGA:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
- break;
- case DDC_CRT2:
- ddc_i2c =
- combios_setup_i2c_bus(rdev, RADEON_GPIO_CRT2_DDC);
- break;
- default:
- ddc_i2c.valid = false;
- break;
- }
+ ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
switch (connector) {
case CONNECTOR_PROPRIETARY_LEGACY:
@@ -2208,7 +2239,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
uint16_t tmds_info =
combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
- DRM_DEBUG("Found DFP table, assuming DVI connector\n");
+ DRM_DEBUG_KMS("Found DFP table, assuming DVI connector\n");
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
@@ -2221,7 +2252,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
0),
ATOM_DEVICE_DFP1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_DVI_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1;
radeon_add_legacy_connector(dev,
0,
@@ -2234,14 +2265,14 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
} else {
uint16_t crt_info =
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
- DRM_DEBUG("Found CRT table, assuming VGA connector\n");
+ DRM_DEBUG_KMS("Found CRT table, assuming VGA connector\n");
if (crt_info) {
radeon_add_legacy_encoder(dev,
radeon_get_encoder_id(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
- ddc_i2c = combios_setup_i2c_bus(rdev, RADEON_GPIO_VGA_DDC);
+ ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_connector(dev,
0,
@@ -2251,7 +2282,7 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
CONNECTOR_OBJECT_ID_VGA,
&hpd);
} else {
- DRM_DEBUG("No connector info found\n");
+ DRM_DEBUG_KMS("No connector info found\n");
return false;
}
}
@@ -2274,73 +2305,28 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
if (lcd_ddc_info) {
ddc_type = RBIOS8(lcd_ddc_info + 2);
switch (ddc_type) {
- case DDC_MONID:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_MONID);
- break;
- case DDC_DVI:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_DVI_DDC);
- break;
- case DDC_VGA:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_VGA_DDC);
- break;
- case DDC_CRT2:
- ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIO_CRT2_DDC);
- break;
case DDC_LCD:
ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_GPIOPAD_MASK);
- ddc_i2c.mask_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.mask_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.a_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.a_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.en_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.en_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.y_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.y_data_mask =
- RBIOS32(lcd_ddc_info + 7);
+ combios_setup_i2c_bus(rdev,
+ DDC_LCD,
+ RBIOS32(lcd_ddc_info + 3),
+ RBIOS32(lcd_ddc_info + 7));
+ radeon_i2c_add(rdev, &ddc_i2c, "LCD");
break;
case DDC_GPIO:
ddc_i2c =
- combios_setup_i2c_bus
- (rdev, RADEON_MDGPIO_MASK);
- ddc_i2c.mask_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.mask_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.a_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.a_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.en_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.en_data_mask =
- RBIOS32(lcd_ddc_info + 7);
- ddc_i2c.y_clk_mask =
- RBIOS32(lcd_ddc_info + 3);
- ddc_i2c.y_data_mask =
- RBIOS32(lcd_ddc_info + 7);
+ combios_setup_i2c_bus(rdev,
+ DDC_GPIO,
+ RBIOS32(lcd_ddc_info + 3),
+ RBIOS32(lcd_ddc_info + 7));
+ radeon_i2c_add(rdev, &ddc_i2c, "LCD");
break;
default:
- ddc_i2c.valid = false;
+ ddc_i2c =
+ combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
break;
}
- DRM_DEBUG("LCD DDC Info Table found!\n");
+ DRM_DEBUG_KMS("LCD DDC Info Table found!\n");
} else
ddc_i2c.valid = false;
@@ -2941,9 +2927,8 @@ static void combios_write_ram_size(struct drm_device *dev)
if (rev < 3) {
mem_cntl = RBIOS32(offset + 1);
mem_size = RBIOS16(offset + 5);
- if (((rdev->flags & RADEON_FAMILY_MASK) < CHIP_R200) &&
- ((dev->pdev->device != 0x515e)
- && (dev->pdev->device != 0x5969)))
+ if ((rdev->family < CHIP_R200) &&
+ !ASIC_IS_RN50(rdev))
WREG32(RADEON_MEM_CNTL, mem_cntl);
}
}
@@ -2954,10 +2939,8 @@ static void combios_write_ram_size(struct drm_device *dev)
if (offset) {
rev = RBIOS8(offset - 1);
if (rev < 1) {
- if (((rdev->flags & RADEON_FAMILY_MASK) <
- CHIP_R200)
- && ((dev->pdev->device != 0x515e)
- && (dev->pdev->device != 0x5969))) {
+ if ((rdev->family < CHIP_R200)
+ && !ASIC_IS_RN50(rdev)) {
int ram = 0;
int mem_addr_mapping = 0;
@@ -3121,14 +3104,14 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("TV1 connected\n");
+ DRM_DEBUG_KMS("TV1 connected\n");
/* fix me */
bios_4_scratch |= RADEON_TV1_ATTACHED_SVIDEO;
/*save->bios_4_scratch |= RADEON_TV1_ATTACHED_COMP; */
bios_5_scratch |= RADEON_TV1_ON;
bios_5_scratch |= RADEON_ACC_REQ_TV1;
} else {
- DRM_DEBUG("TV1 disconnected\n");
+ DRM_DEBUG_KMS("TV1 disconnected\n");
bios_4_scratch &= ~RADEON_TV1_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_TV1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_TV1;
@@ -3137,12 +3120,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("LCD1 connected\n");
+ DRM_DEBUG_KMS("LCD1 connected\n");
bios_4_scratch |= RADEON_LCD1_ATTACHED;
bios_5_scratch |= RADEON_LCD1_ON;
bios_5_scratch |= RADEON_ACC_REQ_LCD1;
} else {
- DRM_DEBUG("LCD1 disconnected\n");
+ DRM_DEBUG_KMS("LCD1 disconnected\n");
bios_4_scratch &= ~RADEON_LCD1_ATTACHED;
bios_5_scratch &= ~RADEON_LCD1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_LCD1;
@@ -3151,12 +3134,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT1 connected\n");
+ DRM_DEBUG_KMS("CRT1 connected\n");
bios_4_scratch |= RADEON_CRT1_ATTACHED_COLOR;
bios_5_scratch |= RADEON_CRT1_ON;
bios_5_scratch |= RADEON_ACC_REQ_CRT1;
} else {
- DRM_DEBUG("CRT1 disconnected\n");
+ DRM_DEBUG_KMS("CRT1 disconnected\n");
bios_4_scratch &= ~RADEON_CRT1_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_CRT1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_CRT1;
@@ -3165,12 +3148,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("CRT2 connected\n");
+ DRM_DEBUG_KMS("CRT2 connected\n");
bios_4_scratch |= RADEON_CRT2_ATTACHED_COLOR;
bios_5_scratch |= RADEON_CRT2_ON;
bios_5_scratch |= RADEON_ACC_REQ_CRT2;
} else {
- DRM_DEBUG("CRT2 disconnected\n");
+ DRM_DEBUG_KMS("CRT2 disconnected\n");
bios_4_scratch &= ~RADEON_CRT2_ATTACHED_MASK;
bios_5_scratch &= ~RADEON_CRT2_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_CRT2;
@@ -3179,12 +3162,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP1 connected\n");
+ DRM_DEBUG_KMS("DFP1 connected\n");
bios_4_scratch |= RADEON_DFP1_ATTACHED;
bios_5_scratch |= RADEON_DFP1_ON;
bios_5_scratch |= RADEON_ACC_REQ_DFP1;
} else {
- DRM_DEBUG("DFP1 disconnected\n");
+ DRM_DEBUG_KMS("DFP1 disconnected\n");
bios_4_scratch &= ~RADEON_DFP1_ATTACHED;
bios_5_scratch &= ~RADEON_DFP1_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_DFP1;
@@ -3193,12 +3176,12 @@ radeon_combios_connected_scratch_regs(struct drm_connector *connector,
if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) {
if (connected) {
- DRM_DEBUG("DFP2 connected\n");
+ DRM_DEBUG_KMS("DFP2 connected\n");
bios_4_scratch |= RADEON_DFP2_ATTACHED;
bios_5_scratch |= RADEON_DFP2_ON;
bios_5_scratch |= RADEON_ACC_REQ_DFP2;
} else {
- DRM_DEBUG("DFP2 disconnected\n");
+ DRM_DEBUG_KMS("DFP2 disconnected\n");
bios_4_scratch &= ~RADEON_DFP2_ATTACHED;
bios_5_scratch &= ~RADEON_DFP2_ON;
bios_5_scratch &= ~RADEON_ACC_REQ_DFP2;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index adccbc2c202c..47c4b276d30c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -214,7 +214,7 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
drm_mode_set_name(mode);
- DRM_DEBUG("Adding native panel mode %s\n", mode->name);
+ DRM_DEBUG_KMS("Adding native panel mode %s\n", mode->name);
} else if (native_mode->hdisplay != 0 &&
native_mode->vdisplay != 0) {
/* mac laptops without an edid */
@@ -226,7 +226,7 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
*/
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
- DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
+ DRM_DEBUG_KMS("Adding cvt approximation of native panel mode %s\n", mode->name);
}
return mode;
}
@@ -312,6 +312,20 @@ int radeon_connector_set_property(struct drm_connector *connector, struct drm_pr
}
}
+ if (property == rdev->mode_info.underscan_property) {
+ /* need to find digital encoder on connector */
+ encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS);
+ if (!encoder)
+ return 0;
+
+ radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->underscan_type != val) {
+ radeon_encoder->underscan_type = val;
+ radeon_property_change_mode(&radeon_encoder->base);
+ }
+ }
+
if (property == rdev->mode_info.tv_std_property) {
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC);
if (!encoder) {
@@ -504,8 +518,6 @@ static void radeon_connector_destroy(struct drm_connector *connector)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
@@ -522,7 +534,7 @@ static int radeon_lvds_set_property(struct drm_connector *connector,
struct radeon_encoder *radeon_encoder;
enum radeon_rmx_type rmx_type;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (property != dev->mode_config.scaling_mode_property)
return 0;
@@ -941,8 +953,6 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
@@ -1030,7 +1040,8 @@ radeon_add_atom_connector(struct drm_device *dev,
bool linkb,
uint32_t igp_lane_info,
uint16_t connector_object_id,
- struct radeon_hpd *hpd)
+ struct radeon_hpd *hpd,
+ struct radeon_router *router)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
@@ -1055,6 +1066,11 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = true;
shared_ddc = true;
}
+ if (radeon_connector->router_bus && router->valid &&
+ (radeon_connector->router.router_id == router->router_id)) {
+ radeon_connector->shared_ddc = false;
+ shared_ddc = false;
+ }
}
}
@@ -1069,12 +1085,18 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
+ radeon_connector->router = *router;
+ if (router->valid) {
+ radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
+ if (!radeon_connector->router_bus)
+ goto failed;
+ }
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1082,13 +1104,15 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1096,6 +1120,8 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1108,7 +1134,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1116,6 +1142,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_AUTO);
if (connector_type == DRM_MODE_CONNECTOR_DVII) {
radeon_connector->dac_load_detect = true;
drm_connector_attach_property(&radeon_connector->base,
@@ -1134,13 +1164,17 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_AUTO);
subpixel_order = SubPixelHorizontalRGB;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
@@ -1161,10 +1195,7 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
if (!radeon_dig_connector->dp_i2c_bus)
goto failed;
- if (connector_type == DRM_MODE_CONNECTOR_eDP)
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
- else
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1172,6 +1203,10 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.coherent_mode_property,
1);
+ if (ASIC_IS_AVIVO(rdev))
+ drm_connector_attach_property(&radeon_connector->base,
+ rdev->mode_info.underscan_property,
+ UNDERSCAN_AUTO);
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1186,6 +1221,8 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_atombios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
}
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -1198,7 +1235,7 @@ radeon_add_atom_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1209,7 +1246,7 @@ radeon_add_atom_connector(struct drm_device *dev,
break;
}
- if (hpd->hpd == RADEON_HPD_NONE) {
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
@@ -1220,8 +1257,6 @@ radeon_add_atom_connector(struct drm_device *dev,
return;
failed:
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}
@@ -1268,7 +1303,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1276,13 +1311,15 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1290,13 +1327,15 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.load_detect_property,
1);
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1328,13 +1367,15 @@ radeon_add_legacy_connector(struct drm_device *dev,
drm_connector_attach_property(&radeon_connector->base,
rdev->mode_info.tv_std_property,
radeon_combios_get_tv_info(rdev));
+ /* no HPD on analog connectors */
+ radeon_connector->hpd.hpd = RADEON_HPD_NONE;
}
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
if (i2c_bus->valid) {
- radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
+ radeon_connector->ddc_bus = radeon_i2c_lookup(rdev, i2c_bus);
if (!radeon_connector->ddc_bus)
goto failed;
}
@@ -1345,7 +1386,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
break;
}
- if (hpd->hpd == RADEON_HPD_NONE) {
+ if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
if (i2c_bus->valid)
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
} else
@@ -1355,8 +1396,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
return;
failed:
- if (radeon_connector->ddc_bus)
- radeon_i2c_destroy(radeon_connector->ddc_bus);
drm_connector_cleanup(connector);
kfree(connector);
}
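The connector rework above does three related things: DDC buses are now fetched from a device-level table with radeon_i2c_lookup() instead of being created and destroyed per connector (so the destroy paths no longer free them), DDC router information is copied into the connector and clears the shared-ddc flag when two connectors sit behind the same router, and analog connectors force their HPD pin to RADEON_HPD_NONE so the polling decision keys off the connector's own copy. Digital connectors on AVIVO-class parts also pick up the new "underscan" property. As a hedged sketch of how that property could be driven from userspace (libdrm calls; the helper name is illustrative, and the value 1 assumes the off/on/auto ordering of the enum list added in radeon_display.c further down):

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: switch a connector's "underscan" property to "on". */
static int radeon_set_underscan_on(int fd, uint32_t connector_id)
{
	drmModeConnector *conn = drmModeGetConnector(fd, connector_id);
	int i, ret = -1;

	for (i = 0; conn && i < conn->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, conn->props[i]);

		if (prop && !strcmp(prop->name, "underscan"))
			ret = drmModeConnectorSetProperty(fd, connector_id,
							  prop->prop_id, 1);
		drmModeFreeProperty(prop);
	}
	drmModeFreeConnector(conn);
	return ret;
}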
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 2f042a3c0e62..eb6b9eed7349 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2120,8 +2120,8 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
else
dev_priv->flags |= RADEON_IS_PCI;
- ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
- drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+ ret = drm_addmap(dev, pci_resource_start(dev->pdev, 2),
+ pci_resource_len(dev->pdev, 2), _DRM_REGISTERS,
_DRM_READ_ONLY | _DRM_DRIVER, &dev_priv->mmio);
if (ret != 0)
return ret;
@@ -2194,9 +2194,9 @@ int radeon_driver_firstopen(struct drm_device *dev)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
- dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0);
+ dev_priv->fb_aper_offset = pci_resource_start(dev->pdev, 0);
ret = drm_addmap(dev, dev_priv->fb_aper_offset,
- drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+ pci_resource_len(dev->pdev, 0), _DRM_FRAME_BUFFER,
_DRM_WRITE_COMBINING, &map);
if (ret != 0)
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ae0fb7356e62..fcc79b5d22d1 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -72,7 +72,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
if (p->relocs[i].gobj == NULL) {
DRM_ERROR("gem object lookup failed 0x%x\n",
r->handle);
- return -EINVAL;
+ return -ENOENT;
}
p->relocs_ptr[i] = &p->relocs[i];
p->relocs[i].robj = p->relocs[i].gobj->driver_private;
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 4eb67c0e0996..5731fc9b1ae3 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -170,7 +170,7 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
- return -EINVAL;
+ return -ENOENT;
}
ret = radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index dd279da90546..4f7a170d1566 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -347,7 +347,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
return -ENOMEM;
rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (!rdev->dummy_page.addr) {
+ if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
+ dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
__free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
return -ENOMEM;
@@ -415,6 +416,22 @@ static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
return r;
}
+static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+
+ WREG32_IO(reg*4, val);
+}
+
+static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
+{
+ struct radeon_device *rdev = info->dev->dev_private;
+ uint32_t r;
+
+ r = RREG32_IO(reg*4);
+ return r;
+}
+
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
@@ -427,6 +444,15 @@ int radeon_atombios_init(struct radeon_device *rdev)
atom_card_info->dev = rdev->ddev;
atom_card_info->reg_read = cail_reg_read;
atom_card_info->reg_write = cail_reg_write;
+ /* needed for iio ops */
+ if (rdev->rio_mem) {
+ atom_card_info->ioreg_read = cail_ioreg_read;
+ atom_card_info->ioreg_write = cail_ioreg_write;
+ } else {
+ DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
+ atom_card_info->ioreg_read = cail_reg_read;
+ atom_card_info->ioreg_write = cail_reg_write;
+ }
atom_card_info->mc_read = cail_mc_read;
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;
@@ -573,7 +599,7 @@ int radeon_device_init(struct radeon_device *rdev,
struct pci_dev *pdev,
uint32_t flags)
{
- int r;
+ int r, i;
int dma_bits;
rdev->shutdown = false;
@@ -650,8 +676,8 @@ int radeon_device_init(struct radeon_device *rdev,
/* Registers mapping */
/* TODO: block userspace mapping of io register */
- rdev->rmmio_base = drm_get_resource_start(rdev->ddev, 2);
- rdev->rmmio_size = drm_get_resource_len(rdev->ddev, 2);
+ rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
+ rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
if (rdev->rmmio == NULL) {
return -ENOMEM;
@@ -659,6 +685,17 @@ int radeon_device_init(struct radeon_device *rdev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
+ /* io port mapping */
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
+ rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
+ rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
+ break;
+ }
+ }
+ if (rdev->rio_mem == NULL)
+ DRM_ERROR("Unable to find PCI I/O BAR\n");
+
/* if we have > 1 VGA cards, then disable the radeon VGA resources */
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
@@ -701,6 +738,9 @@ void radeon_device_fini(struct radeon_device *rdev)
destroy_workqueue(rdev->wq);
vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
+ if (rdev->rio_mem)
+ pci_iounmap(rdev->pdev, rdev->rio_mem);
+ rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
}
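Besides the pci_resource_*() conversion and the stricter pci_dma_mapping_error() check on the dummy page, device init now walks the PCI BARs for an I/O-port resource and maps it with pci_iomap(), so the ATOM interpreter can use real port accesses for its IIO tables, falling back to plain MMIO when no I/O BAR is found, and pci_iounmap() undoes it in radeon_device_fini(). The WREG32_IO/RREG32_IO accessors used by the new cail_ioreg_*() callbacks are defined in radeon.h rather than in this hunk; a hedged sketch of what they amount to, assuming rio_mem holds the pci_iomap() cookie:

/* Minimal sketch, not the actual macro bodies: accesses go through the
 * pci_iomap() cookie with ioread32/iowrite32, which pick port or MMIO
 * access depending on the BAR type. */
static inline u32 rio_read32(struct radeon_device *rdev, u32 offset)
{
	return ioread32(rdev->rio_mem + offset);
}

static inline void rio_write32(struct radeon_device *rdev, u32 offset, u32 v)
{
	iowrite32(v, rdev->rio_mem + offset);
}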
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8154cdf796e4..5764f4d3b4f1 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -42,7 +42,7 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
int i;
- DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
WREG32(AVIVO_DC_LUTA_CONTROL + radeon_crtc->crtc_offset, 0);
WREG32(AVIVO_DC_LUTA_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
@@ -75,7 +75,7 @@ static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
struct radeon_device *rdev = dev->dev_private;
int i;
- DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id);
WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
@@ -161,17 +161,13 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
}
static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
- u16 *blue, uint32_t size)
+ u16 *blue, uint32_t start, uint32_t size)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- int i;
-
- if (size != 256) {
- return;
- }
+ int end = (start + size > 256) ? 256 : start + size, i;
/* userspace palettes are always correct as is */
- for (i = 0; i < 256; i++) {
+ for (i = start; i < end; i++) {
radeon_crtc->lut_r[i] = red[i] >> 6;
radeon_crtc->lut_g[i] = green[i] >> 6;
radeon_crtc->lut_b[i] = blue[i] >> 6;
@@ -319,6 +315,10 @@ static void radeon_print_display_setup(struct drm_device *dev)
radeon_connector->ddc_bus->rec.en_data_reg,
radeon_connector->ddc_bus->rec.y_clk_reg,
radeon_connector->ddc_bus->rec.y_data_reg);
+ if (radeon_connector->router_bus)
+ DRM_INFO(" DDC Router 0x%x/0x%x\n",
+ radeon_connector->router.mux_control_pin,
+ radeon_connector->router.mux_state);
} else {
if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -395,6 +395,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
struct radeon_device *rdev = dev->dev_private;
int ret = 0;
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.valid)
+ radeon_router_select_port(radeon_connector);
+
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
@@ -425,6 +429,10 @@ static int radeon_ddc_dump(struct drm_connector *connector)
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret = 0;
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.valid)
+ radeon_router_select_port(radeon_connector);
+
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
@@ -469,7 +477,7 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint32_t post_div;
u32 pll_out_min, pll_out_max;
- DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
+ DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
if (pll->flags & RADEON_PLL_IS_LCD) {
@@ -558,15 +566,17 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
current_freq = radeon_div(tmp, ref_div * post_div);
if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
- error = freq - current_freq;
- error = error < 0 ? 0xffffffff : error;
+ if (freq < current_freq)
+ error = 0xffffffff;
+ else
+ error = freq - current_freq;
} else
error = abs(current_freq - freq);
vco_diff = abs(vco - best_vco);
if ((best_vco == 0 && error < best_error) ||
(best_vco != 0 &&
- (error < best_error - 100 ||
+ ((best_error > 100 && error < best_error - 100) ||
(abs(error - best_error) < 100 && vco_diff < best_vco_diff)))) {
best_post_div = post_div;
best_ref_div = ref_div;
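The two arithmetic changes in the hunk above close unsigned wraparound holes: error is a 32-bit unsigned value, so the old "error < 0 ? 0xffffffff : error" guard could never fire after "freq - current_freq" had already wrapped, and "best_error - 100" itself wraps whenever best_error is below 100, which could let a worse divider win the comparison. A small self-contained illustration of the failure mode (the variable names mirror the function, the numbers are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t freq = 100000, current_freq = 100200, best_error = 50;
	uint32_t error;

	error = freq - current_freq;	/* wraps to 4294967096            */
	printf("old error: %u (an unsigned value is never < 0)\n", error);

	/* fixed form: test before subtracting */
	error = (freq < current_freq) ? 0xffffffff : freq - current_freq;

	/* fixed comparison: only subtract 100 when it cannot wrap */
	if (best_error > 100 && error < best_error - 100)
		printf("accept new divider\n");
	else
		printf("keep previous best\n");
	return 0;
}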
@@ -803,7 +813,7 @@ done:
*ref_div_p = ref_div;
*post_div_p = post_div;
- DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+ DRM_DEBUG_KMS("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
}
void radeon_compute_pll(struct radeon_pll *pll,
@@ -874,13 +884,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
"can't create framebuffer\n", mode_cmd->handle);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
radeon_fb = kzalloc(sizeof(*radeon_fb), GFP_KERNEL);
- if (radeon_fb == NULL) {
- return NULL;
- }
+ if (radeon_fb == NULL)
+ return ERR_PTR(-ENOMEM);
radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
@@ -919,6 +928,12 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
{ TV_STD_SECAM, "secam" },
};
+static struct drm_prop_enum_list radeon_underscan_enum_list[] =
+{ { UNDERSCAN_OFF, "off" },
+ { UNDERSCAN_ON, "on" },
+ { UNDERSCAN_AUTO, "auto" },
+};
+
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
@@ -972,6 +987,18 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
radeon_tv_std_enum_list[i].name);
}
+ sz = ARRAY_SIZE(radeon_underscan_enum_list);
+ rdev->mode_info.underscan_property =
+ drm_property_create(rdev->ddev,
+ DRM_MODE_PROP_ENUM,
+ "underscan", sz);
+ for (i = 0; i < sz; i++) {
+ drm_property_add_enum(rdev->mode_info.underscan_property,
+ i,
+ radeon_underscan_enum_list[i].type,
+ radeon_underscan_enum_list[i].name);
+ }
+
return 0;
}
@@ -1020,6 +1047,9 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
+ /* init i2c buses */
+ radeon_i2c_init(rdev);
+
/* check combios for a valid hardcoded EDID - Sun servers */
if (!rdev->is_atom_bios) {
/* check for hardcoded EDID in BIOS */
@@ -1060,6 +1090,8 @@ void radeon_modeset_fini(struct radeon_device *rdev)
drm_mode_config_cleanup(rdev->ddev);
rdev->mode_info.mode_config_initialized = false;
}
+ /* free i2c buses */
+ radeon_i2c_fini(rdev);
}
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
@@ -1067,15 +1099,26 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_encoder *radeon_encoder;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
bool first = true;
+ u32 src_v = 1, dst_v = 1;
+ u32 src_h = 1, dst_h = 1;
+
+ radeon_crtc->h_border = 0;
+ radeon_crtc->v_border = 0;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- radeon_encoder = to_radeon_encoder(encoder);
if (encoder->crtc != crtc)
continue;
+ radeon_encoder = to_radeon_encoder(encoder);
+ connector = radeon_get_connector_for_encoder(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
if (first) {
/* set scaling */
if (radeon_encoder->rmx_type == RMX_OFF)
@@ -1085,31 +1128,49 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
radeon_crtc->rmx_type = radeon_encoder->rmx_type;
else
radeon_crtc->rmx_type = RMX_OFF;
+ src_v = crtc->mode.vdisplay;
+ dst_v = radeon_crtc->native_mode.vdisplay;
+ src_h = crtc->mode.hdisplay;
+ dst_h = radeon_crtc->native_mode.vdisplay;
/* copy native mode */
memcpy(&radeon_crtc->native_mode,
&radeon_encoder->native_mode,
sizeof(struct drm_display_mode));
+
+ /* fix up for overscan on hdmi */
+ if (ASIC_IS_AVIVO(rdev) &&
+ ((radeon_encoder->underscan_type == UNDERSCAN_ON) ||
+ ((radeon_encoder->underscan_type == UNDERSCAN_AUTO) &&
+ drm_detect_hdmi_monitor(radeon_connector->edid)))) {
+ radeon_crtc->h_border = (mode->hdisplay >> 5) + 16;
+ radeon_crtc->v_border = (mode->vdisplay >> 5) + 16;
+ radeon_crtc->rmx_type = RMX_FULL;
+ src_v = crtc->mode.vdisplay;
+ dst_v = crtc->mode.vdisplay - (radeon_crtc->v_border * 2);
+ src_h = crtc->mode.hdisplay;
+ dst_h = crtc->mode.hdisplay - (radeon_crtc->h_border * 2);
+ }
first = false;
} else {
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) {
/* WARNING: Right now this can't happen but
* in the future we need to check that scaling
- * are consistent accross different encoder
+ * are consistent across different encoder
* (ie all encoder can work with the same
* scaling).
*/
- DRM_ERROR("Scaling not consistent accross encoder.\n");
+ DRM_ERROR("Scaling not consistent across encoder.\n");
return false;
}
}
}
if (radeon_crtc->rmx_type != RMX_OFF) {
fixed20_12 a, b;
- a.full = dfixed_const(crtc->mode.vdisplay);
- b.full = dfixed_const(radeon_crtc->native_mode.hdisplay);
+ a.full = dfixed_const(src_v);
+ b.full = dfixed_const(dst_v);
radeon_crtc->vsc.full = dfixed_div(a, b);
- a.full = dfixed_const(crtc->mode.hdisplay);
- b.full = dfixed_const(radeon_crtc->native_mode.vdisplay);
+ a.full = dfixed_const(src_h);
+ b.full = dfixed_const(dst_h);
radeon_crtc->hsc.full = dfixed_div(a, b);
} else {
radeon_crtc->vsc.full = dfixed_const(1);
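The reworked radeon_crtc_scaling_mode_fixup() is what makes the underscan property take effect: with underscan on, or on auto when drm_detect_hdmi_monitor() says the sink is HDMI, the CRTC grows black borders, forces the RMX scaler to RMX_FULL and derives the vertical/horizontal scale factors from explicit src/dst sizes instead of always using the panel native mode. A worked example of the border expressions for a 1920x1080 mode (plain arithmetic, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t hdisplay = 1920, vdisplay = 1080;
	uint32_t h_border = (hdisplay >> 5) + 16;	/* 60 + 16 = 76 */
	uint32_t v_border = (vdisplay >> 5) + 16;	/* 33 + 16 = 49 */

	printf("active %ux%u inside %ux%u\n",
	       hdisplay - 2 * h_border, vdisplay - 2 * v_border,
	       hdisplay, vdisplay);			/* 1768x982 */
	return 0;
}

The vsc/hsc ratios then come out of dfixed_div() as src/dst in 20.12 fixed point, roughly 1.10 vertically and 1.09 horizontally for this example.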
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index e166fe4d7c30..795403b0e2cd 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -46,9 +46,10 @@
* - 2.3.0 - add MSPOS + 3D texture + r500 VAP regs
* - 2.4.0 - add crtc id query
* - 2.5.0 - add get accel 2 to work around ddx breakage for evergreen
+ * - 2.6.0 - add tiling config query (r6xx+), add initial HiZ support (r300->r500)
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 5
+#define KMS_DRIVER_MINOR 6
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -238,7 +239,7 @@ static struct drm_driver kms_driver;
static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &kms_driver);
+ return drm_get_pci_dev(pdev, ent, &kms_driver);
}
static void
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index e0b30b264c28..263c8098d7dd 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -205,14 +205,14 @@ void radeon_encoder_set_active_device(struct drm_encoder *encoder)
if (connector->encoder == encoder) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices;
- DRM_DEBUG("setting active device to %08x from %08x %08x for encoder %d\n",
+ DRM_DEBUG_KMS("setting active device to %08x from %08x %08x for encoder %d\n",
radeon_encoder->active_device, radeon_encoder->devices,
radeon_connector->devices, encoder->encoder_type);
}
}
}
-static struct drm_connector *
+struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -1021,7 +1021,7 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
memset(&args, 0, sizeof(args));
- DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
+ DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
switch (radeon_encoder->encoder_id) {
@@ -1484,7 +1484,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
uint32_t bios_0_scratch;
if (!atombios_dac_load_detect(encoder, connector)) {
- DRM_DEBUG("detect returned false \n");
+ DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
@@ -1493,7 +1493,7 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
else
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
- DRM_DEBUG("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
+ DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
@@ -1694,6 +1694,7 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
radeon_encoder->encoder_id = encoder_id;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
+ radeon_encoder->underscan_type = UNDERSCAN_OFF;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
@@ -1707,6 +1708,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_encoder->underscan_type = UNDERSCAN_AUTO;
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
@@ -1736,6 +1739,8 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
+ if (ASIC_IS_AVIVO(rdev))
+ radeon_encoder->underscan_type = UNDERSCAN_AUTO;
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index dc1634bb0c11..dbf86962bdd1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -224,7 +224,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
- info->flags = FBINFO_DEFAULT;
+ info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start;
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index a72a3ee5d69b..c578f265b24c 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -226,7 +226,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
@@ -245,7 +245,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
@@ -264,7 +264,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, &cur_placement, true);
@@ -294,7 +294,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
- return -EINVAL;
+ return -ENOENT;
}
robj = gobj->driver_private;
r = radeon_bo_wait(robj, NULL, false);
@@ -316,7 +316,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG("%d \n", args->handle);
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
- return -EINVAL;
+ return -ENOENT;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
drm_gem_object_unreference_unlocked(gobj);
@@ -334,7 +334,7 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
DRM_DEBUG("\n");
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL)
- return -EINVAL;
+ return -ENOENT;
rbo = gobj->driver_private;
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
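Across the CS, cursor and GEM ioctls a failed drm_gem_object_lookup() now reports -ENOENT instead of -EINVAL, so userspace can tell a stale or never-created handle apart from a malformed request. A hedged userspace sketch of the distinction (DRM_IOCTL_RADEON_GEM_WAIT_IDLE and struct drm_radeon_gem_wait_idle are the existing UAPI; the messages are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <radeon_drm.h>	/* via libdrm's include path */

static void wait_bo_idle(int fd, uint32_t handle)
{
	struct drm_radeon_gem_wait_idle req = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_RADEON_GEM_WAIT_IDLE, &req) == 0)
		return;
	if (errno == ENOENT)
		fprintf(stderr, "stale GEM handle %u\n", handle);
	else if (errno == EINVAL)
		fprintf(stderr, "malformed request\n");
}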
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 5def6f5dff38..bfd2ce5f5372 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -52,6 +52,10 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
};
+ /* on hw with routers, select right port */
+ if (radeon_connector->router.valid)
+ radeon_router_select_port(radeon_connector);
+
ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
if (ret == 2)
return true;
@@ -960,6 +964,59 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
kfree(i2c);
}
+/* Add the default buses */
+void radeon_i2c_init(struct radeon_device *rdev)
+{
+ if (rdev->is_atom_bios)
+ radeon_atombios_i2c_init(rdev);
+ else
+ radeon_combios_i2c_init(rdev);
+}
+
+/* remove all the buses */
+void radeon_i2c_fini(struct radeon_device *rdev)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (rdev->i2c_bus[i]) {
+ radeon_i2c_destroy(rdev->i2c_bus[i]);
+ rdev->i2c_bus[i] = NULL;
+ }
+ }
+}
+
+/* Add additional buses */
+void radeon_i2c_add(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name)
+{
+ struct drm_device *dev = rdev->ddev;
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (!rdev->i2c_bus[i]) {
+ rdev->i2c_bus[i] = radeon_i2c_create(dev, rec, name);
+ return;
+ }
+ }
+}
+
+/* looks up bus based on id */
+struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *i2c_bus)
+{
+ int i;
+
+ for (i = 0; i < RADEON_MAX_I2C_BUS; i++) {
+ if (rdev->i2c_bus[i] &&
+ (rdev->i2c_bus[i]->rec.i2c_id == i2c_bus->i2c_id)) {
+ return rdev->i2c_bus[i];
+ }
+ }
+ return NULL;
+}
+
struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
{
return NULL;
@@ -1020,3 +1077,28 @@ void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
addr, val);
}
+/* router switching */
+void radeon_router_select_port(struct radeon_connector *radeon_connector)
+{
+ u8 val;
+
+ if (!radeon_connector->router.valid)
+ return;
+
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, &val);
+ val &= radeon_connector->router.mux_control_pin;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x3, val);
+ radeon_i2c_get_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, &val);
+ val &= radeon_connector->router.mux_control_pin;
+ val |= radeon_connector->router.mux_state;
+ radeon_i2c_put_byte(radeon_connector->router_bus,
+ radeon_connector->router.i2c_addr,
+ 0x1, val);
+}
+
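radeon_i2c.c now keeps every GPIO i2c channel in one device-level table: the BIOS table parsers (radeon_atombios_i2c_init()/radeon_combios_i2c_init(), added elsewhere in this series) register each bus once through radeon_i2c_add(), connectors obtain borrowed pointers with radeon_i2c_lookup() keyed on i2c_id, and only radeon_i2c_fini() frees them, which is why the connector destroy paths above lost their radeon_i2c_destroy() calls. radeon_router_select_port() then performs a read-modify-write over the router chip's own i2c bus before any DDC traffic so a shared line reaches the intended downstream port. A hedged sketch of how a caller is expected to obtain a bus (the helper name is illustrative, not part of the patch):

/* Illustrative register-on-miss lookup built from the new API. */
static struct radeon_i2c_chan *
radeon_get_ddc_bus(struct radeon_device *rdev, struct radeon_i2c_bus_rec *rec)
{
	struct radeon_i2c_chan *bus = radeon_i2c_lookup(rdev, rec);

	if (!bus) {
		radeon_i2c_add(rdev, rec, "DDC");	/* one-time registration */
		bus = radeon_i2c_lookup(rdev, rec);
	}
	return bus;	/* borrowed; freed only by radeon_i2c_fini() */
}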
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index ab389f89fa8d..b1c8ace5f080 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -49,7 +49,7 @@ int radeon_driver_unload_kms(struct drm_device *dev)
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
struct radeon_device *rdev;
- int r;
+ int r, acpi_status;
rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
if (rdev == NULL) {
@@ -77,6 +77,12 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
goto out;
}
+
+ /* Call ACPI methods */
+ acpi_status = radeon_acpi_init(rdev);
+ if (acpi_status)
+ dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n");
+
/* Again modeset_init should fail only on fatal error
* otherwise it should provide enough functionalities
* for shadowfb to run
@@ -106,7 +112,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
info = data;
value_ptr = (uint32_t *)((unsigned long)info->value);
- value = *value_ptr;
+ if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value)))
+ return -EFAULT;
+
switch (info->request) {
case RADEON_INFO_DEVICE_ID:
value = dev->pci_device;
@@ -135,15 +143,50 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
}
if (!found) {
- DRM_DEBUG("unknown crtc id %d\n", value);
+ DRM_DEBUG_KMS("unknown crtc id %d\n", value);
return -EINVAL;
}
break;
case RADEON_INFO_ACCEL_WORKING2:
value = rdev->accel_working;
break;
+ case RADEON_INFO_TILING_CONFIG:
+ if (rdev->family >= CHIP_CEDAR)
+ value = rdev->config.evergreen.tile_config;
+ else if (rdev->family >= CHIP_RV770)
+ value = rdev->config.rv770.tile_config;
+ else if (rdev->family >= CHIP_R600)
+ value = rdev->config.r600.tile_config;
+ else {
+ DRM_DEBUG_KMS("tiling config is r6xx+ only!\n");
+ return -EINVAL;
+ }
+ case RADEON_INFO_WANT_HYPERZ:
+ /* The "value" here is both an input and output parameter.
+ * If the input value is 1, filp requests hyper-z access.
+ * If the input value is 0, filp revokes its hyper-z access.
+ *
+ * When returning, the value is 1 if filp owns hyper-z access,
+ * 0 otherwise. */
+ if (value >= 2) {
+ DRM_DEBUG_KMS("WANT_HYPERZ: invalid value %d\n", value);
+ return -EINVAL;
+ }
+ mutex_lock(&dev->struct_mutex);
+ if (value == 1) {
+ /* wants hyper-z */
+ if (!rdev->hyperz_filp)
+ rdev->hyperz_filp = filp;
+ } else if (value == 0) {
+ /* revokes hyper-z */
+ if (rdev->hyperz_filp == filp)
+ rdev->hyperz_filp = NULL;
+ }
+ value = rdev->hyperz_filp == filp ? 1 : 0;
+ mutex_unlock(&dev->struct_mutex);
+ break;
default:
- DRM_DEBUG("Invalid request %d\n", info->request);
+ DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
}
if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) {
@@ -181,9 +224,11 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
void radeon_driver_preclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+ if (rdev->hyperz_filp == file_priv)
+ rdev->hyperz_filp = NULL;
}
-
/*
* VBlank related functions.
*/
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index e1e5255396ac..989df519a1e4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -362,10 +362,10 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
uint32_t gen_cntl_reg, gen_cntl_val;
int r;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
/* no fb bound */
if (!crtc->fb) {
- DRM_DEBUG("No FB bound\n");
+ DRM_DEBUG_KMS("No FB bound\n");
return 0;
}
@@ -528,7 +528,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
uint32_t crtc_v_sync_strt_wid;
bool is_tv = false;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -757,7 +757,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
}
}
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (!use_bios_divs) {
radeon_compute_pll(pll, mode->clock,
@@ -772,7 +772,7 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!post_div->divider)
post_div = &post_divs[0];
- DRM_DEBUG("dc=%u, fd=%d, rd=%d, pd=%d\n",
+ DRM_DEBUG_KMS("dc=%u, fd=%d, rd=%d, pd=%d\n",
(unsigned)freq,
feedback_div,
reference_div,
@@ -841,12 +841,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
| RADEON_P2PLL_SLEEP
| RADEON_P2PLL_ATOMIC_UPDATE_EN));
- DRM_DEBUG("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ DRM_DEBUG_KMS("Wrote2: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
(unsigned)pll_ref_div,
(unsigned)pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_P2PLL_CNTL));
- DRM_DEBUG("Wrote2: rd=%u, fd=%u, pd=%u\n",
+ DRM_DEBUG_KMS("Wrote2: rd=%u, fd=%u, pd=%u\n",
(unsigned)pll_ref_div & RADEON_P2PLL_REF_DIV_MASK,
(unsigned)pll_fb_post_div & RADEON_P2PLL_FB0_DIV_MASK,
(unsigned)((pll_fb_post_div &
@@ -947,12 +947,12 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
| RADEON_PPLL_ATOMIC_UPDATE_EN
| RADEON_PPLL_VGA_ATOMIC_UPDATE_EN));
- DRM_DEBUG("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
+ DRM_DEBUG_KMS("Wrote: 0x%08x 0x%08x 0x%08x (0x%08x)\n",
pll_ref_div,
pll_fb_post_div,
(unsigned)htotal_cntl,
RREG32_PLL(RADEON_PPLL_CNTL));
- DRM_DEBUG("Wrote: rd=%d, fd=%d, pd=%d\n",
+ DRM_DEBUG_KMS("Wrote: rd=%d, fd=%d, pd=%d\n",
pll_ref_div & RADEON_PPLL_REF_DIV_MASK,
pll_fb_post_div & RADEON_PPLL_FB3_DIV_MASK,
(pll_fb_post_div & RADEON_PPLL_POST3_DIV_MASK) >> 16);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 5688a0cf6bbe..b8149cbc0c70 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -47,7 +47,7 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
uint32_t lvds_gen_cntl, lvds_pll_cntl, pixclks_cntl, disp_pwr_man;
int panel_pwr_delay = 2000;
bool is_mac = false;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
@@ -151,7 +151,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t lvds_pll_cntl, lvds_gen_cntl, lvds_ss_gen_cntl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
@@ -167,7 +167,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
} else {
struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
if (lvds) {
- DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
+ DRM_DEBUG_KMS("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
lvds_gen_cntl = lvds->lvds_gen_cntl;
lvds_ss_gen_cntl &= ~((0xf << RADEON_LVDS_PWRSEQ_DELAY1_SHIFT) |
(0xf << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
@@ -250,7 +250,7 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
uint32_t dac_cntl = RREG32(RADEON_DAC_CNTL);
uint32_t dac_macro_cntl = RREG32(RADEON_DAC_MACRO_CNTL);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -315,7 +315,7 @@ static void radeon_legacy_primary_dac_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t disp_output_cntl, dac_cntl, dac2_cntl, dac_macro_cntl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (radeon_crtc->crtc_id == 0) {
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) {
@@ -446,7 +446,7 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t fp_gen_cntl = RREG32(RADEON_FP_GEN_CNTL);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -502,7 +502,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
uint32_t tmp, tmds_pll_cntl, tmds_transmitter_cntl, fp_gen_cntl;
int i;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL);
tmp &= 0xfffff;
@@ -610,7 +610,7 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL);
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -666,7 +666,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t fp2_gen_cntl;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
if (rdev->is_atom_bios) {
radeon_encoder->pixel_clock = adjusted_mode->clock;
@@ -760,7 +760,7 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0;
uint32_t tv_master_cntl = 0;
bool is_tv;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
@@ -878,7 +878,7 @@ static void radeon_legacy_tv_dac_mode_set(struct drm_encoder *encoder,
uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0;
bool is_tv = false;
- DRM_DEBUG("\n");
+ DRM_DEBUG_KMS("\n");
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false;
@@ -1075,10 +1075,10 @@ static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
tmp = RREG32(RADEON_TV_DAC_CNTL);
if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
found = true;
- DRM_DEBUG("S-video TV connection detected\n");
+ DRM_DEBUG_KMS("S-video TV connection detected\n");
} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
found = true;
- DRM_DEBUG("Composite TV connection detected\n");
+ DRM_DEBUG_KMS("Composite TV connection detected\n");
}
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
@@ -1141,10 +1141,10 @@ static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
tmp = RREG32(RADEON_TV_DAC_CNTL);
if (tmp & RADEON_TV_DAC_GDACDET) {
found = true;
- DRM_DEBUG("S-video TV connection detected\n");
+ DRM_DEBUG_KMS("S-video TV connection detected\n");
} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
found = true;
- DRM_DEBUG("Composite TV connection detected\n");
+ DRM_DEBUG_KMS("Composite TV connection detected\n");
}
WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 032040397743..c7b6cb428d09 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -496,7 +496,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
restart -= v_offset + h_offset;
- DRM_DEBUG("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
+ DRM_DEBUG_KMS("compute_restarts: def = %u h = %d v = %d, p1 = %04x, p2 = %04x, restart = %d\n",
const_ptr->def_restart, tv_dac->h_pos, tv_dac->v_pos, p1, p2, restart);
tv_dac->tv.hrestart = restart % h_total;
@@ -505,7 +505,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
restart /= v_total;
tv_dac->tv.frestart = restart % f_total;
- DRM_DEBUG("compute_restart: F/H/V=%u,%u,%u\n",
+ DRM_DEBUG_KMS("compute_restart: F/H/V=%u,%u,%u\n",
(unsigned)tv_dac->tv.frestart,
(unsigned)tv_dac->tv.vrestart,
(unsigned)tv_dac->tv.hrestart);
@@ -523,7 +523,7 @@ static bool radeon_legacy_tv_init_restarts(struct drm_encoder *encoder)
tv_dac->tv.timing_cntl = (tv_dac->tv.timing_cntl & ~RADEON_H_INC_MASK) |
((u32)h_inc << RADEON_H_INC_SHIFT);
- DRM_DEBUG("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
+ DRM_DEBUG_KMS("compute_restart: h_size = %d h_inc = %d\n", tv_dac->h_size, h_inc);
return h_changed;
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 95696aa57ac8..5bbc086b9267 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -66,6 +66,12 @@ enum radeon_tv_std {
TV_STD_PAL_N,
};
+enum radeon_underscan_type {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+};
+
enum radeon_hpd_id {
RADEON_HPD_1 = 0,
RADEON_HPD_2,
@@ -76,6 +82,8 @@ enum radeon_hpd_id {
RADEON_HPD_NONE = 0xff,
};
+#define RADEON_MAX_I2C_BUS 16
+
/* radeon gpio-based i2c
* 1. "mask" reg and bits
* grabs the gpio pins for software use
@@ -226,10 +234,12 @@ struct radeon_mode_info {
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
struct drm_property *load_detect_property;
- /* TV standard load detect */
+ /* TV standard */
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;
+ /* underscan */
+ struct drm_property *underscan_property;
/* hardcoded DFP edid from BIOS */
struct edid *bios_hardcoded_edid;
@@ -266,6 +276,8 @@ struct radeon_crtc {
uint32_t legacy_display_base_addr;
uint32_t legacy_cursor_offset;
enum radeon_rmx_type rmx_type;
+ u8 h_border;
+ u8 v_border;
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
@@ -354,6 +366,7 @@ struct radeon_encoder {
uint32_t flags;
uint32_t pixel_clock;
enum radeon_rmx_type rmx_type;
+ enum radeon_underscan_type underscan_type;
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
@@ -387,12 +400,22 @@ struct radeon_hpd {
struct radeon_gpio_rec gpio;
};
+struct radeon_router {
+ bool valid;
+ u32 router_id;
+ struct radeon_i2c_bus_rec i2c_info;
+ u8 i2c_addr;
+ u8 mux_type;
+ u8 mux_control_pin;
+ u8 mux_state;
+};
+
struct radeon_connector {
struct drm_connector base;
uint32_t connector_id;
uint32_t devices;
struct radeon_i2c_chan *ddc_bus;
- /* some systems have a an hdmi and vga port with a shared ddc line */
+ /* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
bool use_digital;
/* we need to mind the EDID between detect
@@ -402,6 +425,8 @@ struct radeon_connector {
bool dac_load_detect;
uint16_t connector_object_id;
struct radeon_hpd hpd;
+ struct radeon_router router;
+ struct radeon_i2c_chan *router_bus;
};
struct radeon_framebuffer {
@@ -414,6 +439,9 @@ radeon_combios_get_tv_info(struct radeon_device *rdev);
extern enum radeon_tv_std
radeon_atombios_get_tv_info(struct radeon_device *rdev);
+extern struct drm_connector *
+radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
@@ -431,6 +459,15 @@ extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint8_t write_byte, uint8_t *read_byte);
+extern void radeon_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_fini(struct radeon_device *rdev);
+extern void radeon_combios_i2c_init(struct radeon_device *rdev);
+extern void radeon_atombios_i2c_init(struct radeon_device *rdev);
+extern void radeon_i2c_add(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *rec,
+ const char *name);
+extern struct radeon_i2c_chan *radeon_i2c_lookup(struct radeon_device *rdev,
+ struct radeon_i2c_bus_rec *i2c_bus);
extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
@@ -446,6 +483,7 @@ extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
u8 slave_addr,
u8 addr,
u8 val);
+extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d5b9373ce06c..0afd1e62347d 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -110,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
+retry:
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
@@ -118,10 +119,15 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
&radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
- if (r != -ERESTARTSYS)
+ if (r != -ERESTARTSYS) {
+ if (domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
dev_err(rdev->dev,
"object_init failed for (%lu, 0x%08X)\n",
size, domain);
+ }
return r;
}
*bo_ptr = bo;
@@ -321,6 +327,7 @@ int radeon_bo_list_validate(struct list_head *head)
{
struct radeon_bo_list *lobj;
struct radeon_bo *bo;
+ u32 domain;
int r;
list_for_each_entry(lobj, head, list) {
@@ -333,17 +340,19 @@ int radeon_bo_list_validate(struct list_head *head)
list_for_each_entry(lobj, head, list) {
bo = lobj->bo;
if (!bo->pin_count) {
- if (lobj->wdomain) {
- radeon_ttm_placement_from_domain(bo,
- lobj->wdomain);
- } else {
- radeon_ttm_placement_from_domain(bo,
- lobj->rdomain);
- }
+ domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;
+
+ retry:
+ radeon_ttm_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false, false);
- if (unlikely(r))
+ if (unlikely(r)) {
+ if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
+ domain |= RADEON_GEM_DOMAIN_GTT;
+ goto retry;
+ }
return r;
+ }
}
lobj->gpu_offset = radeon_bo_gpu_offset(bo);
lobj->tiling_flags = bo->tiling_flags;
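Both the buffer creation path and the command-stream validation path now retry with a widened placement when a VRAM placement fails for any reason other than a signal: the domain gains RADEON_GEM_DOMAIN_GTT and the loop restarts, trading some performance for forward progress when VRAM is full or fragmented instead of failing the ioctl outright; -ERESTARTSYS is excluded so interrupted waits still restart cleanly. The shape of the fallback, reduced to a hedged sketch (place_buffer() is a hypothetical stand-in for ttm_bo_init()/ttm_bo_validate()):

static int place_with_fallback(struct radeon_bo *bo)
{
	u32 domain = RADEON_GEM_DOMAIN_VRAM;
	int r;

retry:
	r = place_buffer(bo, domain);		/* hypothetical helper */
	if (r && r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
		domain |= RADEON_GEM_DOMAIN_GTT;	/* widen, retry once */
		goto retry;
	}
	return r;
}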
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 3fa6984d9896..58038f5cab38 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -27,6 +27,8 @@
#include <linux/acpi.h>
#endif
#include <linux/power_supply.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
@@ -60,9 +62,9 @@ static int radeon_acpi_event(struct notifier_block *nb,
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
- DRM_DEBUG("pm: AC\n");
+ DRM_DEBUG_DRIVER("pm: AC\n");
else
- DRM_DEBUG("pm: DC\n");
+ DRM_DEBUG_DRIVER("pm: DC\n");
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
@@ -196,7 +198,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
radeon_set_engine_clock(rdev, sclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_sclk = sclk;
- DRM_DEBUG("Setting: e: %d\n", sclk);
+ DRM_DEBUG_DRIVER("Setting: e: %d\n", sclk);
}
/* set memory clock */
@@ -205,7 +207,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.current_mclk = mclk;
- DRM_DEBUG("Setting: m: %d\n", mclk);
+ DRM_DEBUG_DRIVER("Setting: m: %d\n", mclk);
}
if (misc_after)
@@ -217,7 +219,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
rdev->pm.current_power_state_index = rdev->pm.requested_power_state_index;
rdev->pm.current_clock_mode_index = rdev->pm.requested_clock_mode_index;
} else
- DRM_DEBUG("pm: GUI not idle!!!\n");
+ DRM_DEBUG_DRIVER("pm: GUI not idle!!!\n");
}
static void radeon_pm_set_clocks(struct radeon_device *rdev)
@@ -292,27 +294,27 @@ static void radeon_pm_print_states(struct radeon_device *rdev)
struct radeon_power_state *power_state;
struct radeon_pm_clock_info *clock_info;
- DRM_DEBUG("%d Power State(s)\n", rdev->pm.num_power_states);
+ DRM_DEBUG_DRIVER("%d Power State(s)\n", rdev->pm.num_power_states);
for (i = 0; i < rdev->pm.num_power_states; i++) {
power_state = &rdev->pm.power_state[i];
- DRM_DEBUG("State %d: %s\n", i,
+ DRM_DEBUG_DRIVER("State %d: %s\n", i,
radeon_pm_state_type_name[power_state->type]);
if (i == rdev->pm.default_power_state_index)
- DRM_DEBUG("\tDefault");
+ DRM_DEBUG_DRIVER("\tDefault");
if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
- DRM_DEBUG("\t%d PCIE Lanes\n", power_state->pcie_lanes);
+ DRM_DEBUG_DRIVER("\t%d PCIE Lanes\n", power_state->pcie_lanes);
if (power_state->flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
- DRM_DEBUG("\tSingle display only\n");
- DRM_DEBUG("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
+ DRM_DEBUG_DRIVER("\tSingle display only\n");
+ DRM_DEBUG_DRIVER("\t%d Clock Mode(s)\n", power_state->num_clock_modes);
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
- DRM_DEBUG("\t\t%d e: %d%s\n",
+ DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
else
- DRM_DEBUG("\t\t%d e: %d\tm: %d\tv: %d%s\n",
+ DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
j,
clock_info->sclk * 10,
clock_info->mclk * 10,
@@ -424,6 +426,93 @@ fail:
static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
+static ssize_t radeon_hwmon_show_temp(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
+ struct radeon_device *rdev = ddev->dev_private;
+ u32 temp;
+
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ temp = rv6xx_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_RV770:
+ temp = rv770_get_temp(rdev);
+ break;
+ case THERMAL_TYPE_EVERGREEN:
+ temp = evergreen_get_temp(rdev);
+ break;
+ default:
+ temp = 0;
+ break;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
+}
+
+static ssize_t radeon_hwmon_show_name(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return sprintf(buf, "radeon\n");
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, radeon_hwmon_show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, radeon_hwmon_show_name, NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_name.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+ .attrs = hwmon_attributes,
+};
+
+static int radeon_hwmon_init(struct radeon_device *rdev)
+{
+ int err = 0;
+
+ rdev->pm.int_hwmon_dev = NULL;
+
+ switch (rdev->pm.int_thermal_type) {
+ case THERMAL_TYPE_RV6XX:
+ case THERMAL_TYPE_RV770:
+ case THERMAL_TYPE_EVERGREEN:
+ rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
+ if (IS_ERR(rdev->pm.int_hwmon_dev)) {
+ err = PTR_ERR(rdev->pm.int_hwmon_dev);
+ dev_err(rdev->dev,
+ "Unable to register hwmon device: %d\n", err);
+ break;
+ }
+ dev_set_drvdata(rdev->pm.int_hwmon_dev, rdev->ddev);
+ err = sysfs_create_group(&rdev->pm.int_hwmon_dev->kobj,
+ &hwmon_attrgroup);
+ if (err) {
+ dev_err(rdev->dev,
+ "Unable to create hwmon sysfs file: %d\n", err);
+ hwmon_device_unregister(rdev->dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static void radeon_hwmon_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.int_hwmon_dev) {
+ sysfs_remove_group(&rdev->pm.int_hwmon_dev->kobj, &hwmon_attrgroup);
+ hwmon_device_unregister(rdev->pm.int_hwmon_dev);
+ }
+}
+
void radeon_pm_suspend(struct radeon_device *rdev)
{
bool flush_wq = false;
@@ -462,6 +551,7 @@ void radeon_pm_resume(struct radeon_device *rdev)
int radeon_pm_init(struct radeon_device *rdev)
{
int ret;
+
/* default to profile method */
rdev->pm.pm_method = PM_METHOD_PROFILE;
rdev->pm.profile = PM_PROFILE_DEFAULT;
@@ -471,6 +561,7 @@ int radeon_pm_init(struct radeon_device *rdev)
rdev->pm.dynpm_can_downclock = true;
rdev->pm.current_sclk = rdev->clock.default_sclk;
rdev->pm.current_mclk = rdev->clock.default_mclk;
+ rdev->pm.int_thermal_type = THERMAL_TYPE_NONE;
if (rdev->bios) {
if (rdev->is_atom_bios)
@@ -481,6 +572,10 @@ int radeon_pm_init(struct radeon_device *rdev)
radeon_pm_init_profile(rdev);
}
+ /* set up the internal thermal sensor if applicable */
+ ret = radeon_hwmon_init(rdev);
+ if (ret)
+ return ret;
if (rdev->pm.num_power_states > 1) {
/* where's the best place to put these? */
ret = device_create_file(rdev->dev, &dev_attr_power_profile);
@@ -536,6 +631,7 @@ void radeon_pm_fini(struct radeon_device *rdev)
#endif
}
+ radeon_hwmon_fini(rdev);
if (rdev->pm.i2c_bus)
radeon_i2c_destroy(rdev->pm.i2c_bus);
}
@@ -576,7 +672,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
radeon_pm_get_dynpm_state(rdev);
radeon_pm_set_clocks(rdev);
- DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ DRM_DEBUG_DRIVER("radeon: dynamic power management deactivated\n");
}
} else if (rdev->pm.active_crtc_count == 1) {
/* TODO: Increase clocks if needed for current mode */
@@ -593,7 +689,7 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_state = DYNPM_STATE_ACTIVE;
queue_delayed_work(rdev->wq, &rdev->pm.dynpm_idle_work,
msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
- DRM_DEBUG("radeon: dynamic power management activated\n");
+ DRM_DEBUG_DRIVER("radeon: dynamic power management activated\n");
}
} else { /* count == 0 */
if (rdev->pm.dynpm_state != DYNPM_STATE_MINIMUM) {
@@ -689,7 +785,7 @@ static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish
bool in_vbl = radeon_pm_in_vbl(rdev);
if (in_vbl == false)
- DRM_DEBUG("not in vbl for pm change %08x at %s\n", stat_crtc,
+ DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc,
finish ? "exit" : "entry");
return in_vbl;
}
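The radeon_hwmon_init()/radeon_hwmon_fini() pair added above follows the usual (pre-devm) hwmon registration pattern: register a hwmon child device under the driver's struct device, point its drvdata back at the driver, and expose readings through a sysfs attribute group (temp1_input in millidegrees Celsius, plus a name attribute). A stripped-down sketch of that shape, where read_chip_temp() is a hypothetical stand-in for the per-ASIC readout:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/hwmon.h>
    #include <linux/hwmon-sysfs.h>
    #include <linux/sysfs.h>

    extern int read_chip_temp(void *drvdata);   /* hypothetical */

    static ssize_t show_temp(struct device *dev,
                             struct device_attribute *attr, char *buf)
    {
            int mdeg = read_chip_temp(dev_get_drvdata(dev));

            return snprintf(buf, PAGE_SIZE, "%d\n", mdeg);
    }
    static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);

    static struct attribute *sensor_attrs[] = {
            &sensor_dev_attr_temp1_input.dev_attr.attr,
            NULL
    };
    static const struct attribute_group sensor_group = { .attrs = sensor_attrs };

    static int register_sensor(struct device *parent, void *drvdata)
    {
            struct device *hwmon = hwmon_device_register(parent);

            if (IS_ERR(hwmon))
                    return PTR_ERR(hwmon);
            dev_set_drvdata(hwmon, drvdata);
            return sysfs_create_group(&hwmon->kobj, &sensor_group);
    }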
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e9918d88f5b0..84c53e41a88f 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -59,28 +59,28 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
/*
* Global memory.
*/
-static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
+static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
-static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
+static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
- struct ttm_global_reference *global_ref;
+ struct drm_global_reference *global_ref;
int r;
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &radeon_ttm_mem_global_init;
global_ref->release = &radeon_ttm_mem_global_release;
- r = ttm_global_item_ref(global_ref);
+ r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
@@ -90,14 +90,14 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
rdev->mman.bo_global_ref.mem_glob =
rdev->mman.mem_global_ref.object;
global_ref = &rdev->mman.bo_global_ref.ref;
- global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
- r = ttm_global_item_ref(global_ref);
+ r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
- ttm_global_item_unref(&rdev->mman.mem_global_ref);
+ drm_global_item_unref(&rdev->mman.mem_global_ref);
return r;
}
@@ -108,8 +108,8 @@ static int radeon_ttm_global_init(struct radeon_device *rdev)
static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
if (rdev->mman.mem_global_referenced) {
- ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
- ttm_global_item_unref(&rdev->mman.mem_global_ref);
+ drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
+ drm_global_item_unref(&rdev->mman.mem_global_ref);
rdev->mman.mem_global_referenced = false;
}
}
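The conversion above is mechanical: the TTM global-reference helpers (ttm_global_reference, ttm_global_item_ref/unref, TTM_GLOBAL_TTM_MEM/BO) become the DRM-core drm_global_* equivalents with the same semantics. The first drm_global_item_ref() for a given type allocates and init()s the shared object, later callers just take a reference, and the last drm_global_item_unref() calls release(). Roughly, for the memory-accounting global:

    /* sketch of the refcounted-singleton behaviour; example_take_ref() is
     * hypothetical, the drm_global_* calls and init/release hooks are as above */
    static struct drm_global_reference mem_ref = {
            .global_type = DRM_GLOBAL_TTM_MEM,
            .size        = sizeof(struct ttm_mem_global),
            .init        = &radeon_ttm_mem_global_init,
            .release     = &radeon_ttm_mem_global_release,
    };

    static int example_take_ref(void)
    {
            int r = drm_global_item_ref(&mem_ref); /* creates object on first use */

            if (r)
                    return r;
            /* mem_ref.object now points at the shared ttm_mem_global */
            drm_global_item_unref(&mem_ref);       /* last unref calls ->release() */
            return 0;
    }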
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index 1e97b2d129fd..b506ec1cab4b 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -187,7 +187,6 @@ r300 0x4f60
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -716,16 +715,4 @@ r300 0x4f60
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index e958980d00f1..8c1214c2390f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,6 +130,7 @@ r420 0x4f60
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
@@ -187,7 +188,6 @@ r420 0x4f60
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -782,16 +782,4 @@ r420 0x4f60
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 83e8bc0c2bb2..0828d80396f2 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -187,7 +187,6 @@ rs600 0x6d40
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -782,16 +781,4 @@ rs600 0x6d40
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index 1e46233985eb..b3f9f1d92005 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -235,7 +235,6 @@ rv515 0x6d40
0x4354 RS_INST_13
0x4358 RS_INST_14
0x435C RS_INST_15
-0x43A4 SC_HYPERZ_EN
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
@@ -317,6 +316,7 @@ rv515 0x6d40
0x4BD0 FG_FOG_COLOR_B
0x4BD4 FG_ALPHA_FUNC
0x4BD8 FG_DEPTH_SRC
+0x4BE0 FG_ALPHA_VALUE
0x4C00 US_ALU_CONST_R_0
0x4C04 US_ALU_CONST_G_0
0x4C08 US_ALU_CONST_B_0
@@ -479,17 +479,5 @@ rv515 0x6d40
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
-0x4F1C ZB_BW_CNTL
-0x4F28 ZB_DEPTHCLEARVALUE
-0x4F30 ZB_ZMASK_OFFSET
-0x4F34 ZB_ZMASK_PITCH
-0x4F38 ZB_ZMASK_WRINDEX
-0x4F3C ZB_ZMASK_DWORD
-0x4F40 ZB_ZMASK_RDINDEX
-0x4F44 ZB_HIZ_OFFSET
-0x4F48 ZB_HIZ_WRINDEX
-0x4F4C ZB_HIZ_DWORD
-0x4F50 ZB_HIZ_RDINDEX
-0x4F54 ZB_HIZ_PITCH
0x4F58 ZB_ZPASS_DATA
0x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f454c9a5e7f2..ae2b76b9a388 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -55,14 +55,6 @@ void rs400_gart_adjust_size(struct radeon_device *rdev)
rdev->mc.gtt_size = 32 * 1024 * 1024;
return;
}
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
- /* FIXME: RS400 & RS480 seems to have issue with GART size
- * if 4G of system memory (needs more testing)
- */
- /* XXX is this still an issue with proper alignment? */
- rdev->mc.gtt_size = 32 * 1024 * 1024;
- DRM_ERROR("Forcing to 32M GART size (because of ASIC bug ?)\n");
- }
}
void rs400_gart_tlb_flush(struct radeon_device *rdev)
@@ -483,6 +475,8 @@ int rs400_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 6dc15ea8ba33..cc05b230d7ef 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -686,8 +686,8 @@ void rs600_mc_init(struct radeon_device *rdev)
{
u64 base;
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
@@ -696,7 +696,6 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
- rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, &rdev->mc);
@@ -813,6 +812,13 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
@@ -839,6 +845,7 @@ int rs600_resume(struct radeon_device *rdev)
int rs600_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -848,6 +855,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -871,6 +879,8 @@ int rs600_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index ce4ecbe10816..3e3f75718be3 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -154,13 +154,13 @@ void rs690_mc_init(struct radeon_device *rdev)
rdev->mc.vram_width = 128;
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
rdev->mc.visible_vram_size = rdev->mc.aper_size;
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
- rs690_pm_info(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ rs690_pm_info(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1;
radeon_gtt_location(rdev, &rdev->mc);
@@ -398,7 +398,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rs690_watermark wm0;
struct rs690_watermark wm1;
- u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
+ u32 d2mode_priority_a_cnt = S_006548_D1MODE_PRIORITY_A_OFF(1);
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
@@ -495,10 +497,6 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
}
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -528,13 +526,7 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
- S_006D48_D2MODE_PRIORITY_A_OFF(1));
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
- S_006D4C_D2MODE_PRIORITY_B_OFF(1));
- } else {
+ } else if (mode1) {
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_mul(wm1.dbpp, wm1.num_line_pair);
else
@@ -563,13 +555,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
- S_006548_D1MODE_PRIORITY_A_OFF(1));
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
- S_00654C_D1MODE_PRIORITY_B_OFF(1));
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
+
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -641,6 +632,13 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing audio\n");
+ return r;
+ }
+
return 0;
}
@@ -667,6 +665,7 @@ int rs690_resume(struct radeon_device *rdev)
int rs690_suspend(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_disable(rdev);
r100_wb_disable(rdev);
rs600_irq_disable(rdev);
@@ -676,6 +675,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -699,6 +699,8 @@ int rs690_init(struct radeon_device *rdev)
radeon_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* TODO: disable VGA need to use VGA request */
/* BIOS*/
if (!radeon_get_bios(rdev)) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0c9c169a6852..4d6e86041a9f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -469,6 +469,8 @@ int rv515_init(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
/* TODO: disable VGA need to use VGA request */
+ /* restore some registers to sane defaults */
+ r100_restore_sanity(rdev);
/* BIOS*/
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
@@ -925,7 +927,9 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rv515_watermark wm0;
struct rv515_watermark wm1;
- u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ u32 tmp;
+ u32 d1mode_priority_a_cnt = MODE_PRIORITY_OFF;
+ u32 d2mode_priority_a_cnt = MODE_PRIORITY_OFF;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
@@ -999,10 +1003,6 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
}
- WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
if (dfixed_trunc(wm0.dbpp) > 64)
a.full = dfixed_div(wm0.dbpp, wm0.num_line_pair);
@@ -1032,11 +1032,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
d1mode_priority_a_cnt = dfixed_trunc(priority_mark02);
if (rdev->disp_priority == 2)
d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
- WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
- WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- } else {
+ } else if (mode1) {
if (dfixed_trunc(wm1.dbpp) > 64)
a.full = dfixed_div(wm1.dbpp, wm1.num_line_pair);
else
@@ -1065,11 +1061,12 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
d2mode_priority_a_cnt = dfixed_trunc(priority_mark12);
if (rdev->disp_priority == 2)
d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
- WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
- WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
- WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
+
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
void rv515_bandwidth_update(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b7fd82064922..f1c796810117 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -42,6 +42,21 @@
static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);
+/* get temperature in millidegrees */
+u32 rv770_get_temp(struct radeon_device *rdev)
+{
+ u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
+ ASIC_T_SHIFT;
+ u32 actual_temp = 0;
+
+ if ((temp >> 9) & 1)
+ actual_temp = 0;
+ else
+ actual_temp = (temp >> 1) & 0xff;
+
+ return actual_temp * 1000;
+}
+
void rv770_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -189,7 +204,10 @@ static void rv770_mc_program(struct radeon_device *rdev)
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
- WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+ /* r7xx hw bug. Read from HDP_DEBUG1 rather
+ * than writing to HDP_REG_COHERENCY_FLUSH_CNTL
+ */
+ tmp = RREG32(HDP_DEBUG1);
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
@@ -659,8 +677,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
- gb_tiling_config |= BACKEND_MAP(backend_map);
+ rdev->config.rv770.tile_config = gb_tiling_config;
+ gb_tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -919,8 +938,8 @@ int rv770_mc_init(struct radeon_device *rdev)
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0 ? */
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
+ rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
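One note on the rv770_get_temp() helper added near the top of this file (field layout per the ASIC_T_* defines added to rv770d.h just below): bits 25:16 of CG_MULT_THERMAL_STATUS hold a 10-bit reading, bit 9 of that field marks a negative temperature which the driver simply clamps to 0, and the next eight bits are taken as whole degrees and scaled to millidegrees for hwmon. As a standalone decode this is roughly:

    /* illustrative only; mirrors the masks and shifts used by rv770_get_temp() */
    static inline unsigned int decode_asic_t(unsigned int status)
    {
            unsigned int temp = (status & 0x3FF0000) >> 16;  /* ASIC_T field */

            if ((temp >> 9) & 1)
                    return 0;                       /* negative: clamp to 0 */
            return ((temp >> 1) & 0xff) * 1000;     /* millidegrees Celsius */
    }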
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9506f8cb99e0..b7a5a20e81dc 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -122,12 +122,18 @@
#define GUI_ACTIVE (1<<31)
#define GRBM_STATUS2 0x8014
+#define CG_MULT_THERMAL_STATUS 0x740
+#define ASIC_T(x) ((x) << 16)
+#define ASIC_T_MASK 0x3FF0000
+#define ASIC_T_SHIFT 16
+
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C
+#define HDP_DEBUG1 0x2F34
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 2d0c9ca484c5..976dc8d25280 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -552,7 +552,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
/*
- * Initalize mappings. On Savage4 and SavageIX the alignment
+ * Initialize mappings. On Savage4 and SavageIX the alignment
* and size of the aperture is not suitable for automatic MTRR setup
* in drm_addmap. Therefore we add them manually before the maps are
* initialized, and tear them down on last close.
@@ -573,13 +573,13 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[2].handle = -1;
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
fb_rsrc = 0;
- fb_base = drm_get_resource_start(dev, 0);
+ fb_base = pci_resource_start(dev->pdev, 0);
fb_size = SAVAGE_FB_SIZE_S3;
mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
aper_rsrc = 0;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
- if (drm_get_resource_len(dev, 0) == 0x08000000) {
+ if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
/* Don't make MMIO write-combining! We need 3
* MTRRs. */
dev_priv->mtrr[0].base = fb_base;
@@ -599,18 +599,19 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[2].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08llx\n",
- (unsigned long long)drm_get_resource_len(dev, 0));
+ (unsigned long long)
+ pci_resource_len(dev->pdev, 0));
}
} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
dev_priv->chipset != S3_SAVAGE2000) {
- mmio_base = drm_get_resource_start(dev, 0);
+ mmio_base = pci_resource_start(dev->pdev, 0);
fb_rsrc = 1;
- fb_base = drm_get_resource_start(dev, 1);
+ fb_base = pci_resource_start(dev->pdev, 1);
fb_size = SAVAGE_FB_SIZE_S4;
aper_rsrc = 1;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
- if (drm_get_resource_len(dev, 1) == 0x08000000) {
+ if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
/* Can use one MTRR to cover both fb and
* aperture. */
dev_priv->mtrr[0].base = fb_base;
@@ -620,15 +621,16 @@ int savage_driver_firstopen(struct drm_device *dev)
dev_priv->mtrr[0].size, DRM_MTRR_WC);
} else {
DRM_ERROR("strange pci_resource_len %08llx\n",
- (unsigned long long)drm_get_resource_len(dev, 1));
+ (unsigned long long)
+ pci_resource_len(dev->pdev, 1));
}
} else {
- mmio_base = drm_get_resource_start(dev, 0);
+ mmio_base = pci_resource_start(dev->pdev, 0);
fb_rsrc = 1;
- fb_base = drm_get_resource_start(dev, 1);
- fb_size = drm_get_resource_len(dev, 1);
+ fb_base = pci_resource_start(dev->pdev, 1);
+ fb_size = pci_resource_len(dev->pdev, 1);
aper_rsrc = 2;
- aperture_base = drm_get_resource_start(dev, 2);
+ aperture_base = pci_resource_start(dev->pdev, 2);
/* Automatic MTRR setup will do the right thing. */
}
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 4fd1f067d380..776bf9e9ea1a 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,8 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret) {
+ if (ret)
kfree(dev_priv);
- }
return ret;
}
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index af22111397d8..07d0f2979cac 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -78,7 +78,7 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
-#define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
+#define SIS_MM_ALIGN_MASK ((1 << SIS_MM_ALIGN_SHIFT) - 1)
#endif /* CONFIG_FB_SIS[_MODULE] */
@@ -225,9 +225,8 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev)
map = entry->map;
if (!map)
continue;
- if (map->type == _DRM_REGISTERS) {
+ if (map->type == _DRM_REGISTERS)
return map;
- }
}
return NULL;
}
@@ -264,10 +263,10 @@ int sis_idle(struct drm_device *dev)
end = jiffies + (DRM_HZ * 3);
- for (i=0; i<4; ++i) {
+ for (i = 0; i < 4; ++i) {
do {
idle_reg = SIS_READ(0x85cc);
- } while ( !time_after_eq(jiffies, end) &&
+ } while (!time_after_eq(jiffies, end) &&
((idle_reg & 0x80000000) != 0x80000000));
}
@@ -301,7 +300,7 @@ void sis_lastclose(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void sis_reclaim_buffers_locked(struct drm_device * dev,
+void sis_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
@@ -312,9 +311,8 @@ void sis_reclaim_buffers_locked(struct drm_device * dev,
return;
}
- if (dev->driver->dma_quiescent) {
+ if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- }
drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 4256e2006476..b256d4adfafe 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -3,7 +3,7 @@
ccflags-y := -Iinclude/drm
ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
- ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 555ebb12ace8..cb4cf7ef4d1e 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -476,7 +476,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
++put_count;
}
if (bo->mem.mm_node) {
- bo->mem.mm_node->private = NULL;
drm_mm_put_block(bo->mem.mm_node);
bo->mem.mm_node = NULL;
}
@@ -670,7 +669,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
spin_lock(&glob->lru_lock);
if (evict_mem.mm_node) {
- evict_mem.mm_node->private = NULL;
drm_mm_put_block(evict_mem.mm_node);
evict_mem.mm_node = NULL;
}
@@ -929,8 +927,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
mem->mm_node = node;
mem->mem_type = mem_type;
mem->placement = cur_flags;
- if (node)
- node->private = bo;
return 0;
}
@@ -973,7 +969,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
interruptible, no_wait_reserve, no_wait_gpu);
if (ret == 0 && mem->mm_node) {
mem->placement = cur_flags;
- mem->mm_node->private = bo;
return 0;
}
if (ret == -ERESTARTSYS)
@@ -1029,7 +1024,6 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
out_unlock:
if (ret && mem.mm_node) {
spin_lock(&glob->lru_lock);
- mem.mm_node->private = NULL;
drm_mm_put_block(mem.mm_node);
spin_unlock(&glob->lru_lock);
}
@@ -1401,7 +1395,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj)
kfree(glob);
}
-void ttm_bo_global_release(struct ttm_global_reference *ref)
+void ttm_bo_global_release(struct drm_global_reference *ref)
{
struct ttm_bo_global *glob = ref->object;
@@ -1410,7 +1404,7 @@ void ttm_bo_global_release(struct ttm_global_reference *ref)
}
EXPORT_SYMBOL(ttm_bo_global_release);
-int ttm_bo_global_init(struct ttm_global_reference *ref)
+int ttm_bo_global_init(struct drm_global_reference *ref)
{
struct ttm_bo_global_ref *bo_ref =
container_of(ref, struct ttm_bo_global_ref, ref);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 13012a1f1486..7cffb3e04232 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,8 +353,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->vm_node = NULL;
fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
- if (fbo->mem.mm_node)
- fbo->mem.mm_node->private = (void *)fbo;
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 9a6edbfeaa9e..902d7cf9fb4e 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -70,8 +70,6 @@ static int __init ttm_init(void)
if (unlikely(ret != 0))
return ret;
- ttm_global_init();
-
atomic_set(&device_released, 0);
ret = drm_class_device_register(&ttm_drm_class_device);
if (unlikely(ret != 0))
@@ -81,7 +79,6 @@ static int __init ttm_init(void)
out_no_dev_reg:
atomic_set(&device_released, 1);
wake_up_all(&exit_q);
- ttm_global_release();
return ret;
}
@@ -95,7 +92,6 @@ static void __exit ttm_exit(void)
*/
wait_event(exit_q, atomic_read(&device_released) == 1);
- ttm_global_release();
}
module_init(ttm_init);
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index bfb92d283260..68dda74a50ae 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -58,28 +58,29 @@
*((uint32_t *)(vb)) = ((nReg) >> 2) | HALCYON_HEADER1; \
*((uint32_t *)(vb) + 1) = (nData); \
vb = ((uint32_t *)vb) + 2; \
- dev_priv->dma_low +=8; \
+ dev_priv->dma_low += 8; \
}
#define via_flush_write_combine() DRM_MEMORYBARRIER()
-#define VIA_OUT_RING_QW(w1,w2) \
+#define VIA_OUT_RING_QW(w1, w2) do { \
*vb++ = (w1); \
*vb++ = (w2); \
- dev_priv->dma_low += 8;
+ dev_priv->dma_low += 8; \
+} while (0)
-static void via_cmdbuf_start(drm_via_private_t * dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
-static int via_wait_idle(drm_via_private_t * dev_priv);
-static void via_pad_cache(drm_via_private_t * dev_priv, int qwords);
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
/*
* Free space in command buffer.
*/
-static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
+static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
@@ -93,7 +94,7 @@ static uint32_t via_cmdbuf_space(drm_via_private_t * dev_priv)
* How much does the command regulator lag behind?
*/
-static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
+static uint32_t via_cmdbuf_lag(drm_via_private_t *dev_priv)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
@@ -108,7 +109,7 @@ static uint32_t via_cmdbuf_lag(drm_via_private_t * dev_priv)
*/
static inline int
-via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
+via_cmdbuf_wait(drm_via_private_t *dev_priv, unsigned int size)
{
uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
uint32_t cur_addr, hw_addr, next_addr;
@@ -146,14 +147,13 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
dev_priv->dma_high) {
via_cmdbuf_rewind(dev_priv);
}
- if (via_cmdbuf_wait(dev_priv, size) != 0) {
+ if (via_cmdbuf_wait(dev_priv, size) != 0)
return NULL;
- }
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
-int via_dma_cleanup(struct drm_device * dev)
+int via_dma_cleanup(struct drm_device *dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv =
@@ -171,9 +171,9 @@ int via_dma_cleanup(struct drm_device * dev)
return 0;
}
-static int via_initialize(struct drm_device * dev,
- drm_via_private_t * dev_priv,
- drm_via_dma_init_t * init)
+static int via_initialize(struct drm_device *dev,
+ drm_via_private_t *dev_priv,
+ drm_via_dma_init_t *init)
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
@@ -258,7 +258,7 @@ static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *fil
return retcode;
}
-static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_cmdbuffer(struct drm_device *dev, drm_via_cmdbuffer_t *cmd)
{
drm_via_private_t *dev_priv;
uint32_t *vb;
@@ -271,9 +271,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
return -EFAULT;
}
- if (cmd->size > VIA_PCI_BUF_SIZE) {
+ if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- }
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
@@ -291,9 +290,8 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
}
vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
- if (vb == NULL) {
+ if (vb == NULL)
return -EAGAIN;
- }
memcpy(vb, dev_priv->pci_buf, cmd->size);
@@ -311,13 +309,12 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
return 0;
}
-int via_driver_dma_quiescent(struct drm_device * dev)
+int via_driver_dma_quiescent(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- if (!via_wait_idle(dev_priv)) {
+ if (!via_wait_idle(dev_priv))
return -EBUSY;
- }
return 0;
}
@@ -339,22 +336,17 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_cmdbuffer(dev, cmdbuf);
- if (ret) {
- return ret;
- }
-
- return 0;
+ return ret;
}
-static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
- drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_pci_cmdbuffer(struct drm_device *dev,
+ drm_via_cmdbuffer_t *cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret;
- if (cmd->size > VIA_PCI_BUF_SIZE) {
+ if (cmd->size > VIA_PCI_BUF_SIZE)
return -ENOMEM;
- }
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
return -EFAULT;
@@ -380,19 +372,14 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
- if (ret) {
- return ret;
- }
-
- return 0;
+ return ret;
}
-static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
+static inline uint32_t *via_align_buffer(drm_via_private_t *dev_priv,
uint32_t * vb, int qw_count)
{
- for (; qw_count > 0; --qw_count) {
+ for (; qw_count > 0; --qw_count)
VIA_OUT_RING_QW(HC_DUMMY, HC_DUMMY);
- }
return vb;
}
@@ -401,7 +388,7 @@ static inline uint32_t *via_align_buffer(drm_via_private_t * dev_priv,
*
* Returns virtual pointer to ring buffer.
*/
-static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
+static inline uint32_t *via_get_dma(drm_via_private_t *dev_priv)
{
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
@@ -411,18 +398,18 @@ static inline uint32_t *via_get_dma(drm_via_private_t * dev_priv)
* modifying the pause address stored in the buffer itself. If
* the regulator has already paused, restart it.
*/
-static int via_hook_segment(drm_via_private_t * dev_priv,
+static int via_hook_segment(drm_via_private_t *dev_priv,
uint32_t pause_addr_hi, uint32_t pause_addr_lo,
int no_pci_fire)
{
int paused, count;
volatile uint32_t *paused_at = dev_priv->last_pause_ptr;
- uint32_t reader,ptr;
+ uint32_t reader, ptr;
uint32_t diff;
paused = 0;
via_flush_write_combine();
- (void) *(volatile uint32_t *)(via_get_dma(dev_priv) -1);
+ (void) *(volatile uint32_t *)(via_get_dma(dev_priv) - 1);
*paused_at = pause_addr_lo;
via_flush_write_combine();
@@ -435,7 +422,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
dev_priv->last_pause_ptr = via_get_dma(dev_priv) - 1;
/*
- * If there is a possibility that the command reader will
+ * If there is a possibility that the command reader will
* miss the new pause address and pause on the old one,
* In that case we need to program the new start address
* using PCI.
@@ -443,9 +430,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
count = 10000000;
- while(diff == 0 && count--) {
+ while (diff == 0 && count--) {
paused = (VIA_READ(0x41c) & 0x80000000);
- if (paused)
+ if (paused)
break;
reader = *(dev_priv->hw_addr_ptr);
diff = (uint32_t) (ptr - reader) - dev_priv->dma_diff;
@@ -477,7 +464,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
return paused;
}
-static int via_wait_idle(drm_via_private_t * dev_priv)
+static int via_wait_idle(drm_via_private_t *dev_priv)
{
int count = 10000000;
@@ -491,9 +478,9 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
return count;
}
-static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
- uint32_t addr, uint32_t * cmd_addr_hi,
- uint32_t * cmd_addr_lo, int skip_wait)
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
+ uint32_t addr, uint32_t *cmd_addr_hi,
+ uint32_t *cmd_addr_lo, int skip_wait)
{
uint32_t agp_base;
uint32_t cmd_addr, addr_lo, addr_hi;
@@ -521,7 +508,7 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
return vb;
}
-static void via_cmdbuf_start(drm_via_private_t * dev_priv)
+static void via_cmdbuf_start(drm_via_private_t *dev_priv)
{
uint32_t pause_addr_lo, pause_addr_hi;
uint32_t start_addr, start_addr_lo;
@@ -580,7 +567,7 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
dev_priv->dma_diff = ptr - reader;
}
-static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
+static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
{
uint32_t *vb;
@@ -590,7 +577,7 @@ static void via_pad_cache(drm_via_private_t * dev_priv, int qwords)
via_align_buffer(dev_priv, vb, qwords);
}
-static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
+static inline void via_dummy_bitblt(drm_via_private_t *dev_priv)
{
uint32_t *vb = via_get_dma(dev_priv);
SetReg2DAGP(0x0C, (0 | (0 << 16)));
@@ -598,7 +585,7 @@ static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
SetReg2DAGP(0x0, 0x1 | 0x2000 | 0xAA000000);
}
-static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
+static void via_cmdbuf_jump(drm_via_private_t *dev_priv)
{
uint32_t agp_base;
uint32_t pause_addr_lo, pause_addr_hi;
@@ -617,9 +604,8 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
*/
dev_priv->dma_low = 0;
- if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0) {
+ if (via_cmdbuf_wait(dev_priv, CMDBUF_ALIGNMENT_SIZE) != 0)
DRM_ERROR("via_cmdbuf_jump failed\n");
- }
via_dummy_bitblt(dev_priv);
via_dummy_bitblt(dev_priv);
@@ -657,12 +643,12 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
}
-static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv)
{
via_cmdbuf_jump(dev_priv);
}
-static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
+static void via_cmdbuf_flush(drm_via_private_t *dev_priv, uint32_t cmd_type)
{
uint32_t pause_addr_lo, pause_addr_hi;
@@ -670,12 +656,12 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
}
-static void via_cmdbuf_pause(drm_via_private_t * dev_priv)
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_PAUSE);
}
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv)
{
via_cmdbuf_flush(dev_priv, HC_HAGPBpID_STOP);
via_wait_idle(dev_priv);
@@ -708,9 +694,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
case VIA_CMDBUF_SPACE:
while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& --count) {
- if (!d_siz->wait) {
+ if (!d_siz->wait)
break;
- }
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
@@ -720,9 +705,8 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
case VIA_CMDBUF_LAG:
while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
&& --count) {
- if (!d_siz->wait) {
+ if (!d_siz->wait)
break;
- }
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 4c54f043068e..9b5b4d9dd62c 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -70,7 +70,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
- while(num_desc--) {
+ while (num_desc--) {
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
@@ -174,19 +174,19 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
struct page *page;
int i;
- switch(vsg->state) {
+ switch (vsg->state) {
case dr_via_device_mapped:
via_unmap_blit_from_device(pdev, vsg);
case dr_via_desc_pages_alloc:
- for (i=0; i<vsg->num_desc_pages; ++i) {
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
if (vsg->desc_pages[i] != NULL)
- free_page((unsigned long)vsg->desc_pages[i]);
+ free_page((unsigned long)vsg->desc_pages[i]);
}
kfree(vsg->desc_pages);
case dr_via_pages_locked:
- for (i=0; i<vsg->num_pages; ++i) {
- if ( NULL != (page = vsg->pages[i])) {
- if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ for (i = 0; i < vsg->num_pages; ++i) {
+ if (NULL != (page = vsg->pages[i])) {
+ if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@@ -232,7 +232,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
first_pfn + 1;
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
@@ -268,7 +268,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
- vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
+ vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
@@ -276,7 +276,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
return -ENOMEM;
vsg->state = dr_via_desc_pages_alloc;
- for (i=0; i<vsg->num_desc_pages; ++i) {
+ for (i = 0; i < vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
@@ -318,21 +318,20 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
int cur;
int done_transfer;
- unsigned long irqsave=0;
+ unsigned long irqsave = 0;
uint32_t status = 0;
DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
engine, from_irq, (unsigned long) blitq);
- if (from_irq) {
+ if (from_irq)
spin_lock(&blitq->blit_lock);
- } else {
+ else
spin_lock_irqsave(&blitq->blit_lock, irqsave);
- }
done_transfer = blitq->is_active &&
- (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+ ((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+ done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
@@ -377,18 +376,16 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
if (!timer_pending(&blitq->poll_timer))
mod_timer(&blitq->poll_timer, jiffies + 1);
} else {
- if (timer_pending(&blitq->poll_timer)) {
+ if (timer_pending(&blitq->poll_timer))
del_timer(&blitq->poll_timer);
- }
via_dmablit_engine_off(dev, engine);
}
}
- if (from_irq) {
+ if (from_irq)
spin_unlock(&blitq->blit_lock);
- } else {
+ else
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- }
}
@@ -414,10 +411,9 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
((blitq->cur_blit_handle - handle) <= (1 << 23));
if (queue && active) {
- slot = handle - blitq->done_blit_handle + blitq->cur -1;
- if (slot >= VIA_NUM_BLIT_SLOTS) {
+ slot = handle - blitq->done_blit_handle + blitq->cur - 1;
+ if (slot >= VIA_NUM_BLIT_SLOTS)
slot -= VIA_NUM_BLIT_SLOTS;
- }
*queue = blitq->blit_queue + slot;
}
@@ -506,12 +502,12 @@ via_dmablit_workqueue(struct work_struct *work)
int cur_released;
- DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while(blitq->serviced != blitq->cur) {
+ while (blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
@@ -545,13 +541,13 @@ via_dmablit_workqueue(struct work_struct *work)
void
via_init_dmablit(struct drm_device *dev)
{
- int i,j;
+ int i, j;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
pci_set_master(dev->pdev);
- for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
+ for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
blitq->cur_blit_handle = 0;
@@ -564,9 +560,8 @@ via_init_dmablit(struct drm_device *dev)
blitq->is_active = 0;
blitq->aborting = 0;
spin_lock_init(&blitq->blit_lock);
- for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
+ for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j)
DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
- }
DRM_INIT_WAITQUEUE(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
setup_timer(&blitq->poll_timer, via_dmablit_timer,
@@ -685,18 +680,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
- int ret=0;
+ int ret = 0;
unsigned long irqsave;
DRM_DEBUG("Num free is %d\n", blitq->num_free);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
- while(blitq->num_free == 0) {
+ while (blitq->num_free == 0) {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
- if (ret) {
+ if (ret)
return (-EINTR == ret) ? -EAGAIN : ret;
- }
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
@@ -719,7 +713,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->num_free++;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
- DRM_WAKEUP( &blitq->busy_queue );
+ DRM_WAKEUP(&blitq->busy_queue);
}
/*
@@ -744,9 +738,8 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
engine = (xfer->to_fb) ? 0 : 1;
blitq = dev_priv->blit_queues + engine;
- if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+ if (0 != (ret = via_dmablit_grab_slot(blitq, engine)))
return ret;
- }
if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
via_dmablit_release_slot(blitq);
return -ENOMEM;
@@ -780,7 +773,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
*/
int
-via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_blitsync_t *sync = data;
int err;
@@ -804,7 +797,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
*/
int
-via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
+via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_dmablit_t *xfer = data;
int err;
diff --git a/drivers/gpu/drm/via/via_dmablit.h b/drivers/gpu/drm/via/via_dmablit.h
index 7408a547a036..9b662a327cef 100644
--- a/drivers/gpu/drm/via/via_dmablit.h
+++ b/drivers/gpu/drm/via/via_dmablit.h
@@ -45,12 +45,12 @@ typedef struct _drm_via_sg_info {
int num_desc;
enum dma_data_direction direction;
unsigned char *bounce_buffer;
- dma_addr_t chain_start;
+ dma_addr_t chain_start;
uint32_t free_on_sequence;
- unsigned int descriptors_per_page;
+ unsigned int descriptors_per_page;
int aborted;
enum {
- dr_via_device_mapped,
+ dr_via_device_mapped,
dr_via_desc_pages_alloc,
dr_via_pages_locked,
dr_via_pages_alloc,
@@ -68,7 +68,7 @@ typedef struct _drm_via_blitq {
unsigned num_free;
unsigned num_outstanding;
unsigned long end;
- int aborting;
+ int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
spinlock_t blit_lock;
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index cafcb844a223..9cf87d912325 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -107,9 +107,9 @@ enum via_family {
#define VIA_BASE ((dev_priv->mmio))
#define VIA_READ(reg) DRM_READ32(VIA_BASE, reg)
-#define VIA_WRITE(reg,val) DRM_WRITE32(VIA_BASE, reg, val)
+#define VIA_WRITE(reg, val) DRM_WRITE32(VIA_BASE, reg, val)
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
-#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
+#define VIA_WRITE8(reg, val) DRM_WRITE8(VIA_BASE, reg, val)
extern struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
@@ -121,28 +121,28 @@ extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *fil
extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
-extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
+extern int via_dma_blit_sync(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
extern int via_driver_unload(struct drm_device *dev);
-extern int via_init_context(struct drm_device * dev, int context);
-extern int via_final_context(struct drm_device * dev, int context);
+extern int via_init_context(struct drm_device *dev, int context);
+extern int via_final_context(struct drm_device *dev, int context);
-extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_do_cleanup_map(struct drm_device *dev);
extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
extern int via_enable_vblank(struct drm_device *dev, int crtc);
extern void via_disable_vblank(struct drm_device *dev, int crtc);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
-extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_preinstall(struct drm_device *dev);
extern int via_driver_irq_postinstall(struct drm_device *dev);
-extern void via_driver_irq_uninstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device *dev);
-extern int via_dma_cleanup(struct drm_device * dev);
+extern int via_dma_cleanup(struct drm_device *dev);
extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(struct drm_device * dev);
+extern int via_driver_dma_quiescent(struct drm_device *dev);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
index 34079f251cd4..d391f48ef87a 100644
--- a/drivers/gpu/drm/via/via_irq.c
+++ b/drivers/gpu/drm/via/via_irq.c
@@ -141,11 +141,10 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&cur_irq->irq_received);
DRM_WAKEUP(&cur_irq->irq_queue);
handled = 1;
- if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+ if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
via_dmablit_handler(dev, 0, 1);
- } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+ else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i)
via_dmablit_handler(dev, 1, 1);
- }
}
cur_irq++;
}
@@ -160,7 +159,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
+static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t *dev_priv)
{
u32 status;
@@ -207,7 +206,7 @@ void via_disable_vblank(struct drm_device *dev, int crtc)
}
static int
-via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
+via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -260,7 +259,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
* drm_dma.h hooks
*/
-void via_driver_irq_preinstall(struct drm_device * dev)
+void via_driver_irq_preinstall(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -329,7 +328,7 @@ int via_driver_irq_postinstall(struct drm_device *dev)
return 0;
}
-void via_driver_irq_uninstall(struct drm_device * dev)
+void via_driver_irq_uninstall(struct drm_device *dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6e6f91591639..6cca9a709f7a 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -25,7 +25,7 @@
#include "via_drm.h"
#include "via_drv.h"
-static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
+static int via_do_init_map(struct drm_device *dev, drm_via_init_t *init)
{
drm_via_private_t *dev_priv = dev->dev_private;
@@ -68,7 +68,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
return 0;
}
-int via_do_cleanup_map(struct drm_device * dev)
+int via_do_cleanup_map(struct drm_device *dev)
{
via_dma_cleanup(dev);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index f694cb5ededc..6cc2dadae3ef 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -31,7 +31,7 @@
#include "drm_sman.h"
#define VIA_MM_ALIGN_SHIFT 4
-#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
+#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@@ -172,7 +172,7 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
}
-void via_reclaim_buffers_locked(struct drm_device * dev,
+void via_reclaim_buffers_locked(struct drm_device *dev,
struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
@@ -183,9 +183,8 @@ void via_reclaim_buffers_locked(struct drm_device * dev,
return;
}
- if (dev->driver->dma_quiescent) {
+ if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- }
drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c
index 46a579198747..48957b856d41 100644
--- a/drivers/gpu/drm/via/via_verifier.c
+++ b/drivers/gpu/drm/via/via_verifier.c
@@ -235,7 +235,7 @@ static hazard_t table2[256];
static hazard_t table3[256];
static __inline__ int
-eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
+eat_words(const uint32_t **buf, const uint32_t *buf_end, unsigned num_words)
{
if ((buf_end - *buf) >= num_words) {
*buf += num_words;
@@ -252,7 +252,7 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
unsigned long offset,
unsigned long size,
- struct drm_device * dev)
+ struct drm_device *dev)
{
struct drm_map_list *r_list;
drm_local_map_t *map = seq->map_cache;
@@ -344,7 +344,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
}
static __inline__ int
-investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
+investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t *cur_seq)
{
register uint32_t tmp, *tmp_addr;
@@ -518,7 +518,7 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t * cur_seq)
+ drm_via_state_t *cur_seq)
{
drm_via_private_t *dev_priv =
(drm_via_private_t *) cur_seq->dev->dev_private;
@@ -621,8 +621,8 @@ via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
}
static __inline__ verifier_state_t
-via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
- drm_via_state_t * hc_state)
+via_check_header2(uint32_t const **buffer, const uint32_t *buf_end,
+ drm_via_state_t *hc_state)
{
uint32_t cmd;
int hz_mode;
@@ -706,16 +706,15 @@ via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
return state_error;
}
}
- if (hc_state->unfinished && finish_current_sequence(hc_state)) {
+ if (hc_state->unfinished && finish_current_sequence(hc_state))
return state_error;
- }
*buffer = buf;
return state_command;
}
static __inline__ verifier_state_t
-via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end, int *fire_count)
+via_parse_header2(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end, int *fire_count)
{
uint32_t cmd;
const uint32_t *buf = *buffer;
@@ -833,8 +832,8 @@ via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
}
static __inline__ verifier_state_t
-via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end)
+via_parse_header1(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
{
register uint32_t cmd;
const uint32_t *buf = *buffer;
@@ -851,7 +850,7 @@ via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
}
static __inline__ verifier_state_t
-via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
+via_check_vheader5(uint32_t const **buffer, const uint32_t *buf_end)
{
uint32_t data;
const uint32_t *buf = *buffer;
@@ -884,8 +883,8 @@ via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
}
static __inline__ verifier_state_t
-via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end)
+via_parse_vheader5(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
{
uint32_t addr, count, i;
const uint32_t *buf = *buffer;
@@ -893,9 +892,8 @@ via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
addr = *buf++ & ~VIA_VIDEOMASK;
i = count = *buf;
buf += 3;
- while (i--) {
+ while (i--)
VIA_WRITE(addr, *buf++);
- }
if (count & 3)
buf += 4 - (count & 3);
*buffer = buf;
@@ -940,8 +938,8 @@ via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
}
static __inline__ verifier_state_t
-via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
- const uint32_t * buf_end)
+via_parse_vheader6(drm_via_private_t *dev_priv, uint32_t const **buffer,
+ const uint32_t *buf_end)
{
uint32_t addr, count, i;
@@ -1037,7 +1035,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
}
int
-via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
+via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size)
{
@@ -1085,9 +1083,8 @@ via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
return -EINVAL;
}
}
- if (state == state_error) {
+ if (state == state_error)
return -EINVAL;
- }
return 0;
}
@@ -1096,13 +1093,11 @@ setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
int i;
- for (i = 0; i < 256; ++i) {
+ for (i = 0; i < 256; ++i)
table[i] = forbidden_command;
- }
- for (i = 0; i < size; ++i) {
+ for (i = 0; i < size; ++i)
table[init_table[i].code] = init_table[i].hz;
- }
}
void via_init_command_verifier(void)
diff --git a/drivers/gpu/drm/via/via_verifier.h b/drivers/gpu/drm/via/via_verifier.h
index d6f8214b69f5..26b6d361ab95 100644
--- a/drivers/gpu/drm/via/via_verifier.h
+++ b/drivers/gpu/drm/via/via_verifier.h
@@ -54,8 +54,8 @@ typedef struct {
const uint32_t *buf_start;
} drm_via_state_t;
-extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
- struct drm_device * dev, int agp);
+extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
+ struct drm_device *dev, int agp);
extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
unsigned int size);
diff --git a/drivers/gpu/drm/via/via_video.c b/drivers/gpu/drm/via/via_video.c
index 6efac8117c93..675d311f038f 100644
--- a/drivers/gpu/drm/via/via_video.c
+++ b/drivers/gpu/drm/via/via_video.c
@@ -29,7 +29,7 @@
#include "via_drm.h"
#include "via_drv.h"
-void via_init_futex(drm_via_private_t * dev_priv)
+void via_init_futex(drm_via_private_t *dev_priv)
{
unsigned int i;
@@ -41,11 +41,11 @@ void via_init_futex(drm_via_private_t * dev_priv)
}
}
-void via_cleanup_futex(drm_via_private_t * dev_priv)
+void via_cleanup_futex(drm_via_private_t *dev_priv)
{
}
-void via_release_futex(drm_via_private_t * dev_priv, int context)
+void via_release_futex(drm_via_private_t *dev_priv, int context)
{
unsigned int i;
volatile int *lock;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index b793c8c9acb3..9dd395b90216 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -764,7 +764,7 @@ static struct drm_driver driver = {
static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- return drm_get_dev(pdev, ent, &driver);
+ return drm_get_pci_dev(pdev, ent, &driver);
}
static int __init vmwgfx_init(void)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index eaad52095339..429f917b60bf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -164,7 +164,7 @@ struct vmw_vga_topology_state {
struct vmw_private {
struct ttm_bo_device bdev;
struct ttm_bo_global_ref bo_global_ref;
- struct ttm_global_reference mem_global_ref;
+ struct drm_global_reference mem_global_ref;
struct vmw_fifo_state fifo;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index b0866f04ec76..870967a97c15 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -528,7 +528,7 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
* Dirty & Deferred IO
*/
par->dirty.x1 = par->dirty.x2 = 0;
- par->dirty.y1 = par->dirty.y1 = 0;
+ par->dirty.y1 = par->dirty.y2 = 0;
par->dirty.active = true;
spin_lock_init(&par->dirty.lock);
info->fbdefio = &vmw_defio;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 437ac786277a..64d7f47df868 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -737,7 +737,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- return NULL;
+ return ERR_PTR(ret);
}
return &vfb->base;
@@ -747,7 +747,7 @@ try_dmabuf:
ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
if (ret) {
DRM_ERROR("failed to find buffer: %i\n", ret);
- return NULL;
+ return ERR_PTR(-ENOENT);
}
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
@@ -758,7 +758,7 @@ try_dmabuf:
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
- return NULL;
+ return ERR_PTR(ret);
}
return &vfb->base;
@@ -768,7 +768,7 @@ err_not_scanout:
/* vmw_user_surface_lookup takes one ref */
vmw_surface_unreference(&surface);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index cfaf690a5b2f..2ff5cf78235f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -79,7 +79,7 @@ static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
u16 *r, u16 *g, u16 *b,
- uint32_t size)
+ uint32_t start, uint32_t size)
{
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 8612378b131e..5f2d5df01e5c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1017,7 +1017,7 @@ int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
}
/*
- * Stream managment
+ * Stream management
*/
static void vmw_stream_destroy(struct vmw_resource *res)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
index e3df4adfb4d8..83123287c60c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c
@@ -44,29 +44,29 @@ int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
-static int vmw_ttm_mem_global_init(struct ttm_global_reference *ref)
+static int vmw_ttm_mem_global_init(struct drm_global_reference *ref)
{
DRM_INFO("global init.\n");
return ttm_mem_global_init(ref->object);
}
-static void vmw_ttm_mem_global_release(struct ttm_global_reference *ref)
+static void vmw_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
int vmw_ttm_global_init(struct vmw_private *dev_priv)
{
- struct ttm_global_reference *global_ref;
+ struct drm_global_reference *global_ref;
int ret;
global_ref = &dev_priv->mem_global_ref;
- global_ref->global_type = TTM_GLOBAL_TTM_MEM;
+ global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &vmw_ttm_mem_global_init;
global_ref->release = &vmw_ttm_mem_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM memory accounting.\n");
return ret;
@@ -75,11 +75,11 @@ int vmw_ttm_global_init(struct vmw_private *dev_priv)
dev_priv->bo_global_ref.mem_glob =
dev_priv->mem_global_ref.object;
global_ref = &dev_priv->bo_global_ref.ref;
- global_ref->global_type = TTM_GLOBAL_TTM_BO;
+ global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
- ret = ttm_global_item_ref(global_ref);
+ ret = drm_global_item_ref(global_ref);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed setting up TTM buffer objects.\n");
@@ -88,12 +88,12 @@ int vmw_ttm_global_init(struct vmw_private *dev_priv)
return 0;
out_no_bo:
- ttm_global_item_unref(&dev_priv->mem_global_ref);
+ drm_global_item_unref(&dev_priv->mem_global_ref);
return ret;
}
void vmw_ttm_global_release(struct vmw_private *dev_priv)
{
- ttm_global_item_unref(&dev_priv->bo_global_ref.ref);
- ttm_global_item_unref(&dev_priv->mem_global_ref);
+ drm_global_item_unref(&dev_priv->bo_global_ref.ref);
+ drm_global_item_unref(&dev_priv->mem_global_ref);
}