Diffstat (limited to 'drivers/gpu')
62 files changed, 4149 insertions, 919 deletions
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index de566cf0414c..30879df3daea 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 6246e3f3dad7..3d09e304f6f4 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -310,10 +310,10 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
                           (unsigned long long)map->offset, map->size);
 
                 break;
+        }
         case _DRM_GEM:
-                DRM_ERROR("tried to rmmap GEM object\n");
+                DRM_ERROR("tried to addmap GEM object\n");
                 break;
-        }
         case _DRM_SCATTER_GATHER:
                 if (!dev->sg) {
                         kfree(map);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index ff447f175a56..fe8697447f32 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -869,6 +869,9 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                 }
                 drm_helper_disable_unused_functions(dev);
         } else if (fb_changed) {
+                set->crtc->x = set->x;
+                set->crtc->y = set->y;
+
                 old_fb = set->crtc->fb;
                 if (set->crtc->fb != set->fb)
                         set->crtc->fb = set->fb;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 8eee4a620376..2c4671314884 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -116,27 +116,30 @@ static void drm_fb_helper_on(struct fb_info *info)
          * For each CRTC in this fb, turn the crtc on then,
          * find all associated encoders and turn them on.
          */
-        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+        for (i = 0; i < fb_helper->crtc_count; i++) {
+                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                        struct drm_crtc_helper_funcs *crtc_funcs =
+                                crtc->helper_private;
 
-                for (i = 0; i < fb_helper->crtc_count; i++) {
-                        if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-                                break;
-                }
+                        /* Only mess with CRTCs in this fb */
+                        if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
+                            !crtc->enabled)
+                                continue;
 
-                mutex_lock(&dev->mode_config.mutex);
-                crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-                mutex_unlock(&dev->mode_config.mutex);
+                        mutex_lock(&dev->mode_config.mutex);
+                        crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
+                        mutex_unlock(&dev->mode_config.mutex);
 
-                /* Found a CRTC on this fb, now find encoders */
-                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-                        if (encoder->crtc == crtc) {
-                                struct drm_encoder_helper_funcs *encoder_funcs;
+                        /* Found a CRTC on this fb, now find encoders */
+                        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                                if (encoder->crtc == crtc) {
+                                        struct drm_encoder_helper_funcs *encoder_funcs;
 
-                                encoder_funcs = encoder->helper_private;
-                                mutex_lock(&dev->mode_config.mutex);
-                                encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
-                                mutex_unlock(&dev->mode_config.mutex);
+                                        encoder_funcs = encoder->helper_private;
+                                        mutex_lock(&dev->mode_config.mutex);
+                                        encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+                                        mutex_unlock(&dev->mode_config.mutex);
+                                }
                         }
                 }
         }
@@ -154,30 +157,33 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
          * For each CRTC in this fb, find all associated encoders
          * and turn them off, then turn off the CRTC.
          */
-        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-                struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
-
-                for (i = 0; i < fb_helper->crtc_count; i++) {
-                        if (crtc->base.id == fb_helper->crtc_info[i].crtc_id)
-                                break;
-                }
-
-                /* Found a CRTC on this fb, now find encoders */
-                list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-                        if (encoder->crtc == crtc) {
-                                struct drm_encoder_helper_funcs *encoder_funcs;
-
-                                encoder_funcs = encoder->helper_private;
+        for (i = 0; i < fb_helper->crtc_count; i++) {
+                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+                        struct drm_crtc_helper_funcs *crtc_funcs =
+                                crtc->helper_private;
+
+                        /* Only mess with CRTCs in this fb */
+                        if (crtc->base.id != fb_helper->crtc_info[i].crtc_id ||
+                            !crtc->enabled)
+                                continue;
+
+                        /* Found a CRTC on this fb, now find encoders */
+                        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+                                if (encoder->crtc == crtc) {
+                                        struct drm_encoder_helper_funcs *encoder_funcs;
+
+                                        encoder_funcs = encoder->helper_private;
+                                        mutex_lock(&dev->mode_config.mutex);
+                                        encoder_funcs->dpms(encoder, dpms_mode);
+                                        mutex_unlock(&dev->mode_config.mutex);
+                                }
+                        }
+                        if (dpms_mode == DRM_MODE_DPMS_OFF) {
                                 mutex_lock(&dev->mode_config.mutex);
-                                encoder_funcs->dpms(encoder, dpms_mode);
+                                crtc_funcs->dpms(crtc, dpms_mode);
                                 mutex_unlock(&dev->mode_config.mutex);
                         }
                 }
-                if (dpms_mode == DRM_MODE_DPMS_OFF) {
-                        mutex_lock(&dev->mode_config.mutex);
-                        crtc_funcs->dpms(crtc, dpms_mode);
-                        mutex_unlock(&dev->mode_config.mutex);
-                }
         }
 }
 
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f85aaf21e783..0a6f0b3bdc78 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -37,6 +37,7 @@
 #include <linux/interrupt.h>	/* For task queue support */
 
+#include <linux/vgaarb.h>
 /**
  * Get interrupt from bus id.
  *
@@ -171,6 +172,26 @@ err:
 }
 EXPORT_SYMBOL(drm_vblank_init);
 
+static void drm_irq_vgaarb_nokms(void *cookie, bool state)
+{
+        struct drm_device *dev = cookie;
+
+        if (dev->driver->vgaarb_irq) {
+                dev->driver->vgaarb_irq(dev, state);
+                return;
+        }
+
+        if (!dev->irq_enabled)
+                return;
+
+        if (state)
+                dev->driver->irq_uninstall(dev);
+        else {
+                dev->driver->irq_preinstall(dev);
+                dev->driver->irq_postinstall(dev);
+        }
+}
+
 /**
  * Install IRQ handler.
  *
@@ -231,6 +252,9 @@ int drm_irq_install(struct drm_device *dev)
                 return ret;
         }
 
+        if (!drm_core_check_feature(dev, DRIVER_MODESET))
+                vga_client_register(dev->pdev, (void *)dev, drm_irq_vgaarb_nokms, NULL);
+
         /* After installing handler */
         ret = dev->driver->irq_postinstall(dev);
         if (ret < 0) {
@@ -279,6 +303,9 @@ int drm_irq_uninstall(struct drm_device * dev)
 
         DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
+        if (!drm_core_check_feature(dev, DRIVER_MODESET))
+                vga_client_register(dev->pdev, NULL, NULL, NULL);
+
         dev->driver->irq_uninstall(dev);
 
         free_irq(dev->pdev->irq, dev);
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 51611722aa02..7e42b7e9d43a 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -77,7 +77,7 @@ static ssize_t version_show(struct class *dev, char *buf)
                        CORE_MINOR, CORE_PATCHLEVEL, CORE_DATE);
 }
 
-static char *drm_nodename(struct device *dev)
+static char *drm_devnode(struct device *dev, mode_t *mode)
 {
         return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
 }
@@ -113,7 +113,7 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
         if (err)
                 goto err_out_class;
 
-        class->nodename = drm_nodename;
+        class->devnode = drm_devnode;
 
         return class;
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ae7ec0390024..45d507ebd3ff 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -34,6 +34,7 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_trace.h"
+#include <linux/vgaarb.h>
 
 /* Really want an OS-independent resettable timer.  Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
@@ -1168,6 +1169,19 @@ static void i915_setup_compression(struct drm_device *dev, int size)
                     ll_base, size >> 20);
 }
 
+/* true = enable decode, false = disable decoder */
+static unsigned int i915_vga_set_decode(void *cookie, bool state)
+{
+        struct drm_device *dev = cookie;
+
+        intel_modeset_vga_set_state(dev, state);
+        if (state)
+                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+        else
+                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+}
+
 static int i915_load_modeset_init(struct drm_device *dev,
                                   unsigned long prealloc_start,
                                   unsigned long prealloc_size,
@@ -1233,6 +1247,11 @@ static int i915_load_modeset_init(struct drm_device *dev,
         if (ret)
                 DRM_INFO("failed to find VBIOS tables\n");
 
+        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
+        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
+        if (ret)
+                goto destroy_ringbuffer;
+
         ret = drm_irq_install(dev);
         if (ret)
                 goto destroy_ringbuffer;
@@ -1507,6 +1526,7 @@ int i915_driver_unload(struct drm_device *dev)
 
         if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                 drm_irq_uninstall(dev);
+                vga_client_register(dev->pdev, NULL, NULL, NULL);
         }
 
         if (dev->pdev->msi_enabled)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 931087272854..b24b2d145b75 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -826,6 +826,7 @@ static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
+extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void i8xx_disable_fbc(struct drm_device *dev);
 extern void g4x_disable_fbc(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c
b/drivers/gpu/drm/i915/i915_gem.c index 6129b7b4f1a5..40727d4c2919 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -4549,15 +4549,11 @@ int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - int ret; - if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - ret = i915_gem_idle(dev); drm_irq_uninstall(dev); - - return ret; + return i915_gem_idle(dev); } void diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8122a72828e4..0466ddbeba32 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -30,6 +30,7 @@ * fb aperture size and the amount of pre-reserved memory. */ #define INTEL_GMCH_CTRL 0x52 +#define INTEL_GMCH_VGA_DISABLE (1 << 1) #define INTEL_GMCH_ENABLED 0x4 #define INTEL_GMCH_MEM_MASK 0x1 #define INTEL_GMCH_MEM_64M 0x1 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d995762ce4bc..93ff6c03733e 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -4319,3 +4319,20 @@ struct drm_encoder *intel_best_encoder(struct drm_connector *connector) return &intel_output->enc; } + +/* + * set vga decode state - true == enable VGA decode + */ +int intel_modeset_vga_set_state(struct drm_device *dev, bool state) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + u16 gmch_ctrl; + + pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); + if (state) + gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; + else + gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; + pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); + return 0; +} diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index f2afc4af4bc9..f4856a510476 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -232,7 +232,7 @@ intel_dp_aux_ch(struct intel_output *intel_output, for (try = 0; try < 5; try++) { /* Load the send data into the aux channel data registers */ for (i = 0; i < send_bytes; i += 4) { - uint32_t d = pack_aux(send + i, send_bytes - i);; + uint32_t d = pack_aux(send + i, send_bytes - i); I915_WRITE(ch_data + i, d); } @@ -1263,7 +1263,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) if (IS_eDP(intel_output)) { intel_output->crtc_mask = (1 << 1); - intel_output->clone_mask = (1 << INTEL_OUTPUT_EDP); + intel_output->clone_mask = (1 << INTEL_EDP_CLONE_BIT); } else intel_output->crtc_mask = (1 << 0) | (1 << 1); connector->interlace_allowed = true; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 7478196390b4..8aa4b7f30daa 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -75,6 +75,7 @@ #define INTEL_LVDS_CLONE_BIT 14 #define INTEL_DVO_TMDS_CLONE_BIT 15 #define INTEL_DVO_LVDS_CLONE_BIT 16 +#define INTEL_EDP_CLONE_BIT 17 #define INTEL_DVO_CHIP_NONE 0 #define INTEL_DVO_CHIP_LVDS 1 @@ -179,4 +180,5 @@ extern int intel_framebuffer_create(struct drm_device *dev, struct drm_mode_fb_cmd *mode_cmd, struct drm_framebuffer **fb, struct drm_gem_object *obj); + #endif /* __INTEL_DRV_H__ */ diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index a6c686cded54..c64eab493fb0 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c @@ -1760,6 +1760,7 @@ intel_tv_init(struct drm_device *dev) drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); tv_priv = (struct intel_tv_priv 
*)(intel_output + 1); intel_output->type = INTEL_OUTPUT_TVOUT; + intel_output->crtc_mask = (1 << 0) | (1 << 1); intel_output->clone_mask = (1 << INTEL_TV_CLONE_BIT); intel_output->enc.possible_crtcs = ((1 << 0) | (1 << 1)); intel_output->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT); diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c index b710fab21cb3..a53b848e0f17 100644 --- a/drivers/gpu/drm/mga/mga_state.c +++ b/drivers/gpu/drm/mga/mga_state.c @@ -239,7 +239,7 @@ static __inline__ void mga_g200_emit_pipe(drm_mga_private_t * dev_priv) MGA_WR34, 0x00000000, MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff); - /* Padding required to to hardware bug. + /* Padding required due to hardware bug. */ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, @@ -317,7 +317,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv) MGA_WR52, MGA_G400_WR_MAGIC, /* tex1 width */ MGA_WR60, MGA_G400_WR_MAGIC); /* tex1 height */ - /* Padding required to to hardware bug */ + /* Padding required due to hardware bug */ DMA_BLOCK(MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, MGA_DMAPAD, 0xffffffff, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index a7edd0f2ac37..6a015929deee 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -488,6 +488,11 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, } switch (crtc->fb->bits_per_pixel) { + case 8: + fb_format = + AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | + AVIVO_D1GRPH_CONTROL_8BPP_INDEXED; + break; case 15: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h index d4e6e6e4a938..e2b92c445bab 100644 --- a/drivers/gpu/drm/radeon/avivod.h +++ b/drivers/gpu/drm/radeon/avivod.h @@ -57,4 +57,13 @@ #define VGA_RENDER_CONTROL 0x0300 #define VGA_VSTATUS_CNTL_MASK 0x00030000 +/* AVIVO disable VGA rendering */ +static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev) +{ + u32 vga_render; + vga_render = RREG32(VGA_RENDER_CONTROL); + vga_render &= ~VGA_VSTATUS_CNTL_MASK; + WREG32(VGA_RENDER_CONTROL, vga_render); +} + #endif diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5708c07ce733..be51c5f7d0f6 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -84,23 +84,28 @@ void r100_pci_gart_tlb_flush(struct radeon_device *rdev) * could end up in wrong address. 
*/ } -int r100_pci_gart_enable(struct radeon_device *rdev) +int r100_pci_gart_init(struct radeon_device *rdev) { - uint32_t tmp; int r; + if (rdev->gart.table.ram.ptr) { + WARN(1, "R100 PCI GART already initialized.\n"); + return 0; + } /* Initialize common gart structure */ r = radeon_gart_init(rdev); - if (r) { + if (r) return r; - } - if (rdev->gart.table.ram.ptr == NULL) { - rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; - r = radeon_gart_table_ram_alloc(rdev); - if (r) { - return r; - } - } + rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; + rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart_set_page = &r100_pci_gart_set_page; + return radeon_gart_table_ram_alloc(rdev); +} + +int r100_pci_gart_enable(struct radeon_device *rdev) +{ + uint32_t tmp; + /* discard memory request outside of configured range */ tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; WREG32(RADEON_AIC_CNTL, tmp); @@ -140,13 +145,11 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) return 0; } -int r100_gart_enable(struct radeon_device *rdev) +void r100_pci_gart_fini(struct radeon_device *rdev) { - if (rdev->flags & RADEON_IS_AGP) { - r100_pci_gart_disable(rdev); - return 0; - } - return r100_pci_gart_enable(rdev); + r100_pci_gart_disable(rdev); + radeon_gart_table_ram_free(rdev); + radeon_gart_fini(rdev); } @@ -273,9 +276,6 @@ int r100_mc_init(struct radeon_device *rdev) void r100_mc_fini(struct radeon_device *rdev) { - r100_pci_gart_disable(rdev); - radeon_gart_table_ram_free(rdev); - radeon_gart_fini(rdev); } @@ -299,6 +299,17 @@ int r100_irq_set(struct radeon_device *rdev) return 0; } +void r100_irq_disable(struct radeon_device *rdev) +{ + u32 tmp; + + WREG32(R_000040_GEN_INT_CNTL, 0); + /* Wait and acknowledge irq */ + mdelay(1); + tmp = RREG32(R_000044_GEN_INT_STATUS); + WREG32(R_000044_GEN_INT_STATUS, tmp); +} + static inline uint32_t r100_irq_ack(struct radeon_device *rdev) { uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS); @@ -319,6 +330,9 @@ int r100_irq_process(struct radeon_device *rdev) if (!status) { return IRQ_NONE; } + if (rdev->shutdown) { + return IRQ_NONE; + } while (status) { /* SW interrupt */ if (status & RADEON_SW_INT_TEST) { @@ -393,14 +407,21 @@ int r100_wb_init(struct radeon_device *rdev) return r; } } - WREG32(RADEON_SCRATCH_ADDR, rdev->wb.gpu_addr); - WREG32(RADEON_CP_RB_RPTR_ADDR, rdev->wb.gpu_addr + 1024); - WREG32(RADEON_SCRATCH_UMSK, 0xff); + WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); + WREG32(R_00070C_CP_RB_RPTR_ADDR, + S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); + WREG32(R_000770_SCRATCH_UMSK, 0xff); return 0; } +void r100_wb_disable(struct radeon_device *rdev) +{ + WREG32(R_000770_SCRATCH_UMSK, 0); +} + void r100_wb_fini(struct radeon_device *rdev) { + r100_wb_disable(rdev); if (rdev->wb.wb_obj) { radeon_object_kunmap(rdev->wb.wb_obj); radeon_object_unpin(rdev->wb.wb_obj); @@ -487,6 +508,21 @@ int r100_copy_blit(struct radeon_device *rdev, /* * CP */ +static int r100_cp_wait_for_idle(struct radeon_device *rdev) +{ + unsigned i; + u32 tmp; + + for (i = 0; i < rdev->usec_timeout; i++) { + tmp = RREG32(R_000E40_RBBM_STATUS); + if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { + return 0; + } + udelay(1); + } + return -1; +} + void r100_ring_start(struct radeon_device *rdev) { int r; @@ -715,9 +751,11 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) void r100_cp_fini(struct radeon_device *rdev) { + if (r100_cp_wait_for_idle(rdev)) { + DRM_ERROR("Wait for CP idle timeout, shutting 
down CP.\n"); + } /* Disable ring */ - rdev->cp.ready = false; - WREG32(RADEON_CP_CSQ_CNTL, 0); + r100_cp_disable(rdev); radeon_ring_fini(rdev); DRM_INFO("radeon: cp finalized\n"); } @@ -1561,11 +1599,12 @@ static int r100_packet3_check(struct radeon_cs_parser *p, int r100_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; - struct r100_cs_track track; + struct r100_cs_track *track; int r; - r100_cs_track_clear(p->rdev, &track); - p->track = &track; + track = kzalloc(sizeof(*track), GFP_KERNEL); + r100_cs_track_clear(p->rdev, track); + p->track = track; do { r = r100_cs_packet_parse(p, &pkt, p->idx); if (r) { @@ -1916,6 +1955,20 @@ void r100_vram_init_sizes(struct radeon_device *rdev) rdev->mc.real_vram_size = rdev->mc.aper_size; } +void r100_vga_set_state(struct radeon_device *rdev, bool state) +{ + uint32_t temp; + + temp = RREG32(RADEON_CONFIG_CNTL); + if (state == false) { + temp &= ~(1<<8); + temp |= (1<<9); + } else { + temp &= ~(1<<9); + } + WREG32(RADEON_CONFIG_CNTL, temp); +} + void r100_vram_info(struct radeon_device *rdev) { r100_vram_get_type(rdev); @@ -2196,6 +2249,11 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg, flags |= R300_SURF_TILE_MICRO; } + if (tiling_flags & RADEON_TILING_SWAP_16BIT) + flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP; + if (tiling_flags & RADEON_TILING_SWAP_32BIT) + flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP; + DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1); WREG32(RADEON_SURFACE0_INFO + surf_index, flags); WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset); @@ -3065,3 +3123,86 @@ int r100_ib_test(struct radeon_device *rdev) radeon_ib_free(rdev, &ib); return r; } + +void r100_ib_fini(struct radeon_device *rdev) +{ + radeon_ib_pool_fini(rdev); +} + +int r100_ib_init(struct radeon_device *rdev) +{ + int r; + + r = radeon_ib_pool_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB pool (%d).\n", r); + r100_ib_fini(rdev); + return r; + } + r = r100_ib_test(rdev); + if (r) { + dev_err(rdev->dev, "failled testing IB (%d).\n", r); + r100_ib_fini(rdev); + return r; + } + return 0; +} + +void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save) +{ + /* Shutdown CP we shouldn't need to do that but better be safe than + * sorry + */ + rdev->cp.ready = false; + WREG32(R_000740_CP_CSQ_CNTL, 0); + + /* Save few CRTC registers */ + save->GENMO_WT = RREG32(R_0003C0_GENMO_WT); + save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL); + save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL); + save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET); + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL); + save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET); + } + + /* Disable VGA aperture access */ + WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT); + /* Disable cursor, overlay, crtc */ + WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1)); + WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL | + S_000054_CRTC_DISPLAY_DIS(1)); + WREG32(R_000050_CRTC_GEN_CNTL, + (C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) | + S_000050_CRTC_DISP_REQ_EN_B(1)); + WREG32(R_000420_OV0_SCALE_CNTL, + C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL)); + WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET); + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET | + S_000360_CUR2_LOCK(1)); + WREG32(R_0003F8_CRTC2_GEN_CNTL, + 
(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) | + S_0003F8_CRTC2_DISPLAY_DIS(1) | + S_0003F8_CRTC2_DISP_REQ_EN_B(1)); + WREG32(R_000360_CUR2_OFFSET, + C_000360_CUR2_LOCK & save->CUR2_OFFSET); + } +} + +void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save) +{ + /* Update base address for crtc */ + WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location); + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, + rdev->mc.vram_location); + } + /* Restore CRTC registers */ + WREG32(R_0003C0_GENMO_WT, save->GENMO_WT); + WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL); + WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL); + if (!(rdev->flags & RADEON_SINGLE_CRTC)) { + WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL); + } +} diff --git a/drivers/gpu/drm/radeon/r100d.h b/drivers/gpu/drm/radeon/r100d.h index 6da7d92c321c..c4b257ec920e 100644 --- a/drivers/gpu/drm/radeon/r100d.h +++ b/drivers/gpu/drm/radeon/r100d.h @@ -73,4 +73,535 @@ #define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) +/* Registers */ +#define R_000040_GEN_INT_CNTL 0x000040 +#define S_000040_CRTC_VBLANK(x) (((x) & 0x1) << 0) +#define G_000040_CRTC_VBLANK(x) (((x) >> 0) & 0x1) +#define C_000040_CRTC_VBLANK 0xFFFFFFFE +#define S_000040_CRTC_VLINE(x) (((x) & 0x1) << 1) +#define G_000040_CRTC_VLINE(x) (((x) >> 1) & 0x1) +#define C_000040_CRTC_VLINE 0xFFFFFFFD +#define S_000040_CRTC_VSYNC(x) (((x) & 0x1) << 2) +#define G_000040_CRTC_VSYNC(x) (((x) >> 2) & 0x1) +#define C_000040_CRTC_VSYNC 0xFFFFFFFB +#define S_000040_SNAPSHOT(x) (((x) & 0x1) << 3) +#define G_000040_SNAPSHOT(x) (((x) >> 3) & 0x1) +#define C_000040_SNAPSHOT 0xFFFFFFF7 +#define S_000040_FP_DETECT(x) (((x) & 0x1) << 4) +#define G_000040_FP_DETECT(x) (((x) >> 4) & 0x1) +#define C_000040_FP_DETECT 0xFFFFFFEF +#define S_000040_CRTC2_VLINE(x) (((x) & 0x1) << 5) +#define G_000040_CRTC2_VLINE(x) (((x) >> 5) & 0x1) +#define C_000040_CRTC2_VLINE 0xFFFFFFDF +#define S_000040_DMA_VIPH0_INT_EN(x) (((x) & 0x1) << 12) +#define G_000040_DMA_VIPH0_INT_EN(x) (((x) >> 12) & 0x1) +#define C_000040_DMA_VIPH0_INT_EN 0xFFFFEFFF +#define S_000040_CRTC2_VSYNC(x) (((x) & 0x1) << 6) +#define G_000040_CRTC2_VSYNC(x) (((x) >> 6) & 0x1) +#define C_000040_CRTC2_VSYNC 0xFFFFFFBF +#define S_000040_SNAPSHOT2(x) (((x) & 0x1) << 7) +#define G_000040_SNAPSHOT2(x) (((x) >> 7) & 0x1) +#define C_000040_SNAPSHOT2 0xFFFFFF7F +#define S_000040_CRTC2_VBLANK(x) (((x) & 0x1) << 9) +#define G_000040_CRTC2_VBLANK(x) (((x) >> 9) & 0x1) +#define C_000040_CRTC2_VBLANK 0xFFFFFDFF +#define S_000040_FP2_DETECT(x) (((x) & 0x1) << 10) +#define G_000040_FP2_DETECT(x) (((x) >> 10) & 0x1) +#define C_000040_FP2_DETECT 0xFFFFFBFF +#define S_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) & 0x1) << 11) +#define G_000040_VSYNC_DIFF_OVER_LIMIT(x) (((x) >> 11) & 0x1) +#define C_000040_VSYNC_DIFF_OVER_LIMIT 0xFFFFF7FF +#define S_000040_DMA_VIPH1_INT_EN(x) (((x) & 0x1) << 13) +#define G_000040_DMA_VIPH1_INT_EN(x) (((x) >> 13) & 0x1) +#define C_000040_DMA_VIPH1_INT_EN 0xFFFFDFFF +#define S_000040_DMA_VIPH2_INT_EN(x) (((x) & 0x1) << 14) +#define G_000040_DMA_VIPH2_INT_EN(x) (((x) >> 14) & 0x1) +#define C_000040_DMA_VIPH2_INT_EN 0xFFFFBFFF +#define S_000040_DMA_VIPH3_INT_EN(x) (((x) & 0x1) << 15) +#define G_000040_DMA_VIPH3_INT_EN(x) (((x) >> 15) & 0x1) +#define C_000040_DMA_VIPH3_INT_EN 0xFFFF7FFF +#define S_000040_I2C_INT_EN(x) (((x) & 0x1) << 17) +#define G_000040_I2C_INT_EN(x) (((x) >> 17) & 0x1) +#define C_000040_I2C_INT_EN 
0xFFFDFFFF +#define S_000040_GUI_IDLE(x) (((x) & 0x1) << 19) +#define G_000040_GUI_IDLE(x) (((x) >> 19) & 0x1) +#define C_000040_GUI_IDLE 0xFFF7FFFF +#define S_000040_VIPH_INT_EN(x) (((x) & 0x1) << 24) +#define G_000040_VIPH_INT_EN(x) (((x) >> 24) & 0x1) +#define C_000040_VIPH_INT_EN 0xFEFFFFFF +#define S_000040_SW_INT_EN(x) (((x) & 0x1) << 25) +#define G_000040_SW_INT_EN(x) (((x) >> 25) & 0x1) +#define C_000040_SW_INT_EN 0xFDFFFFFF +#define S_000040_GEYSERVILLE(x) (((x) & 0x1) << 27) +#define G_000040_GEYSERVILLE(x) (((x) >> 27) & 0x1) +#define C_000040_GEYSERVILLE 0xF7FFFFFF +#define S_000040_HDCP_AUTHORIZED_INT(x) (((x) & 0x1) << 28) +#define G_000040_HDCP_AUTHORIZED_INT(x) (((x) >> 28) & 0x1) +#define C_000040_HDCP_AUTHORIZED_INT 0xEFFFFFFF +#define S_000040_DVI_I2C_INT(x) (((x) & 0x1) << 29) +#define G_000040_DVI_I2C_INT(x) (((x) >> 29) & 0x1) +#define C_000040_DVI_I2C_INT 0xDFFFFFFF +#define S_000040_GUIDMA(x) (((x) & 0x1) << 30) +#define G_000040_GUIDMA(x) (((x) >> 30) & 0x1) +#define C_000040_GUIDMA 0xBFFFFFFF +#define S_000040_VIDDMA(x) (((x) & 0x1) << 31) +#define G_000040_VIDDMA(x) (((x) >> 31) & 0x1) +#define C_000040_VIDDMA 0x7FFFFFFF +#define R_000044_GEN_INT_STATUS 0x000044 +#define S_000044_CRTC_VBLANK_STAT(x) (((x) & 0x1) << 0) +#define G_000044_CRTC_VBLANK_STAT(x) (((x) >> 0) & 0x1) +#define C_000044_CRTC_VBLANK_STAT 0xFFFFFFFE +#define S_000044_CRTC_VBLANK_STAT_AK(x) (((x) & 0x1) << 0) +#define G_000044_CRTC_VBLANK_STAT_AK(x) (((x) >> 0) & 0x1) +#define C_000044_CRTC_VBLANK_STAT_AK 0xFFFFFFFE +#define S_000044_CRTC_VLINE_STAT(x) (((x) & 0x1) << 1) +#define G_000044_CRTC_VLINE_STAT(x) (((x) >> 1) & 0x1) +#define C_000044_CRTC_VLINE_STAT 0xFFFFFFFD +#define S_000044_CRTC_VLINE_STAT_AK(x) (((x) & 0x1) << 1) +#define G_000044_CRTC_VLINE_STAT_AK(x) (((x) >> 1) & 0x1) +#define C_000044_CRTC_VLINE_STAT_AK 0xFFFFFFFD +#define S_000044_CRTC_VSYNC_STAT(x) (((x) & 0x1) << 2) +#define G_000044_CRTC_VSYNC_STAT(x) (((x) >> 2) & 0x1) +#define C_000044_CRTC_VSYNC_STAT 0xFFFFFFFB +#define S_000044_CRTC_VSYNC_STAT_AK(x) (((x) & 0x1) << 2) +#define G_000044_CRTC_VSYNC_STAT_AK(x) (((x) >> 2) & 0x1) +#define C_000044_CRTC_VSYNC_STAT_AK 0xFFFFFFFB +#define S_000044_SNAPSHOT_STAT(x) (((x) & 0x1) << 3) +#define G_000044_SNAPSHOT_STAT(x) (((x) >> 3) & 0x1) +#define C_000044_SNAPSHOT_STAT 0xFFFFFFF7 +#define S_000044_SNAPSHOT_STAT_AK(x) (((x) & 0x1) << 3) +#define G_000044_SNAPSHOT_STAT_AK(x) (((x) >> 3) & 0x1) +#define C_000044_SNAPSHOT_STAT_AK 0xFFFFFFF7 +#define S_000044_FP_DETECT_STAT(x) (((x) & 0x1) << 4) +#define G_000044_FP_DETECT_STAT(x) (((x) >> 4) & 0x1) +#define C_000044_FP_DETECT_STAT 0xFFFFFFEF +#define S_000044_FP_DETECT_STAT_AK(x) (((x) & 0x1) << 4) +#define G_000044_FP_DETECT_STAT_AK(x) (((x) >> 4) & 0x1) +#define C_000044_FP_DETECT_STAT_AK 0xFFFFFFEF +#define S_000044_CRTC2_VLINE_STAT(x) (((x) & 0x1) << 5) +#define G_000044_CRTC2_VLINE_STAT(x) (((x) >> 5) & 0x1) +#define C_000044_CRTC2_VLINE_STAT 0xFFFFFFDF +#define S_000044_CRTC2_VLINE_STAT_AK(x) (((x) & 0x1) << 5) +#define G_000044_CRTC2_VLINE_STAT_AK(x) (((x) >> 5) & 0x1) +#define C_000044_CRTC2_VLINE_STAT_AK 0xFFFFFFDF +#define S_000044_CRTC2_VSYNC_STAT(x) (((x) & 0x1) << 6) +#define G_000044_CRTC2_VSYNC_STAT(x) (((x) >> 6) & 0x1) +#define C_000044_CRTC2_VSYNC_STAT 0xFFFFFFBF +#define S_000044_CRTC2_VSYNC_STAT_AK(x) (((x) & 0x1) << 6) +#define G_000044_CRTC2_VSYNC_STAT_AK(x) (((x) >> 6) & 0x1) +#define C_000044_CRTC2_VSYNC_STAT_AK 0xFFFFFFBF +#define S_000044_SNAPSHOT2_STAT(x) (((x) & 0x1) << 7) +#define 
G_000044_SNAPSHOT2_STAT(x) (((x) >> 7) & 0x1) +#define C_000044_SNAPSHOT2_STAT 0xFFFFFF7F +#define S_000044_SNAPSHOT2_STAT_AK(x) (((x) & 0x1) << 7) +#define G_000044_SNAPSHOT2_STAT_AK(x) (((x) >> 7) & 0x1) +#define C_000044_SNAPSHOT2_STAT_AK 0xFFFFFF7F +#define S_000044_CAP0_INT_ACTIVE(x) (((x) & 0x1) << 8) +#define G_000044_CAP0_INT_ACTIVE(x) (((x) >> 8) & 0x1) +#define C_000044_CAP0_INT_ACTIVE 0xFFFFFEFF +#define S_000044_CRTC2_VBLANK_STAT(x) (((x) & 0x1) << 9) +#define G_000044_CRTC2_VBLANK_STAT(x) (((x) >> 9) & 0x1) +#define C_000044_CRTC2_VBLANK_STAT 0xFFFFFDFF +#define S_000044_CRTC2_VBLANK_STAT_AK(x) (((x) & 0x1) << 9) +#define G_000044_CRTC2_VBLANK_STAT_AK(x) (((x) >> 9) & 0x1) +#define C_000044_CRTC2_VBLANK_STAT_AK 0xFFFFFDFF +#define S_000044_FP2_DETECT_STAT(x) (((x) & 0x1) << 10) +#define G_000044_FP2_DETECT_STAT(x) (((x) >> 10) & 0x1) +#define C_000044_FP2_DETECT_STAT 0xFFFFFBFF +#define S_000044_FP2_DETECT_STAT_AK(x) (((x) & 0x1) << 10) +#define G_000044_FP2_DETECT_STAT_AK(x) (((x) >> 10) & 0x1) +#define C_000044_FP2_DETECT_STAT_AK 0xFFFFFBFF +#define S_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x) (((x) & 0x1) << 11) +#define G_000044_VSYNC_DIFF_OVER_LIMIT_STAT(x) (((x) >> 11) & 0x1) +#define C_000044_VSYNC_DIFF_OVER_LIMIT_STAT 0xFFFFF7FF +#define S_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x) (((x) & 0x1) << 11) +#define G_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK(x) (((x) >> 11) & 0x1) +#define C_000044_VSYNC_DIFF_OVER_LIMIT_STAT_AK 0xFFFFF7FF +#define S_000044_DMA_VIPH0_INT(x) (((x) & 0x1) << 12) +#define G_000044_DMA_VIPH0_INT(x) (((x) >> 12) & 0x1) +#define C_000044_DMA_VIPH0_INT 0xFFFFEFFF +#define S_000044_DMA_VIPH0_INT_AK(x) (((x) & 0x1) << 12) +#define G_000044_DMA_VIPH0_INT_AK(x) (((x) >> 12) & 0x1) +#define C_000044_DMA_VIPH0_INT_AK 0xFFFFEFFF +#define S_000044_DMA_VIPH1_INT(x) (((x) & 0x1) << 13) +#define G_000044_DMA_VIPH1_INT(x) (((x) >> 13) & 0x1) +#define C_000044_DMA_VIPH1_INT 0xFFFFDFFF +#define S_000044_DMA_VIPH1_INT_AK(x) (((x) & 0x1) << 13) +#define G_000044_DMA_VIPH1_INT_AK(x) (((x) >> 13) & 0x1) +#define C_000044_DMA_VIPH1_INT_AK 0xFFFFDFFF +#define S_000044_DMA_VIPH2_INT(x) (((x) & 0x1) << 14) +#define G_000044_DMA_VIPH2_INT(x) (((x) >> 14) & 0x1) +#define C_000044_DMA_VIPH2_INT 0xFFFFBFFF +#define S_000044_DMA_VIPH2_INT_AK(x) (((x) & 0x1) << 14) +#define G_000044_DMA_VIPH2_INT_AK(x) (((x) >> 14) & 0x1) +#define C_000044_DMA_VIPH2_INT_AK 0xFFFFBFFF +#define S_000044_DMA_VIPH3_INT(x) (((x) & 0x1) << 15) +#define G_000044_DMA_VIPH3_INT(x) (((x) >> 15) & 0x1) +#define C_000044_DMA_VIPH3_INT 0xFFFF7FFF +#define S_000044_DMA_VIPH3_INT_AK(x) (((x) & 0x1) << 15) +#define G_000044_DMA_VIPH3_INT_AK(x) (((x) >> 15) & 0x1) +#define C_000044_DMA_VIPH3_INT_AK 0xFFFF7FFF +#define S_000044_I2C_INT(x) (((x) & 0x1) << 17) +#define G_000044_I2C_INT(x) (((x) >> 17) & 0x1) +#define C_000044_I2C_INT 0xFFFDFFFF +#define S_000044_I2C_INT_AK(x) (((x) & 0x1) << 17) +#define G_000044_I2C_INT_AK(x) (((x) >> 17) & 0x1) +#define C_000044_I2C_INT_AK 0xFFFDFFFF +#define S_000044_GUI_IDLE_STAT(x) (((x) & 0x1) << 19) +#define G_000044_GUI_IDLE_STAT(x) (((x) >> 19) & 0x1) +#define C_000044_GUI_IDLE_STAT 0xFFF7FFFF +#define S_000044_GUI_IDLE_STAT_AK(x) (((x) & 0x1) << 19) +#define G_000044_GUI_IDLE_STAT_AK(x) (((x) >> 19) & 0x1) +#define C_000044_GUI_IDLE_STAT_AK 0xFFF7FFFF +#define S_000044_VIPH_INT(x) (((x) & 0x1) << 24) +#define G_000044_VIPH_INT(x) (((x) >> 24) & 0x1) +#define C_000044_VIPH_INT 0xFEFFFFFF +#define S_000044_SW_INT(x) (((x) & 0x1) << 25) +#define G_000044_SW_INT(x) (((x) >> 25) & 0x1) 
+#define C_000044_SW_INT 0xFDFFFFFF +#define S_000044_SW_INT_AK(x) (((x) & 0x1) << 25) +#define G_000044_SW_INT_AK(x) (((x) >> 25) & 0x1) +#define C_000044_SW_INT_AK 0xFDFFFFFF +#define S_000044_SW_INT_SET(x) (((x) & 0x1) << 26) +#define G_000044_SW_INT_SET(x) (((x) >> 26) & 0x1) +#define C_000044_SW_INT_SET 0xFBFFFFFF +#define S_000044_GEYSERVILLE_STAT(x) (((x) & 0x1) << 27) +#define G_000044_GEYSERVILLE_STAT(x) (((x) >> 27) & 0x1) +#define C_000044_GEYSERVILLE_STAT 0xF7FFFFFF +#define S_000044_GEYSERVILLE_STAT_AK(x) (((x) & 0x1) << 27) +#define G_000044_GEYSERVILLE_STAT_AK(x) (((x) >> 27) & 0x1) +#define C_000044_GEYSERVILLE_STAT_AK 0xF7FFFFFF +#define S_000044_HDCP_AUTHORIZED_INT_STAT(x) (((x) & 0x1) << 28) +#define G_000044_HDCP_AUTHORIZED_INT_STAT(x) (((x) >> 28) & 0x1) +#define C_000044_HDCP_AUTHORIZED_INT_STAT 0xEFFFFFFF +#define S_000044_HDCP_AUTHORIZED_INT_AK(x) (((x) & 0x1) << 28) +#define G_000044_HDCP_AUTHORIZED_INT_AK(x) (((x) >> 28) & 0x1) +#define C_000044_HDCP_AUTHORIZED_INT_AK 0xEFFFFFFF +#define S_000044_DVI_I2C_INT_STAT(x) (((x) & 0x1) << 29) +#define G_000044_DVI_I2C_INT_STAT(x) (((x) >> 29) & 0x1) +#define C_000044_DVI_I2C_INT_STAT 0xDFFFFFFF +#define S_000044_DVI_I2C_INT_AK(x) (((x) & 0x1) << 29) +#define G_000044_DVI_I2C_INT_AK(x) (((x) >> 29) & 0x1) +#define C_000044_DVI_I2C_INT_AK 0xDFFFFFFF +#define S_000044_GUIDMA_STAT(x) (((x) & 0x1) << 30) +#define G_000044_GUIDMA_STAT(x) (((x) >> 30) & 0x1) +#define C_000044_GUIDMA_STAT 0xBFFFFFFF +#define S_000044_GUIDMA_AK(x) (((x) & 0x1) << 30) +#define G_000044_GUIDMA_AK(x) (((x) >> 30) & 0x1) +#define C_000044_GUIDMA_AK 0xBFFFFFFF +#define S_000044_VIDDMA_STAT(x) (((x) & 0x1) << 31) +#define G_000044_VIDDMA_STAT(x) (((x) >> 31) & 0x1) +#define C_000044_VIDDMA_STAT 0x7FFFFFFF +#define S_000044_VIDDMA_AK(x) (((x) & 0x1) << 31) +#define G_000044_VIDDMA_AK(x) (((x) >> 31) & 0x1) +#define C_000044_VIDDMA_AK 0x7FFFFFFF +#define R_000050_CRTC_GEN_CNTL 0x000050 +#define S_000050_CRTC_DBL_SCAN_EN(x) (((x) & 0x1) << 0) +#define G_000050_CRTC_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) +#define C_000050_CRTC_DBL_SCAN_EN 0xFFFFFFFE +#define S_000050_CRTC_INTERLACE_EN(x) (((x) & 0x1) << 1) +#define G_000050_CRTC_INTERLACE_EN(x) (((x) >> 1) & 0x1) +#define C_000050_CRTC_INTERLACE_EN 0xFFFFFFFD +#define S_000050_CRTC_C_SYNC_EN(x) (((x) & 0x1) << 4) +#define G_000050_CRTC_C_SYNC_EN(x) (((x) >> 4) & 0x1) +#define C_000050_CRTC_C_SYNC_EN 0xFFFFFFEF +#define S_000050_CRTC_PIX_WIDTH(x) (((x) & 0xF) << 8) +#define G_000050_CRTC_PIX_WIDTH(x) (((x) >> 8) & 0xF) +#define C_000050_CRTC_PIX_WIDTH 0xFFFFF0FF +#define S_000050_CRTC_ICON_EN(x) (((x) & 0x1) << 15) +#define G_000050_CRTC_ICON_EN(x) (((x) >> 15) & 0x1) +#define C_000050_CRTC_ICON_EN 0xFFFF7FFF +#define S_000050_CRTC_CUR_EN(x) (((x) & 0x1) << 16) +#define G_000050_CRTC_CUR_EN(x) (((x) >> 16) & 0x1) +#define C_000050_CRTC_CUR_EN 0xFFFEFFFF +#define S_000050_CRTC_VSTAT_MODE(x) (((x) & 0x3) << 17) +#define G_000050_CRTC_VSTAT_MODE(x) (((x) >> 17) & 0x3) +#define C_000050_CRTC_VSTAT_MODE 0xFFF9FFFF +#define S_000050_CRTC_CUR_MODE(x) (((x) & 0x7) << 20) +#define G_000050_CRTC_CUR_MODE(x) (((x) >> 20) & 0x7) +#define C_000050_CRTC_CUR_MODE 0xFF8FFFFF +#define S_000050_CRTC_EXT_DISP_EN(x) (((x) & 0x1) << 24) +#define G_000050_CRTC_EXT_DISP_EN(x) (((x) >> 24) & 0x1) +#define C_000050_CRTC_EXT_DISP_EN 0xFEFFFFFF +#define S_000050_CRTC_EN(x) (((x) & 0x1) << 25) +#define G_000050_CRTC_EN(x) (((x) >> 25) & 0x1) +#define C_000050_CRTC_EN 0xFDFFFFFF +#define S_000050_CRTC_DISP_REQ_EN_B(x) (((x) & 0x1) << 26) 
+#define G_000050_CRTC_DISP_REQ_EN_B(x) (((x) >> 26) & 0x1) +#define C_000050_CRTC_DISP_REQ_EN_B 0xFBFFFFFF +#define R_000054_CRTC_EXT_CNTL 0x000054 +#define S_000054_CRTC_VGA_XOVERSCAN(x) (((x) & 0x1) << 0) +#define G_000054_CRTC_VGA_XOVERSCAN(x) (((x) >> 0) & 0x1) +#define C_000054_CRTC_VGA_XOVERSCAN 0xFFFFFFFE +#define S_000054_VGA_BLINK_RATE(x) (((x) & 0x3) << 1) +#define G_000054_VGA_BLINK_RATE(x) (((x) >> 1) & 0x3) +#define C_000054_VGA_BLINK_RATE 0xFFFFFFF9 +#define S_000054_VGA_ATI_LINEAR(x) (((x) & 0x1) << 3) +#define G_000054_VGA_ATI_LINEAR(x) (((x) >> 3) & 0x1) +#define C_000054_VGA_ATI_LINEAR 0xFFFFFFF7 +#define S_000054_VGA_128KAP_PAGING(x) (((x) & 0x1) << 4) +#define G_000054_VGA_128KAP_PAGING(x) (((x) >> 4) & 0x1) +#define C_000054_VGA_128KAP_PAGING 0xFFFFFFEF +#define S_000054_VGA_TEXT_132(x) (((x) & 0x1) << 5) +#define G_000054_VGA_TEXT_132(x) (((x) >> 5) & 0x1) +#define C_000054_VGA_TEXT_132 0xFFFFFFDF +#define S_000054_VGA_XCRT_CNT_EN(x) (((x) & 0x1) << 6) +#define G_000054_VGA_XCRT_CNT_EN(x) (((x) >> 6) & 0x1) +#define C_000054_VGA_XCRT_CNT_EN 0xFFFFFFBF +#define S_000054_CRTC_HSYNC_DIS(x) (((x) & 0x1) << 8) +#define G_000054_CRTC_HSYNC_DIS(x) (((x) >> 8) & 0x1) +#define C_000054_CRTC_HSYNC_DIS 0xFFFFFEFF +#define S_000054_CRTC_VSYNC_DIS(x) (((x) & 0x1) << 9) +#define G_000054_CRTC_VSYNC_DIS(x) (((x) >> 9) & 0x1) +#define C_000054_CRTC_VSYNC_DIS 0xFFFFFDFF +#define S_000054_CRTC_DISPLAY_DIS(x) (((x) & 0x1) << 10) +#define G_000054_CRTC_DISPLAY_DIS(x) (((x) >> 10) & 0x1) +#define C_000054_CRTC_DISPLAY_DIS 0xFFFFFBFF +#define S_000054_CRTC_SYNC_TRISTATE(x) (((x) & 0x1) << 11) +#define G_000054_CRTC_SYNC_TRISTATE(x) (((x) >> 11) & 0x1) +#define C_000054_CRTC_SYNC_TRISTATE 0xFFFFF7FF +#define S_000054_CRTC_HSYNC_TRISTATE(x) (((x) & 0x1) << 12) +#define G_000054_CRTC_HSYNC_TRISTATE(x) (((x) >> 12) & 0x1) +#define C_000054_CRTC_HSYNC_TRISTATE 0xFFFFEFFF +#define S_000054_CRTC_VSYNC_TRISTATE(x) (((x) & 0x1) << 13) +#define G_000054_CRTC_VSYNC_TRISTATE(x) (((x) >> 13) & 0x1) +#define C_000054_CRTC_VSYNC_TRISTATE 0xFFFFDFFF +#define S_000054_CRT_ON(x) (((x) & 0x1) << 15) +#define G_000054_CRT_ON(x) (((x) >> 15) & 0x1) +#define C_000054_CRT_ON 0xFFFF7FFF +#define S_000054_VGA_CUR_B_TEST(x) (((x) & 0x1) << 17) +#define G_000054_VGA_CUR_B_TEST(x) (((x) >> 17) & 0x1) +#define C_000054_VGA_CUR_B_TEST 0xFFFDFFFF +#define S_000054_VGA_PACK_DIS(x) (((x) & 0x1) << 18) +#define G_000054_VGA_PACK_DIS(x) (((x) >> 18) & 0x1) +#define C_000054_VGA_PACK_DIS 0xFFFBFFFF +#define S_000054_VGA_MEM_PS_EN(x) (((x) & 0x1) << 19) +#define G_000054_VGA_MEM_PS_EN(x) (((x) >> 19) & 0x1) +#define C_000054_VGA_MEM_PS_EN 0xFFF7FFFF +#define S_000054_VCRTC_IDX_MASTER(x) (((x) & 0x7F) << 24) +#define G_000054_VCRTC_IDX_MASTER(x) (((x) >> 24) & 0x7F) +#define C_000054_VCRTC_IDX_MASTER 0x80FFFFFF +#define R_00023C_DISPLAY_BASE_ADDR 0x00023C +#define S_00023C_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) +#define G_00023C_DISPLAY_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_00023C_DISPLAY_BASE_ADDR 0x00000000 +#define R_000260_CUR_OFFSET 0x000260 +#define S_000260_CUR_OFFSET(x) (((x) & 0x7FFFFFF) << 0) +#define G_000260_CUR_OFFSET(x) (((x) >> 0) & 0x7FFFFFF) +#define C_000260_CUR_OFFSET 0xF8000000 +#define S_000260_CUR_LOCK(x) (((x) & 0x1) << 31) +#define G_000260_CUR_LOCK(x) (((x) >> 31) & 0x1) +#define C_000260_CUR_LOCK 0x7FFFFFFF +#define R_00033C_CRTC2_DISPLAY_BASE_ADDR 0x00033C +#define S_00033C_CRTC2_DISPLAY_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) +#define G_00033C_CRTC2_DISPLAY_BASE_ADDR(x) (((x) >> 
0) & 0xFFFFFFFF) +#define C_00033C_CRTC2_DISPLAY_BASE_ADDR 0x00000000 +#define R_000360_CUR2_OFFSET 0x000360 +#define S_000360_CUR2_OFFSET(x) (((x) & 0x7FFFFFF) << 0) +#define G_000360_CUR2_OFFSET(x) (((x) >> 0) & 0x7FFFFFF) +#define C_000360_CUR2_OFFSET 0xF8000000 +#define S_000360_CUR2_LOCK(x) (((x) & 0x1) << 31) +#define G_000360_CUR2_LOCK(x) (((x) >> 31) & 0x1) +#define C_000360_CUR2_LOCK 0x7FFFFFFF +#define R_0003C0_GENMO_WT 0x0003C0 +#define S_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) & 0x1) << 0) +#define G_0003C0_GENMO_MONO_ADDRESS_B(x) (((x) >> 0) & 0x1) +#define C_0003C0_GENMO_MONO_ADDRESS_B 0xFFFFFFFE +#define S_0003C0_VGA_RAM_EN(x) (((x) & 0x1) << 1) +#define G_0003C0_VGA_RAM_EN(x) (((x) >> 1) & 0x1) +#define C_0003C0_VGA_RAM_EN 0xFFFFFFFD +#define S_0003C0_VGA_CKSEL(x) (((x) & 0x3) << 2) +#define G_0003C0_VGA_CKSEL(x) (((x) >> 2) & 0x3) +#define C_0003C0_VGA_CKSEL 0xFFFFFFF3 +#define S_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) & 0x1) << 5) +#define G_0003C0_ODD_EVEN_MD_PGSEL(x) (((x) >> 5) & 0x1) +#define C_0003C0_ODD_EVEN_MD_PGSEL 0xFFFFFFDF +#define S_0003C0_VGA_HSYNC_POL(x) (((x) & 0x1) << 6) +#define G_0003C0_VGA_HSYNC_POL(x) (((x) >> 6) & 0x1) +#define C_0003C0_VGA_HSYNC_POL 0xFFFFFFBF +#define S_0003C0_VGA_VSYNC_POL(x) (((x) & 0x1) << 7) +#define G_0003C0_VGA_VSYNC_POL(x) (((x) >> 7) & 0x1) +#define C_0003C0_VGA_VSYNC_POL 0xFFFFFF7F +#define R_0003F8_CRTC2_GEN_CNTL 0x0003F8 +#define S_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) & 0x1) << 0) +#define G_0003F8_CRTC2_DBL_SCAN_EN(x) (((x) >> 0) & 0x1) +#define C_0003F8_CRTC2_DBL_SCAN_EN 0xFFFFFFFE +#define S_0003F8_CRTC2_INTERLACE_EN(x) (((x) & 0x1) << 1) +#define G_0003F8_CRTC2_INTERLACE_EN(x) (((x) >> 1) & 0x1) +#define C_0003F8_CRTC2_INTERLACE_EN 0xFFFFFFFD +#define S_0003F8_CRTC2_SYNC_TRISTATE(x) (((x) & 0x1) << 4) +#define G_0003F8_CRTC2_SYNC_TRISTATE(x) (((x) >> 4) & 0x1) +#define C_0003F8_CRTC2_SYNC_TRISTATE 0xFFFFFFEF +#define S_0003F8_CRTC2_HSYNC_TRISTATE(x) (((x) & 0x1) << 5) +#define G_0003F8_CRTC2_HSYNC_TRISTATE(x) (((x) >> 5) & 0x1) +#define C_0003F8_CRTC2_HSYNC_TRISTATE 0xFFFFFFDF +#define S_0003F8_CRTC2_VSYNC_TRISTATE(x) (((x) & 0x1) << 6) +#define G_0003F8_CRTC2_VSYNC_TRISTATE(x) (((x) >> 6) & 0x1) +#define C_0003F8_CRTC2_VSYNC_TRISTATE 0xFFFFFFBF +#define S_0003F8_CRT2_ON(x) (((x) & 0x1) << 7) +#define G_0003F8_CRT2_ON(x) (((x) >> 7) & 0x1) +#define C_0003F8_CRT2_ON 0xFFFFFF7F +#define S_0003F8_CRTC2_PIX_WIDTH(x) (((x) & 0xF) << 8) +#define G_0003F8_CRTC2_PIX_WIDTH(x) (((x) >> 8) & 0xF) +#define C_0003F8_CRTC2_PIX_WIDTH 0xFFFFF0FF +#define S_0003F8_CRTC2_ICON_EN(x) (((x) & 0x1) << 15) +#define G_0003F8_CRTC2_ICON_EN(x) (((x) >> 15) & 0x1) +#define C_0003F8_CRTC2_ICON_EN 0xFFFF7FFF +#define S_0003F8_CRTC2_CUR_EN(x) (((x) & 0x1) << 16) +#define G_0003F8_CRTC2_CUR_EN(x) (((x) >> 16) & 0x1) +#define C_0003F8_CRTC2_CUR_EN 0xFFFEFFFF +#define S_0003F8_CRTC2_CUR_MODE(x) (((x) & 0x7) << 20) +#define G_0003F8_CRTC2_CUR_MODE(x) (((x) >> 20) & 0x7) +#define C_0003F8_CRTC2_CUR_MODE 0xFF8FFFFF +#define S_0003F8_CRTC2_DISPLAY_DIS(x) (((x) & 0x1) << 23) +#define G_0003F8_CRTC2_DISPLAY_DIS(x) (((x) >> 23) & 0x1) +#define C_0003F8_CRTC2_DISPLAY_DIS 0xFF7FFFFF +#define S_0003F8_CRTC2_EN(x) (((x) & 0x1) << 25) +#define G_0003F8_CRTC2_EN(x) (((x) >> 25) & 0x1) +#define C_0003F8_CRTC2_EN 0xFDFFFFFF +#define S_0003F8_CRTC2_DISP_REQ_EN_B(x) (((x) & 0x1) << 26) +#define G_0003F8_CRTC2_DISP_REQ_EN_B(x) (((x) >> 26) & 0x1) +#define C_0003F8_CRTC2_DISP_REQ_EN_B 0xFBFFFFFF +#define S_0003F8_CRTC2_C_SYNC_EN(x) (((x) & 0x1) << 27) +#define 
G_0003F8_CRTC2_C_SYNC_EN(x) (((x) >> 27) & 0x1) +#define C_0003F8_CRTC2_C_SYNC_EN 0xF7FFFFFF +#define S_0003F8_CRTC2_HSYNC_DIS(x) (((x) & 0x1) << 28) +#define G_0003F8_CRTC2_HSYNC_DIS(x) (((x) >> 28) & 0x1) +#define C_0003F8_CRTC2_HSYNC_DIS 0xEFFFFFFF +#define S_0003F8_CRTC2_VSYNC_DIS(x) (((x) & 0x1) << 29) +#define G_0003F8_CRTC2_VSYNC_DIS(x) (((x) >> 29) & 0x1) +#define C_0003F8_CRTC2_VSYNC_DIS 0xDFFFFFFF +#define R_000420_OV0_SCALE_CNTL 0x000420 +#define S_000420_OV0_NO_READ_BEHIND_SCAN(x) (((x) & 0x1) << 1) +#define G_000420_OV0_NO_READ_BEHIND_SCAN(x) (((x) >> 1) & 0x1) +#define C_000420_OV0_NO_READ_BEHIND_SCAN 0xFFFFFFFD +#define S_000420_OV0_HORZ_PICK_NEAREST(x) (((x) & 0x1) << 2) +#define G_000420_OV0_HORZ_PICK_NEAREST(x) (((x) >> 2) & 0x1) +#define C_000420_OV0_HORZ_PICK_NEAREST 0xFFFFFFFB +#define S_000420_OV0_VERT_PICK_NEAREST(x) (((x) & 0x1) << 3) +#define G_000420_OV0_VERT_PICK_NEAREST(x) (((x) >> 3) & 0x1) +#define C_000420_OV0_VERT_PICK_NEAREST 0xFFFFFFF7 +#define S_000420_OV0_SIGNED_UV(x) (((x) & 0x1) << 4) +#define G_000420_OV0_SIGNED_UV(x) (((x) >> 4) & 0x1) +#define C_000420_OV0_SIGNED_UV 0xFFFFFFEF +#define S_000420_OV0_GAMMA_SEL(x) (((x) & 0x7) << 5) +#define G_000420_OV0_GAMMA_SEL(x) (((x) >> 5) & 0x7) +#define C_000420_OV0_GAMMA_SEL 0xFFFFFF1F +#define S_000420_OV0_SURFACE_FORMAT(x) (((x) & 0xF) << 8) +#define G_000420_OV0_SURFACE_FORMAT(x) (((x) >> 8) & 0xF) +#define C_000420_OV0_SURFACE_FORMAT 0xFFFFF0FF +#define S_000420_OV0_ADAPTIVE_DEINT(x) (((x) & 0x1) << 12) +#define G_000420_OV0_ADAPTIVE_DEINT(x) (((x) >> 12) & 0x1) +#define C_000420_OV0_ADAPTIVE_DEINT 0xFFFFEFFF +#define S_000420_OV0_CRTC_SEL(x) (((x) & 0x1) << 14) +#define G_000420_OV0_CRTC_SEL(x) (((x) >> 14) & 0x1) +#define C_000420_OV0_CRTC_SEL 0xFFFFBFFF +#define S_000420_OV0_BURST_PER_PLANE(x) (((x) & 0x7F) << 16) +#define G_000420_OV0_BURST_PER_PLANE(x) (((x) >> 16) & 0x7F) +#define C_000420_OV0_BURST_PER_PLANE 0xFF80FFFF +#define S_000420_OV0_DOUBLE_BUFFER_REGS(x) (((x) & 0x1) << 24) +#define G_000420_OV0_DOUBLE_BUFFER_REGS(x) (((x) >> 24) & 0x1) +#define C_000420_OV0_DOUBLE_BUFFER_REGS 0xFEFFFFFF +#define S_000420_OV0_BANDWIDTH(x) (((x) & 0x1) << 26) +#define G_000420_OV0_BANDWIDTH(x) (((x) >> 26) & 0x1) +#define C_000420_OV0_BANDWIDTH 0xFBFFFFFF +#define S_000420_OV0_LIN_TRANS_BYPASS(x) (((x) & 0x1) << 28) +#define G_000420_OV0_LIN_TRANS_BYPASS(x) (((x) >> 28) & 0x1) +#define C_000420_OV0_LIN_TRANS_BYPASS 0xEFFFFFFF +#define S_000420_OV0_INT_EMU(x) (((x) & 0x1) << 29) +#define G_000420_OV0_INT_EMU(x) (((x) >> 29) & 0x1) +#define C_000420_OV0_INT_EMU 0xDFFFFFFF +#define S_000420_OV0_OVERLAY_EN(x) (((x) & 0x1) << 30) +#define G_000420_OV0_OVERLAY_EN(x) (((x) >> 30) & 0x1) +#define C_000420_OV0_OVERLAY_EN 0xBFFFFFFF +#define S_000420_OV0_SOFT_RESET(x) (((x) & 0x1) << 31) +#define G_000420_OV0_SOFT_RESET(x) (((x) >> 31) & 0x1) +#define C_000420_OV0_SOFT_RESET 0x7FFFFFFF +#define R_00070C_CP_RB_RPTR_ADDR 0x00070C +#define S_00070C_RB_RPTR_SWAP(x) (((x) & 0x3) << 0) +#define G_00070C_RB_RPTR_SWAP(x) (((x) >> 0) & 0x3) +#define C_00070C_RB_RPTR_SWAP 0xFFFFFFFC +#define S_00070C_RB_RPTR_ADDR(x) (((x) & 0x3FFFFFFF) << 2) +#define G_00070C_RB_RPTR_ADDR(x) (((x) >> 2) & 0x3FFFFFFF) +#define C_00070C_RB_RPTR_ADDR 0x00000003 +#define R_000740_CP_CSQ_CNTL 0x000740 +#define S_000740_CSQ_CNT_PRIMARY(x) (((x) & 0xFF) << 0) +#define G_000740_CSQ_CNT_PRIMARY(x) (((x) >> 0) & 0xFF) +#define C_000740_CSQ_CNT_PRIMARY 0xFFFFFF00 +#define S_000740_CSQ_CNT_INDIRECT(x) (((x) & 0xFF) << 8) +#define 
G_000740_CSQ_CNT_INDIRECT(x) (((x) >> 8) & 0xFF) +#define C_000740_CSQ_CNT_INDIRECT 0xFFFF00FF +#define S_000740_CSQ_MODE(x) (((x) & 0xF) << 28) +#define G_000740_CSQ_MODE(x) (((x) >> 28) & 0xF) +#define C_000740_CSQ_MODE 0x0FFFFFFF +#define R_000770_SCRATCH_UMSK 0x000770 +#define S_000770_SCRATCH_UMSK(x) (((x) & 0x3F) << 0) +#define G_000770_SCRATCH_UMSK(x) (((x) >> 0) & 0x3F) +#define C_000770_SCRATCH_UMSK 0xFFFFFFC0 +#define S_000770_SCRATCH_SWAP(x) (((x) & 0x3) << 16) +#define G_000770_SCRATCH_SWAP(x) (((x) >> 16) & 0x3) +#define C_000770_SCRATCH_SWAP 0xFFFCFFFF +#define R_000774_SCRATCH_ADDR 0x000774 +#define S_000774_SCRATCH_ADDR(x) (((x) & 0x7FFFFFF) << 5) +#define G_000774_SCRATCH_ADDR(x) (((x) >> 5) & 0x7FFFFFF) +#define C_000774_SCRATCH_ADDR 0x0000001F +#define R_000E40_RBBM_STATUS 0x000E40 +#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) +#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) +#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 +#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) +#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) +#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF +#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) +#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) +#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF +#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) +#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) +#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF +#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) +#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) +#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF +#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) +#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) +#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF +#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) +#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) +#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF +#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) +#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) +#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF +#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) +#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) +#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF +#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) +#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) +#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF +#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) +#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) +#define C_000E40_E2_BUSY 0xFFFDFFFF +#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) +#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) +#define C_000E40_RB2D_BUSY 0xFFFBFFFF +#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) +#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) +#define C_000E40_RB3D_BUSY 0xFFF7FFFF +#define S_000E40_SE_BUSY(x) (((x) & 0x1) << 20) +#define G_000E40_SE_BUSY(x) (((x) >> 20) & 0x1) +#define C_000E40_SE_BUSY 0xFFEFFFFF +#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) +#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) +#define C_000E40_RE_BUSY 0xFFDFFFFF +#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) +#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) +#define C_000E40_TAM_BUSY 0xFFBFFFFF +#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) +#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) +#define C_000E40_TDM_BUSY 0xFF7FFFFF +#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) +#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) +#define C_000E40_PB_BUSY 0xFEFFFFFF +#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) +#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) +#define C_000E40_GUI_ACTIVE 
0x7FFFFFFF + #endif diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index a5f82f7beed6..bb151ecdf8fc 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -31,7 +31,6 @@ #include "radeon_reg.h" #include "radeon.h" #include "radeon_drm.h" -#include "radeon_share.h" #include "r100_track.h" #include "r300d.h" @@ -43,7 +42,6 @@ int r100_cp_reset(struct radeon_device *rdev); int r100_rb2d_reset(struct radeon_device *rdev); int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); int r100_pci_gart_enable(struct radeon_device *rdev); -void r100_pci_gart_disable(struct radeon_device *rdev); void r100_mc_setup(struct radeon_device *rdev); void r100_mc_disable_clients(struct radeon_device *rdev); int r100_gui_wait_for_idle(struct radeon_device *rdev); @@ -87,26 +85,57 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) mb(); } -int rv370_pcie_gart_enable(struct radeon_device *rdev) +int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) +{ + void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; + + if (i < 0 || i > rdev->gart.num_gpu_pages) { + return -EINVAL; + } + addr = (lower_32_bits(addr) >> 8) | + ((upper_32_bits(addr) & 0xff) << 24) | + 0xc; + /* on x86 we want this to be CPU endian, on powerpc + * on powerpc without HW swappers, it'll get swapped on way + * into VRAM - so no need for cpu_to_le32 on VRAM tables */ + writel(addr, ((void __iomem *)ptr) + (i * 4)); + return 0; +} + +int rv370_pcie_gart_init(struct radeon_device *rdev) { - uint32_t table_addr; - uint32_t tmp; int r; + if (rdev->gart.table.vram.robj) { + WARN(1, "RV370 PCIE GART already initialized.\n"); + return 0; + } /* Initialize common gart structure */ r = radeon_gart_init(rdev); - if (r) { + if (r) return r; - } r = rv370_debugfs_pcie_gart_info_init(rdev); - if (r) { + if (r) DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); - } rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; - r = radeon_gart_table_vram_alloc(rdev); - if (r) { - return r; + rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; + return radeon_gart_table_vram_alloc(rdev); +} + +int rv370_pcie_gart_enable(struct radeon_device *rdev) +{ + uint32_t table_addr; + uint32_t tmp; + int r; + + if (rdev->gart.table.vram.robj == NULL) { + dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; } + r = radeon_gart_table_vram_pin(rdev); + if (r) + return r; /* discard memory request outside of configured range */ tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); @@ -146,45 +175,13 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev) } } -int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) -{ - void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; - - if (i < 0 || i > rdev->gart.num_gpu_pages) { - return -EINVAL; - } - addr = (lower_32_bits(addr) >> 8) | - ((upper_32_bits(addr) & 0xff) << 24) | - 0xc; - /* on x86 we want this to be CPU endian, on powerpc - * on powerpc without HW swappers, it'll get swapped on way - * into VRAM - so no need for cpu_to_le32 on VRAM tables */ - writel(addr, ((void __iomem *)ptr) + (i * 4)); - return 0; -} - -int r300_gart_enable(struct radeon_device *rdev) +void rv370_pcie_gart_fini(struct radeon_device *rdev) { -#if __OS_HAS_AGP - if (rdev->flags & RADEON_IS_AGP) { - if (rdev->family > CHIP_RV350) { - rv370_pcie_gart_disable(rdev); - } else { - 
r100_pci_gart_disable(rdev); - } - return 0; - } -#endif - if (rdev->flags & RADEON_IS_PCIE) { - rdev->asic->gart_disable = &rv370_pcie_gart_disable; - rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; - rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; - return rv370_pcie_gart_enable(rdev); - } - return r100_pci_gart_enable(rdev); + rv370_pcie_gart_disable(rdev); + radeon_gart_table_vram_free(rdev); + radeon_gart_fini(rdev); } - /* * MC */ @@ -232,14 +229,6 @@ int r300_mc_init(struct radeon_device *rdev) void r300_mc_fini(struct radeon_device *rdev) { - if (rdev->flags & RADEON_IS_PCIE) { - rv370_pcie_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - } else { - r100_pci_gart_disable(rdev); - radeon_gart_table_ram_free(rdev); - } - radeon_gart_fini(rdev); } @@ -1235,11 +1224,12 @@ static int r300_packet3_check(struct radeon_cs_parser *p, int r300_cs_parse(struct radeon_cs_parser *p) { struct radeon_cs_packet pkt; - struct r100_cs_track track; + struct r100_cs_track *track; int r; - r100_cs_track_clear(p->rdev, &track); - p->track = &track; + track = kzalloc(sizeof(*track), GFP_KERNEL); + r100_cs_track_clear(p->rdev, track); + p->track = track; do { r = r100_cs_packet_parse(p, &pkt, p->idx); if (r) { @@ -1269,9 +1259,48 @@ int r300_cs_parse(struct radeon_cs_parser *p) return 0; } -int r300_init(struct radeon_device *rdev) +void r300_set_reg_safe(struct radeon_device *rdev) { rdev->config.r300.reg_safe_bm = r300_reg_safe_bm; rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); +} + +int r300_init(struct radeon_device *rdev) +{ + r300_set_reg_safe(rdev); return 0; } + +void r300_mc_program(struct radeon_device *rdev) +{ + struct r100_mc_save save; + int r; + + r = r100_debugfs_mc_info_init(rdev); + if (r) { + dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n"); + } + + /* Stops all mc clients */ + r100_mc_stop(rdev, &save); + if (rdev->flags & RADEON_IS_AGP) { + WREG32(R_00014C_MC_AGP_LOCATION, + S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | + S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); + WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); + WREG32(R_00015C_AGP_BASE_2, + upper_32_bits(rdev->mc.agp_base) & 0xff); + } else { + WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); + WREG32(R_000170_AGP_BASE, 0); + WREG32(R_00015C_AGP_BASE_2, 0); + } + /* Wait for mc idle */ + if (r300_mc_wait_for_idle(rdev)) + DRM_INFO("Failed to wait MC idle before programming MC.\n"); + /* Program MC, should be a 32bits limited address space */ + WREG32(R_000148_MC_FB_LOCATION, + S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | + S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); + r100_mc_resume(rdev, &save); +} diff --git a/drivers/gpu/drm/radeon/r300d.h b/drivers/gpu/drm/radeon/r300d.h index 63ec076f2cd4..d4fa3eb1074f 100644 --- a/drivers/gpu/drm/radeon/r300d.h +++ b/drivers/gpu/drm/radeon/r300d.h @@ -73,4 +73,29 @@ #define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) #define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) +/* Registers */ +#define R_000148_MC_FB_LOCATION 0x000148 +#define S_000148_MC_FB_START(x) (((x) & 0xFFFF) << 0) +#define G_000148_MC_FB_START(x) (((x) >> 0) & 0xFFFF) +#define C_000148_MC_FB_START 0xFFFF0000 +#define S_000148_MC_FB_TOP(x) (((x) & 0xFFFF) << 16) +#define G_000148_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_000148_MC_FB_TOP 0x0000FFFF +#define R_00014C_MC_AGP_LOCATION 0x00014C +#define S_00014C_MC_AGP_START(x) (((x) & 0xFFFF) << 0) +#define G_00014C_MC_AGP_START(x) (((x) >> 0) & 0xFFFF) +#define C_00014C_MC_AGP_START 
0xFFFF0000 +#define S_00014C_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16) +#define G_00014C_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF) +#define C_00014C_MC_AGP_TOP 0x0000FFFF +#define R_00015C_AGP_BASE_2 0x00015C +#define S_00015C_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0) +#define G_00015C_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF) +#define C_00015C_AGP_BASE_ADDR_2 0xFFFFFFF0 +#define R_000170_AGP_BASE 0x000170 +#define S_000170_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0) +#define G_000170_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_000170_AGP_BASE_ADDR 0x00000000 + + #endif diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 97426a6f370f..49a2fdc57d27 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -29,47 +29,13 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "atom.h" +#include "r420d.h" -/* r420,r423,rv410 depends on : */ -void r100_pci_gart_disable(struct radeon_device *rdev); -void r100_hdp_reset(struct radeon_device *rdev); -void r100_mc_setup(struct radeon_device *rdev); -int r100_gui_wait_for_idle(struct radeon_device *rdev); -void r100_mc_disable_clients(struct radeon_device *rdev); -void r300_vram_info(struct radeon_device *rdev); -int r300_mc_wait_for_idle(struct radeon_device *rdev); -int rv370_pcie_gart_enable(struct radeon_device *rdev); -void rv370_pcie_gart_disable(struct radeon_device *rdev); - -/* This files gather functions specifics to : - * r420,r423,rv410 - * - * Some of these functions might be used by newer ASICs. - */ -void r420_gpu_init(struct radeon_device *rdev); -int r420_debugfs_pipes_info_init(struct radeon_device *rdev); - - -/* - * MC - */ int r420_mc_init(struct radeon_device *rdev) { int r; - if (r100_debugfs_rbbm_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for RBBM !\n"); - } - if (r420_debugfs_pipes_info_init(rdev)) { - DRM_ERROR("Failed to register debugfs file for pipes !\n"); - } - - r420_gpu_init(rdev); - r100_pci_gart_disable(rdev); - if (rdev->flags & RADEON_IS_PCIE) { - rv370_pcie_gart_disable(rdev); - } - /* Setup GPU memory space */ rdev->mc.vram_location = 0xFFFFFFFFUL; rdev->mc.gtt_location = 0xFFFFFFFFUL; @@ -87,33 +53,9 @@ int r420_mc_init(struct radeon_device *rdev) if (r) { return r; } - - /* Program GPU memory space */ - r100_mc_disable_clients(rdev); - if (r300_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); - } - r100_mc_setup(rdev); return 0; } -void r420_mc_fini(struct radeon_device *rdev) -{ - rv370_pcie_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); -} - - -/* - * Global GPU functions - */ -void r420_errata(struct radeon_device *rdev) -{ - rdev->pll_errata = 0; -} - void r420_pipes_init(struct radeon_device *rdev) { unsigned tmp; @@ -122,6 +64,11 @@ void r420_pipes_init(struct radeon_device *rdev) /* GA_ENHANCE workaround TCL deadlock issue */ WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)); + /* add idle wait as per freedesktop.org bug 24041 */ + if (r100_gui_wait_for_idle(rdev)) { + printk(KERN_WARNING "Failed to wait GUI idle while " + "programming pipes. 
Bad things might happen.\n"); + } /* get max number of pipes */ gb_pipe_select = RREG32(0x402C); num_pipes = ((gb_pipe_select >> 12) & 3) + 1; @@ -179,25 +126,239 @@ void r420_pipes_init(struct radeon_device *rdev) rdev->num_gb_pipes, rdev->num_z_pipes); } -void r420_gpu_init(struct radeon_device *rdev) +u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) +{ + u32 r; + + WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); + r = RREG32(R_0001FC_MC_IND_DATA); + return r; +} + +void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) +{ + WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | + S_0001F8_MC_IND_WR_EN(1)); + WREG32(R_0001FC_MC_IND_DATA, v); +} + +static void r420_debugfs(struct radeon_device *rdev) +{ + if (r100_debugfs_rbbm_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for RBBM !\n"); + } + if (r420_debugfs_pipes_info_init(rdev)) { + DRM_ERROR("Failed to register debugfs file for pipes !\n"); + } +} + +static void r420_clock_resume(struct radeon_device *rdev) +{ + u32 sclk_cntl; + sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); + sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); + if (rdev->family == CHIP_R420) + sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); + WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); +} + +static int r420_startup(struct radeon_device *rdev) { - r100_hdp_reset(rdev); + int r; + + r300_mc_program(rdev); + /* Initialize GART (initialize after TTM so we can allocate + * memory through TTM but finalize after TTM) */ + if (rdev->flags & RADEON_IS_PCIE) { + r = rv370_pcie_gart_enable(rdev); + if (r) + return r; + } + if (rdev->flags & RADEON_IS_PCI) { + r = r100_pci_gart_enable(rdev); + if (r) + return r; + } r420_pipes_init(rdev); - if (r300_mc_wait_for_idle(rdev)) { - printk(KERN_WARNING "Failed to wait MC idle while " - "programming pipes. Bad things might happen.\n"); + /* Enable IRQ */ + rdev->irq.sw_int = true; + r100_irq_set(rdev); + /* 1M ring buffer */ + r = r100_cp_init(rdev, 1024 * 1024); + if (r) { + dev_err(rdev->dev, "failled initializing CP (%d).\n", r); + return r; + } + r = r100_wb_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing WB (%d).\n", r); + } + r = r100_ib_init(rdev); + if (r) { + dev_err(rdev->dev, "failled initializing IB (%d).\n", r); + return r; } + return 0; } +int r420_resume(struct radeon_device *rdev) +{ + /* Make sur GART are not working */ + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_disable(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_disable(rdev); + /* Resume clock before doing reset */ + r420_clock_resume(rdev); + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, "GPU reset failed ! 
(0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } + /* Resume clock after posting */ + r420_clock_resume(rdev); -/* - * r420,r423,rv410 VRAM info - */ -void r420_vram_info(struct radeon_device *rdev) + return r420_startup(rdev); +} + +int r420_suspend(struct radeon_device *rdev) { - r300_vram_info(rdev); + r100_cp_disable(rdev); + r100_wb_disable(rdev); + r100_irq_disable(rdev); + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_disable(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_disable(rdev); + return 0; +} + +void r420_fini(struct radeon_device *rdev) +{ + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + radeon_gem_fini(rdev); + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_fini(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_fini(rdev); + radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); + radeon_fence_driver_fini(rdev); + radeon_object_fini(rdev); + if (rdev->is_atom_bios) { + radeon_atombios_fini(rdev); + } else { + radeon_combios_fini(rdev); + } + kfree(rdev->bios); + rdev->bios = NULL; } +int r420_init(struct radeon_device *rdev) +{ + int r; + + rdev->new_init_path = true; + /* Initialize scratch registers */ + radeon_scratch_init(rdev); + /* Initialize surface registers */ + radeon_surface_init(rdev); + /* TODO: disable VGA need to use VGA request */ + /* BIOS*/ + if (!radeon_get_bios(rdev)) { + if (ASIC_IS_AVIVO(rdev)) + return -EINVAL; + } + if (rdev->is_atom_bios) { + r = radeon_atombios_init(rdev); + if (r) { + return r; + } + } else { + r = radeon_combios_init(rdev); + if (r) { + return r; + } + } + /* Reset gpu before posting otherwise ATOM will enter infinite loop */ + if (radeon_gpu_reset(rdev)) { + dev_warn(rdev->dev, + "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n", + RREG32(R_000E40_RBBM_STATUS), + RREG32(R_0007C0_CP_STAT)); + } + /* check if cards are posted or not */ + if (!radeon_card_posted(rdev) && rdev->bios) { + DRM_INFO("GPU not posted. 
posting now...\n"); + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } + } + /* Initialize clocks */ + radeon_get_clock_info(rdev->ddev); + /* Get vram informations */ + r300_vram_info(rdev); + /* Initialize memory controller (also test AGP) */ + r = r420_mc_init(rdev); + if (r) { + return r; + } + r420_debugfs(rdev); + /* Fence driver */ + r = radeon_fence_driver_init(rdev); + if (r) { + return r; + } + r = radeon_irq_kms_init(rdev); + if (r) { + return r; + } + /* Memory manager */ + r = radeon_object_init(rdev); + if (r) { + return r; + } + if (rdev->flags & RADEON_IS_PCIE) { + r = rv370_pcie_gart_init(rdev); + if (r) + return r; + } + if (rdev->flags & RADEON_IS_PCI) { + r = r100_pci_gart_init(rdev); + if (r) + return r; + } + r300_set_reg_safe(rdev); + rdev->accel_working = true; + r = r420_startup(rdev); + if (r) { + /* Somethings want wront with the accel init stop accel */ + dev_err(rdev->dev, "Disabling GPU acceleration\n"); + r420_suspend(rdev); + r100_cp_fini(rdev); + r100_wb_fini(rdev); + r100_ib_fini(rdev); + if (rdev->flags & RADEON_IS_PCIE) + rv370_pcie_gart_fini(rdev); + if (rdev->flags & RADEON_IS_PCI) + r100_pci_gart_fini(rdev); + radeon_agp_fini(rdev); + radeon_irq_kms_fini(rdev); + rdev->accel_working = false; + } + return 0; +} /* * Debugfs info diff --git a/drivers/gpu/drm/radeon/r420d.h b/drivers/gpu/drm/radeon/r420d.h new file mode 100644 index 000000000000..a48a7db1e2aa --- /dev/null +++ b/drivers/gpu/drm/radeon/r420d.h @@ -0,0 +1,249 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ +#ifndef R420D_H +#define R420D_H + +#define R_0001F8_MC_IND_INDEX 0x0001F8 +#define S_0001F8_MC_IND_ADDR(x) (((x) & 0x7F) << 0) +#define G_0001F8_MC_IND_ADDR(x) (((x) >> 0) & 0x7F) +#define C_0001F8_MC_IND_ADDR 0xFFFFFF80 +#define S_0001F8_MC_IND_WR_EN(x) (((x) & 0x1) << 8) +#define G_0001F8_MC_IND_WR_EN(x) (((x) >> 8) & 0x1) +#define C_0001F8_MC_IND_WR_EN 0xFFFFFEFF +#define R_0001FC_MC_IND_DATA 0x0001FC +#define S_0001FC_MC_IND_DATA(x) (((x) & 0xFFFFFFFF) << 0) +#define G_0001FC_MC_IND_DATA(x) (((x) >> 0) & 0xFFFFFFFF) +#define C_0001FC_MC_IND_DATA 0x00000000 +#define R_0007C0_CP_STAT 0x0007C0 +#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0) +#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1) +#define C_0007C0_MRU_BUSY 0xFFFFFFFE +#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1) +#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1) +#define C_0007C0_MWU_BUSY 0xFFFFFFFD +#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2) +#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1) +#define C_0007C0_RSIU_BUSY 0xFFFFFFFB +#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3) +#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1) +#define C_0007C0_RCIU_BUSY 0xFFFFFFF7 +#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9) +#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1) +#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF +#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10) +#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1) +#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF +#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11) +#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1) +#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF +#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12) +#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1) +#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF +#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13) +#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1) +#define C_0007C0_CSI_BUSY 0xFFFFDFFF +#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14) +#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1) +#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF +#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15) +#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1) +#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF +#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28) +#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1) +#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF +#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29) +#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1) +#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF +#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30) +#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1) +#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF +#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31) +#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1) +#define C_0007C0_CP_BUSY 0x7FFFFFFF +#define R_000E40_RBBM_STATUS 0x000E40 +#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0) +#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F) +#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80 +#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8) +#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1) +#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF +#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9) +#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1) +#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF +#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10) +#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1) +#define C_000E40_CFRQ_ON_RBB 
0xFFFFFBFF +#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11) +#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1) +#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF +#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12) +#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1) +#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF +#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13) +#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1) +#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF +#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14) +#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1) +#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF +#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15) +#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1) +#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF +#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16) +#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1) +#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF +#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17) +#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1) +#define C_000E40_E2_BUSY 0xFFFDFFFF +#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18) +#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1) +#define C_000E40_RB2D_BUSY 0xFFFBFFFF +#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19) +#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1) +#define C_000E40_RB3D_BUSY 0xFFF7FFFF +#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20) +#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1) +#define C_000E40_VAP_BUSY 0xFFEFFFFF +#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21) +#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1) +#define C_000E40_RE_BUSY 0xFFDFFFFF +#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22) +#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1) +#define C_000E40_TAM_BUSY 0xFFBFFFFF +#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23) +#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1) +#define C_000E40_TDM_BUSY 0xFF7FFFFF +#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24) +#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1) +#define C_000E40_PB_BUSY 0xFEFFFFFF +#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25) +#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1) +#define C_000E40_TIM_BUSY 0xFDFFFFFF +#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26) +#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1) +#define C_000E40_GA_BUSY 0xFBFFFFFF +#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27) +#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1) +#define C_000E40_CBA2D_BUSY 0xF7FFFFFF +#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31) +#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1) +#define C_000E40_GUI_ACTIVE 0x7FFFFFFF + +/* CLK registers */ +#define R_00000D_SCLK_CNTL 0x00000D +#define S_00000D_SCLK_SRC_SEL(x) (((x) & 0x7) << 0) +#define G_00000D_SCLK_SRC_SEL(x) (((x) >> 0) & 0x7) +#define C_00000D_SCLK_SRC_SEL 0xFFFFFFF8 +#define S_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 3) +#define G_00000D_CP_MAX_DYN_STOP_LAT(x) (((x) >> 3) & 0x1) +#define C_00000D_CP_MAX_DYN_STOP_LAT 0xFFFFFFF7 +#define S_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 4) +#define G_00000D_HDP_MAX_DYN_STOP_LAT(x) (((x) >> 4) & 0x1) +#define C_00000D_HDP_MAX_DYN_STOP_LAT 0xFFFFFFEF +#define S_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 5) +#define G_00000D_TV_MAX_DYN_STOP_LAT(x) (((x) >> 5) & 0x1) +#define C_00000D_TV_MAX_DYN_STOP_LAT 0xFFFFFFDF +#define S_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 6) +#define G_00000D_E2_MAX_DYN_STOP_LAT(x) (((x) >> 6) & 0x1) +#define C_00000D_E2_MAX_DYN_STOP_LAT 0xFFFFFFBF +#define S_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 7) +#define 
G_00000D_SE_MAX_DYN_STOP_LAT(x) (((x) >> 7) & 0x1) +#define C_00000D_SE_MAX_DYN_STOP_LAT 0xFFFFFF7F +#define S_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 8) +#define G_00000D_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 8) & 0x1) +#define C_00000D_IDCT_MAX_DYN_STOP_LAT 0xFFFFFEFF +#define S_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 9) +#define G_00000D_VIP_MAX_DYN_STOP_LAT(x) (((x) >> 9) & 0x1) +#define C_00000D_VIP_MAX_DYN_STOP_LAT 0xFFFFFDFF +#define S_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 10) +#define G_00000D_RE_MAX_DYN_STOP_LAT(x) (((x) >> 10) & 0x1) +#define C_00000D_RE_MAX_DYN_STOP_LAT 0xFFFFFBFF +#define S_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 11) +#define G_00000D_PB_MAX_DYN_STOP_LAT(x) (((x) >> 11) & 0x1) +#define C_00000D_PB_MAX_DYN_STOP_LAT 0xFFFFF7FF +#define S_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 12) +#define G_00000D_TAM_MAX_DYN_STOP_LAT(x) (((x) >> 12) & 0x1) +#define C_00000D_TAM_MAX_DYN_STOP_LAT 0xFFFFEFFF +#define S_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 13) +#define G_00000D_TDM_MAX_DYN_STOP_LAT(x) (((x) >> 13) & 0x1) +#define C_00000D_TDM_MAX_DYN_STOP_LAT 0xFFFFDFFF +#define S_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 14) +#define G_00000D_RB_MAX_DYN_STOP_LAT(x) (((x) >> 14) & 0x1) +#define C_00000D_RB_MAX_DYN_STOP_LAT 0xFFFFBFFF +#define S_00000D_FORCE_DISP2(x) (((x) & 0x1) << 15) +#define G_00000D_FORCE_DISP2(x) (((x) >> 15) & 0x1) +#define C_00000D_FORCE_DISP2 0xFFFF7FFF +#define S_00000D_FORCE_CP(x) (((x) & 0x1) << 16) +#define G_00000D_FORCE_CP(x) (((x) >> 16) & 0x1) +#define C_00000D_FORCE_CP 0xFFFEFFFF +#define S_00000D_FORCE_HDP(x) (((x) & 0x1) << 17) +#define G_00000D_FORCE_HDP(x) (((x) >> 17) & 0x1) +#define C_00000D_FORCE_HDP 0xFFFDFFFF +#define S_00000D_FORCE_DISP1(x) (((x) & 0x1) << 18) +#define G_00000D_FORCE_DISP1(x) (((x) >> 18) & 0x1) +#define C_00000D_FORCE_DISP1 0xFFFBFFFF +#define S_00000D_FORCE_TOP(x) (((x) & 0x1) << 19) +#define G_00000D_FORCE_TOP(x) (((x) >> 19) & 0x1) +#define C_00000D_FORCE_TOP 0xFFF7FFFF +#define S_00000D_FORCE_E2(x) (((x) & 0x1) << 20) +#define G_00000D_FORCE_E2(x) (((x) >> 20) & 0x1) +#define C_00000D_FORCE_E2 0xFFEFFFFF +#define S_00000D_FORCE_SE(x) (((x) & 0x1) << 21) +#define G_00000D_FORCE_SE(x) (((x) >> 21) & 0x1) +#define C_00000D_FORCE_SE 0xFFDFFFFF +#define S_00000D_FORCE_IDCT(x) (((x) & 0x1) << 22) +#define G_00000D_FORCE_IDCT(x) (((x) >> 22) & 0x1) +#define C_00000D_FORCE_IDCT 0xFFBFFFFF +#define S_00000D_FORCE_VIP(x) (((x) & 0x1) << 23) +#define G_00000D_FORCE_VIP(x) (((x) >> 23) & 0x1) +#define C_00000D_FORCE_VIP 0xFF7FFFFF +#define S_00000D_FORCE_RE(x) (((x) & 0x1) << 24) +#define G_00000D_FORCE_RE(x) (((x) >> 24) & 0x1) +#define C_00000D_FORCE_RE 0xFEFFFFFF +#define S_00000D_FORCE_PB(x) (((x) & 0x1) << 25) +#define G_00000D_FORCE_PB(x) (((x) >> 25) & 0x1) +#define C_00000D_FORCE_PB 0xFDFFFFFF +#define S_00000D_FORCE_PX(x) (((x) & 0x1) << 26) +#define G_00000D_FORCE_PX(x) (((x) >> 26) & 0x1) +#define C_00000D_FORCE_PX 0xFBFFFFFF +#define S_00000D_FORCE_TX(x) (((x) & 0x1) << 27) +#define G_00000D_FORCE_TX(x) (((x) >> 27) & 0x1) +#define C_00000D_FORCE_TX 0xF7FFFFFF +#define S_00000D_FORCE_RB(x) (((x) & 0x1) << 28) +#define G_00000D_FORCE_RB(x) (((x) >> 28) & 0x1) +#define C_00000D_FORCE_RB 0xEFFFFFFF +#define S_00000D_FORCE_TV_SCLK(x) (((x) & 0x1) << 29) +#define G_00000D_FORCE_TV_SCLK(x) (((x) >> 29) & 0x1) +#define C_00000D_FORCE_TV_SCLK 0xDFFFFFFF +#define S_00000D_FORCE_SUBPIC(x) (((x) & 0x1) << 30) +#define G_00000D_FORCE_SUBPIC(x) (((x) >> 30) & 0x1) +#define 
C_00000D_FORCE_SUBPIC 0xBFFFFFFF +#define S_00000D_FORCE_OV0(x) (((x) & 0x1) << 31) +#define G_00000D_FORCE_OV0(x) (((x) >> 31) & 0x1) +#define C_00000D_FORCE_OV0 0x7FFFFFFF + +#endif diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index ebd6b0f7bdff..d4b0b9d2e39b 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -28,12 +28,9 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" -#include "radeon_share.h" /* r520,rv530,rv560,rv570,r580 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); -int rv370_pcie_gart_enable(struct radeon_device *rdev); -void rv370_pcie_gart_disable(struct radeon_device *rdev); void r420_pipes_init(struct radeon_device *rdev); void rs600_mc_disable_clients(struct radeon_device *rdev); void rs600_disable_vga(struct radeon_device *rdev); @@ -119,9 +116,6 @@ int r520_mc_init(struct radeon_device *rdev) void r520_mc_fini(struct radeon_device *rdev) { - rv370_pcie_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index d8fcef44a69f..eab31c1d6df1 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -32,7 +32,6 @@ #include "radeon_drm.h" #include "radeon.h" #include "radeon_mode.h" -#include "radeon_share.h" #include "r600d.h" #include "avivod.h" #include "atom.h" @@ -114,23 +113,35 @@ void r600_pcie_gart_tlb_flush(struct radeon_device *rdev) } } -int r600_pcie_gart_enable(struct radeon_device *rdev) +int r600_pcie_gart_init(struct radeon_device *rdev) { - u32 tmp; - int r, i; + int r; + if (rdev->gart.table.vram.robj) { + WARN(1, "R600 PCIE GART already initialized.\n"); + return 0; + } /* Initialize common gart structure */ r = radeon_gart_init(rdev); - if (r) { + if (r) return r; - } rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; - r = radeon_gart_table_vram_alloc(rdev); - if (r) { - return r; + return radeon_gart_table_vram_alloc(rdev); +} + +int r600_pcie_gart_enable(struct radeon_device *rdev) +{ + u32 tmp; + int r, i; + + if (rdev->gart.table.vram.robj == NULL) { + dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; } - for (i = 0; i < rdev->gart.num_gpu_pages; i++) - r600_gart_clear_page(rdev, i); + r = radeon_gart_table_vram_pin(rdev); + if (r) + return r; + /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | @@ -176,10 +187,6 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) u32 tmp; int i; - /* Clear ptes*/ - for (i = 0; i < rdev->gart.num_gpu_pages; i++) - r600_gart_clear_page(rdev, i); - r600_pcie_gart_tlb_flush(rdev); /* Disable all tables */ for (i = 0; i < 7; i++) WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); @@ -205,6 +212,17 @@ void r600_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp); WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp); + if (rdev->gart.table.vram.robj) { + radeon_object_kunmap(rdev->gart.table.vram.robj); + radeon_object_unpin(rdev->gart.table.vram.robj); + } +} + +void r600_pcie_gart_fini(struct radeon_device *rdev) +{ + r600_pcie_gart_disable(rdev); + radeon_gart_table_vram_free(rdev); + radeon_gart_fini(rdev); } int r600_mc_wait_for_idle(struct radeon_device *rdev) @@ -321,6 +339,10 @@ static void r600_mc_resume(struct radeon_device *rdev) WREG32(D1VGA_CONTROL, d1vga_control); WREG32(D2VGA_CONTROL, d2vga_control); 
WREG32(VGA_RENDER_CONTROL, vga_render_control); + + /* we need to own VRAM, so turn off the VGA renderer here + * to stop it overwriting our objects */ + radeon_avivo_vga_render_disable(rdev); } int r600_mc_init(struct radeon_device *rdev) @@ -1444,7 +1466,7 @@ bool r600_card_posted(struct radeon_device *rdev) return false; } -int r600_resume(struct radeon_device *rdev) +int r600_startup(struct radeon_device *rdev) { int r; @@ -1454,6 +1476,14 @@ int r600_resume(struct radeon_device *rdev) if (r) return r; r600_gpu_init(rdev); + + r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + if (r) { + DRM_ERROR("failed to pin blit object %d\n", r); + return r; + } + r = radeon_ring_init(rdev, rdev->cp.ring_size); if (r) return r; @@ -1469,10 +1499,63 @@ int r600_resume(struct radeon_device *rdev) return 0; } +void r600_vga_set_state(struct radeon_device *rdev, bool state) +{ + uint32_t temp; + + temp = RREG32(CONFIG_CNTL); + if (state == false) { + temp &= ~(1<<0); + temp |= (1<<1); + } else { + temp &= ~(1<<1); + } + WREG32(CONFIG_CNTL, temp); +} + +int r600_resume(struct radeon_device *rdev) +{ + int r; + + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? */ + } + /* post card */ + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + return r; + } + + r = r600_startup(rdev); + if (r) { + DRM_ERROR("r600 startup failed on resume\n"); + return r; + } + + r = radeon_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + return r; + } + return r; +} + + int r600_suspend(struct radeon_device *rdev) { /* FIXME: we should wait for ring to be empty */ r600_cp_stop(rdev); + rdev->cp.ready = false; + + r600_pcie_gart_disable(rdev); + /* unpin shaders bo */ + radeon_object_unpin(rdev->r600_blit.shader_obj); return 0; } @@ -1517,6 +1600,7 @@ int r600_init(struct radeon_device *rdev) r600_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); + radeon_get_clock_info(rdev->ddev); r = radeon_clocks_init(rdev); if (r) return r; @@ -1549,7 +1633,18 @@ int r600_init(struct radeon_device *rdev) } } - r = r600_resume(rdev); + r = r600_pcie_gart_init(rdev); + if (r) + return r; + + rdev->accel_working = true; + r = r600_blit_init(rdev); + if (r) { + DRM_ERROR("radeon: failled blitter (%d).\n", r); + return r; + } + + r = r600_startup(rdev); if (r) { if (rdev->flags & RADEON_IS_AGP) { /* Retry with disabling AGP */ @@ -1557,22 +1652,19 @@ int r600_init(struct radeon_device *rdev) rdev->flags &= ~RADEON_IS_AGP; return r600_init(rdev); } - return r; - } - r = radeon_ib_pool_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - return r; - } - r = r600_blit_init(rdev); - if (r) { - DRM_ERROR("radeon: failled blitter (%d).\n", r); - return r; + rdev->accel_working = false; } - r = radeon_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; + if (rdev->accel_working) { + r = radeon_ib_pool_init(rdev); + if (r) { + DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); + rdev->accel_working = false; + } + r = radeon_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + rdev->accel_working = false; + } } return 0; } @@ -1584,9 +1676,7 @@ void r600_fini(struct radeon_device *rdev) r600_blit_fini(rdev); radeon_ring_fini(rdev); - 
r600_pcie_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); + r600_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c index c51402e92493..d988eece0187 100644 --- a/drivers/gpu/drm/radeon/r600_blit.c +++ b/drivers/gpu/drm/radeon/r600_blit.c @@ -126,7 +126,7 @@ set_shaders(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; u64 gpu_addr; - int shader_size, i; + int i; u32 *vs, *ps; uint32_t sq_pgm_resources; RING_LOCALS; @@ -136,11 +136,9 @@ set_shaders(struct drm_device *dev) vs = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset); ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); - shader_size = r6xx_vs_size; - for (i = 0; i < shader_size; i++) + for (i = 0; i < r6xx_vs_size; i++) vs[i] = r6xx_vs[i]; - shader_size = r6xx_ps_size; - for (i = 0; i < shader_size; i++) + for (i = 0; i < r6xx_ps_size; i++) ps[i] = r6xx_ps[i]; dev_priv->blit_vb->used = 512; @@ -309,7 +307,7 @@ draw_auto(drm_radeon_private_t *dev_priv) static inline void set_default_state(drm_radeon_private_t *dev_priv) { - int default_state_dw, i; + int i; u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; @@ -462,14 +460,12 @@ set_default_state(drm_radeon_private_t *dev_priv) R600_NUM_ES_STACK_ENTRIES(num_es_stack_entries)); if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV770) { - default_state_dw = r7xx_default_size * 4; - BEGIN_RING(default_state_dw + 10); - for (i = 0; i < default_state_dw; i++) + BEGIN_RING(r7xx_default_size + 10); + for (i = 0; i < r7xx_default_size; i++) OUT_RING(r7xx_default_state[i]); } else { - default_state_dw = r6xx_default_size * 4; - BEGIN_RING(default_state_dw + 10); - for (i = 0; i < default_state_dw; i++) + BEGIN_RING(r6xx_default_size + 10); + for (i = 0; i < r6xx_default_size; i++) OUT_RING(r6xx_default_state[i]); } OUT_RING(CP_PACKET3(R600_IT_EVENT_WRITE, 0)); @@ -512,7 +508,7 @@ static inline uint32_t i2f(uint32_t input) } -int r600_nomm_get_vb(struct drm_device *dev) +static inline int r600_nomm_get_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; dev_priv->blit_vb = radeon_freelist_get(dev); @@ -523,7 +519,7 @@ int r600_nomm_get_vb(struct drm_device *dev) return 0; } -void r600_nomm_put_vb(struct drm_device *dev) +static inline void r600_nomm_put_vb(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; @@ -531,7 +527,7 @@ void r600_nomm_put_vb(struct drm_device *dev) radeon_cp_discard_buffer(dev, dev_priv->blit_vb->file_priv->master, dev_priv->blit_vb); } -void *r600_nomm_get_vb_ptr(struct drm_device *dev) +static inline void *r600_nomm_get_vb_ptr(struct drm_device *dev) { drm_radeon_private_t *dev_priv = dev->dev_private; return (((char *)dev->agp_buffer_map->handle + @@ -741,7 +737,7 @@ r600_blit_copy(struct drm_device *dev, /* dst */ set_render_target(dev_priv, COLOR_8_8_8_8, - dst_x + cur_size, h, + (dst_x + cur_size) / 4, h, dst_gpu_addr); /* scissors */ @@ -781,8 +777,7 @@ r600_blit_swap(struct drm_device *dev, u64 vb_addr; u32 *vb; - vb = (u32 *) ((char *)dev->agp_buffer_map->handle + - dev_priv->blit_vb->offset + dev_priv->blit_vb->used); + vb = r600_nomm_get_vb_ptr(dev); if ((dev_priv->blit_vb->used + 48) > 
dev_priv->blit_vb->total) { diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 5755647e688a..acae33e2ad51 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c @@ -129,6 +129,7 @@ set_shaders(struct radeon_device *rdev) radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); + gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); } @@ -248,6 +249,7 @@ set_default_state(struct radeon_device *rdev) int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, num_es_stack_entries; u64 gpu_addr; + int dwords; switch (rdev->family) { case CHIP_R600: @@ -394,11 +396,12 @@ set_default_state(struct radeon_device *rdev) NUM_ES_STACK_ENTRIES(num_es_stack_entries)); /* emit an IB pointing at default state */ + dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf; gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); - radeon_ring_write(rdev, (rdev->r600_blit.state_len / 4)); + radeon_ring_write(rdev, dwords); radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0)); radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT); @@ -441,17 +444,25 @@ static inline uint32_t i2f(uint32_t input) int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; - int r; + int r, dwords; void *ptr; + u32 packet2s[16]; + int num_packet2s = 0; rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) - rdev->r600_blit.state_len = r7xx_default_size * 4; + rdev->r600_blit.state_len = r7xx_default_size; else - rdev->r600_blit.state_len = r6xx_default_size * 4; + rdev->r600_blit.state_len = r6xx_default_size; - obj_size = rdev->r600_blit.state_len; + dwords = rdev->r600_blit.state_len; + while (dwords & 0xf) { + packet2s[num_packet2s++] = PACKET2(0); + dwords++; + } + + obj_size = dwords * 4; obj_size = ALIGN(obj_size, 256); rdev->r600_blit.vs_offset = obj_size; @@ -470,15 +481,8 @@ int r600_blit_init(struct radeon_device *rdev) return r; } - r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, - &rdev->r600_blit.shader_gpu_addr); - if (r) { - DRM_ERROR("failed to pin blit object %d\n", r); - return r; - } - - DRM_DEBUG("r6xx blit allocated bo @ 0x%16llx %08x vs %08x ps %08x\n", - rdev->r600_blit.shader_gpu_addr, obj_size, + DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", + obj_size, rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr); @@ -488,9 +492,15 @@ int r600_blit_init(struct radeon_device *rdev) } if (rdev->family >= CHIP_RV770) - memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len); + memcpy_toio(ptr + rdev->r600_blit.state_offset, + r7xx_default_state, rdev->r600_blit.state_len * 4); else - memcpy_toio(ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len); + memcpy_toio(ptr + rdev->r600_blit.state_offset, + r6xx_default_state, rdev->r600_blit.state_len * 4); + if (num_packet2s) + memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), + packet2s, num_packet2s * 4); + memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); 
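/* Annotation (not part of the patch): the PACKET2(0) words copied in just
 * above pad the default-state block out to the same rounded size that
 * set_default_state() now programs into its INDIRECT_BUFFER packet,
 * i.e. (state_len + 0xf) & ~0xf dwords, so the CP only fetches harmless
 * NOPs past the real state.  The 16-dword rounding is presumed to be a
 * CP fetch-alignment requirement; the patch itself does not state the
 * reason. */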
memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); @@ -521,8 +531,8 @@ int r600_vb_ib_get(struct radeon_device *rdev) void r600_vb_ib_put(struct radeon_device *rdev) { - mutex_lock(&rdev->ib_pool.mutex); radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence); + mutex_lock(&rdev->ib_pool.mutex); list_add_tail(&rdev->r600_blit.vb_ib->list, &rdev->ib_pool.scheduled_ibs); mutex_unlock(&rdev->ib_pool.mutex); radeon_ib_free(rdev, &rdev->r600_blit.vb_ib); @@ -531,14 +541,32 @@ void r600_vb_ib_put(struct radeon_device *rdev) int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes) { int r; - int ring_size; - const int max_size = 8192*8192; + int ring_size, line_size; + int max_size; + /* loops of emits 64 + fence emit possible */ + int dwords_per_loop = 76, num_loops; r = r600_vb_ib_get(rdev); WARN_ON(r); - /* loops of emits 64 + fence emit possible */ - ring_size = ((size_bytes + max_size) / max_size) * 78; + /* set_render_target emits 2 extra dwords on rv6xx */ + if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) + dwords_per_loop += 2; + + /* 8 bpp vs 32 bpp for xfer unit */ + if (size_bytes & 3) + line_size = 8192; + else + line_size = 8192*4; + + max_size = 8192 * line_size; + + /* major loops cover the max size transfer */ + num_loops = ((size_bytes + max_size) / max_size); + /* minor loops cover the extra non aligned bits */ + num_loops += ((size_bytes % line_size) ? 1 : 0); + /* calculate number of loops correctly */ + ring_size = num_loops * dwords_per_loop; /* set default + shaders */ ring_size += 40; /* shaders + def state */ ring_size += 3; /* fence emit for VB IB */ @@ -746,7 +774,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, /* dst 23 */ set_render_target(rdev, COLOR_8_8_8_8, - dst_x + cur_size, h, + (dst_x + cur_size) / 4, h, dst_gpu_addr); /* scissors 12 */ diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 39bf6349351b..33b89cd8743e 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c @@ -27,7 +27,6 @@ */ #include "drmP.h" #include "radeon.h" -#include "radeon_share.h" #include "r600d.h" #include "avivod.h" diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 723295f59281..4a9028a85c9b 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h @@ -78,6 +78,7 @@ #define CB_COLOR0_MASK 0x28100 #define CONFIG_MEMSIZE 0x5428 +#define CONFIG_CNTL 0x5424 #define CP_STAT 0x8680 #define CP_COHER_BASE 0x85F8 #define CP_DEBUG 0xC1FC diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 3299733ac300..6311b1362594 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -49,8 +49,8 @@ #include <linux/list.h> #include <linux/kref.h> +#include "radeon_family.h" #include "radeon_mode.h" -#include "radeon_share.h" #include "radeon_reg.h" /* @@ -76,64 +76,7 @@ extern int radeon_tv; #define RADEON_IB_POOL_SIZE 16 #define RADEON_DEBUGFS_MAX_NUM_FILES 32 #define RADEONFB_CONN_LIMIT 4 - -enum radeon_family { - CHIP_R100, - CHIP_RV100, - CHIP_RS100, - CHIP_RV200, - CHIP_RS200, - CHIP_R200, - CHIP_RV250, - CHIP_RS300, - CHIP_RV280, - CHIP_R300, - CHIP_R350, - CHIP_RV350, - CHIP_RV380, - CHIP_R420, - CHIP_R423, - CHIP_RV410, - CHIP_RS400, - CHIP_RS480, - CHIP_RS600, - CHIP_RS690, - CHIP_RS740, - CHIP_RV515, - CHIP_R520, - CHIP_RV530, - CHIP_RV560, - CHIP_RV570, - CHIP_R580, - CHIP_R600, - CHIP_RV610, - CHIP_RV630, - CHIP_RV620, - CHIP_RV635, - CHIP_RV670, - CHIP_RS780, - 
CHIP_RS880, - CHIP_RV770, - CHIP_RV730, - CHIP_RV710, - CHIP_RV740, - CHIP_LAST, -}; - -enum radeon_chip_flags { - RADEON_FAMILY_MASK = 0x0000ffffUL, - RADEON_FLAGS_MASK = 0xffff0000UL, - RADEON_IS_MOBILITY = 0x00010000UL, - RADEON_IS_IGP = 0x00020000UL, - RADEON_SINGLE_CRTC = 0x00040000UL, - RADEON_IS_AGP = 0x00080000UL, - RADEON_HAS_HIERZ = 0x00100000UL, - RADEON_IS_PCIE = 0x00200000UL, - RADEON_NEW_MEMMAP = 0x00400000UL, - RADEON_IS_PCI = 0x00800000UL, - RADEON_IS_IGPGART = 0x01000000UL, -}; - +#define RADEON_BIOS_NUM_SCRATCH 8 /* * Errata workarounds. @@ -403,6 +346,10 @@ struct radeon_ib { uint32_t length_dw; }; +/* + * locking - + * mutex protects scheduled_ibs, ready, alloc_bm + */ struct radeon_ib_pool { struct mutex mutex; struct radeon_object *robj; @@ -592,11 +539,14 @@ struct radeon_asic { int (*suspend)(struct radeon_device *rdev); void (*errata)(struct radeon_device *rdev); void (*vram_info)(struct radeon_device *rdev); + void (*vga_set_state)(struct radeon_device *rdev, bool state); int (*gpu_reset)(struct radeon_device *rdev); int (*mc_init)(struct radeon_device *rdev); void (*mc_fini)(struct radeon_device *rdev); int (*wb_init)(struct radeon_device *rdev); void (*wb_fini)(struct radeon_device *rdev); + int (*gart_init)(struct radeon_device *rdev); + void (*gart_fini)(struct radeon_device *rdev); int (*gart_enable)(struct radeon_device *rdev); void (*gart_disable)(struct radeon_device *rdev); void (*gart_tlb_flush)(struct radeon_device *rdev); @@ -640,11 +590,55 @@ struct radeon_asic { void (*bandwidth_update)(struct radeon_device *rdev); }; +/* + * Asic structures + */ struct r100_asic { const unsigned *reg_safe_bm; unsigned reg_safe_bm_size; }; +struct r300_asic { + const unsigned *reg_safe_bm; + unsigned reg_safe_bm_size; +}; + +struct r600_asic { + unsigned max_pipes; + unsigned max_tile_pipes; + unsigned max_simds; + unsigned max_backends; + unsigned max_gprs; + unsigned max_threads; + unsigned max_stack_entries; + unsigned max_hw_contexts; + unsigned max_gs_threads; + unsigned sx_max_export_size; + unsigned sx_max_export_pos_size; + unsigned sx_max_export_smx_size; + unsigned sq_num_cf_insts; +}; + +struct rv770_asic { + unsigned max_pipes; + unsigned max_tile_pipes; + unsigned max_simds; + unsigned max_backends; + unsigned max_gprs; + unsigned max_threads; + unsigned max_stack_entries; + unsigned max_hw_contexts; + unsigned max_gs_threads; + unsigned sx_max_export_size; + unsigned sx_max_export_pos_size; + unsigned sx_max_export_smx_size; + unsigned sq_num_cf_insts; + unsigned sx_num_of_sets; + unsigned sc_prim_fifo_size; + unsigned sc_hiz_tile_fifo_size; + unsigned sc_earlyz_tile_fifo_fize; +}; + union radeon_asic_config { struct r300_asic r300; struct r100_asic r100; @@ -690,6 +684,7 @@ typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t); typedef void (*radeon_wreg_t)(struct radeon_device*, uint32_t, uint32_t); struct radeon_device { + struct device *dev; struct drm_device *ddev; struct pci_dev *pdev; /* ASIC */ @@ -733,6 +728,7 @@ struct radeon_device { struct radeon_asic *asic; struct radeon_gem gem; struct radeon_pm pm; + uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; struct mutex cs_mutex; struct radeon_wb wb; struct radeon_dummy_page dummy_page; @@ -741,6 +737,7 @@ struct radeon_device { bool suspend; bool need_dma32; bool new_init_path; + bool accel_working; struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; const struct firmware *me_fw; /* all family ME firmware */ const struct firmware *pfp_fw; /* r6/700 PFP firmware */ @@ 
-901,11 +898,14 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_cs_parse(p) rdev->asic->cs_parse((p)) #define radeon_errata(rdev) (rdev)->asic->errata((rdev)) #define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) +#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) #define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) #define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) #define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) #define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) #define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) +#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) +#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) #define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) #define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) #define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) @@ -933,4 +933,88 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v) #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) +/* Common functions */ +extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); +extern int radeon_modeset_init(struct radeon_device *rdev); +extern void radeon_modeset_fini(struct radeon_device *rdev); +extern bool radeon_card_posted(struct radeon_device *rdev); +extern int radeon_clocks_init(struct radeon_device *rdev); +extern void radeon_clocks_fini(struct radeon_device *rdev); +extern void radeon_scratch_init(struct radeon_device *rdev); +extern void radeon_surface_init(struct radeon_device *rdev); +extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); + +/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ +struct r100_mc_save { + u32 GENMO_WT; + u32 CRTC_EXT_CNTL; + u32 CRTC_GEN_CNTL; + u32 CRTC2_GEN_CNTL; + u32 CUR_OFFSET; + u32 CUR2_OFFSET; +}; +extern void r100_cp_disable(struct radeon_device *rdev); +extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); +extern void r100_cp_fini(struct radeon_device *rdev); +extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev); +extern int r100_pci_gart_init(struct radeon_device *rdev); +extern void r100_pci_gart_fini(struct radeon_device *rdev); +extern int r100_pci_gart_enable(struct radeon_device *rdev); +extern void r100_pci_gart_disable(struct radeon_device *rdev); +extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); +extern int r100_debugfs_mc_info_init(struct radeon_device *rdev); +extern int r100_gui_wait_for_idle(struct radeon_device *rdev); +extern void r100_ib_fini(struct radeon_device *rdev); +extern int r100_ib_init(struct radeon_device *rdev); +extern void r100_irq_disable(struct radeon_device *rdev); +extern int r100_irq_set(struct radeon_device *rdev); +extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); +extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); +extern void r100_vram_init_sizes(struct radeon_device *rdev); +extern void r100_wb_disable(struct radeon_device *rdev); +extern void r100_wb_fini(struct radeon_device *rdev); +extern int r100_wb_init(struct radeon_device *rdev); + +/* r300,r350,rv350,rv370,rv380 */ +extern void r300_set_reg_safe(struct radeon_device *rdev); +extern void r300_mc_program(struct radeon_device 
*rdev); +extern void r300_vram_info(struct radeon_device *rdev); +extern int rv370_pcie_gart_init(struct radeon_device *rdev); +extern void rv370_pcie_gart_fini(struct radeon_device *rdev); +extern int rv370_pcie_gart_enable(struct radeon_device *rdev); +extern void rv370_pcie_gart_disable(struct radeon_device *rdev); + +/* r420,r423,rv410 */ +extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); +extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); +extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); + +/* rv515 */ +extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); + +/* rs690, rs740 */ +extern void rs690_line_buffer_adjust(struct radeon_device *rdev, + struct drm_display_mode *mode1, + struct drm_display_mode *mode2); + +/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ +extern bool r600_card_posted(struct radeon_device *rdev); +extern void r600_cp_stop(struct radeon_device *rdev); +extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); +extern int r600_cp_resume(struct radeon_device *rdev); +extern int r600_count_pipe_bits(uint32_t val); +extern int r600_gart_clear_page(struct radeon_device *rdev, int i); +extern int r600_mc_wait_for_idle(struct radeon_device *rdev); +extern int r600_pcie_gart_init(struct radeon_device *rdev); +extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); +extern int r600_ib_test(struct radeon_device *rdev); +extern int r600_ring_test(struct radeon_device *rdev); +extern int r600_wb_init(struct radeon_device *rdev); +extern void r600_wb_fini(struct radeon_device *rdev); +extern void r600_scratch_init(struct radeon_device *rdev); +extern int r600_blit_init(struct radeon_device *rdev); +extern void r600_blit_fini(struct radeon_device *rdev); +extern int r600_cp_init_microcode(struct radeon_device *rdev); +extern int r600_gpu_reset(struct radeon_device *rdev); + #endif diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index e87bb915a6de..8968f78fa1e3 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h @@ -47,13 +47,16 @@ uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); void r100_errata(struct radeon_device *rdev); void r100_vram_info(struct radeon_device *rdev); +void r100_vga_set_state(struct radeon_device *rdev, bool state); int r100_gpu_reset(struct radeon_device *rdev); int r100_mc_init(struct radeon_device *rdev); void r100_mc_fini(struct radeon_device *rdev); u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); int r100_wb_init(struct radeon_device *rdev); void r100_wb_fini(struct radeon_device *rdev); -int r100_gart_enable(struct radeon_device *rdev); +int r100_pci_gart_init(struct radeon_device *rdev); +void r100_pci_gart_fini(struct radeon_device *rdev); +int r100_pci_gart_enable(struct radeon_device *rdev); void r100_pci_gart_disable(struct radeon_device *rdev); void r100_pci_gart_tlb_flush(struct radeon_device *rdev); int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); @@ -87,12 +90,15 @@ static struct radeon_asic r100_asic = { .init = &r100_init, .errata = &r100_errata, .vram_info = &r100_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r100_gpu_reset, .mc_init = &r100_mc_init, .mc_fini = &r100_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, - .gart_enable = &r100_gart_enable, + .gart_init = &r100_pci_gart_init, + 
.gart_fini = &r100_pci_gart_fini, + .gart_enable = &r100_pci_gart_enable, .gart_disable = &r100_pci_gart_disable, .gart_tlb_flush = &r100_pci_gart_tlb_flush, .gart_set_page = &r100_pci_gart_set_page, @@ -135,7 +141,9 @@ void r300_ring_start(struct radeon_device *rdev); void r300_fence_ring_emit(struct radeon_device *rdev, struct radeon_fence *fence); int r300_cs_parse(struct radeon_cs_parser *p); -int r300_gart_enable(struct radeon_device *rdev); +int rv370_pcie_gart_init(struct radeon_device *rdev); +void rv370_pcie_gart_fini(struct radeon_device *rdev); +int rv370_pcie_gart_enable(struct radeon_device *rdev); void rv370_pcie_gart_disable(struct radeon_device *rdev); void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); @@ -152,12 +160,15 @@ static struct radeon_asic r300_asic = { .init = &r300_init, .errata = &r300_errata, .vram_info = &r300_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &r300_mc_init, .mc_fini = &r300_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, - .gart_enable = &r300_gart_enable, + .gart_init = &r100_pci_gart_init, + .gart_fini = &r100_pci_gart_fini, + .gart_enable = &r100_pci_gart_enable, .gart_disable = &r100_pci_gart_disable, .gart_tlb_flush = &r100_pci_gart_tlb_flush, .gart_set_page = &r100_pci_gart_set_page, @@ -189,31 +200,35 @@ static struct radeon_asic r300_asic = { /* * r420,r423,rv410 */ -void r420_errata(struct radeon_device *rdev); -void r420_vram_info(struct radeon_device *rdev); -int r420_mc_init(struct radeon_device *rdev); -void r420_mc_fini(struct radeon_device *rdev); +extern int r420_init(struct radeon_device *rdev); +extern void r420_fini(struct radeon_device *rdev); +extern int r420_suspend(struct radeon_device *rdev); +extern int r420_resume(struct radeon_device *rdev); static struct radeon_asic r420_asic = { - .init = &r300_init, - .errata = &r420_errata, - .vram_info = &r420_vram_info, + .init = &r420_init, + .fini = &r420_fini, + .suspend = &r420_suspend, + .resume = &r420_resume, + .errata = NULL, + .vram_info = NULL, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, - .mc_init = &r420_mc_init, - .mc_fini = &r420_mc_fini, - .wb_init = &r100_wb_init, - .wb_fini = &r100_wb_fini, - .gart_enable = &r300_gart_enable, - .gart_disable = &rv370_pcie_gart_disable, + .mc_init = NULL, + .mc_fini = NULL, + .wb_init = NULL, + .wb_fini = NULL, + .gart_enable = NULL, + .gart_disable = NULL, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, - .cp_init = &r100_cp_init, - .cp_fini = &r100_cp_fini, - .cp_disable = &r100_cp_disable, + .cp_init = NULL, + .cp_fini = NULL, + .cp_disable = NULL, .cp_commit = &r100_cp_commit, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ring_ib_execute = &r100_ring_ib_execute, - .ib_test = &r100_ib_test, + .ib_test = NULL, .irq_set = &r100_irq_set, .irq_process = &r100_irq_process, .get_vblank_counter = &r100_get_vblank_counter, @@ -239,6 +254,8 @@ void rs400_errata(struct radeon_device *rdev); void rs400_vram_info(struct radeon_device *rdev); int rs400_mc_init(struct radeon_device *rdev); void rs400_mc_fini(struct radeon_device *rdev); +int rs400_gart_init(struct radeon_device *rdev); +void rs400_gart_fini(struct radeon_device *rdev); int rs400_gart_enable(struct radeon_device *rdev); void rs400_gart_disable(struct radeon_device *rdev); void rs400_gart_tlb_flush(struct radeon_device *rdev); @@ -249,11 +266,14 @@ 
static struct radeon_asic rs400_asic = { .init = &r300_init, .errata = &rs400_errata, .vram_info = &rs400_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &rs400_mc_init, .mc_fini = &rs400_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, + .gart_init = &rs400_gart_init, + .gart_fini = &rs400_gart_fini, .gart_enable = &rs400_gart_enable, .gart_disable = &rs400_gart_disable, .gart_tlb_flush = &rs400_gart_tlb_flush, @@ -295,6 +315,8 @@ void rs600_mc_fini(struct radeon_device *rdev); int rs600_irq_set(struct radeon_device *rdev); int rs600_irq_process(struct radeon_device *rdev); u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); +int rs600_gart_init(struct radeon_device *rdev); +void rs600_gart_fini(struct radeon_device *rdev); int rs600_gart_enable(struct radeon_device *rdev); void rs600_gart_disable(struct radeon_device *rdev); void rs600_gart_tlb_flush(struct radeon_device *rdev); @@ -306,11 +328,14 @@ static struct radeon_asic rs600_asic = { .init = &rs600_init, .errata = &rs600_errata, .vram_info = &rs600_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &rs600_mc_init, .mc_fini = &rs600_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, + .gart_init = &rs600_gart_init, + .gart_fini = &rs600_gart_fini, .gart_enable = &rs600_gart_enable, .gart_disable = &rs600_gart_disable, .gart_tlb_flush = &rs600_gart_tlb_flush, @@ -353,11 +378,14 @@ static struct radeon_asic rs690_asic = { .init = &rs600_init, .errata = &rs690_errata, .vram_info = &rs690_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &r300_gpu_reset, .mc_init = &rs690_mc_init, .mc_fini = &rs690_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, + .gart_init = &rs400_gart_init, + .gart_fini = &rs400_gart_fini, .gart_enable = &rs400_gart_enable, .gart_disable = &rs400_gart_disable, .gart_tlb_flush = &rs400_gart_tlb_flush, @@ -407,12 +435,15 @@ static struct radeon_asic rv515_asic = { .init = &rv515_init, .errata = &rv515_errata, .vram_info = &rv515_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, .mc_init = &rv515_mc_init, .mc_fini = &rv515_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, - .gart_enable = &r300_gart_enable, + .gart_init = &rv370_pcie_gart_init, + .gart_fini = &rv370_pcie_gart_fini, + .gart_enable = &rv370_pcie_gart_enable, .gart_disable = &rv370_pcie_gart_disable, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, @@ -454,12 +485,15 @@ static struct radeon_asic r520_asic = { .init = &rv515_init, .errata = &r520_errata, .vram_info = &r520_vram_info, + .vga_set_state = &r100_vga_set_state, .gpu_reset = &rv515_gpu_reset, .mc_init = &r520_mc_init, .mc_fini = &r520_mc_fini, .wb_init = &r100_wb_init, .wb_fini = &r100_wb_fini, - .gart_enable = &r300_gart_enable, + .gart_init = &rv370_pcie_gart_init, + .gart_fini = &rv370_pcie_gart_fini, + .gart_enable = &rv370_pcie_gart_enable, .gart_disable = &rv370_pcie_gart_disable, .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, .gart_set_page = &rv370_pcie_gart_set_page, @@ -495,6 +529,7 @@ int r600_init(struct radeon_device *rdev); void r600_fini(struct radeon_device *rdev); int r600_suspend(struct radeon_device *rdev); int r600_resume(struct radeon_device *rdev); +void r600_vga_set_state(struct radeon_device *rdev, bool state); int r600_wb_init(struct radeon_device *rdev); void r600_wb_fini(struct radeon_device *rdev); void r600_cp_commit(struct radeon_device 
*rdev); @@ -531,6 +566,7 @@ static struct radeon_asic r600_asic = { .resume = &r600_resume, .cp_commit = &r600_cp_commit, .vram_info = NULL, + .vga_set_state = &r600_vga_set_state, .gpu_reset = &r600_gpu_reset, .mc_init = NULL, .mc_fini = NULL, @@ -553,7 +589,7 @@ static struct radeon_asic r600_asic = { .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, .copy_dma = &r600_copy_blit, - .copy = NULL, + .copy = &r600_copy_blit, .set_engine_clock = &radeon_atom_set_engine_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, @@ -581,6 +617,7 @@ static struct radeon_asic rv770_asic = { .cp_commit = &r600_cp_commit, .vram_info = NULL, .gpu_reset = &rv770_gpu_reset, + .vga_set_state = &r600_vga_set_state, .mc_init = NULL, .mc_fini = NULL, .wb_init = &r600_wb_init, @@ -602,7 +639,7 @@ static struct radeon_asic rv770_asic = { .cs_parse = &r600_cs_parse, .copy_blit = &r600_copy_blit, .copy_dma = &r600_copy_blit, - .copy = NULL, + .copy = &r600_copy_blit, .set_engine_clock = &radeon_atom_set_engine_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .set_pcie_lanes = NULL, diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index a8fb392c9cd6..743742128307 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -104,7 +104,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, struct radeon_i2c_bus_rec *i2c_bus, - uint8_t *line_mux) + uint16_t *line_mux) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ @@ -143,20 +143,31 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, return false; } - /* some BIOSes seem to report DAC on HDMI - they hurt me with their lies */ - if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) || - (*connector_type == DRM_MODE_CONNECTOR_HDMIB)) { - if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) { - return false; - } - } - /* ASUS HD 3600 XT board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x9598) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x01da)) { - if (*connector_type == DRM_MODE_CONNECTOR_HDMIB) { - *connector_type = DRM_MODE_CONNECTOR_DVID; + if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + } + } + + /* ASUS HD 3450 board lists the DVI port as HDMI */ + if ((dev->pdev->device == 0x95C5) && + (dev->pdev->subsystem_vendor == 0x1043) && + (dev->pdev->subsystem_device == 0x01e2)) { + if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { + *connector_type = DRM_MODE_CONNECTOR_DVII; + } + } + + /* some BIOSes seem to report DAC on HDMI - usually this is a board with + * HDMI + VGA reporting as HDMI + */ + if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { + if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) { + *connector_type = DRM_MODE_CONNECTOR_VGA; + *line_mux = 0; } } @@ -192,11 +203,11 @@ const int object_connector_convert[] = { DRM_MODE_CONNECTOR_Composite, DRM_MODE_CONNECTOR_SVIDEO, DRM_MODE_CONNECTOR_Unknown, + DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_Unknown, DRM_MODE_CONNECTOR_HDMIA, DRM_MODE_CONNECTOR_HDMIB, - DRM_MODE_CONNECTOR_HDMIB, DRM_MODE_CONNECTOR_LVDS, DRM_MODE_CONNECTOR_9PinDIN, DRM_MODE_CONNECTOR_Unknown, @@ -218,7 +229,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) ATOM_OBJECT_HEADER *obj_header; int i, j, path_size, device_support; int connector_type; - uint16_t igp_lane_info; + 
uint16_t igp_lane_info, conn_id; bool linkb; struct radeon_i2c_bus_rec ddc_bus; @@ -405,9 +416,15 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) else ddc_bus = radeon_lookup_gpio(dev, line_mux); + conn_id = le16_to_cpu(path->usConnObjectId); + + if (!radeon_atom_apply_quirks + (dev, le16_to_cpu(path->usDeviceTag), &connector_type, + &ddc_bus, &conn_id)) + continue; + radeon_add_atom_connector(dev, - le16_to_cpu(path-> - usConnObjectId), + conn_id, le16_to_cpu(path-> usDeviceTag), connector_type, &ddc_bus, @@ -423,7 +440,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) struct bios_connector { bool valid; - uint8_t line_mux; + uint16_t line_mux; uint16_t devices; int connector_type; struct radeon_i2c_bus_rec ddc_bus; @@ -702,9 +719,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) return false; } -struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct - radeon_encoder - *encoder) +bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; @@ -715,7 +731,6 @@ struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct uint8_t frev, crev; uint16_t maxfreq; int i; - struct radeon_encoder_int_tmds *tmds = NULL; atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); @@ -725,12 +740,6 @@ struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct data_offset); if (tmds_info) { - tmds = - kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); - - if (!tmds) - return NULL; - maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); for (i = 0; i < 4; i++) { tmds->tmds_pll[i].freq = @@ -756,8 +765,9 @@ struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct break; } } + return true; } - return tmds; + return false; } union lvds_info { @@ -1028,6 +1038,34 @@ void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) } +void radeon_save_bios_scratch_regs(struct radeon_device *rdev) +{ + uint32_t scratch_reg; + int i; + + if (rdev->family >= CHIP_R600) + scratch_reg = R600_BIOS_0_SCRATCH; + else + scratch_reg = RADEON_BIOS_0_SCRATCH; + + for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++) + rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4)); +} + +void radeon_restore_bios_scratch_regs(struct radeon_device *rdev) +{ + uint32_t scratch_reg; + int i; + + if (rdev->family >= CHIP_R600) + scratch_reg = R600_BIOS_0_SCRATCH; + else + scratch_reg = RADEON_BIOS_0_SCRATCH; + + for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++) + WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]); +} + void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) { struct drm_device *dev = encoder->dev; diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 2a027e00762a..748265a105b3 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -863,8 +863,10 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder int tmp, i; struct radeon_encoder_lvds *lvds = NULL; - if (rdev->bios == NULL) - return radeon_legacy_get_lvds_info_from_regs(rdev); + if (rdev->bios == NULL) { + lvds = radeon_legacy_get_lvds_info_from_regs(rdev); + goto out; + } lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); @@ -965,11 +967,13 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder 
lvds->native_mode.flags = 0; } } - encoder->native_mode = lvds->native_mode; } else { DRM_INFO("No panel info found in BIOS\n"); - return radeon_legacy_get_lvds_info_from_regs(rdev); + lvds = radeon_legacy_get_lvds_info_from_regs(rdev); } +out: + if (lvds) + encoder->native_mode = lvds->native_mode; return lvds; } @@ -994,48 +998,37 @@ static const struct radeon_tmds_pll default_tmds_pll[CHIP_LAST][4] = { {{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */ }; -static struct radeon_encoder_int_tmds - *radeon_legacy_get_tmds_info_from_table(struct radeon_device *rdev) +bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds) { + struct drm_device *dev = encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; int i; - struct radeon_encoder_int_tmds *tmds = NULL; - - tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); - - if (!tmds) - return NULL; for (i = 0; i < 4; i++) { tmds->tmds_pll[i].value = - default_tmds_pll[rdev->family][i].value; + default_tmds_pll[rdev->family][i].value; tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq; } - return tmds; + return true; } -struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct - radeon_encoder - *encoder) +bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; uint16_t tmds_info; int i, n; uint8_t ver; - struct radeon_encoder_int_tmds *tmds = NULL; if (rdev->bios == NULL) - return radeon_legacy_get_tmds_info_from_table(rdev); + return false; tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); if (tmds_info) { - tmds = - kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); - - if (!tmds) - return NULL; ver = RBIOS8(tmds_info); DRM_INFO("DFP table revision: %d\n", ver); @@ -1073,6 +1066,23 @@ struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct } } else DRM_INFO("No TMDS info found in BIOS\n"); + return true; +} + +struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder) +{ + struct radeon_encoder_int_tmds *tmds = NULL; + bool ret; + + tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); + + if (!tmds) + return NULL; + + ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); + if (ret == false) + radeon_legacy_get_tmds_info_from_table(encoder, tmds); + return tmds; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 6a2b0296adff..af1d551f1a8f 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -28,6 +28,7 @@ #include "drm_crtc_helper.h" #include "radeon_drm.h" #include "radeon.h" +#include "atom.h" extern void radeon_combios_connected_scratch_regs(struct drm_connector *connector, @@ -38,6 +39,15 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected); +static void radeon_property_change_mode(struct drm_encoder *encoder) +{ + struct drm_crtc *crtc = encoder->crtc; + + if (crtc && crtc->enabled) { + drm_crtc_helper_set_mode(crtc, &crtc->mode, + crtc->x, crtc->y, crtc->fb); + } +} static void radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status) { @@ -77,6 +87,27 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c } 
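/*
 * Editorial sketch, not part of the patch: the combios hunks above convert
 * the TMDS lookup to fill a caller-provided struct and to fall back to the
 * built-in PLL table when no BIOS DFP table is present.  This restates the
 * flow of the radeon_combios_get_tmds_info() wrapper added above under a
 * placeholder name; the helpers and types are the ones declared in this
 * patch.
 */
static struct radeon_encoder_int_tmds *
sketch_get_tmds_info(struct radeon_encoder *encoder)
{
	struct radeon_encoder_int_tmds *tmds;

	tmds = kzalloc(sizeof(*tmds), GFP_KERNEL);
	if (!tmds)
		return NULL;

	/* prefer the BIOS-provided DFP table, fall back to built-in defaults */
	if (!radeon_legacy_get_tmds_info_from_combios(encoder, tmds))
		radeon_legacy_get_tmds_info_from_table(encoder, tmds);

	return tmds;
}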
} +struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) +{ + struct drm_mode_object *obj; + struct drm_encoder *encoder; + int i; + + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) + break; + + obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + + encoder = obj_to_encoder(obj); + if (encoder->encoder_type == encoder_type) + return encoder; + } + return NULL; +} + struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { int enc_id = connector->encoder_ids[0]; @@ -94,7 +125,6 @@ struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) return NULL; } - /* * radeon_connector_analog_encoder_conflict_solve * - search for other connectors sharing this encoder @@ -174,12 +204,171 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode return mode; } +static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) +{ + struct drm_device *dev = encoder->dev; + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; + int i; + struct mode_size { + int w; + int h; + } common_modes[17] = { + { 640, 480}, + { 720, 480}, + { 800, 600}, + { 848, 480}, + {1024, 768}, + {1152, 768}, + {1280, 720}, + {1280, 800}, + {1280, 854}, + {1280, 960}, + {1280, 1024}, + {1440, 900}, + {1400, 1050}, + {1680, 1050}, + {1600, 1200}, + {1920, 1080}, + {1920, 1200} + }; + + for (i = 0; i < 17; i++) { + if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { + if (common_modes[i].w > native_mode->panel_xres || + common_modes[i].h > native_mode->panel_yres || + (common_modes[i].w == native_mode->panel_xres && + common_modes[i].h == native_mode->panel_yres)) + continue; + } + if (common_modes[i].w < 320 || common_modes[i].h < 200) + continue; + + mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false); + drm_mode_probed_add(connector, mode); + } +} + int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) { + struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; + struct drm_encoder *encoder; + struct radeon_encoder *radeon_encoder; + + if (property == rdev->mode_info.coherent_mode_property) { + struct radeon_encoder_atom_dig *dig; + + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + if (!radeon_encoder->enc_priv) + return 0; + + dig = radeon_encoder->enc_priv; + dig->coherent_mode = val ? 
true : false; + radeon_property_change_mode(&radeon_encoder->base); + } + + if (property == rdev->mode_info.tv_std_property) { + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); + if (!encoder) { + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC); + } + + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + if (!radeon_encoder->enc_priv) + return 0; + if (rdev->is_atom_bios) { + struct radeon_encoder_atom_dac *dac_int; + dac_int = radeon_encoder->enc_priv; + dac_int->tv_std = val; + } else { + struct radeon_encoder_tv_dac *dac_int; + dac_int = radeon_encoder->enc_priv; + dac_int->tv_std = val; + } + radeon_property_change_mode(&radeon_encoder->base); + } + + if (property == rdev->mode_info.load_detect_property) { + struct radeon_connector *radeon_connector = + to_radeon_connector(connector); + + if (val == 0) + radeon_connector->dac_load_detect = false; + else + radeon_connector->dac_load_detect = true; + } + + if (property == rdev->mode_info.tmds_pll_property) { + struct radeon_encoder_int_tmds *tmds = NULL; + bool ret = false; + /* need to find digital encoder on connector */ + encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); + if (!encoder) + return 0; + + radeon_encoder = to_radeon_encoder(encoder); + + tmds = radeon_encoder->enc_priv; + if (!tmds) + return 0; + + if (val == 0) { + if (rdev->is_atom_bios) + ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds); + else + ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds); + } + if (val == 1 || ret == false) { + radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds); + } + radeon_property_change_mode(&radeon_encoder->base); + } + return 0; } +static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; + + /* Try to get native mode details from EDID if necessary */ + if (!native_mode->dotclock) { + struct drm_display_mode *t, *mode; + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->hdisplay == native_mode->panel_xres && + mode->vdisplay == native_mode->panel_yres) { + native_mode->hblank = mode->htotal - mode->hdisplay; + native_mode->hoverplus = mode->hsync_start - mode->hdisplay; + native_mode->hsync_width = mode->hsync_end - mode->hsync_start; + native_mode->vblank = mode->vtotal - mode->vdisplay; + native_mode->voverplus = mode->vsync_start - mode->vdisplay; + native_mode->vsync_width = mode->vsync_end - mode->vsync_start; + native_mode->dotclock = mode->clock; + DRM_INFO("Determined LVDS native mode details from EDID\n"); + break; + } + } + } + if (!native_mode->dotclock) { + DRM_INFO("No LVDS native mode details, disabling RMX\n"); + radeon_encoder->rmx_type = RMX_OFF; + } +} static int radeon_lvds_get_modes(struct drm_connector *connector) { @@ -191,6 +380,12 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) if (radeon_connector->ddc_bus) { ret = radeon_ddc_get_modes(radeon_connector); if (ret > 0) { + encoder = radeon_best_single_encoder(connector); + if (encoder) { + radeon_fixup_lvds_native_mode(encoder, connector); + /* add scaled modes */ + radeon_add_common_modes(encoder, connector); + } return ret; } } @@ -204,7 +399,10 @@ static int radeon_lvds_get_modes(struct drm_connector *connector) if (mode) { ret = 1; drm_mode_probed_add(connector, mode); + /* add scaled modes */ + 
radeon_add_common_modes(encoder, connector); } + return ret; } @@ -234,6 +432,42 @@ static void radeon_connector_destroy(struct drm_connector *connector) kfree(connector); } +static int radeon_lvds_set_property(struct drm_connector *connector, + struct drm_property *property, + uint64_t value) +{ + struct drm_device *dev = connector->dev; + struct radeon_encoder *radeon_encoder; + enum radeon_rmx_type rmx_type; + + DRM_DEBUG("\n"); + if (property != dev->mode_config.scaling_mode_property) + return 0; + + if (connector->encoder) + radeon_encoder = to_radeon_encoder(connector->encoder); + else { + struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; + radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); + } + + switch (value) { + case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; + case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; + case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; + default: + case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; + } + if (radeon_encoder->rmx_type == rmx_type) + return 0; + + radeon_encoder->rmx_type = rmx_type; + + radeon_property_change_mode(&radeon_encoder->base); + return 0; +} + + struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { .get_modes = radeon_lvds_get_modes, .mode_valid = radeon_lvds_mode_valid, @@ -245,7 +479,7 @@ struct drm_connector_funcs radeon_lvds_connector_funcs = { .detect = radeon_lvds_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_connector_destroy, - .set_property = radeon_connector_set_property, + .set_property = radeon_lvds_set_property, }; static int radeon_vga_get_modes(struct drm_connector *connector) @@ -282,8 +516,10 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect if (dret) ret = connector_status_connected; else { - encoder_funcs = encoder->helper_private; - ret = encoder_funcs->detect(encoder, connector); + if (radeon_connector->dac_load_detect) { + encoder_funcs = encoder->helper_private; + ret = encoder_funcs->detect(encoder, connector); + } } if (ret == connector_status_connected) @@ -306,21 +542,27 @@ struct drm_connector_funcs radeon_vga_connector_funcs = { .set_property = radeon_connector_set_property, }; -static struct drm_display_mode tv_fixed_mode = { - DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 38250, 800, 832, - 912, 1024, 0, 600, 603, 607, 624, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC), -}; - static int radeon_tv_get_modes(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct radeon_device *rdev = dev->dev_private; struct drm_display_mode *tv_mode; + struct drm_encoder *encoder; - tv_mode = drm_mode_duplicate(dev, &tv_fixed_mode); - tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - - drm_mode_probed_add(connector, tv_mode); + encoder = radeon_best_single_encoder(connector); + if (!encoder) + return 0; + /* avivo chips can scale any mode */ + if (rdev->family >= CHIP_RS600) + /* add scaled modes */ + radeon_add_common_modes(encoder, connector); + else { + /* only 800x600 is supported right now on pre-avivo chips */ + tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false); + tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, tv_mode); + } return 1; } @@ -334,7 +576,11 @@ static enum drm_connector_status radeon_tv_detect(struct drm_connector *connecto { struct drm_encoder *encoder; struct drm_encoder_helper_funcs *encoder_funcs; - int ret; + struct 
radeon_connector *radeon_connector = to_radeon_connector(connector); + enum drm_connector_status ret = connector_status_disconnected; + + if (!radeon_connector->dac_load_detect) + return ret; encoder = radeon_best_single_encoder(connector); if (!encoder) @@ -418,27 +664,29 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect goto out; /* find analog encoder */ - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] == 0) - break; - - obj = drm_mode_object_find(connector->dev, - connector->encoder_ids[i], - DRM_MODE_OBJECT_ENCODER); - if (!obj) - continue; - - encoder = obj_to_encoder(obj); + if (radeon_connector->dac_load_detect) { + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] == 0) + break; - encoder_funcs = encoder->helper_private; - if (encoder_funcs->detect) { - if (ret != connector_status_connected) { - ret = encoder_funcs->detect(encoder, connector); - if (ret == connector_status_connected) { - radeon_connector->use_digital = false; + obj = drm_mode_object_find(connector->dev, + connector->encoder_ids[i], + DRM_MODE_OBJECT_ENCODER); + if (!obj) + continue; + + encoder = obj_to_encoder(obj); + + encoder_funcs = encoder->helper_private; + if (encoder_funcs->detect) { + if (ret != connector_status_connected) { + ret = encoder_funcs->detect(encoder, connector); + if (ret == connector_status_connected) { + radeon_connector->use_digital = false; + } } + break; } - break; } } @@ -518,6 +766,7 @@ radeon_add_atom_connector(struct drm_device *dev, bool linkb, uint32_t igp_lane_info) { + struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; struct radeon_connector_atom_dig *radeon_dig_connector; @@ -553,6 +802,9 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -562,6 +814,9 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -579,6 +834,12 @@ radeon_add_atom_connector(struct drm_device *dev, goto failed; } subpixel_order = SubPixelHorizontalRGB; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.coherent_mode_property, + 1); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); break; case DRM_MODE_CONNECTOR_HDMIA: case DRM_MODE_CONNECTOR_HDMIB: @@ -595,6 +856,9 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.coherent_mode_property, + 1); subpixel_order = SubPixelHorizontalRGB; break; case DRM_MODE_CONNECTOR_DisplayPort: @@ -620,6 +884,9 @@ radeon_add_atom_connector(struct drm_device *dev, drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); } + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); break; case DRM_MODE_CONNECTOR_LVDS: radeon_dig_connector = 
kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); @@ -635,6 +902,10 @@ radeon_add_atom_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_mode_create_scaling_mode_property(dev); + drm_connector_attach_property(&radeon_connector->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; break; } @@ -657,6 +928,7 @@ radeon_add_legacy_connector(struct drm_device *dev, int connector_type, struct radeon_i2c_bus_rec *i2c_bus) { + struct radeon_device *rdev = dev->dev_private; struct drm_connector *connector; struct radeon_connector *radeon_connector; uint32_t subpixel_order = SubPixelNone; @@ -691,6 +963,9 @@ radeon_add_legacy_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); break; case DRM_MODE_CONNECTOR_DVIA: drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); @@ -700,6 +975,9 @@ radeon_add_legacy_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: @@ -709,6 +987,9 @@ radeon_add_legacy_connector(struct drm_device *dev, radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); if (!radeon_connector->ddc_bus) goto failed; + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); } subpixel_order = SubPixelHorizontalRGB; break; @@ -718,6 +999,9 @@ radeon_add_legacy_connector(struct drm_device *dev, if (radeon_tv == 1) { drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); + drm_connector_attach_property(&radeon_connector->base, + rdev->mode_info.load_detect_property, + 1); } break; case DRM_MODE_CONNECTOR_LVDS: @@ -728,6 +1012,9 @@ radeon_add_legacy_connector(struct drm_device *dev, if (!radeon_connector->ddc_bus) goto failed; } + drm_connector_attach_property(&radeon_connector->base, + dev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_FULLSCREEN); subpixel_order = SubPixelHorizontalRGB; break; } diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c index fa063d0cfb63..4f7afc79dd82 100644 --- a/drivers/gpu/drm/radeon/radeon_cp.c +++ b/drivers/gpu/drm/radeon/radeon_cp.c @@ -616,6 +616,18 @@ static void radeon_do_cp_start(drm_radeon_private_t * dev_priv) dev_priv->cp_running = 1; + /* on r420, any DMA from CP to system memory while 2D is active + * can cause a hang. 
workaround is to queue a CP RESYNC token + */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) { + BEGIN_RING(3); + OUT_RING(CP_PACKET0(R300_CP_RESYNC_ADDR, 1)); + OUT_RING(5); /* scratch reg 5 */ + OUT_RING(0xdeadbeef); + ADVANCE_RING(); + COMMIT_RING(); + } + BEGIN_RING(8); /* isync can only be written through cp on r5xx write it here */ OUT_RING(CP_PACKET0(RADEON_ISYNC_CNTL, 0)); @@ -653,8 +665,19 @@ static void radeon_do_cp_reset(drm_radeon_private_t * dev_priv) */ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv) { + RING_LOCALS; DRM_DEBUG("\n"); + /* finish the pending CP_RESYNC token */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) { + BEGIN_RING(2); + OUT_RING(CP_PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); + OUT_RING(R300_RB3D_DC_FINISH); + ADVANCE_RING(); + COMMIT_RING(); + radeon_do_wait_for_idle(dev_priv); + } + RADEON_WRITE(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIDIS_INDDIS); dev_priv->cp_running = 0; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index a169067efc4e..12f5990c2d2a 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -145,7 +145,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; size = p->chunks[i].length_dw * sizeof(uint32_t); - p->chunks[i].kdata = kzalloc(size, GFP_KERNEL); + p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); if (p->chunks[i].kdata == NULL) { return -ENOMEM; } @@ -185,6 +185,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) mutex_unlock(&parser->rdev->ddev->struct_mutex); } } + kfree(parser->track); kfree(parser->relocs); kfree(parser->relocs_ptr); for (i = 0; i < parser->nchunks; i++) { diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index f2469c511789..daf5db780956 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -29,6 +29,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> +#include <linux/vgaarb.h> #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" @@ -156,6 +157,10 @@ int radeon_mc_setup(struct radeon_device *rdev) tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); rdev->mc.gtt_location = tmp; } + rdev->mc.vram_start = rdev->mc.vram_location; + rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; + rdev->mc.gtt_start = rdev->mc.gtt_location; + rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20)); DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", (unsigned)rdev->mc.vram_location, @@ -171,7 +176,7 @@ int radeon_mc_setup(struct radeon_device *rdev) /* * GPU helpers function. 
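/*
 * Editorial aside, not part of the patch: the vram_start/end and
 * gtt_start/end fields set in radeon_mc_setup() above are inclusive
 * address ranges derived from the existing location/size pairs:
 *
 *	rdev->mc.vram_start = rdev->mc.vram_location;
 *	rdev->mc.vram_end   = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
 *	rdev->mc.gtt_start  = rdev->mc.gtt_location;
 *	rdev->mc.gtt_end    = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 *
 * with gtt_location first rounded up to a gtt_size boundary by the
 * pre-existing code (which assumes gtt_size is a power of two):
 *
 *	tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
 */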
*/ -static bool radeon_card_posted(struct radeon_device *rdev) +bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; @@ -266,6 +271,10 @@ void radeon_register_accessor_init(struct radeon_device *rdev) rdev->pll_rreg = &r100_pll_rreg; rdev->pll_wreg = &r100_pll_wreg; } + if (rdev->family >= CHIP_R420) { + rdev->mc_rreg = &r420_mc_rreg; + rdev->mc_wreg = &r420_mc_wreg; + } if (rdev->family >= CHIP_RV515) { rdev->mc_rreg = &rv515_mc_rreg; rdev->mc_wreg = &rv515_mc_wreg; @@ -312,6 +321,14 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV350: case CHIP_RV380: rdev->asic = &r300_asic; + if (rdev->flags & RADEON_IS_PCIE) { + rdev->asic->gart_init = &rv370_pcie_gart_init; + rdev->asic->gart_fini = &rv370_pcie_gart_fini; + rdev->asic->gart_enable = &rv370_pcie_gart_enable; + rdev->asic->gart_disable = &rv370_pcie_gart_disable; + rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; + } break; case CHIP_R420: case CHIP_R423: @@ -370,7 +387,6 @@ int radeon_clocks_init(struct radeon_device *rdev) { int r; - radeon_get_clock_info(rdev->ddev); r = radeon_static_clocks_init(rdev->ddev); if (r) { return r; @@ -465,10 +481,18 @@ void radeon_combios_fini(struct radeon_device *rdev) { } -int radeon_modeset_init(struct radeon_device *rdev); -void radeon_modeset_fini(struct radeon_device *rdev); - +/* if we get transitioned to only one device, tak VGA back */ +static unsigned int radeon_vga_set_decode(void *cookie, bool state) +{ + struct radeon_device *rdev = cookie; + radeon_vga_set_state(rdev, state); + if (state) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} /* * Radeon device. 
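/*
 * Editorial sketch, not part of the patch: how the VGA arbiter decode hook
 * above is wired up.  The callback reports which VGA resources the GPU
 * still decodes once it has been taken over by KMS; the registration call
 * mirrors the vga_client_register() invocation added later in
 * radeon_device_init().  "my_set_decode" is a placeholder name, everything
 * else is taken from this patch.
 */
static unsigned int my_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;

	radeon_vga_set_state(rdev, state);	/* per-asic vga_set_state hook */
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* at init time: vga_client_register(rdev->pdev, rdev, NULL, my_set_decode); */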
*/ @@ -477,11 +501,12 @@ int radeon_device_init(struct radeon_device *rdev, struct pci_dev *pdev, uint32_t flags) { - int r, ret = 0; + int r; int dma_bits; DRM_INFO("radeon: Initializing kernel modesetting.\n"); rdev->shutdown = false; + rdev->dev = &pdev->dev; rdev->ddev = ddev; rdev->pdev = pdev; rdev->flags = flags; @@ -490,33 +515,47 @@ int radeon_device_init(struct radeon_device *rdev, rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT; rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; rdev->gpu_lockup = false; + rdev->accel_working = false; /* mutex initialization are all done here so we * can recall function without having locking issues */ mutex_init(&rdev->cs_mutex); mutex_init(&rdev->ib_pool.mutex); mutex_init(&rdev->cp.mutex); rwlock_init(&rdev->fence_drv.lock); + INIT_LIST_HEAD(&rdev->gem.objects); + + /* Set asic functions */ + r = radeon_asic_init(rdev); + if (r) { + return r; + } if (radeon_agpmode == -1) { rdev->flags &= ~RADEON_IS_AGP; - if (rdev->family > CHIP_RV515 || + if (rdev->family >= CHIP_RV515 || rdev->family == CHIP_RV380 || rdev->family == CHIP_RV410 || rdev->family == CHIP_R423) { DRM_INFO("Forcing AGP to PCIE mode\n"); rdev->flags |= RADEON_IS_PCIE; + rdev->asic->gart_init = &rv370_pcie_gart_init; + rdev->asic->gart_fini = &rv370_pcie_gart_fini; + rdev->asic->gart_enable = &rv370_pcie_gart_enable; + rdev->asic->gart_disable = &rv370_pcie_gart_disable; + rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; + rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; } else { DRM_INFO("Forcing AGP to PCI mode\n"); rdev->flags |= RADEON_IS_PCI; + rdev->asic->gart_init = &r100_pci_gart_init; + rdev->asic->gart_fini = &r100_pci_gart_fini; + rdev->asic->gart_enable = &r100_pci_gart_enable; + rdev->asic->gart_disable = &r100_pci_gart_disable; + rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; + rdev->asic->gart_set_page = &r100_pci_gart_set_page; } } - /* Set asic functions */ - r = radeon_asic_init(rdev); - if (r) { - return r; - } - /* set DMA mask + need_dma32 flags. * PCIE - can handle 40-bits. * IGP - can handle 40-bits (in theory) @@ -551,6 +590,13 @@ int radeon_device_init(struct radeon_device *rdev, if (r) { return r; } + + /* if we have > 1 VGA cards, then disable the radeon VGA resources */ + r = vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode); + if (r) { + return -EINVAL; + } + if (!rdev->new_init_path) { /* Setup errata flags */ radeon_errata(rdev); @@ -559,7 +605,6 @@ int radeon_device_init(struct radeon_device *rdev, /* Initialize surface registers */ radeon_surface_init(rdev); - /* TODO: disable VGA need to use VGA request */ /* BIOS*/ if (!radeon_get_bios(rdev)) { if (ASIC_IS_AVIVO(rdev)) @@ -589,22 +634,15 @@ int radeon_device_init(struct radeon_device *rdev, radeon_combios_asic_init(rdev->ddev); } } + /* Get clock & vram information */ + radeon_get_clock_info(rdev->ddev); + radeon_vram_info(rdev); /* Initialize clocks */ r = radeon_clocks_init(rdev); if (r) { return r; } - /* Get vram informations */ - radeon_vram_info(rdev); - /* Add an MTRR for the VRAM */ - rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, - MTRR_TYPE_WRCOMB, 1); - DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", - (unsigned)(rdev->mc.mc_vram_size >> 20), - (unsigned)(rdev->mc.aper_size >> 20)); - DRM_INFO("RAM width %dbits %cDR\n", - rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 
'D' : 'S'); /* Initialize memory controller (also test AGP) */ r = radeon_mc_init(rdev); if (r) { @@ -624,75 +662,60 @@ int radeon_device_init(struct radeon_device *rdev, if (r) { return r; } + r = radeon_gpu_gart_init(rdev); + if (r) + return r; /* Initialize GART (initialize after TTM so we can allocate * memory through TTM but finalize after TTM) */ r = radeon_gart_enable(rdev); - if (!r) { + if (r) + return 0; r = radeon_gem_init(rdev); - } + if (r) + return 0; /* 1M ring buffer */ - if (!r) { - r = radeon_cp_init(rdev, 1024 * 1024); - } - if (!r) { - r = radeon_wb_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing WB (%d).\n", r); - return r; - } - } - if (!r) { - r = radeon_ib_pool_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - return r; - } - } - if (!r) { - r = radeon_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; - } - } - ret = r; - } - r = radeon_modeset_init(rdev); - if (r) { - return r; - } - if (!ret) { - DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); + r = radeon_cp_init(rdev, 1024 * 1024); + if (r) + return 0; + r = radeon_wb_init(rdev); + if (r) + DRM_ERROR("radeon: failled initializing WB (%d).\n", r); + r = radeon_ib_pool_init(rdev); + if (r) + return 0; + r = radeon_ib_test(rdev); + if (r) + return 0; + rdev->accel_working = true; } + DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); if (radeon_testing) { radeon_test_moves(rdev); } if (radeon_benchmarking) { radeon_benchmark(rdev); } - return ret; + return 0; } void radeon_device_fini(struct radeon_device *rdev) { - if (rdev == NULL || rdev->rmmio == NULL) { - return; - } DRM_INFO("radeon: finishing device.\n"); rdev->shutdown = true; /* Order matter so becarefull if you rearrange anythings */ - radeon_modeset_fini(rdev); if (!rdev->new_init_path) { radeon_ib_pool_fini(rdev); radeon_cp_fini(rdev); radeon_wb_fini(rdev); + radeon_gpu_gart_fini(rdev); radeon_gem_fini(rdev); radeon_mc_fini(rdev); #if __OS_HAS_AGP radeon_agp_fini(rdev); #endif radeon_irq_kms_fini(rdev); + vga_client_register(rdev->pdev, NULL, NULL, NULL); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); radeon_object_fini(rdev); @@ -743,18 +766,19 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state) /* wait for gpu to finish processing current batch */ radeon_fence_wait_last(rdev); + radeon_save_bios_scratch_regs(rdev); + if (!rdev->new_init_path) { radeon_cp_disable(rdev); radeon_gart_disable(rdev); + rdev->irq.sw_int = false; + radeon_irq_set(rdev); } else { radeon_suspend(rdev); } /* evict remaining vram memory */ radeon_object_evict_vram(rdev); - rdev->irq.sw_int = false; - radeon_irq_set(rdev); - pci_save_state(dev->pdev); if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ @@ -781,10 +805,10 @@ int radeon_resume_kms(struct drm_device *dev) } pci_set_master(dev->pdev); /* Reset gpu before posting otherwise ATOM will enter infinite loop */ - if (radeon_gpu_reset(rdev)) { - /* FIXME: what do we want to do here ? */ - } if (!rdev->new_init_path) { + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? 
*/ + } /* post card */ if (rdev->is_atom_bios) { atom_asic_init(rdev->mode_info.atom_context); @@ -817,6 +841,7 @@ int radeon_resume_kms(struct drm_device *dev) radeon_resume(rdev); } out: + radeon_restore_bios_scratch_regs(rdev); fb_set_suspend(rdev->fbdev_info, 0); release_console_sem(); diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 9d817a62e7f1..5d8141b13765 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -365,7 +365,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector) return ret; } drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); - return -1; + return 0; } static int radeon_ddc_dump(struct drm_connector *connector) @@ -623,6 +623,83 @@ static const struct drm_mode_config_funcs radeon_mode_funcs = { .fb_changed = radeonfb_probe, }; +struct drm_prop_enum_list { + int type; + char *name; +}; + +static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = +{ { 0, "driver" }, + { 1, "bios" }, +}; + +static struct drm_prop_enum_list radeon_tv_std_enum_list[] = +{ { TV_STD_NTSC, "ntsc" }, + { TV_STD_PAL, "pal" }, + { TV_STD_PAL_M, "pal-m" }, + { TV_STD_PAL_60, "pal-60" }, + { TV_STD_NTSC_J, "ntsc-j" }, + { TV_STD_SCART_PAL, "scart-pal" }, + { TV_STD_PAL_CN, "pal-cn" }, + { TV_STD_SECAM, "secam" }, +}; + +int radeon_modeset_create_props(struct radeon_device *rdev) +{ + int i, sz; + + if (rdev->is_atom_bios) { + rdev->mode_info.coherent_mode_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_RANGE, + "coherent", 2); + if (!rdev->mode_info.coherent_mode_property) + return -ENOMEM; + + rdev->mode_info.coherent_mode_property->values[0] = 0; + rdev->mode_info.coherent_mode_property->values[0] = 1; + } + + if (!ASIC_IS_AVIVO(rdev)) { + sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); + rdev->mode_info.tmds_pll_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_ENUM, + "tmds_pll", sz); + for (i = 0; i < sz; i++) { + drm_property_add_enum(rdev->mode_info.tmds_pll_property, + i, + radeon_tmds_pll_enum_list[i].type, + radeon_tmds_pll_enum_list[i].name); + } + } + + rdev->mode_info.load_detect_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_RANGE, + "load detection", 2); + if (!rdev->mode_info.load_detect_property) + return -ENOMEM; + rdev->mode_info.load_detect_property->values[0] = 0; + rdev->mode_info.load_detect_property->values[0] = 1; + + drm_mode_create_scaling_mode_property(rdev->ddev); + + sz = ARRAY_SIZE(radeon_tv_std_enum_list); + rdev->mode_info.tv_std_property = + drm_property_create(rdev->ddev, + DRM_MODE_PROP_ENUM, + "tv standard", sz); + for (i = 0; i < sz; i++) { + drm_property_add_enum(rdev->mode_info.tv_std_property, + i, + radeon_tv_std_enum_list[i].type, + radeon_tv_std_enum_list[i].name); + } + + return 0; +} + int radeon_modeset_init(struct radeon_device *rdev) { int num_crtc = 2, i; @@ -643,6 +720,10 @@ int radeon_modeset_init(struct radeon_device *rdev) rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; + ret = radeon_modeset_create_props(rdev); + if (ret) { + return ret; + } /* allocate crtcs - TODO single crtc */ for (i = 0; i < num_crtc; i++) { radeon_crtc_init(rdev->ddev, i); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 29f040a7861b..50fce498910c 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -286,7 +286,7 @@ static struct drm_driver kms_driver = { .poll = drm_poll, .fasync = drm_fasync, #ifdef 
CONFIG_COMPAT - .compat_ioctl = NULL, + .compat_ioctl = radeon_kms_compat_ioctl, #endif }, diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h index c7b185924f6c..350962e0f346 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.h +++ b/drivers/gpu/drm/radeon/radeon_drv.h @@ -34,6 +34,8 @@ #include <linux/firmware.h> #include <linux/platform_device.h> +#include "radeon_family.h" + /* General customization: */ @@ -109,75 +111,12 @@ #define DRIVER_MINOR 31 #define DRIVER_PATCHLEVEL 0 -/* - * Radeon chip families - */ -enum radeon_family { - CHIP_R100, - CHIP_RV100, - CHIP_RS100, - CHIP_RV200, - CHIP_RS200, - CHIP_R200, - CHIP_RV250, - CHIP_RS300, - CHIP_RV280, - CHIP_R300, - CHIP_R350, - CHIP_RV350, - CHIP_RV380, - CHIP_R420, - CHIP_R423, - CHIP_RV410, - CHIP_RS400, - CHIP_RS480, - CHIP_RS600, - CHIP_RS690, - CHIP_RS740, - CHIP_RV515, - CHIP_R520, - CHIP_RV530, - CHIP_RV560, - CHIP_RV570, - CHIP_R580, - CHIP_R600, - CHIP_RV610, - CHIP_RV630, - CHIP_RV620, - CHIP_RV635, - CHIP_RV670, - CHIP_RS780, - CHIP_RS880, - CHIP_RV770, - CHIP_RV730, - CHIP_RV710, - CHIP_RV740, - CHIP_LAST, -}; - enum radeon_cp_microcode_version { UCODE_R100, UCODE_R200, UCODE_R300, }; -/* - * Chip flags - */ -enum radeon_chip_flags { - RADEON_FAMILY_MASK = 0x0000ffffUL, - RADEON_FLAGS_MASK = 0xffff0000UL, - RADEON_IS_MOBILITY = 0x00010000UL, - RADEON_IS_IGP = 0x00020000UL, - RADEON_SINGLE_CRTC = 0x00040000UL, - RADEON_IS_AGP = 0x00080000UL, - RADEON_HAS_HIERZ = 0x00100000UL, - RADEON_IS_PCIE = 0x00200000UL, - RADEON_NEW_MEMMAP = 0x00400000UL, - RADEON_IS_PCI = 0x00800000UL, - RADEON_IS_IGPGART = 0x01000000UL, -}; - typedef struct drm_radeon_freelist { unsigned int age; struct drm_buf *buf; @@ -471,6 +410,8 @@ extern int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv); extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +extern long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); extern int radeon_master_create(struct drm_device *dev, struct drm_master *master); extern void radeon_master_destroy(struct drm_device *dev, struct drm_master *master); @@ -1097,6 +1038,9 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index); # define RADEON_CSQ_PRIBM_INDBM (4 << 28) # define RADEON_CSQ_PRIPIO_INDPIO (15 << 28) +#define R300_CP_RESYNC_ADDR 0x0778 +#define R300_CP_RESYNC_DATA 0x077c + #define RADEON_AIC_CNTL 0x01d0 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) # define RS400_MSI_REARM (1 << 3) diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 9ad20350118f..621646752cd2 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -241,9 +241,12 @@ atombios_dac_setup(struct drm_encoder *encoder, int action) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); DAC_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0, num = 0; - /* fixme - fill in enc_priv for atom dac */ + struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; enum radeon_tv_std tv_std = TV_STD_NTSC; + if (dac_info->tv_std) + tv_std = dac_info->tv_std; + memset(&args, 0, sizeof(args)); switch (radeon_encoder->encoder_id) { @@ -296,9 +299,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); TV_ENCODER_CONTROL_PS_ALLOCATION args; int index = 0; - /* fixme - fill in enc_priv for atom dac */ + struct radeon_encoder_atom_dac 
*dac_info = radeon_encoder->enc_priv; enum radeon_tv_std tv_std = TV_STD_NTSC; + if (dac_info->tv_std) + tv_std = dac_info->tv_std; + memset(&args, 0, sizeof(args)); index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); @@ -537,6 +543,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) switch (connector->connector_type) { case DRM_MODE_CONNECTOR_DVII: + case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) return ATOM_ENCODER_MODE_HDMI; else if (radeon_connector->use_digital) @@ -546,7 +553,6 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) break; case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_HDMIA: - case DRM_MODE_CONNECTOR_HDMIB: default: if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) return ATOM_ENCODER_MODE_HDMI; diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h new file mode 100644 index 000000000000..797972e344a6 --- /dev/null +++ b/drivers/gpu/drm/radeon/radeon_family.h @@ -0,0 +1,97 @@ +/* + * Copyright 2008 Advanced Micro Devices, Inc. + * Copyright 2008 Red Hat Inc. + * Copyright 2009 Jerome Glisse. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alex Deucher + * Jerome Glisse + */ + +/* this file defines the CHIP_ and family flags used in the pciids, + * its is common between kms and non-kms because duplicating it and + * changing one place is fail. 
+ */ +#ifndef RADEON_FAMILY_H +#define RADEON_FAMILY_H +/* + * Radeon chip families + */ +enum radeon_family { + CHIP_R100, + CHIP_RV100, + CHIP_RS100, + CHIP_RV200, + CHIP_RS200, + CHIP_R200, + CHIP_RV250, + CHIP_RS300, + CHIP_RV280, + CHIP_R300, + CHIP_R350, + CHIP_RV350, + CHIP_RV380, + CHIP_R420, + CHIP_R423, + CHIP_RV410, + CHIP_RS400, + CHIP_RS480, + CHIP_RS600, + CHIP_RS690, + CHIP_RS740, + CHIP_RV515, + CHIP_R520, + CHIP_RV530, + CHIP_RV560, + CHIP_RV570, + CHIP_R580, + CHIP_R600, + CHIP_RV610, + CHIP_RV630, + CHIP_RV670, + CHIP_RV620, + CHIP_RV635, + CHIP_RS780, + CHIP_RS880, + CHIP_RV770, + CHIP_RV730, + CHIP_RV710, + CHIP_RV740, + CHIP_LAST, +}; + +/* + * Chip flags + */ +enum radeon_chip_flags { + RADEON_FAMILY_MASK = 0x0000ffffUL, + RADEON_FLAGS_MASK = 0xffff0000UL, + RADEON_IS_MOBILITY = 0x00010000UL, + RADEON_IS_IGP = 0x00020000UL, + RADEON_SINGLE_CRTC = 0x00040000UL, + RADEON_IS_AGP = 0x00080000UL, + RADEON_HAS_HIERZ = 0x00100000UL, + RADEON_IS_PCIE = 0x00200000UL, + RADEON_NEW_MEMMAP = 0x00400000UL, + RADEON_IS_PCI = 0x00800000UL, + RADEON_IS_IGPGART = 0x01000000UL, +}; +#endif diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index ebb58959f418..944e4fa78db5 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -45,71 +45,9 @@ struct radeon_fb_device { struct radeon_device *rdev; }; -static int radeon_fb_check_var(struct fb_var_screeninfo *var, - struct fb_info *info) -{ - int ret; - ret = drm_fb_helper_check_var(var, info); - if (ret) - return ret; - - /* big endian override for radeon endian workaround */ -#ifdef __BIG_ENDIAN - { - int depth; - switch (var->bits_per_pixel) { - case 16: - depth = (var->green.length == 6) ? 16 : 15; - break; - case 32: - depth = (var->transp.length > 0) ? 
32 : 24; - break; - default: - depth = var->bits_per_pixel; - break; - } - switch (depth) { - case 8: - var->red.offset = 0; - var->green.offset = 0; - var->blue.offset = 0; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 24: - var->red.offset = 8; - var->green.offset = 16; - var->blue.offset = 24; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 0; - var->transp.offset = 0; - break; - case 32: - var->red.offset = 8; - var->green.offset = 16; - var->blue.offset = 24; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - var->transp.length = 8; - var->transp.offset = 0; - break; - default: - return -EINVAL; - } - } -#endif - return 0; -} - static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, - .fb_check_var = radeon_fb_check_var, + .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_setcolreg = drm_fb_helper_setcolreg, .fb_fillrect = cfb_fillrect, @@ -206,6 +144,7 @@ int radeonfb_create(struct drm_device *dev, void *fbptr = NULL; unsigned long tmp; bool fb_tiled = false; /* useful for testing */ + u32 tiling_flags = 0; mode_cmd.width = surface_width; mode_cmd.height = surface_height; @@ -230,7 +169,22 @@ int radeonfb_create(struct drm_device *dev, robj = gobj->driver_private; if (fb_tiled) - radeon_object_set_tiling_flags(robj, RADEON_TILING_MACRO|RADEON_TILING_SURFACE, mode_cmd.pitch); + tiling_flags = RADEON_TILING_MACRO; + +#ifdef __BIG_ENDIAN + switch (mode_cmd.bpp) { + case 32: + tiling_flags |= RADEON_TILING_SWAP_32BIT; + break; + case 16: + tiling_flags |= RADEON_TILING_SWAP_16BIT; + default: + break; + } +#endif + + if (tiling_flags) + radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch); mutex_lock(&rdev->ddev->struct_mutex); fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); if (fb == NULL) { @@ -258,6 +212,7 @@ int radeonfb_create(struct drm_device *dev, goto out_unref; } + rdev->fbdev_info = info; rfbdev = info->par; rfbdev->helper.funcs = &radeon_fb_helper_funcs; rfbdev->helper.dev = dev; @@ -312,45 +267,6 @@ int radeonfb_create(struct drm_device *dev, DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitch); -#ifdef __BIG_ENDIAN - /* fill var sets defaults for this stuff - override - on big endian */ - switch (fb->depth) { - case 8: - info->var.red.offset = 0; - info->var.green.offset = 0; - info->var.blue.offset = 0; - info->var.red.length = 8; /* 8bit DAC */ - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 24: - info->var.red.offset = 8; - info->var.green.offset = 16; - info->var.blue.offset = 24; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 0; - break; - case 32: - info->var.red.offset = 8; - info->var.green.offset = 16; - info->var.blue.offset = 24; - info->var.red.length = 8; - info->var.green.length = 8; - info->var.blue.length = 8; - info->var.transp.offset = 0; - info->var.transp.length = 8; - break; - default: - break; - } -#endif - fb->fbdev = info; rfbdev->rfb = rfb; rfbdev->rdev = rdev; diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 2977539880fb..a931af065dd4 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -75,7 +75,6 @@ void 
radeon_gart_table_ram_free(struct radeon_device *rdev) int radeon_gart_table_vram_alloc(struct radeon_device *rdev) { - uint64_t gpu_addr; int r; if (rdev->gart.table.vram.robj == NULL) { @@ -88,6 +87,14 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev) return r; } } + return 0; +} + +int radeon_gart_table_vram_pin(struct radeon_device *rdev) +{ + uint64_t gpu_addr; + int r; + r = radeon_object_pin(rdev->gart.table.vram.robj, RADEON_GEM_DOMAIN_VRAM, &gpu_addr); if (r) { diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c index 56decda2a71f..a1bf11de308a 100644 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c @@ -422,3 +422,18 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return ret; } + +long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + lock_kernel(); /* XXX for now */ + ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c index 9836c705a952..b79ecc4a7cc4 100644 --- a/drivers/gpu/drm/radeon/radeon_irq.c +++ b/drivers/gpu/drm/radeon/radeon_irq.c @@ -188,6 +188,9 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS) u32 stat; u32 r500_disp_int; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return IRQ_NONE; + /* Only consider the bits we're interested in - others could be used * outside the DRM */ @@ -286,6 +289,9 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr drm_radeon_irq_emit_t *emit = data; int result; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return -EINVAL; + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv) { @@ -315,6 +321,9 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr return -EINVAL; } + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return -EINVAL; + return radeon_wait_irq(dev, irqwait->irq_seq); } @@ -326,6 +335,9 @@ void radeon_driver_irq_preinstall(struct drm_device * dev) (drm_radeon_private_t *) dev->dev_private; u32 dummy; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return; + /* Disable *all* interrupts */ if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) RADEON_WRITE(R500_DxMODE_INT_MASK, 0); @@ -345,6 +357,9 @@ int radeon_driver_irq_postinstall(struct drm_device *dev) dev->max_vblank_count = 0x001fffff; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return 0; + radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); return 0; @@ -357,6 +372,9 @@ void radeon_driver_irq_uninstall(struct drm_device * dev) if (!dev_priv) return; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + return; + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) RADEON_WRITE(R500_DxMODE_INT_MASK, 0); /* Disable *all* interrupts */ diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index dce09ada32bc..709bd892b3a9 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -54,12 +54,23 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) flags |= RADEON_IS_PCI; } + /* radeon_device_init should report only fatal error + * like memory allocation failure or iomapping failure, + * or memory manager initialization 
failure, it must + * properly initialize the GPU MC controller and permit + * VRAM allocation + */ r = radeon_device_init(rdev, dev, dev->pdev, flags); if (r) { - DRM_ERROR("Failed to initialize radeon, disabling IOCTL\n"); - radeon_device_fini(rdev); - kfree(rdev); - dev->dev_private = NULL; + DRM_ERROR("Fatal error while trying to initialize radeon.\n"); + return r; + } + /* Again modeset_init should fail only on fatal error + * otherwise it should provide enough functionalities + * for shadowfb to run + */ + r = radeon_modeset_init(rdev); + if (r) { return r; } return 0; @@ -69,6 +80,9 @@ int radeon_driver_unload_kms(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; + if (rdev == NULL) + return 0; + radeon_modeset_fini(rdev); radeon_device_fini(rdev); kfree(rdev); dev->dev_private = NULL; @@ -98,6 +112,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) case RADEON_INFO_NUM_Z_PIPES: value = rdev->num_z_pipes; break; + case RADEON_INFO_ACCEL_WORKING: + value = rdev->accel_working; + break; default: DRM_DEBUG("Invalid request %d\n", info->request); return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 0d29d15aa62b..2b997a15fb1f 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -341,6 +341,9 @@ void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) uint32_t crtc_pitch; switch (crtc->fb->bits_per_pixel) { + case 8: + format = 2; + break; case 15: /* 555 */ format = 3; break; @@ -401,11 +404,33 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; uint32_t crtc_pitch, pitch_pixels; uint32_t tiling_flags; + int format; + uint32_t gen_cntl_reg, gen_cntl_val; DRM_DEBUG("\n"); radeon_fb = to_radeon_framebuffer(crtc->fb); + switch (crtc->fb->bits_per_pixel) { + case 8: + format = 2; + break; + case 15: /* 555 */ + format = 3; + break; + case 16: /* 565 */ + format = 4; + break; + case 24: /* RGB */ + format = 5; + break; + case 32: /* xRGB */ + format = 6; + break; + default: + return false; + } + obj = radeon_fb->obj; if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { return -EINVAL; @@ -458,6 +483,9 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, } else { int offset = y * pitch_pixels + x; switch (crtc->fb->bits_per_pixel) { + case 8: + offset *= 1; + break; case 15: case 16: offset *= 2; @@ -476,6 +504,16 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y, base &= ~7; + if (radeon_crtc->crtc_id == 1) + gen_cntl_reg = RADEON_CRTC2_GEN_CNTL; + else + gen_cntl_reg = RADEON_CRTC_GEN_CNTL; + + gen_cntl_val = RREG32(gen_cntl_reg); + gen_cntl_val &= ~(0xf << 8); + gen_cntl_val |= (format << 8); + WREG32(gen_cntl_reg, gen_cntl_val); + crtc_offset = (u32)base; WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); @@ -526,6 +564,9 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod } switch (crtc->fb->bits_per_pixel) { + case 8: + format = 2; + break; case 15: /* 555 */ format = 3; break; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 0aaafcd2089f..b1547f700d73 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -1271,6 +1271,30 @@ static const struct drm_encoder_funcs 
radeon_legacy_tv_dac_enc_funcs = { .destroy = radeon_enc_destroy, }; + +static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct radeon_device *rdev = dev->dev_private; + struct radeon_encoder_int_tmds *tmds = NULL; + bool ret; + + tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); + + if (!tmds) + return NULL; + + if (rdev->is_atom_bios) + ret = radeon_atombios_get_tmds_info(encoder, tmds); + else + ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); + + if (ret == false) + radeon_legacy_get_tmds_info_from_table(encoder, tmds); + + return tmds; +} + void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) { @@ -1317,10 +1341,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t case ENCODER_OBJECT_ID_INTERNAL_TMDS1: drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); - if (rdev->is_atom_bios) - radeon_encoder->enc_priv = radeon_atombios_get_tmds_info(radeon_encoder); - else - radeon_encoder->enc_priv = radeon_combios_get_tmds_info(radeon_encoder); + radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); break; case ENCODER_OBJECT_ID_INTERNAL_DAC1: drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 523d6cbd4f08..570a58729daf 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -175,6 +175,15 @@ struct radeon_mode_info { enum radeon_connector_table connector_table; bool mode_config_initialized; struct radeon_crtc *crtcs[2]; + /* DVI-I properties */ + struct drm_property *coherent_mode_property; + /* DAC enable load detect */ + struct drm_property *load_detect_property; + /* TV standard load detect */ + struct drm_property *tv_std_property; + /* legacy TMDS PLL detect */ + struct drm_property *tmds_pll_property; + }; struct radeon_native_mode { @@ -304,6 +313,7 @@ struct radeon_connector { and get modes due to analog/digital/tvencoder */ struct edid *edid; void *con_priv; + bool dac_load_detect; }; struct radeon_framebuffer { @@ -364,16 +374,18 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev); extern bool radeon_combios_get_clock_info(struct drm_device *dev); extern struct radeon_encoder_atom_dig * radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); -extern struct radeon_encoder_int_tmds * -radeon_atombios_get_tmds_info(struct radeon_encoder *encoder); +bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds); +bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds); +bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, + struct radeon_encoder_int_tmds *tmds); extern struct radeon_encoder_primary_dac * radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); extern struct radeon_encoder_tv_dac * radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder); extern struct radeon_encoder_lvds * radeon_combios_get_lvds_info(struct radeon_encoder *encoder); -extern struct radeon_encoder_int_tmds * -radeon_combios_get_tmds_info(struct radeon_encoder *encoder); extern void radeon_combios_get_ext_tmds_info(struct 
radeon_encoder *encoder); extern struct radeon_encoder_tv_dac * radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); @@ -383,6 +395,8 @@ extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock); extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev); +extern void radeon_save_bios_scratch_regs(struct radeon_device *rdev); +extern void radeon_restore_bios_scratch_regs(struct radeon_device *rdev); extern void radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc); extern void diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index b85fb83d7ae8..73af463b7a59 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -188,6 +188,7 @@ int radeon_object_kmap(struct radeon_object *robj, void **ptr) if (ptr) { *ptr = robj->kptr; } + radeon_object_check_tiling(robj, 0, 0); return 0; } @@ -200,6 +201,7 @@ void radeon_object_kunmap(struct radeon_object *robj) } robj->kptr = NULL; spin_unlock(&robj->tobj.lock); + radeon_object_check_tiling(robj, 0, 0); ttm_bo_kunmap(&robj->kmap); } @@ -369,6 +371,14 @@ void radeon_object_force_delete(struct radeon_device *rdev) int radeon_object_init(struct radeon_device *rdev) { + /* Add an MTRR for the VRAM */ + rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, + MTRR_TYPE_WRCOMB, 1); + DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n", + rdev->mc.mc_vram_size >> 20, + (unsigned long long)rdev->mc.aper_size >> 20); + DRM_INFO("RAM width %dbits %cDR\n", + rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 
'D' : 'S'); return radeon_ttm_init(rdev); } diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c index aa9837a6aa75..747b4bffb84b 100644 --- a/drivers/gpu/drm/radeon/radeon_ring.c +++ b/drivers/gpu/drm/radeon/radeon_ring.c @@ -56,10 +56,12 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) set_bit(i, rdev->ib_pool.alloc_bm); rdev->ib_pool.ibs[i].length_dw = 0; *ib = &rdev->ib_pool.ibs[i]; + mutex_unlock(&rdev->ib_pool.mutex); goto out; } if (list_empty(&rdev->ib_pool.scheduled_ibs)) { /* we go do nothings here */ + mutex_unlock(&rdev->ib_pool.mutex); DRM_ERROR("all IB allocated none scheduled.\n"); r = -EINVAL; goto out; @@ -69,10 +71,13 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) struct radeon_ib, list); if (nib->fence == NULL) { /* we go do nothings here */ + mutex_unlock(&rdev->ib_pool.mutex); DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); r = -EINVAL; goto out; } + mutex_unlock(&rdev->ib_pool.mutex); + r = radeon_fence_wait(nib->fence, false); if (r) { DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, @@ -81,12 +86,17 @@ int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib) goto out; } radeon_fence_unref(&nib->fence); + nib->length_dw = 0; + + /* scheduled list is accessed here */ + mutex_lock(&rdev->ib_pool.mutex); list_del(&nib->list); INIT_LIST_HEAD(&nib->list); + mutex_unlock(&rdev->ib_pool.mutex); + *ib = nib; out: - mutex_unlock(&rdev->ib_pool.mutex); if (r) { radeon_fence_unref(&fence); } else { @@ -110,9 +120,10 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib) return; } list_del(&tmp->list); - if (tmp->fence) { + INIT_LIST_HEAD(&tmp->list); + if (tmp->fence) radeon_fence_unref(&tmp->fence); - } + tmp->length_dw = 0; clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); mutex_unlock(&rdev->ib_pool.mutex); @@ -122,25 +133,24 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) { int r = 0; - mutex_lock(&rdev->ib_pool.mutex); if (!ib->length_dw || !rdev->cp.ready) { /* TODO: Nothings in the ib we should report. */ - mutex_unlock(&rdev->ib_pool.mutex); DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); return -EINVAL; } + /* 64 dwords should be enough for fence too */ r = radeon_ring_lock(rdev, 64); if (r) { DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); - mutex_unlock(&rdev->ib_pool.mutex); return r; } radeon_ring_ib_execute(rdev, ib); radeon_fence_emit(rdev, ib->fence); - radeon_ring_unlock_commit(rdev); + mutex_lock(&rdev->ib_pool.mutex); list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); mutex_unlock(&rdev->ib_pool.mutex); + radeon_ring_unlock_commit(rdev); return 0; } @@ -151,6 +161,8 @@ int radeon_ib_pool_init(struct radeon_device *rdev) int i; int r = 0; + if (rdev->ib_pool.robj) + return 0; /* Allocate 1M object buffer */ INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, diff --git a/drivers/gpu/drm/radeon/radeon_share.h b/drivers/gpu/drm/radeon/radeon_share.h deleted file mode 100644 index 5f9e358ab506..000000000000 --- a/drivers/gpu/drm/radeon/radeon_share.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright 2008 Advanced Micro Devices, Inc. - * Copyright 2008 Red Hat Inc. - * Copyright 2009 Jerome Glisse. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Dave Airlie - * Alex Deucher - * Jerome Glisse - */ -#ifndef __RADEON_SHARE_H__ -#define __RADEON_SHARE_H__ - -/* Common */ -struct radeon_device; -struct radeon_cs_parser; -int radeon_clocks_init(struct radeon_device *rdev); -void radeon_clocks_fini(struct radeon_device *rdev); -void radeon_scratch_init(struct radeon_device *rdev); -void radeon_surface_init(struct radeon_device *rdev); -int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); - - -/* R100, RV100, RS100, RV200, RS200, R200, RV250, RS300, RV280 */ -void r100_vram_init_sizes(struct radeon_device *rdev); - - -/* R300, R350, RV350, RV380 */ -struct r300_asic { - const unsigned *reg_safe_bm; - unsigned reg_safe_bm_size; -}; - - -/* RS690, RS740 */ -void rs690_line_buffer_adjust(struct radeon_device *rdev, - struct drm_display_mode *mode1, - struct drm_display_mode *mode2); - - -/* RV515 */ -void rv515_bandwidth_avivo_update(struct radeon_device *rdev); - - -/* R600, RV610, RV630, RV620, RV635, RV670, RS780, RS880 */ -bool r600_card_posted(struct radeon_device *rdev); -void r600_cp_stop(struct radeon_device *rdev); -void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); -int r600_cp_resume(struct radeon_device *rdev); -int r600_count_pipe_bits(uint32_t val); -int r600_gart_clear_page(struct radeon_device *rdev, int i); -int r600_mc_wait_for_idle(struct radeon_device *rdev); -void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); -int r600_ib_test(struct radeon_device *rdev); -int r600_ring_test(struct radeon_device *rdev); -int r600_wb_init(struct radeon_device *rdev); -void r600_wb_fini(struct radeon_device *rdev); -void r600_scratch_init(struct radeon_device *rdev); -int r600_blit_init(struct radeon_device *rdev); -void r600_blit_fini(struct radeon_device *rdev); -int r600_cp_init_microcode(struct radeon_device *rdev); -struct r600_asic { - unsigned max_pipes; - unsigned max_tile_pipes; - unsigned max_simds; - unsigned max_backends; - unsigned max_gprs; - unsigned max_threads; - unsigned max_stack_entries; - unsigned max_hw_contexts; - unsigned max_gs_threads; - unsigned sx_max_export_size; - unsigned sx_max_export_pos_size; - unsigned sx_max_export_smx_size; - unsigned sq_num_cf_insts; -}; - -/* RV770, RV7300, RV710 */ -struct rv770_asic { - unsigned max_pipes; - unsigned max_tile_pipes; - unsigned max_simds; - unsigned max_backends; - unsigned max_gprs; - unsigned max_threads; - unsigned 
max_stack_entries; - unsigned max_hw_contexts; - unsigned max_gs_threads; - unsigned sx_max_export_size; - unsigned sx_max_export_pos_size; - unsigned sx_max_export_smx_size; - unsigned sq_num_cf_insts; - unsigned sx_num_of_sets; - unsigned sc_prim_fifo_size; - unsigned sc_hiz_tile_fifo_size; - unsigned sc_earlyz_tile_fifo_fize; -}; - -#endif diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index aad0c6fafcf4..38537d971a3e 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -3034,7 +3034,10 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil value = GET_SCRATCH(dev_priv, 2); break; case RADEON_PARAM_IRQ_NR: - value = drm_dev_to_irq(dev); + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600) + value = 0; + else + value = drm_dev_to_irq(dev); break; case RADEON_PARAM_GART_BASE: value = dev_priv->gart_vm_start; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index 8c3ea7e36060..a3fbdad938c7 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -29,7 +29,6 @@ #include <drm/drmP.h> #include "radeon_reg.h" #include "radeon.h" -#include "radeon_share.h" /* rs400,rs480 depends on : */ void r100_hdp_reset(struct radeon_device *rdev); @@ -93,20 +92,41 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev) WREG32_MC(RS480_GART_CACHE_CNTRL, 0); } -int rs400_gart_enable(struct radeon_device *rdev) +int rs400_gart_init(struct radeon_device *rdev) { - uint32_t size_reg; - uint32_t tmp; int r; + if (rdev->gart.table.ram.ptr) { + WARN(1, "RS400 GART already initialized.\n"); + return 0; + } + /* Check gart size */ + switch(rdev->mc.gtt_size / (1024 * 1024)) { + case 32: + case 64: + case 128: + case 256: + case 512: + case 1024: + case 2048: + break; + default: + return -EINVAL; + } /* Initialize common gart structure */ r = radeon_gart_init(rdev); - if (r) { + if (r) return r; - } - if (rs400_debugfs_pcie_gart_info_init(rdev)) { + if (rs400_debugfs_pcie_gart_info_init(rdev)) DRM_ERROR("Failed to register debugfs file for RS400 GART !\n"); - } + rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; + return radeon_gart_table_ram_alloc(rdev); +} + +int rs400_gart_enable(struct radeon_device *rdev) +{ + uint32_t size_reg; + uint32_t tmp; tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; @@ -137,13 +157,6 @@ int rs400_gart_enable(struct radeon_device *rdev) default: return -EINVAL; } - if (rdev->gart.table.ram.ptr == NULL) { - rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; - r = radeon_gart_table_ram_alloc(rdev); - if (r) { - return r; - } - } /* It should be fine to program it to max value */ if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) { WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF); @@ -202,6 +215,13 @@ void rs400_gart_disable(struct radeon_device *rdev) WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0); } +void rs400_gart_fini(struct radeon_device *rdev) +{ + rs400_gart_disable(rdev); + radeon_gart_table_ram_free(rdev); + radeon_gart_fini(rdev); +} + int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) { uint32_t entry; @@ -256,14 +276,12 @@ int rs400_mc_init(struct radeon_device *rdev) (void)RREG32(RADEON_HOST_PATH_CNTL); WREG32(RADEON_HOST_PATH_CNTL, tmp); (void)RREG32(RADEON_HOST_PATH_CNTL); + return 0; } void rs400_mc_fini(struct radeon_device *rdev) { - rs400_gart_disable(rdev); - radeon_gart_table_ram_free(rdev); - radeon_gart_fini(rdev); } diff 
--git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 1b8d62f5e73c..0e791e26def3 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -28,6 +28,7 @@ #include "drmP.h" #include "radeon_reg.h" #include "radeon.h" +#include "avivod.h" #include "rs600_reg_safe.h" @@ -68,22 +69,35 @@ void rs600_gart_tlb_flush(struct radeon_device *rdev) tmp = RREG32_MC(RS600_MC_PT0_CNTL); } -int rs600_gart_enable(struct radeon_device *rdev) +int rs600_gart_init(struct radeon_device *rdev) { - uint32_t tmp; - int i; int r; + if (rdev->gart.table.vram.robj) { + WARN(1, "RS600 GART already initialized.\n"); + return 0; + } /* Initialize common gart structure */ r = radeon_gart_init(rdev); if (r) { return r; } rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; - r = radeon_gart_table_vram_alloc(rdev); - if (r) { - return r; + return radeon_gart_table_vram_alloc(rdev); +} + +int rs600_gart_enable(struct radeon_device *rdev) +{ + uint32_t tmp; + int r, i; + + if (rdev->gart.table.vram.robj == NULL) { + dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; } + r = radeon_gart_table_vram_pin(rdev); + if (r) + return r; /* FIXME: setup default page */ WREG32_MC(RS600_MC_PT0_CNTL, (RS600_EFFECTIVE_L2_CACHE_SIZE(6) | @@ -138,8 +152,17 @@ void rs600_gart_disable(struct radeon_device *rdev) tmp = RREG32_MC(RS600_MC_CNTL1); tmp &= ~RS600_ENABLE_PAGE_TABLES; WREG32_MC(RS600_MC_CNTL1, tmp); - radeon_object_kunmap(rdev->gart.table.vram.robj); - radeon_object_unpin(rdev->gart.table.vram.robj); + if (rdev->gart.table.vram.robj) { + radeon_object_kunmap(rdev->gart.table.vram.robj); + radeon_object_unpin(rdev->gart.table.vram.robj); + } +} + +void rs600_gart_fini(struct radeon_device *rdev) +{ + rs600_gart_disable(rdev); + radeon_gart_table_vram_free(rdev); + radeon_gart_fini(rdev); } #define R600_PTE_VALID (1 << 0) @@ -175,6 +198,8 @@ void rs600_mc_disable_clients(struct radeon_device *rdev) "programming pipes. 
Bad things might happen.\n"); } + radeon_avivo_vga_render_disable(rdev); + tmp = RREG32(AVIVO_D1VGA_CONTROL); WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); tmp = RREG32(AVIVO_D2VGA_CONTROL); @@ -235,9 +260,6 @@ int rs600_mc_init(struct radeon_device *rdev) void rs600_mc_fini(struct radeon_device *rdev) { - rs600_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } @@ -253,11 +275,9 @@ int rs600_irq_set(struct radeon_device *rdev) tmp |= RADEON_SW_INT_ENABLE; } if (rdev->irq.crtc_vblank_int[0]) { - tmp |= AVIVO_DISPLAY_INT_STATUS; mode_int |= AVIVO_D1MODE_INT_MASK; } if (rdev->irq.crtc_vblank_int[1]) { - tmp |= AVIVO_DISPLAY_INT_STATUS; mode_int |= AVIVO_D2MODE_INT_MASK; } WREG32(RADEON_GEN_INT_CNTL, tmp); diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 839595b00728..0f585ca8276d 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -94,9 +94,6 @@ int rs690_mc_init(struct radeon_device *rdev) void rs690_mc_fini(struct radeon_device *rdev) { - rs400_gart_disable(rdev); - radeon_gart_table_ram_free(rdev); - radeon_gart_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 99e397f16384..fd799748e7d8 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -29,7 +29,6 @@ #include "drmP.h" #include "rv515d.h" #include "radeon.h" -#include "radeon_share.h" #include "rv515_reg_safe.h" /* rv515 depends on : */ @@ -38,8 +37,6 @@ int r100_cp_reset(struct radeon_device *rdev); int r100_rb2d_reset(struct radeon_device *rdev); int r100_gui_wait_for_idle(struct radeon_device *rdev); int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); -int rv370_pcie_gart_enable(struct radeon_device *rdev); -void rv370_pcie_gart_disable(struct radeon_device *rdev); void r420_pipes_init(struct radeon_device *rdev); void rs600_mc_disable_clients(struct radeon_device *rdev); void rs600_disable_vga(struct radeon_device *rdev); @@ -127,9 +124,6 @@ int rv515_mc_init(struct radeon_device *rdev) void rv515_mc_fini(struct radeon_device *rdev) { - rv370_pcie_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); } diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 57765f6d5b20..b574c73a5109 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -29,7 +29,7 @@ #include <linux/platform_device.h> #include "drmP.h" #include "radeon.h" -#include "radeon_share.h" +#include "radeon_drm.h" #include "rv770d.h" #include "avivod.h" #include "atom.h" @@ -49,18 +49,13 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev) u32 tmp; int r, i; - /* Initialize common gart structure */ - r = radeon_gart_init(rdev); - if (r) { - return r; + if (rdev->gart.table.vram.robj == NULL) { + dev_err(rdev->dev, "No VRAM object for PCIE GART.\n"); + return -EINVAL; } - rdev->gart.table_size = rdev->gart.num_gpu_pages * 8; - r = radeon_gart_table_vram_alloc(rdev); - if (r) { + r = radeon_gart_table_vram_pin(rdev); + if (r) return r; - } - for (i = 0; i < rdev->gart.num_gpu_pages; i++) - r600_gart_clear_page(rdev, i); /* Setup L2 cache */ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING | ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE | @@ -99,10 +94,6 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) u32 tmp; int i; - /* Clear ptes*/ - for (i = 0; i < rdev->gart.num_gpu_pages; i++) - r600_gart_clear_page(rdev, i); - r600_pcie_gart_tlb_flush(rdev); /* 
Disable all tables */ for (i = 0; i < 7; i++) WREG32(VM_CONTEXT0_CNTL + (i * 4), 0); @@ -121,6 +112,17 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev) WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp); + if (rdev->gart.table.vram.robj) { + radeon_object_kunmap(rdev->gart.table.vram.robj); + radeon_object_unpin(rdev->gart.table.vram.robj); + } +} + +void rv770_pcie_gart_fini(struct radeon_device *rdev) +{ + rv770_pcie_gart_disable(rdev); + radeon_gart_table_vram_free(rdev); + radeon_gart_fini(rdev); } @@ -226,6 +228,10 @@ static void rv770_mc_resume(struct radeon_device *rdev) WREG32(D1VGA_CONTROL, d1vga_control); WREG32(D2VGA_CONTROL, d2vga_control); WREG32(VGA_RENDER_CONTROL, vga_render_control); + + /* we need to own VRAM, so turn off the VGA renderer here + * to stop it overwriting our objects */ + radeon_avivo_vga_render_disable(rdev); } @@ -673,11 +679,11 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(SQ_CONFIG, sq_config); WREG32(SQ_GPR_RESOURCE_MGMT_1, (NUM_PS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | - NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | - NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2))); + NUM_VS_GPRS((rdev->config.rv770.max_gprs * 24)/64) | + NUM_CLAUSE_TEMP_GPRS(((rdev->config.rv770.max_gprs * 24)/64)/2))); WREG32(SQ_GPR_RESOURCE_MGMT_2, (NUM_GS_GPRS((rdev->config.rv770.max_gprs * 7)/64) | - NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64))); + NUM_ES_GPRS((rdev->config.rv770.max_gprs * 7)/64))); sq_thread_resource_mgmt = (NUM_PS_THREADS((rdev->config.rv770.max_threads * 4)/8) | NUM_VS_THREADS((rdev->config.rv770.max_threads * 2)/8) | @@ -709,14 +715,14 @@ static void rv770_gpu_init(struct radeon_device *rdev) WREG32(SQ_DYN_GPR_SIZE_SIMD_AB_7, sq_dyn_gpr_size_simd_ab_0); WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) | - FORCE_EOV_MAX_REZ_CNT(255))); + FORCE_EOV_MAX_REZ_CNT(255))); if (rdev->family == CHIP_RV710) WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(TC_ONLY) | - AUTO_INVLD_EN(ES_AND_GS_AUTO))); + AUTO_INVLD_EN(ES_AND_GS_AUTO))); else WREG32(VGT_CACHE_INVALIDATION, (CACHE_INVALIDATION(VC_AND_TC) | - AUTO_INVLD_EN(ES_AND_GS_AUTO))); + AUTO_INVLD_EN(ES_AND_GS_AUTO))); switch (rdev->family) { case CHIP_RV770: @@ -840,19 +846,28 @@ int rv770_mc_init(struct radeon_device *rdev) } int rv770_gpu_reset(struct radeon_device *rdev) { - /* FIXME: implement */ - return 0; + /* FIXME: implement any rv770 specific bits */ + return r600_gpu_reset(rdev); } -int rv770_resume(struct radeon_device *rdev) +static int rv770_startup(struct radeon_device *rdev) { int r; + radeon_gpu_reset(rdev); rv770_mc_resume(rdev); r = rv770_pcie_gart_enable(rdev); if (r) return r; rv770_gpu_init(rdev); + + r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM, + &rdev->r600_blit.shader_gpu_addr); + if (r) { + DRM_ERROR("failed to pin blit object %d\n", r); + return r; + } + r = radeon_ring_init(rdev, rdev->cp.ring_size); if (r) return r; @@ -868,10 +883,49 @@ int rv770_resume(struct radeon_device *rdev) return 0; } +int rv770_resume(struct radeon_device *rdev) +{ + int r; + + if (radeon_gpu_reset(rdev)) { + /* FIXME: what do we want to do here ? 
*/ + } + /* post card */ + if (rdev->is_atom_bios) { + atom_asic_init(rdev->mode_info.atom_context); + } else { + radeon_combios_asic_init(rdev->ddev); + } + /* Initialize clocks */ + r = radeon_clocks_init(rdev); + if (r) { + return r; + } + + r = rv770_startup(rdev); + if (r) { + DRM_ERROR("r600 startup failed on resume\n"); + return r; + } + + r = radeon_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + return r; + } + return r; + +} + int rv770_suspend(struct radeon_device *rdev) { /* FIXME: we should wait for ring to be empty */ r700_cp_stop(rdev); + rdev->cp.ready = false; + rv770_pcie_gart_disable(rdev); + + /* unpin shaders bo */ + radeon_object_unpin(rdev->r600_blit.shader_obj); return 0; } @@ -913,6 +967,7 @@ int rv770_init(struct radeon_device *rdev) r600_scratch_init(rdev); /* Initialize surface registers */ radeon_surface_init(rdev); + radeon_get_clock_info(rdev->ddev); r = radeon_clocks_init(rdev); if (r) return r; @@ -945,7 +1000,18 @@ int rv770_init(struct radeon_device *rdev) } } - r = rv770_resume(rdev); + r = r600_pcie_gart_init(rdev); + if (r) + return r; + + rdev->accel_working = true; + r = r600_blit_init(rdev); + if (r) { + DRM_ERROR("radeon: failled blitter (%d).\n", r); + rdev->accel_working = false; + } + + r = rv770_startup(rdev); if (r) { if (rdev->flags & RADEON_IS_AGP) { /* Retry with disabling AGP */ @@ -953,33 +1019,30 @@ int rv770_init(struct radeon_device *rdev) rdev->flags &= ~RADEON_IS_AGP; return rv770_init(rdev); } - return r; + rdev->accel_working = false; } - r = r600_blit_init(rdev); - if (r) { - DRM_ERROR("radeon: failled blitter (%d).\n", r); - return r; - } - r = radeon_ib_pool_init(rdev); - if (r) { - DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); - return r; - } - r = radeon_ib_test(rdev); - if (r) { - DRM_ERROR("radeon: failled testing IB (%d).\n", r); - return r; + if (rdev->accel_working) { + r = radeon_ib_pool_init(rdev); + if (r) { + DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); + rdev->accel_working = false; + } + r = radeon_ib_test(rdev); + if (r) { + DRM_ERROR("radeon: failled testing IB (%d).\n", r); + rdev->accel_working = false; + } } return 0; } void rv770_fini(struct radeon_device *rdev) { + rv770_suspend(rdev); + r600_blit_fini(rdev); radeon_ring_fini(rdev); - rv770_pcie_gart_disable(rdev); - radeon_gart_table_vram_free(rdev); - radeon_gart_fini(rdev); + rv770_pcie_gart_fini(rdev); radeon_gem_fini(rdev); radeon_fence_driver_fini(rdev); radeon_clocks_fini(rdev); diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig new file mode 100644 index 000000000000..790e675b13eb --- /dev/null +++ b/drivers/gpu/vga/Kconfig @@ -0,0 +1,10 @@ +config VGA_ARB + bool "VGA Arbitration" if EMBEDDED + default y + depends on PCI + help + Some "legacy" VGA devices implemented on PCI typically have the same + hard-decoded addresses as they did on ISA. When multiple PCI devices + are accessed at same time they need some kind of coordination. Please + see Documentation/vgaarbiter.txt for more details. Select this to + enable VGA arbiter. 
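
Editor's note: the VGA_ARB Kconfig entry above only builds the arbiter core; a GPU driver still opts in through the client interface that vgaarb.c exports further down in this patch (vga_client_register(), vga_get()/vga_put()). The fragment below is a minimal sketch of how a PCI video driver might hook itself up to that interface; the my_gpu_* names, the private structure and the chosen decode mask are illustrative assumptions, not code from this patch.

#include <linux/pci.h>
#include <linux/vgaarb.h>

struct my_gpu {                         /* hypothetical driver private data */
	struct pci_dev *pdev;
	bool irq_enabled;
};

/* Arbiter callback: called when our IRQ must be quiesced while another
 * card temporarily owns the legacy VGA resources. */
static void my_gpu_vgaarb_irq(void *cookie, bool state)
{
	struct my_gpu *gpu = cookie;

	gpu->irq_enabled = state;       /* a real driver would tear down / re-arm its IRQ here */
}

/* Arbiter callback: report which resources we still decode once the
 * driver has taken over the hardware (here: no legacy ranges at all). */
static unsigned int my_gpu_vgaarb_decode(void *cookie, bool new_state)
{
	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

static int my_gpu_bind_vgaarb(struct my_gpu *gpu)
{
	int r;

	vga_client_register(gpu->pdev, gpu,
			    my_gpu_vgaarb_irq, my_gpu_vgaarb_decode);

	/* Hold the legacy ranges only around accesses that need them,
	 * e.g. while posting the card through its VGA aliases. */
	r = vga_get(gpu->pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 1);
	if (r)
		return r;
	/* ... touch legacy VGA registers here ... */
	vga_put(gpu->pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	return 0;
}
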
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile new file mode 100644 index 000000000000..7cc8c1ed645b --- /dev/null +++ b/drivers/gpu/vga/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_VGA_ARB) += vgaarb.o diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c new file mode 100644 index 000000000000..1ac0c93603c9 --- /dev/null +++ b/drivers/gpu/vga/vgaarb.c @@ -0,0 +1,1205 @@ +/* + * vgaarb.c + * + * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org> + * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com> + * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org> + * + * Implements the VGA arbitration. For details refer to + * Documentation/vgaarbiter.txt + */ + +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/list.h> +#include <linux/sched.h> +#include <linux/wait.h> +#include <linux/spinlock.h> +#include <linux/poll.h> +#include <linux/miscdevice.h> + +#include <linux/uaccess.h> + +#include <linux/vgaarb.h> + +static void vga_arbiter_notify_clients(void); +/* + * We keep a list of all vga devices in the system to speed + * up the various operations of the arbiter + */ +struct vga_device { + struct list_head list; + struct pci_dev *pdev; + unsigned int decodes; /* what does it decodes */ + unsigned int owns; /* what does it owns */ + unsigned int locks; /* what does it locks */ + unsigned int io_lock_cnt; /* legacy IO lock count */ + unsigned int mem_lock_cnt; /* legacy MEM lock count */ + unsigned int io_norm_cnt; /* normal IO count */ + unsigned int mem_norm_cnt; /* normal MEM count */ + + /* allow IRQ enable/disable hook */ + void *cookie; + void (*irq_set_state)(void *cookie, bool enable); + unsigned int (*set_vga_decode)(void *cookie, bool decode); +}; + +static LIST_HEAD(vga_list); +static int vga_count, vga_decode_count; +static bool vga_arbiter_used; +static DEFINE_SPINLOCK(vga_lock); +static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue); + + +static const char *vga_iostate_to_str(unsigned int iostate) +{ + /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */ + iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + switch (iostate) { + case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM: + return "io+mem"; + case VGA_RSRC_LEGACY_IO: + return "io"; + case VGA_RSRC_LEGACY_MEM: + return "mem"; + } + return "none"; +} + +static int vga_str_to_iostate(char *buf, int str_size, int *io_state) +{ + /* we could in theory hand out locks on IO and mem + * separately to userspace but it can cause deadlocks */ + if (strncmp(buf, "none", 4) == 0) { + *io_state = VGA_RSRC_NONE; + return 1; + } + + /* XXX We're not chekcing the str_size! 
*/ + if (strncmp(buf, "io+mem", 6) == 0) + goto both; + else if (strncmp(buf, "io", 2) == 0) + goto both; + else if (strncmp(buf, "mem", 3) == 0) + goto both; + return 0; +both: + *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + return 1; +} + +#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE +/* this is only used a cookie - it should not be dereferenced */ +static struct pci_dev *vga_default; +#endif + +static void vga_arb_device_card_gone(struct pci_dev *pdev); + +/* Find somebody in our list */ +static struct vga_device *vgadev_find(struct pci_dev *pdev) +{ + struct vga_device *vgadev; + + list_for_each_entry(vgadev, &vga_list, list) + if (pdev == vgadev->pdev) + return vgadev; + return NULL; +} + +/* Returns the default VGA device (vgacon's babe) */ +#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE +struct pci_dev *vga_default_device(void) +{ + return vga_default; +} +#endif + +static inline void vga_irq_set_state(struct vga_device *vgadev, bool state) +{ + if (vgadev->irq_set_state) + vgadev->irq_set_state(vgadev->cookie, state); +} + + +/* If we don't ever use VGA arb we should avoid + turning off anything anywhere due to old X servers getting + confused about the boot device not being VGA */ +static void vga_check_first_use(void) +{ + /* we should inform all GPUs in the system that + * VGA arb has occured and to try and disable resources + * if they can */ + if (!vga_arbiter_used) { + vga_arbiter_used = true; + vga_arbiter_notify_clients(); + } +} + +static struct vga_device *__vga_tryget(struct vga_device *vgadev, + unsigned int rsrc) +{ + unsigned int wants, legacy_wants, match; + struct vga_device *conflict; + unsigned int pci_bits; + /* Account for "normal" resources to lock. If we decode the legacy, + * counterpart, we need to request it as well + */ + if ((rsrc & VGA_RSRC_NORMAL_IO) && + (vgadev->decodes & VGA_RSRC_LEGACY_IO)) + rsrc |= VGA_RSRC_LEGACY_IO; + if ((rsrc & VGA_RSRC_NORMAL_MEM) && + (vgadev->decodes & VGA_RSRC_LEGACY_MEM)) + rsrc |= VGA_RSRC_LEGACY_MEM; + + pr_devel("%s: %d\n", __func__, rsrc); + pr_devel("%s: owns: %d\n", __func__, vgadev->owns); + + /* Check what resources we need to acquire */ + wants = rsrc & ~vgadev->owns; + + /* We already own everything, just mark locked & bye bye */ + if (wants == 0) + goto lock_them; + + /* We don't need to request a legacy resource, we just enable + * appropriate decoding and go + */ + legacy_wants = wants & VGA_RSRC_LEGACY_MASK; + if (legacy_wants == 0) + goto enable_them; + + /* Ok, we don't, let's find out how we need to kick off */ + list_for_each_entry(conflict, &vga_list, list) { + unsigned int lwants = legacy_wants; + unsigned int change_bridge = 0; + + /* Don't conflict with myself */ + if (vgadev == conflict) + continue; + + /* Check if the architecture allows a conflict between those + * 2 devices or if they are on separate domains + */ + if (!vga_conflicts(vgadev->pdev, conflict->pdev)) + continue; + + /* We have a possible conflict. before we go further, we must + * check if we sit on the same bus as the conflicting device. + * if we don't, then we must tie both IO and MEM resources + * together since there is only a single bit controlling + * VGA forwarding on P2P bridges + */ + if (vgadev->pdev->bus != conflict->pdev->bus) { + change_bridge = 1; + lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + } + + /* Check if the guy has a lock on the resource. If he does, + * return the conflicting entry + */ + if (conflict->locks & lwants) + return conflict; + + /* Ok, now check if he owns the resource we want. 
We don't need + * to check "decodes" since it should be impossible to own + * own legacy resources you don't decode unless I have a bug + * in this code... + */ + WARN_ON(conflict->owns & ~conflict->decodes); + match = lwants & conflict->owns; + if (!match) + continue; + + /* looks like he doesn't have a lock, we can steal + * them from him + */ + vga_irq_set_state(conflict, false); + + pci_bits = 0; + if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) + pci_bits |= PCI_COMMAND_MEMORY; + if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) + pci_bits |= PCI_COMMAND_IO; + + pci_set_vga_state(conflict->pdev, false, pci_bits, + change_bridge); + conflict->owns &= ~lwants; + /* If he also owned non-legacy, that is no longer the case */ + if (lwants & VGA_RSRC_LEGACY_MEM) + conflict->owns &= ~VGA_RSRC_NORMAL_MEM; + if (lwants & VGA_RSRC_LEGACY_IO) + conflict->owns &= ~VGA_RSRC_NORMAL_IO; + } + +enable_them: + /* ok dude, we got it, everybody conflicting has been disabled, let's + * enable us. Make sure we don't mark a bit in "owns" that we don't + * also have in "decodes". We can lock resources we don't decode but + * not own them. + */ + pci_bits = 0; + if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) + pci_bits |= PCI_COMMAND_MEMORY; + if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) + pci_bits |= PCI_COMMAND_IO; + pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK)); + + vga_irq_set_state(vgadev, true); + vgadev->owns |= (wants & vgadev->decodes); +lock_them: + vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK); + if (rsrc & VGA_RSRC_LEGACY_IO) + vgadev->io_lock_cnt++; + if (rsrc & VGA_RSRC_LEGACY_MEM) + vgadev->mem_lock_cnt++; + if (rsrc & VGA_RSRC_NORMAL_IO) + vgadev->io_norm_cnt++; + if (rsrc & VGA_RSRC_NORMAL_MEM) + vgadev->mem_norm_cnt++; + + return NULL; +} + +static void __vga_put(struct vga_device *vgadev, unsigned int rsrc) +{ + unsigned int old_locks = vgadev->locks; + + pr_devel("%s\n", __func__); + + /* Update our counters, and account for equivalent legacy resources + * if we decode them + */ + if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) { + vgadev->io_norm_cnt--; + if (vgadev->decodes & VGA_RSRC_LEGACY_IO) + rsrc |= VGA_RSRC_LEGACY_IO; + } + if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) { + vgadev->mem_norm_cnt--; + if (vgadev->decodes & VGA_RSRC_LEGACY_MEM) + rsrc |= VGA_RSRC_LEGACY_MEM; + } + if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0) + vgadev->io_lock_cnt--; + if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0) + vgadev->mem_lock_cnt--; + + /* Just clear lock bits, we do lazy operations so we don't really + * have to bother about anything else at this point + */ + if (vgadev->io_lock_cnt == 0) + vgadev->locks &= ~VGA_RSRC_LEGACY_IO; + if (vgadev->mem_lock_cnt == 0) + vgadev->locks &= ~VGA_RSRC_LEGACY_MEM; + + /* Kick the wait queue in case somebody was waiting if we actually + * released something + */ + if (old_locks != vgadev->locks) + wake_up_all(&vga_wait_queue); +} + +int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) +{ + struct vga_device *vgadev, *conflict; + unsigned long flags; + wait_queue_t wait; + int rc = 0; + + vga_check_first_use(); + /* The one who calls us should check for this, but lets be sure... 
*/ + if (pdev == NULL) + pdev = vga_default_device(); + if (pdev == NULL) + return 0; + + for (;;) { + spin_lock_irqsave(&vga_lock, flags); + vgadev = vgadev_find(pdev); + if (vgadev == NULL) { + spin_unlock_irqrestore(&vga_lock, flags); + rc = -ENODEV; + break; + } + conflict = __vga_tryget(vgadev, rsrc); + spin_unlock_irqrestore(&vga_lock, flags); + if (conflict == NULL) + break; + + + /* We have a conflict, we wait until somebody kicks the + * work queue. Currently we have one work queue that we + * kick each time some resources are released, but it would + * be fairly easy to have a per device one so that we only + * need to attach to the conflicting device + */ + init_waitqueue_entry(&wait, current); + add_wait_queue(&vga_wait_queue, &wait); + set_current_state(interruptible ? + TASK_INTERRUPTIBLE : + TASK_UNINTERRUPTIBLE); + if (signal_pending(current)) { + rc = -EINTR; + break; + } + schedule(); + remove_wait_queue(&vga_wait_queue, &wait); + set_current_state(TASK_RUNNING); + } + return rc; +} +EXPORT_SYMBOL(vga_get); + +int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) +{ + struct vga_device *vgadev; + unsigned long flags; + int rc = 0; + + vga_check_first_use(); + + /* The one who calls us should check for this, but lets be sure... */ + if (pdev == NULL) + pdev = vga_default_device(); + if (pdev == NULL) + return 0; + spin_lock_irqsave(&vga_lock, flags); + vgadev = vgadev_find(pdev); + if (vgadev == NULL) { + rc = -ENODEV; + goto bail; + } + if (__vga_tryget(vgadev, rsrc)) + rc = -EBUSY; +bail: + spin_unlock_irqrestore(&vga_lock, flags); + return rc; +} +EXPORT_SYMBOL(vga_tryget); + +void vga_put(struct pci_dev *pdev, unsigned int rsrc) +{ + struct vga_device *vgadev; + unsigned long flags; + + /* The one who calls us should check for this, but lets be sure... */ + if (pdev == NULL) + pdev = vga_default_device(); + if (pdev == NULL) + return; + spin_lock_irqsave(&vga_lock, flags); + vgadev = vgadev_find(pdev); + if (vgadev == NULL) + goto bail; + __vga_put(vgadev, rsrc); +bail: + spin_unlock_irqrestore(&vga_lock, flags); +} +EXPORT_SYMBOL(vga_put); + +/* + * Currently, we assume that the "initial" setup of the system is + * not sane, that is we come up with conflicting devices and let + * the arbiter's client decides if devices decodes or not legacy + * things. + */ +static bool vga_arbiter_add_pci_device(struct pci_dev *pdev) +{ + struct vga_device *vgadev; + unsigned long flags; + struct pci_bus *bus; + struct pci_dev *bridge; + u16 cmd; + + /* Only deal with VGA class devices */ + if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) + return false; + + /* Allocate structure */ + vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL); + if (vgadev == NULL) { + pr_err("vgaarb: failed to allocate pci device\n"); + /* What to do on allocation failure ? 
For now, let's + * just do nothing, I'm not sure there is anything saner + * to be done + */ + return false; + } + + memset(vgadev, 0, sizeof(*vgadev)); + + /* Take lock & check for duplicates */ + spin_lock_irqsave(&vga_lock, flags); + if (vgadev_find(pdev) != NULL) { + BUG_ON(1); + goto fail; + } + vgadev->pdev = pdev; + + /* By default, assume we decode everything */ + vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + + /* by default mark it as decoding */ + vga_decode_count++; + /* Mark that we "own" resources based on our enables, we will + * clear that below if the bridge isn't forwarding + */ + pci_read_config_word(pdev, PCI_COMMAND, &cmd); + if (cmd & PCI_COMMAND_IO) + vgadev->owns |= VGA_RSRC_LEGACY_IO; + if (cmd & PCI_COMMAND_MEMORY) + vgadev->owns |= VGA_RSRC_LEGACY_MEM; + + /* Check if VGA cycles can get down to us */ + bus = pdev->bus; + while (bus) { + bridge = bus->self; + if (bridge) { + u16 l; + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, + &l); + if (!(l & PCI_BRIDGE_CTL_VGA)) { + vgadev->owns = 0; + break; + } + } + bus = bus->parent; + } + + /* Deal with VGA default device. Use first enabled one + * by default if arch doesn't have it's own hook + */ +#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE + if (vga_default == NULL && + ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK)) + vga_default = pci_dev_get(pdev); +#endif + + /* Add to the list */ + list_add(&vgadev->list, &vga_list); + vga_count++; + pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n", + pci_name(pdev), + vga_iostate_to_str(vgadev->decodes), + vga_iostate_to_str(vgadev->owns), + vga_iostate_to_str(vgadev->locks)); + + spin_unlock_irqrestore(&vga_lock, flags); + return true; +fail: + spin_unlock_irqrestore(&vga_lock, flags); + kfree(vgadev); + return false; +} + +static bool vga_arbiter_del_pci_device(struct pci_dev *pdev) +{ + struct vga_device *vgadev; + unsigned long flags; + bool ret = true; + + spin_lock_irqsave(&vga_lock, flags); + vgadev = vgadev_find(pdev); + if (vgadev == NULL) { + ret = false; + goto bail; + } + + if (vga_default == pdev) { + pci_dev_put(vga_default); + vga_default = NULL; + } + + if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) + vga_decode_count--; + + /* Remove entry from list */ + list_del(&vgadev->list); + vga_count--; + /* Notify userland driver that the device is gone so it discards + * it's copies of the pci_dev pointer + */ + vga_arb_device_card_gone(pdev); + + /* Wake up all possible waiters */ + wake_up_all(&vga_wait_queue); +bail: + spin_unlock_irqrestore(&vga_lock, flags); + kfree(vgadev); + return ret; +} + +/* this is called with the lock */ +static inline void vga_update_device_decodes(struct vga_device *vgadev, + int new_decodes) +{ + int old_decodes; + struct vga_device *new_vgadev, *conflict; + + old_decodes = vgadev->decodes; + vgadev->decodes = new_decodes; + + pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", + pci_name(vgadev->pdev), + vga_iostate_to_str(old_decodes), + vga_iostate_to_str(vgadev->decodes), + vga_iostate_to_str(vgadev->owns)); + + + /* if we own the decodes we should move them along to + another card */ + if ((vgadev->owns & old_decodes) && (vga_count > 1)) { + /* set us to own nothing */ + vgadev->owns &= ~old_decodes; + list_for_each_entry(new_vgadev, &vga_list, list) { + if ((new_vgadev != vgadev) && + (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) { + pr_info("vgaarb: transferring owner from PCI:%s to 
PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev)); + conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK); + if (!conflict) + __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK); + break; + } + } + } + + /* change decodes counter */ + if (old_decodes != new_decodes) { + if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) + vga_decode_count++; + else + vga_decode_count--; + } +} + +void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace) +{ + struct vga_device *vgadev; + unsigned long flags; + + decodes &= VGA_RSRC_LEGACY_MASK; + + spin_lock_irqsave(&vga_lock, flags); + vgadev = vgadev_find(pdev); + if (vgadev == NULL) + goto bail; + + /* don't let userspace futz with kernel driver decodes */ + if (userspace && vgadev->set_vga_decode) + goto bail; + + /* update the device decodes + counter */ + vga_update_device_decodes(vgadev, decodes); + + /* XXX if somebody is going from "doesn't decode" to "decodes" state + * here, additional care must be taken as we may have pending owner + * ship of non-legacy region ... + */ +bail: + spin_unlock_irqrestore(&vga_lock, flags); +} + +void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes) +{ + __vga_set_legacy_decoding(pdev, decodes, false); +} +EXPORT_SYMBOL(vga_set_legacy_decoding); + +/* call with NULL to unregister */ +int vga_client_register(struct pci_dev *pdev, void *cookie, + void (*irq_set_state)(void *cookie, bool state), + unsigned int (*set_vga_decode)(void *cookie, bool decode)) +{ + int ret = -1; + struct vga_device *vgadev; + unsigned long flags; + + spin_lock_irqsave(&vga_lock, flags); + vgadev = vgadev_find(pdev); + if (!vgadev) + goto bail; + + vgadev->irq_set_state = irq_set_state; + vgadev->set_vga_decode = set_vga_decode; + vgadev->cookie = cookie; + ret = 0; + +bail: + spin_unlock_irqrestore(&vga_lock, flags); + return ret; + +} +EXPORT_SYMBOL(vga_client_register); + +/* + * Char driver implementation + * + * Semantics is: + * + * open : open user instance of the arbitrer. by default, it's + * attached to the default VGA device of the system. + * + * close : close user instance, release locks + * + * read : return a string indicating the status of the target. + * an IO state string is of the form {io,mem,io+mem,none}, + * mc and ic are respectively mem and io lock counts (for + * debugging/diagnostic only). "decodes" indicate what the + * card currently decodes, "owns" indicates what is currently + * enabled on it, and "locks" indicates what is locked by this + * card. If the card is unplugged, we get "invalid" then for + * card_ID and an -ENODEV error is returned for any command + * until a new card is targeted + * + * "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)" + * + * write : write a command to the arbiter. List of commands is: + * + * target <card_ID> : switch target to card <card_ID> (see below) + * lock <io_state> : acquires locks on target ("none" is invalid io_state) + * trylock <io_state> : non-blocking acquire locks on target + * unlock <io_state> : release locks on target + * unlock all : release all locks on target held by this user + * decodes <io_state> : set the legacy decoding attributes for the card + * + * poll : event if something change on any card (not just the target) + * + * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default" + * to go back to the system default card (TODO: not implemented yet). 
+ * Currently, only PCI is supported as a prefix, but the userland API may + * support other bus types in the future, even if the current kernel + * implementation doesn't. + * + * Note about locks: + * + * The driver keeps track of which user has what locks on which card. It + * supports stacking, like the kernel one. This complexifies the implementation + * a bit, but makes the arbiter more tolerant to userspace problems and able + * to properly cleanup in all cases when a process dies. + * Currently, a max of 16 cards simultaneously can have locks issued from + * userspace for a given user (file descriptor instance) of the arbiter. + * + * If the device is hot-unplugged, there is a hook inside the module to notify + * they being added/removed in the system and automatically added/removed in + * the arbiter. + */ + +#define MAX_USER_CARDS 16 +#define PCI_INVALID_CARD ((struct pci_dev *)-1UL) + +/* + * Each user has an array of these, tracking which cards have locks + */ +struct vga_arb_user_card { + struct pci_dev *pdev; + unsigned int mem_cnt; + unsigned int io_cnt; +}; + +struct vga_arb_private { + struct list_head list; + struct pci_dev *target; + struct vga_arb_user_card cards[MAX_USER_CARDS]; + spinlock_t lock; +}; + +static LIST_HEAD(vga_user_list); +static DEFINE_SPINLOCK(vga_user_lock); + + +/* + * This function gets a string in the format: "PCI:domain:bus:dev.fn" and + * returns the respective values. If the string is not in this format, + * it returns 0. + */ +static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain, + unsigned int *bus, unsigned int *devfn) +{ + int n; + unsigned int slot, func; + + + n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func); + if (n != 4) + return 0; + + *devfn = PCI_DEVFN(slot, func); + + return 1; +} + +static ssize_t vga_arb_read(struct file *file, char __user * buf, + size_t count, loff_t *ppos) +{ + struct vga_arb_private *priv = file->private_data; + struct vga_device *vgadev; + struct pci_dev *pdev; + unsigned long flags; + size_t len; + int rc; + char *lbuf; + + lbuf = kmalloc(1024, GFP_KERNEL); + if (lbuf == NULL) + return -ENOMEM; + + /* Shields against vga_arb_device_card_gone (pci_dev going + * away), and allows access to vga list + */ + spin_lock_irqsave(&vga_lock, flags); + + /* If we are targetting the default, use it */ + pdev = priv->target; + if (pdev == NULL || pdev == PCI_INVALID_CARD) { + spin_unlock_irqrestore(&vga_lock, flags); + len = sprintf(lbuf, "invalid"); + goto done; + } + + /* Find card vgadev structure */ + vgadev = vgadev_find(pdev); + if (vgadev == NULL) { + /* Wow, it's not in the list, that shouldn't happen, + * let's fix us up and return invalid card + */ + if (pdev == priv->target) + vga_arb_device_card_gone(pdev); + spin_unlock_irqrestore(&vga_lock, flags); + len = sprintf(lbuf, "invalid"); + goto done; + } + + /* Fill the buffer with infos */ + len = snprintf(lbuf, 1024, + "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n", + vga_decode_count, pci_name(pdev), + vga_iostate_to_str(vgadev->decodes), + vga_iostate_to_str(vgadev->owns), + vga_iostate_to_str(vgadev->locks), + vgadev->io_lock_cnt, vgadev->mem_lock_cnt); + + spin_unlock_irqrestore(&vga_lock, flags); +done: + + /* Copy that to user */ + if (len > count) + len = count; + rc = copy_to_user(buf, lbuf, len); + kfree(lbuf); + if (rc) + return -EFAULT; + return len; +} + +/* + * TODO: To avoid parsing inside kernel and to improve the speed we may + * consider use ioctl here + */ +static ssize_t vga_arb_write(struct file 
*file, const char __user * buf, + size_t count, loff_t *ppos) +{ + struct vga_arb_private *priv = file->private_data; + struct vga_arb_user_card *uc = NULL; + struct pci_dev *pdev; + + unsigned int io_state; + + char *kbuf, *curr_pos; + size_t remaining = count; + + int ret_val; + int i; + + + kbuf = kmalloc(count + 1, GFP_KERNEL); + if (!kbuf) + return -ENOMEM; + + if (copy_from_user(kbuf, buf, count)) { + kfree(kbuf); + return -EFAULT; + } + curr_pos = kbuf; + kbuf[count] = '\0'; /* Just to make sure... */ + + if (strncmp(curr_pos, "lock ", 5) == 0) { + curr_pos += 5; + remaining -= 5; + + pr_devel("client 0x%p called 'lock'\n", priv); + + if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { + ret_val = -EPROTO; + goto done; + } + if (io_state == VGA_RSRC_NONE) { + ret_val = -EPROTO; + goto done; + } + + pdev = priv->target; + if (priv->target == NULL) { + ret_val = -ENODEV; + goto done; + } + + vga_get_uninterruptible(pdev, io_state); + + /* Update the client's locks lists... */ + for (i = 0; i < MAX_USER_CARDS; i++) { + if (priv->cards[i].pdev == pdev) { + if (io_state & VGA_RSRC_LEGACY_IO) + priv->cards[i].io_cnt++; + if (io_state & VGA_RSRC_LEGACY_MEM) + priv->cards[i].mem_cnt++; + break; + } + } + + ret_val = count; + goto done; + } else if (strncmp(curr_pos, "unlock ", 7) == 0) { + curr_pos += 7; + remaining -= 7; + + pr_devel("client 0x%p called 'unlock'\n", priv); + + if (strncmp(curr_pos, "all", 3) == 0) + io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM; + else { + if (!vga_str_to_iostate + (curr_pos, remaining, &io_state)) { + ret_val = -EPROTO; + goto done; + } + /* TODO: Add this? + if (io_state == VGA_RSRC_NONE) { + ret_val = -EPROTO; + goto done; + } + */ + } + + pdev = priv->target; + if (priv->target == NULL) { + ret_val = -ENODEV; + goto done; + } + for (i = 0; i < MAX_USER_CARDS; i++) { + if (priv->cards[i].pdev == pdev) + uc = &priv->cards[i]; + } + + if (!uc) + return -EINVAL; + + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) + return -EINVAL; + + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) + return -EINVAL; + + vga_put(pdev, io_state); + + if (io_state & VGA_RSRC_LEGACY_IO) + uc->io_cnt--; + if (io_state & VGA_RSRC_LEGACY_MEM) + uc->mem_cnt--; + + ret_val = count; + goto done; + } else if (strncmp(curr_pos, "trylock ", 8) == 0) { + curr_pos += 8; + remaining -= 8; + + pr_devel("client 0x%p called 'trylock'\n", priv); + + if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { + ret_val = -EPROTO; + goto done; + } + /* TODO: Add this? + if (io_state == VGA_RSRC_NONE) { + ret_val = -EPROTO; + goto done; + } + */ + + pdev = priv->target; + if (priv->target == NULL) { + ret_val = -ENODEV; + goto done; + } + + if (vga_tryget(pdev, io_state)) { + /* Update the client's locks lists... 
*/ + for (i = 0; i < MAX_USER_CARDS; i++) { + if (priv->cards[i].pdev == pdev) { + if (io_state & VGA_RSRC_LEGACY_IO) + priv->cards[i].io_cnt++; + if (io_state & VGA_RSRC_LEGACY_MEM) + priv->cards[i].mem_cnt++; + break; + } + } + ret_val = count; + goto done; + } else { + ret_val = -EBUSY; + goto done; + } + + } else if (strncmp(curr_pos, "target ", 7) == 0) { + unsigned int domain, bus, devfn; + struct vga_device *vgadev; + + curr_pos += 7; + remaining -= 7; + pr_devel("client 0x%p called 'target'\n", priv); + /* if target is default */ + if (!strncmp(buf, "default", 7)) + pdev = pci_dev_get(vga_default_device()); + else { + if (!vga_pci_str_to_vars(curr_pos, remaining, + &domain, &bus, &devfn)) { + ret_val = -EPROTO; + goto done; + } + + pdev = pci_get_bus_and_slot(bus, devfn); + if (!pdev) { + pr_info("vgaarb: invalid PCI address!\n"); + ret_val = -ENODEV; + goto done; + } + } + + vgadev = vgadev_find(pdev); + if (vgadev == NULL) { + pr_info("vgaarb: this pci device is not a vga device\n"); + pci_dev_put(pdev); + ret_val = -ENODEV; + goto done; + } + + priv->target = pdev; + for (i = 0; i < MAX_USER_CARDS; i++) { + if (priv->cards[i].pdev == pdev) + break; + if (priv->cards[i].pdev == NULL) { + priv->cards[i].pdev = pdev; + priv->cards[i].io_cnt = 0; + priv->cards[i].mem_cnt = 0; + break; + } + } + if (i == MAX_USER_CARDS) { + pr_err("vgaarb: maximum user cards number reached!\n"); + pci_dev_put(pdev); + /* XXX: which value to return? */ + ret_val = -ENOMEM; + goto done; + } + + ret_val = count; + pci_dev_put(pdev); + goto done; + + + } else if (strncmp(curr_pos, "decodes ", 8) == 0) { + curr_pos += 8; + remaining -= 8; + pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv); + + if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) { + ret_val = -EPROTO; + goto done; + } + pdev = priv->target; + if (priv->target == NULL) { + ret_val = -ENODEV; + goto done; + } + + __vga_set_legacy_decoding(pdev, io_state, true); + ret_val = count; + goto done; + } + /* If we got here, the message written is not part of the protocol! */ + kfree(kbuf); + return -EPROTO; + +done: + kfree(kbuf); + return ret_val; +} + +static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait) +{ + struct vga_arb_private *priv = file->private_data; + + pr_devel("%s\n", __func__); + + if (priv == NULL) + return -ENODEV; + poll_wait(file, &vga_wait_queue, wait); + return POLLIN; +} + +static int vga_arb_open(struct inode *inode, struct file *file) +{ + struct vga_arb_private *priv; + unsigned long flags; + + pr_devel("%s\n", __func__); + + priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL); + if (priv == NULL) + return -ENOMEM; + memset(priv, 0, sizeof(*priv)); + spin_lock_init(&priv->lock); + file->private_data = priv; + + spin_lock_irqsave(&vga_user_lock, flags); + list_add(&priv->list, &vga_user_list); + spin_unlock_irqrestore(&vga_user_lock, flags); + + /* Set the client' lists of locks */ + priv->target = vga_default_device(); /* Maybe this is still null! 
*/ + priv->cards[0].pdev = priv->target; + priv->cards[0].io_cnt = 0; + priv->cards[0].mem_cnt = 0; + + + return 0; +} + +static int vga_arb_release(struct inode *inode, struct file *file) +{ + struct vga_arb_private *priv = file->private_data; + struct vga_arb_user_card *uc; + unsigned long flags; + int i; + + pr_devel("%s\n", __func__); + + if (priv == NULL) + return -ENODEV; + + spin_lock_irqsave(&vga_user_lock, flags); + list_del(&priv->list); + for (i = 0; i < MAX_USER_CARDS; i++) { + uc = &priv->cards[i]; + if (uc->pdev == NULL) + continue; + pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n", + uc->io_cnt, uc->mem_cnt); + while (uc->io_cnt--) + vga_put(uc->pdev, VGA_RSRC_LEGACY_IO); + while (uc->mem_cnt--) + vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM); + } + spin_unlock_irqrestore(&vga_user_lock, flags); + + kfree(priv); + + return 0; +} + +static void vga_arb_device_card_gone(struct pci_dev *pdev) +{ +} + +/* + * callback any registered clients to let them know we have a + * change in VGA cards + */ +static void vga_arbiter_notify_clients(void) +{ + struct vga_device *vgadev; + unsigned long flags; + uint32_t new_decodes; + bool new_state; + + if (!vga_arbiter_used) + return; + + spin_lock_irqsave(&vga_lock, flags); + list_for_each_entry(vgadev, &vga_list, list) { + if (vga_count > 1) + new_state = false; + else + new_state = true; + if (vgadev->set_vga_decode) { + new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state); + vga_update_device_decodes(vgadev, new_decodes); + } + } + spin_unlock_irqrestore(&vga_lock, flags); +} + +static int pci_notify(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + bool notify = false; + + pr_devel("%s\n", __func__); + + /* For now we're only intereted in devices added and removed. I didn't + * test this thing here, so someone needs to double check for the + * cases of hotplugable vga cards. */ + if (action == BUS_NOTIFY_ADD_DEVICE) + notify = vga_arbiter_add_pci_device(pdev); + else if (action == BUS_NOTIFY_DEL_DEVICE) + notify = vga_arbiter_del_pci_device(pdev); + + if (notify) + vga_arbiter_notify_clients(); + return 0; +} + +static struct notifier_block pci_notifier = { + .notifier_call = pci_notify, +}; + +static const struct file_operations vga_arb_device_fops = { + .read = vga_arb_read, + .write = vga_arb_write, + .poll = vga_arb_fpoll, + .open = vga_arb_open, + .release = vga_arb_release, +}; + +static struct miscdevice vga_arb_device = { + MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops +}; + +static int __init vga_arb_device_init(void) +{ + int rc; + struct pci_dev *pdev; + + rc = misc_register(&vga_arb_device); + if (rc < 0) + pr_err("vgaarb: error %d registering device\n", rc); + + bus_register_notifier(&pci_bus_type, &pci_notifier); + + /* We add all pci devices satisfying vga class in the arbiter by + * default */ + pdev = NULL; + while ((pdev = + pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_ANY_ID, pdev)) != NULL) + vga_arbiter_add_pci_device(pdev); + + pr_info("vgaarb: loaded\n"); + return rc; +} +subsys_initcall(vga_arb_device_init); |
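
Editor's note: the read/write protocol documented in the large comment block inside vgaarb.c above can be exercised with plain userspace file I/O. The client below is only an illustrative sketch under two assumptions: that the miscdevice registered as "vga_arbiter" shows up as /dev/vga_arbiter (the usual udev naming), and that minimal error handling is acceptable for a demo.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[256];
	ssize_t len;
	int fd;

	fd = open("/dev/vga_arbiter", O_RDWR);
	if (fd < 0) {
		perror("open /dev/vga_arbiter");
		return 1;
	}

	/* Grab the legacy IO+MEM ranges on the current target
	 * (the system default VGA card right after open()). */
	if (write(fd, "lock io+mem", strlen("lock io+mem")) < 0)
		perror("lock");

	/* Read back the target's state, e.g.
	 * "count:1,PCI:0000:01:00.0,decodes=io+mem,owns=io+mem,locks=io+mem(1:1)" */
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		fputs(buf, stdout);
	}

	/* Release everything this file descriptor holds. */
	if (write(fd, "unlock all", strlen("unlock all")) < 0)
		perror("unlock");

	close(fd);
	return 0;
}

The other commands parsed by vga_arb_write() work the same way: writing "target PCI:domain:bus:dev.fn" switches the card the subsequent commands act on, "trylock <io_state>" is the non-blocking variant of "lock", and "decodes <io_state>" changes the card's legacy decode attributes.
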