Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 298
1 files changed, 189 insertions, 109 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1fc327d3421e..81adf89b92f1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -56,7 +56,7 @@
 
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20150522"
+#define DRIVER_DATE		"20150731"
 
 #undef WARN_ON
 /* Many gcc seem to no see through this and fall over :( */
@@ -182,6 +182,7 @@ enum intel_display_power_domain {
 	POWER_DOMAIN_PORT_DDI_C_4_LANES,
 	POWER_DOMAIN_PORT_DDI_D_2_LANES,
 	POWER_DOMAIN_PORT_DDI_D_4_LANES,
+	POWER_DOMAIN_PORT_DDI_E_2_LANES,
 	POWER_DOMAIN_PORT_DSI,
 	POWER_DOMAIN_PORT_CRT,
 	POWER_DOMAIN_PORT_OTHER,
@@ -206,17 +207,51 @@
 
 enum hpd_pin {
 	HPD_NONE = 0,
-	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
 	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
 	HPD_CRT,
 	HPD_SDVO_B,
 	HPD_SDVO_C,
+	HPD_PORT_A,
 	HPD_PORT_B,
 	HPD_PORT_C,
 	HPD_PORT_D,
+	HPD_PORT_E,
 	HPD_NUM_PINS
 };
 
+#define for_each_hpd_pin(__pin) \
+	for ((__pin) = (HPD_NONE + 1); (__pin) < HPD_NUM_PINS; (__pin)++)
+
+struct i915_hotplug {
+	struct work_struct hotplug_work;
+
+	struct {
+		unsigned long last_jiffies;
+		int count;
+		enum {
+			HPD_ENABLED = 0,
+			HPD_DISABLED = 1,
+			HPD_MARK_DISABLED = 2
+		} state;
+	} stats[HPD_NUM_PINS];
+	u32 event_bits;
+	struct delayed_work reenable_work;
+
+	struct intel_digital_port *irq_port[I915_MAX_PORTS];
+	u32 long_port_mask;
+	u32 short_port_mask;
+	struct work_struct dig_port_work;
+
+	/*
+	 * if we get a HPD irq from DP and a HPD irq from non-DP
+	 * the non-DP HPD could block the workqueue on a mode config
+	 * mutex getting, that userspace may have taken. However
+	 * userspace is waiting on the DP workqueue to run which is
+	 * blocked behind the non-DP one.
+	 */
+	struct workqueue_struct *dp_wq;
+};
+
 #define I915_GEM_GPU_DOMAINS \
 	(I915_GEM_DOMAIN_RENDER | \
 	 I915_GEM_DOMAIN_SAMPLER | \
@@ -243,6 +278,12 @@ enum hpd_pin {
 			    &dev->mode_config.plane_list,	\
 			    base.head)
 
+#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane)	\
+	list_for_each_entry(intel_plane,				\
+			    &(dev)->mode_config.plane_list,		\
+			    base.head)					\
+		if ((intel_plane)->pipe == (intel_crtc)->pipe)
+
 #define for_each_intel_crtc(dev, intel_crtc) \
 	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
 
@@ -333,7 +374,8 @@ struct intel_dpll_hw_state {
 	uint32_t cfgcr1, cfgcr2;
 
 	/* bxt */
-	uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
+	uint32_t ebb0, ebb4, pll0, pll1, pll2, pll3, pll6, pll8, pll9, pll10,
+		 pcsdw12;
 };
 
 struct intel_shared_dpll_config {
@@ -343,7 +385,6 @@ struct intel_shared_dpll_config {
 
 struct intel_shared_dpll {
 	struct intel_shared_dpll_config config;
-	struct intel_shared_dpll_config *new_config;
 
 	int active; /* count of number of active CRTCs (i.e. DPMS on) */
 	bool on; /* is the PLL actually active? Disabled during modeset */
@@ -445,6 +486,7 @@ struct drm_i915_error_state {
 	struct timeval time;
 	char error_msg[128];
+	int iommu;
 	u32 reset_count;
 	u32 suspend_count;
 
@@ -559,9 +601,6 @@ struct intel_limit;
 struct dpll;
 
 struct drm_i915_display_funcs {
-	bool (*fbc_enabled)(struct drm_device *dev);
-	void (*enable_fbc)(struct drm_crtc *crtc);
-	void (*disable_fbc)(struct drm_device *dev);
 	int (*get_display_clock_speed)(struct drm_device *dev);
 	int (*get_fifo_size)(struct drm_device *dev, int plane);
 	/**
@@ -587,7 +626,8 @@
 				 struct drm_crtc *crtc,
 				 uint32_t sprite_width, uint32_t sprite_height,
 				 int pixel_size, bool enable, bool scaled);
-	void (*modeset_global_resources)(struct drm_atomic_state *state);
+	int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
+	void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
 	/* Returns the active state of the crtc, and if the crtc is active,
 	 * fills out the pipe-config with the hw state. */
 	bool (*get_pipe_config)(struct intel_crtc *,
@@ -598,7 +638,6 @@
 			    struct intel_crtc_state *crtc_state);
 	void (*crtc_enable)(struct drm_crtc *crtc);
 	void (*crtc_disable)(struct drm_crtc *crtc);
-	void (*off)(struct drm_crtc *crtc);
 	void (*audio_codec_enable)(struct drm_connector *connector,
 				   struct intel_encoder *encoder,
 				   struct drm_display_mode *mode);
@@ -608,7 +647,7 @@
 	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
 			  struct drm_i915_gem_object *obj,
-			  struct intel_engine_cs *ring,
+			  struct drm_i915_gem_request *req,
 			  uint32_t flags);
 	void (*update_primary_plane)(struct drm_crtc *crtc,
 				     struct drm_framebuffer *fb,
@@ -706,7 +745,7 @@ enum csr_state {
 
 struct intel_csr {
 	const char *fw_path;
-	__be32 *dmc_payload;
+	uint32_t *dmc_payload;
 	uint32_t dmc_fw_size;
 	uint32_t mmio_count;
 	uint32_t mmioaddr[8];
@@ -805,11 +844,15 @@ struct i915_ctx_hang_stats {
 
 /* This must match up with the value previously used for execbuf2.rsvd1. */
 #define DEFAULT_CONTEXT_HANDLE 0
+
+#define CONTEXT_NO_ZEROMAP (1<<0)
 /**
  * struct intel_context - as the name implies, represents a context.
  * @ref: reference count.
  * @user_handle: userspace tracking identity for this context.
 * @remap_slice: l3 row remapping information.
+ * @flags: context specific flags:
+ *         CONTEXT_NO_ZEROMAP: do not allow mapping things to page 0.
 * @file_priv: filp associated with this context (NULL for global default
 *	       context).
 * @hang_stats: information about the role of this context in possible GPU
@@ -827,6 +870,7 @@ struct intel_context {
 	int user_handle;
 	uint8_t remap_slice;
 	struct drm_i915_private *i915;
+	int flags;
 	struct drm_i915_file_private *file_priv;
 	struct i915_ctx_hang_stats hang_stats;
 	struct i915_hw_ppgtt *ppgtt;
@@ -853,9 +897,13 @@ enum fb_op_origin {
 	ORIGIN_CPU,
 	ORIGIN_CS,
 	ORIGIN_FLIP,
+	ORIGIN_DIRTYFB,
 };
 
 struct i915_fbc {
+	/* This is always the inner lock when overlapping with struct_mutex and
+	 * it's the outer lock when overlapping with stolen_lock. */
+	struct mutex lock;
 	unsigned long uncompressed_size;
 	unsigned threshold;
 	unsigned int fb_id;
@@ -875,7 +923,7 @@ struct i915_fbc {
 
 	struct intel_fbc_work {
 		struct delayed_work work;
-		struct drm_crtc *crtc;
+		struct intel_crtc *crtc;
 		struct drm_framebuffer *fb;
 	} *fbc_work;
 
@@ -891,7 +939,13 @@
 		FBC_MULTIPLE_PIPES, /* more than one pipe active */
 		FBC_MODULE_PARAM,
 		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
+		FBC_ROTATION, /* rotation is not supported */
+		FBC_IN_DBG_MASTER, /* kernel debugger is active */
 	} no_fbc_reason;
+
+	bool (*fbc_enabled)(struct drm_i915_private *dev_priv);
+	void (*enable_fbc)(struct intel_crtc *crtc);
+	void (*disable_fbc)(struct drm_i915_private *dev_priv);
 };
 
 /**
@@ -1201,6 +1255,10 @@ struct intel_l3_parity {
 struct i915_gem_mm {
 	/** Memory allocator for GTT stolen memory */
 	struct drm_mm stolen;
+	/** Protects the usage of the GTT stolen memory allocator. This is
+	 * always the inner lock when overlapping with struct_mutex. */
+	struct mutex stolen_lock;
+
 	/** List of all objects in gtt_space. Used to restore gtt
 	 *  mappings on resume */
 	struct list_head bound_list;
@@ -1354,6 +1412,15 @@ enum modeset_restore {
 	MODESET_SUSPENDED,
 };
 
+#define DP_AUX_A 0x40
+#define DP_AUX_B 0x10
+#define DP_AUX_C 0x20
+#define DP_AUX_D 0x30
+
+#define DDC_PIN_B  0x05
+#define DDC_PIN_C  0x04
+#define DDC_PIN_D  0x06
+
 struct ddi_vbt_port_info {
 	/*
 	 * This is an index in the HDMI/DVI DDI buffer translation table.
@@ -1366,6 +1433,12 @@ struct ddi_vbt_port_info {
 	uint8_t supports_dvi:1;
 	uint8_t supports_hdmi:1;
 	uint8_t supports_dp:1;
+
+	uint8_t alternate_aux_channel;
+	uint8_t alternate_ddc_pin;
+
+	uint8_t dp_boost_level;
+	uint8_t hdmi_boost_level;
 };
 
 enum psr_lines_to_wait {
@@ -1461,23 +1534,27 @@ struct ilk_wm_values {
 	enum intel_ddb_partitioning partitioning;
 };
 
-struct vlv_wm_values {
-	struct {
-		uint16_t primary;
-		uint16_t sprite[2];
-		uint8_t cursor;
-	} pipe[3];
+struct vlv_pipe_wm {
+	uint16_t primary;
+	uint16_t sprite[2];
+	uint8_t cursor;
+};
 
-	struct {
-		uint16_t plane;
-		uint8_t cursor;
-	} sr;
+struct vlv_sr_wm {
+	uint16_t plane;
+	uint8_t cursor;
+};
 
+struct vlv_wm_values {
+	struct vlv_pipe_wm pipe[3];
+	struct vlv_sr_wm sr;
 	struct {
 		uint8_t cursor;
 		uint8_t sprite[2];
 		uint8_t primary;
 	} ddl[3];
+	uint8_t level;
+	bool cxsr;
 };
 
 struct skl_ddb_entry {
@@ -1611,6 +1688,18 @@ struct i915_virtual_gpu {
 	bool active;
 };
 
+struct i915_execbuffer_params {
+	struct drm_device *dev;
+	struct drm_file *file;
+	uint32_t dispatch_flags;
+	uint32_t args_batch_start_offset;
+	uint32_t batch_obj_vm_offset;
+	struct intel_engine_cs *ring;
+	struct drm_i915_gem_object *batch_obj;
+	struct intel_context *ctx;
+	struct drm_i915_gem_request *request;
+};
+
 struct drm_i915_private {
 	struct drm_device *dev;
 	struct kmem_cache *objects;
@@ -1680,19 +1769,7 @@
 	u32 pm_rps_events;
 	u32 pipestat_irq_mask[I915_MAX_PIPES];
 
-	struct work_struct hotplug_work;
-	struct {
-		unsigned long hpd_last_jiffies;
-		int hpd_cnt;
-		enum {
-			HPD_ENABLED = 0,
-			HPD_DISABLED = 1,
-			HPD_MARK_DISABLED = 2
-		} hpd_mark;
-	} hpd_stats[HPD_NUM_PINS];
-	u32 hpd_event_bits;
-	struct delayed_work hotplug_reenable_work;
-
+	struct i915_hotplug hotplug;
 	struct i915_fbc fbc;
 	struct i915_drrs drrs;
 	struct intel_opregion opregion;
@@ -1718,7 +1795,7 @@ struct drm_i915_private {
 	unsigned int fsb_freq, mem_freq, is_ddr3;
 	unsigned int skl_boot_cdclk;
-	unsigned int cdclk_freq;
+	unsigned int cdclk_freq, max_cdclk_freq;
 	unsigned int hpll_freq;
 
 	/**
@@ -1769,9 +1846,6 @@ struct drm_i915_private {
 
 	/* Reclocking support */
 	bool render_reclock_avail;
-	bool lvds_downclock_avail;
-	/* indicates the reduced downclock for LVDS*/
-	int lvds_downclock;
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
@@ -1799,7 +1873,7 @@ struct drm_i915_private {
 
 	struct drm_i915_gem_object *vlv_pctx;
 
-#ifdef CONFIG_DRM_I915_FBDEV
+#ifdef CONFIG_DRM_FBDEV_EMULATION
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 	struct work_struct fbdev_suspend_work;
@@ -1859,29 +1933,11 @@ struct drm_i915_private {
 
 	struct i915_runtime_pm pm;
 
-	struct intel_digital_port *hpd_irq_port[I915_MAX_PORTS];
-	u32 long_hpd_port_mask;
-	u32 short_hpd_port_mask;
-	struct work_struct dig_port_work;
-
-	/*
-	 * if we get a HPD irq from DP and a HPD irq from non-DP
-	 * the non-DP HPD could block the workqueue on a mode config
-	 * mutex getting, that userspace may have taken. However
-	 * userspace is waiting on the DP workqueue to run which is
-	 * blocked behind the non-DP one.
-	 */
-	struct workqueue_struct *dp_wq;
-
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct drm_device *dev, struct drm_file *file,
-				      struct intel_engine_cs *ring,
-				      struct intel_context *ctx,
+		int (*execbuf_submit)(struct i915_execbuffer_params *params,
 				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas,
-				      struct drm_i915_gem_object *batch_obj,
-				      u64 exec_start, u32 flags);
+				      struct list_head *vmas);
 		int (*init_rings)(struct drm_device *dev);
 		void (*cleanup_ring)(struct intel_engine_cs *ring);
 		void (*stop_ring)(struct intel_engine_cs *ring);
@@ -2149,7 +2205,8 @@ struct drm_i915_gem_request {
 	struct intel_context *ctx;
 	struct intel_ringbuffer *ringbuf;
 
-	/** Batch buffer related to this request if any */
+	/** Batch buffer related to this request if any (used for
+	    error state dump only) */
 	struct drm_i915_gem_object *batch_obj;
 
 	/** Time at which this request was emitted, in jiffies. */
@@ -2187,8 +2244,12 @@ struct drm_i915_gem_request {
 };
 
 int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx);
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
 void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
 
 static inline uint32_t
 i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
@@ -2392,6 +2453,9 @@ struct drm_i915_cmd_table {
 				 ((INTEL_DEVID(dev) & 0xf) == 0x6 ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xb ||	\
 				 (INTEL_DEVID(dev) & 0xf) == 0xe))
+/* ULX machines are also considered ULT. */
+#define IS_BDW_ULX(dev)		(IS_BROADWELL(dev) && \
+				 (INTEL_DEVID(dev) & 0xf) == 0xe)
 #define IS_BDW_GT3(dev)		(IS_BROADWELL(dev) && \
 				 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
 #define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
@@ -2401,6 +2465,14 @@
 /* ULX machines are also considered ULT. */
 #define IS_HSW_ULX(dev)		(INTEL_DEVID(dev) == 0x0A0E || \
 				 INTEL_DEVID(dev) == 0x0A1E)
+#define IS_SKL_ULT(dev)		(INTEL_DEVID(dev) == 0x1906 || \
+				 INTEL_DEVID(dev) == 0x1913 || \
+				 INTEL_DEVID(dev) == 0x1916 || \
+				 INTEL_DEVID(dev) == 0x1921 || \
+				 INTEL_DEVID(dev) == 0x1926)
+#define IS_SKL_ULX(dev)		(INTEL_DEVID(dev) == 0x190E || \
+				 INTEL_DEVID(dev) == 0x1915 || \
+				 INTEL_DEVID(dev) == 0x191E)
 #define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
 
 #define SKL_REVID_A0		(0x0)
@@ -2467,9 +2539,6 @@
  */
 #define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
 
@@ -2495,6 +2564,12 @@
 
 #define HAS_CSR(dev)	(IS_SKYLAKE(dev))
 
+#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
+				    INTEL_INFO(dev)->gen >= 8)
+
+#define HAS_CORE_RING_FREQ(dev)	(INTEL_INFO(dev)->gen >= 6 && \
+				 !IS_VALLEYVIEW(dev) && !IS_BROXTON(dev))
+
 #define INTEL_PCH_DEVICE_ID_MASK		0xff00
 #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
@@ -2534,7 +2609,6 @@ struct i915_params {
 	int modeset;
 	int panel_ignore_lid;
 	int semaphores;
-	unsigned int lvds_downclock;
 	int lvds_channel_mode;
 	int panel_use_ssc;
 	int vbt_sdvo_panel_type;
@@ -2556,10 +2630,11 @@ struct i915_params {
 	bool reset;
 	bool disable_display;
 	bool disable_vtd_wa;
+	bool enable_guc_submission;
+	int guc_log_level;
 	int use_mmio_flip;
 	int mmio_debug;
 	bool verbose_state_checks;
-	bool nuclear_pageflip;
 	int edp_vswing;
 };
 extern struct i915_params i915 __read_mostly;
@@ -2573,21 +2648,27 @@
 extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file);
 extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file);
-extern int i915_driver_device_is_agp(struct drm_device * dev);
 #ifdef CONFIG_COMPAT
 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
 #endif
 extern int intel_gpu_reset(struct drm_device *dev);
+extern bool intel_has_gpu_reset(struct drm_device *dev);
 extern int i915_reset(struct drm_device *dev);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
 void i915_firmware_load_error_print(const char *fw_path, int err);
 
+/* intel_hotplug.c */
+void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask);
+void intel_hpd_init(struct drm_i915_private *dev_priv);
+void intel_hpd_init_work(struct drm_i915_private *dev_priv);
+void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
+
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
 __printf(3, 4)
@@ -2595,7 +2676,6 @@
 void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...);
 
 extern void intel_irq_init(struct drm_i915_private *dev_priv);
-extern void intel_hpd_init(struct drm_i915_private *dev_priv);
 int intel_irq_install(struct drm_i915_private *dev_priv);
 void intel_irq_uninstall(struct drm_i915_private *dev_priv);
 
@@ -2662,19 +2742,11 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
 void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct intel_engine_cs *ring);
-void i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-					 struct drm_file *file,
-					 struct intel_engine_cs *ring,
-					 struct drm_i915_gem_object *obj);
-int i915_gem_ringbuffer_submission(struct drm_device *dev,
-				   struct drm_file *file,
-				   struct intel_engine_cs *ring,
-				   struct intel_context *ctx,
+					struct drm_i915_gem_request *req);
+void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
+int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas,
-				   struct drm_i915_gem_object *batch_obj,
-				   u64 exec_start, u32 flags);
+				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
@@ -2707,6 +2779,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
+struct drm_i915_gem_object *i915_gem_object_create_from_data(
+		struct drm_device *dev, const void *data, size_t size);
 void i915_init_vm(struct drm_i915_private *dev_priv,
		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
@@ -2781,9 +2855,10 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to);
+			 struct intel_engine_cs *to,
+			 struct drm_i915_gem_request **to_req);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct intel_engine_cs *ring);
+			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
@@ -2812,11 +2887,6 @@ static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req,
 int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
-int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
-int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
-
-bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
-void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring);
@@ -2825,7 +2895,6 @@ bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
-int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -2860,16 +2929,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_init(struct drm_device *dev);
 int i915_gem_init_rings(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
+int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-int __i915_add_request(struct intel_engine_cs *ring,
-		       struct drm_file *file,
-		       struct drm_i915_gem_object *batch_obj);
-#define i915_add_request(ring) \
-	__i915_add_request(ring, NULL, NULL)
+void __i915_add_request(struct drm_i915_gem_request *req,
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
			unsigned reset_counter,
			bool interruptible,
@@ -2889,6 +2960,7 @@ int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_engine_cs *pipelined,
+				     struct drm_i915_gem_request **pipelined_request,
				     const struct i915_ggtt_view *view);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
					      const struct i915_ggtt_view *view);
@@ -2912,8 +2984,6 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				struct drm_gem_object *gem_obj, int flags);
 
-void i915_gem_restore_fences(struct drm_device *dev);
-
 unsigned long
 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
			      const struct i915_ggtt_view *view);
@@ -3008,15 +3078,27 @@ i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
	i915_gem_object_ggtt_unpin_view(obj, &i915_ggtt_view_normal);
 }
 
+/* i915_gem_fence.c */
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
+
+void i915_gem_restore_fences(struct drm_device *dev);
+
+void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
+
 /* i915_gem_context.c */
 int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_private *dev_priv);
+int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_engine_cs *ring,
-			struct intel_context *to);
+int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
 i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
@@ -3066,9 +3148,12 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
 }
 
 /* i915_gem_stolen.c */
+int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
+				struct drm_mm_node *node, u64 size,
+				unsigned alignment);
+void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
+				 struct drm_mm_node *node);
 int i915_gem_init_stolen(struct drm_device *dev);
-int i915_gem_stolen_setup_compression(struct drm_device *dev, int size, int fb_cpp);
-void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
 void i915_gem_cleanup_stolen(struct drm_device *dev);
 struct drm_i915_gem_object *
 i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
@@ -3098,10 +3183,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
		obj->tiling_mode != I915_TILING_NONE;
 }
 
-void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
-
 /* i915_gem_debug.c */
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
@@ -3223,8 +3304,7 @@ extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
-extern void intel_modeset_setup_hw_state(struct drm_device *dev,
-					 bool force_restore);
+extern void intel_display_resume(struct drm_device *dev);
 extern void i915_redisable_vga(struct drm_device *dev);
 extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
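Editor's note on the hotplug consolidation above: the per-pin state that used to live directly in drm_i915_private now sits in struct i915_hotplug (reached as dev_priv->hotplug), and for_each_hpd_pin() iterates every valid pin. A minimal sketch of walking the new stats[] array with that macro follows; this is illustrative only, the helper name and the surrounding locking are assumptions and not part of the patch.

#include "i915_drv.h"

/* Illustrative sketch (not from this patch): walk every HPD pin and clear
 * the storm "mark disabled" state so the pin's interrupt can be re-enabled.
 * Callers would be expected to hold whatever lock protects hotplug.stats. */
static void example_reenable_marked_pins(struct drm_i915_private *dev_priv)
{
	enum hpd_pin pin;

	for_each_hpd_pin(pin) {
		if (dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
			continue;

		dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
		dev_priv->hotplug.stats[pin].count = 0;
	}
}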
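The changed prototypes also show the shift from passing an intel_engine_cs to passing an explicit drm_i915_gem_request: callers allocate a request up front, thread it through the code that emits commands, and then either commit it with __i915_add_request() or drop it with i915_gem_request_cancel(). A rough sketch of that calling convention, using only the signatures visible in this diff; emit_commands() is a hypothetical placeholder, not a function from the patch.

#include "i915_drv.h"

/* Sketch of the request-centric flow implied by the new prototypes. */
static int example_submit(struct intel_engine_cs *ring,
			  struct intel_context *ctx,
			  struct drm_i915_gem_object *batch_obj)
{
	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	ret = emit_commands(req, batch_obj);	/* hypothetical emit step */
	if (ret) {
		i915_gem_request_cancel(req);
		return ret;
	}

	/* Queue the request; this flushes caches unless the
	 * i915_add_request_no_flush() variant is used instead. */
	__i915_add_request(req, batch_obj, true);
	return 0;
}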