Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/Kbuild                  |   1
-rw-r--r--  include/drm/drm.h                   |   4
-rw-r--r--  include/drm/drmP.h                  | 224
-rw-r--r--  include/drm/drm_crtc.h              |  29
-rw-r--r--  include/drm/drm_hashtab.h           |   6
-rw-r--r--  include/drm/drm_mm.h                |   7
-rw-r--r--  include/drm/drm_mode.h              |  29
-rw-r--r--  include/drm/drm_pciids.h            |  41
-rw-r--r--  include/drm/drm_usb.h               |  15
-rw-r--r--  include/drm/i830_drm.h              | 342
-rw-r--r--  include/drm/i915_drm.h              |  14
-rw-r--r--  include/drm/intel-gtt.h             |  35
-rw-r--r--  include/drm/nouveau_drm.h           |  12
-rw-r--r--  include/drm/radeon_drm.h            |   2
-rw-r--r--  include/drm/ttm/ttm_bo_api.h        |  50
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h     | 152
-rw-r--r--  include/drm/ttm/ttm_execbuf_util.h  |  11
17 files changed, 495 insertions, 479 deletions
diff --git a/include/drm/Kbuild b/include/drm/Kbuild
index ffec177f3481..3a60ac889520 100644
--- a/include/drm/Kbuild
+++ b/include/drm/Kbuild
@@ -2,7 +2,6 @@ header-y += drm.h
header-y += drm_mode.h
header-y += drm_sarea.h
header-y += i810_drm.h
-header-y += i830_drm.h
header-y += i915_drm.h
header-y += mga_drm.h
header-y += nouveau_drm.h
diff --git a/include/drm/drm.h b/include/drm/drm.h
index e5f70617dec5..8598cc94e169 100644
--- a/include/drm/drm.h
+++ b/include/drm/drm.h
@@ -701,6 +701,10 @@ struct drm_gem_open {
#define DRM_IOCTL_MODE_PAGE_FLIP DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
#define DRM_IOCTL_MODE_DIRTYFB DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
+#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
+#define DRM_IOCTL_MODE_MAP_DUMB DRM_IOWR(0xB3, struct drm_mode_map_dumb)
+#define DRM_IOCTL_MODE_DESTROY_DUMB DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
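The three ioctls above give KMS userspace a driver-agnostic way to allocate a plain scanout buffer, ask for a fake mmap offset for it, and free it again; the corresponding request structures are added to drm_mode.h further down in this patch. A minimal userspace sketch, assuming an already opened DRM file descriptor and the usual libdrm include paths, and that the caller has filled in width/height/bpp beforehand:

#include <stdint.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void *create_and_map_dumb(int fd, struct drm_mode_create_dumb *creq)
{
	struct drm_mode_map_dumb mreq = { 0 };

	/* Driver fills creq->handle, creq->pitch and creq->size. */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, creq))
		return NULL;

	/* Ask the driver for the fake offset to pass to mmap(). */
	mreq.handle = creq->handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq))
		return NULL;

	return mmap(NULL, creq->size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, mreq.offset);
}

static void destroy_dumb(int fd, uint32_t handle)
{
	struct drm_mode_destroy_dumb dreq = { .handle = handle };

	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &dreq);
}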
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 274eaaa15c36..52a2fd2f7789 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -145,7 +145,10 @@ extern void drm_ut_debug_printk(unsigned int request_level,
#define DRIVER_IRQ_VBL2 0x800
#define DRIVER_GEM 0x1000
#define DRIVER_MODESET 0x2000
-#define DRIVER_USE_PLATFORM_DEVICE 0x4000
+
+#define DRIVER_BUS_PCI 0x1
+#define DRIVER_BUS_PLATFORM 0x2
+#define DRIVER_BUS_USB 0x3
/***********************************************************************/
/** \name Begin the DRM... */
@@ -683,6 +686,34 @@ struct drm_master {
void *driver_priv; /**< Private structure for driver to use */
};
+/* Size of ringbuffer for vblank timestamps. Just double-buffer
+ * in initial implementation.
+ */
+#define DRM_VBLANKTIME_RBSIZE 2
+
+/* Flags and return codes for get_vblank_timestamp() driver function. */
+#define DRM_CALLED_FROM_VBLIRQ 1
+#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
+#define DRM_VBLANKTIME_INVBL (1 << 1)
+
+/* get_scanout_position() return flags */
+#define DRM_SCANOUTPOS_VALID (1 << 0)
+#define DRM_SCANOUTPOS_INVBL (1 << 1)
+#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
+
+struct drm_bus {
+ int bus_type;
+ int (*get_irq)(struct drm_device *dev);
+ const char *(*get_name)(struct drm_device *dev);
+ int (*set_busid)(struct drm_device *dev, struct drm_master *master);
+ int (*set_unique)(struct drm_device *dev, struct drm_master *master,
+ struct drm_unique *unique);
+ int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p);
+ /* hooks that are for PCI */
+ int (*agp_init)(struct drm_device *dev);
+
+};
+
/**
* DRM driver structure. This structure represents the common code for
* a family of cards. There will be one drm_device for each card present
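struct drm_bus pulls the bus-specific pieces (IRQ lookup, bus-id handling, AGP setup) out of the DRIVER_USE_PLATFORM_DEVICE special-casing that this patch removes further down. A rough sketch of what the PCI instance of these hooks could look like; the names are hypothetical, and in the actual series the implementation lives in the DRM core rather than in individual drivers:

static int example_pci_get_irq(struct drm_device *dev)
{
	return dev->pdev->irq;          /* what drm_dev_to_irq() used to open-code */
}

static const char *example_pci_get_name(struct drm_device *dev)
{
	return "pci";
}

static struct drm_bus example_pci_bus = {
	.bus_type = DRIVER_BUS_PCI,
	.get_irq  = example_pci_get_irq,
	.get_name = example_pci_get_name,
	/* .set_busid, .set_unique, .irq_by_busid and .agp_init are wired up
	 * the same way for the PCI case. */
};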
@@ -760,6 +791,68 @@ struct drm_driver {
*/
int (*device_is_agp) (struct drm_device *dev);
+ /**
+ * Called by vblank timestamping code.
+ *
+ * Return the current display scanout position from a crtc.
+ *
+ * \param dev DRM device.
+ * \param crtc Id of the crtc to query.
+ * \param *vpos Target location for current vertical scanout position.
+ * \param *hpos Target location for current horizontal scanout position.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * \return Flags, or'ed together as follows:
+ *
+ * DRM_SCANOUTPOS_VALID = Query successful.
+ * DRM_SCANOUTPOS_INVBL = Inside vblank.
+ * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of
+ * this flag means that returned position may be offset by a constant
+ * but unknown small number of scanlines wrt. real scanout position.
+ *
+ */
+ int (*get_scanout_position) (struct drm_device *dev, int crtc,
+ int *vpos, int *hpos);
+
+ /**
+ * Called by \c drm_get_last_vbltimestamp. Should return a precise
+ * timestamp when the most recent VBLANK interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of VBLANK will start scanning out,
+ * the time immediately after end of the VBLANK interval. If the
+ * @crtc is currently inside VBLANK, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * \param dev DRM device handle.
+ * \param crtc crtc for which timestamp should be returned.
+ * \param *max_error Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most *max_error nanoseconds.
+ * Returns the true upper bound on error for the timestamp.
+ * \param *vblank_time Target location for returned vblank timestamp.
+ * \param flags 0 = Defaults, no special treatment needed.
+ * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank
+ * irq handler. Some drivers need to apply some workarounds
+ * for gpu-specific vblank irq quirks if flag is set.
+ *
+ * \returns
+ * Zero if timestamping isn't supported in current display mode or a
+ * negative number on failure. A positive status code on success,
+ * which describes how the vblank_time timestamp was computed.
+ */
+ int (*get_vblank_timestamp) (struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
/* these have to be filled in */
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
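A hedged sketch of how a driver could implement the two new hooks: get_scanout_position() translates a hardware scanline counter into the signed vpos convention described above, and get_vblank_timestamp() simply defers to the drm_calc_vbltimestamp_from_scanoutpos() helper that this patch exports later in this header. All foo_* helpers are hypothetical stand-ins for register access and crtc lookup:

static int foo_get_scanout_position(struct drm_device *dev, int crtc,
				    int *vpos, int *hpos)
{
	int line = foo_read_scanline(dev, crtc);        /* hypothetical hw read */
	int vbl_start = foo_vblank_start(dev, crtc);    /* first vblank line */
	int vtotal = foo_vtotal(dev, crtc);             /* total lines per frame */

	*hpos = 0;                      /* this hw has no horizontal counter */
	if (line >= vbl_start) {
		/* Inside vblank: report lines left until active scanout resumes. */
		*vpos = line - vtotal;
		return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_INVBL;
	}
	*vpos = line;
	return DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
}

static int foo_get_vblank_timestamp(struct drm_device *dev, int crtc,
				    int *max_error, struct timeval *vblank_time,
				    unsigned flags)
{
	struct drm_crtc *drmcrtc = foo_crtc_from_pipe(dev, crtc);  /* hypothetical */

	return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
						     vblank_time, flags, drmcrtc);
}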
@@ -803,6 +896,17 @@ struct drm_driver {
/* vga arb irq handler */
void (*vgaarb_irq)(struct drm_device *dev, bool state);
+ /* dumb alloc support */
+ int (*dumb_create)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+ int (*dumb_map_offset)(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
+ int (*dumb_destroy)(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle);
+
/* Driver private ops for this object */
struct vm_operations_struct *gem_vm_ops;
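Drivers opt into the new dumb-buffer ioctls by filling in these three hooks; the generic drm_mode_*_dumb_ioctl() entry points declared in drm_crtc.h dispatch to them. A hedged sketch for a GEM-based driver, where foo_gem_create_with_handle() and foo_gem_map_offset() stand in for driver-specific object allocation and mmap-offset setup:

static int foo_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = (uint64_t)args->pitch * args->height;
	/* Hypothetical: allocate a linear GEM object and return a handle. */
	return foo_gem_create_with_handle(file_priv, dev, args->size,
					  &args->handle);
}

static int foo_dumb_map_offset(struct drm_file *file_priv, struct drm_device *dev,
			       uint32_t handle, uint64_t *offset)
{
	/* Hypothetical: look up the object and report its fake mmap offset. */
	return foo_gem_map_offset(file_priv, dev, handle, offset);
}

static int foo_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
			    uint32_t handle)
{
	/* drm_gem_handle_delete() is exported by this patch for exactly this. */
	return drm_gem_handle_delete(file_priv, handle);
}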
@@ -818,8 +922,13 @@ struct drm_driver {
struct drm_ioctl_desc *ioctls;
int num_ioctls;
struct file_operations fops;
- struct pci_driver pci_driver;
- struct platform_device *platform_device;
+ union {
+ struct pci_driver *pci;
+ struct platform_device *platform_device;
+ struct usb_driver *usb;
+ } kdriver;
+ struct drm_bus *bus;
+
/* List of devices hanging off this driver */
struct list_head device_list;
};
@@ -983,6 +1092,8 @@ struct drm_device {
wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
+ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */
+ spinlock_t vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */
spinlock_t vbl_lock;
atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
u32 *last_vblank; /* protected by dev->vbl_lock, used */
@@ -1020,6 +1131,7 @@ struct drm_device {
#endif
struct platform_device *platformdev; /**< Platform device structure */
+ struct usb_device *usbdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
int num_crtcs; /**< Number of CRTCs on this device */
@@ -1041,12 +1153,14 @@ struct drm_device {
/*@{ */
spinlock_t object_name_lock;
struct idr object_name_idr;
- uint32_t invalidate_domains; /* domains pending invalidation */
- uint32_t flush_domains; /* domains pending flush */
/*@} */
-
+ int switch_power_state;
};
+#define DRM_SWITCH_POWER_ON 0
+#define DRM_SWITCH_POWER_OFF 1
+#define DRM_SWITCH_POWER_CHANGING 2
+
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
{
@@ -1055,28 +1169,9 @@ static __inline__ int drm_core_check_feature(struct drm_device *dev,
static inline int drm_dev_to_irq(struct drm_device *dev)
{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return platform_get_irq(dev->platformdev, 0);
- else
- return dev->pdev->irq;
+ return dev->driver->bus->get_irq(dev);
}
-static inline int drm_get_pci_domain(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
-
-#ifndef __alpha__
- /* For historical reasons, drm_get_pci_domain() is busticated
- * on most archs and has to remain so for userspace interface
- * < 1.4, except on alpha which was right from the beginning
- */
- if (dev->if_version < 0x10004)
- return 0;
-#endif /* __alpha__ */
-
- return pci_domain_nr(dev->pdev->bus);
-}
#if __OS_HAS_AGP
static inline int drm_core_has_AGP(struct drm_device *dev)
@@ -1130,8 +1225,6 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
/*@{*/
/* Driver support (drm_drv.h) */
-extern int drm_init(struct drm_driver *driver);
-extern void drm_exit(struct drm_driver *driver);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
@@ -1284,11 +1377,22 @@ extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
-extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
+ struct timeval *vblanktime);
+extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern void drm_vblank_off(struct drm_device *dev, int crtc);
extern void drm_vblank_cleanup(struct drm_device *dev);
+extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
+ struct timeval *tvblank, unsigned flags);
+extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
+ int crtc, int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags,
+ struct drm_crtc *refcrtc);
+extern void drm_calc_timestamping_constants(struct drm_crtc *crtc);
+
/* Modesetting support */
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc);
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc);
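drm_vblank_count_and_time() returns the vblank count together with the timestamp of that same vblank, taken from the new per-crtc timestamp ringbuffer. A hedged sketch of a pageflip-completion path using it to fill a vblank event; the drm_pending_vblank_event/drm_event_vblank field names follow drm.h, and the actual event delivery is elided:

static void foo_finish_pageflip(struct drm_device *dev, int crtc,
				struct drm_pending_vblank_event *e)
{
	struct timeval now;
	u32 seq;

	/* Count and timestamp are guaranteed to belong to the same vblank. */
	seq = drm_vblank_count_and_time(dev, crtc, &now);

	e->event.sequence = seq;
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_usec;
	/* ... queue e->base on the file's event list and wake readers ... */

	drm_vblank_put(dev, crtc);
}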
@@ -1321,7 +1425,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
@@ -1331,15 +1434,14 @@ extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
struct drm_master *drm_master_create(struct drm_minor *minor);
extern struct drm_master *drm_master_get(struct drm_master *master);
extern void drm_master_put(struct drm_master **master);
-extern int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-extern int drm_get_platform_dev(struct platform_device *pdev,
- struct drm_driver *driver);
+
extern void drm_put_dev(struct drm_device *dev);
extern int drm_put_minor(struct drm_minor **minor);
extern unsigned int drm_debug;
+extern unsigned int drm_vblank_offdelay;
+extern unsigned int drm_timestamp_precision;
+
extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern struct dentry *drm_debugfs_root;
@@ -1450,6 +1552,7 @@ drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
u32 *handlep);
+int drm_gem_handle_delete(struct drm_file *filp, u32 handle);
static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
@@ -1522,11 +1625,21 @@ static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
return NULL;
}
-static __inline__ int drm_device_is_agp(struct drm_device *dev)
+static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
+}
+
+#include "drm_mem_util.h"
+
+extern int drm_fill_in_dev(struct drm_device *dev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver);
+int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
+/*@}*/
+/* PCI section */
+static __inline__ int drm_pci_device_is_agp(struct drm_device *dev)
+{
if (dev->driver->device_is_agp != NULL) {
int err = (*dev->driver->device_is_agp) (dev);
@@ -1538,35 +1651,26 @@ static __inline__ int drm_device_is_agp(struct drm_device *dev)
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}
-static __inline__ int drm_device_is_pcie(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return 0;
- else
- return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
-}
-static __inline__ void drm_core_dropmap(struct drm_local_map *map)
+static __inline__ int drm_pci_device_is_pcie(struct drm_device *dev)
{
+ return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
-#include "drm_mem_util.h"
-
-static inline void *drm_get_device(struct drm_device *dev)
-{
- if (drm_core_check_feature(dev, DRIVER_USE_PLATFORM_DEVICE))
- return dev->platformdev;
- else
- return dev->pdev;
-}
-extern int drm_platform_init(struct drm_driver *driver);
-extern int drm_pci_init(struct drm_driver *driver);
-extern int drm_fill_in_dev(struct drm_device *dev,
+extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+extern int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
-int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type);
-/*@}*/
+
+
+/* platform section */
+extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
+extern void drm_platform_exit(struct drm_driver *driver, struct platform_device *platform_device);
+
+extern int drm_get_platform_dev(struct platform_device *pdev,
+ struct drm_driver *driver);
#endif /* __KERNEL__ */
#endif
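With drm_init()/drm_exit() removed, module load and unload go through the bus-specific entry points declared above. A hedged sketch of a PCI driver's module glue under the new scheme; the foo_* tables and callbacks are hypothetical, and it is assumed that drm_pci_init() records the pci_driver in drm_driver::kdriver and points drm_driver::bus at the core's PCI bus implementation:

static struct pci_driver foo_pci_driver = {
	.name     = "foo",
	.id_table = foo_pciidlist,      /* hypothetical PCI ID table */
	.probe    = foo_pci_probe,      /* typically just calls drm_get_pci_dev() */
	.remove   = foo_pci_remove,     /* typically calls drm_put_dev() */
};

static int __init foo_init(void)
{
	return drm_pci_init(&foo_drm_driver, &foo_pci_driver);
}

static void __exit foo_exit(void)
{
	drm_pci_exit(&foo_drm_driver, &foo_pci_driver);
}

module_init(foo_init);
module_exit(foo_exit);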
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 029aa688e787..60edf9be31e5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -275,6 +275,7 @@ struct drm_pending_vblank_event;
/**
* drm_crtc_funcs - control CRTCs for a given device
+ * @reset: reset CRTC after state has been invalidated (e.g. resume)
* @dpms: control display power levels
* @save: save CRTC state
* @restore: restore CRTC state
@@ -302,6 +303,8 @@ struct drm_crtc_funcs {
void (*save)(struct drm_crtc *crtc); /* suspend? */
/* Restore CRTC state */
void (*restore)(struct drm_crtc *crtc); /* resume? */
+ /* Reset CRTC state */
+ void (*reset)(struct drm_crtc *crtc);
/* cursor controls */
int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
@@ -351,8 +354,14 @@ struct drm_crtc {
bool enabled;
+ /* Requested mode from modesetting. */
struct drm_display_mode mode;
+ /* Programmed mode in hw, after adjustments for encoders,
+ * crtc, panel scaling etc. Needed for timestamping etc.
+ */
+ struct drm_display_mode hwmode;
+
int x, y;
const struct drm_crtc_funcs *funcs;
@@ -360,6 +369,9 @@ struct drm_crtc {
uint32_t gamma_size;
uint16_t *gamma_store;
+ /* Constants needed for precise vblank and swap timestamping. */
+ s64 framedur_ns, linedur_ns, pixeldur_ns;
+
/* if you are using the helper */
void *helper_private;
};
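crtc->hwmode and the *dur_ns constants feed the scanout-position timestamping added to drmP.h. A hedged sketch of where they get filled in; whether the driver or the CRTC helper does this is implementation-specific, and the mode_set signature below follows the helper convention of this era, so treat it as an assumption:

static int foo_crtc_mode_set(struct drm_crtc *crtc,
			     struct drm_display_mode *mode,
			     struct drm_display_mode *adjusted_mode,
			     int x, int y, struct drm_framebuffer *old_fb)
{
	/* ... program the hardware with adjusted_mode ... */

	/* Record what the hardware really scans out and derive the per-frame,
	 * per-line and per-pixel durations used by vblank timestamping. */
	crtc->hwmode = *adjusted_mode;
	drm_calc_timestamping_constants(crtc);
	return 0;
}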
@@ -370,6 +382,7 @@ struct drm_crtc {
* @dpms: set power state (see drm_crtc_funcs above)
* @save: save connector state
* @restore: restore connector state
+ * @reset: reset connector after state has been invalidated (e.g. resume)
* @mode_valid: is this mode valid on the given connector?
* @mode_fixup: try to fixup proposed mode for this connector
* @mode_set: set this mode
@@ -387,6 +400,7 @@ struct drm_connector_funcs {
void (*dpms)(struct drm_connector *connector, int mode);
void (*save)(struct drm_connector *connector);
void (*restore)(struct drm_connector *connector);
+ void (*reset)(struct drm_connector *connector);
/* Check to see if anything is attached to the connector.
* @force is set to false whilst polling, true when checking the
@@ -404,6 +418,7 @@ struct drm_connector_funcs {
};
struct drm_encoder_funcs {
+ void (*reset)(struct drm_encoder *encoder);
void (*destroy)(struct drm_encoder *encoder);
};
@@ -644,9 +659,10 @@ extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode);
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode);
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev,
- struct drm_display_mode *mode);
+ const struct drm_display_mode *mode);
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode);
extern void drm_mode_config_init(struct drm_device *dev);
+extern void drm_mode_config_reset(struct drm_device *dev);
extern void drm_mode_config_cleanup(struct drm_device *dev);
extern void drm_mode_set_name(struct drm_display_mode *mode);
extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2);
@@ -669,8 +685,8 @@ extern void drm_mode_validate_size(struct drm_device *dev,
extern void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
extern void drm_mode_sort(struct list_head *mode_list);
-extern int drm_mode_hsync(struct drm_display_mode *mode);
-extern int drm_mode_vrefresh(struct drm_display_mode *mode);
+extern int drm_mode_hsync(const struct drm_display_mode *mode);
+extern int drm_mode_vrefresh(const struct drm_display_mode *mode);
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p,
int adjust_flags);
extern void drm_mode_connector_list_update(struct drm_connector *connector);
@@ -782,4 +798,11 @@ extern int drm_add_modes_noedid(struct drm_connector *connector,
extern bool drm_edid_is_valid(struct edid *edid);
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh);
+
+extern int drm_mode_create_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
#endif /* __DRM_CRTC_H__ */
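The new ->reset() hooks and drm_mode_config_reset() exist so that cached software state can be brought back in sync once the hardware has lost it, e.g. across suspend/resume. A hedged sketch of the intended resume-path usage (foo_* is hypothetical):

static int foo_pm_resume(struct drm_device *dev)
{
	/* ... bring the hardware back up ... */

	/* Call the ->reset() hook of every crtc, encoder and connector so
	 * their cached state matches the freshly reset hardware. */
	drm_mode_config_reset(dev);

	/* ... then restore the previous mode configuration ... */
	return 0;
}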
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index 0af087a4d3b3..3650d5d011ee 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -45,14 +45,10 @@ struct drm_hash_item {
};
struct drm_open_hash {
- unsigned int size;
- unsigned int order;
- unsigned int fill;
struct hlist_head *table;
- int use_vmalloc;
+ u8 order;
};
-
extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bf01531193d5..e39177778601 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -62,11 +62,14 @@ struct drm_mm {
struct list_head unused_nodes;
int num_unused;
spinlock_t unused_lock;
+ unsigned int scan_check_range : 1;
unsigned scan_alignment;
unsigned long scan_size;
unsigned long scan_hit_start;
unsigned scan_hit_size;
unsigned scanned_blocks;
+ unsigned long scan_start;
+ unsigned long scan_end;
};
/*
@@ -145,6 +148,10 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
unsigned alignment);
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end);
int drm_mm_scan_add_block(struct drm_mm_node *node);
int drm_mm_scan_remove_block(struct drm_mm_node *node);
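drm_mm_init_scan_with_range() extends the eviction-scan API so callers can look for a hole inside a sub-range of the managed space, e.g. only the CPU-mappable part of a GTT. A hedged sketch of the scan contract; foo_obj, its LRU list and the eviction step are hypothetical and driver-specific:

struct foo_obj {
	struct drm_mm_node *node;       /* as returned by drm_mm_get_block() */
	struct list_head lru_link;
};

static bool foo_scan_for_hole(struct drm_mm *mm, struct list_head *lru,
			      unsigned long size, unsigned alignment,
			      unsigned long start, unsigned long end)
{
	struct foo_obj *obj;
	bool found = false;

	drm_mm_init_scan_with_range(mm, size, alignment, start, end);

	list_for_each_entry(obj, lru, lru_link) {
		if (drm_mm_scan_add_block(obj->node)) {
			found = true;   /* enough blocks to form the hole */
			break;
		}
	}

	/* Every block fed to the scan must later be handed back through
	 * drm_mm_scan_remove_block(); the blocks for which it returns nonzero
	 * are the ones that actually have to be evicted to create the hole.
	 * That bookkeeping, and the eviction itself, is left out here. */
	return found;
}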
diff --git a/include/drm/drm_mode.h b/include/drm/drm_mode.h
index 0fc7397c8f1f..ae6b7a3dbec7 100644
--- a/include/drm/drm_mode.h
+++ b/include/drm/drm_mode.h
@@ -344,4 +344,33 @@ struct drm_mode_crtc_page_flip {
__u64 user_data;
};
+/* create a dumb scanout buffer */
+struct drm_mode_create_dumb {
+ uint32_t height;
+ uint32_t width;
+ uint32_t bpp;
+ uint32_t flags;
+ /* handle, pitch, size will be returned */
+ uint32_t handle;
+ uint32_t pitch;
+ uint64_t size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+};
+
+struct drm_mode_destroy_dumb {
+ uint32_t handle;
+};
+
#endif
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 883c1d439899..5ff1194dc2ea 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -28,7 +28,6 @@
{0x1002, 0x4156, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
{0x1002, 0x4237, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP}, \
{0x1002, 0x4242, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
- {0x1002, 0x4243, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
{0x1002, 0x4336, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS100|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4337, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
{0x1002, 0x4437, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS200|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
@@ -142,6 +141,42 @@
{0x1002, 0x5e4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
{0x1002, 0x5e4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6723, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6724, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6725, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6726, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6727, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6728, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6743, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6744, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6745, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6746, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6762, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6763, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6764, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6765, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6766, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -419,6 +454,10 @@
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9804, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/drm/drm_usb.h b/include/drm/drm_usb.h
new file mode 100644
index 000000000000..33506c11da8b
--- /dev/null
+++ b/include/drm/drm_usb.h
@@ -0,0 +1,15 @@
+#ifndef DRM_USB_H
+#define DRM_USB_H
+
+#include <drmP.h>
+
+#include <linux/usb.h>
+
+extern int drm_usb_init(struct drm_driver *driver, struct usb_driver *udriver);
+extern void drm_usb_exit(struct drm_driver *driver, struct usb_driver *udriver);
+
+int drm_get_usb_dev(struct usb_interface *interface,
+ const struct usb_device_id *id,
+ struct drm_driver *driver);
+
+#endif
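The new USB glue mirrors the PCI and platform paths: the interface probe hands the device to the DRM core via drm_get_usb_dev(), and registration goes through drm_usb_init()/drm_usb_exit() just as the PCI sketch after drmP.h used drm_pci_init(). A hedged sketch with hypothetical foo_* names and tables:

static int foo_usb_probe(struct usb_interface *interface,
			 const struct usb_device_id *id)
{
	return drm_get_usb_dev(interface, id, &foo_drm_driver);
}

static struct usb_driver foo_usb_driver = {
	.name       = "foo",
	.probe      = foo_usb_probe,
	.disconnect = foo_usb_disconnect,       /* typically calls drm_put_dev() */
	.id_table   = foo_usb_id_table,
};

static int __init foo_init(void)
{
	return drm_usb_init(&foo_drm_driver, &foo_usb_driver);
}

static void __exit foo_exit(void)
{
	drm_usb_exit(&foo_drm_driver, &foo_usb_driver);
}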
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
deleted file mode 100644
index 61315c29b8f3..000000000000
--- a/include/drm/i830_drm.h
+++ /dev/null
@@ -1,342 +0,0 @@
-#ifndef _I830_DRM_H_
-#define _I830_DRM_H_
-
-/* WARNING: These defines must be the same as what the Xserver uses.
- * if you change them, you must change the defines in the Xserver.
- *
- * KW: Actually, you can't ever change them because doing so would
- * break backwards compatibility.
- */
-
-#ifndef _I830_DEFINES_
-#define _I830_DEFINES_
-
-#define I830_DMA_BUF_ORDER 12
-#define I830_DMA_BUF_SZ (1<<I830_DMA_BUF_ORDER)
-#define I830_DMA_BUF_NR 256
-#define I830_NR_SAREA_CLIPRECTS 8
-
-/* Each region is a minimum of 64k, and there are at most 64 of them.
- */
-#define I830_NR_TEX_REGIONS 64
-#define I830_LOG_MIN_TEX_REGION_SIZE 16
-
-/* KW: These aren't correct but someone set them to two and then
- * released the module. Now we can't change them as doing so would
- * break backwards compatibility.
- */
-#define I830_TEXTURE_COUNT 2
-#define I830_TEXBLEND_COUNT I830_TEXTURE_COUNT
-
-#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
-
-#define I830_UPLOAD_CTX 0x1
-#define I830_UPLOAD_BUFFERS 0x2
-#define I830_UPLOAD_CLIPRECTS 0x4
-#define I830_UPLOAD_TEX0_IMAGE 0x100 /* handled clientside */
-#define I830_UPLOAD_TEX0_CUBE 0x200 /* handled clientside */
-#define I830_UPLOAD_TEX1_IMAGE 0x400 /* handled clientside */
-#define I830_UPLOAD_TEX1_CUBE 0x800 /* handled clientside */
-#define I830_UPLOAD_TEX2_IMAGE 0x1000 /* handled clientside */
-#define I830_UPLOAD_TEX2_CUBE 0x2000 /* handled clientside */
-#define I830_UPLOAD_TEX3_IMAGE 0x4000 /* handled clientside */
-#define I830_UPLOAD_TEX3_CUBE 0x8000 /* handled clientside */
-#define I830_UPLOAD_TEX_N_IMAGE(n) (0x100 << (n * 2))
-#define I830_UPLOAD_TEX_N_CUBE(n) (0x200 << (n * 2))
-#define I830_UPLOAD_TEXIMAGE_MASK 0xff00
-#define I830_UPLOAD_TEX0 0x10000
-#define I830_UPLOAD_TEX1 0x20000
-#define I830_UPLOAD_TEX2 0x40000
-#define I830_UPLOAD_TEX3 0x80000
-#define I830_UPLOAD_TEX_N(n) (0x10000 << (n))
-#define I830_UPLOAD_TEX_MASK 0xf0000
-#define I830_UPLOAD_TEXBLEND0 0x100000
-#define I830_UPLOAD_TEXBLEND1 0x200000
-#define I830_UPLOAD_TEXBLEND2 0x400000
-#define I830_UPLOAD_TEXBLEND3 0x800000
-#define I830_UPLOAD_TEXBLEND_N(n) (0x100000 << (n))
-#define I830_UPLOAD_TEXBLEND_MASK 0xf00000
-#define I830_UPLOAD_TEX_PALETTE_N(n) (0x1000000 << (n))
-#define I830_UPLOAD_TEX_PALETTE_SHARED 0x4000000
-#define I830_UPLOAD_STIPPLE 0x8000000
-
-/* Indices into buf.Setup where various bits of state are mirrored per
- * context and per buffer. These can be fired at the card as a unit,
- * or in a piecewise fashion as required.
- */
-
-/* Destbuffer state
- * - backbuffer linear offset and pitch -- invarient in the current dri
- * - zbuffer linear offset and pitch -- also invarient
- * - drawing origin in back and depth buffers.
- *
- * Keep the depth/back buffer state here to accommodate private buffers
- * in the future.
- */
-
-#define I830_DESTREG_CBUFADDR 0
-#define I830_DESTREG_DBUFADDR 1
-#define I830_DESTREG_DV0 2
-#define I830_DESTREG_DV1 3
-#define I830_DESTREG_SENABLE 4
-#define I830_DESTREG_SR0 5
-#define I830_DESTREG_SR1 6
-#define I830_DESTREG_SR2 7
-#define I830_DESTREG_DR0 8
-#define I830_DESTREG_DR1 9
-#define I830_DESTREG_DR2 10
-#define I830_DESTREG_DR3 11
-#define I830_DESTREG_DR4 12
-#define I830_DEST_SETUP_SIZE 13
-
-/* Context state
- */
-#define I830_CTXREG_STATE1 0
-#define I830_CTXREG_STATE2 1
-#define I830_CTXREG_STATE3 2
-#define I830_CTXREG_STATE4 3
-#define I830_CTXREG_STATE5 4
-#define I830_CTXREG_IALPHAB 5
-#define I830_CTXREG_STENCILTST 6
-#define I830_CTXREG_ENABLES_1 7
-#define I830_CTXREG_ENABLES_2 8
-#define I830_CTXREG_AA 9
-#define I830_CTXREG_FOGCOLOR 10
-#define I830_CTXREG_BLENDCOLR0 11
-#define I830_CTXREG_BLENDCOLR 12 /* Dword 1 of 2 dword command */
-#define I830_CTXREG_VF 13
-#define I830_CTXREG_VF2 14
-#define I830_CTXREG_MCSB0 15
-#define I830_CTXREG_MCSB1 16
-#define I830_CTX_SETUP_SIZE 17
-
-/* 1.3: Stipple state
- */
-#define I830_STPREG_ST0 0
-#define I830_STPREG_ST1 1
-#define I830_STP_SETUP_SIZE 2
-
-/* Texture state (per tex unit)
- */
-
-#define I830_TEXREG_MI0 0 /* GFX_OP_MAP_INFO (6 dwords) */
-#define I830_TEXREG_MI1 1
-#define I830_TEXREG_MI2 2
-#define I830_TEXREG_MI3 3
-#define I830_TEXREG_MI4 4
-#define I830_TEXREG_MI5 5
-#define I830_TEXREG_MF 6 /* GFX_OP_MAP_FILTER */
-#define I830_TEXREG_MLC 7 /* GFX_OP_MAP_LOD_CTL */
-#define I830_TEXREG_MLL 8 /* GFX_OP_MAP_LOD_LIMITS */
-#define I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS */
-#define I830_TEX_SETUP_SIZE 10
-
-#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
-#define I830_TEXREG_TM0S0 1
-#define I830_TEXREG_TM0S1 2
-#define I830_TEXREG_TM0S2 3
-#define I830_TEXREG_TM0S3 4
-#define I830_TEXREG_TM0S4 5
-#define I830_TEXREG_NOP0 6 /* noop */
-#define I830_TEXREG_NOP1 7 /* noop */
-#define I830_TEXREG_NOP2 8 /* noop */
-#define __I830_TEXREG_MCS 9 /* GFX_OP_MAP_COORD_SETS -- shared */
-#define __I830_TEX_SETUP_SIZE 10
-
-#define I830_FRONT 0x1
-#define I830_BACK 0x2
-#define I830_DEPTH 0x4
-
-#endif /* _I830_DEFINES_ */
-
-typedef struct _drm_i830_init {
- enum {
- I830_INIT_DMA = 0x01,
- I830_CLEANUP_DMA = 0x02
- } func;
- unsigned int mmio_offset;
- unsigned int buffers_offset;
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
- unsigned int back_pitch;
- unsigned int depth_pitch;
- unsigned int cpp;
-} drm_i830_init_t;
-
-/* Warning: If you change the SAREA structure you must change the Xserver
- * structure as well */
-
-typedef struct _drm_i830_tex_region {
- unsigned char next, prev; /* indices to form a circular LRU */
- unsigned char in_use; /* owned by a client, or free? */
- int age; /* tracked by clients to update local LRU's */
-} drm_i830_tex_region_t;
-
-typedef struct _drm_i830_sarea {
- unsigned int ContextState[I830_CTX_SETUP_SIZE];
- unsigned int BufferState[I830_DEST_SETUP_SIZE];
- unsigned int TexState[I830_TEXTURE_COUNT][I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState[I830_TEXBLEND_COUNT][I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed[I830_TEXBLEND_COUNT];
- unsigned int Palette[2][256];
- unsigned int dirty;
-
- unsigned int nbox;
- struct drm_clip_rect boxes[I830_NR_SAREA_CLIPRECTS];
-
- /* Maintain an LRU of contiguous regions of texture space. If
- * you think you own a region of texture memory, and it has an
- * age different to the one you set, then you are mistaken and
- * it has been stolen by another client. If global texAge
- * hasn't changed, there is no need to walk the list.
- *
- * These regions can be used as a proxy for the fine-grained
- * texture information of other clients - by maintaining them
- * in the same lru which is used to age their own textures,
- * clients have an approximate lru for the whole of global
- * texture space, and can make informed decisions as to which
- * areas to kick out. There is no need to choose whether to
- * kick out your own texture or someone else's - simply eject
- * them all in LRU order.
- */
-
- drm_i830_tex_region_t texList[I830_NR_TEX_REGIONS + 1];
- /* Last elt is sentinal */
- int texAge; /* last time texture was uploaded */
- int last_enqueue; /* last time a buffer was enqueued */
- int last_dispatch; /* age of the most recently dispatched buffer */
- int last_quiescent; /* */
- int ctxOwner; /* last context to upload state */
-
- int vertex_prim;
-
- int pf_enabled; /* is pageflipping allowed? */
- int pf_active;
- int pf_current_page; /* which buffer is being displayed? */
-
- int perf_boxes; /* performance boxes to be displayed */
-
- /* Here's the state for texunits 2,3:
- */
- unsigned int TexState2[I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState2[I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed2;
-
- unsigned int TexState3[I830_TEX_SETUP_SIZE];
- unsigned int TexBlendState3[I830_TEXBLEND_SIZE];
- unsigned int TexBlendStateWordsUsed3;
-
- unsigned int StippleState[I830_STP_SETUP_SIZE];
-} drm_i830_sarea_t;
-
-/* Flags for perf_boxes
- */
-#define I830_BOX_RING_EMPTY 0x1 /* populated by kernel */
-#define I830_BOX_FLIP 0x2 /* populated by kernel */
-#define I830_BOX_WAIT 0x4 /* populated by kernel & client */
-#define I830_BOX_TEXTURE_LOAD 0x8 /* populated by kernel */
-#define I830_BOX_LOST_CONTEXT 0x10 /* populated by client */
-
-/* I830 specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_I830_INIT 0x00
-#define DRM_I830_VERTEX 0x01
-#define DRM_I830_CLEAR 0x02
-#define DRM_I830_FLUSH 0x03
-#define DRM_I830_GETAGE 0x04
-#define DRM_I830_GETBUF 0x05
-#define DRM_I830_SWAP 0x06
-#define DRM_I830_COPY 0x07
-#define DRM_I830_DOCOPY 0x08
-#define DRM_I830_FLIP 0x09
-#define DRM_I830_IRQ_EMIT 0x0a
-#define DRM_I830_IRQ_WAIT 0x0b
-#define DRM_I830_GETPARAM 0x0c
-#define DRM_I830_SETPARAM 0x0d
-
-#define DRM_IOCTL_I830_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
-#define DRM_IOCTL_I830_COPY DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
-
-typedef struct _drm_i830_clear {
- int clear_color;
- int clear_depth;
- int flags;
- unsigned int clear_colormask;
- unsigned int clear_depthmask;
-} drm_i830_clear_t;
-
-/* These may be placeholders if we have more cliprects than
- * I830_NR_SAREA_CLIPRECTS. In that case, the client sets discard to
- * false, indicating that the buffer will be dispatched again with a
- * new set of cliprects.
- */
-typedef struct _drm_i830_vertex {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- int discard; /* client is finished with the buffer? */
-} drm_i830_vertex_t;
-
-typedef struct _drm_i830_copy_t {
- int idx; /* buffer index */
- int used; /* nr bytes in use */
- void __user *address; /* Address to copy from */
-} drm_i830_copy_t;
-
-typedef struct drm_i830_dma {
- void __user *virtual;
- int request_idx;
- int request_size;
- int granted;
-} drm_i830_dma_t;
-
-/* 1.3: Userspace can request & wait on irq's:
- */
-typedef struct drm_i830_irq_emit {
- int __user *irq_seq;
-} drm_i830_irq_emit_t;
-
-typedef struct drm_i830_irq_wait {
- int irq_seq;
-} drm_i830_irq_wait_t;
-
-/* 1.3: New ioctl to query kernel params:
- */
-#define I830_PARAM_IRQ_ACTIVE 1
-
-typedef struct drm_i830_getparam {
- int param;
- int __user *value;
-} drm_i830_getparam_t;
-
-/* 1.3: New ioctl to set kernel params:
- */
-#define I830_SETPARAM_USE_MI_BATCHBUFFER_START 1
-
-typedef struct drm_i830_setparam {
- int param;
- int value;
-} drm_i830_setparam_t;
-
-#endif /* _I830_DRM_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8c641bed9bbd..0039f1f97ad8 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -287,6 +287,9 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
+#define I915_PARAM_HAS_RELAXED_FENCING 12
+#define I915_PARAM_HAS_COHERENT_RINGS 13
+#define I915_PARAM_HAS_EXEC_CONSTANTS 14
typedef struct drm_i915_getparam {
int param;
@@ -633,6 +636,17 @@ struct drm_i915_gem_execbuffer2 {
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
+
+/* Used for switching the constants addressing mode on gen4+ RENDER ring.
+ * Gen6+ only supports relative addressing to dynamic state (default) and
+ * absolute addressing.
+ *
+ * These flags are ignored for the BSD and BLT rings.
+ */
+#define I915_EXEC_CONSTANTS_MASK (3<<6)
+#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
+#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
+#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
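Userspace picks the constants addressing mode per batch through bits 6-7 of drm_i915_gem_execbuffer2::flags, alongside the ring-selection bits already present. A hedged userspace-side sketch (buffer and relocation setup elided):

static void foo_set_constants_mode(struct drm_i915_gem_execbuffer2 *execbuf)
{
	/* Select the render ring and absolute constants addressing. */
	execbuf->flags &= ~I915_EXEC_CONSTANTS_MASK;
	execbuf->flags |= I915_EXEC_RENDER | I915_EXEC_CONSTANTS_ABSOLUTE;
}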
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index d3c81946f613..9e343c0998b4 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -2,17 +2,40 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
-struct intel_gtt {
- /* Number of stolen gtt entries at the beginning. */
- unsigned int gtt_stolen_entries;
+
+const struct intel_gtt {
+ /* Size of memory reserved for graphics by the BIOS */
+ unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
-};
+ /* Whether i915 needs to use the dmar apis or not. */
+ unsigned int needs_dmar : 1;
+} *intel_gtt_get(void);
-struct intel_gtt *intel_gtt_get(void);
+void intel_gtt_chipset_flush(void);
+void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
+void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
+int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
+ struct scatterlist **sg_list, int *num_sg);
+void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags);
+void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
+ struct page **pages, unsigned int flags);
-#endif
+/* Special gtt memory types */
+#define AGP_DCACHE_MEMORY 1
+#define AGP_PHYS_MEMORY 2
+
+/* New caching attributes for gen6/sandybridge */
+#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
+#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
+/* flag for GFDT type */
+#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
+
+#endif
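intel_gtt_get() now hands back a read-only description of the GTT (with BIOS-stolen memory reported as a byte size rather than as the old gtt_stolen_entries), and the flush/map/insert helpers replace the indirection through the AGP driver that i915 used before. A hedged consumer-side sketch:

static void foo_probe_gtt(void)
{
	const struct intel_gtt *gtt = intel_gtt_get();

	printk(KERN_INFO "GTT: %u total entries, %u mappable, %u bytes stolen%s\n",
	       gtt->gtt_total_entries, gtt->gtt_mappable_entries,
	       gtt->stolen_size, gtt->needs_dmar ? ", needs dmar" : "");

	/* After CPU writes that must become visible to the GPU: */
	intel_gtt_chipset_flush();
}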
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index 01a714119506..e2cfe80f6fca 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -71,15 +71,14 @@ struct drm_nouveau_gpuobj_free {
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
#define NOUVEAU_GETPARAM_PCI_DEVICE 4
#define NOUVEAU_GETPARAM_BUS_TYPE 5
-#define NOUVEAU_GETPARAM_FB_PHYSICAL 6
-#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
-#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
#define NOUVEAU_GETPARAM_CHIPSET_ID 11
#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
@@ -95,6 +94,12 @@ struct drm_nouveau_setparam {
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
+#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
+#define NOUVEAU_GEM_TILE_16BPP 0x00000001
+#define NOUVEAU_GEM_TILE_32BPP 0x00000002
+#define NOUVEAU_GEM_TILE_ZETA 0x00000004
+#define NOUVEAU_GEM_TILE_NONCONTIG 0x00000008
+
struct drm_nouveau_gem_info {
uint32_t handle;
uint32_t domain;
@@ -164,7 +169,6 @@ struct drm_nouveau_gem_pushbuf {
};
#define NOUVEAU_GEM_CPU_PREP_NOWAIT 0x00000001
-#define NOUVEAU_GEM_CPU_PREP_NOBLOCK 0x00000002
#define NOUVEAU_GEM_CPU_PREP_WRITE 0x00000004
struct drm_nouveau_gem_cpu_prep {
uint32_t handle;
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 10f8b53bdd40..e5c607a02d57 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -906,6 +906,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_ACCEL_WORKING2 0x05
#define RADEON_INFO_TILING_CONFIG 0x06
#define RADEON_INFO_WANT_HYPERZ 0x07
+#define RADEON_INFO_WANT_CMASK 0x08 /* get access to CMASK on r300 */
+#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
struct drm_radeon_info {
uint32_t request;
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index beafc156a535..50852aad260a 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -74,6 +74,8 @@ struct ttm_placement {
* @is_iomem: is this io memory ?
* @size: size in byte
* @offset: offset from the base address
+ * @io_reserved_vm: The VM system has a refcount in @io_reserved_count
+ * @io_reserved_count: Refcounting the numbers of callers to ttm_mem_io_reserve
*
* Structure indicating the bus placement of an object.
*/
@@ -83,7 +85,8 @@ struct ttm_bus_placement {
unsigned long size;
unsigned long offset;
bool is_iomem;
- bool io_reserved;
+ bool io_reserved_vm;
+ uint64_t io_reserved_count;
};
@@ -154,7 +157,6 @@ struct ttm_tt;
* keeps one refcount. When this refcount reaches zero,
* the object is destroyed.
* @event_queue: Queue for processes waiting on buffer object status change.
- * @lock: spinlock protecting mostly synchronization members.
* @mem: structure describing current placement.
* @persistant_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
@@ -213,7 +215,6 @@ struct ttm_buffer_object {
struct kref kref;
struct kref list_kref;
wait_queue_head_t event_queue;
- spinlock_t lock;
/**
* Members protected by the bo::reserved lock.
@@ -237,6 +238,7 @@ struct ttm_buffer_object {
struct list_head lru;
struct list_head ddestroy;
struct list_head swap;
+ struct list_head io_reserve_lru;
uint32_t val_seq;
bool seq_valid;
@@ -248,10 +250,10 @@ struct ttm_buffer_object {
atomic_t reserved;
/**
- * Members protected by the bo::lock
+ * Members protected by struct buffer_object_device::fence_lock
* In addition, setting sync_obj to anything else
* than NULL requires bo::reserved to be held. This allows for
- * checking NULL while reserved but not holding bo::lock.
+ * checking NULL while reserved but not holding the mentioned lock.
*/
void *sync_obj_arg;
@@ -364,6 +366,44 @@ extern int ttm_bo_validate(struct ttm_buffer_object *bo,
*/
extern void ttm_bo_unref(struct ttm_buffer_object **bo);
+
+/**
+ * ttm_bo_list_ref_sub
+ *
+ * @bo: The buffer object.
+ * @count: The number of references with which to decrease @bo::list_kref;
+ * @never_free: The refcount should not reach zero with this operation.
+ *
+ * Release @count lru list references to this buffer object.
+ */
+extern void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
+ bool never_free);
+
+/**
+ * ttm_bo_add_to_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Add this bo to the relevant mem type lru and, if it's backed by
+ * system pages (ttms) to the swap list.
+ * This function must be called with struct ttm_bo_global::lru_lock held, and
+ * is typically called immediately prior to unreserving a bo.
+ */
+extern void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
+
+/**
+ * ttm_bo_del_from_lru
+ *
+ * @bo: The buffer object.
+ *
+ * Remove this bo from all lru lists used to lookup and reserve an object.
+ * This function must be called with struct ttm_bo_global::lru_lock held,
+ * and is usually called just immediately after the bo has been reserved to
+ * avoid recursive reservation from lru lists.
+ */
+extern int ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
+
+
/**
* ttm_bo_lock_delayed_workqueue
*
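The new LRU helpers make the list bookkeeping explicit for callers that reserve buffers while already holding the global lru_lock (see the *_locked reserve variants added to ttm_bo_driver.h below). A hedged sketch of the documented pattern; the glob/lru_lock field names are taken from the TTM structures, and the actual validation work is elided:

static int foo_touch_buffer(struct ttm_buffer_object *bo)
{
	int ret, put_count;

	spin_lock(&bo->glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, true, false, false, 0);
	if (ret) {
		spin_unlock(&bo->glob->lru_lock);
		return ret;
	}

	/* Take the bo off the lookup/reserve lists while we own it ... */
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&bo->glob->lru_lock);
	ttm_bo_list_ref_sub(bo, put_count, true);   /* drop the lru list_kref refs */

	/* ... move/validate/fence the buffer here ... */

	spin_lock(&bo->glob->lru_lock);
	ttm_bo_add_to_lru(bo);          /* back on the lru just before ... */
	ttm_bo_unreserve_locked(bo);    /* ... unreserving it */
	spin_unlock(&bo->glob->lru_lock);
	return 0;
}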
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 4d97014e8c8d..38ff06822609 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -183,30 +183,6 @@ struct ttm_tt {
#define TTM_MEMTYPE_FLAG_MAPPABLE (1 << 1) /* Memory mappable */
#define TTM_MEMTYPE_FLAG_CMA (1 << 3) /* Can't map aperture */
-/**
- * struct ttm_mem_type_manager
- *
- * @has_type: The memory type has been initialized.
- * @use_type: The memory type is enabled.
- * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
- * managed by this memory type.
- * @gpu_offset: If used, the GPU offset of the first managed page of
- * fixed memory or the first managed location in an aperture.
- * @size: Size of the managed region.
- * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
- * as defined in ttm_placement_common.h
- * @default_caching: The default caching policy used for a buffer object
- * placed in this memory type if the user doesn't provide one.
- * @manager: The range manager used for this memory type. FIXME: If the aperture
- * has a page size different from the underlying system, the granularity
- * of this manager should take care of this. But the range allocating code
- * in ttm_bo.c needs to be modified for this.
- * @lru: The lru list for this memory type.
- *
- * This structure is used to identify and manage memory types for a device.
- * It's set up by the ttm_bo_driver::init_mem_type method.
- */
-
struct ttm_mem_type_manager;
struct ttm_mem_type_manager_func {
@@ -291,6 +267,36 @@ struct ttm_mem_type_manager_func {
void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
};
+/**
+ * struct ttm_mem_type_manager
+ *
+ * @has_type: The memory type has been initialized.
+ * @use_type: The memory type is enabled.
+ * @flags: TTM_MEMTYPE_XX flags identifying the traits of the memory
+ * managed by this memory type.
+ * @gpu_offset: If used, the GPU offset of the first managed page of
+ * fixed memory or the first managed location in an aperture.
+ * @size: Size of the managed region.
+ * @available_caching: A mask of available caching types, TTM_PL_FLAG_XX,
+ * as defined in ttm_placement_common.h
+ * @default_caching: The default caching policy used for a buffer object
+ * placed in this memory type if the user doesn't provide one.
+ * @func: structure pointer implementing the range manager. See above
+ * @priv: Driver private closure for @func.
+ * @io_reserve_mutex: Mutex optionally protecting shared io_reserve structures
+ * @use_io_reserve_lru: Use an lru list to try to unreserve io_mem_regions
+ * reserved by the TTM vm system.
+ * @io_reserve_lru: Optional lru list for unreserving io mem regions.
+ * @io_reserve_fastpath: Only use bdev::driver::io_mem_reserve to obtain
+ * static information. bdev::driver::io_mem_free is never used.
+ * @lru: The lru list for this memory type.
+ *
+ * This structure is used to identify and manage memory types for a device.
+ * It's set up by the ttm_bo_driver::init_mem_type method.
+ */
+
+
+
struct ttm_mem_type_manager {
struct ttm_bo_device *bdev;
@@ -307,6 +313,15 @@ struct ttm_mem_type_manager {
uint32_t default_caching;
const struct ttm_mem_type_manager_func *func;
void *priv;
+ struct mutex io_reserve_mutex;
+ bool use_io_reserve_lru;
+ bool io_reserve_fastpath;
+
+ /*
+ * Protected by @io_reserve_mutex:
+ */
+
+ struct list_head io_reserve_lru;
/*
* Protected by the global->lru_lock.
@@ -514,9 +529,12 @@ struct ttm_bo_global {
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
+ * @fence_lock: Protects the synchronizing members on *all* bos belonging
+ * to this device.
* @addr_space_mm: Range manager for the device address space.
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
+ * @val_seq: Current validation sequence.
* @nice_mode: Try nicely to wait for buffer idle when cleaning a manager.
* If a GPU lockup has been detected, this is forced to 0.
* @dev_mapping: A pointer to the struct address_space representing the
@@ -536,6 +554,7 @@ struct ttm_bo_device {
struct device *dev;
rwlock_t vm_lock;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
+ spinlock_t fence_lock;
/*
* Protected by the vm lock.
*/
@@ -546,6 +565,7 @@ struct ttm_bo_device {
* Protected by the global:lru lock.
*/
struct list_head ddestroy;
+ uint32_t val_seq;
/*
* Protected by load / firstopen / lastclose /unload sync.
@@ -758,31 +778,6 @@ extern void ttm_bo_mem_put_locked(struct ttm_buffer_object *bo,
extern int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait);
-/**
- * ttm_bo_pci_offset - Get the PCI offset for the buffer object memory.
- *
- * @bo Pointer to a struct ttm_buffer_object.
- * @bus_base On return the base of the PCI region
- * @bus_offset On return the byte offset into the PCI region
- * @bus_size On return the byte size of the buffer object or zero if
- * the buffer object memory is not accessible through a PCI region.
- *
- * Returns:
- * -EINVAL if the buffer object is currently not mappable.
- * 0 otherwise.
- */
-
-extern int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem,
- unsigned long *bus_base,
- unsigned long *bus_offset,
- unsigned long *bus_size);
-
-extern int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-extern void ttm_mem_io_free(struct ttm_bo_device *bdev,
- struct ttm_mem_reg *mem);
-
extern void ttm_bo_global_release(struct drm_global_reference *ref);
extern int ttm_bo_global_init(struct drm_global_reference *ref);
@@ -815,6 +810,22 @@ extern int ttm_bo_device_init(struct ttm_bo_device *bdev,
extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
/**
+ * ttm_bo_unmap_virtual_locked
+ *
+ * @bo: tear down the virtual mappings for this BO
+ *
+ * The caller must take ttm_mem_io_lock before calling this function.
+ */
+extern void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo);
+
+extern int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo);
+extern void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
+extern int ttm_mem_io_lock(struct ttm_mem_type_manager *man,
+ bool interruptible);
+extern void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
+
+
+/**
* ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -864,11 +875,44 @@ extern void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
* try again. (only if use_sequence == 1).
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
*/
extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible,
bool no_wait, bool use_sequence, uint32_t sequence);
+
+/**
+ * ttm_bo_reserve_locked:
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ * @interruptible: Sleep interruptible if waiting.
+ * @no_wait: Don't sleep while trying to reserve, rather return -EBUSY.
+ * @use_sequence: If @bo is already reserved, Only sleep waiting for
+ * it to become unreserved if @sequence < (@bo)->sequence.
+ *
+ * Must be called with struct ttm_bo_global::lru_lock held,
+ * and will not remove reserved buffers from the lru lists.
+ * The function may release the LRU spinlock if it needs to sleep.
+ * Otherwise identical to ttm_bo_reserve.
+ *
+ * Returns:
+ * -EAGAIN: The reservation may cause a deadlock.
+ * Release all buffer reservations, wait for @bo to become unreserved and
+ * try again. (only if use_sequence == 1).
+ * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ * -EBUSY: The function needed to sleep, but @no_wait was true
+ * -EDEADLK: Bo already reserved using @sequence. This error code will only
+ * be returned if @use_sequence is set to true.
+ */
+extern int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence,
+ uint32_t sequence);
+
/**
* ttm_bo_unreserve
*
@@ -879,6 +923,16 @@ extern int ttm_bo_reserve(struct ttm_buffer_object *bo,
extern void ttm_bo_unreserve(struct ttm_buffer_object *bo);
/**
+ * ttm_bo_unreserve_locked
+ *
+ * @bo: A pointer to a struct ttm_buffer_object.
+ *
+ * Unreserve a previous reservation of @bo.
+ * Needs to be called with struct ttm_bo_global::lru_lock held.
+ */
+extern void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo);
+
+/**
* ttm_bo_wait_unreserved
*
* @bo: A pointer to a struct ttm_buffer_object.
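With io-space reservations now refcounted and guarded by the per-memory-type io_reserve_mutex, code that tears down a BO's CPU mappings brackets the _locked unmap with the new lock helpers. A hedged sketch; dropping the per-BO VM reservation via ttm_mem_io_free_vm() is shown as an optional extra step:

static void foo_release_mappings(struct ttm_buffer_object *bo)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);            /* non-interruptible */
	ttm_bo_unmap_virtual_locked(bo);        /* zap CPU mappings of this BO */
	ttm_mem_io_free_vm(bo);                 /* optionally drop its VM io reservation */
	ttm_mem_io_unlock(man);
}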
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index cd2c475da9ea..26cc7f9ffa41 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -41,7 +41,10 @@
* @bo: refcounted buffer object pointer.
* @new_sync_obj_arg: New sync_obj_arg for @bo, to be used once
* adding a new sync object.
- * @reservied: Indicates whether @bo has been reserved for validation.
+ * @reserved: Indicates whether @bo has been reserved for validation.
+ * @removed: Indicates whether @bo has been removed from lru lists.
+ * @put_count: Number of outstanding references on bo::list_kref.
+ * @old_sync_obj: Pointer to a sync object about to be unreferenced
*/
struct ttm_validate_buffer {
@@ -49,6 +52,9 @@ struct ttm_validate_buffer {
struct ttm_buffer_object *bo;
void *new_sync_obj_arg;
bool reserved;
+ bool removed;
+ int put_count;
+ void *old_sync_obj;
};
/**
@@ -66,7 +72,6 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
* function ttm_eu_reserve_buffers
*
* @list: thread private list of ttm_validate_buffer structs.
- * @val_seq: A unique sequence number.
*
* Tries to reserve bos pointed to by the list entries for validation.
* If the function returns 0, all buffers are marked as "unfenced",
@@ -88,7 +93,7 @@ extern void ttm_eu_backoff_reservation(struct list_head *list);
* has failed.
*/
-extern int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq);
+extern int ttm_eu_reserve_buffers(struct list_head *list);
/**
* function ttm_eu_fence_buffer_objects.