226 files changed, 5269 insertions, 2028 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt new file mode 100644 index 000000000000..6ec1a880ac18 --- /dev/null +++ b/Documentation/devicetree/bindings/display/bridge/ti,ths8135.txt @@ -0,0 +1,46 @@ +THS8135 Video DAC +----------------- + +This is the binding for Texas Instruments THS8135 Video DAC bridge. + +Required properties: + +- compatible: Must be "ti,ths8135" + +Required nodes: + +This device has two video ports. Their connections are modelled using the OF +graph bindings specified in Documentation/devicetree/bindings/graph.txt. + +- Video port 0 for RGB input +- Video port 1 for VGA output + +Example +------- + +vga-bridge { + compatible = "ti,ths8135"; + #address-cells = <1>; + #size-cells = <0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + + vga_bridge_in: endpoint { + remote-endpoint = <&lcdc_out_vga>; + }; + }; + + port@1 { + reg = <1>; + + vga_bridge_out: endpoint { + remote-endpoint = <&vga_con_in>; + }; + }; + }; +}; diff --git a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt index 38dc9d60eef8..305a0e72a900 100644 --- a/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt +++ b/Documentation/devicetree/bindings/display/hisilicon/hisi-ade.txt @@ -16,7 +16,7 @@ Required properties: "clk_ade_core" for the ADE core clock. "clk_codec_jpeg" for the media NOC QoS clock, which use the same clock with jpeg codec. - "clk_ade_pix" for the ADE pixel clok. + "clk_ade_pix" for the ADE pixel clock. - assigned-clocks: Should contain "clk_ade_core" and "clk_codec_jpeg" clocks' phandle + clock-specifier pairs. - assigned-clock-rates: clock rates, one for each entry in assigned-clocks. diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt deleted file mode 100644 index ca44c5820585..000000000000 --- a/Documentation/dma-buf-sharing.txt +++ /dev/null @@ -1,482 +0,0 @@ - DMA Buffer Sharing API Guide - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - - Sumit Semwal - <sumit dot semwal at linaro dot org> - <sumit dot semwal at ti dot com> - -This document serves as a guide to device-driver writers on what is the dma-buf -buffer sharing API, how to use it for exporting and using shared buffers. - -Any device driver which wishes to be a part of DMA buffer sharing, can do so as -either the 'exporter' of buffers, or the 'user' of buffers. - -Say a driver A wants to use buffers created by driver B, then we call B as the -exporter, and A as buffer-user. - -The exporter -- implements and manages operations[1] for the buffer -- allows other users to share the buffer by using dma_buf sharing APIs, -- manages the details of buffer allocation, -- decides about the actual backing storage where this allocation happens, -- takes care of any migration of scatterlist - for all (shared) users of this - buffer, - -The buffer-user -- is one of (many) sharing users of the buffer. -- doesn't need to worry about how the buffer is allocated, or where. -- needs a mechanism to get access to the scatterlist that makes up this buffer - in memory, mapped into its own address space, so it can access the same area - of memory. - -dma-buf operations for device dma only --------------------------------------- - -The dma_buf buffer sharing API usage contains the following steps: - -1. Exporter announces that it wishes to export a buffer -2. 
Userspace gets the file descriptor associated with the exported buffer, and - passes it around to potential buffer-users based on use case -3. Each buffer-user 'connects' itself to the buffer -4. When needed, buffer-user requests access to the buffer from exporter -5. When finished with its use, the buffer-user notifies end-of-DMA to exporter -6. when buffer-user is done using this buffer completely, it 'disconnects' - itself from the buffer. - - -1. Exporter's announcement of buffer export - - The buffer exporter announces its wish to export a buffer. In this, it - connects its own private buffer data, provides implementation for operations - that can be performed on the exported dma_buf, and flags for the file - associated with this buffer. All these fields are filled in struct - dma_buf_export_info, defined via the DEFINE_DMA_BUF_EXPORT_INFO macro. - - Interface: - DEFINE_DMA_BUF_EXPORT_INFO(exp_info) - struct dma_buf *dma_buf_export(struct dma_buf_export_info *exp_info) - - If this succeeds, dma_buf_export allocates a dma_buf structure, and - returns a pointer to the same. It also associates an anonymous file with this - buffer, so it can be exported. On failure to allocate the dma_buf object, - it returns NULL. - - 'exp_name' in struct dma_buf_export_info is the name of exporter - to - facilitate information while debugging. It is set to KBUILD_MODNAME by - default, so exporters don't have to provide a specific name, if they don't - wish to. - - DEFINE_DMA_BUF_EXPORT_INFO macro defines the struct dma_buf_export_info, - zeroes it out and pre-populates exp_name in it. - - -2. Userspace gets a handle to pass around to potential buffer-users - - Userspace entity requests for a file-descriptor (fd) which is a handle to the - anonymous file associated with the buffer. It can then share the fd with other - drivers and/or processes. - - Interface: - int dma_buf_fd(struct dma_buf *dmabuf, int flags) - - This API installs an fd for the anonymous file associated with this buffer; - returns either 'fd', or error. - -3. Each buffer-user 'connects' itself to the buffer - - Each buffer-user now gets a reference to the buffer, using the fd passed to - it. - - Interface: - struct dma_buf *dma_buf_get(int fd) - - This API will return a reference to the dma_buf, and increment refcount for - it. - - After this, the buffer-user needs to attach its device with the buffer, which - helps the exporter to know of device buffer constraints. - - Interface: - struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, - struct device *dev) - - This API returns reference to an attachment structure, which is then used - for scatterlist operations. It will optionally call the 'attach' dma_buf - operation, if provided by the exporter. - - The dma-buf sharing framework does the bookkeeping bits related to managing - the list of all attachments to a buffer. - -Until this stage, the buffer-exporter has the option to choose not to actually -allocate the backing storage for this buffer, but wait for the first buffer-user -to request use of buffer for allocation. - - -4. When needed, buffer-user requests access to the buffer - - Whenever a buffer-user wants to use the buffer for any DMA, it asks for - access to the buffer using dma_buf_map_attachment API. At least one attach to - the buffer must have happened before map_dma_buf can be called. 
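To make steps 1-3 above concrete before the map interface is described, here is a minimal sketch (an editor's illustration, not part of the original guide); my_dma_buf_ops, my_priv, size and dev are placeholder names, and only the dma_buf_* calls are the interfaces documented here:

    /* Exporter side: announce the buffer (step 1). */
    DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
    struct dma_buf *dmabuf;
    int fd;

    exp_info.ops = &my_dma_buf_ops;   /* the exporter's struct dma_buf_ops [1] */
    exp_info.size = size;             /* size of the backing storage */
    exp_info.flags = O_CLOEXEC;
    exp_info.priv = my_priv;          /* exporter-private bookkeeping */

    dmabuf = dma_buf_export(&exp_info);

    /* Step 2: install an fd for the anonymous file, to be passed around. */
    fd = dma_buf_fd(dmabuf, O_CLOEXEC);

    /* Importer side (step 3): take a reference and attach the device. */
    struct dma_buf *buf = dma_buf_get(fd);
    struct dma_buf_attachment *attach = dma_buf_attach(buf, dev);

    if (IS_ERR(attach))
        dma_buf_put(buf);    /* drop the reference taken by dma_buf_get() */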
- - Interface: - struct sg_table * dma_buf_map_attachment(struct dma_buf_attachment *, - enum dma_data_direction); - - This is a wrapper to dma_buf->ops->map_dma_buf operation, which hides the - "dma_buf->ops->" indirection from the users of this interface. - - In struct dma_buf_ops, map_dma_buf is defined as - struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); - - It is one of the buffer operations that must be implemented by the exporter. - It should return the sg_table containing scatterlist for this buffer, mapped - into caller's address space. - - If this is being called for the first time, the exporter can now choose to - scan through the list of attachments for this buffer, collate the requirements - of the attached devices, and choose an appropriate backing storage for the - buffer. - - Based on enum dma_data_direction, it might be possible to have multiple users - accessing at the same time (for reading, maybe), or any other kind of sharing - that the exporter might wish to make available to buffer-users. - - map_dma_buf() operation can return -EINTR if it is interrupted by a signal. - - -5. When finished, the buffer-user notifies end-of-DMA to exporter - - Once the DMA for the current buffer-user is over, it signals 'end-of-DMA' to - the exporter using the dma_buf_unmap_attachment API. - - Interface: - void dma_buf_unmap_attachment(struct dma_buf_attachment *, - struct sg_table *); - - This is a wrapper to dma_buf->ops->unmap_dma_buf() operation, which hides the - "dma_buf->ops->" indirection from the users of this interface. - - In struct dma_buf_ops, unmap_dma_buf is defined as - void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); - - unmap_dma_buf signifies the end-of-DMA for the attachment provided. Like - map_dma_buf, this API also must be implemented by the exporter. - - -6. when buffer-user is done using this buffer, it 'disconnects' itself from the - buffer. - - After the buffer-user has no more interest in using this buffer, it should - disconnect itself from the buffer: - - - it first detaches itself from the buffer. - - Interface: - void dma_buf_detach(struct dma_buf *dmabuf, - struct dma_buf_attachment *dmabuf_attach); - - This API removes the attachment from the list in dmabuf, and optionally calls - dma_buf->ops->detach(), if provided by exporter, for any housekeeping bits. - - - Then, the buffer-user returns the buffer reference to exporter. - - Interface: - void dma_buf_put(struct dma_buf *dmabuf); - - This API then reduces the refcount for this buffer. - - If, as a result of this call, the refcount becomes 0, the 'release' file - operation related to this fd is called. It calls the dmabuf->ops->release() - operation in turn, and frees the memory allocated for dmabuf when exported. - -NOTES: -- Importance of attach-detach and {map,unmap}_dma_buf operation pairs - The attach-detach calls allow the exporter to figure out backing-storage - constraints for the currently-interested devices. This allows preferential - allocation, and/or migration of pages across different types of storage - available, if possible. - - Bracketing of DMA access with {map,unmap}_dma_buf operations is essential - to allow just-in-time backing of storage, and migration mid-way through a - use-case. 
- -- Migration of backing storage if needed - If after - - at least one map_dma_buf has happened, - - and the backing storage has been allocated for this buffer, - another new buffer-user intends to attach itself to this buffer, it might - be allowed, if possible for the exporter. - - In case it is allowed by the exporter: - if the new buffer-user has stricter 'backing-storage constraints', and the - exporter can handle these constraints, the exporter can just stall on the - map_dma_buf until all outstanding access is completed (as signalled by - unmap_dma_buf). - Once all users have finished accessing and have unmapped this buffer, the - exporter could potentially move the buffer to the stricter backing-storage, - and then allow further {map,unmap}_dma_buf operations from any buffer-user - from the migrated backing-storage. - - If the exporter cannot fulfill the backing-storage constraints of the new - buffer-user device as requested, dma_buf_attach() would return an error to - denote non-compatibility of the new buffer-sharing request with the current - buffer. - - If the exporter chooses not to allow an attach() operation once a - map_dma_buf() API has been called, it simply returns an error. - -Kernel cpu access to a dma-buf buffer object --------------------------------------------- - -The motivation to allow cpu access from the kernel to a dma-buf object from the -importers side are: -- fallback operations, e.g. if the devices is connected to a usb bus and the - kernel needs to shuffle the data around first before sending it away. -- full transparency for existing users on the importer side, i.e. userspace - should not notice the difference between a normal object from that subsystem - and an imported one backed by a dma-buf. This is really important for drm - opengl drivers that expect to still use all the existing upload/download - paths. - -Access to a dma_buf from the kernel context involves three steps: - -1. Prepare access, which invalidate any necessary caches and make the object - available for cpu access. -2. Access the object page-by-page with the dma_buf map apis -3. Finish access, which will flush any necessary cpu caches and free reserved - resources. - -1. Prepare access - - Before an importer can access a dma_buf object with the cpu from the kernel - context, it needs to notify the exporter of the access that is about to - happen. - - Interface: - int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, - enum dma_data_direction direction) - - This allows the exporter to ensure that the memory is actually available for - cpu access - the exporter might need to allocate or swap-in and pin the - backing storage. The exporter also needs to ensure that cpu access is - coherent for the access direction. The direction can be used by the exporter - to optimize the cache flushing, i.e. access with a different direction (read - instead of write) might return stale or even bogus data (e.g. when the - exporter needs to copy the data to temporary storage). - - This step might fail, e.g. in oom conditions. - -2. Accessing the buffer - - To support dma_buf objects residing in highmem cpu access is page-based using - an api similar to kmap. Accessing a dma_buf is done in aligned chunks of - PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which returns - a pointer in kernel virtual address space. Afterwards the chunk needs to be - unmapped again. There is no limit on how often a given chunk can be mapped - and unmapped, i.e. 
the importer does not need to call begin_cpu_access again - before mapping the same chunk again. - - Interfaces: - void *dma_buf_kmap(struct dma_buf *, unsigned long); - void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); - - There are also atomic variants of these interfaces. Like for kmap they - facilitate non-blocking fast-paths. Neither the importer nor the exporter (in - the callback) is allowed to block when using these. - - Interfaces: - void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long); - void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); - - For importers all the restrictions of using kmap apply, like the limited - supply of kmap_atomic slots. Hence an importer shall only hold onto at most 2 - atomic dma_buf kmaps at the same time (in any given process context). - - dma_buf kmap calls outside of the range specified in begin_cpu_access are - undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on - the partial chunks at the beginning and end but may return stale or bogus - data outside of the range (in these partial chunks). - - Note that these calls need to always succeed. The exporter needs to complete - any preparations that might fail in begin_cpu_access. - - For some cases the overhead of kmap can be too high, a vmap interface - is introduced. This interface should be used very carefully, as vmalloc - space is a limited resources on many architectures. - - Interfaces: - void *dma_buf_vmap(struct dma_buf *dmabuf) - void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr) - - The vmap call can fail if there is no vmap support in the exporter, or if it - runs out of vmalloc space. Fallback to kmap should be implemented. Note that - the dma-buf layer keeps a reference count for all vmap access and calls down - into the exporter's vmap function only when no vmapping exists, and only - unmaps it once. Protection against concurrent vmap/vunmap calls is provided - by taking the dma_buf->lock mutex. - -3. Finish access - - When the importer is done accessing the CPU, it needs to announce this to - the exporter (to facilitate cache flushing and unpinning of any pinned - resources). The result of any dma_buf kmap calls after end_cpu_access is - undefined. - - Interface: - void dma_buf_end_cpu_access(struct dma_buf *dma_buf, - enum dma_data_direction dir); - - -Direct Userspace Access/mmap Support ------------------------------------- - -Being able to mmap an export dma-buf buffer object has 2 main use-cases: -- CPU fallback processing in a pipeline and -- supporting existing mmap interfaces in importers. - -1. CPU fallback processing in a pipeline - - In many processing pipelines it is sometimes required that the cpu can access - the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid - the need to handle this specially in userspace frameworks for buffer sharing - it's ideal if the dma_buf fd itself can be used to access the backing storage - from userspace using mmap. - - Furthermore Android's ION framework already supports this (and is otherwise - rather similar to dma-buf from a userspace consumer side with using fds as - handles, too). So it's beneficial to support this in a similar fashion on - dma-buf to have a good transition path for existing Android userspace. - - No special interfaces, userspace simply calls mmap on the dma-buf fd, making - sure that the cache synchronization ioctl (DMA_BUF_IOCTL_SYNC) is *always* - used when the access happens. 
Note that DMA_BUF_IOCTL_SYNC can fail with - -EAGAIN or -EINTR, in which case it must be restarted. - - Some systems might need some sort of cache coherency management e.g. when - CPU and GPU domains are being accessed through dma-buf at the same time. To - circumvent this problem there are begin/end coherency markers, that forward - directly to existing dma-buf device drivers vfunc hooks. Userspace can make - use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The sequence - would be used like following: - - mmap dma-buf fd - - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write - to mmap area 3. SYNC_END ioctl. This can be repeated as often as you - want (with the new data being consumed by the GPU or say scanout device) - - munmap once you don't need the buffer any more - - For correctness and optimal performance, it is always required to use - SYNC_START and SYNC_END before and after, respectively, when accessing the - mapped address. Userspace cannot rely on coherent access, even when there - are systems where it just works without calling these ioctls. - -2. Supporting existing mmap interfaces in importers - - Similar to the motivation for kernel cpu access it is again important that - the userspace code of a given importing subsystem can use the same interfaces - with a imported dma-buf buffer object as with a native buffer object. This is - especially important for drm where the userspace part of contemporary OpenGL, - X, and other drivers is huge, and reworking them to use a different way to - mmap a buffer rather invasive. - - The assumption in the current dma-buf interfaces is that redirecting the - initial mmap is all that's needed. A survey of some of the existing - subsystems shows that no driver seems to do any nefarious thing like syncing - up with outstanding asynchronous processing on the device or allocating - special resources at fault time. So hopefully this is good enough, since - adding interfaces to intercept pagefaults and allow pte shootdowns would - increase the complexity quite a bit. - - Interface: - int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, - unsigned long); - - If the importing subsystem simply provides a special-purpose mmap call to set - up a mapping in userspace, calling do_mmap with dma_buf->file will equally - achieve that for a dma-buf object. - -3. Implementation notes for exporters - - Because dma-buf buffers have invariant size over their lifetime, the dma-buf - core checks whether a vma is too large and rejects such mappings. The - exporter hence does not need to duplicate this check. - - Because existing importing subsystems might presume coherent mappings for - userspace, the exporter needs to set up a coherent mapping. If that's not - possible, it needs to fake coherency by manually shooting down ptes when - leaving the cpu domain and flushing caches at fault time. Note that all the - dma_buf files share the same anon inode, hence the exporter needs to replace - the dma_buf file stored in vma->vm_file with it's own if pte shootdown is - required. This is because the kernel uses the underlying inode's address_space - for vma tracking (and hence pte tracking at shootdown time with - unmap_mapping_range). - - If the above shootdown dance turns out to be too expensive in certain - scenarios, we can extend dma-buf with a more explicit cache tracking scheme - for userspace mappings. But the current assumption is that using mmap is - always a slower path, so some inefficiencies should be acceptable. 
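As an editor's illustration of the SYNC_START/SYNC_END bracketing just described (not part of the original document), a userspace helper might look as follows; buf_fd is a placeholder, and struct dma_buf_sync with the DMA_BUF_SYNC_* flags is assumed from the uapi <linux/dma-buf.h> of this kernel generation:

    #include <linux/dma-buf.h>  /* struct dma_buf_sync, DMA_BUF_SYNC_*, DMA_BUF_IOCTL_SYNC */
    #include <sys/ioctl.h>
    #include <errno.h>

    /* Issue DMA_BUF_IOCTL_SYNC, restarting on EINTR/EAGAIN as required above. */
    static int dmabuf_sync(int buf_fd, __u64 flags)
    {
        struct dma_buf_sync sync = { .flags = flags };
        int ret;

        do {
            ret = ioctl(buf_fd, DMA_BUF_IOCTL_SYNC, &sync);
        } while (ret == -1 && (errno == EINTR || errno == EAGAIN));

        return ret;
    }

    /* One CPU write cycle against an already mmap'ed dma-buf: */
    dmabuf_sync(buf_fd, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE);
    /* ... read/write the mmap'ed area ... */
    dmabuf_sync(buf_fd, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE);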
- - Exporters that shoot down mappings (for any reasons) shall not do any - synchronization at fault time with outstanding device operations. - Synchronization is an orthogonal issue to sharing the backing storage of a - buffer and hence should not be handled by dma-buf itself. This is explicitly - mentioned here because many people seem to want something like this, but if - different exporters handle this differently, buffer sharing can fail in - interesting ways depending upong the exporter (if userspace starts depending - upon this implicit synchronization). - -Other Interfaces Exposed to Userspace on the dma-buf FD ------------------------------------------------------- - -- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only - with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow - the usual size discover pattern size = SEEK_END(0); SEEK_SET(0). Every other - llseek operation will report -EINVAL. - - If llseek on dma-buf FDs isn't support the kernel will report -ESPIPE for all - cases. Userspace can use this to detect support for discovering the dma-buf - size using llseek. - -Miscellaneous notes -------------------- - -- Any exporters or users of the dma-buf buffer sharing framework must have - a 'select DMA_SHARED_BUFFER' in their respective Kconfigs. - -- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set - on the file descriptor. This is not just a resource leak, but a - potential security hole. It could give the newly exec'd application - access to buffers, via the leaked fd, to which it should otherwise - not be permitted access. - - The problem with doing this via a separate fcntl() call, versus doing it - atomically when the fd is created, is that this is inherently racy in a - multi-threaded app[3]. The issue is made worse when it is library code - opening/creating the file descriptor, as the application may not even be - aware of the fd's. - - To avoid this problem, userspace must have a way to request O_CLOEXEC - flag be set when the dma-buf fd is created. So any API provided by - the exporting driver to create a dmabuf fd must provide a way to let - userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd(). - -- If an exporter needs to manually flush caches and hence needs to fake - coherency for mmap support, it needs to be able to zap all the ptes pointing - at the backing storage. Now linux mm needs a struct address_space associated - with the struct file stored in vma->vm_file to do that with the function - unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd - with the anon_file struct file, i.e. all dma_bufs share the same file. - - Hence exporters need to setup their own file (and address_space) association - by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap - callback. In the specific case of a gem driver the exporter could use the - shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then - zap ptes by unmapping the corresponding range of the struct address_space - associated with their own file. 
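The vm_file bookkeeping described in the last note can be sketched as follows (an editor's illustration only; struct my_buffer and its shmem-backed filp are hypothetical, the shape loosely follows what some gem drivers do):

    /* Exporter's mmap callback: back the vma with the exporter's own file so
     * the ptes can be found again at shootdown time via its address_space. */
    static int my_dmabuf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
    {
        struct my_buffer *buf = dmabuf->priv;   /* hypothetical private data */

        fput(vma->vm_file);                     /* drop the anon dma_buf file */
        vma->vm_file = get_file(buf->filp);     /* exporter-owned struct file */
        vma->vm_pgoff = 0;
        /* ... install vm_ops with a fault handler, set vm_flags ... */
        return 0;
    }

    /* When faking coherency and leaving the cpu domain: zap all user ptes. */
    unmap_mapping_range(buf->filp->f_mapping, 0, buf->size, 1);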
- References: [1] struct dma_buf_ops in include/linux/dma-buf.h [2] All interfaces mentioned above defined in include/linux/dma-buf.h [3] https://lwn.net/Articles/236486/ diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index a9b457a4b949..31671b469627 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -17,6 +17,98 @@ shared or exclusive fence(s) associated with the buffer. Shared DMA Buffers ------------------ +This document serves as a guide to device-driver writers on what the dma-buf +buffer sharing API is and how to use it for exporting and using shared buffers. + +Any device driver which wishes to be a part of DMA buffer sharing can do so as +either the 'exporter' of buffers, or the 'user' or 'importer' of buffers. + +Say a driver A wants to use buffers created by driver B, then we call B the +exporter, and A the buffer-user/importer. + +The exporter + + - implements and manages operations in :c:type:`struct dma_buf_ops + <dma_buf_ops>` for the buffer, + - allows other users to share the buffer by using dma_buf sharing APIs, + - manages the details of buffer allocation, wrapped into a :c:type:`struct + dma_buf <dma_buf>`, + - decides about the actual backing storage where this allocation happens, + - and takes care of any migration of scatterlist - for all (shared) users of + this buffer. + +The buffer-user + + - is one of (many) sharing users of the buffer. + - doesn't need to worry about how the buffer is allocated, or where. + - and needs a mechanism to get access to the scatterlist that makes up this + buffer in memory, mapped into its own address space, so it can access the + same area of memory. This interface is provided by :c:type:`struct + dma_buf_attachment <dma_buf_attachment>`. + +Any exporters or users of the dma-buf buffer sharing framework must have a +'select DMA_SHARED_BUFFER' in their respective Kconfigs. + +Userspace Interface Notes +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Mostly a DMA buffer file descriptor is simply an opaque object for userspace, +and hence the generic interface exposed is very minimal. There are a few things to +consider though: + +- Since kernel 3.12 the dma-buf FD supports the llseek system call, but only + with offset=0 and whence=SEEK_END|SEEK_SET. SEEK_SET is supported to allow + the usual size discovery pattern size = SEEK_END(0); SEEK_SET(0). Every other + llseek operation will report -EINVAL. + + If llseek on dma-buf FDs isn't supported, the kernel will report -ESPIPE for all + cases. Userspace can use this to detect support for discovering the dma-buf + size using llseek. + +- In order to avoid fd leaks on exec, the FD_CLOEXEC flag must be set + on the file descriptor. This is not just a resource leak, but a + potential security hole. It could give the newly exec'd application + access to buffers, via the leaked fd, to which it should otherwise + not be permitted access. + + The problem with doing this via a separate fcntl() call, versus doing it + atomically when the fd is created, is that this is inherently racy in a + multi-threaded app[3]. The issue is made worse when it is library code + opening/creating the file descriptor, as the application may not even be + aware of the fds. + + To avoid this problem, userspace must have a way to request O_CLOEXEC + flag be set when the dma-buf fd is created. So any API provided by + the exporting driver to create a dmabuf fd must provide a way to let + userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
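For illustration of that last point, a driver's export ioctl might forward a userspace-controlled flag straight into dma_buf_fd(); this is an editor's sketch, every name except dma_buf_fd() is made up (DRM's PRIME handle-to-fd ioctl follows this shape):

    static int my_export_ioctl(struct my_device *mdev,
                               struct my_export_args *args)
    {
        struct dma_buf *dmabuf;

        dmabuf = my_handle_to_dmabuf(mdev, args->handle); /* hypothetical lookup */
        if (IS_ERR(dmabuf))
            return PTR_ERR(dmabuf);

        /* args->flags comes from userspace; honour only the cloexec bit */
        return dma_buf_fd(dmabuf, args->flags & O_CLOEXEC);
    }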
+ +- Memory mapping the contents of the DMA buffer is also supported. See the + discussion below on `CPU Access to DMA Buffer Objects`_ for the full details. + +- The DMA buffer FD is also pollable, see `Fence Poll Support`_ below for + details. + +Basic Operation and Device DMA Access +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/dma-buf/dma-buf.c + :doc: dma buf device access + +CPU Access to DMA Buffer Objects +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/dma-buf/dma-buf.c + :doc: cpu access + +Fence Poll Support +~~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: drivers/dma-buf/dma-buf.c + :doc: fence polling + +Kernel Functions and Structures Reference +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + .. kernel-doc:: drivers/dma-buf/dma-buf.c :export: diff --git a/MAINTAINERS b/MAINTAINERS index 5f0420a0da5b..bdc4843d4dc5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3966,7 +3966,7 @@ F: drivers/dma-buf/ F: include/linux/dma-buf* F: include/linux/reservation.h F: include/linux/*fence.h -F: Documentation/dma-buf-sharing.txt +F: Documentation/driver-api/dma-buf.rst T: git git://anongit.freedesktop.org/drm/drm-misc SYNC FILE FRAMEWORK diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index e72e64484131..91aff74ed092 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -124,6 +124,28 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence) return base + offset; } +/** + * DOC: fence polling + * + * To support cross-device and cross-driver synchronization of buffer access, + * implicit fences (represented internally in the kernel with struct &dma_fence) can + * be attached to a &dma_buf. The glue for that and a few related things are + * provided in the &reservation_object structure. + * + * Userspace can query the state of these implicitly tracked fences using poll() + * and related system calls: + * + * - Checking for POLLIN, i.e. read access, can be used to query the state of the + * most recent write or exclusive fence. + * + * - Checking for POLLOUT, i.e. write access, can be used to query the state of + * all attached fences, shared and exclusive ones. + * + * Note that this only signals the completion of the respective fences, i.e. the + * DMA transfers are complete. Cache flushing and any other necessary + * preparations before CPU access can begin still need to happen. + */ + static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb; @@ -314,19 +336,52 @@ static inline int is_dma_buf_file(struct file *file) } /** + * DOC: dma buf device access + * + * For device DMA access to a shared DMA buffer the usual sequence of operations + * is fairly simple: + * + * 1. The exporter defines its exporter instance using + * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private + * buffer object into a &dma_buf. It then exports that &dma_buf to userspace + * as a file descriptor by calling dma_buf_fd(). + * + * 2. Userspace passes this file descriptor to all drivers it wants this buffer + * to share with: First the file descriptor is converted to a &dma_buf using + * dma_buf_get(). Then the buffer is attached to the device using + * dma_buf_attach(). + * + * Up to this stage the exporter is still free to migrate or reallocate the + * backing storage. + * + * 3. Once the buffer is attached to all devices userspace can initiate DMA + * access to the shared buffer.
In the kernel this is done by calling + * dma_buf_map_attachment() and dma_buf_unmap_attachment(). + * + * 4. Once a driver is done with a shared buffer it needs to call + * dma_buf_detach() (after cleaning up any mappings) and then release the + * reference acquired with dma_buf_get() by calling dma_buf_put(). + * + * For the detailed semantics exporters are expected to implement see + * &dma_buf_ops. + */ + +/** * dma_buf_export - Creates a new dma_buf, and associates an anon file * with this buffer, so it can be exported. * Also connect the allocator specific data and ops to the buffer. * Additionally, provide a name string for exporter; useful in debugging. * * @exp_info: [in] holds all the export related information provided - * by the exporter. see struct dma_buf_export_info + * by the exporter. see struct &dma_buf_export_info * for further details. * * Returns, on success, a newly created dma_buf object, which wraps the * supplied private data and operations for dma_buf_ops. On either missing * ops, or error in allocating struct dma_buf, will return negative error. * + * For most cases the easiest way to create @exp_info is through the + * %DEFINE_DMA_BUF_EXPORT_INFO macro. */ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) { @@ -458,7 +513,12 @@ EXPORT_SYMBOL_GPL(dma_buf_get); * dma_buf_put - decreases refcount of the buffer * @dmabuf: [in] buffer to reduce refcount of * - * Uses file's refcounting done implicitly by fput() + * Uses file's refcounting done implicitly by fput(). + * + * If, as a result of this call, the refcount becomes 0, the 'release' file + * operation related to this fd is called. It calls the release operation of + * struct &dma_buf_ops in turn, and frees the memory allocated for dmabuf when + * exported. */ void dma_buf_put(struct dma_buf *dmabuf) { @@ -475,8 +535,17 @@ EXPORT_SYMBOL_GPL(dma_buf_put); * @dmabuf: [in] buffer to attach device to. * @dev: [in] device to be attached. * - * Returns struct dma_buf_attachment * for this attachment; returns ERR_PTR on - * error. + * Returns struct dma_buf_attachment pointer for this attachment. Attachments + * must be cleaned up by calling dma_buf_detach(). + * + * Returns: + * + * A pointer to newly created &dma_buf_attachment on success, or a negative + * error code wrapped into a pointer on failure. + * + * Note that this can fail if the backing storage of @dmabuf is in a place not + * accessible to @dev, and cannot be moved to a more suitable place. This is + * indicated with the error code -EBUSY. */ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev) @@ -519,6 +588,7 @@ EXPORT_SYMBOL_GPL(dma_buf_attach); * @dmabuf: [in] buffer to detach from. * @attach: [in] attachment to be detached; is free'd after this call. * + * Clean up a device attachment obtained by calling dma_buf_attach(). */ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) { @@ -543,7 +613,12 @@ EXPORT_SYMBOL_GPL(dma_buf_detach); * @direction: [in] direction of DMA transfer * * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR - * on error. + * on error. May return -EINTR if it is interrupted by a signal. + * + * A mapping must be unmapped again using dma_buf_unmap_attachment(). Note that + * the underlying backing storage is pinned for as long as a mapping exists, + * therefore users/importers should not hold onto a mapping for undue amounts of + * time.
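 *
 * A usage sketch (an editor's illustration, not part of this patch; attach
 * comes from dma_buf_attach() and the device programming is elided)::
 *
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (IS_ERR(sgt))
 *		return PTR_ERR(sgt);
 *
 *	// ... point the device's DMA engine at the sg_table entries ...
 *
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);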
*/ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) @@ -571,6 +646,7 @@ EXPORT_SYMBOL_GPL(dma_buf_map_attachment); * @sg_table: [in] scatterlist info of the buffer to unmap * @direction: [in] direction of DMA transfer * + * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment(). */ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, struct sg_table *sg_table, @@ -586,6 +662,122 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, } EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); +/** + * DOC: cpu access + * + * There are multiple reasons for supporting CPU access to a dma buffer object: + * + * - Fallback operations in the kernel, for example when a device is connected + * over USB and the kernel needs to shuffle the data around first before + * sending it away. Cache coherency is handled by bracketing any transactions + * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access(). + * + * To support dma_buf objects residing in highmem cpu access is page-based + * using an api similar to kmap. Accessing a dma_buf is done in aligned chunks + * of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which + * returns a pointer in kernel virtual address space. Afterwards the chunk + * needs to be unmapped again. There is no limit on how often a given chunk + * can be mapped and unmapped, i.e. the importer does not need to call + * begin_cpu_access again before mapping the same chunk again. + * + * Interfaces:: + * void \*dma_buf_kmap(struct dma_buf \*, unsigned long); + * void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*); + * + * There are also atomic variants of these interfaces. Like for kmap they + * facilitate non-blocking fast-paths. Neither the importer nor the exporter + * (in the callback) is allowed to block when using these. + * + * Interfaces:: + * void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long); + * void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*); + * + * For importers all the restrictions of using kmap apply, like the limited + * supply of kmap_atomic slots. Hence an importer shall only hold onto at + * most 2 atomic dma_buf kmaps at the same time (in any given process context). + * + * dma_buf kmap calls outside of the range specified in begin_cpu_access are + * undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on + * the partial chunks at the beginning and end but may return stale or bogus + * data outside of the range (in these partial chunks). + * + * Note that these calls need to always succeed. The exporter needs to + * complete any preparations that might fail in begin_cpu_access. + * + * For some cases the overhead of kmap can be too high, so a vmap interface + * is introduced. This interface should be used very carefully, as vmalloc + * space is a limited resource on many architectures. + * + * Interfaces:: + * void \*dma_buf_vmap(struct dma_buf \*dmabuf) + * void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr) + * + * The vmap call can fail if there is no vmap support in the exporter, or if + * it runs out of vmalloc space. Fallback to kmap should be implemented. Note + * that the dma-buf layer keeps a reference count for all vmap access and + * calls down into the exporter's vmap function only when no vmapping exists, + * and only unmaps it once. Protection against concurrent vmap/vunmap calls is + * provided by taking the dma_buf->lock mutex.
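 *
 * A fallback sketch (an editor's illustration, not part of this patch):
 * between dma_buf_begin_cpu_access() and dma_buf_end_cpu_access(), try vmap
 * first and fall back to per-page kmap when the exporter has no vmap
 * support::
 *
 *	void *vaddr = dma_buf_vmap(dmabuf);
 *
 *	if (vaddr) {
 *		// ... access the whole buffer contiguously via vaddr ...
 *		dma_buf_vunmap(dmabuf, vaddr);
 *	} else {
 *		unsigned long pg, num_pages = dmabuf->size >> PAGE_SHIFT;
 *
 *		for (pg = 0; pg < num_pages; pg++) {
 *			void *ptr = dma_buf_kmap(dmabuf, pg);
 *			// ... access one PAGE_SIZE chunk via ptr ...
 *			dma_buf_kunmap(dmabuf, pg, ptr);
 *		}
 *	}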
+ * + * - For full compatibility on the importer side with existing userspace + * interfaces, which might already support mmap'ing buffers. This is needed in + * many processing pipelines (e.g. feeding a software rendered image into a + * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION + * framework already supported this, and for DMA buffer file descriptors to + * replace ION buffers, mmap support was needed. + * + * There are no special interfaces, userspace simply calls mmap on the dma-buf + * fd. But like for CPU access there's a need to bracket the actual access, + * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that + * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must + * be restarted. + * + * Some systems might need some sort of cache coherency management e.g. when + * CPU and GPU domains are being accessed through dma-buf at the same time. + * To circumvent this problem there are begin/end coherency markers that + * forward directly to existing dma-buf device drivers' vfunc hooks. Userspace + * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The + * sequence would be used as follows: + * + * - mmap dma-buf fd + * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write + * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you + * want (with the new data being consumed by say the GPU or the scanout + * device) + * - munmap once you don't need the buffer any more + * + * For correctness and optimal performance, it is always required to use + * SYNC_START and SYNC_END before and after, respectively, when accessing the + * mapped address. Userspace cannot rely on coherent access, even when there + * are systems where it just works without calling these ioctls. + * + * - And as a CPU fallback in userspace processing pipelines. + * + * Similar to the motivation for kernel cpu access it is again important that + * the userspace code of a given importing subsystem can use the same + * interfaces with an imported dma-buf buffer object as with a native buffer + * object. This is especially important for drm where the userspace part of + * contemporary OpenGL, X, and other drivers is huge, and reworking them to + * use a different way to mmap a buffer is rather invasive. + * + * The assumption in the current dma-buf interfaces is that redirecting the + * initial mmap is all that's needed. A survey of some of the existing + * subsystems shows that no driver seems to do any nefarious thing like + * syncing up with outstanding asynchronous processing on the device or + * allocating special resources at fault time. So hopefully this is good + * enough, since adding interfaces to intercept pagefaults and allow pte + * shootdowns would increase the complexity quite a bit. + * + * Interface:: + * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*, + * unsigned long); + * + * If the importing subsystem simply provides a special-purpose mmap call to + * set up a mapping in userspace, calling do_mmap with dma_buf->file will + * equally achieve that for a dma-buf object. + */ + static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, enum dma_data_direction direction) { @@ -611,6 +803,10 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf, * @dmabuf: [in] buffer to prepare cpu access for. * @direction: [in] length of range for cpu access. * + * After the cpu access is complete the caller should call + * dma_buf_end_cpu_access().
Only when cpu access is bracketed by both calls is + * it guaranteed to be coherent with other DMA access. + * * Can return negative error values, returns 0 on success. */ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, @@ -643,6 +839,8 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access); * @dmabuf: [in] buffer to complete cpu access for. * @direction: [in] length of range for cpu access. * + * This terminates CPU access started with dma_buf_begin_cpu_access(). + * * Can return negative error values, returns 0 on success. */ int dma_buf_end_cpu_access(struct dma_buf *dmabuf, diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 6d802f2d2881..07cb9b908f30 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -67,9 +67,10 @@ static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb) * sync_file_create() - creates a sync file * @fence: fence to add to the sync_fence * - * Creates a sync_file containg @fence. Once this is called, the sync_file - * takes ownership of @fence. The sync_file can be released with - * fput(sync_file->file). Returns the sync_file or NULL in case of error. + * Creates a sync_file containing @fence. This function acquires an additional + * reference to @fence for the newly-created &sync_file, if it succeeds. The + * sync_file can be released with fput(sync_file->file). Returns the + * sync_file or NULL in case of error. */ struct sync_file *sync_file_create(struct dma_fence *fence) { @@ -90,13 +91,6 @@ struct sync_file *sync_file_create(struct dma_fence *fence) } EXPORT_SYMBOL(sync_file_create); -/** - * sync_file_fdget() - get a sync_file from an fd - * @fd: fd referencing a fence - * - * Ensures @fd references a valid sync_file, increments the refcount of the - * backing file. Returns the sync_file or NULL in case of error. - */ static struct sync_file *sync_file_fdget(int fd) { struct file *file = fget(fd); @@ -468,4 +462,3 @@ static const struct file_operations sync_file_fops = { .unlocked_ioctl = sync_file_ioctl, .compat_ioctl = sync_file_ioctl, }; - diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index ebfe8404c25f..29146fa83001 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -48,6 +48,21 @@ config DRM_DEBUG_MM If in doubt, say "N". +config DRM_DEBUG_MM_SELFTEST + tristate "kselftests for DRM range manager (struct drm_mm)" + depends on DRM + depends on DEBUG_KERNEL + select PRIME_NUMBERS + select DRM_LIB_RANDOM + default n + help + This option provides a kernel module that can be used to test + the DRM range manager (drm_mm) and its API. This option is not + useful for distributions or general kernels, but only for kernel + developers working on DRM and associated drivers. + + If in doubt, say "N". + config DRM_KMS_HELPER tristate depends on DRM @@ -321,3 +336,7 @@ config DRM_SAVAGE chipset. If M is selected the module will be called savage.
endif # DRM_LEGACY + +config DRM_LIB_RANDOM + bool + default n diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index b9ae4280de9d..d00c389f24bf 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -18,6 +18,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \ drm_plane.o drm_color_mgmt.o drm_print.o \ drm_dumb_buffers.o drm_mode_config.o +drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o drm-$(CONFIG_PCI) += ati_pcigart.o @@ -37,6 +38,7 @@ drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o drm_kms_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o +obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += selftests/ CFLAGS_drm_trace_points.o := -I$(src) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 581601ca6b89..d2036df145b3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -508,7 +508,7 @@ amdgpu_framebuffer_init(struct drm_device *dev, { int ret; rfb->obj = obj; - drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); ret = drm_framebuffer_init(dev, &rfb->base, &amdgpu_fb_funcs); if (ret) { rfb->obj = NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 24629bec181a..838943d0962e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -245,7 +245,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, strcpy(info->fix.id, "amdgpudrmfb"); - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &amdgpufb_ops; @@ -272,7 +272,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)adev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)amdgpu_bo_size(abo)); - DRM_INFO("fb depth is %d\n", fb->depth); + DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); vga_switcheroo_client_fb_set(adev->ddev->pdev, info); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index fb902932f571..e63ece049b05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -61,10 +61,8 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) struct drm_connector *connector; mutex_lock(&mode_config->mutex); - if (mode_config->num_connector) { - list_for_each_entry(connector, &mode_config->connector_list, head) - amdgpu_connector_hotplug(connector); - } + list_for_each_entry(connector, &mode_config->connector_list, head) + amdgpu_connector_hotplug(connector); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h index 202b4176b74e..b60346792bf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h @@ -32,6 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> #include <drm/drm_fixed.h> #include 
<drm/drm_crtc_helper.h> diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9999dc71b998..84afaae97e65 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2072,7 +2072,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); - switch (target_fb->pixel_format) { + switch (target_fb->format->format) { case DRM_FORMAT_C8: fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); @@ -2145,7 +2145,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, break; default: DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format, &format_name)); + drm_get_format_name(target_fb->format->format, &format_name)); return -EINVAL; } @@ -2220,7 +2220,7 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); dce_v10_0_grph_enable(crtc, true); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index b3d62b909f43..7a7fa96d2e49 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2053,7 +2053,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); - switch (target_fb->pixel_format) { + switch (target_fb->format->format) { case DRM_FORMAT_C8: fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); @@ -2126,7 +2126,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, break; default: DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format, &format_name)); + drm_get_format_name(target_fb->format->format, &format_name)); return -EINVAL; } @@ -2201,7 +2201,7 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); dce_v11_0_grph_enable(crtc, true); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index b4e4ec630e8c..44f024c9b9aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -1501,7 +1501,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, amdgpu_bo_get_tiling_flags(abo, &tiling_flags); amdgpu_bo_unreserve(abo); - switch (target_fb->pixel_format) { + switch (target_fb->format->format) { case DRM_FORMAT_C8: fb_format = (GRPH_DEPTH(GRPH_DEPTH_8BPP) | GRPH_FORMAT(GRPH_FORMAT_INDEXED)); @@ -1567,7 +1567,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, break; default: DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format, &format_name)); + drm_get_format_name(target_fb->format->format, &format_name)); return -EINVAL; } @@ 
-1630,7 +1630,7 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); dce_v6_0_grph_enable(crtc, true); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 584abe834a3c..30945fe55ac7 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -1950,7 +1950,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); - switch (target_fb->pixel_format) { + switch (target_fb->format->format) { case DRM_FORMAT_C8: fb_format = ((GRPH_DEPTH_8BPP << GRPH_CONTROL__GRPH_DEPTH__SHIFT) | (GRPH_FORMAT_INDEXED << GRPH_CONTROL__GRPH_FORMAT__SHIFT)); @@ -2016,7 +2016,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, break; default: DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format, &format_name)); + drm_get_format_name(target_fb->format->format, &format_name)); return -EINVAL; } @@ -2079,7 +2079,7 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc, WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); dce_v8_0_grph_enable(crtc, true); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 7130b044b004..ad9a95916f1f 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -35,7 +35,8 @@ static struct simplefb_format supported_formats[] = { static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc) { struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); - uint32_t pixel_format = crtc->primary->state->fb->pixel_format; + const struct drm_framebuffer *fb = crtc->primary->state->fb; + uint32_t pixel_format = fb->format->format; struct simplefb_format *format = NULL; int i; diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index b69c66b4897e..0ce7f398bcff 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c @@ -47,10 +47,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) return ret; /* Link drm_bridge to encoder */ - bridge->encoder = encoder; - encoder->bridge = bridge; - - ret = drm_bridge_attach(drm, bridge); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) drm_encoder_cleanup(encoder); diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 7d4e5aa77195..20ebfb4fbdfa 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c @@ -60,11 +60,12 @@ static int hdlcd_set_pxl_fmt(struct drm_crtc *crtc) { unsigned int btpp; struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); + const struct drm_framebuffer *fb = crtc->primary->state->fb; uint32_t pixel_format; struct simplefb_format *format = NULL; int i; - pixel_format = crtc->primary->state->fb->pixel_format; + pixel_format = fb->format->format; for (i = 0; i < 
ARRAY_SIZE(supported_formats); i++) { if (supported_formats[i].fourcc == pixel_format) @@ -220,27 +221,28 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane, static void hdlcd_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *state) { + struct drm_framebuffer *fb = plane->state->fb; struct hdlcd_drm_private *hdlcd; struct drm_gem_cma_object *gem; u32 src_w, src_h, dest_w, dest_h; dma_addr_t scanout_start; - if (!plane->state->fb) + if (!fb) return; src_w = plane->state->src_w >> 16; src_h = plane->state->src_h >> 16; dest_w = plane->state->crtc_w; dest_h = plane->state->crtc_h; - gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0); - scanout_start = gem->paddr + plane->state->fb->offsets[0] + - plane->state->crtc_y * plane->state->fb->pitches[0] + + gem = drm_fb_cma_get_gem_obj(fb, 0); + scanout_start = gem->paddr + fb->offsets[0] + + plane->state->crtc_y * fb->pitches[0] + plane->state->crtc_x * - drm_format_plane_cpp(plane->state->fb->pixel_format, 0); + fb->format->cpp[0]; hdlcd = plane->dev->dev_private; - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, plane->state->fb->pitches[0]); - hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, plane->state->fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]); + hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]); hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1); hdlcd_write(hdlcd, HDLCD_REG_FB_BASE, scanout_start); } diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 63eec8f37cfc..eff2fe47e26a 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -112,11 +112,11 @@ static int malidp_de_plane_check(struct drm_plane *plane, fb = state->fb; ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id, - fb->pixel_format); + fb->format->format); if (ms->format == MALIDP_INVALID_FORMAT_ID) return -EINVAL; - ms->n_planes = drm_format_num_planes(fb->pixel_format); + ms->n_planes = fb->format->num_planes; for (i = 0; i < ms->n_planes; i++) { if (!malidp_hw_pitch_valid(mp->hwdev, fb->pitches[i])) { DRM_DEBUG_KMS("Invalid pitch %u for plane %d\n", @@ -137,8 +137,8 @@ static int malidp_de_plane_check(struct drm_plane *plane, /* packed RGB888 / BGR888 can't be rotated or flipped */ if (state->rotation != DRM_ROTATE_0 && - (state->fb->pixel_format == DRM_FORMAT_RGB888 || - state->fb->pixel_format == DRM_FORMAT_BGR888)) + (fb->format->format == DRM_FORMAT_RGB888 || + fb->format->format == DRM_FORMAT_BGR888)) return -EINVAL; ms->rotmem_size = 0; @@ -147,7 +147,7 @@ static int malidp_de_plane_check(struct drm_plane *plane, val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h, state->crtc_w, - state->fb->pixel_format); + fb->format->format); if (val < 0) return val; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 95cb3966b2ca..e62ee4498ce4 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -169,8 +169,7 @@ void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb, int x, int y) { u32 addr = drm_fb_obj(fb)->dev_addr; - u32 pixel_format = fb->pixel_format; - int num_planes = drm_format_num_planes(pixel_format); + int num_planes = fb->format->num_planes; int i; if (num_planes > 3) @@ -178,7 +177,7 @@ void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb, for (i = 0; i < num_planes; i++) addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] + - x * drm_format_plane_cpp(pixel_format, i); + 
x * fb->format->cpp[i]; for (; i < 3; i++) addrs[i] = 0; } @@ -191,7 +190,7 @@ static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb, unsigned i = 0; DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n", - pitch, x, y, fb->bits_per_pixel); + pitch, x, y, fb->format->cpp[0] * 8); armada_drm_plane_calc_addrs(addrs, fb, x, y); @@ -1036,7 +1035,7 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc, int ret; /* We don't support changing the pixel format */ - if (fb->pixel_format != crtc->primary->fb->pixel_format) + if (fb->format != crtc->primary->fb->format) return -EINVAL; work = kmalloc(sizeof(*work), GFP_KERNEL); diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index f03c212b754d..2a7eb6817c36 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -81,7 +81,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, dfb->mod = config; dfb->obj = obj; - drm_helper_mode_fill_fb_struct(&dfb->fb, mode); + drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode); ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs); if (ret) { diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index c5dc06a55883..78335100cbc3 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -89,11 +89,12 @@ static int armada_fb_create(struct drm_fb_helper *fbh, info->screen_base = ptr; fbh->fb = &dfb->fb; - drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth); + drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], + dfb->fb.format->depth); drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height); DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n", - dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel, + dfb->fb.width, dfb->fb.height, dfb->fb.format->cpp[0] * 8, (unsigned long long)obj->phys_addr); return 0; diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index 6743615232f5..34cb73d0db77 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -186,9 +186,9 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, armada_drm_plane_calc_addrs(addrs, fb, src_x, src_y); - pixel_format = fb->pixel_format; + pixel_format = fb->format->format; hsub = drm_format_horz_chroma_subsampling(pixel_format); - num_planes = drm_format_num_planes(pixel_format); + num_planes = fb->format->num_planes; /* * Annoyingly, shifting a YUYV-format image by one pixel diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 908011d2c8f5..6f3b6f50cf52 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -28,6 +28,7 @@ #ifndef __AST_DRV_H__ #define __AST_DRV_H__ +#include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/ttm/ttm_bo_api.h> diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c index d6f5ec64c667..b085140fae95 100644 --- a/drivers/gpu/drm/ast/ast_fb.c +++ b/drivers/gpu/drm/ast/ast_fb.c @@ -49,7 +49,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev, struct drm_gem_object *obj; struct ast_bo *bo; int src_offset, dst_offset; - int bpp = (afbdev->afb.base.bits_per_pixel + 7)/8; + int bpp = afbdev->afb.base.format->cpp[0]; int ret = -EBUSY; bool unmap = false; bool store_for_later = false; @@ -237,7 +237,7 @@ static int astfb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 
0); info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &afbdev->helper, sizes->fb_width, sizes->fb_height); info->screen_base = sysram; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index f75c6421db62..dcdd59d505bd 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -314,7 +314,7 @@ int ast_framebuffer_init(struct drm_device *dev, { int ret; - drm_helper_mode_fill_fb_struct(&ast_fb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &ast_fb->base, mode_cmd); ast_fb->obj = obj; ret = drm_framebuffer_init(dev, &ast_fb->base, &ast_fb_funcs); if (ret) { diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index e26c98f51eb4..606cb40f6c7c 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -79,12 +79,13 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo struct ast_vbios_mode_info *vbios_mode) { struct ast_private *ast = crtc->dev->dev_private; + const struct drm_framebuffer *fb = crtc->primary->fb; u32 refresh_rate_index = 0, mode_id, color_index, refresh_rate; u32 hborder, vborder; bool check_sync; struct ast_vbios_enhtable *best = NULL; - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: vbios_mode->std_table = &vbios_stdtable[VGAModeIndex]; color_index = VGAModeIndex - 1; @@ -207,7 +208,8 @@ static bool ast_get_vbios_mode_info(struct drm_crtc *crtc, struct drm_display_mo ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0x00); if (vbios_mode->enh_table->flags & NewModeInfo) { ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x91, 0xa8); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, crtc->primary->fb->bits_per_pixel); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x92, + fb->format->cpp[0] * 8); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x93, adjusted_mode->clock / 1000); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x94, adjusted_mode->crtc_hdisplay); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x95, adjusted_mode->crtc_hdisplay >> 8); @@ -369,10 +371,11 @@ static void ast_set_crtc_reg(struct drm_crtc *crtc, struct drm_display_mode *mod static void ast_set_offset_reg(struct drm_crtc *crtc) { struct ast_private *ast = crtc->dev->dev_private; + const struct drm_framebuffer *fb = crtc->primary->fb; u16 offset; - offset = crtc->primary->fb->pitches[0] >> 3; + offset = fb->pitches[0] >> 3; ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0x13, (offset & 0xff)); ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xb0, (offset >> 8) & 0x3f); } @@ -395,9 +398,10 @@ static void ast_set_ext_reg(struct drm_crtc *crtc, struct drm_display_mode *mode struct ast_vbios_mode_info *vbios_mode) { struct ast_private *ast = crtc->dev->dev_private; + const struct drm_framebuffer *fb = crtc->primary->fb; u8 jregA0 = 0, jregA3 = 0, jregA8 = 0; - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: jregA0 = 0x70; jregA3 = 0x01; @@ -452,7 +456,9 @@ static void ast_set_sync_reg(struct drm_device *dev, struct drm_display_mode *mo static bool ast_set_dac_reg(struct drm_crtc *crtc, struct drm_display_mode *mode, struct ast_vbios_mode_info *vbios_mode) { - switch (crtc->primary->fb->bits_per_pixel) { + const struct drm_framebuffer *fb = crtc->primary->fb; + + switch (fb->format->cpp[0] * 8) { case 8: break; default: diff --git 
a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c index 377e43cea9dd..63dfdbf34f80 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c @@ -446,7 +446,7 @@ void atmel_hlcdc_layer_update_set_fb(struct atmel_hlcdc_layer *layer, return; if (fb) - nplanes = drm_format_num_planes(fb->pixel_format); + nplanes = fb->format->num_planes; if (nplanes > layer->max_planes) return; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 6119b5085501..e7799b6ee829 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -230,9 +230,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, of_node_put(np); if (bridge) { - output->encoder.bridge = bridge; - bridge->encoder = &output->encoder; - ret = drm_bridge_attach(dev, bridge); + ret = drm_bridge_attach(&output->encoder, bridge, NULL); if (!ret) return 0; } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 246ed1e33d8a..bd2791c4b002 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -356,7 +356,7 @@ atmel_hlcdc_plane_update_general_settings(struct atmel_hlcdc_plane *plane, cfg |= ATMEL_HLCDC_LAYER_OVR | ATMEL_HLCDC_LAYER_ITER2BL | ATMEL_HLCDC_LAYER_ITER; - if (atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format)) + if (atmel_hlcdc_format_embeds_alpha(state->base.fb->format->format)) cfg |= ATMEL_HLCDC_LAYER_LAEN; else cfg |= ATMEL_HLCDC_LAYER_GAEN | @@ -386,13 +386,13 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, u32 cfg; int ret; - ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->pixel_format, + ret = atmel_hlcdc_format_to_plane_mode(state->base.fb->format->format, &cfg); if (ret) return; - if ((state->base.fb->pixel_format == DRM_FORMAT_YUV422 || - state->base.fb->pixel_format == DRM_FORMAT_NV61) && + if ((state->base.fb->format->format == DRM_FORMAT_YUV422 || + state->base.fb->format->format == DRM_FORMAT_NV61) && drm_rotation_90_or_270(state->base.rotation)) cfg |= ATMEL_HLCDC_YUV422ROT; @@ -405,7 +405,7 @@ static void atmel_hlcdc_plane_update_format(struct atmel_hlcdc_plane *plane, * Rotation optimization is not working on RGB888 (rotation is still * working but without any optimization). 
*/ - if (state->base.fb->pixel_format == DRM_FORMAT_RGB888) + if (state->base.fb->format->format == DRM_FORMAT_RGB888) cfg = ATMEL_HLCDC_LAYER_DMA_ROTDIS; else cfg = 0; @@ -514,7 +514,7 @@ atmel_hlcdc_plane_prepare_disc_area(struct drm_crtc_state *c_state) ovl_state = drm_plane_state_to_atmel_hlcdc_plane_state(ovl_s); if (!ovl_s->fb || - atmel_hlcdc_format_embeds_alpha(ovl_s->fb->pixel_format) || + atmel_hlcdc_format_embeds_alpha(ovl_s->fb->format->format) || ovl_state->alpha != 255) continue; @@ -621,7 +621,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, state->src_w >>= 16; state->src_h >>= 16; - state->nplanes = drm_format_num_planes(fb->pixel_format); + state->nplanes = fb->format->num_planes; if (state->nplanes > ATMEL_HLCDC_MAX_PLANES) return -EINVAL; @@ -664,15 +664,15 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, patched_src_h = DIV_ROUND_CLOSEST(patched_crtc_h * state->src_h, state->crtc_h); - hsub = drm_format_horz_chroma_subsampling(fb->pixel_format); - vsub = drm_format_vert_chroma_subsampling(fb->pixel_format); + hsub = drm_format_horz_chroma_subsampling(fb->format->format); + vsub = drm_format_vert_chroma_subsampling(fb->format->format); for (i = 0; i < state->nplanes; i++) { unsigned int offset = 0; int xdiv = i ? hsub : 1; int ydiv = i ? vsub : 1; - state->bpp[i] = drm_format_plane_cpp(fb->pixel_format, i); + state->bpp[i] = fb->format->cpp[i]; if (!state->bpp[i]) return -EINVAL; @@ -741,7 +741,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) && (!layout->memsize || - atmel_hlcdc_format_embeds_alpha(state->base.fb->pixel_format))) + atmel_hlcdc_format_embeds_alpha(state->base.fb->format->format))) return -EINVAL; if (state->crtc_x < 0 || state->crtc_y < 0) diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 32dfe418cc98..f626bab7f5e3 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -4,6 +4,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c index da790a1c302a..0317c3df6a22 100644 --- a/drivers/gpu/drm/bochs/bochs_fbdev.c +++ b/drivers/gpu/drm/bochs/bochs_fbdev.c @@ -123,7 +123,7 @@ static int bochsfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT; info->fbops = &bochsfb_ops; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &bochs->fb.helper, sizes->fb_width, sizes->fb_height); diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 099a3c688c26..ceb1fecf02dd 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -484,7 +484,7 @@ int bochs_framebuffer_init(struct drm_device *dev, { int ret; - drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd); gfb->obj = obj; ret = drm_framebuffer_init(dev, &gfb->base, &bochs_fb_funcs); if (ret) { diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index eb9bf8786c24..02b97bf64ee4 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -133,6 +133,7 @@ int 
analogix_dp_disable_psr(struct device *dev) { struct analogix_dp_device *dp = dev_get_drvdata(dev); struct edp_vsc_psr psr_vsc; + int ret; if (!dp->psr_support) return 0; @@ -147,6 +148,10 @@ int analogix_dp_disable_psr(struct device *dev) psr_vsc.DB0 = 0; psr_vsc.DB1 = 0; + ret = drm_dp_dpcd_writeb(&dp->aux, DP_SET_POWER, DP_SET_POWER_D0); + if (ret != 1) + dev_err(dp->dev, "Failed to set DP Power0 %d\n", ret); + analogix_dp_send_psr_spd(dp, &psr_vsc); return 0; } @@ -1227,12 +1232,10 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev, dp->bridge = bridge; - dp->encoder->bridge = bridge; bridge->driver_private = dp; - bridge->encoder = dp->encoder; bridge->funcs = &analogix_dp_bridge_funcs; - ret = drm_bridge_attach(drm_dev, bridge); + ret = drm_bridge_attach(dp->encoder, bridge, NULL); if (ret) { DRM_ERROR("failed to attach drm bridge\n"); return -EINVAL; diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index e5706981c934..86e9f9c7b59c 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -237,6 +237,7 @@ static int dumb_vga_remove(struct platform_device *pdev) static const struct of_device_id dumb_vga_match[] = { { .compatible = "dumb-vga-dac" }, + { .compatible = "ti,ths8135" }, {}, }; MODULE_DEVICE_TABLE(of, dumb_vga_match); diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c index 235ce7d1583d..f5009ae39b89 100644 --- a/drivers/gpu/drm/bridge/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/dw-hdmi.c @@ -1841,13 +1841,12 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi) hdmi->bridge = bridge; bridge->driver_private = hdmi; bridge->funcs = &dw_hdmi_bridge_funcs; - ret = drm_bridge_attach(drm, bridge); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) { DRM_ERROR("Failed to initialize bridge with drm\n"); return -EINVAL; } - encoder->bridge = bridge; hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; drm_connector_helper_add(&hdmi->connector, diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h index 2188d6b61b3e..b59aeef4635a 100644 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ b/drivers/gpu/drm/cirrus/cirrus_drv.h @@ -13,6 +13,7 @@ #include <video/vga.h> +#include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/ttm/ttm_bo_api.h> diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c index 3a6309d7d8e4..79a5cd108245 100644 --- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c +++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c @@ -22,7 +22,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev, struct drm_gem_object *obj; struct cirrus_bo *bo; int src_offset, dst_offset; - int bpp = (afbdev->gfb.base.bits_per_pixel + 7)/8; + int bpp = afbdev->gfb.base.format->cpp[0]; int ret = -EBUSY; bool unmap = false; bool store_for_later = false; @@ -218,7 +218,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT; info->fbops = &cirrusfb_ops; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &gfbdev->helper, sizes->fb_width, sizes->fb_height); @@ -238,7 +238,7 @@ static int cirrusfb_create(struct drm_fb_helper *helper, DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram aper at 0x%lX\n", (unsigned long)info->fix.smem_start); DRM_INFO("size %lu\n", (unsigned long)info->fix.smem_len); - DRM_INFO("fb depth is 
%d\n", fb->depth); + DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); return 0; diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c index 2c3c0d4072ce..52d901fa8687 100644 --- a/drivers/gpu/drm/cirrus/cirrus_main.c +++ b/drivers/gpu/drm/cirrus/cirrus_main.c @@ -34,7 +34,7 @@ int cirrus_framebuffer_init(struct drm_device *dev, { int ret; - drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd); gfb->obj = obj; ret = drm_framebuffer_init(dev, &gfb->base, &cirrus_fb_funcs); if (ret) { diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c index 9a4a27c1afd2..ed43ab10ac99 100644 --- a/drivers/gpu/drm/cirrus/cirrus_mode.c +++ b/drivers/gpu/drm/cirrus/cirrus_mode.c @@ -185,6 +185,7 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct cirrus_device *cdev = dev->dev_private; + const struct drm_framebuffer *fb = crtc->primary->fb; int hsyncstart, hsyncend, htotal, hdispend; int vtotal, vdispend; int tmp; @@ -257,7 +258,7 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc, sr07 = RREG8(SEQ_DATA); sr07 &= 0xe0; hdr = 0; - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: sr07 |= 0x11; break; @@ -280,13 +281,13 @@ static int cirrus_crtc_mode_set(struct drm_crtc *crtc, WREG_SEQ(0x7, sr07); /* Program the pitch */ - tmp = crtc->primary->fb->pitches[0] / 8; + tmp = fb->pitches[0] / 8; WREG_CRT(VGA_CRTC_OFFSET, tmp); /* Enable extended blanking and pitch bits, and enable full memory */ tmp = 0x22; - tmp |= (crtc->primary->fb->pitches[0] >> 7) & 0x10; - tmp |= (crtc->primary->fb->pitches[0] >> 6) & 0x40; + tmp |= (fb->pitches[0] >> 7) & 0x10; + tmp |= (fb->pitches[0] >> 6) & 0x40; WREG_CRT(0x1b, tmp); /* Enable high-colour modes */ diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 60697482b94c..b1b54011a92c 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -902,11 +902,11 @@ static int drm_atomic_plane_check(struct drm_plane *plane, } /* Check whether this plane supports the fb pixel format. */ - ret = drm_plane_check_pixel_format(plane, state->fb->pixel_format); + ret = drm_plane_check_pixel_format(plane, state->fb->format->format); if (ret) { struct drm_format_name_buf format_name; DRM_DEBUG_ATOMIC("Invalid pixel format %s\n", - drm_get_format_name(state->fb->pixel_format, + drm_get_format_name(state->fb->format->format, &format_name)); return ret; } @@ -960,11 +960,11 @@ static void drm_atomic_plane_print_state(struct drm_printer *p, drm_printf(p, "\tfb=%u\n", state->fb ? 
state->fb->base.id : 0); if (state->fb) { struct drm_framebuffer *fb = state->fb; - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; struct drm_format_name_buf format_name; drm_printf(p, "\t\tformat=%s\n", - drm_get_format_name(fb->pixel_format, &format_name)); + drm_get_format_name(fb->format->format, &format_name)); drm_printf(p, "\t\t\tmodifier=0x%llx\n", fb->modifier); drm_printf(p, "\t\tsize=%dx%d\n", fb->width, fb->height); drm_printf(p, "\t\tlayers:\n"); @@ -1417,6 +1417,7 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, struct drm_mode_config *config = &state->dev->mode_config; struct drm_connector *connector; struct drm_connector_state *conn_state; + struct drm_connector_list_iter conn_iter; int ret; ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx); @@ -1430,14 +1431,18 @@ drm_atomic_add_affected_connectors(struct drm_atomic_state *state, * Changed connectors are already in @state, so only need to look at the * current configuration. */ - drm_for_each_connector(connector, state->dev) { + drm_connector_list_iter_get(state->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->state->crtc != crtc) continue; conn_state = drm_atomic_get_connector_state(state, connector); - if (IS_ERR(conn_state)) + if (IS_ERR(conn_state)) { + drm_connector_list_iter_put(&conn_iter); return PTR_ERR(conn_state); + } } + drm_connector_list_iter_put(&conn_iter); return 0; } @@ -1692,6 +1697,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p) struct drm_plane *plane; struct drm_crtc *crtc; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; if (!drm_core_check_feature(dev, DRIVER_ATOMIC)) return; @@ -1702,8 +1708,10 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p) list_for_each_entry(crtc, &config->crtc_list, head) drm_atomic_crtc_print_state(p, crtc->state); - list_for_each_entry(connector, &config->connector_list, head) + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) drm_atomic_connector_print_state(p, connector->state); + drm_connector_list_iter_put(&conn_iter); } EXPORT_SYMBOL(drm_state_dump); @@ -2195,10 +2203,6 @@ retry: goto out; if (arg->flags & DRM_MODE_ATOMIC_TEST_ONLY) { - /* - * Unlike commit, check_only does not clean up state. - * Below we call drm_atomic_state_put for it. - */ ret = drm_atomic_check_only(state); } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { ret = drm_atomic_nonblocking_commit(state); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 583f47f27b36..799c1564a4f8 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -94,9 +94,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, { struct drm_connector_state *conn_state; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; struct drm_encoder *encoder; unsigned encoder_mask = 0; - int i, ret; + int i, ret = 0; /* * First loop, find all newly assigned encoders from the connectors @@ -144,7 +145,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, * and the crtc is disabled if no encoder is left. This preserves * compatibility with the legacy set_config behavior. 
*/ - drm_for_each_connector(connector, state->dev) { + drm_connector_list_iter_get(state->dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { struct drm_crtc_state *crtc_state; if (drm_atomic_get_existing_connector_state(state, connector)) @@ -160,12 +162,15 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, connector->state->crtc->base.id, connector->state->crtc->name, connector->base.id, connector->name); - return -EINVAL; + ret = -EINVAL; + goto out; } conn_state = drm_atomic_get_connector_state(state, connector); - if (IS_ERR(conn_state)) - return PTR_ERR(conn_state); + if (IS_ERR(conn_state)) { + ret = PTR_ERR(conn_state); + goto out; + } DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] in use on [CRTC:%d:%s], disabling [CONNECTOR:%d:%s]\n", encoder->base.id, encoder->name, @@ -176,19 +181,21 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, ret = drm_atomic_set_crtc_for_connector(conn_state, NULL); if (ret) - return ret; + goto out; if (!crtc_state->connector_mask) { ret = drm_atomic_set_mode_prop_for_crtc(crtc_state, NULL); if (ret < 0) - return ret; + goto out; crtc_state->active = false; } } +out: + drm_connector_list_iter_put(&conn_iter); - return 0; + return ret; } static void @@ -1058,41 +1065,6 @@ int drm_atomic_helper_wait_for_fences(struct drm_device *dev, EXPORT_SYMBOL(drm_atomic_helper_wait_for_fences); /** - * drm_atomic_helper_framebuffer_changed - check if framebuffer has changed - * @dev: DRM device - * @old_state: atomic state object with old state structures - * @crtc: DRM crtc - * - * Checks whether the framebuffer used for this CRTC changes as a result of - * the atomic update. This is useful for drivers which cannot use - * drm_atomic_helper_wait_for_vblanks() and need to reimplement its - * functionality. - * - * Returns: - * true if the framebuffer changed. - */ -bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, - struct drm_atomic_state *old_state, - struct drm_crtc *crtc) -{ - struct drm_plane *plane; - struct drm_plane_state *old_plane_state; - int i; - - for_each_plane_in_state(old_state, plane, old_plane_state, i) { - if (plane->state->crtc != crtc && - old_plane_state->crtc != crtc) - continue; - - if (plane->state->fb != old_plane_state->fb) - return true; - } - - return false; -} -EXPORT_SYMBOL(drm_atomic_helper_framebuffer_changed); - -/** * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs * @dev: DRM device * @old_state: atomic state object with old state structures @@ -1110,39 +1082,35 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int i, ret; + unsigned crtc_mask = 0; - for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { - /* No one cares about the old state, so abuse it for tracking - * and store whether we hold a vblank reference (and should do a - * vblank wait) in the ->enable boolean. */ - old_crtc_state->enable = false; - - if (!crtc->state->enable) - continue; + /* + * Legacy cursor ioctls are completely unsynced, and userspace + * relies on that (by doing tons of cursor updates). + */ + if (old_state->legacy_cursor_update) + return; - /* Legacy cursor ioctls are completely unsynced, and userspace - * relies on that (by doing tons of cursor updates). 
*/ - if (old_state->legacy_cursor_update) - continue; + for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { + struct drm_crtc_state *new_crtc_state = crtc->state; - if (!drm_atomic_helper_framebuffer_changed(dev, - old_state, crtc)) + if (!new_crtc_state->active || !new_crtc_state->planes_changed) continue; ret = drm_crtc_vblank_get(crtc); if (ret != 0) continue; - old_crtc_state->enable = true; - old_crtc_state->last_vblank_count = drm_crtc_vblank_count(crtc); + crtc_mask |= drm_crtc_mask(crtc); + old_state->crtcs[i].last_vblank_count = drm_crtc_vblank_count(crtc); } for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) { - if (!old_crtc_state->enable) + if (!(crtc_mask & drm_crtc_mask(crtc))) continue; ret = wait_event_timeout(dev->vblank[i].queue, - old_crtc_state->last_vblank_count != + old_state->crtcs[i].last_vblank_count != drm_crtc_vblank_count(crtc), msecs_to_jiffies(50)); @@ -1664,9 +1632,6 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev, funcs = plane->helper_private; - if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) - continue; - if (funcs->prepare_fb) { ret = funcs->prepare_fb(plane, plane_state); if (ret) @@ -1683,9 +1648,6 @@ fail: if (j >= i) continue; - if (!drm_atomic_helper_framebuffer_changed(dev, state, plane_state->crtc)) - continue; - funcs = plane->helper_private; if (funcs->cleanup_fb) @@ -1952,9 +1914,6 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev, for_each_plane_in_state(old_state, plane, plane_state, i) { const struct drm_plane_helper_funcs *funcs; - if (!drm_atomic_helper_framebuffer_changed(dev, old_state, plane_state->crtc)) - continue; - funcs = plane->helper_private; if (funcs->cleanup_fb) @@ -2442,6 +2401,7 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, { struct drm_atomic_state *state; struct drm_connector *conn; + struct drm_connector_list_iter conn_iter; int err; state = drm_atomic_state_alloc(dev); @@ -2450,7 +2410,8 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, state->acquire_ctx = ctx; - drm_for_each_connector(conn, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { struct drm_crtc *crtc = conn->state->crtc; struct drm_crtc_state *crtc_state; @@ -2468,6 +2429,7 @@ int drm_atomic_helper_disable_all(struct drm_device *dev, err = drm_atomic_commit(state); free: + drm_connector_list_iter_put(&conn_iter); drm_atomic_state_put(state); return err; } @@ -2840,6 +2802,7 @@ int drm_atomic_helper_connector_dpms(struct drm_connector *connector, struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; struct drm_connector *tmp_connector; + struct drm_connector_list_iter conn_iter; int ret; bool active = false; int old_mode = connector->dpms; @@ -2867,7 +2830,8 @@ retry: WARN_ON(!drm_modeset_is_locked(&config->connection_mutex)); - drm_for_each_connector(tmp_connector, connector->dev) { + drm_connector_list_iter_get(connector->dev, &conn_iter); + drm_for_each_connector_iter(tmp_connector, &conn_iter) { if (tmp_connector->state->crtc != crtc) continue; @@ -2876,6 +2840,7 @@ retry: break; } } + drm_connector_list_iter_put(&conn_iter); crtc_state->active = active; ret = drm_atomic_commit(state); @@ -3253,6 +3218,7 @@ drm_atomic_helper_duplicate_state(struct drm_device *dev, { struct drm_atomic_state *state; struct drm_connector *conn; + struct drm_connector_list_iter conn_iter; struct drm_plane *plane; struct drm_crtc *crtc; int err = 0; @@ -3283,15 +3249,18 @@ drm_atomic_helper_duplicate_state(struct 
drm_device *dev, } } - drm_for_each_connector(conn, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(conn, &conn_iter) { struct drm_connector_state *conn_state; conn_state = drm_atomic_get_connector_state(state, conn); if (IS_ERR(conn_state)) { err = PTR_ERR(conn_state); + drm_connector_list_iter_put(&conn_iter); goto free; } } + drm_connector_list_iter_put(&conn_iter); /* clear the acquire context so that it isn't accidentally reused */ state->acquire_ctx = NULL; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 0ee052b7c21a..cd10095e8d00 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -26,6 +26,9 @@ #include <linux/mutex.h> #include <drm/drm_bridge.h> +#include <drm/drm_encoder.h> + +#include "drm_crtc_internal.h" /** * DOC: overview @@ -92,47 +95,58 @@ void drm_bridge_remove(struct drm_bridge *bridge) EXPORT_SYMBOL(drm_bridge_remove); /** - * drm_bridge_attach - associate given bridge to our DRM device + * drm_bridge_attach - attach the bridge to an encoder's chain * - * @dev: DRM device - * @bridge: bridge control structure + * @encoder: DRM encoder + * @bridge: bridge to attach + * @previous: previous bridge in the chain (optional) * - * Called by a kms driver to link one of our encoder/bridge to the given - * bridge. + * Called by a kms driver to link the bridge to an encoder's chain. The previous + * argument specifies the previous bridge in the chain. If NULL, the bridge is + * linked directly at the encoder's output. Otherwise it is linked at the + * previous bridge's output. * - * Note that setting up links between the bridge and our encoder/bridge - * objects needs to be handled by the kms driver itself. + * If non-NULL, the previous bridge must already have been attached by a call + * to this function. * * RETURNS: * Zero on success, error code on failure */ -int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge) +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous) { - if (!dev || !bridge) + int ret; + + if (!encoder || !bridge) + return -EINVAL; + + if (previous && (!previous->dev || previous->encoder != encoder)) + return -EINVAL; if (bridge->dev) return -EBUSY; - bridge->dev = dev; + bridge->dev = encoder->dev; + bridge->encoder = encoder; + + if (bridge->funcs->attach) { + ret = bridge->funcs->attach(bridge); + if (ret < 0) { + bridge->dev = NULL; + bridge->encoder = NULL; + return ret; + } + } - if (bridge->funcs->attach) - return bridge->funcs->attach(bridge); + if (previous) + previous->next = bridge; + else + encoder->bridge = bridge; return 0; } EXPORT_SYMBOL(drm_bridge_attach); -/** - * drm_bridge_detach - deassociate given bridge from its DRM device - * - * @bridge: bridge control structure - * - * Called by a kms driver to unlink the given bridge from its DRM device. - * - * Note that tearing down links between the bridge and our encoder/bridge - * objects needs to be handled by the kms driver itself. 
- */ void drm_bridge_detach(struct drm_bridge *bridge) { if (WARN_ON(!bridge)) @@ -146,7 +160,6 @@ void drm_bridge_detach(struct drm_bridge *bridge) bridge->dev = NULL; } -EXPORT_SYMBOL(drm_bridge_detach); /** * DOC: bridge callbacks diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 5a4526289392..3115db2ae6b1 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -23,6 +23,7 @@ #include <drm/drmP.h> #include <drm/drm_connector.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -189,13 +190,11 @@ int drm_connector_init(struct drm_device *dev, struct ida *connector_ida = &drm_connector_enum_list[connector_type].ida; - drm_modeset_lock_all(dev); - ret = drm_mode_object_get_reg(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR, false, drm_connector_free); if (ret) - goto out_unlock; + return ret; connector->base.properties = &connector->properties; connector->dev = dev; @@ -225,6 +224,7 @@ int drm_connector_init(struct drm_device *dev, INIT_LIST_HEAD(&connector->probed_modes); INIT_LIST_HEAD(&connector->modes); + mutex_init(&connector->mutex); connector->edid_blob_ptr = NULL; connector->status = connector_status_unknown; @@ -232,8 +232,10 @@ int drm_connector_init(struct drm_device *dev, /* We should add connectors at the end to avoid upsetting the connector * index too much. */ + spin_lock_irq(&config->connector_list_lock); list_add_tail(&connector->head, &config->connector_list); config->num_connector++; + spin_unlock_irq(&config->connector_list_lock); if (connector_type != DRM_MODE_CONNECTOR_VIRTUAL) drm_object_attach_property(&connector->base, @@ -258,9 +260,6 @@ out_put: if (ret) drm_mode_object_unregister(dev, &connector->base); -out_unlock: - drm_modeset_unlock_all(dev); - return ret; } EXPORT_SYMBOL(drm_connector_init); @@ -351,14 +350,18 @@ void drm_connector_cleanup(struct drm_connector *connector) drm_mode_object_unregister(dev, &connector->base); kfree(connector->name); connector->name = NULL; + spin_lock_irq(&dev->mode_config.connector_list_lock); list_del(&connector->head); dev->mode_config.num_connector--; + spin_unlock_irq(&dev->mode_config.connector_list_lock); WARN_ON(connector->state && !connector->funcs->atomic_destroy_state); if (connector->state && connector->funcs->atomic_destroy_state) connector->funcs->atomic_destroy_state(connector, connector->state); + mutex_destroy(&connector->mutex); + memset(connector, 0, sizeof(*connector)); } EXPORT_SYMBOL(drm_connector_cleanup); @@ -374,14 +377,15 @@ EXPORT_SYMBOL(drm_connector_cleanup); */ int drm_connector_register(struct drm_connector *connector) { - int ret; + int ret = 0; + mutex_lock(&connector->mutex); if (connector->registered) - return 0; + goto unlock; ret = drm_sysfs_connector_add(connector); if (ret) - return ret; + goto unlock; ret = drm_debugfs_connector_add(connector); if (ret) { @@ -397,12 +401,14 @@ int drm_connector_register(struct drm_connector *connector) drm_mode_object_register(connector->dev, &connector->base); connector->registered = true; - return 0; + goto unlock; err_debugfs: drm_debugfs_connector_remove(connector); err_sysfs: drm_sysfs_connector_remove(connector); +unlock: + mutex_unlock(&connector->mutex); return ret; } EXPORT_SYMBOL(drm_connector_register); @@ -415,8 +421,11 @@ EXPORT_SYMBOL(drm_connector_register); */ void drm_connector_unregister(struct drm_connector *connector) { - if (!connector->registered) + mutex_lock(&connector->mutex); + if 
(!connector->registered) { + mutex_unlock(&connector->mutex); return; + } if (connector->funcs->early_unregister) connector->funcs->early_unregister(connector); @@ -425,36 +434,37 @@ void drm_connector_unregister(struct drm_connector *connector) drm_debugfs_connector_remove(connector); connector->registered = false; + mutex_unlock(&connector->mutex); } EXPORT_SYMBOL(drm_connector_unregister); void drm_connector_unregister_all(struct drm_device *dev) { struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; - /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) drm_connector_unregister(connector); + drm_connector_list_iter_put(&conn_iter); } int drm_connector_register_all(struct drm_device *dev) { struct drm_connector *connector; - int ret; + struct drm_connector_list_iter conn_iter; + int ret = 0; - /* FIXME: taking the mode config mutex ends up in a clash with - * fbcon/backlight registration */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { ret = drm_connector_register(connector); if (ret) - goto err; + break; } + drm_connector_list_iter_put(&conn_iter); - return 0; - -err: - mutex_unlock(&dev->mode_config.mutex); - drm_connector_unregister_all(dev); + if (ret) + drm_connector_unregister_all(dev); return ret; } @@ -476,6 +486,87 @@ const char *drm_get_connector_status_name(enum drm_connector_status status) } EXPORT_SYMBOL(drm_get_connector_status_name); +#ifdef CONFIG_LOCKDEP +static struct lockdep_map connector_list_iter_dep_map = { + .name = "drm_connector_list_iter" +}; +#endif + +/** + * drm_connector_list_iter_get - initialize a connector_list iterator + * @dev: DRM device + * @iter: connector_list iterator + * + * Sets @iter up to walk the connector list in &drm_mode_config of @dev. @iter + * must always be cleaned up again by calling drm_connector_list_iter_put(). + * Iteration itself happens using drm_connector_list_iter_next() or + * drm_for_each_connector_iter(). + */ +void drm_connector_list_iter_get(struct drm_device *dev, + struct drm_connector_list_iter *iter) +{ + iter->dev = dev; + iter->conn = NULL; + lock_acquire_shared_recursive(&connector_list_iter_dep_map, 0, 1, NULL, _RET_IP_); +} +EXPORT_SYMBOL(drm_connector_list_iter_get); + +/** + * drm_connector_list_iter_next - return next connector + * @iter: connector_list iterator + * + * Returns the next connector for @iter, or NULL when the list walk has + * completed. + */ +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter) +{ + struct drm_connector *old_conn = iter->conn; + struct drm_mode_config *config = &iter->dev->mode_config; + struct list_head *lhead; + unsigned long flags; + + spin_lock_irqsave(&config->connector_list_lock, flags); + lhead = old_conn ? 
&old_conn->head : &config->connector_list; + + do { + if (lhead->next == &config->connector_list) { + iter->conn = NULL; + break; + } + + lhead = lhead->next; + iter->conn = list_entry(lhead, struct drm_connector, head); + + /* loop until it's not a zombie connector */ + } while (!kref_get_unless_zero(&iter->conn->base.refcount)); + spin_unlock_irqrestore(&config->connector_list_lock, flags); + + if (old_conn) + drm_connector_unreference(old_conn); + + return iter->conn; +} +EXPORT_SYMBOL(drm_connector_list_iter_next); + +/** + * drm_connector_list_iter_put - tear down a connector_list iterator + * @iter: connector_list iterator + * + * Tears down @iter and releases any resources (like &drm_connector references) + * acquired while walking the list. This must always be called, whether the + * iteration completed fully or was aborted without walking the entire + * list. + */ +void drm_connector_list_iter_put(struct drm_connector_list_iter *iter) +{ + iter->dev = NULL; + if (iter->conn) + drm_connector_unreference(iter->conn); + lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); +} +EXPORT_SYMBOL(drm_connector_list_iter_put); + static const struct drm_prop_enum_list drm_subpixel_enum_list[] = { { SubPixelUnknown, "Unknown" }, { SubPixelHorizontalRGB, "Horizontal RGB" }, @@ -1072,43 +1163,65 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); - mutex_lock(&dev->mode_config.mutex); - connector = drm_connector_lookup(dev, out_resp->connector_id); - if (!connector) { - ret = -ENOENT; - goto out_unlock; - } + if (!connector) + return -ENOENT; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + encoder = drm_connector_get_encoder(connector); + if (encoder) + out_resp->encoder_id = encoder->base.id; + else + out_resp->encoder_id = 0; + + ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, + (uint32_t __user *)(unsigned long)(out_resp->props_ptr), + (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), + &out_resp->count_props); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + if (ret) + goto out_unref; for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) if (connector->encoder_ids[i] != 0) encoders_count++; + if ((out_resp->count_encoders >= encoders_count) && encoders_count) { + copied = 0; + encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); + for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { + if (connector->encoder_ids[i] != 0) { + if (put_user(connector->encoder_ids[i], + encoder_ptr + copied)) { + ret = -EFAULT; + goto out_unref; + } + copied++; + } + } + } + out_resp->count_encoders = encoders_count; + + out_resp->connector_id = connector->base.id; + out_resp->connector_type = connector->connector_type; + out_resp->connector_type_id = connector->connector_type_id; + + mutex_lock(&dev->mode_config.mutex); if (out_resp->count_modes == 0) { connector->funcs->fill_modes(connector, dev->mode_config.max_width, dev->mode_config.max_height); } - /* delayed so we get modes regardless of pre-fill_modes state */ - list_for_each_entry(mode, &connector->modes, head) - if (drm_mode_expose_to_userspace(mode, file_priv)) - mode_count++; - - out_resp->connector_id = connector->base.id; - out_resp->connector_type = connector->connector_type; - out_resp->connector_type_id = connector->connector_type_id; out_resp->mm_width = connector->display_info.width_mm; out_resp->mm_height = connector->display_info.height_mm; out_resp->subpixel = 
connector->display_info.subpixel_order; out_resp->connection = connector->status; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - encoder = drm_connector_get_encoder(connector); - if (encoder) - out_resp->encoder_id = encoder->base.id; - else - out_resp->encoder_id = 0; + /* delayed so we get modes regardless of pre-fill_modes state */ + list_for_each_entry(mode, &connector->modes, head) + if (drm_mode_expose_to_userspace(mode, file_priv)) + mode_count++; /* * This ioctl is called twice, once to determine how much space is @@ -1131,36 +1244,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, } } out_resp->count_modes = mode_count; - - ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic, - (uint32_t __user *)(unsigned long)(out_resp->props_ptr), - (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr), - &out_resp->count_props); - if (ret) - goto out; - - if ((out_resp->count_encoders >= encoders_count) && encoders_count) { - copied = 0; - encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); - for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { - if (connector->encoder_ids[i] != 0) { - if (put_user(connector->encoder_ids[i], - encoder_ptr + copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } - } - } - out_resp->count_encoders = encoders_count; - out: - drm_modeset_unlock(&dev->mode_config.connection_mutex); - - drm_connector_unreference(connector); -out_unlock: mutex_unlock(&dev->mode_config.mutex); +out_unref: + drm_connector_unreference(connector); return ret; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index e75f62cd8a65..080c8d361f1f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -357,7 +357,10 @@ int drm_mode_getcrtc(struct drm_device *dev, drm_modeset_lock_crtc(crtc, crtc->primary); crtc_resp->gamma_size = crtc->gamma_size; - if (crtc->primary->fb) + + if (crtc->primary->state && crtc->primary->state->fb) + crtc_resp->fb_id = crtc->primary->state->fb->base.id; + else if (!crtc->primary->state && crtc->primary->fb) crtc_resp->fb_id = crtc->primary->fb->base.id; else crtc_resp->fb_id = 0; @@ -572,11 +575,11 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, */ if (!crtc->primary->format_default) { ret = drm_plane_check_pixel_format(crtc->primary, - fb->pixel_format); + fb->format->format); if (ret) { struct drm_format_name_buf format_name; DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(fb->pixel_format, + drm_get_format_name(fb->format->format, &format_name)); goto out; } diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 5d2cb138eba6..923a17c05e01 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -36,6 +36,7 @@ #include <drm/drmP.h> #include <drm/drm_atomic.h> #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> #include <drm/drm_fourcc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> @@ -88,6 +89,7 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) { struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; /* @@ -99,9 +101,15 @@ bool drm_helper_encoder_in_use(struct drm_encoder *encoder) WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); } - drm_for_each_connector(connector, dev) - if (connector->encoder == encoder) + + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if 
(connector->encoder == encoder) { + drm_connector_list_iter_put(&conn_iter); return true; + } + } + drm_connector_list_iter_put(&conn_iter); return false; } EXPORT_SYMBOL(drm_helper_encoder_in_use); @@ -436,10 +444,13 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) /* Decouple all encoders and their attached connectors from this crtc */ drm_for_each_encoder(encoder, dev) { + struct drm_connector_list_iter conn_iter; + if (encoder->crtc != crtc) continue; - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder != encoder) continue; @@ -456,6 +467,7 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) /* we keep a reference while the encoder is bound */ drm_connector_unreference(connector); } + drm_connector_list_iter_put(&conn_iter); } __drm_helper_disable_unused_functions(dev); @@ -507,6 +519,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int count = 0, ro, fail = 0; const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; @@ -571,9 +584,10 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } count = 0; - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) save_connector_encoders[count++] = connector->encoder; - } + drm_connector_list_iter_put(&conn_iter); save_set.crtc = set->crtc; save_set.mode = &set->crtc->mode; @@ -588,8 +602,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (set->crtc->primary->fb == NULL) { DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); mode_changed = true; - } else if (set->fb->pixel_format != - set->crtc->primary->fb->pixel_format) { + } else if (set->fb->format != set->crtc->primary->fb->format) { mode_changed = true; } else fb_changed = true; @@ -616,7 +629,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) /* a) traverse passed in connector list and get encoders for them */ count = 0; - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; new_encoder = connector->encoder; @@ -649,6 +663,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) connector->encoder = new_encoder; } } + drm_connector_list_iter_put(&conn_iter); if (fail) { ret = -EINVAL; @@ -656,7 +671,8 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } count = 0; - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->encoder) continue; @@ -674,6 +690,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) if (new_crtc && !drm_encoder_crtc_ok(connector->encoder, new_crtc)) { ret = -EINVAL; + drm_connector_list_iter_put(&conn_iter); goto fail; } if (new_crtc != connector->encoder->crtc) { @@ -690,6 +707,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) connector->base.id, connector->name); } } + drm_connector_list_iter_put(&conn_iter); /* mode_set_base is not a required function */ if (fb_changed && !crtc_funcs->mode_set_base) @@ -744,9 +762,10 @@ fail: } count = 0; - drm_for_each_connector(connector, dev) { + 
drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) connector->encoder = save_connector_encoders[count++]; - } + drm_connector_list_iter_put(&conn_iter); /* after fail drop reference on all unbound connectors in set, let * bound connectors keep their reference @@ -773,12 +792,16 @@ static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; struct drm_device *dev = encoder->dev; - drm_for_each_connector(connector, dev) + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) if (connector->encoder == encoder) if (connector->dpms < dpms) dpms = connector->dpms; + drm_connector_list_iter_put(&conn_iter); + return dpms; } @@ -810,12 +833,16 @@ static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc) { int dpms = DRM_MODE_DPMS_OFF; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; struct drm_device *dev = crtc->dev; - drm_for_each_connector(connector, dev) + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) if (connector->encoder && connector->encoder->crtc == crtc) if (connector->dpms < dpms) dpms = connector->dpms; + drm_connector_list_iter_put(&conn_iter); + return dpms; } diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index cdf6860c9d22..724c329186d5 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -174,6 +174,12 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* drm_atomic.c */ +#ifdef CONFIG_DEBUG_FS +struct drm_minor; +int drm_atomic_debugfs_init(struct drm_minor *minor); +int drm_atomic_debugfs_cleanup(struct drm_minor *minor); +#endif + int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val); int drm_mode_atomic_ioctl(struct drm_device *dev, @@ -186,6 +192,9 @@ void drm_plane_unregister_all(struct drm_device *dev); int drm_plane_check_pixel_format(const struct drm_plane *plane, u32 format); +/* drm_bridge.c */ +void drm_bridge_detach(struct drm_bridge *bridge); + /* IOCTL */ int drm_mode_getplane_res(struct drm_device *dev, void *data, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 2e3e46a53805..37fd612d57a6 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -38,6 +38,7 @@ #include <drm/drm_edid.h> #include <drm/drm_atomic.h> #include "drm_internal.h" +#include "drm_crtc_internal.h" #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index a525751b4559..4a7b3e98d586 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -323,9 +323,8 @@ void drm_minor_release(struct drm_minor *minor) * historical baggage. Hence use the reference counting provided by * drm_dev_ref() and drm_dev_unref() only carefully. * - * Also note that embedding of &drm_device is currently not (yet) supported (but - * it would be easy to add). Drivers can store driver-private data in the - * dev_priv field of &drm_device. + * It is recommended that drivers embed struct &drm_device into their own device + * structure, which is supported through drm_dev_init(). */ /** @@ -462,7 +461,11 @@ static void drm_fs_inode_free(struct inode *inode) * Note that for purely virtual devices @parent can be NULL. 
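+ *
+ * A rough sketch of the embedding pattern described below (the "foo" names
+ * are hypothetical, for illustration only; kzalloc() is part of the
+ * kmalloc() family, as required for the release path):
+ *
+ *     struct foo_device {
+ *             struct drm_device drm;      (must be the first member)
+ *             void __iomem *mmio;
+ *     };
+ *
+ *     foo = kzalloc(sizeof(*foo), GFP_KERNEL);
+ *     if (!foo)
+ *             return -ENOMEM;
+ *     ret = drm_dev_init(&foo->drm, &foo_driver, parent);
+ *     if (ret) {
+ *             kfree(foo);
+ *             return ret;
+ *     }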
* * Drivers that do not want to allocate their own device struct - * embedding struct &drm_device can call drm_dev_alloc() instead. + * embedding struct &drm_device can call drm_dev_alloc() instead. For drivers + * that do embed struct &drm_device it must be placed first in the overall + * structure, and the overall structure must be allocated using kmalloc(): The + * drm core's release function unconditionally calls kfree() on the @dev pointer + * when the final reference is released. * * RETURNS: * 0 on success, or error code on failure. diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 336be31ff3de..67d6a73731d8 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -35,6 +35,7 @@ #include <linux/vga_switcheroo.h> #include <drm/drmP.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <drm/drm_displayid.h> #define version_greater(edid, maj, min) \ diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c index 992879f15f23..5f0598e4bf6f 100644 --- a/drivers/gpu/drm/drm_encoder.c +++ b/drivers/gpu/drm/drm_encoder.c @@ -159,6 +159,17 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) * the indices on the drm_encoder after us in the encoder_list. */ + if (encoder->bridge) { + struct drm_bridge *bridge = encoder->bridge; + struct drm_bridge *next; + + while (bridge) { + next = bridge->next; + drm_bridge_detach(bridge); + bridge = next; + } + } + drm_mode_object_unregister(dev, &encoder->base); kfree(encoder->name); list_del(&encoder->head); @@ -173,10 +184,12 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) struct drm_connector *connector; struct drm_device *dev = encoder->dev; bool uses_atomic = false; + struct drm_connector_list_iter conn_iter; /* For atomic drivers only state objects are synchronously updated and * protected by modeset locks, so check those first. */ - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (!connector->state) continue; @@ -185,8 +198,10 @@ static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder) if (connector->state->best_encoder != encoder) continue; + drm_connector_list_iter_put(&conn_iter); return connector->state->crtc; } + drm_connector_list_iter_put(&conn_iter); /* Don't return stale data (e.g. pending async disable). 
*/ if (uses_atomic) diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index 81b3558302b5..591f30ebc42a 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -147,7 +147,7 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev, if (!fb_cma) return ERR_PTR(-ENOMEM); - drm_helper_mode_fill_fb_struct(&fb_cma->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &fb_cma->fb, mode_cmd); for (i = 0; i < num_planes; i++) fb_cma->obj[i] = obj[i]; @@ -304,15 +304,12 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_prepare_fb); static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct drm_fb_cma *fb_cma = to_fb_cma(fb); - const struct drm_format_info *info; int i; seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height, - (char *)&fb->pixel_format); - - info = drm_format_info(fb->pixel_format); + (char *)&fb->format->format); - for (i = 0; i < info->num_planes; i++) { + for (i = 0; i < fb->format->num_planes; i++) { seq_printf(m, " %d: offset=%d pitch=%d, obj: ", i, fb->offsets[i], fb->pitches[i]); drm_gem_cma_describe(fb_cma->obj[i], m); @@ -467,7 +464,7 @@ int drm_fbdev_cma_create_with_funcs(struct drm_fb_helper *helper, fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &drm_fbdev_cma_ops; - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); offset = fbi->var.xoffset * bytes_per_pixel; diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index e934b541feea..730342cbe899 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -120,20 +120,22 @@ int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) { struct drm_device *dev = fb_helper->dev; struct drm_connector *connector; - int i, ret; + struct drm_connector_list_iter conn_iter; + int i, ret = 0; if (!drm_fbdev_emulation) return 0; mutex_lock(&dev->mode_config.mutex); - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { ret = drm_fb_helper_add_one_connector(fb_helper, connector); if (ret) goto fail; } - mutex_unlock(&dev->mode_config.mutex); - return 0; + goto out; + fail: drm_fb_helper_for_each_connector(fb_helper, i) { struct drm_fb_helper_connector *fb_helper_connector = @@ -145,6 +147,8 @@ fail: fb_helper->connector_info[i] = NULL; } fb_helper->connector_count = 0; +out: + drm_connector_list_iter_put(&conn_iter); mutex_unlock(&dev->mode_config.mutex); return ret; @@ -401,7 +405,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper) drm_warn_on_modeset_not_all_locked(dev); - if (dev->mode_config.funcs->atomic_commit) + if (drm_drv_uses_atomic_modeset(dev)) return restore_fbdev_mode_atomic(fb_helper); drm_for_each_plane(plane, dev) { @@ -1169,7 +1173,7 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green, !fb_helper->funcs->gamma_get)) return -EINVAL; - WARN_ON(fb->bits_per_pixel != 8); + WARN_ON(fb->format->cpp[0] != 1); fb_helper->funcs->gamma_set(crtc, red, green, blue, regno); @@ -1252,14 +1256,14 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, * Changes struct fb_var_screeninfo are currently not pushed back * to KMS, hence fail if different settings are requested. 
*/ - if (var->bits_per_pixel != fb->bits_per_pixel || + if (var->bits_per_pixel != fb->format->cpp[0] * 8 || var->xres != fb->width || var->yres != fb->height || var->xres_virtual != fb->width || var->yres_virtual != fb->height) { DRM_DEBUG("fb userspace requested width/height/bpp different than current fb " "request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel, var->xres_virtual, var->yres_virtual, - fb->width, fb->height, fb->bits_per_pixel); + fb->width, fb->height, fb->format->cpp[0] * 8); return -EINVAL; } @@ -1440,7 +1444,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, return -EBUSY; } - if (dev->mode_config.funcs->atomic_commit) { + if (drm_drv_uses_atomic_modeset(dev)) { ret = pan_display_atomic(var, info); goto unlock; } @@ -1645,7 +1649,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe info->pseudo_palette = fb_helper->pseudo_palette; info->var.xres_virtual = fb->width; info->var.yres_virtual = fb->height; - info->var.bits_per_pixel = fb->bits_per_pixel; + info->var.bits_per_pixel = fb->format->cpp[0] * 8; info->var.accel_flags = FB_ACCELF_TEXT; info->var.xoffset = 0; info->var.yoffset = 0; @@ -1653,7 +1657,7 @@ void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helpe info->var.height = -1; info->var.width = -1; - switch (fb->depth) { + switch (fb->format->depth) { case 8: info->var.red.offset = 0; info->var.green.offset = 0; @@ -2056,7 +2060,7 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper, * NULL we fallback to the default drm_atomic_helper_best_encoder() * helper. */ - if (fb_helper->dev->mode_config.funcs->atomic_commit && + if (drm_drv_uses_atomic_modeset(fb_helper->dev) && !connector_funcs->best_encoder) encoder = drm_atomic_helper_best_encoder(connector); else diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c index 5d96de40b63f..48e106557c92 100644 --- a/drivers/gpu/drm/drm_fops.c +++ b/drivers/gpu/drm/drm_fops.c @@ -622,7 +622,7 @@ EXPORT_SYMBOL(drm_event_reserve_init_locked); * kmalloc and @p must be the first member element. * * Callers which already hold dev->event_lock should use - * drm_event_reserve_init() instead. + * drm_event_reserve_init_locked() instead. 
* * RETURNS: * diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index cbf0c893f426..94ddab41f24f 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -432,8 +432,8 @@ int drm_mode_getfb(struct drm_device *dev, r->height = fb->height; r->width = fb->width; - r->depth = fb->depth; - r->bpp = fb->bits_per_pixel; + r->depth = fb->format->depth; + r->bpp = fb->format->cpp[0] * 8; r->pitch = fb->pitches[0]; if (fb->funcs->create_handle) { if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) || @@ -631,8 +631,11 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, { int ret; + if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) + return -EINVAL; + INIT_LIST_HEAD(&fb->filp_head); - fb->dev = dev; + fb->funcs = funcs; ret = drm_mode_object_get_reg(dev, &fb->base, DRM_MODE_OBJECT_FB, @@ -790,3 +793,47 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) drm_framebuffer_unreference(fb); } EXPORT_SYMBOL(drm_framebuffer_remove); + +/** + * drm_framebuffer_plane_width - width of the plane given the first plane + * @width: width of the first plane + * @fb: the framebuffer + * @plane: plane index + * + * Returns: + * The width of @plane, given that the width of the first plane is @width. + */ +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane) +{ + if (plane >= fb->format->num_planes) + return 0; + + if (plane == 0) + return width; + + return width / fb->format->hsub; +} +EXPORT_SYMBOL(drm_framebuffer_plane_width); + +/** + * drm_framebuffer_plane_height - height of the plane given the first plane + * @height: height of the first plane + * @fb: the framebuffer + * @plane: plane index + * + * Returns: + * The height of @plane, given that the height of the first plane is @height. 
+ */ +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane) +{ + if (plane >= fb->format->num_planes) + return 0; + + if (plane == 0) + return height; + + return height / fb->format->vsub; +} +EXPORT_SYMBOL(drm_framebuffer_plane_height); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index db80ec860e33..a6213f814345 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -58,10 +58,10 @@ extern unsigned int drm_timestamp_monotonic; /* IOCTLS */ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); -int drm_control(struct drm_device *dev, void *data, - struct drm_file *file_priv); -int drm_modeset_ctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); +int drm_legacy_irq_control(struct drm_device *dev, void *data, + struct drm_file *file_priv); +int drm_legacy_modeset_ctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* drm_auth.c */ int drm_getmagic(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index fed22c2b98b6..d180673c1323 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -115,11 +115,15 @@ static int drm_getunique(struct drm_device *dev, void *data, struct drm_unique *u = data; struct drm_master *master = file_priv->master; + mutex_lock(&master->dev->master_mutex); if (u->unique_len >= master->unique_len) { - if (copy_to_user(u->unique, master->unique, master->unique_len)) + if (copy_to_user(u->unique, master->unique, master->unique_len)) { + mutex_unlock(&master->dev->master_mutex); return -EFAULT; + } } u->unique_len = master->unique_len; + mutex_unlock(&master->dev->master_mutex); return 0; } @@ -340,6 +344,7 @@ static int drm_setversion(struct drm_device *dev, void *data, struct drm_file *f struct drm_set_version *sv = data; int if_version, retcode = 0; + mutex_lock(&dev->master_mutex); if (sv->drm_di_major != -1) { if (sv->drm_di_major != DRM_IF_MAJOR || sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { @@ -374,6 +379,7 @@ done: sv->drm_di_minor = DRM_IF_MINOR; sv->drm_dd_major = dev->driver->major; sv->drm_dd_minor = dev->driver->minor; + mutex_unlock(&dev->master_mutex); return retcode; } @@ -528,15 +534,15 @@ EXPORT_SYMBOL(drm_ioctl_permit); static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED), DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED|DRM_RENDER_ALLOW), - DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), - DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), + DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, DRM_UNLOCKED), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_UNLOCKED | DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -575,7 +581,7 @@ static const struct drm_ioctl_desc 
drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_legacy_freebufs, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_legacy_dma_ioctl, DRM_AUTH), - DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #if IS_ENABLED(CONFIG_AGP) DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -593,7 +599,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED), - DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_legacy_modeset_ctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), @@ -729,9 +735,8 @@ long drm_ioctl(struct file *filp, if (ksize > in_size) memset(kdata + in_size, 0, ksize - in_size); - /* Enforce sane locking for modern driver ioctls. Core ioctls are - * too messy still. */ - if ((!drm_core_check_feature(dev, DRIVER_LEGACY) && is_driver_ioctl) || + /* Enforce sane locking for modern driver ioctls. */ + if (!drm_core_check_feature(dev, DRIVER_LEGACY) || (ioctl->flags & DRM_UNLOCKED)) retcode = func(dev, kdata, file_priv); else { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 273625a85036..feb091310ffe 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -579,19 +579,8 @@ int drm_irq_uninstall(struct drm_device *dev) } EXPORT_SYMBOL(drm_irq_uninstall); -/* - * IRQ control ioctl. - * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument, pointing to a drm_control structure. - * \return zero on success or a negative number on failure. - * - * Calls irq_install() or irq_uninstall() according to \p arg. - */ -int drm_control(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_irq_control(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_control *ctl = data; int ret = 0, irq; @@ -1442,19 +1431,8 @@ static void drm_legacy_vblank_post_modeset(struct drm_device *dev, } } -/* - * drm_modeset_ctl - handle vblank event counter changes across mode switch - * @DRM_IOCTL_ARGS: standard ioctl arguments - * - * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET - * ioctls around modesetting so that any lost vblank events are accounted for. - * - * Generally the counter will reset across mode sets. If interrupts are - * enabled around this call, we don't have to do anything since the counter - * will have already been incremented. - */ -int drm_modeset_ctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int drm_legacy_modeset_ctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; unsigned int pipe; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index ca1e344f318d..1a5b4eba2386 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -1,6 +1,7 @@ /************************************************************************** * * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA. + * Copyright 2016 Intel Corporation * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -31,9 +32,9 @@ * class implementation for more advanced memory managers. 
* * Note that the algorithm used is quite simple and there might be substantial - * performance gains if a smarter free list is implemented. Currently it is just an - * unordered stack of free regions. This could easily be improved if an RB-tree - * is used instead. At least if we expect heavy fragmentation. + * performance gains if a smarter free list is implemented. Currently it is + * just an unordered stack of free regions. This could easily be improved if + * an RB-tree is used instead. At least if we expect heavy fragmentation. * * Aligned allocations can also see improvement. * * @@ -67,7 +68,7 @@ * where an object needs to be created which exactly matches the firmware's * scanout target. As long as the range is still free it can be inserted anytime * after the allocator is initialized, which helps with avoiding looped - * depencies in the driver load sequence. + * dependencies in the driver load sequence. * * drm_mm maintains a stack of most recently freed holes, which of all * simplistic datastructures seems to be a fairly decent approach to clustering * @@ -78,27 +79,27 @@ * * drm_mm supports a few features: Alignment and range restrictions can be * supplied. Further more every &drm_mm_node has a color value (which is just an - * opaqua unsigned long) which in conjunction with a driver callback can be used + * opaque unsigned long) which in conjunction with a driver callback can be used * to implement sophisticated placement restrictions. The i915 DRM driver uses * this to implement guard pages between incompatible caching domains in the * graphics TT. * - * Two behaviors are supported for searching and allocating: bottom-up and top-down. - * The default is bottom-up. Top-down allocation can be used if the memory area - * has different restrictions, or just to reduce fragmentation. + * Two behaviors are supported for searching and allocating: bottom-up and + * top-down. The default is bottom-up. Top-down allocation can be used if the + * memory area has different restrictions, or just to reduce fragmentation. * * Finally iteration helpers to walk all nodes and all holes are provided as are * some basic allocator dumpers for debugging. + * + * Note that this range allocator is not thread-safe; drivers need to protect + * modifications with their own locking. The idea behind this is that for a full + * memory manager additional data needs to be protected anyway, hence internal + * locking would be fully redundant.
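As a sketch of two points the overview just made - reserving a firmware scanout range right after initialization, and guarding every modification with driver-side locking - assuming a hypothetical "foo_vram" structure that was kzalloc'ed (drm_mm_init() requires @mm cleared to 0, and drm_mm_reserve_node() a cleared node):

#include <drm/drm_mm.h>

struct foo_vram {
        struct drm_mm mm;
        struct mutex lock;      /* drm_mm brings no locking of its own */
        struct drm_mm_node fw_scanout;
};

static int foo_vram_init(struct foo_vram *vram, u64 start, u64 size,
                         u64 fw_start, u64 fw_size)
{
        int ret;

        mutex_init(&vram->lock);
        drm_mm_init(&vram->mm, start, size);

        /* Claim the range the firmware still scans out of, so no later
         * allocation can land on top of it. */
        vram->fw_scanout.start = fw_start;
        vram->fw_scanout.size = fw_size;

        mutex_lock(&vram->lock);
        ret = drm_mm_reserve_node(&vram->mm, &vram->fw_scanout);
        mutex_unlock(&vram->lock);

        return ret;     /* -ENOSPC if the range is already taken */
}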
*/ -static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color, - enum drm_mm_search_flags flags); static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, u64 size, - unsigned alignment, + u64 alignment, unsigned long color, u64 start, u64 end, @@ -138,7 +139,7 @@ static void show_leaks(struct drm_mm *mm) if (!buf) return; - list_for_each_entry(node, &mm->head_node.node_list, node_list) { + list_for_each_entry(node, drm_mm_nodes(mm), node_list) { struct stack_trace trace = { .entries = entries, .max_entries = STACKDEPTH @@ -174,9 +175,9 @@ INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, START, LAST, static inline, drm_mm_interval_tree) struct drm_mm_node * -__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last) +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last) { - return drm_mm_interval_tree_iter_first(&mm->interval_tree, + return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree, start, last); } EXPORT_SYMBOL(__drm_mm_interval_first); @@ -227,8 +228,9 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, static void drm_mm_insert_helper(struct drm_mm_node *hole_node, struct drm_mm_node *node, - u64 size, unsigned alignment, + u64 size, u64 alignment, unsigned long color, + u64 range_start, u64 range_end, enum drm_mm_allocator_flags flags) { struct drm_mm *mm = hole_node->mm; @@ -237,19 +239,21 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, u64 adj_start = hole_start; u64 adj_end = hole_end; - BUG_ON(node->allocated); + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node) || node->allocated); if (mm->color_adjust) mm->color_adjust(hole_node, color, &adj_start, &adj_end); + adj_start = max(adj_start, range_start); + adj_end = min(adj_end, range_end); + if (flags & DRM_MM_CREATE_TOP) adj_start = adj_end - size; if (alignment) { - u64 tmp = adj_start; - unsigned rem; + u64 rem; - rem = do_div(tmp, alignment); + div64_u64_rem(adj_start, alignment, &rem); if (rem) { if (flags & DRM_MM_CREATE_TOP) adj_start -= rem; @@ -258,9 +262,6 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, } } - BUG_ON(adj_start < hole_start); - BUG_ON(adj_end > hole_end); - if (adj_start == hole_start) { hole_node->hole_follows = 0; list_del(&hole_node->hole_stack); @@ -276,7 +277,10 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node, drm_mm_interval_tree_add_node(hole_node, node); - BUG_ON(node->start + node->size > adj_end); + DRM_MM_BUG_ON(node->start < range_start); + DRM_MM_BUG_ON(node->start < adj_start); + DRM_MM_BUG_ON(node->start + node->size > adj_end); + DRM_MM_BUG_ON(node->start + node->size > range_end); node->hole_follows = 0; if (__drm_mm_hole_node_start(node) < hole_end) { @@ -308,10 +312,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) u64 hole_start, hole_end; u64 adj_start, adj_end; - if (WARN_ON(node->size == 0)) - return -EINVAL; - end = node->start + node->size; + if (unlikely(end <= node->start)) + return -ENOSPC; /* Find the relevant hole to add our node to */ hole = drm_mm_interval_tree_iter_first(&mm->interval_tree, @@ -320,12 +323,11 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) if (hole->start < end) return -ENOSPC; } else { - hole = list_entry(&mm->head_node.node_list, - typeof(*hole), node_list); + hole = list_entry(drm_mm_nodes(mm), typeof(*hole), node_list); } hole = list_last_entry(&hole->node_list, typeof(*hole), 
node_list); - if (!hole->hole_follows) + if (!drm_mm_hole_follows(hole)) return -ENOSPC; adj_start = hole_start = __drm_mm_hole_node_start(hole); @@ -362,110 +364,6 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) EXPORT_SYMBOL(drm_mm_reserve_node); /** - * drm_mm_insert_node_generic - search for space and insert @node - * @mm: drm_mm to allocate from - * @node: preallocate node to insert - * @size: size of the allocation - * @alignment: alignment of the allocation - * @color: opaque tag value to use for this node - * @sflags: flags to fine-tune the allocation search - * @aflags: flags to fine-tune the allocation behavior - * - * The preallocated node must be cleared to 0. - * - * Returns: - * 0 on success, -ENOSPC if there's no suitable hole. - */ -int drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, - u64 size, unsigned alignment, - unsigned long color, - enum drm_mm_search_flags sflags, - enum drm_mm_allocator_flags aflags) -{ - struct drm_mm_node *hole_node; - - if (WARN_ON(size == 0)) - return -EINVAL; - - hole_node = drm_mm_search_free_generic(mm, size, alignment, - color, sflags); - if (!hole_node) - return -ENOSPC; - - drm_mm_insert_helper(hole_node, node, size, alignment, color, aflags); - return 0; -} -EXPORT_SYMBOL(drm_mm_insert_node_generic); - -static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, - struct drm_mm_node *node, - u64 size, unsigned alignment, - unsigned long color, - u64 start, u64 end, - enum drm_mm_allocator_flags flags) -{ - struct drm_mm *mm = hole_node->mm; - u64 hole_start = drm_mm_hole_node_start(hole_node); - u64 hole_end = drm_mm_hole_node_end(hole_node); - u64 adj_start = hole_start; - u64 adj_end = hole_end; - - BUG_ON(!hole_node->hole_follows || node->allocated); - - if (adj_start < start) - adj_start = start; - if (adj_end > end) - adj_end = end; - - if (mm->color_adjust) - mm->color_adjust(hole_node, color, &adj_start, &adj_end); - - if (flags & DRM_MM_CREATE_TOP) - adj_start = adj_end - size; - - if (alignment) { - u64 tmp = adj_start; - unsigned rem; - - rem = do_div(tmp, alignment); - if (rem) { - if (flags & DRM_MM_CREATE_TOP) - adj_start -= rem; - else - adj_start += alignment - rem; - } - } - - if (adj_start == hole_start) { - hole_node->hole_follows = 0; - list_del(&hole_node->hole_stack); - } - - node->start = adj_start; - node->size = size; - node->mm = mm; - node->color = color; - node->allocated = 1; - - list_add(&node->node_list, &hole_node->node_list); - - drm_mm_interval_tree_add_node(hole_node, node); - - BUG_ON(node->start < start); - BUG_ON(node->start < adj_start); - BUG_ON(node->start + node->size > adj_end); - BUG_ON(node->start + node->size > end); - - node->hole_follows = 0; - if (__drm_mm_hole_node_start(node) < hole_end) { - list_add(&node->hole_stack, &mm->hole_stack); - node->hole_follows = 1; - } - - save_stack(node); -} - -/** * drm_mm_insert_node_in_range_generic - ranged search for space and insert @node * @mm: drm_mm to allocate from * @node: preallocate node to insert @@ -483,7 +381,7 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node, * 0 on success, -ENOSPC if there's no suitable hole. 
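A short usage sketch for the kernel-doc above, exercising the alignment parameter this patch widens to u64; the 64 KiB alignment and 4 GiB window (SZ_64K/SZ_4G from <linux/sizes.h>) are illustrative values, not anything this patch prescribes:

static int foo_insert(struct drm_mm *mm, struct drm_mm_node *node, u64 size)
{
        /* @node must be zeroed; after drm_mm_remove_node() it can be
         * reused here without clearing it again. */
        return drm_mm_insert_node_in_range_generic(mm, node, size,
                                                   SZ_64K,  /* alignment, now u64 */
                                                   0,       /* color */
                                                   0, SZ_4G,
                                                   DRM_MM_SEARCH_DEFAULT,
                                                   DRM_MM_CREATE_DEFAULT);
}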
*/ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, - u64 size, unsigned alignment, + u64 size, u64 alignment, unsigned long color, u64 start, u64 end, enum drm_mm_search_flags sflags, @@ -500,9 +398,9 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *n if (!hole_node) return -ENOSPC; - drm_mm_insert_helper_range(hole_node, node, - size, alignment, color, - start, end, aflags); + drm_mm_insert_helper(hole_node, node, + size, alignment, color, + start, end, aflags); return 0; } EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); @@ -513,32 +411,29 @@ EXPORT_SYMBOL(drm_mm_insert_node_in_range_generic); * * This just removes a node from its drm_mm allocator. The node does not need to * be cleared again before it can be re-inserted into this or any other drm_mm - * allocator. It is a bug to call this function on a un-allocated node. + * allocator. It is a bug to call this function on an unallocated node. */ void drm_mm_remove_node(struct drm_mm_node *node) { struct drm_mm *mm = node->mm; struct drm_mm_node *prev_node; - if (WARN_ON(!node->allocated)) - return; - - BUG_ON(node->scanned_block || node->scanned_prev_free - || node->scanned_next_free); + DRM_MM_BUG_ON(!node->allocated); + DRM_MM_BUG_ON(node->scanned_block); prev_node = list_entry(node->node_list.prev, struct drm_mm_node, node_list); - if (node->hole_follows) { - BUG_ON(__drm_mm_hole_node_start(node) == - __drm_mm_hole_node_end(node)); + if (drm_mm_hole_follows(node)) { + DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) == - __drm_mm_hole_node_end(node)); list_del(&node->hole_stack); - } else - BUG_ON(__drm_mm_hole_node_start(node) != - __drm_mm_hole_node_end(node)); - + } else { + DRM_MM_BUG_ON(__drm_mm_hole_node_start(node) != + __drm_mm_hole_node_end(node)); + } - if (!drm_mm_hole_follows(prev_node)) { + prev_node->hole_follows = 1; list_add(&prev_node->hole_stack, &mm->hole_stack); } else @@ -550,16 +445,15 @@ void drm_mm_remove_node(struct drm_mm_node *node) } EXPORT_SYMBOL(drm_mm_remove_node); -static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) +static int check_free_hole(u64 start, u64 end, u64 size, u64 alignment) { if (end - start < size) return 0; if (alignment) { - u64 tmp = start; - unsigned rem; + u64 rem; - rem = do_div(tmp, alignment); + div64_u64_rem(start, alignment, &rem); if (rem) start += alignment - rem; } @@ -567,51 +461,9 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) return end >= start + size; } -static struct drm_mm_node *drm_mm_search_free_generic(const struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color, - enum drm_mm_search_flags flags) -{ - struct drm_mm_node *entry; - struct drm_mm_node *best; - u64 adj_start; - u64 adj_end; - u64 best_size; - - BUG_ON(mm->scanned_blocks); - - best = NULL; - best_size = ~0UL; - - __drm_mm_for_each_hole(entry, mm, adj_start, adj_end, - flags & DRM_MM_SEARCH_BELOW) { - u64 hole_size = adj_end - adj_start; - - if (mm->color_adjust) { - mm->color_adjust(entry, color, &adj_start, &adj_end); - if (adj_end <= adj_start) - continue; - } - - if (!check_free_hole(adj_start, adj_end, size, alignment)) - continue; - - if (!(flags & DRM_MM_SEARCH_BEST)) - return entry; - - if (hole_size < best_size) { - best = entry; - best_size = hole_size; - } - } - - return best; -} - static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_mm *mm, u64 size, - unsigned alignment, + u64 alignment,
unsigned long color, u64 start, u64 end, @@ -623,7 +475,7 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ u64 adj_end; u64 best_size; - BUG_ON(mm->scanned_blocks); + DRM_MM_BUG_ON(mm->scan_active); best = NULL; best_size = ~0UL; @@ -632,17 +484,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ flags & DRM_MM_SEARCH_BELOW) { u64 hole_size = adj_end - adj_start; - if (adj_start < start) - adj_start = start; - if (adj_end > end) - adj_end = end; - if (mm->color_adjust) { mm->color_adjust(entry, color, &adj_start, &adj_end); if (adj_end <= adj_start) continue; } + adj_start = max(adj_start, start); + adj_end = min(adj_end, end); + if (!check_free_hole(adj_start, adj_end, size, alignment)) continue; @@ -669,6 +519,8 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_ */ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { + DRM_MM_BUG_ON(!old->allocated); + list_replace(&old->node_list, &new->node_list); list_replace(&old->hole_stack, &new->hole_stack); rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree); @@ -692,96 +544,82 @@ EXPORT_SYMBOL(drm_mm_replace_node); * efficient when we simply start to select all objects from the tail of an LRU * until there's a suitable hole: Especially for big objects or nodes that * otherwise have special allocation constraints there's a good chance we evict - * lots of (smaller) objects unecessarily. + * lots of (smaller) objects unnecessarily. * * The DRM range allocator supports this use-case through the scanning * interfaces. First a scan operation needs to be initialized with - * drm_mm_init_scan() or drm_mm_init_scan_with_range(). The the driver adds - * objects to the roaster (probably by walking an LRU list, but this can be - * freely implemented) until a suitable hole is found or there's no further - * evitable object. + * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver then adds + * objects to the roster using drm_mm_scan_add_block() (probably by walking an + * LRU list, but this can be freely implemented) until a suitable hole is + * found or there are no further evictable objects. * - * The the driver must walk through all objects again in exactly the reverse + * The driver must walk through all objects again in exactly the reverse * order to restore the allocator state. Note that while the allocator is used * in the scan mode no other operation is allowed. * - * Finally the driver evicts all objects selected in the scan. Adding and - * removing an object is O(1), and since freeing a node is also O(1) the overall - * complexity is O(scanned_objects). So like the free stack which needs to be - * walked before a scan operation even begins this is linear in the number of - * objects. It doesn't seem to hurt badly. + * Finally the driver evicts all objects selected (drm_mm_scan_remove_block() + * reported true) in the scan, and any overlapping nodes after color adjustment + * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and + * since freeing a node is also O(1) the overall complexity is + * O(scanned_objects). So like the free stack which needs to be walked before a + * scan operation even begins this is linear in the number of objects. It + * doesn't seem to hurt too badly.
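The flow just described, written out against the new drm_mm_scan API; struct foo_node, its list members and foo_evict_node() are hypothetical driver pieces (the pattern mirrors the etnaviv conversion further down in this patch):

struct foo_node {
        struct drm_mm_node node;
        struct list_head lru_link;      /* on the driver's LRU */
        struct list_head scan_link;     /* transient scan bookkeeping */
};

static bool foo_evict_for(struct drm_mm *mm, struct list_head *lru, u64 size)
{
        struct drm_mm_scan scan;
        struct foo_node *it, *n;
        LIST_HEAD(scan_list);
        bool found = false;

        drm_mm_scan_init(&scan, mm, size, 0, 0, 0);

        /* Roster phase: list_add() prepends, so walking scan_list
         * afterwards visits the nodes in exactly reverse order, as
         * drm_mm_scan_remove_block() demands. */
        list_for_each_entry(it, lru, lru_link) {
                list_add(&it->scan_link, &scan_list);
                if (drm_mm_scan_add_block(&scan, &it->node)) {
                        found = true;
                        break;
                }
        }

        /* Restore phase: every scanned block must be removed again
         * before anything else may touch the allocator. Blocks that
         * are not part of the hole stay where they are. */
        list_for_each_entry_safe(it, n, &scan_list, scan_link)
                if (!drm_mm_scan_remove_block(&scan, &it->node))
                        list_del_init(&it->scan_link);

        if (!found)
                return false;

        /* Eviction phase: free the selected nodes. foo_evict_node()
         * is assumed to unmap, call drm_mm_remove_node() and unlink
         * the entry from both lists. */
        list_for_each_entry_safe(it, n, &scan_list, scan_link)
                foo_evict_node(it);

        return true;
}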
*/ /** - * drm_mm_init_scan - initialize lru scanning - * @mm: drm_mm to scan - * @size: size of the allocation - * @alignment: alignment of the allocation - * @color: opaque tag value to use for the allocation - * - * This simply sets up the scanning routines with the parameters for the desired - * hole. Note that there's no need to specify allocation flags, since they only - * change the place a node is allocated from within a suitable hole. - * - * Warning: - * As long as the scan list is non-empty, no other operations than - * adding/removing nodes to/from the scan list are allowed. - */ -void drm_mm_init_scan(struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color) -{ - mm->scan_color = color; - mm->scan_alignment = alignment; - mm->scan_size = size; - mm->scanned_blocks = 0; - mm->scan_hit_start = 0; - mm->scan_hit_end = 0; - mm->scan_check_range = 0; - mm->prev_scanned_node = NULL; -} -EXPORT_SYMBOL(drm_mm_init_scan); - -/** - * drm_mm_init_scan - initialize range-restricted lru scanning + * drm_mm_scan_init_with_range - initialize range-restricted lru scanning + * @scan: scan state * @mm: drm_mm to scan * @size: size of the allocation * @alignment: alignment of the allocation * @color: opaque tag value to use for the allocation * @start: start of the allowed range for the allocation * @end: end of the allowed range for the allocation + * @flags: flags to specify how the allocation will be performed afterwards * * This simply sets up the scanning routines with the parameters for the desired - * hole. Note that there's no need to specify allocation flags, since they only - * change the place a node is allocated from within a suitable hole. + * hole. * * Warning: * As long as the scan list is non-empty, no other operations than * adding/removing nodes to/from the scan list are allowed. */ -void drm_mm_init_scan_with_range(struct drm_mm *mm, +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, u64 size, - unsigned alignment, + u64 alignment, unsigned long color, u64 start, - u64 end) + u64 end, + unsigned int flags) { - mm->scan_color = color; - mm->scan_alignment = alignment; - mm->scan_size = size; - mm->scanned_blocks = 0; - mm->scan_hit_start = 0; - mm->scan_hit_end = 0; - mm->scan_start = start; - mm->scan_end = end; - mm->scan_check_range = 1; - mm->prev_scanned_node = NULL; + DRM_MM_BUG_ON(start >= end); + DRM_MM_BUG_ON(!size || size > end - start); + DRM_MM_BUG_ON(mm->scan_active); + + scan->mm = mm; + + if (alignment <= 1) + alignment = 0; + + scan->color = color; + scan->alignment = alignment; + scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0; + scan->size = size; + scan->flags = flags; + + DRM_MM_BUG_ON(end <= start); + scan->range_start = start; + scan->range_end = end; + + scan->hit_start = U64_MAX; + scan->hit_end = 0; } -EXPORT_SYMBOL(drm_mm_init_scan_with_range); +EXPORT_SYMBOL(drm_mm_scan_init_with_range); /** * drm_mm_scan_add_block - add a node to the scan list + * @scan: the active drm_mm scanner * @node: drm_mm_node to add * * Add a node to the scan list that might be freed to make space for the desired @@ -790,60 +628,87 @@ EXPORT_SYMBOL(drm_mm_init_scan_with_range); * Returns: * True if a hole has been found, false otherwise. 
*/ -bool drm_mm_scan_add_block(struct drm_mm_node *node) +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node) { - struct drm_mm *mm = node->mm; - struct drm_mm_node *prev_node; + struct drm_mm *mm = scan->mm; + struct drm_mm_node *hole; u64 hole_start, hole_end; + u64 col_start, col_end; u64 adj_start, adj_end; - mm->scanned_blocks++; + DRM_MM_BUG_ON(node->mm != mm); + DRM_MM_BUG_ON(!node->allocated); + DRM_MM_BUG_ON(node->scanned_block); + node->scanned_block = true; + mm->scan_active++; + + /* Remove this block from the node_list so that we enlarge the hole + * (distance between the end of our previous node and the start of + * our next), without poisoning the link so that we can restore it + * later in drm_mm_scan_remove_block(). + */ + hole = list_prev_entry(node, node_list); + DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node); + __list_del_entry(&node->node_list); + + hole_start = __drm_mm_hole_node_start(hole); + hole_end = __drm_mm_hole_node_end(hole); + + col_start = hole_start; + col_end = hole_end; + if (mm->color_adjust) + mm->color_adjust(hole, scan->color, &col_start, &col_end); - BUG_ON(node->scanned_block); - node->scanned_block = 1; + adj_start = max(col_start, scan->range_start); + adj_end = min(col_end, scan->range_end); + if (adj_end <= adj_start || adj_end - adj_start < scan->size) + return false; - prev_node = list_entry(node->node_list.prev, struct drm_mm_node, - node_list); + if (scan->flags == DRM_MM_CREATE_TOP) + adj_start = adj_end - scan->size; - node->scanned_preceeds_hole = prev_node->hole_follows; - prev_node->hole_follows = 1; - list_del(&node->node_list); - node->node_list.prev = &prev_node->node_list; - node->node_list.next = &mm->prev_scanned_node->node_list; - mm->prev_scanned_node = node; - - adj_start = hole_start = drm_mm_hole_node_start(prev_node); - adj_end = hole_end = drm_mm_hole_node_end(prev_node); - - if (mm->scan_check_range) { - if (adj_start < mm->scan_start) - adj_start = mm->scan_start; - if (adj_end > mm->scan_end) - adj_end = mm->scan_end; - } + if (scan->alignment) { + u64 rem; - if (mm->color_adjust) - mm->color_adjust(prev_node, mm->scan_color, - &adj_start, &adj_end); - - if (check_free_hole(adj_start, adj_end, - mm->scan_size, mm->scan_alignment)) { - mm->scan_hit_start = hole_start; - mm->scan_hit_end = hole_end; - return true; + if (likely(scan->remainder_mask)) + rem = adj_start & scan->remainder_mask; + else + div64_u64_rem(adj_start, scan->alignment, &rem); + if (rem) { + adj_start -= rem; + if (scan->flags != DRM_MM_CREATE_TOP) + adj_start += scan->alignment; + if (adj_start < max(col_start, scan->range_start) || + min(col_end, scan->range_end) - adj_start < scan->size) + return false; + + if (adj_end <= adj_start || + adj_end - adj_start < scan->size) + return false; + } } - return false; + scan->hit_start = adj_start; + scan->hit_end = adj_start + scan->size; + + DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end); + DRM_MM_BUG_ON(scan->hit_start < hole_start); + DRM_MM_BUG_ON(scan->hit_end > hole_end); + + return true; } EXPORT_SYMBOL(drm_mm_scan_add_block); /** * drm_mm_scan_remove_block - remove a node from the scan list + * @scan: the active drm_mm scanner * @node: drm_mm_node to remove * - * Nodes _must_ be removed in the exact same order from the scan list as they - * have been added, otherwise the internal state of the memory manager will be - * corrupted. + * Nodes _must_ be removed in exactly the reverse order from the scan list as + * they have been added (e.g.
using list_add as they are added and then + * list_for_each over that eviction list to remove), otherwise the internal + * state of the memory manager will be corrupted. * * When the scan list is empty, the selected memory nodes can be freed. An * immediately following drm_mm_search_free with !DRM_MM_SEARCH_BEST will then @@ -853,42 +718,74 @@ EXPORT_SYMBOL(drm_mm_scan_add_block); * True if this block should be evicted, false otherwise. Will always * return false when no hole has been found. */ -bool drm_mm_scan_remove_block(struct drm_mm_node *node) +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node) { - struct drm_mm *mm = node->mm; struct drm_mm_node *prev_node; - mm->scanned_blocks--; - - BUG_ON(!node->scanned_block); - node->scanned_block = 0; - - prev_node = list_entry(node->node_list.prev, struct drm_mm_node, - node_list); - - prev_node->hole_follows = node->scanned_preceeds_hole; + DRM_MM_BUG_ON(node->mm != scan->mm); + DRM_MM_BUG_ON(!node->scanned_block); + node->scanned_block = false; + + DRM_MM_BUG_ON(!node->mm->scan_active); + node->mm->scan_active--; + + /* During drm_mm_scan_add_block() we decoupled this node leaving + * its pointers intact. Now that the caller is walking back along + * the eviction list we can restore this block into its rightful + * place on the full node_list. To confirm that the caller is walking + * backwards correctly we check that prev_node->next == node->next, + * i.e. both believe the same node should be on the other side of the + * hole. + */ + prev_node = list_prev_entry(node, node_list); + DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) != + list_next_entry(node, node_list)); list_add(&node->node_list, &prev_node->node_list); - return (drm_mm_hole_node_end(node) > mm->scan_hit_start && - node->start < mm->scan_hit_end); + return (node->start + node->size > scan->hit_start && + node->start < scan->hit_end); } EXPORT_SYMBOL(drm_mm_scan_remove_block); /** - * drm_mm_clean - checks whether an allocator is clean - * @mm: drm_mm allocator to check + * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole + * @scan: drm_mm scan with target hole + * + * After completing an eviction scan and removing the selected nodes, we may + * need to remove a few more nodes from either side of the target hole if + * mm.color_adjust is being used. * * Returns: - * True if the allocator is completely free, false if there's still a node - * allocated in it. + * A node to evict, or NULL if there are no overlapping nodes. 
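Continuing the eviction sketch from the scanning overview above: when color_adjust is in use, the freshly freed hole may still be hemmed in by incompatible neighbours, so the driver keeps evicting whatever drm_mm_scan_color_evict() hands back (foo_node and foo_evict_node() are the same hypothetical pieces as before):

static void foo_evict_color_overlaps(struct drm_mm_scan *scan)
{
        struct drm_mm_node *node;

        /* Returns NULL once the hole no longer overlaps any
         * neighbour after color adjustment. */
        while ((node = drm_mm_scan_color_evict(scan))) {
                struct foo_node *it =
                        container_of(node, struct foo_node, node);

                foo_evict_node(it);
        }
}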
*/ -bool drm_mm_clean(struct drm_mm * mm) +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan) { - struct list_head *head = &mm->head_node.node_list; + struct drm_mm *mm = scan->mm; + struct drm_mm_node *hole; + u64 hole_start, hole_end; + + DRM_MM_BUG_ON(list_empty(&mm->hole_stack)); + + if (!mm->color_adjust) + return NULL; + + hole = list_first_entry(&mm->hole_stack, typeof(*hole), hole_stack); + hole_start = __drm_mm_hole_node_start(hole); + hole_end = __drm_mm_hole_node_end(hole); + + DRM_MM_BUG_ON(hole_start > scan->hit_start); + DRM_MM_BUG_ON(hole_end < scan->hit_end); - return (head->next->next == head); + mm->color_adjust(hole, scan->color, &hole_start, &hole_end); + if (hole_start > scan->hit_start) + return hole; + if (hole_end < scan->hit_end) + return list_next_entry(hole, node_list); + + return NULL; } -EXPORT_SYMBOL(drm_mm_clean); +EXPORT_SYMBOL(drm_mm_scan_color_evict); /** * drm_mm_init - initialize a drm-mm allocator @@ -898,18 +795,17 @@ EXPORT_SYMBOL(drm_mm_clean); * * Note that @mm must be cleared to 0 before calling this function. */ -void drm_mm_init(struct drm_mm * mm, u64 start, u64 size) +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) { + DRM_MM_BUG_ON(start + size <= start); + INIT_LIST_HEAD(&mm->hole_stack); - mm->scanned_blocks = 0; + mm->scan_active = 0; /* Clever trick to avoid a special case in the free hole tracking. */ INIT_LIST_HEAD(&mm->head_node.node_list); mm->head_node.allocated = 0; mm->head_node.hole_follows = 1; - mm->head_node.scanned_block = 0; - mm->head_node.scanned_prev_free = 0; - mm->head_node.scanned_next_free = 0; mm->head_node.mm = mm; mm->head_node.start = start + size; mm->head_node.size = start - mm->head_node.start; @@ -930,15 +826,14 @@ EXPORT_SYMBOL(drm_mm_init); */ void drm_mm_takedown(struct drm_mm *mm) { - if (WARN(!list_empty(&mm->head_node.node_list), + if (WARN(!drm_mm_clean(mm), "Memory manager not clean during takedown.\n")) show_leaks(mm); - } EXPORT_SYMBOL(drm_mm_takedown); -static u64 drm_mm_debug_hole(struct drm_mm_node *entry, - const char *prefix) +static u64 drm_mm_debug_hole(const struct drm_mm_node *entry, + const char *prefix) { u64 hole_start, hole_end, hole_size; @@ -959,9 +854,9 @@ static u64 drm_mm_debug_hole(struct drm_mm_node *entry, * @mm: drm_mm allocator to dump * @prefix: prefix to use for dumping to dmesg */ -void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) +void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix) { - struct drm_mm_node *entry; + const struct drm_mm_node *entry; u64 total_used = 0, total_free = 0, total = 0; total_free += drm_mm_debug_hole(&mm->head_node, prefix); @@ -980,7 +875,7 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) EXPORT_SYMBOL(drm_mm_debug_table); #if defined(CONFIG_DEBUG_FS) -static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) +static u64 drm_mm_dump_hole(struct seq_file *m, const struct drm_mm_node *entry) { u64 hole_start, hole_end, hole_size; @@ -1001,9 +896,9 @@ static u64 drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) * @m: seq_file to dump to * @mm: drm_mm allocator to dump */ -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm) { - struct drm_mm_node *entry; + const struct drm_mm_node *entry; u64 total_used = 0, total_free = 0, total = 0; total_free += drm_mm_dump_hole(m, &mm->head_node); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 
2735a5847ffa..ed1ee5a44a7b 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -20,6 +20,7 @@ * OF THIS SOFTWARE. */ +#include <drm/drm_encoder.h> #include <drm/drm_mode_config.h> #include <drm/drmP.h> @@ -84,113 +85,74 @@ int drm_mode_getresources(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_mode_card_res *card_res = data; - struct list_head *lh; struct drm_framebuffer *fb; struct drm_connector *connector; struct drm_crtc *crtc; struct drm_encoder *encoder; - int ret = 0; - int connector_count = 0; - int crtc_count = 0; - int fb_count = 0; - int encoder_count = 0; - int copied = 0; + int count, ret = 0; uint32_t __user *fb_id; uint32_t __user *crtc_id; uint32_t __user *connector_id; uint32_t __user *encoder_id; + struct drm_connector_list_iter conn_iter; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; mutex_lock(&file_priv->fbs_lock); - /* - * For the non-control nodes we need to limit the list of resources - * by IDs in the group list for this node - */ - list_for_each(lh, &file_priv->fbs) - fb_count++; - - /* handle this in 4 parts */ - /* FBs */ - if (card_res->count_fbs >= fb_count) { - copied = 0; - fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; - list_for_each_entry(fb, &file_priv->fbs, filp_head) { - if (put_user(fb->base.id, fb_id + copied)) { - mutex_unlock(&file_priv->fbs_lock); - return -EFAULT; - } - copied++; + count = 0; + fb_id = u64_to_user_ptr(card_res->fb_id_ptr); + list_for_each_entry(fb, &file_priv->fbs, filp_head) { + if (count < card_res->count_fbs && + put_user(fb->base.id, fb_id + count)) { + mutex_unlock(&file_priv->fbs_lock); + return -EFAULT; } + count++; } - card_res->count_fbs = fb_count; + card_res->count_fbs = count; mutex_unlock(&file_priv->fbs_lock); - /* mode_config.mutex protects the connector list against e.g. DP MST - * connector hot-adding. CRTC/Plane lists are invariant. 
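The rewrite above makes drm_mode_getresources() always report the full counts while copying only as many IDs as the caller provided room for - the classic two-pass ioctl protocol. A hedged userspace sketch of that protocol (real code, like libdrm, also retries when a hotplug grows the counts between the two calls):

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static uint32_t *foo_get_connector_ids(int fd, uint32_t *count_out)
{
        struct drm_mode_card_res res = {0};
        uint32_t *ids;

        /* Pass 1: all counts 0, the kernel just fills in totals. */
        if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res))
                return NULL;

        ids = calloc(res.count_connectors, sizeof(*ids));
        if (!ids)
                return NULL;

        /* Pass 2: buffer for connectors only. The other counts must
         * go back to 0 because their pointers stay NULL. */
        res.connector_id_ptr = (uintptr_t)ids;
        res.count_fbs = res.count_crtcs = res.count_encoders = 0;
        if (ioctl(fd, DRM_IOCTL_MODE_GETRESOURCES, &res)) {
                free(ids);
                return NULL;
        }

        *count_out = res.count_connectors;
        return ids;
}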
*/ - mutex_lock(&dev->mode_config.mutex); - drm_for_each_crtc(crtc, dev) - crtc_count++; - - drm_for_each_connector(connector, dev) - connector_count++; - - drm_for_each_encoder(encoder, dev) - encoder_count++; - card_res->max_height = dev->mode_config.max_height; card_res->min_height = dev->mode_config.min_height; card_res->max_width = dev->mode_config.max_width; card_res->min_width = dev->mode_config.min_width; - /* CRTCs */ - if (card_res->count_crtcs >= crtc_count) { - copied = 0; - crtc_id = (uint32_t __user *)(unsigned long)card_res->crtc_id_ptr; - drm_for_each_crtc(crtc, dev) { - if (put_user(crtc->base.id, crtc_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } + count = 0; + crtc_id = u64_to_user_ptr(card_res->crtc_id_ptr); + drm_for_each_crtc(crtc, dev) { + if (count < card_res->count_crtcs && + put_user(crtc->base.id, crtc_id + count)) + return -EFAULT; + count++; } - card_res->count_crtcs = crtc_count; - - /* Encoders */ - if (card_res->count_encoders >= encoder_count) { - copied = 0; - encoder_id = (uint32_t __user *)(unsigned long)card_res->encoder_id_ptr; - drm_for_each_encoder(encoder, dev) { - if (put_user(encoder->base.id, encoder_id + - copied)) { - ret = -EFAULT; - goto out; - } - copied++; - } + card_res->count_crtcs = count; + + count = 0; + encoder_id = u64_to_user_ptr(card_res->encoder_id_ptr); + drm_for_each_encoder(encoder, dev) { + if (count < card_res->count_encoders && + put_user(encoder->base.id, encoder_id + count)) + return -EFAULT; + count++; } - card_res->count_encoders = encoder_count; - - /* Connectors */ - if (card_res->count_connectors >= connector_count) { - copied = 0; - connector_id = (uint32_t __user *)(unsigned long)card_res->connector_id_ptr; - drm_for_each_connector(connector, dev) { - if (put_user(connector->base.id, - connector_id + copied)) { - ret = -EFAULT; - goto out; - } - copied++; + card_res->count_encoders = count; + + drm_connector_list_iter_get(dev, &conn_iter); + count = 0; + connector_id = u64_to_user_ptr(card_res->connector_id_ptr); + drm_for_each_connector_iter(connector, &conn_iter) { + if (count < card_res->count_connectors && + put_user(connector->base.id, connector_id + count)) { + drm_connector_list_iter_put(&conn_iter); + return -EFAULT; } + count++; } - card_res->count_connectors = connector_count; + card_res->count_connectors = count; + drm_connector_list_iter_put(&conn_iter); -out: - mutex_unlock(&dev->mode_config.mutex); return ret; } @@ -208,6 +170,7 @@ void drm_mode_config_reset(struct drm_device *dev) struct drm_plane *plane; struct drm_encoder *encoder; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; drm_for_each_plane(plane, dev) if (plane->funcs->reset) @@ -221,11 +184,11 @@ void drm_mode_config_reset(struct drm_device *dev) if (encoder->funcs->reset) encoder->funcs->reset(encoder); - mutex_lock(&dev->mode_config.mutex); - drm_for_each_connector(connector, dev) + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) if (connector->funcs->reset) connector->funcs->reset(connector); - mutex_unlock(&dev->mode_config.mutex); + drm_connector_list_iter_put(&conn_iter); } EXPORT_SYMBOL(drm_mode_config_reset); @@ -406,10 +369,9 @@ void drm_mode_config_init(struct drm_device *dev) idr_init(&dev->mode_config.crtc_idr); idr_init(&dev->mode_config.tile_idr); ida_init(&dev->mode_config.connector_ida); + spin_lock_init(&dev->mode_config.connector_list_lock); - drm_modeset_lock_all(dev); drm_mode_create_standard_properties(dev); - 
drm_modeset_unlock_all(dev); /* Just to be sure */ dev->mode_config.num_fb = 0; @@ -436,7 +398,8 @@ EXPORT_SYMBOL(drm_mode_config_init); */ void drm_mode_config_cleanup(struct drm_device *dev) { - struct drm_connector *connector, *ot; + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; struct drm_crtc *crtc, *ct; struct drm_encoder *encoder, *enct; struct drm_framebuffer *fb, *fbt; @@ -449,10 +412,16 @@ void drm_mode_config_cleanup(struct drm_device *dev) encoder->funcs->destroy(encoder); } - list_for_each_entry_safe(connector, ot, - &dev->mode_config.connector_list, head) { - connector->funcs->destroy(connector); + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + /* drm_connector_list_iter holds a full reference to the + * current connector itself, which means it is inherently safe + * against unreferencing the current connector - but not against + * deleting it right away. */ + drm_connector_unreference(connector); } + drm_connector_list_iter_put(&conn_iter); + WARN_ON(!list_empty(&dev->mode_config.connector_list)); list_for_each_entry_safe(property, pt, &dev->mode_config.property_list, head) { diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c index 9f17085b1fdd..14543ff08c51 100644 --- a/drivers/gpu/drm/drm_mode_object.c +++ b/drivers/gpu/drm/drm_mode_object.c @@ -23,6 +23,7 @@ #include <linux/export.h> #include <drm/drmP.h> #include <drm/drm_mode_object.h> +#include <drm/drm_atomic.h> #include "drm_crtc_internal.h" @@ -273,7 +274,7 @@ int drm_object_property_get_value(struct drm_mode_object *obj, * their value in obj->properties->values[].. mostly to avoid * having to deal w/ EDID and similar props in atomic paths: */ - if (drm_core_check_feature(property->dev, DRIVER_ATOMIC) && + if (drm_drv_uses_atomic_modeset(property->dev) && !(property->flags & DRM_MODE_PROP_IMMUTABLE)) return drm_atomic_get_property(obj, property, val); diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index cc232ac6c950..cc44a9a4b004 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -48,6 +48,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) INIT_LIST_HEAD(&panel_list); + spin_lock_irq(&dev->mode_config.connector_list_lock); list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS || @@ -57,38 +58,27 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *dev) } list_splice(&panel_list, &dev->mode_config.connector_list); + spin_unlock_irq(&dev->mode_config.connector_list_lock); } EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); /** * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata + * @dev: DRM device * @fb: drm_framebuffer object to fill out * @mode_cmd: metadata from the userspace fb creation request * * This helper can be used in a drivers fb_create callback to pre-fill the fb's * metadata fields.
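For the helper documented above, a sketch of the fb_create callback it is meant for, updated to the new dev parameter; struct foo_fb, foo_fb_funcs and the single-plane assumption are illustrative only:

struct foo_fb {
        struct drm_framebuffer base;
        struct drm_gem_object *obj;
};

/* foo_fb_funcs is assumed to be defined elsewhere with at least a
 * .destroy hook that drops fb->obj and frees the wrapper. */
static struct drm_framebuffer *
foo_fb_create(struct drm_device *dev, struct drm_file *file,
              const struct drm_mode_fb_cmd2 *mode_cmd)
{
        struct foo_fb *fb;
        int ret;

        fb = kzalloc(sizeof(*fb), GFP_KERNEL);
        if (!fb)
                return ERR_PTR(-ENOMEM);

        fb->obj = drm_gem_object_lookup(file, mode_cmd->handles[0]);
        if (!fb->obj) {
                kfree(fb);
                return ERR_PTR(-ENOENT);
        }

        /* Fills fb->dev and fb->format among others - both of which
         * drm_framebuffer_init() now insists on, per the WARN_ON_ONCE
         * added in drm_framebuffer.c above. */
        drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd);

        ret = drm_framebuffer_init(dev, &fb->base, &foo_fb_funcs);
        if (ret) {
                drm_gem_object_unreference_unlocked(fb->obj);
                kfree(fb);
                return ERR_PTR(ret);
        }

        return &fb->base;
}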
*/ -void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info; int i; - info = drm_format_info(mode_cmd->pixel_format); - if (!info || !info->depth) { - struct drm_format_name_buf format_name; - - DRM_DEBUG_KMS("non-RGB pixel format %s\n", - drm_get_format_name(mode_cmd->pixel_format, - &format_name)); - - fb->depth = 0; - fb->bits_per_pixel = 0; - } else { - fb->depth = info->depth; - fb->bits_per_pixel = info->cpp[0] * 8; - } - + fb->dev = dev; + fb->format = drm_format_info(mode_cmd->pixel_format); fb->width = mode_cmd->width; fb->height = mode_cmd->height; for (i = 0; i < 4; i++) { @@ -96,7 +86,6 @@ void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, fb->offsets[i] = mode_cmd->offsets[i]; } fb->modifier = mode_cmd->modifier[0]; - fb->pixel_format = mode_cmd->pixel_format; fb->flags = mode_cmd->flags; } EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 47848ed8ca48..b5f2f0fece99 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -4,6 +4,7 @@ #include <linux/of_graph.h> #include <drm/drmP.h> #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> #include <drm/drm_of.h> static void drm_release_of(struct device *dev, void *data) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index 62b98f386fd1..8ad20af88ed7 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -392,12 +392,16 @@ int drm_mode_getplane(struct drm_device *dev, void *data, return -ENOENT; drm_modeset_lock(&plane->mutex, NULL); - if (plane->crtc) + if (plane->state && plane->state->crtc) + plane_resp->crtc_id = plane->state->crtc->base.id; + else if (!plane->state && plane->crtc) plane_resp->crtc_id = plane->crtc->base.id; else plane_resp->crtc_id = 0; - if (plane->fb) + if (plane->state && plane->state->fb) + plane_resp->fb_id = plane->state->fb->base.id; + else if (!plane->state && plane->fb) plane_resp->fb_id = plane->fb->base.id; else plane_resp->fb_id = 0; @@ -478,11 +482,11 @@ static int __setplane_internal(struct drm_plane *plane, } /* Check whether this plane supports the fb pixel format. 
*/ - ret = drm_plane_check_pixel_format(plane, fb->pixel_format); + ret = drm_plane_check_pixel_format(plane, fb->format->format); if (ret) { struct drm_format_name_buf format_name; DRM_DEBUG_KMS("Invalid pixel format %s\n", - drm_get_format_name(fb->pixel_format, + drm_get_format_name(fb->format->format, &format_name)); goto out; } @@ -854,7 +858,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev, if (ret) goto out; - if (crtc->primary->fb->pixel_format != fb->pixel_format) { + if (crtc->primary->fb->format != fb->format) { DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); ret = -EINVAL; goto out; diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c index 7a7dddf604d7..8b042a193613 100644 --- a/drivers/gpu/drm/drm_plane_helper.c +++ b/drivers/gpu/drm/drm_plane_helper.c @@ -29,6 +29,7 @@ #include <drm/drm_rect.h> #include <drm/drm_atomic.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder.h> #include <drm/drm_atomic_helper.h> #define SUBPIXEL_MASK 0xffff @@ -74,6 +75,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; int count = 0; /* @@ -83,7 +85,8 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, */ WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->encoder && connector->encoder->crtc == crtc) { if (connector_list != NULL && count < num_connectors) *(connector_list++) = connector; @@ -91,6 +94,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc, count++; } } + drm_connector_list_iter_put(&conn_iter); return count; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index ac953f037be7..7cff91e7497f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -129,6 +129,7 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) { bool poll = false; struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; unsigned long delay = DRM_OUTPUT_POLL_PERIOD; WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); @@ -136,11 +137,13 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev) if (!dev->mode_config.poll_enabled || !drm_kms_helper_poll) return; - drm_for_each_connector(connector, dev) { + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { if (connector->polled & (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT)) poll = true; } + drm_connector_list_iter_put(&conn_iter); if (dev->mode_config.delayed_event) { poll = true; @@ -382,6 +385,7 @@ static void output_poll_execute(struct work_struct *work) struct delayed_work *delayed_work = to_delayed_work(work); struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; enum drm_connector_status old_status; bool repoll = false, changed; @@ -397,8 +401,8 @@ static void output_poll_execute(struct work_struct *work) goto out; } - drm_for_each_connector(connector, dev) { - + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { /* Ignore forced connectors. 
*/ if (connector->force) continue; @@ -451,6 +455,7 @@ static void output_poll_execute(struct work_struct *work) changed = true; } } + drm_connector_list_iter_put(&conn_iter); mutex_unlock(&dev->mode_config.mutex); @@ -562,6 +567,7 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini); bool drm_helper_hpd_irq_event(struct drm_device *dev) { struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; enum drm_connector_status old_status; bool changed = false; @@ -569,8 +575,8 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev) return false; mutex_lock(&dev->mode_config.mutex); - drm_for_each_connector(connector, dev) { - + drm_connector_list_iter_get(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { /* Only handle HPD capable connectors. */ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD)) continue; @@ -586,7 +592,7 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev) if (old_status != connector->status) changed = true; } - + drm_connector_list_iter_put(&conn_iter); mutex_unlock(&dev->mode_config.mutex); if (changed) diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 7bae08c2bf0a..3cc42f5dfba1 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -182,30 +182,11 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = { int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, struct drm_bridge *bridge) { - bridge->encoder = &pipe->encoder; - pipe->encoder.bridge = bridge; - return drm_bridge_attach(pipe->encoder.dev, bridge); + return drm_bridge_attach(&pipe->encoder, bridge, NULL); } EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge); /** - * drm_simple_display_pipe_detach_bridge - Detach the bridge from the display pipe - * @pipe: simple display pipe object - * - * Detaches the drm bridge previously attached with - * drm_simple_display_pipe_attach_bridge() - */ -void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe) -{ - if (WARN_ON(!pipe->encoder.bridge)) - return; - - drm_bridge_detach(pipe->encoder.bridge); - pipe->encoder.bridge = NULL; -} -EXPORT_SYMBOL(drm_simple_display_pipe_detach_bridge); - -/** * drm_simple_display_pipe_init - Initialize a simple display pipeline * @dev: DRM device * @pipe: simple display pipe object to initialize diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 00368b14d08d..30d70ed143f7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -592,7 +592,7 @@ static void etnaviv_unbind(struct device *dev) drm->dev_private = NULL; kfree(priv); - drm_put_dev(drm); + drm_dev_unref(drm); } static const struct component_master_ops etnaviv_master_ops = { diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c index 169ac96e8f08..ae2733a609ba 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c @@ -113,6 +113,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, while (1) { struct etnaviv_vram_mapping *m, *n; + struct drm_mm_scan scan; struct list_head list; bool found; @@ -134,7 +135,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, } /* Try to retire some entries */ - drm_mm_init_scan(&mmu->mm, size, 0, 0); + drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, 0); found = 0; INIT_LIST_HEAD(&list); @@ -151,7 +152,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, 
continue; list_add(&free->scan_node, &list); - if (drm_mm_scan_add_block(&free->vram_node)) { + if (drm_mm_scan_add_block(&scan, &free->vram_node)) { found = true; break; } @@ -160,7 +161,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, if (!found) { /* Nothing found, clean up and fail */ list_for_each_entry_safe(m, n, &list, scan_node) - BUG_ON(drm_mm_scan_remove_block(&m->vram_node)); + BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node)); break; } @@ -171,7 +172,7 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, * can leave the block pinned. */ list_for_each_entry_safe(m, n, &list, scan_node) - if (!drm_mm_scan_remove_block(&m->vram_node)) + if (!drm_mm_scan_remove_block(&scan, &m->vram_node)) list_del_init(&m->scan_node); /* diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c index 6ca1f3117fe8..c5c01628c715 100644 --- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c @@ -200,7 +200,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, val = readl(ctx->addr + DECON_WINCONx(win)); val &= ~WINCONx_BPPMODE_MASK; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_XRGB1555: val |= WINCONx_BPPMODE_16BPP_I1555; val |= WINCONx_HAWSWP_F; @@ -226,7 +226,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, return; } - DRM_DEBUG_KMS("bpp = %u\n", fb->bits_per_pixel); + DRM_DEBUG_KMS("bpp = %u\n", fb->format->cpp[0] * 8); /* * In case of exynos, setting dma-burst to 16Word causes permanent @@ -275,7 +275,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, struct decon_context *ctx = crtc->ctx; struct drm_framebuffer *fb = state->base.fb; unsigned int win = plane->index; - unsigned int bpp = fb->bits_per_pixel >> 3; + unsigned int bpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; dma_addr_t dma_addr = exynos_drm_fb_dma_addr(fb, 0); u32 val; diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f4d5a2133777..f9ab19e205e2 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -281,7 +281,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, val = readl(ctx->regs + WINCON(win)); val &= ~WINCONx_BPPMODE_MASK; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_RGB565: val |= WINCONx_BPPMODE_16BPP_565; val |= WINCONx_BURSTLEN_16WORD; @@ -330,7 +330,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, break; } - DRM_DEBUG_KMS("bpp = %d\n", fb->bits_per_pixel); + DRM_DEBUG_KMS("bpp = %d\n", fb->format->cpp[0] * 8); /* * In case of exynos, setting dma-burst to 16Word causes permanent @@ -340,7 +340,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win, * movement causes unstable DMA which results into iommu crash/tear. 
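The etnaviv hunks just above track a core drm_mm change that recurs in the i915 eviction code later in this series: eviction-scan state moves out of struct drm_mm into a caller-owned struct drm_mm_scan, which every scan helper now takes explicitly. A condensed sketch of the roll-back discipline the new API expects, modelled loosely on etnaviv_iommu_find_iova(); nodes[] stands in for whatever allocated nodes the driver walks:

#include <drm/drm_mm.h>

/*
 * Ask whether evicting some prefix of nodes[] would open a hole of
 * the requested size. Every block added to the scan must be removed
 * again before the scan state goes out of scope; real callers evict
 * the nodes for which drm_mm_scan_remove_block() returns true, as
 * the etnaviv hunk above does.
 */
static bool would_fit_after_evict(struct drm_mm *mm,
				  struct drm_mm_node *nodes,
				  unsigned int count, u64 size)
{
	struct drm_mm_scan scan;
	unsigned int n;
	bool found = false;

	drm_mm_scan_init(&scan, mm, size, 0, 0, 0);

	for (n = 0; n < count; n++) {
		if (drm_mm_scan_add_block(&scan, &nodes[n])) {
			found = true;
			n++;	/* nodes[0..n) are now in the scan */
			break;
		}
	}

	while (n--)
		drm_mm_scan_remove_block(&scan, &nodes[n]);

	return found;
}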
*/ - padding = (fb->pitches[0] / (fb->bits_per_pixel >> 3)) - fb->width; + padding = (fb->pitches[0] / fb->format->cpp[0]) - fb->width; if (fb->width + padding < MIN_FB_WIDTH_FOR_16WORD_BURST) { val &= ~WINCONx_BURSTLEN_MASK; val |= WINCONx_BURSTLEN_8WORD; @@ -407,7 +407,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc, unsigned int last_x; unsigned int last_y; unsigned int win = plane->index; - unsigned int bpp = fb->bits_per_pixel >> 3; + unsigned int bpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; if (ctx->suspended) diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 528229faffe4..1ef0be338b85 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -99,7 +99,6 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, struct drm_connector *connector) { struct exynos_dp_device *dp = to_dp(plat_data); - struct drm_encoder *encoder = &dp->encoder; int ret; drm_connector_register(connector); @@ -107,9 +106,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, /* Pre-empt DP connector creation if there's a bridge */ if (dp->ptn_bridge) { - bridge->next = dp->ptn_bridge; - dp->ptn_bridge->encoder = encoder; - ret = drm_bridge_attach(encoder->dev, dp->ptn_bridge); + ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge); if (ret) { DRM_ERROR("Failed to attach bridge to drm\n"); bridge->next = NULL; diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index e07cb1fe4860..812e2ec0761d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -1718,10 +1718,8 @@ static int exynos_dsi_bind(struct device *dev, struct device *master, } bridge = of_drm_find_bridge(dsi->bridge_node); - if (bridge) { - encoder->bridge = bridge; - drm_bridge_attach(drm_dev, bridge); - } + if (bridge) + drm_bridge_attach(encoder, bridge, NULL); return mipi_dsi_host_register(&dsi->dsi_host); } diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 23cce0a3f5fc..68d414227533 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -126,7 +126,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, + mode_cmd->offsets[i]; } - drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); if (ret < 0) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 9f35deb56170..d8808158d418 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -76,7 +76,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, { struct fb_info *fbi; struct drm_framebuffer *fb = helper->fb; - unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3); + unsigned int size = fb->width * fb->height * fb->format->cpp[0]; unsigned int nr_pages; unsigned long offset; @@ -90,7 +90,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, fbi->flags = FBINFO_FLAG_DEFAULT; fbi->fbops = &exynos_drm_fb_ops; - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); nr_pages = exynos_gem->size >> PAGE_SHIFT; @@ -103,7 +103,7 @@ static 
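The exynos_dp and exynos_drm_dsi hunks above show the new drm_bridge_attach() calling convention that the rest of the series (fsl-dcu, kirin, simple-kms) also adopts: the helper takes the encoder plus an optional previous bridge for chaining, and does the encoder/bridge pointer wiring that callers used to open-code. A sketch under that reading; my_encoder and my_bridge are placeholders:

#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>

static int my_attach_bridge(struct drm_encoder *my_encoder,
			    struct drm_bridge *my_bridge)
{
	/*
	 * Previously: my_encoder->bridge = my_bridge;
	 *             my_bridge->encoder = my_encoder;
	 *             drm_bridge_attach(my_encoder->dev, my_bridge);
	 * The NULL third argument means "no previous bridge", i.e.
	 * attach directly to the encoder rather than chaining.
	 */
	return drm_bridge_attach(my_encoder, my_bridge, NULL);
}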
int exynos_drm_fbdev_update(struct drm_fb_helper *helper, return -EIO; } - offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); + offset = fbi->var.xoffset * fb->format->cpp[0]; offset += fbi->var.yoffset * fb->pitches[0]; fbi->screen_base = exynos_gem->kvaddr + offset; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index e2e405170d35..745cfbdf6b39 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -738,7 +738,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc, unsigned long val, size, offset; unsigned int last_x, last_y, buf_offsize, line_size; unsigned int win = plane->index; - unsigned int bpp = fb->bits_per_pixel >> 3; + unsigned int bpp = fb->format->cpp[0]; unsigned int pitch = fb->pitches[0]; if (ctx->suspended) @@ -804,7 +804,7 @@ static void fimd_update_plane(struct exynos_drm_crtc *crtc, DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val); } - fimd_win_set_pixfmt(ctx, win, fb->pixel_format, state->src.w); + fimd_win_set_pixfmt(ctx, win, fb->format->format, state->src.w); /* hardware window 0 doesn't support color key. */ if (win != 0) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index edb20a34c66c..a106046e0c93 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -485,7 +485,7 @@ static void vp_video_buffer(struct mixer_context *ctx, bool crcb_mode = false; u32 val; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_NV12: crcb_mode = false; break; @@ -494,7 +494,7 @@ static void vp_video_buffer(struct mixer_context *ctx, break; default: DRM_ERROR("pixel format for vp is wrong [%d].\n", - fb->pixel_format); + fb->format->format); return; } @@ -597,7 +597,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int fmt; u32 val; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_XRGB4444: case DRM_FORMAT_ARGB4444: fmt = MXR_FORMAT_ARGB4444; @@ -631,7 +631,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, /* converting dma address base and source offset */ dma_addr = exynos_drm_fb_dma_addr(fb, 0) - + (state->src.x * fb->bits_per_pixel >> 3) + + (state->src.x * fb->format->cpp[0]) + (state->src.y * fb->pitches[0]); src_x_offset = 0; src_y_offset = 0; @@ -649,7 +649,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, /* setup geometry */ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), - fb->pitches[0] / (fb->bits_per_pixel >> 3)); + fb->pitches[0] / fb->format->cpp[0]); /* setup display size */ if (ctx->mxr_ver == MXR_VER_128_0_0_184 && @@ -681,7 +681,7 @@ static void mixer_graph_buffer(struct mixer_context *ctx, mixer_cfg_scan(ctx, mode->vdisplay); mixer_cfg_rgb_fmt(ctx, mode->vdisplay); mixer_cfg_layer(ctx, win, priority, true); - mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->pixel_format)); + mixer_cfg_gfx_blend(ctx, win, is_alpha_format(fb->format->format)); /* layer update mandatory for mixer 16.0.33.0 */ if (ctx->mxr_ver == MXR_VER_16_0_33_0 || diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 537ca159ffe5..0b35da73c2b0 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -434,7 +434,8 @@ static int fsl_dcu_drm_remove(struct platform_device *pdev) { struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev); - drm_put_dev(fsl_dev->drm); + 
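The exynos conversions above, like the gma500, hibmc and i915 ones below, repeat a single recipe: struct drm_framebuffer no longer carries pixel_format, bits_per_pixel and depth, only a cached const struct drm_format_info *format. The equivalences, collected in one sketch for reference:

#include <drm/drm_framebuffer.h>
#include <drm/drm_fourcc.h>

static void fb_format_equivalences(const struct drm_framebuffer *fb)
{
	u32 fourcc = fb->format->format;	/* was fb->pixel_format */
	int cpp = fb->format->cpp[0];		/* was fb->bits_per_pixel / 8 */
	int bpp = fb->format->cpp[0] * 8;	/* was fb->bits_per_pixel */
	u8 depth = fb->format->depth;		/* was fb->depth */

	(void)fourcc; (void)cpp; (void)bpp; (void)depth;
}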
drm_dev_unregister(fsl_dev->drm); + drm_dev_unref(fsl_dev->drm); clk_disable_unprepare(fsl_dev->clk); clk_unregister(fsl_dev->pix_clk); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index e9e9aeecf2eb..da9bfd432ca6 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -12,6 +12,8 @@ #ifndef __FSL_DCU_DRM_DRV_H__ #define __FSL_DCU_DRM_DRV_H__ +#include <drm/drm_encoder.h> + #include "fsl_dcu_drm_crtc.h" #include "fsl_dcu_drm_output.h" #include "fsl_dcu_drm_plane.h" diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index a99f48847420..0a20723aa6e1 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -44,7 +44,7 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane, if (!state->fb || !state->crtc) return 0; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_RGB888: case DRM_FORMAT_XRGB8888: @@ -96,7 +96,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, gem = drm_fb_cma_get_gem_obj(fb, 0); - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_RGB565: bpp = FSL_DCU_RGB565; break; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index 05a8ee106879..c3651456c963 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -160,10 +160,7 @@ static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev, if (!bridge) return -ENODEV; - fsl_dev->encoder.bridge = bridge; - bridge->encoder = &fsl_dev->encoder; - - return drm_bridge_attach(fsl_dev->drm, bridge); + return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL); } int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev) diff --git a/drivers/gpu/drm/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c index 0d2bb1682508..c51d9259c7a7 100644 --- a/drivers/gpu/drm/gma500/accel_2d.c +++ b/drivers/gpu/drm/gma500/accel_2d.c @@ -254,7 +254,7 @@ static void psbfb_copyarea_accel(struct fb_info *info, offset = psbfb->gtt->offset; stride = fb->pitches[0]; - switch (fb->depth) { + switch (fb->format->depth) { case 8: src_format = PSB_2D_SRC_332RGB; dst_format = PSB_2D_DST_332RGB; diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 8b44fa542562..fd1488bf5189 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -77,7 +77,7 @@ static int psbfb_setcolreg(unsigned regno, unsigned red, unsigned green, (transp << info->var.transp.offset); if (regno < 16) { - switch (fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 16: ((uint32_t *) info->pseudo_palette)[regno] = v; break; @@ -244,7 +244,7 @@ static int psb_framebuffer_init(struct drm_device *dev, if (mode_cmd->pitches[0] & 63) return -EINVAL; - drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd); fb->gtt = gt; ret = drm_framebuffer_init(dev, &fb->base, &psb_fb_funcs); if (ret) { @@ -407,7 +407,7 @@ static int psbfb_create(struct psb_fbdev *fbdev, fbdev->psb_fb_helper.fb = fb; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); strcpy(info->fix.id, "psbdrmfb"); info->flags = FBINFO_DEFAULT; diff --git a/drivers/gpu/drm/gma500/gma_display.c 
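The fsl-dcu remove path converted just above, and the etnaviv unbind earlier, illustrate the retirement of drm_put_dev(): unregistration and the final reference drop become separate steps, so driver-private teardown can sit between them. A sketch of the resulting ordering, using the 4.11-era drm_dev_unref() (later kernels call it drm_dev_put()):

#include <drm/drmP.h>

static void my_driver_remove(struct drm_device *ddev)
{
	drm_dev_unregister(ddev);	/* cut off new userspace access first */

	/* ... driver-private teardown: clocks, irqs, private state ... */

	drm_dev_unref(ddev);		/* drop the final reference */
}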
b/drivers/gpu/drm/gma500/gma_display.c index 1a1cf7a3b5ef..d1c5642b1c1e 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -59,7 +59,8 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb); + struct drm_framebuffer *fb = crtc->primary->fb; + struct psb_framebuffer *psbfb = to_psb_fb(fb); int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; @@ -70,7 +71,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; /* no fb bound */ - if (!crtc->primary->fb) { + if (!fb) { dev_err(dev->dev, "No FB bound\n"); goto gma_pipe_cleaner; } @@ -81,19 +82,19 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y, if (ret < 0) goto gma_pipe_set_base_exit; start = psbfb->gtt->offset; - offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8); + offset = y * fb->pitches[0] + x * fb->format->cpp[0]; - REG_WRITE(map->stride, crtc->primary->fb->pitches[0]); + REG_WRITE(map->stride, fb->pitches[0]); dspcntr = REG_READ(map->cntr); dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: - if (crtc->primary->fb->depth == 15) + if (fb->format->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr |= DISPPLANE_16BPP; diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index 92e3f93ee682..63c6e08600ae 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -148,7 +148,7 @@ static int check_fb(struct drm_framebuffer *fb) if (!fb) return 0; - switch (fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: case 16: case 24: @@ -165,8 +165,9 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, { struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; + struct drm_framebuffer *fb = crtc->primary->fb; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb); + struct psb_framebuffer *psbfb = to_psb_fb(fb); int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; @@ -178,12 +179,12 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, dev_dbg(dev->dev, "pipe = 0x%x.\n", pipe); /* no fb bound */ - if (!crtc->primary->fb) { + if (!fb) { dev_dbg(dev->dev, "No FB bound\n"); return 0; } - ret = check_fb(crtc->primary->fb); + ret = check_fb(fb); if (ret) return ret; @@ -196,18 +197,18 @@ static int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, return 0; start = psbfb->gtt->offset; - offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8); + offset = y * fb->pitches[0] + x * fb->format->cpp[0]; - REG_WRITE(map->stride, crtc->primary->fb->pitches[0]); + REG_WRITE(map->stride, fb->pitches[0]); dspcntr = REG_READ(map->cntr); dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: - if (crtc->primary->fb->depth == 15) + if (fb->format->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr 
|= DISPPLANE_16BPP; diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index da9fd34b9550..0fff269d3fe6 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -599,7 +599,8 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_psb_private *dev_priv = dev->dev_private; struct gma_crtc *gma_crtc = to_gma_crtc(crtc); - struct psb_framebuffer *psbfb = to_psb_fb(crtc->primary->fb); + struct drm_framebuffer *fb = crtc->primary->fb; + struct psb_framebuffer *psbfb = to_psb_fb(fb); int pipe = gma_crtc->pipe; const struct psb_offset *map = &dev_priv->regmap[pipe]; unsigned long start, offset; @@ -608,7 +609,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc, int ret = 0; /* no fb bound */ - if (!crtc->primary->fb) { + if (!fb) { dev_dbg(dev->dev, "No FB bound\n"); return 0; } @@ -617,19 +618,19 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc, return 0; start = psbfb->gtt->offset; - offset = y * crtc->primary->fb->pitches[0] + x * (crtc->primary->fb->bits_per_pixel / 8); + offset = y * fb->pitches[0] + x * fb->format->cpp[0]; - REG_WRITE(map->stride, crtc->primary->fb->pitches[0]); + REG_WRITE(map->stride, fb->pitches[0]); dspcntr = REG_READ(map->cntr); dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: dspcntr |= DISPPLANE_8BPP; break; case 16: - if (crtc->primary->fb->depth == 15) + if (fb->format->depth == 15) dspcntr |= DISPPLANE_15_16BPP; else dspcntr |= DISPPLANE_16BPP; diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 2a3b7c684db2..6a10215fc42d 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -23,6 +23,7 @@ #include <linux/i2c-algo-bit.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder.h> #include <linux/gpio.h> #include "gma_display.h" diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index 2a1386e33126..c655883d3613 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -122,11 +122,11 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); - reg = state->fb->width * (state->fb->bits_per_pixel / 8); + reg = state->fb->width * (state->fb->format->cpp[0]); /* now line_pad is 16 */ reg = PADDING(16, reg); - line_l = state->fb->width * state->fb->bits_per_pixel / 8; + line_l = state->fb->width * state->fb->format->cpp[0]; line_l = PADDING(16, line_l); writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) | HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l), @@ -136,7 +136,7 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL); reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK; reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT, - state->fb->bits_per_pixel / 16); + state->fb->format->cpp[0] * 8 / 16); writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL); } diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c index 9b0696735ba1..7a6957ae4b44 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_fbdev.c @@ -135,7 +135,7 @@ static int hibmc_drm_fb_create(struct drm_fb_helper *helper, info->fbops = 
&hibmc_drm_fb_ops; drm_fb_helper_fill_fix(info, hi_fbdev->fb->fb.pitches[0], - hi_fbdev->fb->fb.depth); + hi_fbdev->fb->fb.format->depth); drm_fb_helper_fill_var(info, &priv->fbdev->helper, sizes->fb_width, sizes->fb_height); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index e76abf61edae..3c6f750389fb 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -512,7 +512,7 @@ hibmc_framebuffer_init(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - drm_helper_mode_fill_fb_struct(&hibmc_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &hibmc_fb->fb, mode_cmd); hibmc_fb->obj = obj; ret = drm_framebuffer_init(dev, &hibmc_fb->fb, &hibmc_fb_funcs); if (ret) { diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 998452ad0fcb..1737e98bc10a 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -709,10 +709,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi) int ret; /* associate the bridge to dsi encoder */ - encoder->bridge = bridge; - bridge->encoder = encoder; - - ret = drm_bridge_attach(dev, bridge); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) { DRM_ERROR("failed to attach external bridge\n"); return ret; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index afc2b5d2d5f0..307d460ab684 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -617,7 +617,7 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb, ch + 1, y, in_h, stride, (u32)obj->paddr); DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n", addr, fb->width, fb->height, fmt, - drm_get_format_name(fb->pixel_format, &format_name)); + drm_get_format_name(fb->format->format, &format_name)); /* get reg offset */ reg_ctrl = RD_CH_CTRL(ch); @@ -773,7 +773,7 @@ static void ade_update_channel(struct ade_plane *aplane, { struct ade_hw_ctx *ctx = aplane->ctx; void __iomem *base = ctx->base; - u32 fmt = ade_get_format(fb->pixel_format); + u32 fmt = ade_get_format(fb->format->format); u32 ch = aplane->ch; u32 in_w; u32 in_h; @@ -835,7 +835,7 @@ static int ade_plane_atomic_check(struct drm_plane *plane, if (!crtc || !fb) return 0; - fmt = ade_get_format(fb->pixel_format); + fmt = ade_get_format(fb->format->format); if (fmt == ADE_FORMAT_UNSUPPORT) return -EINVAL; @@ -973,9 +973,9 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx) return 0; } -static int ade_drm_init(struct drm_device *dev) +static int ade_drm_init(struct platform_device *pdev) { - struct platform_device *pdev = dev->platformdev; + struct drm_device *dev = platform_get_drvdata(pdev); struct ade_data *ade; struct ade_hw_ctx *ctx; struct ade_crtc *acrtc; @@ -1034,13 +1034,8 @@ static int ade_drm_init(struct drm_device *dev) return 0; } -static void ade_drm_cleanup(struct drm_device *dev) +static void ade_drm_cleanup(struct platform_device *pdev) { - struct platform_device *pdev = dev->platformdev; - struct ade_data *ade = platform_get_drvdata(pdev); - struct drm_crtc *crtc = &ade->acrtc.base; - - drm_crtc_cleanup(crtc); } const struct kirin_dc_ops ade_dc_ops = { diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index ebd5f4fe4c23..fa228b7b022c 100644 --- 
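The hibmc_framebuffer_init() hunk above, like the exynos, gma500 and i915 equivalents elsewhere in this series, now passes the drm_device into drm_helper_mode_fill_fb_struct(), apparently so the helper can populate fb->dev and the cached format info before drm_framebuffer_init() runs. A sketch of the resulting driver fb-init shape; my_fb_funcs is a placeholder vtable:

#include <drm/drm_crtc_helper.h>
#include <drm/drm_framebuffer.h>

static int my_fb_init(struct drm_device *dev,
		      struct drm_framebuffer *fb,
		      const struct drm_mode_fb_cmd2 *mode_cmd,
		      const struct drm_framebuffer_funcs *my_fb_funcs)
{
	/* dev is threaded through so fb->dev/fb->format are set here */
	drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd);

	return drm_framebuffer_init(dev, fb, my_fb_funcs);
}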
a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -42,7 +42,7 @@ static int kirin_drm_kms_cleanup(struct drm_device *dev) #endif drm_kms_helper_poll_fini(dev); drm_vblank_cleanup(dev); - dc_ops->cleanup(dev); + dc_ops->cleanup(to_platform_device(dev->dev)); drm_mode_config_cleanup(dev); devm_kfree(dev->dev, priv); dev->dev_private = NULL; @@ -104,7 +104,7 @@ static int kirin_drm_kms_init(struct drm_device *dev) kirin_drm_mode_config_init(dev); /* display controller init */ - ret = dc_ops->init(dev); + ret = dc_ops->init(to_platform_device(dev->dev)); if (ret) goto err_mode_config_cleanup; @@ -138,7 +138,7 @@ static int kirin_drm_kms_init(struct drm_device *dev) err_unbind_all: component_unbind_all(dev->dev, dev); err_dc_cleanup: - dc_ops->cleanup(dev); + dc_ops->cleanup(to_platform_device(dev->dev)); err_mode_config_cleanup: drm_mode_config_cleanup(dev); devm_kfree(dev->dev, priv); @@ -209,8 +209,6 @@ static int kirin_drm_bind(struct device *dev) if (IS_ERR(drm_dev)) return PTR_ERR(drm_dev); - drm_dev->platformdev = to_platform_device(dev); - ret = kirin_drm_kms_init(drm_dev); if (ret) goto err_drm_dev_unref; diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h index 1a07caf8e7f4..a0bb217c4c64 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.h @@ -15,8 +15,8 @@ /* display controller init/cleanup ops */ struct kirin_dc_ops { - int (*init)(struct drm_device *dev); - void (*cleanup)(struct drm_device *dev); + int (*init)(struct platform_device *pdev); + void (*cleanup)(struct platform_device *pdev); }; struct kirin_drm_private { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 791bfc760075..b77b53b47acc 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1873,8 +1873,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fbdev_fb->base.width, fbdev_fb->base.height, - fbdev_fb->base.depth, - fbdev_fb->base.bits_per_pixel, + fbdev_fb->base.format->depth, + fbdev_fb->base.format->cpp[0] * 8, fbdev_fb->base.modifier, drm_framebuffer_read_refcount(&fbdev_fb->base)); describe_obj(m, fbdev_fb->obj); @@ -1891,8 +1891,8 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", fb->base.width, fb->base.height, - fb->base.depth, - fb->base.bits_per_pixel, + fb->base.format->depth, + fb->base.format->cpp[0] * 8, fb->base.modifier, drm_framebuffer_read_refcount(&fb->base)); describe_obj(m, fb->obj); @@ -3021,7 +3021,8 @@ static void intel_plane_info(struct seq_file *m, struct intel_crtc *intel_crtc) state = plane->state; if (state->fb) { - drm_get_format_name(state->fb->pixel_format, &format_name); + drm_get_format_name(state->fb->format->format, + &format_name); } else { sprintf(format_name.str, "N/A"); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 243224aeabf8..c3c1d32b65a3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1026,7 +1026,7 @@ struct intel_fbc { struct { u64 ilk_ggtt_offset; - uint32_t pixel_format; + const struct drm_format_info *format; unsigned int stride; int fence_reg; unsigned int tiling_mode; @@ -1042,7 +1042,7 @@ struct intel_fbc { 
struct { u64 ggtt_offset; - uint32_t pixel_format; + const struct drm_format_info *format; unsigned int stride; int fence_reg; } fb; diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index bd08814b015c..85ceff1b74b6 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -51,7 +51,10 @@ static bool ggtt_is_idle(struct drm_i915_private *dev_priv) } static bool -mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind) +mark_free(struct drm_mm_scan *scan, + struct i915_vma *vma, + unsigned int flags, + struct list_head *unwind) { if (i915_vma_is_pinned(vma)) return false; @@ -63,7 +66,7 @@ mark_free(struct i915_vma *vma, unsigned int flags, struct list_head *unwind) return false; list_add(&vma->exec_list, unwind); - return drm_mm_scan_add_block(&vma->node); + return drm_mm_scan_add_block(scan, &vma->node); } /** @@ -97,6 +100,7 @@ i915_gem_evict_something(struct i915_address_space *vm, unsigned flags) { struct drm_i915_private *dev_priv = to_i915(vm->dev); + struct drm_mm_scan scan; struct list_head eviction_list; struct list_head *phases[] = { &vm->inactive_list, @@ -104,6 +108,7 @@ i915_gem_evict_something(struct i915_address_space *vm, NULL, }, **phase; struct i915_vma *vma, *next; + struct drm_mm_node *node; int ret; lockdep_assert_held(&vm->dev->struct_mutex); @@ -122,12 +127,10 @@ i915_gem_evict_something(struct i915_address_space *vm, * On each list, the oldest objects lie at the HEAD with the freshest * object on the TAIL. */ - if (start != 0 || end != vm->total) { - drm_mm_init_scan_with_range(&vm->mm, min_size, - alignment, cache_level, - start, end); - } else - drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); + drm_mm_scan_init_with_range(&scan, &vm->mm, + min_size, alignment, cache_level, + start, end, + flags & PIN_HIGH ? DRM_MM_CREATE_TOP : 0); if (flags & PIN_NONBLOCK) phases[1] = NULL; @@ -137,13 +140,13 @@ search_again: phase = phases; do { list_for_each_entry(vma, *phase, vm_link) - if (mark_free(vma, flags, &eviction_list)) + if (mark_free(&scan, vma, flags, &eviction_list)) goto found; } while (*++phase); /* Nothing found, clean up and bail out! */ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { - ret = drm_mm_scan_remove_block(&vma->node); + ret = drm_mm_scan_remove_block(&scan, &vma->node); BUG_ON(ret); INIT_LIST_HEAD(&vma->exec_list); @@ -192,7 +195,7 @@ found: * of any of our objects, thus corrupting the list). 
*/ list_for_each_entry_safe(vma, next, &eviction_list, exec_list) { - if (drm_mm_scan_remove_block(&vma->node)) + if (drm_mm_scan_remove_block(&scan, &vma->node)) __i915_vma_pin(vma); else list_del_init(&vma->exec_list); @@ -209,6 +212,12 @@ found: if (ret == 0) ret = i915_vma_unbind(vma); } + + while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) { + vma = container_of(node, struct i915_vma, node); + ret = i915_vma_unbind(vma); + } + return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b4bde1452f2a..d49a04eb584a 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -2721,7 +2721,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); } -static void i915_gtt_color_adjust(struct drm_mm_node *node, +static void i915_gtt_color_adjust(const struct drm_mm_node *node, unsigned long color, u64 *start, u64 *end) @@ -2729,10 +2729,8 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node, if (node->color != color) *start += 4096; - node = list_first_entry_or_null(&node->node_list, - struct drm_mm_node, - node_list); - if (node && node->allocated && node->color != color) + node = list_next_entry(node, node_list); + if (node->allocated && node->color != color) *end -= 4096; } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index a792dcb902b5..325b917c5ad7 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -320,11 +320,11 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, return true; other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list); - if (other->allocated && !other->hole_follows && other->color != cache_level) + if (other->allocated && !drm_mm_hole_follows(other) && other->color != cache_level) return false; other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list); - if (other->allocated && !gtt_space->hole_follows && other->color != cache_level) + if (other->allocated && !drm_mm_hole_follows(gtt_space) && other->color != cache_level) return false; return true; diff --git a/drivers/gpu/drm/i915/intel_atomic_plane.c b/drivers/gpu/drm/i915/intel_atomic_plane.c index dbe9fb41ae53..4612ffd555a7 100644 --- a/drivers/gpu/drm/i915/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/intel_atomic_plane.c @@ -103,36 +103,24 @@ intel_plane_destroy_state(struct drm_plane *plane, drm_atomic_helper_plane_destroy_state(plane, state); } -static int intel_plane_atomic_check(struct drm_plane *plane, - struct drm_plane_state *state) +int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, + struct intel_plane_state *intel_state) { + struct drm_plane *plane = intel_state->base.plane; struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_crtc *crtc = state->crtc; - struct intel_crtc *intel_crtc; - struct intel_crtc_state *crtc_state; + struct drm_plane_state *state = &intel_state->base; struct intel_plane *intel_plane = to_intel_plane(plane); - struct intel_plane_state *intel_state = to_intel_plane_state(state); - struct drm_crtc_state *drm_crtc_state; int ret; - crtc = crtc ? crtc : plane->state->crtc; - intel_crtc = to_intel_crtc(crtc); - /* * Both crtc and plane->crtc could be NULL if we're updating a * property while the plane is disabled. We don't actually have * anything driver-specific we need to test in that case, so * just return success. 
*/ - if (!crtc) + if (!intel_state->base.crtc && !plane->state->crtc) return 0; - drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); - if (WARN_ON(!drm_crtc_state)) - return -EINVAL; - - crtc_state = to_intel_crtc_state(drm_crtc_state); - /* Clip all planes to CRTC size, or 0x0 if CRTC is disabled */ intel_state->clip.x1 = 0; intel_state->clip.y1 = 0; @@ -155,11 +143,11 @@ static int intel_plane_atomic_check(struct drm_plane *plane, * RGB 16-bit 5:6:5, and Indexed 8-bit. * TBD: Add RGB64 case once its added in supported format list. */ - switch (state->fb->pixel_format) { + switch (state->fb->format->format) { case DRM_FORMAT_C8: case DRM_FORMAT_RGB565: DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n", - drm_get_format_name(state->fb->pixel_format, + drm_get_format_name(state->fb->format->format, &format_name)); return -EINVAL; @@ -184,6 +172,31 @@ static int intel_plane_atomic_check(struct drm_plane *plane, return intel_plane_atomic_calc_changes(&crtc_state->base, state); } +static int intel_plane_atomic_check(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *drm_crtc_state; + + crtc = crtc ? crtc : plane->state->crtc; + + /* + * Both crtc and plane->crtc could be NULL if we're updating a + * property while the plane is disabled. We don't actually have + * anything driver-specific we need to test in that case, so + * just return success. + */ + if (!crtc) + return 0; + + drm_crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc); + if (WARN_ON(!drm_crtc_state)) + return -EINVAL; + + return intel_plane_atomic_check_with_state(to_intel_crtc_state(drm_crtc_state), + to_intel_plane_state(state)); +} + static void intel_plane_atomic_update(struct drm_plane *plane, struct drm_plane_state *old_state) { diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3dc8724df400..252aaabc7eef 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2275,7 +2275,7 @@ u32 intel_fb_xy_to_linear(int x, int y, int plane) { const struct drm_framebuffer *fb = state->base.fb; - unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int cpp = fb->format->cpp[plane]; unsigned int pitch = fb->pitches[plane]; return y * pitch + x * cpp; @@ -2344,7 +2344,7 @@ static u32 intel_adjust_tile_offset(int *x, int *y, { const struct drm_i915_private *dev_priv = to_i915(state->base.plane->dev); const struct drm_framebuffer *fb = state->base.fb; - unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int cpp = fb->format->cpp[plane]; unsigned int rotation = state->base.rotation; unsigned int pitch = intel_fb_pitch(fb, plane, rotation); @@ -2400,7 +2400,7 @@ static u32 _intel_compute_tile_offset(const struct drm_i915_private *dev_priv, u32 alignment) { uint64_t fb_modifier = fb->modifier; - unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int cpp = fb->format->cpp[plane]; u32 offset, offset_aligned; if (alignment) @@ -2455,7 +2455,7 @@ u32 intel_compute_tile_offset(int *x, int *y, u32 alignment; /* AUX_DIST needs only 4K alignment */ - if (fb->pixel_format == DRM_FORMAT_NV12 && plane == 1) + if (fb->format->format == DRM_FORMAT_NV12 && plane == 1) alignment = 4096; else alignment = intel_surf_alignment(dev_priv, fb->modifier); @@ -2468,7 +2468,7 @@ u32 intel_compute_tile_offset(int *x, int *y, static void intel_fb_offset_to_xy(int *x, int *y, const struct 
drm_framebuffer *fb, int plane) { - unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + unsigned int cpp = fb->format->cpp[plane]; unsigned int pitch = fb->pitches[plane]; u32 linear_offset = fb->offsets[plane]; @@ -2496,8 +2496,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, struct intel_rotation_info *rot_info = &intel_fb->rot_info; u32 gtt_offset_rotated = 0; unsigned int max_size = 0; - uint32_t format = fb->pixel_format; - int i, num_planes = drm_format_num_planes(format); + int i, num_planes = fb->format->num_planes; unsigned int tile_size = intel_tile_size(dev_priv); for (i = 0; i < num_planes; i++) { @@ -2506,9 +2505,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, u32 offset; int x, y; - cpp = drm_format_plane_cpp(format, i); - width = drm_format_plane_width(fb->width, format, i); - height = drm_format_plane_height(fb->height, format, i); + cpp = fb->format->cpp[i]; + width = drm_framebuffer_plane_width(fb->width, fb, i); + height = drm_framebuffer_plane_height(fb->height, fb, i); intel_fb_offset_to_xy(&x, &y, fb, i); @@ -2701,7 +2700,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, if (plane_config->tiling == I915_TILING_X) obj->tiling_and_stride = fb->pitches[0] | I915_TILING_X; - mode_cmd.pixel_format = fb->pixel_format; + mode_cmd.pixel_format = fb->format->format; mode_cmd.width = fb->width; mode_cmd.height = fb->height; mode_cmd.pitches[0] = fb->pitches[0]; @@ -2833,7 +2832,7 @@ valid_fb: static int skl_max_plane_width(const struct drm_framebuffer *fb, int plane, unsigned int rotation) { - int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + int cpp = fb->format->cpp[plane]; switch (fb->modifier) { case DRM_FORMAT_MOD_NONE: @@ -2912,7 +2911,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * TODO: linear and Y-tiled seem fine, Yf untested, */ if (fb->modifier == I915_FORMAT_MOD_X_TILED) { - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = fb->format->cpp[0]; while ((x + w) * cpp > fb->pitches[0]) { if (offset == 0) { @@ -2977,7 +2976,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since * the main surface setup depends on it. */ - if (fb->pixel_format == DRM_FORMAT_NV12) { + if (fb->format->format == DRM_FORMAT_NV12) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -3032,7 +3031,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary, I915_WRITE(PRIMCNSTALPHA(plane), 0); } - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_C8: dspcntr |= DISPPLANE_8BPP; break; @@ -3147,7 +3146,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary, if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_C8: dspcntr |= DISPPLANE_8BPP; break; @@ -3278,12 +3277,12 @@ u32 skl_plane_stride(const struct drm_framebuffer *fb, int plane, * linear buffers or in number of tiles for tiled buffers. 
*/ if (drm_rotation_90_or_270(rotation)) { - int cpp = drm_format_plane_cpp(fb->pixel_format, plane); + int cpp = fb->format->cpp[plane]; stride /= intel_tile_height(dev_priv, fb->modifier, cpp); } else { stride /= intel_fb_stride_alignment(dev_priv, fb->modifier, - fb->pixel_format); + fb->format->format); } return stride; @@ -3397,7 +3396,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane, PLANE_CTL_PIPE_GAMMA_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE; - plane_ctl |= skl_plane_ctl_format(fb->pixel_format); + plane_ctl |= skl_plane_ctl_format(fb->format->format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier); plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE; plane_ctl |= skl_plane_ctl_rotation(rotation); @@ -4769,7 +4768,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, } /* Check src format */ - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_RGB565: case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB8888: @@ -4785,7 +4784,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, default: DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", intel_plane->base.base.id, intel_plane->base.name, - fb->base.id, fb->pixel_format); + fb->base.id, fb->format->format); return -EINVAL; } @@ -8693,6 +8692,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; + fb->dev = dev; + if (INTEL_GEN(dev_priv) >= 4) { if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; @@ -8702,8 +8703,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, pixel_format = val & DISPPLANE_PIXFORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); - fb->pixel_format = fourcc; - fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; + fb->format = drm_format_info(fourcc); if (INTEL_GEN(dev_priv) >= 4) { if (plane_config->tiling) @@ -8724,14 +8724,14 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_fb_align_height(dev, fb->height, - fb->pixel_format, + fb->format->format, fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), plane, fb->width, fb->height, - fb->bits_per_pixel, base, fb->pitches[0], + fb->format->cpp[0] * 8, base, fb->pitches[0], plane_config->size); plane_config->fb = intel_fb; @@ -9723,6 +9723,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; + fb->dev = dev; + val = I915_READ(PLANE_CTL(pipe, 0)); if (!(val & PLANE_CTL_ENABLE)) goto error; @@ -9731,8 +9733,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, fourcc = skl_format_to_fourcc(pixel_format, val & PLANE_CTL_ORDER_RGBX, val & PLANE_CTL_ALPHA_MASK); - fb->pixel_format = fourcc; - fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; + fb->format = drm_format_info(fourcc); tiling = val & PLANE_CTL_TILED_MASK; switch (tiling) { @@ -9765,18 +9766,18 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, val = I915_READ(PLANE_STRIDE(pipe, 0)); stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier, - fb->pixel_format); + fb->format->format); fb->pitches[0] = (val & 0x3ff) * stride_mult; aligned_height = intel_fb_align_height(dev, fb->height, - fb->pixel_format, + fb->format->format, fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), fb->width, 
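intel_fill_fb_info() above switches from the drm_format_plane_cpp()/width/height lookup helpers to the cached format info plus the new drm_framebuffer_plane_width()/height() accessors. A sketch of walking a multi-planar framebuffer (for instance the NV12 main-plus-AUX layout handled in skl_check_plane_surface()) with only those accessors:

#include <drm/drm_framebuffer.h>

static void walk_fb_planes(const struct drm_framebuffer *fb)
{
	int i;

	for (i = 0; i < fb->format->num_planes; i++) {
		unsigned int cpp = fb->format->cpp[i];
		int w = drm_framebuffer_plane_width(fb->width, fb, i);
		int h = drm_framebuffer_plane_height(fb->height, fb, i);

		/* plane i spans roughly cpp * w * h bytes, before tiling */
		(void)(cpp * w * h);
	}
}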
fb->height, - fb->bits_per_pixel, base, fb->pitches[0], + fb->format->cpp[0] * 8, base, fb->pitches[0], plane_config->size); plane_config->fb = intel_fb; @@ -9835,6 +9836,8 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb = &intel_fb->base; + fb->dev = dev; + if (INTEL_GEN(dev_priv) >= 4) { if (val & DISPPLANE_TILED) { plane_config->tiling = I915_TILING_X; @@ -9844,8 +9847,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, pixel_format = val & DISPPLANE_PIXFORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); - fb->pixel_format = fourcc; - fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8; + fb->format = drm_format_info(fourcc); base = I915_READ(DSPSURF(pipe)) & 0xfffff000; if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { @@ -9866,14 +9868,14 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, fb->pitches[0] = val & 0xffffffc0; aligned_height = intel_fb_align_height(dev, fb->height, - fb->pixel_format, + fb->format->format, fb->modifier); plane_config->size = fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), fb->width, fb->height, - fb->bits_per_pixel, base, fb->pitches[0], + fb->format->cpp[0] * 8, base, fb->pitches[0], plane_config->size); plane_config->fb = intel_fb; @@ -11032,7 +11034,7 @@ mode_fits_in_fbdev(struct drm_device *dev, fb = &dev_priv->fbdev->fb->base; if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay, - fb->bits_per_pixel)) + fb->format->cpp[0] * 8)) return NULL; if (obj->base.size < mode->vdisplay * fb->pitches[0]) @@ -12134,7 +12136,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return -EBUSY; /* Can't change pixel format via MI display flips. */ - if (fb->pixel_format != crtc->primary->fb->pixel_format) + if (fb->format != crtc->primary->fb->format) return -EINVAL; /* @@ -12831,7 +12833,7 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc, DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d, fb = %ux%u format = %s\n", plane->base.id, plane->name, fb->base.id, fb->width, fb->height, - drm_get_format_name(fb->pixel_format, &format_name)); + drm_get_format_name(fb->format->format, &format_name)); if (INTEL_GEN(dev_priv) >= 9) DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n", state->scaler_id, @@ -14941,6 +14943,136 @@ const struct drm_plane_funcs intel_plane_funcs = { .atomic_destroy_state = intel_plane_destroy_state, }; +static int +intel_legacy_cursor_update(struct drm_plane *plane, + struct drm_crtc *crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + uint32_t src_x, uint32_t src_y, + uint32_t src_w, uint32_t src_h) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->dev); + int ret; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_framebuffer *old_fb; + struct drm_crtc_state *crtc_state = crtc->state; + + /* + * When crtc is inactive or there is a modeset pending, + * wait for it to complete in the slowpath + */ + if (!crtc_state->active || needs_modeset(crtc_state) || + to_intel_crtc_state(crtc_state)->update_pipe) + goto slow; + + old_plane_state = plane->state; + + /* + * If any parameters change that may affect watermarks, + * take the slowpath. Only changing fb or position should be + * in the fastpath. 
+ */ + if (old_plane_state->crtc != crtc || + old_plane_state->src_w != src_w || + old_plane_state->src_h != src_h || + old_plane_state->crtc_w != crtc_w || + old_plane_state->crtc_h != crtc_h || + !old_plane_state->visible || + old_plane_state->fb->modifier != fb->modifier) + goto slow; + + new_plane_state = intel_plane_duplicate_state(plane); + if (!new_plane_state) + return -ENOMEM; + + drm_atomic_set_fb_for_plane(new_plane_state, fb); + + new_plane_state->src_x = src_x; + new_plane_state->src_y = src_y; + new_plane_state->src_w = src_w; + new_plane_state->src_h = src_h; + new_plane_state->crtc_x = crtc_x; + new_plane_state->crtc_y = crtc_y; + new_plane_state->crtc_w = crtc_w; + new_plane_state->crtc_h = crtc_h; + + ret = intel_plane_atomic_check_with_state(to_intel_crtc_state(crtc->state), + to_intel_plane_state(new_plane_state)); + if (ret) + goto out_free; + + /* Visibility changed, must take slowpath. */ + if (!new_plane_state->visible) + goto slow_free; + + ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); + if (ret) + goto out_free; + + if (INTEL_INFO(dev_priv)->cursor_needs_physical) { + int align = IS_I830(dev_priv) ? 16 * 1024 : 256; + + ret = i915_gem_object_attach_phys(intel_fb_obj(fb), align); + if (ret) { + DRM_DEBUG_KMS("failed to attach phys object\n"); + goto out_unlock; + } + } else { + struct i915_vma *vma; + + vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); + if (IS_ERR(vma)) { + DRM_DEBUG_KMS("failed to pin object\n"); + + ret = PTR_ERR(vma); + goto out_unlock; + } + } + + old_fb = old_plane_state->fb; + + i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb), + intel_plane->frontbuffer_bit); + + /* Swap plane state */ + new_plane_state->fence = old_plane_state->fence; + *to_intel_plane_state(old_plane_state) = *to_intel_plane_state(new_plane_state); + new_plane_state->fence = NULL; + new_plane_state->fb = old_fb; + + intel_plane->update_plane(plane, + to_intel_crtc_state(crtc->state), + to_intel_plane_state(plane->state)); + + intel_cleanup_plane_fb(plane, new_plane_state); + +out_unlock: + mutex_unlock(&dev_priv->drm.struct_mutex); +out_free: + intel_plane_destroy_state(plane, new_plane_state); + return ret; + +slow_free: + intel_plane_destroy_state(plane, new_plane_state); +slow: + return drm_atomic_helper_update_plane(plane, crtc, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h); +} + +static const struct drm_plane_funcs intel_cursor_plane_funcs = { + .update_plane = intel_legacy_cursor_update, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .set_property = drm_atomic_helper_plane_set_property, + .atomic_get_property = intel_plane_atomic_get_property, + .atomic_set_property = intel_plane_atomic_set_property, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, +}; + static struct intel_plane * intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -15185,7 +15317,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) cursor->disable_plane = intel_disable_cursor_plane; ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, - 0, &intel_plane_funcs, + 0, &intel_cursor_plane_funcs, intel_cursor_formats, ARRAY_SIZE(intel_cursor_formats), DRM_PLANE_TYPE_CURSOR, @@ -15866,7 +15998,7 @@ static int intel_framebuffer_init(struct drm_device *dev, if (mode_cmd->offsets[0] != 0) return -EINVAL; - drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); + 
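The intel_legacy_cursor_update() addition above, wired in through intel_cursor_plane_funcs, is a general pattern worth isolating: a plane overrides .update_plane with a fastpath and falls back to drm_atomic_helper_update_plane() whenever the update is not trivially safe (modeset pending, watermark-relevant parameter change, visibility change). A much-reduced sketch; my_fastpath_ok() and my_do_flip() are hypothetical driver internals, not i915 code:

#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>

/* Hypothetical guard: only same-crtc, same-format updates qualify. */
static bool my_fastpath_ok(struct drm_plane *plane, struct drm_crtc *crtc,
			   struct drm_framebuffer *fb)
{
	const struct drm_plane_state *old = plane->state;

	return old && old->crtc == crtc && old->fb &&
	       old->fb->format == fb->format;
}

/* Hypothetical minimal flip: swap the fb and poke the hardware. */
static int my_do_flip(struct drm_plane *plane, struct drm_framebuffer *fb)
{
	return 0;
}

static int my_cursor_update_plane(struct drm_plane *plane,
				  struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  int crtc_x, int crtc_y,
				  unsigned int crtc_w, unsigned int crtc_h,
				  uint32_t src_x, uint32_t src_y,
				  uint32_t src_w, uint32_t src_h)
{
	if (!my_fastpath_ok(plane, crtc, fb))
		return drm_atomic_helper_update_plane(plane, crtc, fb,
						      crtc_x, crtc_y,
						      crtc_w, crtc_h,
						      src_x, src_y,
						      src_w, src_h);

	return my_do_flip(plane, fb);
}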
drm_helper_mode_fill_fb_struct(dev, &intel_fb->base, mode_cmd); intel_fb->obj = obj; ret = intel_fill_fb_info(dev_priv, &intel_fb->base); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index cd132c216a67..007a8258ce6b 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -32,6 +32,7 @@ #include "i915_drv.h" #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_dp_dual_mode_helper.h> #include <drm/drm_dp_mst_helper.h> @@ -1814,6 +1815,8 @@ struct drm_plane_state *intel_plane_duplicate_state(struct drm_plane *plane); void intel_plane_destroy_state(struct drm_plane *plane, struct drm_plane_state *state); extern const struct drm_plane_helper_funcs intel_plane_helper_funcs; +int intel_plane_atomic_check_with_state(struct intel_crtc_state *crtc_state, + struct intel_plane_state *intel_state); /* intel_color.c */ void intel_color_init(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 62f215b12eb5..659cebc3bfd2 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -188,7 +188,7 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv) u32 dpfc_ctl; dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane) | DPFC_SR_EN; - if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) + if (params->fb.format->cpp[0] == 2) dpfc_ctl |= DPFC_CTL_LIMIT_2X; else dpfc_ctl |= DPFC_CTL_LIMIT_1X; @@ -235,7 +235,7 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv) int threshold = dev_priv->fbc.threshold; dpfc_ctl = DPFC_CTL_PLANE(params->crtc.plane); - if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) + if (params->fb.format->cpp[0] == 2) threshold++; switch (threshold) { @@ -303,7 +303,7 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) if (IS_IVYBRIDGE(dev_priv)) dpfc_ctl |= IVB_DPFC_CTL_PLANE(params->crtc.plane); - if (drm_format_plane_cpp(params->fb.pixel_format, 0) == 2) + if (params->fb.format->cpp[0] == 2) threshold++; switch (threshold) { @@ -581,7 +581,7 @@ static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb)); size = intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache); - fb_cpp = drm_format_plane_cpp(fbc->state_cache.fb.pixel_format, 0); + fb_cpp = fbc->state_cache.fb.format->cpp[0]; ret = find_compression_threshold(dev_priv, &fbc->compressed_fb, size, fb_cpp); @@ -764,7 +764,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, * platforms that need. 
*/ if (IS_GEN(dev_priv, 5, 6)) cache->fb.ilk_ggtt_offset = i915_gem_object_ggtt_offset(obj, NULL); - cache->fb.pixel_format = fb->pixel_format; + cache->fb.format = fb->format; cache->fb.stride = fb->pitches[0]; cache->fb.fence_reg = get_fence_id(fb); cache->fb.tiling_mode = i915_gem_object_get_tiling(obj); @@ -823,7 +823,7 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } - if (!pixel_format_is_valid(dev_priv, cache->fb.pixel_format)) { + if (!pixel_format_is_valid(dev_priv, cache->fb.format->format)) { fbc->no_fbc_reason = "pixel format is invalid"; return false; } @@ -892,7 +892,7 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc, params->crtc.plane = crtc->plane; params->crtc.fence_y_offset = get_crtc_fence_y_offset(crtc); - params->fb.pixel_format = cache->fb.pixel_format; + params->fb.format = cache->fb.format; params->fb.stride = cache->fb.stride; params->fb.fence_reg = cache->fb.fence_reg; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index beb08982dc0b..570c07d59d1a 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -261,7 +261,7 @@ static int intelfb_create(struct drm_fb_helper *helper, /* This driver doesn't need a VT switch to restore the mode on resume */ info->skip_vt_switch = true; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height); /* If the object is shmemfs backed, it will have given us zeroed pages. @@ -621,7 +621,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, * rather than the current pipe's, since they differ. */ cur_size = intel_crtc->config->base.adjusted_mode.crtc_hdisplay; - cur_size = cur_size * fb->base.bits_per_pixel / 8; + cur_size = cur_size * fb->base.format->cpp[0]; if (fb->base.pitches[0] < cur_size) { DRM_DEBUG_KMS("fb not wide enough for plane %c (%d vs %d)\n", pipe_name(intel_crtc->pipe), @@ -632,14 +632,14 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, cur_size = intel_crtc->config->base.adjusted_mode.crtc_vdisplay; cur_size = intel_fb_align_height(dev, cur_size, - fb->base.pixel_format, + fb->base.format->format, fb->base.modifier); cur_size *= fb->base.pitches[0]; DRM_DEBUG_KMS("pipe %c area: %dx%d, bpp: %d, size: %d\n", pipe_name(intel_crtc->pipe), intel_crtc->config->base.adjusted_mode.crtc_hdisplay, intel_crtc->config->base.adjusted_mode.crtc_vdisplay, - fb->base.bits_per_pixel, + fb->base.format->cpp[0] * 8, cur_size); if (cur_size > max_size) { @@ -660,7 +660,7 @@ static bool intel_fbdev_init_bios(struct drm_device *dev, goto out; } - ifbdev->preferred_bpp = fb->base.bits_per_pixel; + ifbdev->preferred_bpp = fb->base.format->cpp[0] * 8; ifbdev->fb = fb; drm_framebuffer_reference(&ifbdev->fb->base); diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c index e589e17876dc..10610b4077d1 100644 --- a/drivers/gpu/drm/i915/intel_overlay.c +++ b/drivers/gpu/drm/i915/intel_overlay.c @@ -659,6 +659,8 @@ static bool update_scaling_factors(struct intel_overlay *overlay, static void update_colorkey(struct intel_overlay *overlay, struct overlay_registers __iomem *regs) { + const struct drm_framebuffer *fb = + overlay->crtc->base.primary->fb; u32 key = overlay->color_key; u32 flags; @@ -666,24 +668,20 @@ static void update_colorkey(struct intel_overlay *overlay, if (overlay->color_key_enabled) flags |= DST_KEY_ENABLE; - 
switch (overlay->crtc->base.primary->fb->bits_per_pixel) { - case 8: + switch (fb->format->format) { + case DRM_FORMAT_C8: key = 0; flags |= CLK_RGB8I_MASK; break; - - case 16: - if (overlay->crtc->base.primary->fb->depth == 15) { - key = RGB15_TO_COLORKEY(key); - flags |= CLK_RGB15_MASK; - } else { - key = RGB16_TO_COLORKEY(key); - flags |= CLK_RGB16_MASK; - } + case DRM_FORMAT_XRGB1555: + key = RGB15_TO_COLORKEY(key); + flags |= CLK_RGB15_MASK; break; - - case 24: - case 32: + case DRM_FORMAT_RGB565: + key = RGB16_TO_COLORKEY(key); + flags |= CLK_RGB16_MASK; + break; + default: flags |= CLK_RGB24_MASK; break; } diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ae2c0bb4b2e8..9f3b78dfa997 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -652,7 +652,7 @@ static void pineview_update_wm(struct intel_crtc *unused_crtc) &crtc->config->base.adjusted_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = fb->format->cpp[0]; int clock = adjusted_mode->crtc_clock; /* Display SR */ @@ -727,7 +727,7 @@ static bool g4x_compute_wm0(struct drm_i915_private *dev_priv, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = crtc->config->pipe_src_w; - cpp = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = fb->format->cpp[0]; /* Use the small buffer method to calculate plane watermark */ entries = ((clock * cpp / 1000) * display_latency_ns) / 1000; @@ -816,7 +816,7 @@ static bool g4x_compute_srwm(struct drm_i915_private *dev_priv, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = crtc->config->pipe_src_w; - cpp = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = fb->format->cpp[0]; line_time_us = max(htotal * 1000 / clock, 1); line_count = (latency_ns / line_time_us + 1000) / 1000; @@ -963,7 +963,7 @@ static uint16_t vlv_compute_wm_level(struct intel_plane *plane, if (!state->base.visible) return 0; - cpp = drm_format_plane_cpp(state->base.fb->pixel_format, 0); + cpp = state->base.fb->format->cpp[0]; clock = crtc->config->base.adjusted_mode.crtc_clock; htotal = crtc->config->base.adjusted_mode.crtc_htotal; width = crtc->config->pipe_src_w; @@ -1004,7 +1004,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) if (state->base.visible) { wm_state->num_active_planes++; - total_rate += drm_format_plane_cpp(state->base.fb->pixel_format, 0); + total_rate += state->base.fb->format->cpp[0]; } } @@ -1023,7 +1023,7 @@ static void vlv_compute_fifo(struct intel_crtc *crtc) continue; } - rate = drm_format_plane_cpp(state->base.fb->pixel_format, 0); + rate = state->base.fb->format->cpp[0]; plane->wm.fifo_size = fifo_size * rate / total_rate; fifo_left -= plane->wm.fifo_size; } @@ -1455,7 +1455,7 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = crtc->config->pipe_src_w; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = fb->format->cpp[0]; unsigned long line_time_us; int entries; @@ -1541,7 +1541,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) if (IS_GEN2(dev_priv)) cpp = 4; else - cpp = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = fb->format->cpp[0]; planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock, wm_info, fifo_size, cpp, @@ -1568,7 +1568,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) if (IS_GEN2(dev_priv)) cpp = 4; else - cpp = 
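The overlay colorkey rewrite just above is one of the few places where the conversion is more than mechanical: switching on bits_per_pixel could not tell DRM_FORMAT_XRGB1555 from DRM_FORMAT_RGB565 (both 16 bpp) and needed a secondary fb->depth test, while the fourcc identifies the layout directly. Reduced to a sketch, assuming the colorkey macros from the surrounding file:

switch (fb->format->format) {
case DRM_FORMAT_C8:
        key = 0;
        flags |= CLK_RGB8I_MASK;
        break;
case DRM_FORMAT_XRGB1555:
        key = RGB15_TO_COLORKEY(key);
        flags |= CLK_RGB15_MASK;
        break;
case DRM_FORMAT_RGB565:
        key = RGB16_TO_COLORKEY(key);
        flags |= CLK_RGB16_MASK;
        break;
default:        /* the 8888 variants share the RGB24 path */
        flags |= CLK_RGB24_MASK;
        break;
}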
drm_format_plane_cpp(fb->pixel_format, 0); + cpp = fb->format->cpp[0]; planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock, wm_info, fifo_size, cpp, @@ -1621,7 +1621,7 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) if (IS_I915GM(dev_priv) || IS_I945GM(dev_priv)) cpp = 4; else - cpp = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = fb->format->cpp[0]; line_time_us = max(htotal * 1000 / clock, 1); @@ -1781,13 +1781,14 @@ static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, uint32_t mem_value, bool is_lp) { - int cpp = pstate->base.fb ? - drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; + int cpp; if (!cstate->base.active || !pstate->base.visible) return 0; + cpp = pstate->base.fb->format->cpp[0]; + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); if (!is_lp) @@ -1809,13 +1810,14 @@ static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t mem_value) { - int cpp = pstate->base.fb ? - drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; uint32_t method1, method2; + int cpp; if (!cstate->base.active || !pstate->base.visible) return 0; + cpp = pstate->base.fb->format->cpp[0]; + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), cpp, mem_value); method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), cstate->base.adjusted_mode.crtc_htotal, @@ -1853,12 +1855,13 @@ static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, const struct intel_plane_state *pstate, uint32_t pri_val) { - int cpp = pstate->base.fb ? - drm_format_plane_cpp(pstate->base.fb->pixel_format, 0) : 0; + int cpp; if (!cstate->base.active || !pstate->base.visible) return 0; + cpp = pstate->base.fb->format->cpp[0]; + return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->base.dst), cpp); } @@ -3213,13 +3216,17 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, int y) { struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); - struct drm_framebuffer *fb = pstate->fb; uint32_t down_scale_amount, data_rate; uint32_t width = 0, height = 0; - unsigned format = fb ? 
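Note the small reordering in the ilk_compute_*_wm() hunks: the old code evaluated the "fb ? drm_format_plane_cpp(...) : 0" ternary at declaration time, before the active/visible check, and so had to tolerate a NULL fb. The new code relies on the invariant that a visible plane state always has a framebuffer attached, so the dereference can move after the early return:

if (!cstate->base.active || !pstate->base.visible)
        return 0;

/* a visible plane state always carries an fb */
cpp = pstate->base.fb->format->cpp[0];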
fb->pixel_format : DRM_FORMAT_XRGB8888; + struct drm_framebuffer *fb; + u32 format; if (!intel_pstate->base.visible) return 0; + + fb = pstate->fb; + format = fb->format->format; + if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR) return 0; if (y && format != DRM_FORMAT_NV12) @@ -3235,13 +3242,13 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, if (format == DRM_FORMAT_NV12) { if (y) /* y-plane data rate */ data_rate = width * height * - drm_format_plane_cpp(format, 0); + fb->format->cpp[0]; else /* uv-plane data rate */ data_rate = (width / 2) * (height / 2) * - drm_format_plane_cpp(format, 1); + fb->format->cpp[1]; } else { /* for packed formats */ - data_rate = width * height * drm_format_plane_cpp(format, 0); + data_rate = width * height * fb->format->cpp[0]; } down_scale_amount = skl_plane_downscale_amount(intel_pstate); @@ -3307,7 +3314,7 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, return 0; /* For packed formats, no y-plane, return 0 */ - if (y && fb->pixel_format != DRM_FORMAT_NV12) + if (y && fb->format->format != DRM_FORMAT_NV12) return 0; /* For Non Y-tile return 8-blocks */ @@ -3322,15 +3329,15 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate, swap(src_w, src_h); /* Halve UV plane width and height for NV12 */ - if (fb->pixel_format == DRM_FORMAT_NV12 && !y) { + if (fb->format->format == DRM_FORMAT_NV12 && !y) { src_w /= 2; src_h /= 2; } - if (fb->pixel_format == DRM_FORMAT_NV12 && !y) - plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1); + if (fb->format->format == DRM_FORMAT_NV12 && !y) + plane_bpp = fb->format->cpp[1]; else - plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0); + plane_bpp = fb->format->cpp[0]; if (drm_rotation_90_or_270(pstate->rotation)) { switch (plane_bpp) { @@ -3590,13 +3597,13 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv, if (drm_rotation_90_or_270(pstate->rotation)) swap(width, height); - cpp = drm_format_plane_cpp(fb->pixel_format, 0); + cpp = fb->format->cpp[0]; plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); if (drm_rotation_90_or_270(pstate->rotation)) { - int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? - drm_format_plane_cpp(fb->pixel_format, 1) : - drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = (fb->format->format == DRM_FORMAT_NV12) ? 
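For two-plane NV12 the byte cost is per format plane: plane 0 is full-resolution Y at one byte per pixel, plane 1 the 2x2-subsampled interleaved CbCr pair at two bytes per sample, i.e. cpp = {1, 2} in the upstream format table. The data-rate logic above boils down to something like this sketch (the function name is made up):

static u32 nv12_plane_data_rate(const struct drm_framebuffer *fb,
                                u32 width, u32 height, bool y_plane)
{
        if (y_plane)
                return width * height * fb->format->cpp[0];

        /* CbCr plane: half width, half height, 2 bytes per sample */
        return (width / 2) * (height / 2) * fb->format->cpp[1];
}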
+ fb->format->cpp[1] : + fb->format->cpp[0]; switch (cpp) { case 1: diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 8f131a08d440..ff766c0cb873 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -223,7 +223,7 @@ skl_update_plane(struct drm_plane *drm_plane, PLANE_CTL_PIPE_GAMMA_ENABLE | PLANE_CTL_PIPE_CSC_ENABLE; - plane_ctl |= skl_plane_ctl_format(fb->pixel_format); + plane_ctl |= skl_plane_ctl_format(fb->format->format); plane_ctl |= skl_plane_ctl_tiling(fb->modifier); plane_ctl |= skl_plane_ctl_rotation(rotation); @@ -357,7 +357,7 @@ vlv_update_plane(struct drm_plane *dplane, sprctl = SP_ENABLE; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_YUYV: sprctl |= SP_FORMAT_YUV422 | SP_YUV_ORDER_YUYV; break; @@ -443,7 +443,7 @@ vlv_update_plane(struct drm_plane *dplane, sprctl |= SP_SOURCE_KEY; if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) - chv_update_csc(intel_plane, fb->pixel_format); + chv_update_csc(intel_plane, fb->format->format); I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); @@ -502,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane, sprctl = SPRITE_ENABLE; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_XBGR8888: sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; break; @@ -640,7 +640,7 @@ ilk_update_plane(struct drm_plane *plane, dvscntr = DVS_ENABLE; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_XBGR8888: dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR; break; @@ -866,7 +866,7 @@ intel_check_sprite_plane(struct drm_plane *plane, src_y = src->y1 >> 16; src_h = drm_rect_height(src) >> 16; - if (format_is_yuv(fb->pixel_format)) { + if (format_is_yuv(fb->format->format)) { src_x &= ~1; src_w &= ~1; @@ -885,7 +885,7 @@ intel_check_sprite_plane(struct drm_plane *plane, /* Check size restrictions when scaling */ if (state->base.visible && (src_w != crtc_w || src_h != crtc_h)) { unsigned int width_bytes; - int cpp = drm_format_plane_cpp(fb->pixel_format, 0); + int cpp = fb->format->cpp[0]; WARN_ON(!can_scale); diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 516d06490465..88cd11d30134 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -454,10 +454,8 @@ static int imx_ldb_register(struct drm_device *drm, DRM_MODE_ENCODER_LVDS, NULL); if (imx_ldb_ch->bridge) { - imx_ldb_ch->bridge->encoder = encoder; - - imx_ldb_ch->encoder.bridge = imx_ldb_ch->bridge; - ret = drm_bridge_attach(drm, imx_ldb_ch->bridge); + ret = drm_bridge_attach(&imx_ldb_ch->encoder, + imx_ldb_ch->bridge, NULL); if (ret) { DRM_ERROR("Failed to initialize bridge with drm\n"); return ret; @@ -738,8 +736,6 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, for (i = 0; i < 2; i++) { struct imx_ldb_channel *channel = &imx_ldb->channel[i]; - if (channel->bridge) - drm_bridge_detach(channel->bridge); if (channel->panel) drm_panel_detach(channel->panel); diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index e74a0ad52950..8b5294d47cee 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -77,7 +77,7 @@ drm_plane_state_to_eba(struct drm_plane_state *state) BUG_ON(!cma_obj); return cma_obj->paddr + fb->offsets[0] + fb->pitches[0] * y + - drm_format_plane_cpp(fb->pixel_format, 0) * x; + fb->format->cpp[0] * x; } static inline unsigned 
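The imx-ldb hunk above is the first of many conversions to the new three-argument drm_bridge_attach(encoder, bridge, previous). The core now sets up the encoder/bridge back-pointers itself, `previous` chains a bridge behind another one (NULL attaches the first bridge in the chain), and the explicit drm_bridge_detach() calls disappear from the drivers because detaching is handled by the core as well. Before and after, sketched:

/* old: wire the pointers by hand, then attach by device */
bridge->encoder = encoder;
encoder->bridge = bridge;
ret = drm_bridge_attach(drm, bridge);

/* new: attach to the encoder; the core does the wiring.
 * Pass a previous bridge instead of NULL to extend a chain. */
ret = drm_bridge_attach(encoder, bridge, NULL);
if (ret)
        DRM_ERROR("failed to attach bridge: %d\n", ret);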
long @@ -92,11 +92,11 @@ drm_plane_state_to_ubo(struct drm_plane_state *state) cma_obj = drm_fb_cma_get_gem_obj(fb, 1); BUG_ON(!cma_obj); - x /= drm_format_horz_chroma_subsampling(fb->pixel_format); - y /= drm_format_vert_chroma_subsampling(fb->pixel_format); + x /= drm_format_horz_chroma_subsampling(fb->format->format); + y /= drm_format_vert_chroma_subsampling(fb->format->format); return cma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y + - drm_format_plane_cpp(fb->pixel_format, 1) * x - eba; + fb->format->cpp[1] * x - eba; } static inline unsigned long @@ -111,11 +111,11 @@ drm_plane_state_to_vbo(struct drm_plane_state *state) cma_obj = drm_fb_cma_get_gem_obj(fb, 2); BUG_ON(!cma_obj); - x /= drm_format_horz_chroma_subsampling(fb->pixel_format); - y /= drm_format_vert_chroma_subsampling(fb->pixel_format); + x /= drm_format_horz_chroma_subsampling(fb->format->format); + y /= drm_format_vert_chroma_subsampling(fb->format->format); return cma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y + - drm_format_plane_cpp(fb->pixel_format, 2) * x - eba; + fb->format->cpp[2] * x - eba; } void ipu_plane_put_resources(struct ipu_plane *ipu_plane) @@ -281,7 +281,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, */ if (old_fb && (state->src_w != old_state->src_w || state->src_h != old_state->src_h || - fb->pixel_format != old_fb->pixel_format)) + fb->format != old_fb->format)) crtc_state->mode_changed = true; eba = drm_plane_state_to_eba(state); @@ -295,7 +295,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (old_fb && fb->pitches[0] != old_fb->pitches[0]) crtc_state->mode_changed = true; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: @@ -315,7 +315,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (vbo & 0x7 || vbo > 0xfffff8) return -EINVAL; - if (old_fb && (fb->pixel_format == old_fb->pixel_format)) { + if (old_fb && (fb->format == old_fb->format)) { old_vbo = drm_plane_state_to_vbo(old_state); if (vbo != old_vbo) crtc_state->mode_changed = true; @@ -332,7 +332,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, if (ubo & 0x7 || ubo > 0xfffff8) return -EINVAL; - if (old_fb && (fb->pixel_format == old_fb->pixel_format)) { + if (old_fb && (fb->format == old_fb->format)) { old_ubo = drm_plane_state_to_ubo(old_state); if (ubo != old_ubo) crtc_state->mode_changed = true; @@ -348,8 +348,8 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, * The x/y offsets must be even in case of horizontal/vertical * chroma subsampling. 
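The ipuv3 helpers above compute the byte offset of the scanout origin inside each plane of a planar buffer; for the chroma planes both coordinates are first scaled down by the format's subsampling factors. Ignoring the driver's base-address and eba bookkeeping, the arithmetic is (the name is illustrative):

static unsigned long plane_origin_offset(const struct drm_framebuffer *fb,
                                         int plane, int x, int y)
{
        if (plane > 0) {
                /* chroma planes are subsampled */
                x /= drm_format_horz_chroma_subsampling(fb->format->format);
                y /= drm_format_vert_chroma_subsampling(fb->format->format);
        }

        return fb->offsets[plane] + fb->pitches[plane] * y +
               fb->format->cpp[plane] * x;
}

The even-offset checks in ipu_plane_atomic_check() exist for the same reason: an odd x or y with hsub/vsub == 2 would land mid-sample.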
*/ - hsub = drm_format_horz_chroma_subsampling(fb->pixel_format); - vsub = drm_format_vert_chroma_subsampling(fb->pixel_format); + hsub = drm_format_horz_chroma_subsampling(fb->format->format); + vsub = drm_format_vert_chroma_subsampling(fb->format->format); if (((state->src_x >> 16) & (hsub - 1)) || ((state->src_y >> 16) & (vsub - 1))) return -EINVAL; @@ -392,13 +392,13 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true); break; case IPU_DP_FLOW_SYNC_FG: - ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format); + ics = ipu_drm_fourcc_to_colorspace(state->fb->format->format); ipu_dp_setup_channel(ipu_plane->dp, ics, IPUV3_COLORSPACE_UNKNOWN); ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x, state->crtc_y); /* Enable local alpha on partial plane */ - switch (state->fb->pixel_format) { + switch (state->fb->format->format) { case DRM_FORMAT_ARGB1555: case DRM_FORMAT_ABGR1555: case DRM_FORMAT_RGBA5551: @@ -421,11 +421,11 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, ipu_cpmem_zero(ipu_plane->ipu_ch); ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16, state->src_h >> 16); - ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format); + ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->format->format); ipu_cpmem_set_high_priority(ipu_plane->ipu_ch); ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1); ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]); - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_YUV420: case DRM_FORMAT_YVU420: case DRM_FORMAT_YUV422: @@ -434,9 +434,9 @@ static void ipu_plane_atomic_update(struct drm_plane *plane, case DRM_FORMAT_YVU444: ubo = drm_plane_state_to_ubo(state); vbo = drm_plane_state_to_vbo(state); - if (fb->pixel_format == DRM_FORMAT_YVU420 || - fb->pixel_format == DRM_FORMAT_YVU422 || - fb->pixel_format == DRM_FORMAT_YVU444) + if (fb->format->format == DRM_FORMAT_YVU420 || + fb->format->format == DRM_FORMAT_YVU422 || + fb->format->format == DRM_FORMAT_YVU444) swap(ubo, vbo); ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 8582a83c0d9b..d5c06fd89f90 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -191,9 +191,7 @@ static int imx_pd_register(struct drm_device *drm, drm_panel_attach(imxpd->panel, &imxpd->connector); if (imxpd->bridge) { - imxpd->bridge->encoder = encoder; - encoder->bridge = imxpd->bridge; - ret = drm_bridge_attach(drm, imxpd->bridge); + ret = drm_bridge_attach(encoder, imxpd->bridge, NULL); if (ret < 0) { dev_err(imxpd->dev, "failed to attach bridge: %d\n", ret); @@ -286,8 +284,6 @@ static void imx_pd_unbind(struct device *dev, struct device *master, { struct imx_parallel_display *imxpd = dev_get_drvdata(dev); - if (imxpd->bridge) - drm_bridge_detach(imxpd->bridge); if (imxpd->panel) drm_panel_detach(imxpd->panel); diff --git a/drivers/gpu/drm/lib/drm_random.c b/drivers/gpu/drm/lib/drm_random.c new file mode 100644 index 000000000000..7b12a68c3b54 --- /dev/null +++ b/drivers/gpu/drm/lib/drm_random.c @@ -0,0 +1,41 @@ +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/types.h> + +#include "drm_random.h" + +static inline u32 drm_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) +{ + return upper_32_bits((u64)prandom_u32_state(state) * ep_ro); +} + +void drm_random_reorder(unsigned 
int *order, unsigned int count, + struct rnd_state *state) +{ + unsigned int i, j; + + for (i = 0; i < count; ++i) { + BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32)); + j = drm_prandom_u32_max_state(count, state); + swap(order[i], order[j]); + } +} +EXPORT_SYMBOL(drm_random_reorder); + +unsigned int *drm_random_order(unsigned int count, struct rnd_state *state) +{ + unsigned int *order, i; + + order = kmalloc_array(count, sizeof(*order), GFP_TEMPORARY); + if (!order) + return order; + + for (i = 0; i < count; i++) + order[i] = i; + + drm_random_reorder(order, count, state); + return order; +} +EXPORT_SYMBOL(drm_random_order); diff --git a/drivers/gpu/drm/lib/drm_random.h b/drivers/gpu/drm/lib/drm_random.h new file mode 100644 index 000000000000..a78644bea7f9 --- /dev/null +++ b/drivers/gpu/drm/lib/drm_random.h @@ -0,0 +1,25 @@ +#ifndef __DRM_RANDOM_H__ +#define __DRM_RANDOM_H__ + +/* This is a temporary home for a couple of utility functions that should + * be transposed to lib/ at the earliest convenience. + */ + +#include <linux/random.h> + +#define DRM_RND_STATE_INITIALIZER(seed__) ({ \ + struct rnd_state state__; \ + prandom_seed_state(&state__, (seed__)); \ + state__; \ +}) + +#define DRM_RND_STATE(name__, seed__) \ + struct rnd_state name__ = DRM_RND_STATE_INITIALIZER(seed__) + +unsigned int *drm_random_order(unsigned int count, + struct rnd_state *state); +void drm_random_reorder(unsigned int *order, + unsigned int count, + struct rnd_state *state); + +#endif /* !__DRM_RANDOM_H__ */ diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 90fb831ef031..3bd3bd688d1a 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -63,6 +63,7 @@ enum mtk_dpi_out_color_format { struct mtk_dpi { struct mtk_ddp_comp ddp_comp; struct drm_encoder encoder; + struct drm_bridge *bridge; void __iomem *regs; struct device *dev; struct clk *engine_clk; @@ -620,8 +621,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data) /* Currently DPI0 is fixed to be driven by OVL1 */ dpi->encoder.possible_crtcs = BIT(1); - dpi->encoder.bridge->encoder = &dpi->encoder; - ret = drm_bridge_attach(dpi->encoder.dev, dpi->encoder.bridge); + ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL); if (ret) { dev_err(dev, "Failed to attach bridge: %d\n", ret); goto err_cleanup; @@ -718,9 +718,9 @@ static int mtk_dpi_probe(struct platform_device *pdev) dev_info(dev, "Found bridge node: %s\n", bridge_node->full_name); - dpi->encoder.bridge = of_drm_find_bridge(bridge_node); + dpi->bridge = of_drm_find_bridge(bridge_node); of_node_put(bridge_node); - if (!dpi->encoder.bridge) + if (!dpi->bridge) return -EPROBE_DEFER; comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DPI); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 4b7fe7eaec01..b5f88e6d078e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -321,7 +321,8 @@ static void mtk_drm_unbind(struct device *dev) { struct mtk_drm_private *private = dev_get_drvdata(dev); - drm_put_dev(private->drm); + drm_dev_unregister(private->drm); + drm_dev_unref(private->drm); private->drm = NULL; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_fb.c b/drivers/gpu/drm/mediatek/mtk_drm_fb.c index 147df85399ab..d4246c9dceae 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_fb.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_fb.c @@ -82,7 +82,7 @@ static struct mtk_drm_fb *mtk_drm_framebuffer_init(struct 
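drm_random.c/.h above add a small seeded-PRNG permutation utility, flagged in the header as a temporary home before a move to lib/. drm_random_reorder() is a plain swap-based shuffle and drm_random_order() hands back a shuffled 0..count-1; with a fixed seed the permutation is reproducible from run to run, which is what selftests want. A usage sketch (visit() is illustrative):

#include <linux/slab.h>

#include "drm_random.h"

static int exercise(unsigned int count)
{
        DRM_RND_STATE(prng, 0x12345678);        /* fixed seed: reproducible */
        unsigned int *order, i;

        order = drm_random_order(count, &prng);
        if (!order)
                return -ENOMEM;

        for (i = 0; i < count; i++)
                visit(order[i]);

        kfree(order);
        return 0;
}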
drm_device *dev, if (!mtk_fb) return ERR_PTR(-ENOMEM); - drm_helper_mode_fill_fb_struct(&mtk_fb->base, mode); + drm_helper_mode_fill_fb_struct(dev, &mtk_fb->base, mode); mtk_fb->gem_obj = obj; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index c461a232cbf5..e405e89ed5e5 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -133,9 +133,9 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, mtk_gem = to_mtk_gem_obj(gem); addr = mtk_gem->dma_addr; pitch = fb->pitches[0]; - format = fb->pixel_format; + format = fb->format->format; - addr += (plane->state->src.x1 >> 16) * drm_format_plane_cpp(format, 0); + addr += (plane->state->src.x1 >> 16) * fb->format->cpp[0]; addr += (plane->state->src.y1 >> 16) * pitch; state->pending.enable = true; diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 2c42f90809d8..dd71cbb1a622 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -622,26 +622,6 @@ static const struct drm_connector_helper_funcs .get_modes = mtk_dsi_connector_get_modes, }; -static int mtk_drm_attach_bridge(struct drm_bridge *bridge, - struct drm_encoder *encoder) -{ - int ret; - - if (!bridge) - return -ENOENT; - - encoder->bridge = bridge; - bridge->encoder = encoder; - ret = drm_bridge_attach(encoder->dev, bridge); - if (ret) { - DRM_ERROR("Failed to attach bridge to drm\n"); - encoder->bridge = NULL; - bridge->encoder = NULL; - } - - return ret; -} - static int mtk_dsi_create_connector(struct drm_device *drm, struct mtk_dsi *dsi) { int ret; @@ -692,8 +672,10 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi) dsi->encoder.possible_crtcs = 1; /* If there's a bridge, attach to it and let it create the connector */ - ret = mtk_drm_attach_bridge(dsi->bridge, &dsi->encoder); + ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL); if (ret) { + DRM_ERROR("Failed to attach bridge to drm\n"); + /* Otherwise create our own connector and attach to a panel */ ret = mtk_dsi_create_connector(drm, dsi); if (ret) diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 0e8c4d9af340..c26251260b83 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -149,6 +149,7 @@ struct hdmi_audio_param { struct mtk_hdmi { struct drm_bridge bridge; + struct drm_bridge *next_bridge; struct drm_connector conn; struct device *dev; struct phy *phy; @@ -1314,9 +1315,9 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge) return ret; } - if (bridge->next) { - bridge->next->encoder = bridge->encoder; - ret = drm_bridge_attach(bridge->encoder->dev, bridge->next); + if (hdmi->next_bridge) { + ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge, + bridge); if (ret) { dev_err(hdmi->dev, "Failed to attach external bridge: %d\n", ret); @@ -1510,8 +1511,8 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, of_node_put(ep); if (!of_device_is_compatible(remote, "hdmi-connector")) { - hdmi->bridge.next = of_drm_find_bridge(remote); - if (!hdmi->bridge.next) { + hdmi->next_bridge = of_drm_find_bridge(remote); + if (!hdmi->next_bridge) { dev_err(dev, "Waiting for external bridge\n"); of_node_put(remote); return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c index 4942ca090b46..642b2fab42ff 100644 --- a/drivers/gpu/drm/meson/meson_plane.c +++ 
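mtk-hdmi shows the chained variant of the drm_bridge_attach() conversion: the driver is itself a bridge and used to store its downstream peer in bridge.next for the core to walk; it now keeps its own next_bridge pointer and passes itself as the `previous` argument so the core builds the chain. Roughly (the accessor name is illustrative):

static int attach_chained_bridge(struct drm_bridge *bridge)
{
        struct mtk_hdmi *hdmi = bridge_to_mtk_hdmi(bridge);

        /* chain the external bridge behind this one */
        return drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
                                 bridge);
}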
b/drivers/gpu/drm/meson/meson_plane.c @@ -113,7 +113,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane, if (meson_vpu_is_compatible(priv, "amlogic,meson-gxbb-vpu")) priv->viu.osd1_blk0_cfg[0] |= OSD_OUTPUT_COLOR_RGB; - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_XRGB8888: /* For XRGB, replace the pixel's alpha by 0xFF */ writel_bits_relaxed(OSD_REPLACE_EN, OSD_REPLACE_EN, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 3e02ac20777c..87e0934773de 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -15,6 +15,7 @@ #include <video/vga.h> +#include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c index 88dd2214114d..1a665e1671b8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_fb.c +++ b/drivers/gpu/drm/mgag200/mgag200_fb.c @@ -24,7 +24,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev, struct drm_gem_object *obj; struct mgag200_bo *bo; int src_offset, dst_offset; - int bpp = (mfbdev->mfb.base.bits_per_pixel + 7)/8; + int bpp = mfbdev->mfb.base.format->cpp[0]; int ret = -EBUSY; bool unmap = false; bool store_for_later = false; @@ -217,7 +217,7 @@ static int mgag200fb_create(struct drm_fb_helper *helper, info->apertures->ranges[0].base = mdev->dev->mode_config.fb_base; info->apertures->ranges[0].size = mdev->mc.vram_size; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &mfbdev->helper, sizes->fb_width, sizes->fb_height); diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index e79cbc25ae3c..95d628b9457e 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -34,7 +34,7 @@ int mgag200_framebuffer_init(struct drm_device *dev, { int ret; - drm_helper_mode_fill_fb_struct(&gfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &gfb->base, mode_cmd); gfb->obj = obj; ret = drm_framebuffer_init(dev, &gfb->base, &mga_fb_funcs); if (ret) { diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 3a03ac4045d8..067dfbc91b1c 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -38,11 +38,11 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) WREG8(DAC_INDEX + MGA1064_INDEX, 0); - if (fb && fb->bits_per_pixel == 16) { - int inc = (fb->depth == 15) ? 8 : 4; + if (fb && fb->format->cpp[0] * 8 == 16) { + int inc = (fb->format->depth == 15) ? 
8 : 4; u8 r, b; for (i = 0; i < MGAG200_LUT_SIZE; i += inc) { - if (fb->depth == 16) { + if (fb->format->depth == 16) { if (i > (MGAG200_LUT_SIZE >> 1)) { r = b = 0; } else { @@ -880,6 +880,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct mga_device *mdev = dev->dev_private; + const struct drm_framebuffer *fb = crtc->primary->fb; int hdisplay, hsyncstart, hsyncend, htotal; int vdisplay, vsyncstart, vsyncend, vtotal; int pitch; @@ -902,7 +903,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, /* 0x48: */ 0, 0, 0, 0, 0, 0, 0, 0 }; - bppshift = mdev->bpp_shifts[(crtc->primary->fb->bits_per_pixel >> 3) - 1]; + bppshift = mdev->bpp_shifts[fb->format->cpp[0] - 1]; switch (mdev->type) { case G200_SE_A: @@ -941,12 +942,12 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, break; } - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_8bits; break; case 16: - if (crtc->primary->fb->depth == 15) + if (fb->format->depth == 15) dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_15bits; else dacvalue[MGA1064_MUL_CTL] = MGA1064_MUL_CTL_16bits; @@ -997,8 +998,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, WREG_SEQ(3, 0); WREG_SEQ(4, 0xe); - pitch = crtc->primary->fb->pitches[0] / (crtc->primary->fb->bits_per_pixel / 8); - if (crtc->primary->fb->bits_per_pixel == 24) + pitch = fb->pitches[0] / fb->format->cpp[0]; + if (fb->format->cpp[0] * 8 == 24) pitch = (pitch * 3) >> (4 - bppshift); else pitch = pitch >> (4 - bppshift); @@ -1075,7 +1076,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, ((vdisplay & 0xc00) >> 7) | ((vsyncstart & 0xc00) >> 5) | ((vdisplay & 0x400) >> 3); - if (crtc->primary->fb->bits_per_pixel == 24) + if (fb->format->cpp[0] * 8 == 24) ext_vga[3] = (((1 << bppshift) * 3) - 1) | 0x80; else ext_vga[3] = ((1 << bppshift) - 1) | 0x80; @@ -1138,9 +1139,9 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, u32 bpp; u32 mb; - if (crtc->primary->fb->bits_per_pixel > 16) + if (fb->format->cpp[0] * 8 > 16) bpp = 32; - else if (crtc->primary->fb->bits_per_pixel > 8) + else if (fb->format->cpp[0] * 8 > 8) bpp = 16; else bpp = 8; diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index c8d1f19c9a6d..2bd8dad76105 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -579,6 +579,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); struct drm_bridge *bridge = NULL; struct dsi_bridge *dsi_bridge; + struct drm_encoder *encoder; int ret; dsi_bridge = devm_kzalloc(msm_dsi->dev->dev, @@ -590,10 +591,18 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id) dsi_bridge->id = id; + /* + * HACK: we may not know the external DSI bridge device's mode + * flags here. We'll get to know them only when the device + * attaches to the dsi host. 
For now, assume the bridge supports + * DSI video mode + */ + encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; + bridge = &dsi_bridge->base; bridge->funcs = &dsi_mgr_bridge_funcs; - ret = drm_bridge_attach(msm_dsi->dev, bridge); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) goto fail; @@ -628,11 +637,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id) encoder = msm_dsi->encoders[MSM_DSI_VIDEO_ENCODER_ID]; /* link the internal dsi bridge to the external bridge */ - int_bridge->next = ext_bridge; - /* set the external bridge's encoder as dsi's encoder */ - ext_bridge->encoder = encoder; - - drm_bridge_attach(dev, ext_bridge); + drm_bridge_attach(encoder, ext_bridge, int_bridge); /* * we need the drm_connector created by the external bridge diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c index 2bc73f82f3f5..931a5c97cccf 100644 --- a/drivers/gpu/drm/msm/edp/edp_bridge.c +++ b/drivers/gpu/drm/msm/edp/edp_bridge.c @@ -106,7 +106,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp) bridge = &edp_bridge->base; bridge->funcs = &edp_bridge_funcs; - ret = drm_bridge_attach(edp->dev, bridge); + ret = drm_bridge_attach(edp->encoder, bridge, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index bacbd5d8df0e..4e6d1bf27474 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -227,7 +227,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi) bridge = &hdmi_bridge->base; bridge->funcs = &msm_hdmi_bridge_funcs; - ret = drm_bridge_attach(hdmi->dev, bridge); + ret = drm_bridge_attach(hdmi->encoder, bridge, NULL); if (ret) goto fail; diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c index 911e4690d36a..53619d07677e 100644 --- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c @@ -43,7 +43,7 @@ enum mdp4_frame_format mdp4_get_frame_format(struct drm_framebuffer *fb) if (fb->modifier == DRM_FORMAT_MOD_SAMSUNG_64_32_TILE) is_tile = true; - if (fb->pixel_format == DRM_FORMAT_NV12 && is_tile) + if (fb->format->format == DRM_FORMAT_NV12 && is_tile) return FRAME_TILE_YCBCR_420; return FRAME_LINEAR; diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index c099da7bc212..75247ea4335b 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c @@ -699,7 +699,7 @@ static int mdp5_plane_mode_set(struct drm_plane *plane, unsigned long flags; int ret; - nplanes = drm_format_num_planes(fb->pixel_format); + nplanes = fb->format->num_planes; /* bad formats should already be rejected: */ if (WARN_ON(nplanes > pipe2nclients(pipe))) diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 9acf544e7a8f..5cf165c9c3a9 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -41,7 +41,7 @@ static int msm_framebuffer_create_handle(struct drm_framebuffer *fb, static void msm_framebuffer_destroy(struct drm_framebuffer *fb) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); @@ -65,10 +65,10 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct msm_framebuffer 
*msm_fb = to_msm_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n", - fb->width, fb->height, (char *)&fb->pixel_format, + fb->width, fb->height, (char *)&fb->format->format, drm_framebuffer_read_refcount(fb), fb->base.id); for (i = 0; i < n; i++) { @@ -87,7 +87,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int ret, i, n = drm_format_num_planes(fb->pixel_format); + int ret, i, n = fb->format->num_planes; uint64_t iova; for (i = 0; i < n; i++) { @@ -103,7 +103,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id) void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id) { struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; for (i = 0; i < n; i++) msm_gem_put_iova(msm_fb->planes[i], id); @@ -217,7 +217,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, msm_fb->planes[i] = bos[i]; } - drm_helper_mode_fill_fb_struct(fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs); if (ret) { diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index bffe93498512..5d68ab362d75 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -148,7 +148,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, strcpy(fbi->fix.id, "msm"); - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); dev->mode_config.fb_base = paddr; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 081890336ce7..e10a4eda4078 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -46,7 +46,7 @@ static int mxsfb_set_pixel_fmt(struct mxsfb_drm_private *mxsfb) { struct drm_crtc *crtc = &mxsfb->pipe.crtc; struct drm_device *drm = crtc->dev; - const u32 format = crtc->primary->state->fb->pixel_format; + const u32 format = crtc->primary->state->fb->format->format; u32 ctrl, ctrl1; ctrl = CTRL_BYPASS_COUNT | CTRL_MASTER; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 79a18bf48b54..955441f71500 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -395,8 +395,8 @@ static int mxsfb_probe(struct platform_device *pdev) pdev->id_entry = of_id->data; drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev); - if (!drm) - return -ENOMEM; + if (IS_ERR(drm)) + return PTR_ERR(drm); ret = mxsfb_load(drm, 0); if (ret) diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 59d1d1c5de5f..a72754d73c84 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -460,6 +460,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index]; struct nv04_crtc_reg *savep = &nv04_display(dev)->saved_reg.crtc_reg[nv_crtc->index]; + const struct drm_framebuffer *fb = crtc->primary->fb; struct drm_encoder *encoder; bool 
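The mxsfb probe fix above deserves a second look: drm_dev_alloc() reports failure through ERR_PTR(), not NULL, so the old NULL test would have let an encoded errno through as if it were a valid device pointer. The idiom is:

drm = drm_dev_alloc(&mxsfb_driver, &pdev->dev);
if (IS_ERR(drm))
        return PTR_ERR(drm);    /* propagate the real errno, e.g. -ENOMEM */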
lvds_output = false, tmds_output = false, tv_output = false, off_chip_digital = false; @@ -569,7 +570,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) regp->CRTC[NV_CIO_CRE_86] = 0x1; } - regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (crtc->primary->fb->depth + 1) / 8; + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] = (fb->format->depth + 1) / 8; /* Enable slaved mode (called MODE_TV in nv4ref.h) */ if (lvds_output || tmds_output || tv_output) regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (1 << 7); @@ -583,7 +584,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) regp->ramdac_gen_ctrl = NV_PRAMDAC_GENERAL_CONTROL_BPC_8BITS | NV_PRAMDAC_GENERAL_CONTROL_VGA_STATE_SEL | NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON; - if (crtc->primary->fb->depth == 16) + if (fb->format->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; if (drm->device.info.chipset >= 0x11) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG; @@ -847,16 +848,16 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, nv_crtc->fb.offset = fb->nvbo->bo.offset; - if (nv_crtc->lut.depth != drm_fb->depth) { - nv_crtc->lut.depth = drm_fb->depth; + if (nv_crtc->lut.depth != drm_fb->format->depth) { + nv_crtc->lut.depth = drm_fb->format->depth; nv_crtc_gamma_load(crtc); } /* Update the framebuffer format. */ regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] &= ~3; - regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (crtc->primary->fb->depth + 1) / 8; + regp->CRTC[NV_CIO_CRE_PIXEL_INDEX] |= (drm_fb->format->depth + 1) / 8; regp->ramdac_gen_ctrl &= ~NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; - if (crtc->primary->fb->depth == 16) + if (drm_fb->format->depth == 16) regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL; crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_PIXEL_INDEX); NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL, @@ -873,11 +874,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, /* Update the framebuffer location. */ regp->fb_start = nv_crtc->fb.offset & ~3; - regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8); + regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->format->cpp[0]); nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start); /* Update the arbitration parameters. */ - nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->bits_per_pixel, + nouveau_calc_arb(dev, crtc->mode.clock, drm_fb->format->cpp[0] * 8, &arb_burst, &arb_lwm); regp->CRTC[NV_CIO_CRE_FF_INDEX] = arb_burst; diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index c2947ef7d4fc..2e5bb2afda7c 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -290,6 +290,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_display_mode *output_mode = &nv_encoder->mode; struct drm_connector *connector = &nv_connector->base; + const struct drm_framebuffer *fb = encoder->crtc->primary->fb; uint32_t mode_ratio, panel_ratio; NV_DEBUG(drm, "Output mode on CRTC %d:\n", nv_crtc->index); @@ -415,7 +416,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, /* Output property. 
*/ if ((nv_connector->dithering_mode == DITHERING_MODE_ON) || (nv_connector->dithering_mode == DITHERING_MODE_AUTO && - encoder->crtc->primary->fb->depth > connector->display_info.bpc * 3)) { + fb->format->depth > connector->display_info.bpc * 3)) { if (drm->device.info.chipset == 0x11) regp->dither = savep->dither | 0x00010000; else { diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c index a79514d440b3..6275c270df25 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c +++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c @@ -145,16 +145,16 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, nvif_wr32(dev, NV_PVIDEO_POINT_OUT(flip), crtc_y << 16 | crtc_x); nvif_wr32(dev, NV_PVIDEO_SIZE_OUT(flip), crtc_h << 16 | crtc_w); - if (fb->pixel_format != DRM_FORMAT_UYVY) + if (fb->format->format != DRM_FORMAT_UYVY) format |= NV_PVIDEO_FORMAT_COLOR_LE_CR8YB8CB8YA8; - if (fb->pixel_format == DRM_FORMAT_NV12) + if (fb->format->format == DRM_FORMAT_NV12) format |= NV_PVIDEO_FORMAT_PLANAR; if (nv_plane->iturbt_709) format |= NV_PVIDEO_FORMAT_MATRIX_ITURBT709; if (nv_plane->colorkey & (1 << 24)) format |= NV_PVIDEO_FORMAT_DISPLAY_COLOR_KEY; - if (fb->pixel_format == DRM_FORMAT_NV12) { + if (fb->format->format == DRM_FORMAT_NV12) { nvif_wr32(dev, NV_PVIDEO_UVPLANE_BASE(flip), 0); nvif_wr32(dev, NV_PVIDEO_UVPLANE_OFFSET_BUFF(flip), nv_fb->nvbo->bo.offset + fb->offsets[1]); @@ -411,7 +411,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, if (nv_plane->colorkey & (1 << 24)) overlay |= 0x10; - if (fb->pixel_format == DRM_FORMAT_YUYV) + if (fb->format->format == DRM_FORMAT_YUYV) overlay |= 0x100; nvif_wr32(dev, NV_PVIDEO_OVERLAY, overlay); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 947c200655b4..966d20ab4de4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -33,6 +33,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_atomic.h> #include "nouveau_reg.h" #include "nouveau_drv.h" @@ -769,7 +770,7 @@ nouveau_connector_set_property(struct drm_connector *connector, struct drm_encoder *encoder = to_drm_encoder(nv_encoder); int ret; - if (connector->dev->mode_config.funcs->atomic_commit) + if (drm_drv_uses_atomic_modeset(connector->dev)) return drm_atomic_helper_connector_set_property(connector, property, value); ret = connector->funcs->atomic_set_property(&nv_connector->base, @@ -1074,7 +1075,7 @@ nouveau_connector_helper_funcs = { static int nouveau_connector_dpms(struct drm_connector *connector, int mode) { - if (connector->dev->mode_config.funcs->atomic_commit) + if (drm_drv_uses_atomic_modeset(connector->dev)) return drm_atomic_helper_connector_dpms(connector, mode); return drm_helper_connector_dpms(connector, mode); } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h index 096983c42a1f..a4d1a059bd3d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.h +++ b/drivers/gpu/drm/nouveau/nouveau_connector.h @@ -30,6 +30,7 @@ #include <nvif/notify.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> #include "nouveau_crtc.h" diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index cef08da1da4e..add353e230f4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ 
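The nouveau changes here and in the following hunks replace open-coded tests of dev->mode_config.funcs->atomic_commit with drm_drv_uses_atomic_modeset(), a named predicate wrapping the same check (hence the new <drm/drm_atomic.h> includes). The call sites stop depending on how "driver runs atomic" is detected:

/* before: every caller open-codes the detection */
if (dev->mode_config.funcs->atomic_commit)
        mode = &crtc->state->adjusted_mode;
else
        mode = &crtc->hwmode;

/* after: the detection has one name and one home */
if (drm_drv_uses_atomic_modeset(dev))
        mode = &crtc->state->adjusted_mode;
else
        mode = &crtc->hwmode;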
b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -162,7 +162,7 @@ nouveau_display_vblstamp(struct drm_device *dev, unsigned int pipe, list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { if (nouveau_crtc(crtc)->index == pipe) { struct drm_display_mode *mode; - if (dev->mode_config.funcs->atomic_commit) + if (drm_drv_uses_atomic_modeset(dev)) mode = &crtc->state->adjusted_mode; else mode = &crtc->hwmode; @@ -259,7 +259,7 @@ nouveau_framebuffer_new(struct drm_device *dev, if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL))) return -ENOMEM; - drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &fb->base, mode_cmd); fb->nvbo = nvbo; ret = drm_framebuffer_init(dev, &fb->base, &nouveau_framebuffer_funcs); @@ -738,7 +738,7 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime) struct nouveau_display *disp = nouveau_display(dev); struct drm_crtc *crtc; - if (dev->mode_config.funcs->atomic_commit) { + if (drm_drv_uses_atomic_modeset(dev)) { if (!runtime) { disp->suspend = nouveau_atomic_suspend(dev); if (IS_ERR(disp->suspend)) { @@ -784,7 +784,7 @@ nouveau_display_resume(struct drm_device *dev, bool runtime) struct drm_crtc *crtc; int ret; - if (dev->mode_config.funcs->atomic_commit) { + if (drm_drv_uses_atomic_modeset(dev)) { nouveau_display_init(dev); if (disp->suspend) { drm_atomic_helper_resume(dev, disp->suspend); @@ -947,7 +947,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Initialize a page flip struct */ *s = (struct nouveau_page_flip_state) - { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0], + { { }, event, crtc, fb->format->cpp[0] * 8, fb->pitches[0], new_bo->bo.offset }; /* Keep vblanks on during flip, for the target crtc of this flip */ diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 2f2a3dcd4ad7..9de6abb65781 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -41,6 +41,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_atomic.h> #include "nouveau_drv.h" #include "nouveau_gem.h" @@ -400,7 +401,8 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, info->screen_base = nvbo_kmap_obj_iovirtual(fb->nvbo); info->screen_size = fb->nvbo->bo.mem.num_pages << PAGE_SHIFT; - drm_fb_helper_fill_fix(info, fb->base.pitches[0], fb->base.depth); + drm_fb_helper_fill_fix(info, fb->base.pitches[0], + fb->base.format->depth); drm_fb_helper_fill_var(info, &fbcon->helper, sizes->fb_width, sizes->fb_height); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -523,7 +525,7 @@ nouveau_fbcon_init(struct drm_device *dev) preferred_bpp = 32; /* disable all the possible outputs/crtcs before entering KMS mode */ - if (!dev->mode_config.funcs->atomic_commit) + if (!drm_drv_uses_atomic_modeset(dev)) drm_helper_disable_unused_functions(dev); ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index a6dbe8258040..ec4668a41e01 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -107,10 +107,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func nouveau_vram_manager = { - nouveau_vram_manager_init, - nouveau_vram_manager_fini, - nouveau_vram_manager_new, - nouveau_vram_manager_del, + .init = nouveau_vram_manager_init, + .takedown = 
nouveau_vram_manager_fini, + .get_node = nouveau_vram_manager_new, + .put_node = nouveau_vram_manager_del, }; static int @@ -184,11 +184,11 @@ nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) } const struct ttm_mem_type_manager_func nouveau_gart_manager = { - nouveau_gart_manager_init, - nouveau_gart_manager_fini, - nouveau_gart_manager_new, - nouveau_gart_manager_del, - nouveau_gart_manager_debug + .init = nouveau_gart_manager_init, + .takedown = nouveau_gart_manager_fini, + .get_node = nouveau_gart_manager_new, + .put_node = nouveau_gart_manager_del, + .debug = nouveau_gart_manager_debug }; /*XXX*/ @@ -257,11 +257,11 @@ nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix) } const struct ttm_mem_type_manager_func nv04_gart_manager = { - nv04_gart_manager_init, - nv04_gart_manager_fini, - nv04_gart_manager_new, - nv04_gart_manager_del, - nv04_gart_manager_debug + .init = nv04_gart_manager_init, + .takedown = nv04_gart_manager_fini, + .get_node = nv04_gart_manager_new, + .put_node = nv04_gart_manager_del, + .debug = nv04_gart_manager_debug }; int diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2c2c64507661..cb85cb72dc1c 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -1153,7 +1153,7 @@ nv50_curs_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, if (asyw->state.fb->width != asyw->state.fb->height) return -EINVAL; - switch (asyw->state.fb->pixel_format) { + switch (asyw->state.fb->format->format) { case DRM_FORMAT_ARGB8888: asyh->curs.format = 1; break; default: WARN_ON(1); @@ -1418,12 +1418,10 @@ static int nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, struct nv50_head_atom *asyh) { - const u32 format = asyw->state.fb->pixel_format; - const struct drm_format_info *info; + const struct drm_framebuffer *fb = asyw->state.fb; int ret; - info = drm_format_info(format); - if (!info || !info->depth) + if (!fb->format->depth) return -EINVAL; ret = drm_plane_helper_check_state(&asyw->state, &asyw->clip, @@ -1433,14 +1431,14 @@ nv50_base_acquire(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw, if (ret) return ret; - asyh->base.depth = info->depth; - asyh->base.cpp = info->cpp[0]; + asyh->base.depth = fb->format->depth; + asyh->base.cpp = fb->format->cpp[0]; asyh->base.x = asyw->state.src.x1 >> 16; asyh->base.y = asyw->state.src.y1 >> 16; asyh->base.w = asyw->state.fb->width; asyh->base.h = asyw->state.fb->height; - switch (format) { + switch (fb->format->format) { case DRM_FORMAT_C8 : asyw->image.format = 0x1e; break; case DRM_FORMAT_RGB565 : asyw->image.format = 0xe8; break; case DRM_FORMAT_XRGB1555 : diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 5f3337f1e9aa..bd6b94c38613 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -107,7 +107,7 @@ static int omap_framebuffer_create_handle(struct drm_framebuffer *fb, static void omap_framebuffer_destroy(struct drm_framebuffer *fb) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; DBG("destroy: FB ID: %d (%p)", fb->base.id, fb); @@ -252,7 +252,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int omap_framebuffer_pin(struct drm_framebuffer *fb) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - int ret, i, n = drm_format_num_planes(fb->pixel_format); 
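The nouveau_ttm conversion above is purely defensive C: the ttm_mem_type_manager_func tables were filled positionally, which compiles fine right up until someone reorders or inserts a member. Designated initializers bind each callback to its field name, and as a bonus make it obvious that the *_fini helpers land in the .takedown slot. A generic illustration:

struct ops {
        int  (*init)(void);
        void (*takedown)(void);
};

static int  my_init(void)     { return 0; }
static void my_takedown(void) { }

/* fragile: meaning depends on member order */
static const struct ops positional = { my_init, my_takedown };

/* robust: survives reordering and newly inserted members */
static const struct ops designated = {
        .init     = my_init,
        .takedown = my_takedown,
};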
+ int ret, i, n = fb->format->num_planes; mutex_lock(&omap_fb->lock); @@ -292,7 +292,7 @@ fail: void omap_framebuffer_unpin(struct drm_framebuffer *fb) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; mutex_lock(&omap_fb->lock); @@ -343,10 +343,10 @@ struct drm_connector *omap_framebuffer_get_next_connector( void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); - int i, n = drm_format_num_planes(fb->pixel_format); + int i, n = fb->format->num_planes; seq_printf(m, "fb: %dx%d@%4.4s\n", fb->width, fb->height, - (char *)&fb->pixel_format); + (char *)&fb->format->format); for (i = 0; i < n; i++) { struct plane *plane = &omap_fb->planes[i]; @@ -457,7 +457,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, plane->paddr = 0; } - drm_helper_mode_fill_fb_struct(fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); if (ret) { diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 8d8ac173f55d..aed99a0fc44b 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -190,7 +190,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper, strcpy(fbi->fix.id, MODULE_NAME); - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height); dev->mode_config.fb_base = paddr; diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 4b5eab8a47b3..659c77742649 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -624,12 +624,12 @@ qxl_framebuffer_init(struct drm_device *dev, int ret; qfb->obj = obj; + drm_helper_mode_fill_fb_struct(dev, &qfb->base, mode_cmd); ret = drm_framebuffer_init(dev, &qfb->base, funcs); if (ret) { qfb->obj = NULL; return ret; } - drm_helper_mode_fill_fb_struct(&qfb->base, mode_cmd); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 9b728edf1b49..4d8681e84e68 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -283,7 +283,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, struct qxl_rect *rects; int stride = qxl_fb->base.pitches[0]; /* depth is not actually interesting, we don't mask with it */ - int depth = qxl_fb->base.bits_per_pixel; + int depth = qxl_fb->base.format->cpp[0] * 8; uint8_t *surface_base; struct qxl_release *release; struct qxl_bo *clips_bo; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 785aad42e9bb..f15ddd0eb85b 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -43,6 +43,7 @@ #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> +#include <drm/drm_encoder.h> #include <drm/drm_gem.h> /* just for ttm_validate_buffer */ diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index fd7e5e94be5b..e6ade6aab54c 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -279,7 +279,7 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, qfbdev->shadow = shadow; strcpy(info->fix.id, "qxldrmfb"); - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); info->flags = 
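Two related framebuffer-init details recur across the drivers: drm_helper_mode_fill_fb_struct() now takes the drm_device as its first argument, so the helper can fill in fb->dev (and look up the format information) before the framebuffer is registered, and qxl additionally moves the call ahead of drm_framebuffer_init() so the fb is fully populated by the time it becomes visible to others. The resulting order, sketched for a generic wrapper (my_fb and my_fb_funcs are illustrative):

drm_helper_mode_fill_fb_struct(dev, &my_fb->base, mode_cmd);
my_fb->obj = obj;

ret = drm_framebuffer_init(dev, &my_fb->base, &my_fb_funcs);
if (ret) {
        my_fb->obj = NULL;
        return ret;
}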
FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT; info->fbops = &qxlfb_ops; @@ -316,7 +316,8 @@ static int qxlfb_create(struct qxl_fbdev *qfbdev, qdev->fbdev_info = info; qdev->fbdev_qfb = &qfbdev->qfb; DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start, (unsigned long)info->screen_size); - DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth, fb->pitches[0], fb->width, fb->height); + DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", + fb->format->depth, fb->pitches[0], fb->width, fb->height); return 0; out_destroy_fbi: diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 05f4ebe31ce2..3c492a0aa6bd 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1195,7 +1195,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (target_fb->pixel_format) { + switch (target_fb->format->format) { case DRM_FORMAT_C8: fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) | EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED)); @@ -1261,7 +1261,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, break; default: DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format, &format_name)); + drm_get_format_name(target_fb->format->format, &format_name)); return -EINVAL; } @@ -1277,7 +1277,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, /* Calculate the macrotile mode index. */ tile_split_bytes = 64 << tile_split; - tileb = 8 * 8 * target_fb->bits_per_pixel / 8; + tileb = 8 * 8 * target_fb->format->cpp[0]; tileb = min(tile_split_bytes, tileb); for (index = 0; tileb > 64; index++) @@ -1285,13 +1285,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, if (index >= 16) { DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n", - target_fb->bits_per_pixel, tile_split); + target_fb->format->cpp[0] * 8, + tile_split); return -EINVAL; } num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3; } else { - switch (target_fb->bits_per_pixel) { + switch (target_fb->format->cpp[0] * 8) { case 8: index = 10; break; @@ -1414,7 +1415,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc, WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height); - fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1); @@ -1510,7 +1511,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL); radeon_bo_unreserve(rbo); - switch (target_fb->pixel_format) { + switch (target_fb->format->format) { case DRM_FORMAT_C8: fb_format = AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | @@ -1563,7 +1564,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, break; default: DRM_ERROR("Unsupported screen format %s\n", - drm_get_format_name(target_fb->pixel_format, &format_name)); + drm_get_format_name(target_fb->format->format, &format_name)); return -EINVAL; } @@ -1621,7 +1622,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc, WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width); WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, 
target_fb->height); - fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); + fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels); WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index f5e84f4b58e6..e3399310d41d 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3225,13 +3225,19 @@ void r100_bandwidth_update(struct radeon_device *rdev) radeon_update_display_priority(rdev); if (rdev->mode_info.crtcs[0]->base.enabled) { + const struct drm_framebuffer *fb = + rdev->mode_info.crtcs[0]->base.primary->fb; + mode1 = &rdev->mode_info.crtcs[0]->base.mode; - pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8; + pixel_bytes1 = fb->format->cpp[0]; } if (!(rdev->flags & RADEON_SINGLE_CRTC)) { if (rdev->mode_info.crtcs[1]->base.enabled) { + const struct drm_framebuffer *fb = + rdev->mode_info.crtcs[1]->base.primary->fb; + mode2 = &rdev->mode_info.crtcs[1]->base.mode; - pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8; + pixel_bytes2 = fb->format->cpp[0]; } } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index e7409e8a9f87..aea8b62835a4 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -549,19 +549,19 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc, if (!ASIC_IS_AVIVO(rdev)) { /* crtc offset is from display base addr not FB location */ base -= radeon_crtc->legacy_display_base_addr; - pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8); + pitch_pixels = fb->pitches[0] / fb->format->cpp[0]; if (tiling_flags & RADEON_TILING_MACRO) { if (ASIC_IS_R300(rdev)) { base &= ~0x7ff; } else { - int byteshift = fb->bits_per_pixel >> 4; + int byteshift = fb->format->cpp[0] * 8 >> 4; int tile_addr = (((crtc->y >> 3) * pitch_pixels + crtc->x) >> (8 - byteshift)) << 11; base += tile_addr + ((crtc->x << byteshift) % 256) + ((crtc->y % 8) << 8); } } else { int offset = crtc->y * pitch_pixels + crtc->x; - switch (fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: default: offset *= 1; @@ -1327,7 +1327,7 @@ radeon_framebuffer_init(struct drm_device *dev, { int ret; rfb->obj = obj; - drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs); if (ret) { rfb->obj = NULL; diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 899b6a1644bd..6c10a83f3362 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -263,7 +263,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, strcpy(info->fix.id, "radeondrmfb"); - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &radeonfb_ops; @@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); - DRM_INFO("fb depth is %d\n", fb->depth); + DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); 
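Every framebuffer change in the hunks above and below follows the same mapping: a drm_framebuffer's per-pixel metadata now hangs off fb->format, a const struct drm_format_info pointer filled in when the framebuffer is initialised via drm_helper_mode_fill_fb_struct() (which is why that helper now also takes the struct drm_device). A minimal sketch of the correspondence, using only fields exercised in these hunks; describe_fb() is a hypothetical helper for illustration, not part of any patch here:

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>

/* Hypothetical helper: reads each field through the new format
 * descriptor, with the legacy drm_framebuffer field noted per line.
 */
static void describe_fb(const struct drm_framebuffer *fb)
{
	u32 fourcc = fb->format->format;	/* was fb->pixel_format */
	int cpp = fb->format->cpp[0];		/* was fb->bits_per_pixel / 8 */

	DRM_INFO("fb: %ux%u fourcc=%08x depth=%d planes=%d cpp=%d\n",
		 fb->width, fb->height, fourcc, fb->format->depth,
		 fb->format->num_planes, cpp);
}

Since every field comes from one const descriptor, drivers no longer recompute bits per pixel or plane counts by hand, which is the whole substance of these otherwise mechanical conversions.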
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index c084cadcbf21..1b7528df7f7f 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -85,10 +85,8 @@ static void radeon_hotplug_work_func(struct work_struct *work) return; mutex_lock(&mode_config->mutex); - if (mode_config->num_connector) { - list_for_each_entry(connector, &mode_config->connector_list, head) - radeon_connector_hotplug(connector); - } + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); @@ -103,10 +101,8 @@ static void radeon_dp_work_func(struct work_struct *work) struct drm_connector *connector; /* this should take a mutex */ - if (mode_config->num_connector) { - list_for_each_entry(connector, &mode_config->connector_list, head) - radeon_connector_hotplug(connector); - } + list_for_each_entry(connector, &mode_config->connector_list, head) + radeon_connector_hotplug(connector); } /** * radeon_driver_irq_preinstall_kms - drm irq preinstall callback diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index d0de4022fff9..ce6cb6666212 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -402,7 +402,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, target_fb = crtc->primary->fb; } - switch (target_fb->bits_per_pixel) { + switch (target_fb->format->cpp[0] * 8) { case 8: format = 2; break; @@ -476,10 +476,9 @@ retry: crtc_offset_cntl = 0; - pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8); - crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) + - ((target_fb->bits_per_pixel * 8) - 1)) / - (target_fb->bits_per_pixel * 8)); + pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; + crtc_pitch = DIV_ROUND_UP(pitch_pixels * target_fb->format->cpp[0] * 8, + target_fb->format->cpp[0] * 8 * 8); crtc_pitch |= crtc_pitch << 16; crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; @@ -504,14 +503,14 @@ retry: crtc_tile_x0_y0 = x | (y << 16); base &= ~0x7ff; } else { - int byteshift = target_fb->bits_per_pixel >> 4; + int byteshift = target_fb->format->cpp[0] * 8 >> 4; int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); crtc_offset_cntl |= (y % 16); } } else { int offset = y * pitch_pixels + x; - switch (target_fb->bits_per_pixel) { + switch (target_fb->format->cpp[0] * 8) { case 8: offset *= 1; break; @@ -579,6 +578,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + const struct drm_framebuffer *fb = crtc->primary->fb; struct drm_encoder *encoder; int format; int hsync_start; @@ -602,7 +602,7 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod } } - switch (crtc->primary->fb->bits_per_pixel) { + switch (fb->format->cpp[0] * 8) { case 8: format = 2; break; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index f1da484864a9..ad282648fc8b 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ 
-32,6 +32,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <drm/drm_dp_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_fixed.h> diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index 7fc10a9c34c3..a050a3699857 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -15,6 +15,7 @@ #define __RCAR_DU_ENCODER_H__ #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> struct rcar_du_device; struct rcar_du_hdmienc; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c index f9515f53cc5b..c4c5d1abcff8 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c @@ -124,10 +124,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu, hdmienc->renc = renc; /* Link the bridge to the encoder. */ - bridge->encoder = encoder; - encoder->bridge = bridge; - - ret = drm_bridge_attach(rcdu->ddev, bridge); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) { drm_encoder_cleanup(encoder); return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index a74f8ed8ca2e..dcde6288da6c 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -567,10 +567,10 @@ static int rcar_du_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - rstate->format = rcar_du_format_info(state->fb->pixel_format); + rstate->format = rcar_du_format_info(state->fb->format->format); if (rstate->format == NULL) { dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__, - state->fb->pixel_format); + state->fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index 83ebd162f3ef..b5bfbe50bd87 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -201,10 +201,10 @@ static int rcar_du_vsp_plane_atomic_check(struct drm_plane *plane, return -EINVAL; } - rstate->format = rcar_du_format_info(state->fb->pixel_format); + rstate->format = rcar_du_format_info(state->fb->format->format); if (rstate->format == NULL) { dev_dbg(rcdu->dev, "%s: unsupported format %08x\n", __func__, - state->fb->pixel_format); + state->fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 0f6eda023bd0..d5e1f8627d38 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -92,7 +92,7 @@ rockchip_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cm if (!rockchip_fb) return ERR_PTR(-ENOMEM); - drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &rockchip_fb->fb, mode_cmd); for (i = 0; i < num_planes; i++) rockchip_fb->obj[i] = obj[i]; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c index 8f639c8597a5..52d1fdf9f9da 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c @@ -94,7 +94,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, fbi->fbops = &rockchip_drm_fbdev_ops; fb = helper->fb; - drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, 
sizes->fb_height); offset = fbi->var.xoffset * bytes_per_pixel; @@ -106,7 +106,8 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper, fbi->fix.smem_len = rk_obj->base.size; DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n", - fb->width, fb->height, fb->depth, rk_obj->kvaddr, + fb->width, fb->height, fb->format->depth, + rk_obj->kvaddr, offset, size); fbi->skip_vt_switch = true; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index c7eba305c488..fb5f001f51c3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -668,7 +668,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, if (!state->visible) return 0; - ret = vop_convert_format(fb->pixel_format); + ret = vop_convert_format(fb->format->format); if (ret < 0) return ret; @@ -676,7 +676,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * Src.x1 can be odd when do clip, but yuv plane start point * need align with 2 pixel. */ - if (is_yuv_support(fb->pixel_format) && ((state->src.x1 >> 16) % 2)) + if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) return -EINVAL; return 0; @@ -749,21 +749,21 @@ static void vop_plane_atomic_update(struct drm_plane *plane, dsp_sty = dest->y1 + crtc->mode.vtotal - crtc->mode.vsync_start; dsp_st = dsp_sty << 16 | (dsp_stx & 0xffff); - offset = (src->x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0); + offset = (src->x1 >> 16) * fb->format->cpp[0]; offset += (src->y1 >> 16) * fb->pitches[0]; dma_addr = rk_obj->dma_addr + offset + fb->offsets[0]; - format = vop_convert_format(fb->pixel_format); + format = vop_convert_format(fb->format->format); spin_lock(&vop->reg_lock); VOP_WIN_SET(vop, win, format, format); VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2); VOP_WIN_SET(vop, win, yrgb_mst, dma_addr); - if (is_yuv_support(fb->pixel_format)) { - int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format); - int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format); - int bpp = drm_format_plane_cpp(fb->pixel_format, 1); + if (is_yuv_support(fb->format->format)) { + int hsub = drm_format_horz_chroma_subsampling(fb->format->format); + int vsub = drm_format_vert_chroma_subsampling(fb->format->format); + int bpp = fb->format->cpp[1]; uv_obj = rockchip_fb_get_gem_obj(fb, 1); rk_uv_obj = to_rockchip_obj(uv_obj); @@ -779,16 +779,16 @@ static void vop_plane_atomic_update(struct drm_plane *plane, if (win->phy->scl) scl_vop_cal_scl_fac(vop, win, actual_w, actual_h, drm_rect_width(dest), drm_rect_height(dest), - fb->pixel_format); + fb->format->format); VOP_WIN_SET(vop, win, act_info, act_info); VOP_WIN_SET(vop, win, dsp_info, dsp_info); VOP_WIN_SET(vop, win, dsp_st, dsp_st); - rb_swap = has_rb_swapped(fb->pixel_format); + rb_swap = has_rb_swapped(fb->format->format); VOP_WIN_SET(vop, win, rb_swap, rb_swap); - if (is_alpha_support(fb->pixel_format)) { + if (is_alpha_support(fb->format->format)) { VOP_WIN_SET(vop, win, dst_alpha_ctl, DST_FACTOR_M0(ALPHA_SRC_INVERSE)); val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) | diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile new file mode 100644 index 000000000000..4aebfc7f27d4 --- /dev/null +++ b/drivers/gpu/drm/selftests/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_DRM_DEBUG_MM_SELFTEST) += test-drm_mm.o diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h new file mode 100644 index 
000000000000..6a4575fdc1c0 --- /dev/null +++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h @@ -0,0 +1,23 @@ +/* List each unit test as selftest(name, function) + * + * The name is used as both an enum and expanded as igt__name to create + * a module parameter. It must be unique and legal for a C identifier. + * + * Tests are executed in order by igt/drm_mm + */ +selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */ +selftest(init, igt_init) +selftest(debug, igt_debug) +selftest(reserve, igt_reserve) +selftest(insert, igt_insert) +selftest(replace, igt_replace) +selftest(insert_range, igt_insert_range) +selftest(align, igt_align) +selftest(align32, igt_align32) +selftest(align64, igt_align64) +selftest(evict, igt_evict) +selftest(evict_range, igt_evict_range) +selftest(topdown, igt_topdown) +selftest(color, igt_color) +selftest(color_evict, igt_color_evict) +selftest(color_evict_range, igt_color_evict_range) diff --git a/drivers/gpu/drm/selftests/drm_selftest.c b/drivers/gpu/drm/selftests/drm_selftest.c new file mode 100644 index 000000000000..e29ed9faef5b --- /dev/null +++ b/drivers/gpu/drm/selftests/drm_selftest.c @@ -0,0 +1,109 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#include <linux/compiler.h> + +#define selftest(name, func) __idx_##name, +enum { +#include TESTS +}; +#undef selftest + +#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f }, +static struct drm_selftest { + bool enabled; + const char *name; + int (*func)(void *); +} selftests[] = { +#include TESTS +}; +#undef selftest + +/* Embed the line number into the parameter name so that we can order tests */ +#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n)) +#define selftest_0(n, func, id) \ +module_param_named(id, selftests[__idx_##n].enabled, bool, 0400); +#define selftest(n, func) selftest_0(n, func, param(n)) +#include TESTS +#undef selftest + +static void set_default_test_all(struct drm_selftest *st, unsigned long count) +{ + unsigned long i; + + for (i = 0; i < count; i++) + if (st[i].enabled) + return; + + for (i = 0; i < count; i++) + st[i].enabled = true; +} + +static int run_selftests(struct drm_selftest *st, + unsigned long count, + void *data) +{ + int err = 0; + + set_default_test_all(st, count); + + /* Tests are listed in natural order in drm_*_selftests.h */ + for (; count--; st++) { + if (!st->enabled) + continue; + + pr_debug("drm: Running %s\n", st->name); + err = st->func(data); + if (err) + break; + } + + if (WARN(err > 0 || err == -ENOTTY, + "%s returned %d, conflicting with selftest's magic values!\n", + st->name, err)) + err = -1; + + rcu_barrier(); + return err; +} + +static int __maybe_unused +__drm_subtests(const char *caller, + const struct drm_subtest *st, + int count, + void *data) +{ + int err; + + for (; count--; st++) { + pr_debug("Running %s/%s\n", caller, st->name); + err = st->func(data); + if (err) { + pr_err("%s: %s failed with error %d\n", + caller, st->name, err); + return err; + } + } + + return 0; +} diff --git a/drivers/gpu/drm/selftests/drm_selftest.h b/drivers/gpu/drm/selftests/drm_selftest.h new file mode 100644 index 000000000000..c784ec02ff53 --- /dev/null +++ b/drivers/gpu/drm/selftests/drm_selftest.h @@ -0,0 +1,41 @@ +/* + * Copyright © 2016 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. 
+ */ + +#ifndef __DRM_SELFTEST_H__ +#define __DRM_SELFTEST_H__ + +struct drm_subtest { + int (*func)(void *data); + const char *name; +}; + +static int __drm_subtests(const char *caller, + const struct drm_subtest *st, + int count, + void *data); +#define drm_subtests(T, data) \ + __drm_subtests(__func__, T, ARRAY_SIZE(T), data) + +#define SUBTEST(x) { x, #x } + +#endif /* __DRM_SELFTEST_H__ */ diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c new file mode 100644 index 000000000000..2ce92f4dcfc7 --- /dev/null +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -0,0 +1,2172 @@ +/* + * Test cases for the drm_mm range manager + */ + +#define pr_fmt(fmt) "drm_mm: " fmt + +#include <linux/module.h> +#include <linux/prime_numbers.h> +#include <linux/slab.h> +#include <linux/random.h> +#include <linux/vmalloc.h> + +#include <drm/drm_mm.h> + +#include "../lib/drm_random.h" + +#define TESTS "drm_mm_selftests.h" +#include "drm_selftest.h" + +static unsigned int random_seed; +static unsigned int max_iterations = 8192; +static unsigned int max_prime = 128; + +enum { + DEFAULT, + TOPDOWN, + BEST, +}; + +static const struct insert_mode { + const char *name; + unsigned int search_flags; + unsigned int create_flags; +} insert_modes[] = { + [DEFAULT] = { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT }, + [TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP }, + [BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT }, + {} +}, evict_modes[] = { + { "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT }, + { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP }, + {} +}; + +static int igt_sanitycheck(void *ignored) +{ + pr_info("%s - ok!\n", __func__); + return 0; +} + +static bool assert_no_holes(const struct drm_mm *mm) +{ + struct drm_mm_node *hole; + u64 hole_start, hole_end; + unsigned long count; + + count = 0; + drm_mm_for_each_hole(hole, mm, hole_start, hole_end) + count++; + if (count) { + pr_err("Expected to find no holes (after reserve), found %lu instead\n", count); + return false; + } + + drm_mm_for_each_node(hole, mm) { + if (drm_mm_hole_follows(hole)) { + pr_err("Hole follows node, expected none!\n"); + return false; + } + } + + return true; +} + +static bool assert_one_hole(const struct drm_mm *mm, u64 start, u64 end) +{ + struct drm_mm_node *hole; + u64 hole_start, hole_end; + unsigned long count; + bool ok = true; + + if (end <= start) + return true; + + count = 0; + drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { + if (start != hole_start || end != hole_end) { + if (ok) + pr_err("empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n", + hole_start, hole_end, + start, end); + ok = false; + } + count++; + } + if (count != 1) { + pr_err("Expected to find one hole, found %lu instead\n", count); + ok = false; + } + + return ok; +} + +static bool assert_continuous(const struct drm_mm *mm, u64 size) +{ + struct drm_mm_node *node, *check, *found; + unsigned long n; + u64 addr; + + if (!assert_no_holes(mm)) + return false; + + n = 0; + addr = 0; + drm_mm_for_each_node(node, mm) { + if (node->start != addr) { + pr_err("node[%ld] list out of order, expected %llx found %llx\n", + n, addr, node->start); + return false; + } + + if (node->size != size) { + pr_err("node[%ld].size incorrect, expected %llx, found %llx\n", + n, size, node->size); + return false; + } + + if (drm_mm_hole_follows(node)) { + pr_err("node[%ld] is followed by a hole!\n", n); + return false; + } + + found = NULL; + 
drm_mm_for_each_node_in_range(check, mm, addr, addr + size) { + if (node != check) { + pr_err("lookup returned wrong node, expected start %llx, found %llx\n", + node->start, check->start); + return false; + } + found = check; + } + if (!found) { + pr_err("lookup failed for node %llx + %llx\n", + addr, size); + return false; + } + + addr += size; + n++; + } + + return true; +} + +static u64 misalignment(struct drm_mm_node *node, u64 alignment) +{ + u64 rem; + + if (!alignment) + return 0; + + div64_u64_rem(node->start, alignment, &rem); + return rem; +} + +static bool assert_node(struct drm_mm_node *node, struct drm_mm *mm, + u64 size, u64 alignment, unsigned long color) +{ + bool ok = true; + + if (!drm_mm_node_allocated(node) || node->mm != mm) { + pr_err("node not allocated\n"); + ok = false; + } + + if (node->size != size) { + pr_err("node has wrong size, found %llu, expected %llu\n", + node->size, size); + ok = false; + } + + if (misalignment(node, alignment)) { + pr_err("node is misaligned, start %llx rem %llu, expected alignment %llu\n", + node->start, misalignment(node, alignment), alignment); + ok = false; + } + + if (node->color != color) { + pr_err("node has wrong color, found %lu, expected %lu\n", + node->color, color); + ok = false; + } + + return ok; +} + +static int igt_init(void *ignored) +{ + const unsigned int size = 4096; + struct drm_mm mm; + struct drm_mm_node tmp; + int ret = -EINVAL; + + /* Start with some simple checks on initialising the struct drm_mm */ + memset(&mm, 0, sizeof(mm)); + if (drm_mm_initialized(&mm)) { + pr_err("zeroed mm claims to be initialized\n"); + return ret; + } + + memset(&mm, 0xff, sizeof(mm)); + drm_mm_init(&mm, 0, size); + if (!drm_mm_initialized(&mm)) { + pr_err("mm claims not to be initialized\n"); + goto out; + } + + if (!drm_mm_clean(&mm)) { + pr_err("mm not empty on creation\n"); + goto out; + } + + /* After creation, it should all be one massive hole */ + if (!assert_one_hole(&mm, 0, size)) { + ret = -EINVAL; + goto out; + } + + memset(&tmp, 0, sizeof(tmp)); + tmp.start = 0; + tmp.size = size; + ret = drm_mm_reserve_node(&mm, &tmp); + if (ret) { + pr_err("failed to reserve whole drm_mm\n"); + goto out; + } + + /* After filling the range entirely, there should be no holes */ + if (!assert_no_holes(&mm)) { + ret = -EINVAL; + goto out; + } + + /* And then after emptying it again, the massive hole should be back */ + drm_mm_remove_node(&tmp); + if (!assert_one_hole(&mm, 0, size)) { + ret = -EINVAL; + goto out; + } + +out: + if (ret) + drm_mm_debug_table(&mm, __func__); + drm_mm_takedown(&mm); + return ret; +} + +static int igt_debug(void *ignored) +{ + struct drm_mm mm; + struct drm_mm_node nodes[2]; + int ret; + + /* Create a small drm_mm with a couple of nodes and a few holes, and + * check that the debug iterator doesn't explode over a trivial drm_mm.
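+ * Concretely, the reservations below carve the [0, 4096) range into
+ * nodes at [512, 1536) and [2560, 3584), leaving three holes for the
+ * iterator to walk: [0, 512), [1536, 2560) and [3584, 4096).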
+ */ + + drm_mm_init(&mm, 0, 4096); + + memset(nodes, 0, sizeof(nodes)); + nodes[0].start = 512; + nodes[0].size = 1024; + ret = drm_mm_reserve_node(&mm, &nodes[0]); + if (ret) { + pr_err("failed to reserve node[0] {start=%lld, size=%lld}\n", + nodes[0].start, nodes[0].size); + return ret; + } + + nodes[1].size = 1024; + nodes[1].start = 4096 - 512 - nodes[1].size; + ret = drm_mm_reserve_node(&mm, &nodes[1]); + if (ret) { + pr_err("failed to reserve node[1] {start=%lld, size=%lld}\n", + nodes[1].start, nodes[1].size); + return ret; + } + + drm_mm_debug_table(&mm, __func__); + return 0; +} + +static struct drm_mm_node *set_node(struct drm_mm_node *node, + u64 start, u64 size) +{ + node->start = start; + node->size = size; + return node; +} + +static bool expect_reserve_fail(struct drm_mm *mm, struct drm_mm_node *node) +{ + int err; + + err = drm_mm_reserve_node(mm, node); + if (likely(err == -ENOSPC)) + return true; + + if (!err) { + pr_err("impossible reserve succeeded, node %llu + %llu\n", + node->start, node->size); + drm_mm_remove_node(node); + } else { + pr_err("impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n", + err, -ENOSPC, node->start, node->size); + } + return false; +} + +static bool check_reserve_boundaries(struct drm_mm *mm, + unsigned int count, + u64 size) +{ + const struct boundary { + u64 start, size; + const char *name; + } boundaries[] = { +#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" } + B(0, 0), + B(-size, 0), + B(size, 0), + B(size * count, 0), + B(-size, size), + B(-size, -size), + B(-size, 2*size), + B(0, -size), + B(size, -size), + B(count*size, size), + B(count*size, -size), + B(count*size, count*size), + B(count*size, -count*size), + B(count*size, -(count+1)*size), + B((count+1)*size, size), + B((count+1)*size, -size), + B((count+1)*size, -2*size), +#undef B + }; + struct drm_mm_node tmp = {}; + int n; + + for (n = 0; n < ARRAY_SIZE(boundaries); n++) { + if (!expect_reserve_fail(mm, + set_node(&tmp, + boundaries[n].start, + boundaries[n].size))) { + pr_err("boundary[%d:%s] failed, count=%u, size=%lld\n", + n, boundaries[n].name, count, size); + return false; + } + } + + return true; +} + +static int __igt_reserve(unsigned int count, u64 size) +{ + DRM_RND_STATE(prng, random_seed); + struct drm_mm mm; + struct drm_mm_node tmp, *nodes, *node, *next; + unsigned int *order, n, m, o = 0; + int ret, err; + + /* For exercising drm_mm_reserve_node(), we want to check that + * reservations outside of the drm_mm range are rejected, as are + * overlapping and otherwise already occupied ranges. Afterwards, + * the tree and nodes should be intact. + */ + + DRM_MM_BUG_ON(!count); + DRM_MM_BUG_ON(!size); + + ret = -ENOMEM; + order = drm_random_order(count, &prng); + if (!order) + goto err; + + nodes = vzalloc(sizeof(*nodes) * count); + if (!nodes) + goto err_order; + + ret = -EINVAL; + drm_mm_init(&mm, 0, count * size); + + if (!check_reserve_boundaries(&mm, count, size)) + goto out; + + for (n = 0; n < count; n++) { + nodes[n].start = order[n] * size; + nodes[n].size = size; + + err = drm_mm_reserve_node(&mm, &nodes[n]); + if (err) { + pr_err("reserve failed, step %d, start %llu\n", + n, nodes[n].start); + ret = err; + goto out; + } + + if (!drm_mm_node_allocated(&nodes[n])) { + pr_err("reserved node not allocated!
step %d, start %llu\n", + n, nodes[n].start); + goto out; + } + + if (!expect_reserve_fail(&mm, &nodes[n])) + goto out; + } + + /* After random insertion the nodes should be in order */ + if (!assert_continuous(&mm, size)) + goto out; + + /* Repeated use should then fail */ + drm_random_reorder(order, count, &prng); + for (n = 0; n < count; n++) { + if (!expect_reserve_fail(&mm, + set_node(&tmp, order[n] * size, 1))) + goto out; + + /* Remove and reinsert should work */ + drm_mm_remove_node(&nodes[order[n]]); + err = drm_mm_reserve_node(&mm, &nodes[order[n]]); + if (err) { + pr_err("reserve failed, step %d, start %llu\n", + n, nodes[n].start); + ret = err; + goto out; + } + } + + if (!assert_continuous(&mm, size)) + goto out; + + /* Overlapping use should then fail */ + for (n = 0; n < count; n++) { + if (!expect_reserve_fail(&mm, set_node(&tmp, 0, size*count))) + goto out; + } + for (n = 0; n < count; n++) { + if (!expect_reserve_fail(&mm, + set_node(&tmp, + size * n, + size * (count - n)))) + goto out; + } + + /* Remove several, reinsert, check full */ + for_each_prime_number(n, min(max_prime, count)) { + for (m = 0; m < n; m++) { + node = &nodes[order[(o + m) % count]]; + drm_mm_remove_node(node); + } + + for (m = 0; m < n; m++) { + node = &nodes[order[(o + m) % count]]; + err = drm_mm_reserve_node(&mm, node); + if (err) { + pr_err("reserve failed, step %d/%d, start %llu\n", + m, n, node->start); + ret = err; + goto out; + } + } + + o += n; + + if (!assert_continuous(&mm, size)) + goto out; + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + vfree(nodes); +err_order: + kfree(order); +err: + return ret; +} + +static int igt_reserve(void *ignored) +{ + const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); + int n, ret; + + for_each_prime_number_from(n, 1, 54) { + u64 size = BIT_ULL(n); + + ret = __igt_reserve(count, size - 1); + if (ret) + return ret; + + ret = __igt_reserve(count, size); + if (ret) + return ret; + + ret = __igt_reserve(count, size + 1); + if (ret) + return ret; + } + + return 0; +} + +static bool expect_insert(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, unsigned long color, + const struct insert_mode *mode) +{ + int err; + + err = drm_mm_insert_node_generic(mm, node, + size, alignment, color, + mode->search_flags, + mode->create_flags); + if (err) { + pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n", + size, alignment, color, mode->name, err); + return false; + } + + if (!assert_node(node, mm, size, alignment, color)) { + drm_mm_remove_node(node); + return false; + } + + return true; +} + +static bool expect_insert_fail(struct drm_mm *mm, u64 size) +{ + struct drm_mm_node tmp = {}; + int err; + + err = drm_mm_insert_node(mm, &tmp, size, 0, DRM_MM_SEARCH_DEFAULT); + if (likely(err == -ENOSPC)) + return true; + + if (!err) { + pr_err("impossible insert succeeded, node %llu + %llu\n", + tmp.start, tmp.size); + drm_mm_remove_node(&tmp); + } else { + pr_err("impossible insert failed with wrong error %d [expected %d], size %llu\n", + err, -ENOSPC, size); + } + return false; +} + +static int __igt_insert(unsigned int count, u64 size, bool replace) +{ + DRM_RND_STATE(prng, random_seed); + const struct insert_mode *mode; + struct drm_mm mm; + struct drm_mm_node *nodes, *node, *next; + unsigned int *order, n, m, o = 0; + int ret; + + /* Fill a range with lots of nodes, check it doesn't fail too early */ + + DRM_MM_BUG_ON(!count); 
+ DRM_MM_BUG_ON(!size); + + ret = -ENOMEM; + nodes = vmalloc(count * sizeof(*nodes)); + if (!nodes) + goto err; + + order = drm_random_order(count, &prng); + if (!order) + goto err_nodes; + + ret = -EINVAL; + drm_mm_init(&mm, 0, count * size); + + for (mode = insert_modes; mode->name; mode++) { + for (n = 0; n < count; n++) { + struct drm_mm_node tmp; + + node = replace ? &tmp : &nodes[n]; + memset(node, 0, sizeof(*node)); + if (!expect_insert(&mm, node, size, 0, n, mode)) { + pr_err("%s insert failed, size %llu step %d\n", + mode->name, size, n); + goto out; + } + + if (replace) { + drm_mm_replace_node(&tmp, &nodes[n]); + if (drm_mm_node_allocated(&tmp)) { + pr_err("replaced old-node still allocated! step %d\n", + n); + goto out; + } + + if (!assert_node(&nodes[n], &mm, size, 0, n)) { + pr_err("replaced node did not inherit parameters, size %llu step %d\n", + size, n); + goto out; + } + + if (tmp.start != nodes[n].start) { + pr_err("replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n", + tmp.start, size, + nodes[n].start, nodes[n].size); + goto out; + } + } + } + + /* After random insertion the nodes should be in order */ + if (!assert_continuous(&mm, size)) + goto out; + + /* Repeated use should then fail */ + if (!expect_insert_fail(&mm, size)) + goto out; + + /* Remove one and reinsert, as the only hole it should refill itself */ + for (n = 0; n < count; n++) { + u64 addr = nodes[n].start; + + drm_mm_remove_node(&nodes[n]); + if (!expect_insert(&mm, &nodes[n], size, 0, n, mode)) { + pr_err("%s reinsert failed, size %llu step %d\n", + mode->name, size, n); + goto out; + } + + if (nodes[n].start != addr) { + pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n", + mode->name, n, addr, nodes[n].start); + goto out; + } + + if (!assert_continuous(&mm, size)) + goto out; + } + + /* Remove several, reinsert, check full */ + for_each_prime_number(n, min(max_prime, count)) { + for (m = 0; m < n; m++) { + node = &nodes[order[(o + m) % count]]; + drm_mm_remove_node(node); + } + + for (m = 0; m < n; m++) { + node = &nodes[order[(o + m) % count]]; + if (!expect_insert(&mm, node, size, 0, n, mode)) { + pr_err("%s multiple reinsert failed, size %llu step %d\n", + mode->name, size, n); + goto out; + } + } + + o += n; + + if (!assert_continuous(&mm, size)) + goto out; + + if (!expect_insert_fail(&mm, size)) + goto out; + } + + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + DRM_MM_BUG_ON(!drm_mm_clean(&mm)); + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + kfree(order); +err_nodes: + vfree(nodes); +err: + return ret; +} + +static int igt_insert(void *ignored) +{ + const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); + unsigned int n; + int ret; + + for_each_prime_number_from(n, 1, 54) { + u64 size = BIT_ULL(n); + + ret = __igt_insert(count, size - 1, false); + if (ret) + return ret; + + ret = __igt_insert(count, size, false); + if (ret) + return ret; + + ret = __igt_insert(count, size + 1, false); + if (ret) + return ret; + } + + return 0; +} + +static int igt_replace(void *ignored) +{ + const unsigned int count = min_t(unsigned int, BIT(10), max_iterations); + unsigned int n; + int ret; + + /* Reuse igt_insert to exercise replacement by inserting a dummy node, + * then replacing it with the intended node. We want to check that + * the tree is intact and all the information we need is carried + * across to the target node.
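+ * As a reminder of the contract under test: drm_mm_replace_node(&tmp,
+ * &node) hands tmp's allocated range over to node and leaves tmp
+ * unallocated, which is what the drm_mm_node_allocated(&tmp) and
+ * assert_node() checks in __igt_insert() above verify after each
+ * replacement.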
+ */ + + for_each_prime_number_from(n, 1, 54) { + u64 size = BIT_ULL(n); + + ret = __igt_insert(count, size - 1, true); + if (ret) + return ret; + + ret = __igt_insert(count, size, true); + if (ret) + return ret; + + ret = __igt_insert(count, size + 1, true); + if (ret) + return ret; + } + + return 0; +} + +static bool expect_insert_in_range(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, unsigned long color, + u64 range_start, u64 range_end, + const struct insert_mode *mode) +{ + int err; + + err = drm_mm_insert_node_in_range_generic(mm, node, + size, alignment, color, + range_start, range_end, + mode->search_flags, + mode->create_flags); + if (err) { + pr_err("insert (size=%llu, alignment=%llu, color=%lu, mode=%s) into range [%llx, %llx] failed with err=%d\n", + size, alignment, color, mode->name, + range_start, range_end, err); + return false; + } + + if (!assert_node(node, mm, size, alignment, color)) { + drm_mm_remove_node(node); + return false; + } + + return true; +} + +static bool expect_insert_in_range_fail(struct drm_mm *mm, + u64 size, + u64 range_start, + u64 range_end) +{ + struct drm_mm_node tmp = {}; + int err; + + err = drm_mm_insert_node_in_range_generic(mm, &tmp, + size, 0, 0, + range_start, range_end, + DRM_MM_SEARCH_DEFAULT, + DRM_MM_CREATE_DEFAULT); + if (likely(err == -ENOSPC)) + return true; + + if (!err) { + pr_err("impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n", + tmp.start, tmp.size, range_start, range_end); + drm_mm_remove_node(&tmp); + } else { + pr_err("impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n", + err, -ENOSPC, size, range_start, range_end); + } + + return false; +} + +static bool assert_contiguous_in_range(struct drm_mm *mm, + u64 size, + u64 start, + u64 end) +{ + struct drm_mm_node *node; + unsigned int n; + + if (!expect_insert_in_range_fail(mm, size, start, end)) + return false; + + n = div64_u64(start + size - 1, size); + drm_mm_for_each_node(node, mm) { + if (node->start < start || node->start + node->size > end) { + pr_err("node %d out of range, address [%llx + %llu], range [%llx, %llx]\n", + n, node->start, node->start + node->size, start, end); + return false; + } + + if (node->start != n * size) { + pr_err("node %d out of order, expected start %llx, found %llx\n", + n, n * size, node->start); + return false; + } + + if (node->size != size) { + pr_err("node %d has wrong size, expected size %llx, found %llx\n", + n, size, node->size); + return false; + } + + if (drm_mm_hole_follows(node) && + drm_mm_hole_node_end(node) < end) { + pr_err("node %d is followed by a hole!\n", n); + return false; + } + + n++; + } + + drm_mm_for_each_node_in_range(node, mm, 0, start) { + if (node) { + pr_err("node before start: node=%llx+%llu, start=%llx\n", + node->start, node->size, start); + return false; + } + } + + drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) { + if (node) { + pr_err("node after end: node=%llx+%llu, end=%llx\n", + node->start, node->size, end); + return false; + } + } + + return true; +} + +static int __igt_insert_range(unsigned int count, u64 size, u64 start, u64 end) +{ + const struct insert_mode *mode; + struct drm_mm mm; + struct drm_mm_node *nodes, *node, *next; + unsigned int n, start_n, end_n; + int ret; + + DRM_MM_BUG_ON(!count); + DRM_MM_BUG_ON(!size); + DRM_MM_BUG_ON(end <= start); + + /* Very similar to __igt_insert(), but now instead of populating the + * full range of the drm_mm, we try to fill a small portion of it.
+ */ + + ret = -ENOMEM; + nodes = vzalloc(count * sizeof(*nodes)); + if (!nodes) + goto err; + + ret = -EINVAL; + drm_mm_init(&mm, 0, count * size); + + start_n = div64_u64(start + size - 1, size); + end_n = div64_u64(end - size, size); + + for (mode = insert_modes; mode->name; mode++) { + for (n = start_n; n <= end_n; n++) { + if (!expect_insert_in_range(&mm, &nodes[n], + size, size, n, + start, end, mode)) { + pr_err("%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n", + mode->name, size, n, + start_n, end_n, + start, end); + goto out; + } + } + + if (!assert_contiguous_in_range(&mm, size, start, end)) { + pr_err("%s: range [%llx, %llx] not full after initialisation, size=%llu\n", + mode->name, start, end, size); + goto out; + } + + /* Remove one and reinsert, it should refill itself */ + for (n = start_n; n <= end_n; n++) { + u64 addr = nodes[n].start; + + drm_mm_remove_node(&nodes[n]); + if (!expect_insert_in_range(&mm, &nodes[n], + size, size, n, + start, end, mode)) { + pr_err("%s reinsert failed, step %d\n", mode->name, n); + goto out; + } + + if (nodes[n].start != addr) { + pr_err("%s reinsert node moved, step %d, expected %llx, found %llx\n", + mode->name, n, addr, nodes[n].start); + goto out; + } + } + + if (!assert_contiguous_in_range(&mm, size, start, end)) { + pr_err("%s: range [%llx, %llx] not full after reinsertion, size=%llu\n", + mode->name, start, end, size); + goto out; + } + + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + DRM_MM_BUG_ON(!drm_mm_clean(&mm)); + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + vfree(nodes); +err: + return ret; +} + +static int insert_outside_range(void) +{ + struct drm_mm mm; + const unsigned int start = 1024; + const unsigned int end = 2048; + const unsigned int size = end - start; + + drm_mm_init(&mm, start, size); + + if (!expect_insert_in_range_fail(&mm, 1, 0, start)) + return -EINVAL; + + if (!expect_insert_in_range_fail(&mm, size, + start - size/2, start + (size+1)/2)) + return -EINVAL; + + if (!expect_insert_in_range_fail(&mm, size, + end - (size+1)/2, end + size/2)) + return -EINVAL; + + if (!expect_insert_in_range_fail(&mm, 1, end, end + size)) + return -EINVAL; + + drm_mm_takedown(&mm); + return 0; +} + +static int igt_insert_range(void *ignored) +{ + const unsigned int count = min_t(unsigned int, BIT(13), max_iterations); + unsigned int n; + int ret; + + /* Check that requests outside the bounds of drm_mm are rejected. 
*/ + ret = insert_outside_range(); + if (ret) + return ret; + + for_each_prime_number_from(n, 1, 50) { + const u64 size = BIT_ULL(n); + const u64 max = count * size; + + ret = __igt_insert_range(count, size, 0, max); + if (ret) + return ret; + + ret = __igt_insert_range(count, size, 1, max); + if (ret) + return ret; + + ret = __igt_insert_range(count, size, 0, max - 1); + if (ret) + return ret; + + ret = __igt_insert_range(count, size, 0, max/2); + if (ret) + return ret; + + ret = __igt_insert_range(count, size, max/2, max); + if (ret) + return ret; + + ret = __igt_insert_range(count, size, max/4+1, 3*max/4-1); + if (ret) + return ret; + } + + return 0; +} + +static int igt_align(void *ignored) +{ + const struct insert_mode *mode; + const unsigned int max_count = min(8192u, max_prime); + struct drm_mm mm; + struct drm_mm_node *nodes, *node, *next; + unsigned int prime; + int ret = -EINVAL; + + /* For each of the possible insertion modes, we pick a few + * arbitrary alignments and check that the inserted node + * meets our requirements. + */ + + nodes = vzalloc(max_count * sizeof(*nodes)); + if (!nodes) + goto err; + + drm_mm_init(&mm, 1, U64_MAX - 2); + + for (mode = insert_modes; mode->name; mode++) { + unsigned int i = 0; + + for_each_prime_number_from(prime, 1, max_count) { + u64 size = next_prime_number(prime); + + if (!expect_insert(&mm, &nodes[i], + size, prime, i, + mode)) { + pr_err("%s insert failed with alignment=%d\n", + mode->name, prime); + goto out; + } + + i++; + } + + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + DRM_MM_BUG_ON(!drm_mm_clean(&mm)); + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + vfree(nodes); +err: + return ret; +} + +static int igt_align_pot(int max) +{ + struct drm_mm mm; + struct drm_mm_node *node, *next; + int bit; + int ret = -EINVAL; + + /* Check that we can align to the full u64 address space */ + + drm_mm_init(&mm, 1, U64_MAX - 2); + + for (bit = max - 1; bit; bit--) { + u64 align, size; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + ret = -ENOMEM; + goto out; + } + + align = BIT_ULL(bit); + size = BIT_ULL(bit-1) + 1; + if (!expect_insert(&mm, node, + size, align, bit, + &insert_modes[0])) { + pr_err("insert failed with alignment=%llx [%d]\n", + align, bit); + goto out; + } + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) { + drm_mm_remove_node(node); + kfree(node); + } + drm_mm_takedown(&mm); + return ret; +} + +static int igt_align32(void *ignored) +{ + return igt_align_pot(32); +} + +static int igt_align64(void *ignored) +{ + return igt_align_pot(64); +} + +static void show_scan(const struct drm_mm_scan *scan) +{ + pr_info("scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n", + scan->hit_start, scan->hit_end, + scan->size, scan->alignment, scan->color); +} + +static void show_holes(const struct drm_mm *mm, int count) +{ + u64 hole_start, hole_end; + struct drm_mm_node *hole; + + drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { + struct drm_mm_node *next = list_next_entry(hole, node_list); + const char *node1 = NULL, *node2 = NULL; + + if (hole->allocated) + node1 = kasprintf(GFP_KERNEL, + "[%llx + %lld, color=%ld], ", + hole->start, hole->size, hole->color); + + if (next->allocated) + node2 = kasprintf(GFP_KERNEL, + ", [%llx + %lld, color=%ld]", + next->start, next->size, next->color); + + pr_info("%sHole [%llx - %llx, size %lld]%s\n", + node1, + hole_start, hole_end, hole_end - hole_start, + node2); +
+ kfree(node2); + kfree(node1); + + if (!--count) + break; + } +} + +struct evict_node { + struct drm_mm_node node; + struct list_head link; +}; + +static bool evict_nodes(struct drm_mm_scan *scan, + struct evict_node *nodes, + unsigned int *order, + unsigned int count, + bool use_color, + struct list_head *evict_list) +{ + struct evict_node *e, *en; + unsigned int i; + + for (i = 0; i < count; i++) { + e = &nodes[order ? order[i] : i]; + list_add(&e->link, evict_list); + if (drm_mm_scan_add_block(scan, &e->node)) + break; + } + list_for_each_entry_safe(e, en, evict_list, link) { + if (!drm_mm_scan_remove_block(scan, &e->node)) + list_del(&e->link); + } + if (list_empty(evict_list)) { + pr_err("Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n", + scan->size, count, scan->alignment, scan->color); + return false; + } + + list_for_each_entry(e, evict_list, link) + drm_mm_remove_node(&e->node); + + if (use_color) { + struct drm_mm_node *node; + + while ((node = drm_mm_scan_color_evict(scan))) { + e = container_of(node, typeof(*e), node); + drm_mm_remove_node(&e->node); + list_add(&e->link, evict_list); + } + } else { + if (drm_mm_scan_color_evict(scan)) { + pr_err("drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n"); + return false; + } + } + + return true; +} + +static bool evict_nothing(struct drm_mm *mm, + unsigned int total_size, + struct evict_node *nodes) +{ + struct drm_mm_scan scan; + LIST_HEAD(evict_list); + struct evict_node *e; + struct drm_mm_node *node; + unsigned int n; + + drm_mm_scan_init(&scan, mm, 1, 0, 0, 0); + for (n = 0; n < total_size; n++) { + e = &nodes[n]; + list_add(&e->link, &evict_list); + drm_mm_scan_add_block(&scan, &e->node); + } + list_for_each_entry(e, &evict_list, link) + drm_mm_scan_remove_block(&scan, &e->node); + + for (n = 0; n < total_size; n++) { + e = &nodes[n]; + + if (!drm_mm_node_allocated(&e->node)) { + pr_err("node[%d] no longer allocated!\n", n); + return false; + } + + e->link.next = NULL; + } + + drm_mm_for_each_node(node, mm) { + e = container_of(node, typeof(*e), node); + e->link.next = &e->link; + } + + for (n = 0; n < total_size; n++) { + e = &nodes[n]; + + if (!e->link.next) { + pr_err("node[%d] no longer connected!\n", n); + return false; + } + } + + return assert_continuous(mm, nodes[0].node.size); +} + +static bool evict_everything(struct drm_mm *mm, + unsigned int total_size, + struct evict_node *nodes) +{ + struct drm_mm_scan scan; + LIST_HEAD(evict_list); + struct evict_node *e; + unsigned int n; + int err; + + drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0); + for (n = 0; n < total_size; n++) { + e = &nodes[n]; + list_add(&e->link, &evict_list); + if (drm_mm_scan_add_block(&scan, &e->node)) + break; + } + list_for_each_entry(e, &evict_list, link) { + if (!drm_mm_scan_remove_block(&scan, &e->node)) { + pr_err("Node %lld not marked for eviction!\n", + e->node.start); + list_del(&e->link); + } + } + + list_for_each_entry(e, &evict_list, link) + drm_mm_remove_node(&e->node); + + if (!assert_one_hole(mm, 0, total_size)) + return false; + + list_for_each_entry(e, &evict_list, link) { + err = drm_mm_reserve_node(mm, &e->node); + if (err) { + pr_err("Failed to reinsert node after eviction: start=%llx\n", + e->node.start); + return false; + } + } + + return assert_continuous(mm, nodes[0].node.size); +} + +static int evict_something(struct drm_mm *mm, + u64 range_start, u64 range_end, + struct evict_node *nodes, + unsigned int *order, + unsigned int count, + unsigned int size, + unsigned int alignment, 
+ const struct insert_mode *mode) +{ + struct drm_mm_scan scan; + LIST_HEAD(evict_list); + struct evict_node *e; + struct drm_mm_node tmp; + int err; + + drm_mm_scan_init_with_range(&scan, mm, + size, alignment, 0, + range_start, range_end, + mode->create_flags); + if (!evict_nodes(&scan, + nodes, order, count, false, + &evict_list)) + return -EINVAL; + + memset(&tmp, 0, sizeof(tmp)); + err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0, + mode->search_flags, + mode->create_flags); + if (err) { + pr_err("Failed to insert into eviction hole: size=%d, align=%d\n", + size, alignment); + show_scan(&scan); + show_holes(mm, 3); + return err; + } + + if (tmp.start < range_start || tmp.start + tmp.size > range_end) { + pr_err("Inserted [address=%llu + %llu] did not fit into the requested range [%llu, %llu]\n", + tmp.start, tmp.size, range_start, range_end); + err = -EINVAL; + } + + if (!assert_node(&tmp, mm, size, alignment, 0) || + drm_mm_hole_follows(&tmp)) { + pr_err("Inserted node did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n", + tmp.size, size, + alignment, misalignment(&tmp, alignment), + tmp.start, drm_mm_hole_follows(&tmp)); + err = -EINVAL; + } + + drm_mm_remove_node(&tmp); + if (err) + return err; + + list_for_each_entry(e, &evict_list, link) { + err = drm_mm_reserve_node(mm, &e->node); + if (err) { + pr_err("Failed to reinsert node after eviction: start=%llx\n", + e->node.start); + return err; + } + } + + if (!assert_continuous(mm, nodes[0].node.size)) { + pr_err("range is no longer continuous\n"); + return -EINVAL; + } + + return 0; +} + +static int igt_evict(void *ignored) +{ + DRM_RND_STATE(prng, random_seed); + const unsigned int size = 8192; + const struct insert_mode *mode; + struct drm_mm mm; + struct evict_node *nodes; + struct drm_mm_node *node, *next; + unsigned int *order, n; + int ret, err; + + /* Here we populate a full drm_mm and then try and insert a new node + * by evicting other nodes in a random order. The drm_mm_scan should + * pick the first matching hole it finds from the random list. We + * repeat that for different allocation strategies, alignments and + * sizes to try and stress the hole finder.
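+ *
+ * The scan protocol exercised here (see evict_nodes() above) is:
+ * initialise with drm_mm_scan_init_with_range(), feed candidates to
+ * drm_mm_scan_add_block() until it returns true (a big enough hole
+ * has been found), then call drm_mm_scan_remove_block() on every
+ * candidate: the nodes it rejects are not part of the hole and stay
+ * put, while the rest are evicted to free the hole for the new node.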
+ */ + + ret = -ENOMEM; + nodes = vzalloc(size * sizeof(*nodes)); + if (!nodes) + goto err; + + order = drm_random_order(size, &prng); + if (!order) + goto err_nodes; + + ret = -EINVAL; + drm_mm_init(&mm, 0, size); + for (n = 0; n < size; n++) { + err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0, + DRM_MM_SEARCH_DEFAULT); + if (err) { + pr_err("insert failed, step %d\n", n); + ret = err; + goto out; + } + } + + /* First check that using the scanner doesn't break the mm */ + if (!evict_nothing(&mm, size, nodes)) { + pr_err("evict_nothing() failed\n"); + goto out; + } + if (!evict_everything(&mm, size, nodes)) { + pr_err("evict_everything() failed\n"); + goto out; + } + + for (mode = evict_modes; mode->name; mode++) { + for (n = 1; n <= size; n <<= 1) { + drm_random_reorder(order, size, &prng); + err = evict_something(&mm, 0, U64_MAX, + nodes, order, size, + n, 1, + mode); + if (err) { + pr_err("%s evict_something(size=%u) failed\n", + mode->name, n); + ret = err; + goto out; + } + } + + for (n = 1; n < size; n <<= 1) { + drm_random_reorder(order, size, &prng); + err = evict_something(&mm, 0, U64_MAX, + nodes, order, size, + size/2, n, + mode); + if (err) { + pr_err("%s evict_something(size=%u, alignment=%u) failed\n", + mode->name, size/2, n); + ret = err; + goto out; + } + } + + for_each_prime_number_from(n, 1, min(size, max_prime)) { + unsigned int nsize = (size - n + 1) / 2; + + DRM_MM_BUG_ON(!nsize); + + drm_random_reorder(order, size, &prng); + err = evict_something(&mm, 0, U64_MAX, + nodes, order, size, + nsize, n, + mode); + if (err) { + pr_err("%s evict_something(size=%u, alignment=%u) failed\n", + mode->name, nsize, n); + ret = err; + goto out; + } + } + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + kfree(order); +err_nodes: + vfree(nodes); +err: + return ret; +} + +static int igt_evict_range(void *ignored) +{ + DRM_RND_STATE(prng, random_seed); + const unsigned int size = 8192; + const unsigned int range_size = size / 2; + const unsigned int range_start = size / 4; + const unsigned int range_end = range_start + range_size; + const struct insert_mode *mode; + struct drm_mm mm; + struct evict_node *nodes; + struct drm_mm_node *node, *next; + unsigned int *order, n; + int ret, err; + + /* Like igt_evict() but now we are limiting the search to a + * small portion of the full drm_mm. 
+ */ + + ret = -ENOMEM; + nodes = vzalloc(size * sizeof(*nodes)); + if (!nodes) + goto err; + + order = drm_random_order(size, &prng); + if (!order) + goto err_nodes; + + ret = -EINVAL; + drm_mm_init(&mm, 0, size); + for (n = 0; n < size; n++) { + err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0, + DRM_MM_SEARCH_DEFAULT); + if (err) { + pr_err("insert failed, step %d\n", n); + ret = err; + goto out; + } + } + + for (mode = evict_modes; mode->name; mode++) { + for (n = 1; n <= range_size; n <<= 1) { + drm_random_reorder(order, size, &prng); + err = evict_something(&mm, range_start, range_end, + nodes, order, size, + n, 1, + mode); + if (err) { + pr_err("%s evict_something(size=%u) failed with range [%u, %u]\n", + mode->name, n, range_start, range_end); + goto out; + } + } + + for (n = 1; n <= range_size; n <<= 1) { + drm_random_reorder(order, size, &prng); + err = evict_something(&mm, range_start, range_end, + nodes, order, size, + range_size/2, n, + mode); + if (err) { + pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", + mode->name, range_size/2, n, range_start, range_end); + goto out; + } + } + + for_each_prime_number_from(n, 1, min(range_size, max_prime)) { + unsigned int nsize = (range_size - n + 1) / 2; + + DRM_MM_BUG_ON(!nsize); + + drm_random_reorder(order, size, &prng); + err = evict_something(&mm, range_start, range_end, + nodes, order, size, + nsize, n, + mode); + if (err) { + pr_err("%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n", + mode->name, nsize, n, range_start, range_end); + goto out; + } + } + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + kfree(order); +err_nodes: + vfree(nodes); +err: + return ret; +} + +static unsigned int node_index(const struct drm_mm_node *node) +{ + return div64_u64(node->start, node->size); +} + +static int igt_topdown(void *ignored) +{ + const struct insert_mode *topdown = &insert_modes[TOPDOWN]; + DRM_RND_STATE(prng, random_seed); + const unsigned int count = 8192; + unsigned int size; + unsigned long *bitmap = NULL; + struct drm_mm mm; + struct drm_mm_node *nodes, *node, *next; + unsigned int *order, n, m, o = 0; + int ret; + + /* When allocating top-down, we expect to be returned a node + * from a suitable hole at the top of the drm_mm. We check that + * the returned node does match the highest available slot. 
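+ *
+ * As a concrete example (numbers illustrative): with the mm spanning
+ * [0, 4096) and a top-down request for 1024, the node handed back
+ * should span [3072, 4096), leaving [0, 3072) as the single hole,
+ * exactly what the assert_one_hole() check below confirms step by step.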
+ */ + + ret = -ENOMEM; + nodes = vzalloc(count * sizeof(*nodes)); + if (!nodes) + goto err; + + bitmap = kzalloc(count / BITS_PER_LONG * sizeof(unsigned long), + GFP_TEMPORARY); + if (!bitmap) + goto err_nodes; + + order = drm_random_order(count, &prng); + if (!order) + goto err_bitmap; + + ret = -EINVAL; + for (size = 1; size <= 64; size <<= 1) { + drm_mm_init(&mm, 0, size*count); + for (n = 0; n < count; n++) { + if (!expect_insert(&mm, &nodes[n], + size, 0, n, + topdown)) { + pr_err("insert failed, size %u step %d\n", size, n); + goto out; + } + + if (drm_mm_hole_follows(&nodes[n])) { + pr_err("hole after topdown insert %d, start=%llx, size=%u\n", + n, nodes[n].start, size); + goto out; + } + + if (!assert_one_hole(&mm, 0, size*(count - n - 1))) + goto out; + } + + if (!assert_continuous(&mm, size)) + goto out; + + drm_random_reorder(order, count, &prng); + for_each_prime_number_from(n, 1, min(count, max_prime)) { + for (m = 0; m < n; m++) { + node = &nodes[order[(o + m) % count]]; + drm_mm_remove_node(node); + __set_bit(node_index(node), bitmap); + } + + for (m = 0; m < n; m++) { + unsigned int last; + + node = &nodes[order[(o + m) % count]]; + if (!expect_insert(&mm, node, + size, 0, 0, + topdown)) { + pr_err("insert failed, step %d/%d\n", m, n); + goto out; + } + + if (drm_mm_hole_follows(node)) { + pr_err("hole after topdown insert %d/%d, start=%llx\n", + m, n, node->start); + goto out; + } + + last = find_last_bit(bitmap, count); + if (node_index(node) != last) { + pr_err("node %d/%d, size %d, not inserted into topmost hole, expected %d, found %d\n", + m, n, size, last, node_index(node)); + goto out; + } + + __clear_bit(last, bitmap); + } + + DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count); + + o += n; + } + + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + DRM_MM_BUG_ON(!drm_mm_clean(&mm)); + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + kfree(order); +err_bitmap: + kfree(bitmap); +err_nodes: + vfree(nodes); +err: + return ret; +} + +static void separate_adjacent_colors(const struct drm_mm_node *node, + unsigned long color, + u64 *start, + u64 *end) +{ + if (node->allocated && node->color != color) + ++*start; + + node = list_next_entry(node, node_list); + if (node->allocated && node->color != color) + --*end; +} + +static bool colors_abutt(const struct drm_mm_node *node) +{ + if (!drm_mm_hole_follows(node) && + list_next_entry(node, node_list)->allocated) { + pr_err("colors abut; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n", + node->color, node->start, node->size, + list_next_entry(node, node_list)->color, + list_next_entry(node, node_list)->start, + list_next_entry(node, node_list)->size); + return true; + } + + return false; +} + +static int igt_color(void *ignored) +{ + const unsigned int count = min(4096u, max_iterations); + const struct insert_mode *mode; + struct drm_mm mm; + struct drm_mm_node *node, *nn; + unsigned int n; + int ret = -EINVAL, err; + + /* Color adjustment complicates everything. First we just check + * that when we insert a node we apply any color_adjust callback. + * The callback we use should ensure that there is a gap between + * any two nodes, and so after each insertion we check that those + * holes are inserted and that they are preserved.
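+ *
+ * With separate_adjacent_colors() installed, each candidate hole is
+ * shrunk by one unit at whichever end touches a node of a different
+ * color, so differently colored neighbours always keep at least a
+ * one-unit gap; colors_abutt() flags any violation of that rule.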
+ */ + + drm_mm_init(&mm, 0, U64_MAX); + + for (n = 1; n <= count; n++) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + ret = -ENOMEM; + goto out; + } + + if (!expect_insert(&mm, node, + n, 0, n, + &insert_modes[0])) { + pr_err("insert failed, step %d\n", n); + kfree(node); + goto out; + } + } + + drm_mm_for_each_node_safe(node, nn, &mm) { + if (node->color != node->size) { + pr_err("invalid color stored: expected %lld, found %ld\n", + node->size, node->color); + + goto out; + } + + drm_mm_remove_node(node); + kfree(node); + } + + /* Now, let's start experimenting with applying a color callback */ + mm.color_adjust = separate_adjacent_colors; + for (mode = insert_modes; mode->name; mode++) { + u64 last; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + ret = -ENOMEM; + goto out; + } + + node->size = 1 + 2*count; + node->color = node->size; + + err = drm_mm_reserve_node(&mm, node); + if (err) { + pr_err("initial reserve failed!\n"); + ret = err; + goto out; + } + + last = node->start + node->size; + + for (n = 1; n <= count; n++) { + int rem; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + ret = -ENOMEM; + goto out; + } + + node->start = last; + node->size = n + count; + node->color = node->size; + + err = drm_mm_reserve_node(&mm, node); + if (err != -ENOSPC) { + pr_err("reserve %d did not report color overlap! err=%d\n", + n, err); + goto out; + } + + node->start += n + 1; + rem = misalignment(node, n + count); + node->start += n + count - rem; + + err = drm_mm_reserve_node(&mm, node); + if (err) { + pr_err("reserve %d failed, err=%d\n", n, err); + ret = err; + goto out; + } + + last = node->start + node->size; + } + + for (n = 1; n <= count; n++) { + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) { + ret = -ENOMEM; + goto out; + } + + if (!expect_insert(&mm, node, + n, n, n, + mode)) { + pr_err("%s insert failed, step %d\n", + mode->name, n); + kfree(node); + goto out; + } + } + + drm_mm_for_each_node_safe(node, nn, &mm) { + u64 rem; + + if (node->color != node->size) { + pr_err("%s invalid color stored: expected %lld, found %ld\n", + mode->name, node->size, node->color); + + goto out; + } + + if (colors_abutt(node)) + goto out; + + div64_u64_rem(node->start, node->size, &rem); + if (rem) { + pr_err("%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n", + mode->name, node->start, node->size, rem); + goto out; + } + + drm_mm_remove_node(node); + kfree(node); + } + } + + ret = 0; +out: + drm_mm_for_each_node_safe(node, nn, &mm) { + drm_mm_remove_node(node); + kfree(node); + } + drm_mm_takedown(&mm); + return ret; +} + +static int evict_color(struct drm_mm *mm, + u64 range_start, u64 range_end, + struct evict_node *nodes, + unsigned int *order, + unsigned int count, + unsigned int size, + unsigned int alignment, + unsigned long color, + const struct insert_mode *mode) +{ + struct drm_mm_scan scan; + LIST_HEAD(evict_list); + struct evict_node *e; + struct drm_mm_node tmp; + int err; + + drm_mm_scan_init_with_range(&scan, mm, + size, alignment, color, + range_start, range_end, + mode->create_flags); + if (!evict_nodes(&scan, + nodes, order, count, true, + &evict_list)) + return -EINVAL; + + memset(&tmp, 0, sizeof(tmp)); + err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color, + mode->search_flags, + mode->create_flags); + if (err) { + pr_err("Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n", + size, alignment, color, err); + show_scan(&scan); + show_holes(mm, 3); + return err; 
+ } + + if (tmp.start < range_start || tmp.start + tmp.size > range_end) { + pr_err("Inserted [address=%llu + %llu] did not fit into the requested range [%llu, %llu]\n", + tmp.start, tmp.size, range_start, range_end); + err = -EINVAL; + } + + if (colors_abutt(&tmp)) + err = -EINVAL; + + if (!assert_node(&tmp, mm, size, alignment, color)) { + pr_err("Inserted node did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n", + tmp.size, size, + alignment, misalignment(&tmp, alignment), tmp.start); + err = -EINVAL; + } + + drm_mm_remove_node(&tmp); + if (err) + return err; + + list_for_each_entry(e, &evict_list, link) { + err = drm_mm_reserve_node(mm, &e->node); + if (err) { + pr_err("Failed to reinsert node after eviction: start=%llx\n", + e->node.start); + return err; + } + } + + return 0; +} + +static int igt_color_evict(void *ignored) +{ + DRM_RND_STATE(prng, random_seed); + const unsigned int total_size = min(8192u, max_iterations); + const struct insert_mode *mode; + unsigned long color = 0; + struct drm_mm mm; + struct evict_node *nodes; + struct drm_mm_node *node, *next; + unsigned int *order, n; + int ret, err; + + /* Check that the drm_mm_scan also honours color adjustment when + * choosing its victims to create a hole. Our color_adjust does not + * allow two nodes to be placed together without an intervening hole, + * enlarging the set of victims that must be evicted. + */ + + ret = -ENOMEM; + nodes = vzalloc(total_size * sizeof(*nodes)); + if (!nodes) + goto err; + + order = drm_random_order(total_size, &prng); + if (!order) + goto err_nodes; + + ret = -EINVAL; + drm_mm_init(&mm, 0, 2*total_size - 1); + mm.color_adjust = separate_adjacent_colors; + for (n = 0; n < total_size; n++) { + if (!expect_insert(&mm, &nodes[n].node, + 1, 0, color++, + &insert_modes[0])) { + pr_err("insert failed, step %d\n", n); + goto out; + } + } + + for (mode = evict_modes; mode->name; mode++) { + for (n = 1; n <= total_size; n <<= 1) { + drm_random_reorder(order, total_size, &prng); + err = evict_color(&mm, 0, U64_MAX, + nodes, order, total_size, + n, 1, color++, + mode); + if (err) { + pr_err("%s evict_color(size=%u) failed\n", + mode->name, n); + goto out; + } + } + + for (n = 1; n < total_size; n <<= 1) { + drm_random_reorder(order, total_size, &prng); + err = evict_color(&mm, 0, U64_MAX, + nodes, order, total_size, + total_size/2, n, color++, + mode); + if (err) { + pr_err("%s evict_color(size=%u, alignment=%u) failed\n", + mode->name, total_size/2, n); + goto out; + } + } + + for_each_prime_number_from(n, 1, min(total_size, max_prime)) { + unsigned int nsize = (total_size - n + 1) / 2; + + DRM_MM_BUG_ON(!nsize); + + drm_random_reorder(order, total_size, &prng); + err = evict_color(&mm, 0, U64_MAX, + nodes, order, total_size, + nsize, n, color++, + mode); + if (err) { + pr_err("%s evict_color(size=%u, alignment=%u) failed\n", + mode->name, nsize, n); + goto out; + } + } + } + + ret = 0; +out: + if (ret) + drm_mm_debug_table(&mm, __func__); + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + kfree(order); +err_nodes: + vfree(nodes); +err: + return ret; +} + +static int igt_color_evict_range(void *ignored) +{ + DRM_RND_STATE(prng, random_seed); + const unsigned int total_size = 8192; + const unsigned int range_size = total_size / 2; + const unsigned int range_start = total_size / 4; + const unsigned int range_end = range_start + range_size; + const struct insert_mode *mode; + unsigned long color = 0; + struct drm_mm mm; + struct evict_node
*nodes; + struct drm_mm_node *node, *next; + unsigned int *order, n; + int ret, err; + + /* Like igt_color_evict(), but limited to a small portion of the full + * drm_mm range. + */ + + ret = -ENOMEM; + nodes = vzalloc(total_size * sizeof(*nodes)); + if (!nodes) + goto err; + + order = drm_random_order(total_size, &prng); + if (!order) + goto err_nodes; + + ret = -EINVAL; + drm_mm_init(&mm, 0, 2*total_size - 1); + mm.color_adjust = separate_adjacent_colors; + for (n = 0; n < total_size; n++) { + if (!expect_insert(&mm, &nodes[n].node, + 1, 0, color++, + &insert_modes[0])) { + pr_err("insert failed, step %d\n", n); + goto out; + } + } + + for (mode = evict_modes; mode->name; mode++) { + for (n = 1; n <= range_size; n <<= 1) { + drm_random_reorder(order, range_size, &prng); + err = evict_color(&mm, range_start, range_end, + nodes, order, total_size, + n, 1, color++, + mode); + if (err) { + pr_err("%s evict_color(size=%u) failed for range [%x, %x]\n", + mode->name, n, range_start, range_end); + goto out; + } + } + + for (n = 1; n < range_size; n <<= 1) { + drm_random_reorder(order, total_size, &prng); + err = evict_color(&mm, range_start, range_end, + nodes, order, total_size, + range_size/2, n, color++, + mode); + if (err) { + pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", + mode->name, range_size/2, n, range_start, range_end); + goto out; + } + } + + for_each_prime_number_from(n, 1, min(range_size, max_prime)) { + unsigned int nsize = (range_size - n + 1) / 2; + + DRM_MM_BUG_ON(!nsize); + + drm_random_reorder(order, total_size, &prng); + err = evict_color(&mm, range_start, range_end, + nodes, order, total_size, + nsize, n, color++, + mode); + if (err) { + pr_err("%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n", + mode->name, nsize, n, range_start, range_end); + goto out; + } + } + } + + ret = 0; +out: + if (ret) + drm_mm_debug_table(&mm, __func__); + drm_mm_for_each_node_safe(node, next, &mm) + drm_mm_remove_node(node); + drm_mm_takedown(&mm); + kfree(order); +err_nodes: + vfree(nodes); +err: + return ret; +} + +#include "drm_selftest.c" + +static int __init test_drm_mm_init(void) +{ + int err; + + while (!random_seed) + random_seed = get_random_int(); + + pr_info("Testing DRM range manager (struct drm_mm), with random_seed=0x%x max_iterations=%u max_prime=%u\n", + random_seed, max_iterations, max_prime); + err = run_selftests(selftests, ARRAY_SIZE(selftests), NULL); + + return err > 0 ?
0 : err; +} + +static void __exit test_drm_mm_exit(void) +{ +} + +module_init(test_drm_mm_init); +module_exit(test_drm_mm_exit); + +module_param(random_seed, uint, 0400); +module_param(max_iterations, uint, 0400); +module_param(max_prime, uint, 0400); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c index dddbdd62bed0..445476551695 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c @@ -174,7 +174,7 @@ static void shmob_drm_crtc_start(struct shmob_drm_crtc *scrtc) if (scrtc->started) return; - format = shmob_drm_format_info(crtc->primary->fb->pixel_format); + format = shmob_drm_format_info(crtc->primary->fb->format->format); if (WARN_ON(format == NULL)) return; @@ -376,10 +376,10 @@ static int shmob_drm_crtc_mode_set(struct drm_crtc *crtc, const struct shmob_drm_format_info *format; void *cache; - format = shmob_drm_format_info(crtc->primary->fb->pixel_format); + format = shmob_drm_format_info(crtc->primary->fb->format->format); if (format == NULL) { dev_dbg(sdev->dev, "mode_set: unsupported format %08x\n", - crtc->primary->fb->pixel_format); + crtc->primary->fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h index 38ed4ff8aaf2..818b31549ddc 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.h +++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.h @@ -16,6 +16,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> struct backlight_device; struct shmob_drm_device; diff --git a/drivers/gpu/drm/shmobile/shmob_drm_plane.c b/drivers/gpu/drm/shmobile/shmob_drm_plane.c index 1805bb23b113..2023a93cee2b 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_plane.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_plane.c @@ -183,10 +183,10 @@ shmob_drm_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, struct shmob_drm_device *sdev = plane->dev->dev_private; const struct shmob_drm_format_info *format; - format = shmob_drm_format_info(fb->pixel_format); + format = shmob_drm_format_info(fb->format->format); if (format == NULL) { dev_dbg(sdev->dev, "update_plane: unsupported format %08x\n", - fb->pixel_format); + fb->format->format); return -EINVAL; } diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index e8c1ed08a9f7..411dc6ec976e 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -478,14 +478,13 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data) return err; } - err = drm_bridge_attach(drm_dev, bridge); + err = drm_bridge_attach(encoder, bridge, NULL); if (err) { DRM_ERROR("Failed to attach bridge\n"); return err; } dvo->bridge = bridge; - encoder->bridge = bridge; connector->encoder = encoder; dvo->encoder = encoder; diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 81df3097b545..877d053d86f4 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -636,10 +636,10 @@ static int sti_gdp_atomic_check(struct drm_plane *drm_plane, src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX); src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX); - format = sti_gdp_fourcc2format(fb->pixel_format); + format = sti_gdp_fourcc2format(fb->format->format); if (format == -1) { DRM_ERROR("Format not supported by GDP %.4s\n", - (char *)&fb->pixel_format); + (char *)&fb->format->format); return -EINVAL; } 
@@ -745,7 +745,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, /* build the top field */ top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE; top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC; - format = sti_gdp_fourcc2format(fb->pixel_format); + format = sti_gdp_fourcc2format(fb->format->format); top_field->gam_gdp_ctl |= format; top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format); top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE; @@ -753,11 +753,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane, cma_obj = drm_fb_cma_get_gem_obj(fb, 0); DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, - (char *)&fb->pixel_format, + (char *)&fb->format->format, (unsigned long)cma_obj->paddr); /* pixel memory location */ - bpp = drm_format_plane_cpp(fb->pixel_format, 0); + bpp = fb->format->cpp[0]; top_field->gam_gdp_pml = (u32)cma_obj->paddr + fb->offsets[0]; top_field->gam_gdp_pml += src_x * bpp; top_field->gam_gdp_pml += src_y * fb->pitches[0]; diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 96f336dd0e29..66d37d78152a 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -707,9 +707,8 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) bridge->driver_private = hda; bridge->funcs = &sti_hda_bridge_funcs; - drm_bridge_attach(drm_dev, bridge); + drm_bridge_attach(encoder, bridge, NULL); - encoder->bridge = bridge; connector->encoder = encoder; drm_connector = (struct drm_connector *)connector; diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 376b0763c874..f0af1ae82ee9 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1308,9 +1308,8 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) bridge->driver_private = hdmi; bridge->funcs = &sti_hdmi_bridge_funcs; - drm_bridge_attach(drm_dev, bridge); + drm_bridge_attach(encoder, bridge, NULL); - encoder->bridge = bridge; connector->encoder = encoder; drm_connector = (struct drm_connector *)connector; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index f88130f2eb48..becf10d255c4 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1147,7 +1147,7 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane, cma_obj = drm_fb_cma_get_gem_obj(fb, 0); DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id, - (char *)&fb->pixel_format, + (char *)&fb->format->format, (unsigned long)cma_obj->paddr); /* Buffer planes address */ diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 2e08f969bb64..a278e1f44661 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -189,7 +189,8 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend, DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n", interlaced ? 
"on" : "off"); - ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val); + ret = sun4i_backend_drm_format_to_layer(plane, fb->format->format, + &val); if (ret) { DRM_DEBUG_DRIVER("Invalid format\n"); return val; @@ -218,7 +219,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr); /* Compute the start of the displayed memory */ - bpp = drm_format_plane_cpp(fb->pixel_format, 0); + bpp = fb->format->cpp[0]; paddr = gem->paddr + fb->offsets[0]; paddr += (state->src_x >> 16) * bpp; paddr += (state->src_y >> 16) * fb->pitches[0]; diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index f5e86fe7750e..757208f51731 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -208,6 +208,7 @@ int sun4i_rgb_init(struct drm_device *drm) struct sun4i_drv *drv = drm->dev_private; struct sun4i_tcon *tcon = drv->tcon; struct drm_encoder *encoder; + struct drm_bridge *bridge; struct sun4i_rgb *rgb; int ret; @@ -218,8 +219,8 @@ int sun4i_rgb_init(struct drm_device *drm) encoder = &rgb->encoder; tcon->panel = sun4i_tcon_find_panel(tcon->dev->of_node); - encoder->bridge = sun4i_tcon_find_bridge(tcon->dev->of_node); - if (IS_ERR(tcon->panel) && IS_ERR(encoder->bridge)) { + bridge = sun4i_tcon_find_bridge(tcon->dev->of_node); + if (IS_ERR(tcon->panel) && IS_ERR(bridge)) { dev_info(drm->dev, "No panel or bridge found... RGB output disabled\n"); return 0; } @@ -260,16 +261,12 @@ int sun4i_rgb_init(struct drm_device *drm) } } - if (!IS_ERR(encoder->bridge)) { - encoder->bridge->encoder = &rgb->encoder; - - ret = drm_bridge_attach(drm, encoder->bridge); + if (!IS_ERR(bridge)) { + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) { dev_err(drm->dev, "Couldn't attach our bridge\n"); goto err_cleanup_connector; } - } else { - encoder->bridge = NULL; } return 0; diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 4010d69cbd08..7561a95a54e3 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -511,7 +511,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, if (!state->crtc) return 0; - err = tegra_dc_format(state->fb->pixel_format, &plane_state->format, + err = tegra_dc_format(state->fb->format->format, &plane_state->format, &plane_state->swap); if (err < 0) return err; @@ -531,7 +531,7 @@ static int tegra_plane_atomic_check(struct drm_plane *plane, * error out if the user tries to display a framebuffer with such a * configuration. 
*/ - if (drm_format_num_planes(state->fb->pixel_format) > 2) { + if (state->fb->format->num_planes > 2) { if (state->fb->pitches[2] != state->fb->pitches[1]) { DRM_ERROR("unsupported UV-plane configuration\n"); return -EINVAL; @@ -568,7 +568,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, window.dst.y = plane->state->crtc_y; window.dst.w = plane->state->crtc_w; window.dst.h = plane->state->crtc_h; - window.bits_per_pixel = fb->bits_per_pixel; + window.bits_per_pixel = fb->format->cpp[0] * 8; window.bottom_up = tegra_fb_is_bottom_up(fb); /* copy from state */ @@ -576,7 +576,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, window.format = state->format; window.swap = state->swap; - for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { + for (i = 0; i < fb->format->num_planes; i++) { struct tegra_bo *bo = tegra_fb_get_plane(fb, i); window.base[i] = bo->paddr + fb->offsets[i]; diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index b8be3ee4d3b8..e289dbc6ad82 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -875,8 +875,9 @@ static int tegra_debugfs_framebuffers(struct seq_file *s, void *data) list_for_each_entry(fb, &drm->mode_config.fb_list, head) { seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n", - fb->base.id, fb->width, fb->height, fb->depth, - fb->bits_per_pixel, + fb->base.id, fb->width, fb->height, + fb->format->depth, + fb->format->cpp[0] * 8, drm_framebuffer_read_refcount(fb)); } diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 0ddcce1b420d..5205790dd679 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -17,6 +17,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +#include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_fixed.h> diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index e4a5ab0a9677..8df7783cecc2 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -32,7 +32,7 @@ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, { struct tegra_fb *fb = to_tegra_fb(framebuffer); - if (index >= drm_format_num_planes(framebuffer->pixel_format)) + if (index >= framebuffer->format->num_planes) return NULL; return fb->planes[index]; @@ -114,7 +114,7 @@ static struct tegra_fb *tegra_fb_alloc(struct drm_device *drm, fb->num_planes = num_planes; - drm_helper_mode_fill_fb_struct(&fb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(drm, &fb->base, mode_cmd); for (i = 0; i < fb->num_planes; i++) fb->planes[i] = planes[i]; @@ -246,7 +246,7 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, info->flags = FBINFO_FLAG_DEFAULT; info->fbops = &tegra_fb_ops; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, helper, fb->width, fb->height); offset = info->var.xoffset * bytes_per_pixel + diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 725dffad5640..3a763f7cb743 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -91,7 +91,7 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) start = gem->paddr + fb->offsets[0] + crtc->y * fb->pitches[0] + - crtc->x * drm_format_plane_cpp(fb->pixel_format, 0); + crtc->x * fb->format->cpp[0]; end = start + (crtc->mode.vdisplay * fb->pitches[0]); @@ 
-399,7 +399,7 @@ static void tilcdc_crtc_set_mode(struct drm_crtc *crtc) if (info->tft_alt_mode) reg |= LCDC_TFT_ALT_ENABLE; if (priv->rev == 2) { - switch (fb->pixel_format) { + switch (fb->format->format) { case DRM_FORMAT_BGR565: case DRM_FORMAT_RGB565: break; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index c67d7cd7d57e..b0dd5e8634ae 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -167,10 +167,8 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge) int ret; priv->external_encoder->possible_crtcs = BIT(0); - priv->external_encoder->bridge = bridge; - bridge->encoder = priv->external_encoder; - ret = drm_bridge_attach(ddev, bridge); + ret = drm_bridge_attach(priv->external_encoder, bridge, NULL); if (ret) { dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret); return ret; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c index 8a6a50d74aff..ba0d66c0d8ac 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -69,7 +69,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane, } pitch = crtc_state->mode.hdisplay * - drm_format_plane_cpp(state->fb->pixel_format, 0); + state->fb->format->cpp[0]; if (state->fb->pitches[0] != pitch) { dev_err(plane->dev->dev, "Invalid pitch: fb and crtc widths must be the same"); @@ -77,7 +77,7 @@ static int tilcdc_plane_atomic_check(struct drm_plane *plane, } if (state->fb && old_state->fb && - state->fb->pixel_format != old_state->fb->pixel_format) { + state->fb->format != old_state->fb->format) { dev_dbg(plane->dev->dev, "%s(): pixel format change requires mode_change\n", __func__); diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c index aa0bd054d3e9..aea6a01500e1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_manager.c +++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c @@ -148,10 +148,10 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func ttm_bo_manager_func = { - ttm_bo_man_init, - ttm_bo_man_takedown, - ttm_bo_man_get_node, - ttm_bo_man_put_node, - ttm_bo_man_debug + .init = ttm_bo_man_init, + .takedown = ttm_bo_man_takedown, + .get_node = ttm_bo_man_get_node, + .put_node = ttm_bo_man_put_node, + .debug = ttm_bo_man_debug }; EXPORT_SYMBOL(ttm_bo_manager_func); diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c index 167f42c67c7c..b8dc06d68777 100644 --- a/drivers/gpu/drm/udl/udl_fb.c +++ b/drivers/gpu/drm/udl/udl_fb.c @@ -89,7 +89,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y, int bytes_identical = 0; struct urb *urb; int aligned_x; - int bpp = (fb->base.bits_per_pixel / 8); + int bpp = fb->base.format->cpp[0]; if (!fb->active_16) return 0; @@ -330,7 +330,7 @@ udl_framebuffer_init(struct drm_device *dev, int ret; ufb->obj = obj; - drm_helper_mode_fill_fb_struct(&ufb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &ufb->base, mode_cmd); ret = drm_framebuffer_init(dev, &ufb->base, &udlfb_funcs); return ret; } @@ -395,7 +395,7 @@ static int udlfb_create(struct drm_fb_helper *helper, info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &udlfb_ops; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &ufbdev->helper, sizes->fb_width, sizes->fb_height); DRM_DEBUG_KMS("allocated %dx%d 
vmal %p\n", diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index b5c4bb14d0d1..489956efbff8 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -9,6 +9,8 @@ #include "drmP.h" #include "drm_gem_cma_helper.h" +#include <drm/drm_encoder.h> + struct vc4_dev { struct drm_device *dev; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 881bf489478b..110d1518f5d5 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -295,8 +295,8 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) struct drm_framebuffer *fb = state->fb; struct drm_gem_cma_object *bo = drm_fb_cma_get_gem_obj(fb, 0); u32 subpixel_src_mask = (1 << 16) - 1; - u32 format = fb->pixel_format; - int num_planes = drm_format_num_planes(format); + u32 format = fb->format->format; + int num_planes = fb->format->num_planes; u32 h_subsample = 1; u32 v_subsample = 1; int i; @@ -369,7 +369,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state) */ if (vc4_state->crtc_x < 0) { for (i = 0; i < num_planes; i++) { - u32 cpp = drm_format_plane_cpp(fb->pixel_format, i); + u32 cpp = fb->format->cpp[i]; u32 subs = ((i == 0) ? 1 : h_subsample); vc4_state->offsets[i] += (cpp * @@ -496,7 +496,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); struct drm_framebuffer *fb = state->fb; u32 ctl0_offset = vc4_state->dlist_count; - const struct hvs_format *format = vc4_get_hvs_format(fb->pixel_format); + const struct hvs_format *format = vc4_get_hvs_format(fb->format->format); int num_planes = drm_format_num_planes(format->drm); u32 scl0, scl1; u32 lbm_size; diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 58048709c34e..fad5a1cc5903 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -88,12 +88,13 @@ virtio_gpu_framebuffer_init(struct drm_device *dev, bo = gem_to_virtio_gpu_obj(obj); + drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); + ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); if (ret) { vgfb->obj = NULL; return ret; } - drm_helper_mode_fill_fb_struct(&vgfb->base, mode_cmd); spin_lock_init(&vgfb->dirty_lock); vgfb->x1 = vgfb->y1 = INT_MAX; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 08906c8ce3fa..025f2e3d483f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -35,6 +35,7 @@ #include <drm/drm_gem.h> #include <drm/drm_atomic.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder.h> #include <ttm/ttm_bo_api.h> #include <ttm/ttm_bo_driver.h> #include <ttm/ttm_placement.h> diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c index dd21f950e129..61254b991265 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fb.c +++ b/drivers/gpu/drm/virtio/virtgpu_fb.c @@ -43,7 +43,7 @@ static int virtio_gpu_dirty_update(struct virtio_gpu_framebuffer *fb, struct drm_device *dev = fb->base.dev; struct virtio_gpu_device *vgdev = dev->dev_private; bool store_for_later = false; - int bpp = fb->base.bits_per_pixel / 8; + int bpp = fb->base.format->cpp[0]; int x2, y2; unsigned long flags; struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(fb->obj); @@ -333,7 +333,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper, info->screen_base = obj->vmap; 
info->screen_size = obj->gem_base.size; - drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth); + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); drm_fb_helper_fill_var(info, &vfbdev->helper, sizes->fb_width, sizes->fb_height); diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index 4a1de9f81193..63b3d5d35cf6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -198,11 +198,11 @@ static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, } static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = { - ttm_bo_man_init, - ttm_bo_man_takedown, - ttm_bo_man_get_node, - ttm_bo_man_put_node, - ttm_bo_man_debug + .init = ttm_bo_man_init, + .takedown = ttm_bo_man_takedown, + .get_node = ttm_bo_man_get_node, + .put_node = ttm_bo_man_put_node, + .debug = ttm_bo_man_debug }; static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 723fd763da8e..867a8442220c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -83,7 +83,7 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, return 1; } - switch (par->set_fb->depth) { + switch (par->set_fb->format->depth) { case 24: case 32: pal[regno] = ((red & 0xff00) << 8) | @@ -91,8 +91,9 @@ static int vmw_fb_setcolreg(unsigned regno, unsigned red, unsigned green, ((blue & 0xff00) >> 8); break; default: - DRM_ERROR("Bad depth %u, bpp %u.\n", par->set_fb->depth, - par->set_fb->bits_per_pixel); + DRM_ERROR("Bad depth %u, bpp %u.\n", + par->set_fb->format->depth, + par->set_fb->format->cpp[0] * 8); return 1; } @@ -197,7 +198,7 @@ static void vmw_fb_dirty_flush(struct work_struct *work) * Handle panning when copying from vmalloc to framebuffer. * Clip dirty area to framebuffer. 
*/ - cpp = (cur_fb->bits_per_pixel + 7) / 8; + cpp = cur_fb->format->cpp[0]; max_x = par->fb_x + cur_fb->width; max_y = par->fb_y + cur_fb->height; @@ -487,7 +488,7 @@ static int vmw_fb_kms_framebuffer(struct fb_info *info) cur_fb = par->set_fb; if (cur_fb && cur_fb->width == mode_cmd.width && cur_fb->height == mode_cmd.height && - cur_fb->pixel_format == mode_cmd.pixel_format && + cur_fb->format->format == mode_cmd.pixel_format && cur_fb->pitches[0] == mode_cmd.pitches[0]) return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 170b61be1e4e..fec7348cea2c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -164,9 +164,9 @@ static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man, } const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = { - vmw_gmrid_man_init, - vmw_gmrid_man_takedown, - vmw_gmrid_man_get_node, - vmw_gmrid_man_put_node, - vmw_gmrid_man_debug + .init = vmw_gmrid_man_init, + .takedown = vmw_gmrid_man_takedown, + .get_node = vmw_gmrid_man_get_node, + .put_node = vmw_gmrid_man_put_node, + .debug = vmw_gmrid_man_debug }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e7daf59bac80..cf22110e9eee 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -583,7 +583,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, goto out_err1; } - drm_helper_mode_fill_fb_struct(&vfbs->base.base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); vfbs->surface = vmw_surface_reference(surface); vfbs->base.user_handle = mode_cmd->handles[0]; vfbs->is_dmabuf_proxy = is_dmabuf_proxy; @@ -864,7 +864,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv, goto out_err1; } - drm_helper_mode_fill_fb_struct(&vfbd->base.base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); vfbd->base.dmabuf = true; vfbd->buffer = vmw_dmabuf_reference(dmabuf); vfbd->base.user_handle = mode_cmd->handles[0]; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index f42ce9a1c3ac..cb36e1d70133 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_encoder.h> #include "vmwgfx_drv.h" /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 23ec673d5e16..3806148e1bdb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -97,7 +97,8 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) fb = entry->base.crtc.primary->fb; return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0], - fb->bits_per_pixel, fb->depth); + fb->format->cpp[0] * 8, + fb->format->depth); } if (!list_empty(&lds->active)) { @@ -105,7 +106,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) fb = entry->base.crtc.primary->fb; vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0], - fb->bits_per_pixel, fb->depth); + fb->format->cpp[0] * 8, fb->format->depth); } /* Make sure we always show something. 
*/ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index f42359084adc..d4268efc37d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -598,7 +598,7 @@ static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, struct vmw_dma_buffer *buf = container_of(framebuffer, struct vmw_framebuffer_dmabuf, base)->buffer; - int depth = framebuffer->base.depth; + int depth = framebuffer->base.format->depth; struct { uint32_t header; SVGAFifoCmdDefineGMRFB body; @@ -618,7 +618,7 @@ static int do_dmabuf_define_gmrfb(struct vmw_private *dev_priv, } cmd->header = SVGA_CMD_DEFINE_GMRFB; - cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel; + cmd->body.format.bitsPerPixel = framebuffer->base.format->cpp[0] * 8; cmd->body.format.colorDepth = depth; cmd->body.format.reserved = 0; cmd->body.bytesPerLine = framebuffer->base.pitches[0]; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 94ad8d2acf9a..b27cd18ee66a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -424,7 +424,7 @@ static int vmw_stdu_bind_fb(struct vmw_private *dev_priv, */ if (new_content_type == SEPARATE_DMA) { - switch (new_fb->bits_per_pixel) { + switch (new_fb->format->cpp[0] * 8) { case 32: content_srf.format = SVGA3D_X8R8G8B8; break; diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c index 546eb92a94e8..b634b090cdc1 100644 --- a/drivers/gpu/drm/zte/zx_plane.c +++ b/drivers/gpu/drm/zte/zx_plane.c @@ -146,7 +146,7 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane, if (!fb) return; - format = fb->pixel_format; + format = fb->format->format; stride = fb->pitches[0]; src_x = plane->state->src_x >> 16; @@ -159,7 +159,7 @@ static void zx_gl_plane_atomic_update(struct drm_plane *plane, dst_w = plane->state->crtc_w; dst_h = plane->state->crtc_h; - bpp = drm_format_plane_cpp(format, 0); + bpp = fb->format->cpp[0]; cma_obj = drm_fb_cma_get_gem_obj(fb, 0); paddr = cma_obj->paddr + fb->offsets[0]; diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 192016e2b518..6acea8cf746d 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h @@ -634,6 +634,19 @@ struct drm_device { int switch_power_state; }; +/** + * drm_drv_uses_atomic_modeset - check if the driver implements + * atomic_commit() + * @dev: DRM device + * + * This check is useful if drivers do not have DRIVER_ATOMIC set but + * have atomic modesetting internally implemented. 
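+ *
+ * A minimal sketch of a caller (illustrative only, not part of this
+ * patch), assuming a struct drm_printer prepared by the caller:
+ *
+ *	struct drm_printer p = drm_info_printer(dev->dev);
+ *
+ *	if (drm_drv_uses_atomic_modeset(dev))
+ *		drm_state_dump(dev, &p);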
+ */ +static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev) +{ + return dev->mode_config.funcs->atomic_commit != NULL; +} + #include <drm/drm_irq.h> #define DRM_SWITCH_POWER_ON 0 diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index d6d241f63b9f..b0ebe0fafc41 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -145,6 +145,7 @@ struct __drm_crtcs_state { struct drm_crtc_state *state; struct drm_crtc_commit *commit; s64 __user *out_fence_ptr; + unsigned last_vblank_count; }; struct __drm_connnectors_state { @@ -369,12 +370,6 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state); void drm_state_dump(struct drm_device *dev, struct drm_printer *p); -#ifdef CONFIG_DEBUG_FS -struct drm_minor; -int drm_atomic_debugfs_init(struct drm_minor *minor); -int drm_atomic_debugfs_cleanup(struct drm_minor *minor); -#endif - #define for_each_connector_in_state(__state, connector, connector_state, __i) \ for ((__i) = 0; \ (__i) < (__state)->num_connector && \ @@ -424,5 +419,4 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state) state->connectors_changed; } - #endif /* DRM_ATOMIC_H_ */ diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h index 7ff92b09fd9c..4b2353dc34ba 100644 --- a/include/drm/drm_atomic_helper.h +++ b/include/drm/drm_atomic_helper.h @@ -48,9 +48,6 @@ int drm_atomic_helper_commit(struct drm_device *dev, int drm_atomic_helper_wait_for_fences(struct drm_device *dev, struct drm_atomic_state *state, bool pre_swap); -bool drm_atomic_helper_framebuffer_changed(struct drm_device *dev, - struct drm_atomic_state *old_state, - struct drm_crtc *crtc); void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev, struct drm_atomic_state *old_state); diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h index 610223b0481b..155588eb8ccf 100644 --- a/include/drm/drm_auth.h +++ b/include/drm/drm_auth.h @@ -33,10 +33,7 @@ * * @refcount: Refcount for this master object. * @dev: Link back to the DRM device - * @unique: Unique identifier: e.g. busid. Protected by drm_global_mutex. - * @unique_len: Length of unique field. Protected by drm_global_mutex. - * @magic_map: Map of used authentication tokens. Protected by struct_mutex. - * @lock: DRI lock information. + * @lock: DRI1 lock information. * @driver_priv: Pointer to driver-private information. * * Note that master structures are only relevant for the legacy/primary device @@ -45,8 +42,20 @@ struct drm_master { struct kref refcount; struct drm_device *dev; + /** + * @unique: Unique identifier: e.g. busid. Protected by struct + * &drm_device master_mutex. + */ char *unique; + /** + * @unique_len: Length of unique field. Protected by struct &drm_device + * master_mutex. + */ int unique_len; + /** + * @magic_map: Map of used authentication tokens. Protected by struct + * &drm_device master_mutex.
+ */ struct idr magic_map; struct drm_lock_data lock; void *driver_priv; diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 530a1d6e8cde..435be20029f7 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -201,8 +201,8 @@ struct drm_bridge { int drm_bridge_add(struct drm_bridge *bridge); void drm_bridge_remove(struct drm_bridge *bridge); struct drm_bridge *of_drm_find_bridge(struct device_node *np); -int drm_bridge_attach(struct drm_device *dev, struct drm_bridge *bridge); -void drm_bridge_detach(struct drm_bridge *bridge); +int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, + struct drm_bridge *previous); bool drm_bridge_mode_fixup(struct drm_bridge *bridge, const struct drm_display_mode *mode, diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index a9b95246e26e..6e352a0b5c81 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h @@ -117,7 +117,7 @@ struct drm_display_info { /** * @pixel_clock: Maximum pixel clock supported by the sink, in units of - * 100Hz. This mismatches the clok in &drm_display_mode (which is in + * 100Hz. This mismatches the clock in &drm_display_mode (which is in * kHZ), because that's what the EDID uses as base unit. */ unsigned int pixel_clock; @@ -381,6 +381,8 @@ struct drm_connector_funcs { * core drm connector interfaces. Everything added from this callback * should be unregistered in the early_unregister callback. * + * This is called while holding drm_connector->mutex. + * * Returns: * * 0 on success, or a negative error code on failure. @@ -395,6 +397,8 @@ struct drm_connector_funcs { * late_register(). It is called from drm_connector_unregister(), * early in the driver unload sequence to disable userspace access * before data structures are torn down. + * + * This is called while holding drm_connector->mutex. */ void (*early_unregister)(struct drm_connector *connector); @@ -559,10 +563,6 @@ struct drm_cmdline_mode { * @interlace_allowed: can this connector handle interlaced modes? * @doublescan_allowed: can this connector handle doublescan? * @stereo_allowed: can this connector handle stereo modes? - * @registered: is this connector exposed (registered) with userspace? - * @modes: modes available on this connector (from fill_modes() + user) - * @status: one of the drm_connector_status enums (connected, not, or unknown) - * @probed_modes: list of modes derived directly from the display * @funcs: connector control functions * @edid_blob_ptr: DRM property containing EDID if present * @properties: property tracking for this connector @@ -608,6 +608,13 @@ struct drm_connector { char *name; /** + * @mutex: Lock for general connector state, but currently only protects + * @registered. Most of the connector state is still protected by the + * mutex in &drm_mode_config. + */ + struct mutex mutex; + + /** * @index: Compacted connector index, which matches the position inside * the mode_config.list for drivers not supporting hot-add/removing. Can * be used as an array index. It is invariant over the lifetime of the @@ -620,12 +627,32 @@ struct drm_connector { bool interlace_allowed; bool doublescan_allowed; bool stereo_allowed; + /** + * @registered: Is this connector exposed (registered) with userspace? + * Protected by @mutex. + */ bool registered; + + /** + * @modes: + * Modes available on this connector (from fill_modes() + user). + * Protected by dev->mode_config.mutex.
+ */ struct list_head modes; /* list of modes on this connector */ + /** + * @status: + * One of the drm_connector_status enums (connected, not, or unknown). + * Protected by dev->mode_config.mutex. + */ enum drm_connector_status status; - /* these are modes added by probing with DDC or the BIOS */ + /** + * @probed_modes: + * These are modes added by probing with DDC or the BIOS, before + * filtering is applied. Used by the probe helpers. Protected by + * dev->mode_config.mutex. + */ struct list_head probed_modes; /** @@ -634,6 +661,8 @@ struct drm_connector { * flat panels in embedded systems, the driver should initialize the * display_info.width_mm and display_info.height_mm fields with the * physical size of the display. + * + * Protected by dev->mode_config.mutex. */ struct drm_display_info display_info; const struct drm_connector_funcs *funcs; @@ -839,6 +868,11 @@ void drm_mode_put_tile_group(struct drm_device *dev, * @dev: the DRM device * * Iterate over all connectors of @dev. + * + * WARNING: + * + * This iterator is not safe against hotadd/removal of connectors and is + * deprecated. Use drm_for_each_connector_iter() instead. */ #define drm_for_each_connector(connector, dev) \ for (assert_drm_connector_list_read_locked(&(dev)->mode_config), \ @@ -847,4 +881,37 @@ void drm_mode_put_tile_group(struct drm_device *dev, &connector->head != (&(dev)->mode_config.connector_list); \ connector = list_next_entry(connector, head)) +/** + * struct drm_connector_list_iter - connector_list iterator + * + * This iterator tracks state needed to be able to walk the connector_list + * within struct drm_mode_config. Only use together with + * drm_connector_list_iter_get(), drm_connector_list_iter_put() and + * drm_connector_list_iter_next(), or the convenience macro + * drm_for_each_connector_iter(). + */ +struct drm_connector_list_iter { +/* private: */ + struct drm_device *dev; + struct drm_connector *conn; +}; + +void drm_connector_list_iter_get(struct drm_device *dev, + struct drm_connector_list_iter *iter); +struct drm_connector * +drm_connector_list_iter_next(struct drm_connector_list_iter *iter); +void drm_connector_list_iter_put(struct drm_connector_list_iter *iter); + +/** + * drm_for_each_connector_iter - connector_list iterator macro + * @connector: struct &drm_connector pointer used as cursor + * @iter: struct &drm_connector_list_iter + * + * Note that @connector is only valid within the list body; if you want to use + * @connector after calling drm_connector_list_iter_put() then you need to grab + * your own reference first using drm_connector_reference().
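+ *
+ * A typical walk, sketched here purely for illustration (counting
+ * connectors into a hypothetical local variable):
+ *
+ *	struct drm_connector_list_iter iter;
+ *	struct drm_connector *connector;
+ *	unsigned int count = 0;
+ *
+ *	drm_connector_list_iter_get(dev, &iter);
+ *	drm_for_each_connector_iter(connector, &iter)
+ *		count++;
+ *	drm_connector_list_iter_put(&iter);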
+ */ +#define drm_for_each_connector_iter(connector, iter) \ + while ((connector = drm_connector_list_iter_next(iter))) + #endif diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 946672f97e1e..6920dee3a2d1 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -39,7 +39,6 @@ #include <drm/drm_framebuffer.h> #include <drm/drm_modes.h> #include <drm/drm_connector.h> -#include <drm/drm_encoder.h> #include <drm/drm_property.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -68,14 +67,12 @@ static inline uint64_t I642U64(int64_t val) } struct drm_crtc; -struct drm_encoder; struct drm_pending_vblank_event; struct drm_plane; struct drm_bridge; struct drm_atomic_state; struct drm_crtc_helper_funcs; -struct drm_encoder_helper_funcs; struct drm_plane_helper_funcs; /** @@ -93,8 +90,6 @@ struct drm_plane_helper_funcs; * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders - * @last_vblank_count: for helpers and drivers to capture the vblank of the - * update to ensure framebuffer cleanup isn't done too early * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings * @mode: current mode timings * @mode_blob: &drm_property_blob for @mode @@ -140,9 +135,6 @@ struct drm_crtc_state { u32 connector_mask; u32 encoder_mask; - /* last_vblank_count: for vblank waits before cleanup */ - u32 last_vblank_count; - /* adjusted_mode: for use by helpers and drivers */ struct drm_display_mode adjusted_mode; diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h index c7438ff0d609..5f58f65344e0 100644 --- a/include/drm/drm_encoder.h +++ b/include/drm/drm_encoder.h @@ -25,8 +25,12 @@ #include <linux/list.h> #include <linux/ctype.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mode.h> #include <drm/drm_mode_object.h> +struct drm_encoder; + /** * struct drm_encoder_funcs - encoder controls * @@ -188,9 +192,6 @@ static inline unsigned int drm_encoder_index(struct drm_encoder *encoder) return encoder->index; } -/* FIXME: We have an include file mess still, drm_crtc.h needs untangling. */ -static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc); - /** * drm_encoder_crtc_ok - can a given crtc drive a given encoder? * @encoder: encoder to test diff --git a/include/drm/drm_encoder_slave.h b/include/drm/drm_encoder_slave.h index 82cdf611393d..1107b4b1c599 100644 --- a/include/drm/drm_encoder_slave.h +++ b/include/drm/drm_encoder_slave.h @@ -29,6 +29,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> /** * struct drm_encoder_slave_funcs - Entry points exposed by a slave encoder driver diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h index 1ddfa2928802..f0dde1d02be4 100644 --- a/include/drm/drm_framebuffer.h +++ b/include/drm/drm_framebuffer.h @@ -122,6 +122,10 @@ struct drm_framebuffer { */ struct drm_mode_object base; /** + * @format: framebuffer format information + */ + const struct drm_format_info *format; + /** * @funcs: framebuffer vfunc table */ const struct drm_framebuffer_funcs *funcs; @@ -166,28 +170,11 @@ struct drm_framebuffer { */ unsigned int height; /** - * @depth: Depth in bits per pixel for RGB formats. 0 for everything - * else. Legacy information derived from @pixel_format, it's suggested to use - * the DRM FOURCC codes and helper functions directly instead. 
- */ - unsigned int depth; - /** - * @bits_per_pixel: Storage used bits per pixel for RGB formats. 0 for - * everything else. Legacy information derived from @pixel_format, it's - * suggested to use the DRM FOURCC codes and helper functions directly - * instead. - */ - int bits_per_pixel; - /** * @flags: Framebuffer flags like DRM_MODE_FB_INTERLACED or * DRM_MODE_FB_MODIFIERS. */ int flags; /** - * @pixel_format: DRM FOURCC code describing the pixel format. - */ - uint32_t pixel_format; /* fourcc format */ - /** * @hot_x: X coordinate of the cursor hotspot. Used by the legacy cursor * IOCTL when the driver supports cursor through a DRM_PLANE_TYPE_CURSOR * universal plane. @@ -282,4 +269,10 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p, struct drm_framebuffer, head); \ &fb->head != (&(dev)->mode_config.fb_list); \ fb = list_next_entry(fb, head)) + +int drm_framebuffer_plane_width(int width, + const struct drm_framebuffer *fb, int plane); +int drm_framebuffer_plane_height(int height, + const struct drm_framebuffer *fb, int plane); + #endif diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h index 0b8371795aeb..92ec5759caae 100644 --- a/include/drm/drm_mm.h +++ b/include/drm/drm_mm.h @@ -1,6 +1,7 @@ /************************************************************************** * * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA. + * Copyright 2016 Intel Corporation * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a @@ -48,6 +49,12 @@ #include <linux/stackdepot.h> #endif +#ifdef CONFIG_DRM_DEBUG_MM +#define DRM_MM_BUG_ON(expr) BUG_ON(expr) +#else +#define DRM_MM_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) +#endif + enum drm_mm_search_flags { DRM_MM_SEARCH_DEFAULT = 0, DRM_MM_SEARCH_BEST = 1 << 0, @@ -67,11 +74,8 @@ struct drm_mm_node { struct list_head hole_stack; struct rb_node rb; unsigned hole_follows : 1; - unsigned scanned_block : 1; - unsigned scanned_prev_free : 1; - unsigned scanned_next_free : 1; - unsigned scanned_preceeds_hole : 1; unsigned allocated : 1; + bool scanned_block : 1; unsigned long color; u64 start; u64 size; @@ -91,32 +95,44 @@ struct drm_mm { /* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */ struct rb_root interval_tree; - unsigned int scan_check_range : 1; - unsigned scan_alignment; - unsigned long scan_color; - u64 scan_size; - u64 scan_hit_start; - u64 scan_hit_end; - unsigned scanned_blocks; - u64 scan_start; - u64 scan_end; - struct drm_mm_node *prev_scanned_node; - - void (*color_adjust)(struct drm_mm_node *node, unsigned long color, + void (*color_adjust)(const struct drm_mm_node *node, + unsigned long color, u64 *start, u64 *end); + + unsigned long scan_active; +}; + +struct drm_mm_scan { + struct drm_mm *mm; + + u64 size; + u64 alignment; + u64 remainder_mask; + + u64 range_start; + u64 range_end; + + u64 hit_start; + u64 hit_end; + + unsigned long color; + unsigned int flags; }; /** * drm_mm_node_allocated - checks whether a node is allocated * @node: drm_mm_node to check * - * Drivers should use this helpers for proper encapusulation of drm_mm + * Drivers are required to clear a node prior to using it with the + * drm_mm range manager. + * + * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @node is allocated. 
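+ *
+ * For instance (an illustrative sketch only, where obj->node is a
+ * hypothetical drm_mm_node embedded in a driver object), a teardown
+ * path might do:
+ *
+ *	if (drm_mm_node_allocated(&obj->node))
+ *		drm_mm_remove_node(&obj->node);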
*/ -static inline bool drm_mm_node_allocated(struct drm_mm_node *node) +static inline bool drm_mm_node_allocated(const struct drm_mm_node *node) { return node->allocated; } @@ -125,18 +141,37 @@ static inline bool drm_mm_node_allocated(struct drm_mm_node *node) * drm_mm_initialized - checks whether an allocator is initialized * @mm: drm_mm to check * - * Drivers should use this helpers for proper encapusulation of drm_mm + * Drivers should clear the struct drm_mm prior to initialisation if they + * want to use this function. + * + * Drivers should use this helper for proper encapsulation of drm_mm * internals. * * Returns: * True if the @mm is initialized. */ -static inline bool drm_mm_initialized(struct drm_mm *mm) +static inline bool drm_mm_initialized(const struct drm_mm *mm) { return mm->hole_stack.next; } -static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) +/** + * drm_mm_hole_follows - checks whether a hole follows this node + * @node: drm_mm_node to check + * + * Holes are embedded into the drm_mm using the tail of a drm_mm_node. + * If you wish to know whether a hole follows this particular node, + * query this function. + * + * Returns: + * True if a hole follows the @node. + */ +static inline bool drm_mm_hole_follows(const struct drm_mm_node *node) +{ + return node->hole_follows; +} + +static inline u64 __drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { return hole_node->start + hole_node->size; } @@ -145,20 +180,20 @@ static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node) * drm_mm_hole_node_start - computes the start of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-sepific debug dumpers. Otherwise drivers should not - * inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at node->hole_follows. + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows() * * Returns: * Start of the subsequent hole. */ -static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_start(const struct drm_mm_node *hole_node) { - BUG_ON(!hole_node->hole_follows); + DRM_MM_BUG_ON(!drm_mm_hole_follows(hole_node)); return __drm_mm_hole_node_start(hole_node); } -static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 __drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return list_next_entry(hole_node, node_list)->start; } @@ -167,19 +202,33 @@ static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node) * drm_mm_hole_node_end - computes the end of the hole following @node * @hole_node: drm_mm_node which implicitly tracks the following hole * - * This is useful for driver-sepific debug dumpers. Otherwise drivers should not - * inspect holes themselves. Drivers must check first whether a hole indeed - * follows by looking at node->hole_follows. + * This is useful for driver-specific debug dumpers. Otherwise drivers should + * not inspect holes themselves. Drivers must check first whether a hole indeed + * follows by looking at drm_mm_hole_follows(). * * Returns: * End of the subsequent hole. 
*/ -static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) +static inline u64 drm_mm_hole_node_end(const struct drm_mm_node *hole_node) { return __drm_mm_hole_node_end(hole_node); } /** + * drm_mm_nodes - list of nodes under the drm_mm range manager + * @mm: the struct drm_mm range manager + * + * As the drm_mm range manager hides its node_list deep within its + * structure, extracting it looks painful and repetitive. This is + * not expected to be used outside of the drm_mm_for_each_node() + * macros and similar internal functions. + * + * Returns: + * The node list, may be empty. + */ +#define drm_mm_nodes(mm) (&(mm)->head_node.node_list) + +/** * drm_mm_for_each_node - iterator to walk over all allocated nodes * @entry: drm_mm_node structure to assign to in each iteration step * @mm: drm_mm allocator to walk @@ -187,9 +236,20 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) * This iterator walks over all nodes in the range allocator. It is implemented * with list_for_each, so not safe against removal of elements. */ -#define drm_mm_for_each_node(entry, mm) list_for_each_entry(entry, \ - &(mm)->head_node.node_list, \ - node_list) +#define drm_mm_for_each_node(entry, mm) \ + list_for_each_entry(entry, drm_mm_nodes(mm), node_list) + +/** + * drm_mm_for_each_node_safe - iterator to walk over all allocated nodes + * @entry: drm_mm_node structure to assign to in each iteration step + * @next: drm_mm_node structure to store the next step + * @mm: drm_mm allocator to walk + * + * This iterator walks over all nodes in the range allocator. It is implemented + * with list_for_each_safe, so safe against removal of elements. + */ +#define drm_mm_for_each_node_safe(entry, next, mm) \ + list_for_each_entry_safe(entry, next, drm_mm_nodes(mm), node_list) #define __drm_mm_for_each_hole(entry, mm, hole_start, hole_end, backwards) \ for (entry = list_entry((backwards) ? (mm)->hole_stack.prev : (mm)->hole_stack.next, struct drm_mm_node, hole_stack); \ @@ -225,49 +285,16 @@ static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node) * Basic range manager support (drm_mm.c) */ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node); - -int drm_mm_insert_node_generic(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - unsigned long color, - enum drm_mm_search_flags sflags, - enum drm_mm_allocator_flags aflags); -/** - * drm_mm_insert_node - search for space and insert @node - * @mm: drm_mm to allocate from - * @node: preallocate node to insert - * @size: size of the allocation - * @alignment: alignment of the allocation - * @flags: flags to fine-tune the allocation - * - * This is a simplified version of drm_mm_insert_node_generic() with @color set - * to 0. - * - * The preallocated node must be cleared to 0. - * - * Returns: - * 0 on success, -ENOSPC if there's no suitable hole.
- */ -static inline int drm_mm_insert_node(struct drm_mm *mm, - struct drm_mm_node *node, - u64 size, - unsigned alignment, - enum drm_mm_search_flags flags) -{ - return drm_mm_insert_node_generic(mm, node, size, alignment, 0, flags, - DRM_MM_CREATE_DEFAULT); -} - int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, struct drm_mm_node *node, u64 size, - unsigned alignment, + u64 alignment, unsigned long color, u64 start, u64 end, enum drm_mm_search_flags sflags, enum drm_mm_allocator_flags aflags); + /** * drm_mm_insert_node_in_range - ranged search for space and insert @node * @mm: drm_mm to allocate from @@ -289,7 +316,7 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm, static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, struct drm_mm_node *node, u64 size, - unsigned alignment, + u64 alignment, u64 start, u64 end, enum drm_mm_search_flags flags) @@ -299,16 +326,81 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm, DRM_MM_CREATE_DEFAULT); } +/** + * drm_mm_insert_node_generic - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocated node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for this node + * @sflags: flags to fine-tune the allocation search + * @aflags: flags to fine-tune the allocation behavior + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. + */ +static inline int +drm_mm_insert_node_generic(struct drm_mm *mm, struct drm_mm_node *node, + u64 size, u64 alignment, + unsigned long color, + enum drm_mm_search_flags sflags, + enum drm_mm_allocator_flags aflags) +{ + return drm_mm_insert_node_in_range_generic(mm, node, + size, alignment, 0, + 0, U64_MAX, + sflags, aflags); +} + +/** + * drm_mm_insert_node - search for space and insert @node + * @mm: drm_mm to allocate from + * @node: preallocated node to insert + * @size: size of the allocation + * @alignment: alignment of the allocation + * @flags: flags to fine-tune the allocation + * + * This is a simplified version of drm_mm_insert_node_generic() with @color set + * to 0. + * + * The preallocated node must be cleared to 0. + * + * Returns: + * 0 on success, -ENOSPC if there's no suitable hole. + */ +static inline int drm_mm_insert_node(struct drm_mm *mm, + struct drm_mm_node *node, + u64 size, + u64 alignment, + enum drm_mm_search_flags flags) +{ + return drm_mm_insert_node_generic(mm, node, + size, alignment, 0, + flags, DRM_MM_CREATE_DEFAULT); +} + void drm_mm_remove_node(struct drm_mm_node *node); void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new); -void drm_mm_init(struct drm_mm *mm, - u64 start, - u64 size); +void drm_mm_init(struct drm_mm *mm, u64 start, u64 size); void drm_mm_takedown(struct drm_mm *mm); -bool drm_mm_clean(struct drm_mm *mm); + +/** + * drm_mm_clean - checks whether an allocator is clean + * @mm: drm_mm allocator to check + * + * Returns: + * True if the allocator is completely free, false if there's still a node + * allocated in it.
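 + *
 + * Editor's sketch (not from the original patch): a teardown path might drain
 + * the allocator with the safe iterator above and assert cleanliness before
 + * drm_mm_takedown(); "mm", "node" and "next" are local placeholders.
 + *
 + *	drm_mm_for_each_node_safe(node, next, &mm)
 + *		drm_mm_remove_node(node);
 + *	WARN_ON(!drm_mm_clean(&mm));
 + *	drm_mm_takedown(&mm);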
+ */ +static inline bool drm_mm_clean(const struct drm_mm *mm) +{ + return list_empty(drm_mm_nodes(mm)); +} struct drm_mm_node * -__drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last); +__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last); /** * drm_mm_for_each_node_in_range - iterator to walk over a range of @@ -329,22 +421,50 @@ __drm_mm_interval_first(struct drm_mm *mm, u64 start, u64 last); node__ && node__->start < (end__); \ node__ = list_next_entry(node__, node_list)) -void drm_mm_init_scan(struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color); -void drm_mm_init_scan_with_range(struct drm_mm *mm, - u64 size, - unsigned alignment, - unsigned long color, - u64 start, - u64 end); -bool drm_mm_scan_add_block(struct drm_mm_node *node); -bool drm_mm_scan_remove_block(struct drm_mm_node *node); - -void drm_mm_debug_table(struct drm_mm *mm, const char *prefix); +void drm_mm_scan_init_with_range(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, u64 alignment, unsigned long color, + u64 start, u64 end, + unsigned int flags); + +/** + * drm_mm_scan_init - initialize lru scanning + * @scan: scan state + * @mm: drm_mm to scan + * @size: size of the allocation + * @alignment: alignment of the allocation + * @color: opaque tag value to use for the allocation + * @flags: flags to specify how the allocation will be performed afterwards + * + * This simply sets up the scanning routines with the parameters for the desired + * hole. + * + * Warning: + * As long as the scan list is non-empty, no other operations than + * adding/removing nodes to/from the scan list are allowed. + */ +static inline void drm_mm_scan_init(struct drm_mm_scan *scan, + struct drm_mm *mm, + u64 size, + u64 alignment, + unsigned long color, + unsigned int flags) +{ + drm_mm_scan_init_with_range(scan, mm, + size, alignment, color, + 0, U64_MAX, + flags); +} + +bool drm_mm_scan_add_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, + struct drm_mm_node *node); +struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan); + +void drm_mm_debug_table(const struct drm_mm *mm, const char *prefix); #ifdef CONFIG_DEBUG_FS -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm); +int drm_mm_dump_table(struct seq_file *m, const struct drm_mm *mm); #endif #endif diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index bf9991b20611..5b735549bd51 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -365,7 +365,13 @@ struct drm_mode_config { struct list_head fb_list; /** - * @num_connector: Number of connectors on this device. + * @connector_list_lock: Protects @num_connector and + * @connector_list. + */ + spinlock_t connector_list_lock; + /** + * @num_connector: Number of connectors on this device. Protected by + * @connector_list_lock. */ int num_connector; /** @@ -373,7 +379,9 @@ struct drm_mode_config { */ struct ida connector_ida; /** - * @connector_list: List of connector objects. + * @connector_list: List of connector objects. Protected by + * @connector_list_lock. Only use drm_for_each_connector_iter() and + * struct &drm_connector_list_iter to walk this list. 
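 + *
 + * An illustrative walk (editor's sketch; it assumes the companion iterator
 + * helpers introduced alongside this lock, whose names may differ, and
 + * do_something() is a placeholder):
 + *
 + *	struct drm_connector_list_iter iter;
 + *	struct drm_connector *connector;
 + *
 + *	drm_connector_list_iter_get(dev, &iter);
 + *	drm_for_each_connector_iter(connector, &iter)
 + *		do_something(connector);
 + *	drm_connector_list_iter_put(&iter);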
*/ struct list_head connector_list; int num_encoder; diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h index b8051d5abe10..cb0ec92e11e6 100644 --- a/include/drm/drm_modeset_helper.h +++ b/include/drm/drm_modeset_helper.h @@ -27,7 +27,8 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *); -void drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, +void drm_helper_mode_fill_fb_struct(struct drm_device *dev, + struct drm_framebuffer *fb, const struct drm_mode_fb_cmd2 *mode_cmd); int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h index 69c3974bf133..625c7475c5df 100644 --- a/include/drm/drm_modeset_helper_vtables.h +++ b/include/drm/drm_modeset_helper_vtables.h @@ -30,6 +30,7 @@ #define __DRM_MODESET_HELPER_VTABLES_H__ #include <drm/drm_crtc.h> +#include <drm/drm_encoder.h> /** * DOC: overview diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h index 01a8436ccb0a..2bbc610ec3a2 100644 --- a/include/drm/drm_simple_kms_helper.h +++ b/include/drm/drm_simple_kms_helper.h @@ -114,8 +114,6 @@ struct drm_simple_display_pipe { int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe, struct drm_bridge *bridge); -void drm_simple_display_pipe_detach_bridge(struct drm_simple_display_pipe *pipe); - int drm_simple_display_pipe_init(struct drm_device *dev, struct drm_simple_display_pipe *pipe, const struct drm_simple_display_pipe_funcs *funcs, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 8daeb3ce0016..57828154e440 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -39,23 +39,6 @@ struct dma_buf_attachment; /** * struct dma_buf_ops - operations possible on struct dma_buf - * @attach: [optional] allows different devices to 'attach' themselves to the - * given buffer. It might return -EBUSY to signal that backing storage - * is already allocated and incompatible with the requirements - * of requesting device. - * @detach: [optional] detach a given device from this buffer. - * @map_dma_buf: returns list of scatter pages allocated, increases usecount - * of the buffer. Requires atleast one attach to be called - * before. Returned sg list should already be mapped into - * _device_ address space. This call may sleep. May also return - * -EINTR. Should return -EINVAL if attach hasn't been called yet. - * @unmap_dma_buf: decreases usecount of buffer, might deallocate scatter - * pages. - * @release: release this buffer; to be called after the last dma_buf_put. - * @begin_cpu_access: [optional] called before cpu access to invalidate cpu - * caches and allocate backing storage (if not yet done) - * respectively pin the object into memory. - * @end_cpu_access: [optional] called after cpu access to flush caches. * @kmap_atomic: maps a page from the buffer into kernel address * space, users may not block until the subsequent unmap call. * This callback must not sleep. @@ -63,43 +46,206 @@ struct dma_buf_attachment; * This Callback must not sleep. * @kmap: maps a page from the buffer into kernel address space. * @kunmap: [optional] unmaps a page from the buffer. - * @mmap: used to expose the backing storage to userspace. Note that the - * mapping needs to be coherent - if the exporter doesn't directly - * support this, it needs to fake coherency by shooting down any ptes - * when transitioning away from the cpu domain. 
* @vmap: [optional] creates a virtual mapping for the buffer into kernel * address space. Same restrictions as for vmap and friends apply. * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { + /** + * @attach: + * + * This is called from dma_buf_attach() to make sure that a given + * &device can access the provided &dma_buf. Exporters which support + * buffer objects in special locations like VRAM or device-specific + * carveout areas should check whether the buffer could be moved to + * system memory (or directly accessed by the provided device), and + * otherwise need to fail the attach operation. + * + * The exporter should also in general check whether the current + * allocation fulfills the DMA constraints of the new device. If this + * is not the case, and the allocation cannot be moved, it should also + * fail the attach operation. + * + * Any exporter-private housekeeping data can be stored in the priv + * pointer of the &dma_buf_attachment structure. + * + * This callback is optional. + * + * Returns: + * + * 0 on success, negative error code on failure. It might return -EBUSY + * to signal that backing storage is already allocated and incompatible + * with the requirements of the requesting device. + */ int (*attach)(struct dma_buf *, struct device *, - struct dma_buf_attachment *); + struct dma_buf_attachment *); + /** + * @detach: + * + * This is called by dma_buf_detach() to release a &dma_buf_attachment. + * Provided so that exporters can clean up any housekeeping for a + * &dma_buf_attachment. + * + * This callback is optional. + */ void (*detach)(struct dma_buf *, struct dma_buf_attachment *); - /* For {map,unmap}_dma_buf below, any specific buffer attributes - * required should get added to device_dma_parameters accessible - * via dev->dma_params. + /** + * @map_dma_buf: + * + * This is called by dma_buf_map_attachment() and is used to map a + * shared &dma_buf into device address space, and it is mandatory. It + * can only be called if @attach has been called successfully. This + * essentially pins the DMA buffer into place, and it cannot be moved + * any more. + * + * This call may sleep, e.g. when the backing storage first needs to be + * allocated, or moved to a location suitable for all currently attached + * devices. + * + * Note that any specific buffer attributes required for this function + * should get added to device_dma_parameters accessible via + * device->dma_params from the &dma_buf_attachment. The @attach callback + * should also check these constraints. + * + * If this is being called for the first time, the exporter can now + * choose to scan through the list of attachments for this buffer, + * collate the requirements of the attached devices, and choose an + * appropriate backing storage for the buffer. + * + * Based on enum dma_data_direction, it might be possible to have + * multiple users accessing at the same time (for reading, maybe), or + * any other kind of sharing that the exporter might wish to make + * available to buffer-users. + * + * Returns: + * + * A &sg_table scatter list of the backing storage of the DMA buffer, + * already mapped into the device address space of the &device attached + * with the provided &dma_buf_attachment. + * + * On failure, returns a negative error value wrapped into a pointer. + * May also return -EINTR when a signal was received while being + * blocked.
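 + *
 + * For reference, an importer typically reaches this callback through the
 + * following sequence (editor's sketch of the importer-side entry points,
 + * not part of the original patch):
 + *
 + *	attach = dma_buf_attach(dmabuf, dev);
 + *	if (IS_ERR(attach))
 + *		return PTR_ERR(attach);
 + *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 + *	if (IS_ERR(sgt))
 + *		return PTR_ERR(sgt);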
*/ struct sg_table * (*map_dma_buf)(struct dma_buf_attachment *, - enum dma_data_direction); + enum dma_data_direction); + /** + * @unmap_dma_buf: + * + * This is called by dma_buf_unmap_attachment() and should unmap and + * release the &sg_table allocated in @map_dma_buf, and it is mandatory. + * It should also unpin the backing storage if this is the last mapping + * of the DMA buffer, if the exporter supports backing storage + * migration. + */ void (*unmap_dma_buf)(struct dma_buf_attachment *, - struct sg_table *, - enum dma_data_direction); + struct sg_table *, + enum dma_data_direction); + /* TODO: Add try_map_dma_buf version, to return immed with -EBUSY * if the call would block. */ - /* after final dma_buf_put() */ + /** + * @release: + * + * Called after the last dma_buf_put to release the &dma_buf, and it is + * mandatory. + */ void (*release)(struct dma_buf *); + /** + * @begin_cpu_access: + * + * This is called from dma_buf_begin_cpu_access() and allows the + * exporter to ensure that the memory is actually available for cpu + * access - the exporter might need to allocate or swap-in and pin the + * backing storage. The exporter also needs to ensure that cpu access is + * coherent for the access direction. The direction can be used by the + * exporter to optimize the cache flushing, i.e. access with a different + * direction (read instead of write) might return stale or even bogus + * data (e.g. when the exporter needs to copy the data to temporary + * storage). + * + * This callback is optional. + * + * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command + * from userspace (where storage shouldn't be pinned to avoid handing + * de-facto mlock rights to userspace) and for the kernel-internal + * users of the various kmap interfaces, where the backing storage must + * be pinned to guarantee that the atomic kmap calls can succeed. Since + * there are no in-kernel users of the kmap interfaces yet, this isn't a + * real problem. + * + * Returns: + * + * 0 on success or a negative error code on failure. This can for + * example fail when the backing storage can't be allocated. Can also + * return -ERESTARTSYS or -EINTR when the call has been interrupted and + * needs to be restarted. + */ int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction); + + /** + * @end_cpu_access: + * + * This is called from dma_buf_end_cpu_access() when the importer is + * done accessing the buffer with the CPU. The exporter can use this to + * flush caches and unpin any resources pinned in @begin_cpu_access. + * The result of any dma_buf kmap calls after end_cpu_access is + * undefined. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. Can return + * -ERESTARTSYS or -EINTR when the call has been interrupted and needs + * to be restarted. + */ int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction); void *(*kmap_atomic)(struct dma_buf *, unsigned long); void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); void *(*kmap)(struct dma_buf *, unsigned long); void (*kunmap)(struct dma_buf *, unsigned long, void *); + /** + * @mmap: + * + * This callback is used by the dma_buf_mmap() function. + * + * Note that the mapping needs to be incoherent; userspace is expected + * to bracket CPU access using the DMA_BUF_IOCTL_SYNC interface. + * + * Because dma-buf buffers have invariant size over their lifetime, the + * dma-buf core checks whether a vma is too large and rejects such + * mappings.
The exporter hence does not need to duplicate this check. + * + * If an exporter needs to manually flush caches and hence needs to fake + * coherency for mmap support, it needs to be able to zap all the ptes + * pointing at the backing storage. Now the Linux mm needs a struct + * address_space associated with the struct file stored in vma->vm_file + * to do that with the function unmap_mapping_range. But the dma_buf + * framework only backs every dma_buf fd with the anon_file struct file, + * i.e. all dma_bufs share the same file. + * + * Hence exporters need to set up their own file (and address_space) + * association by setting vma->vm_file and adjusting vma->vm_pgoff in + * the dma_buf mmap callback. In the specific case of a gem driver the + * exporter could use the shmem file already provided by gem (and set + * vm_pgoff = 0). Exporters can then zap ptes by unmapping the + * corresponding range of the struct address_space associated with their + * own file. + * + * This callback is optional. + * + * Returns: + * + * 0 on success or a negative error code on failure. + */ int (*mmap)(struct dma_buf *, struct vm_area_struct *vma); void *(*vmap)(struct dma_buf *); @@ -124,6 +270,15 @@ struct dma_buf_ops { * @poll: for userspace poll support * @cb_excl: for userspace poll support * @cb_shared: for userspace poll support + * + * This represents a shared buffer, created by calling dma_buf_export(). The + * userspace representation is a normal file descriptor, which can be created by + * calling dma_buf_fd(). + * + * Shared dma buffers are reference counted using dma_buf_put() and + * get_dma_buf(). + * + * Device DMA access is handled by the separate struct &dma_buf_attachment. */ struct dma_buf { size_t size; @@ -160,6 +315,11 @@ struct dma_buf { * This structure holds the attachment information between the dma_buf buffer * and its user device(s). The list contains one attachment struct per device * attached to the buffer. + * + * An attachment is created by calling dma_buf_attach(), and released again by + * calling dma_buf_detach(). The DMA mapping itself needed to initiate a + * transfer is created by dma_buf_map_attachment() and freed again by calling + * dma_buf_unmap_attachment(). */ struct dma_buf_attachment { struct dma_buf *dmabuf; @@ -192,9 +352,11 @@ struct dma_buf_export_info { }; /** - * helper macro for exporters; zeros and fills in most common values - * + * DEFINE_DMA_BUF_EXPORT_INFO - helper macro for exporters * @name: export-info name + * + * The DEFINE_DMA_BUF_EXPORT_INFO macro defines the struct &dma_buf_export_info, + * zeroes it out and pre-populates exp_name in it.
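 + *
 + * Typical exporter usage looks roughly like this (editor's sketch;
 + * my_dmabuf_ops, obj and size are driver-specific placeholders):
 + *
 + *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 + *	struct dma_buf *dmabuf;
 + *
 + *	exp_info.ops = &my_dmabuf_ops;
 + *	exp_info.size = size;
 + *	exp_info.flags = O_RDWR;
 + *	exp_info.priv = obj;
 + *	dmabuf = dma_buf_export(&exp_info);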
*/ #define DEFINE_DMA_BUF_EXPORT_INFO(name) \ struct dma_buf_export_info name = { .exp_name = KBUILD_MODNAME, \ diff --git a/include/linux/kref.h b/include/linux/kref.h index e15828fd71f1..62f0a84ae94e 100644 --- a/include/linux/kref.h +++ b/include/linux/kref.h @@ -133,6 +133,6 @@ static inline int kref_put_mutex(struct kref *kref, */ static inline int __must_check kref_get_unless_zero(struct kref *kref) { - return atomic_add_unless(&kref->refcount, 1, 0); + return atomic_inc_not_zero(&kref->refcount); } #endif /* _KREF_H_ */ diff --git a/include/linux/prime_numbers.h b/include/linux/prime_numbers.h new file mode 100644 index 000000000000..14ec4f567342 --- /dev/null +++ b/include/linux/prime_numbers.h @@ -0,0 +1,37 @@ +#ifndef __LINUX_PRIME_NUMBERS_H +#define __LINUX_PRIME_NUMBERS_H + +#include <linux/types.h> + +bool is_prime_number(unsigned long x); +unsigned long next_prime_number(unsigned long x); + +/** + * for_each_prime_number - iterate over each prime up to a value + * @prime: the current prime number in this iteration + * @max: the upper limit + * + * Starting from the first prime number 2 iterate over each prime number up to + * the @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX to ensure termination. To begin the + * iteration with @prime set to 1, use for_each_prime_number_from() + * instead. + */ +#define for_each_prime_number(prime, max) \ + for_each_prime_number_from((prime), 2, (max)) + +/** + * for_each_prime_number_from - iterate over each prime up to a value + * @prime: the current prime number in this iteration + * @from: the initial value + * @max: the upper limit + * + * Starting from @from iterate over each successive prime number up to the + * @max value. On each iteration, @prime is set to the current prime number. + * @max should be less than ULONG_MAX, and @from less than @max, to ensure + * termination. + */ +#define for_each_prime_number_from(prime, from, max) \ + for (prime = (from); prime <= (max); prime = next_prime_number(prime)) + +#endif /* !__LINUX_PRIME_NUMBERS_H */ diff --git a/include/linux/reservation.h b/include/linux/reservation.h index d9706a6f5ae2..2b5a4679daea 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -145,6 +145,40 @@ reservation_object_get_list(struct reservation_object *obj) } /** + * reservation_object_lock - lock the reservation object + * @obj: the reservation object + * @ctx: the locking context + * + * Locks the reservation object for exclusive access and modification. Note + * that the lock is only against other writers; readers will run concurrently + * with a writer under RCU. The seqlock is used to notify readers if they + * overlap with a writer. + * + * As the reservation object may be locked by multiple parties in an + * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle + * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation + * object may be locked by itself by passing NULL as @ctx. + */ +static inline int +reservation_object_lock(struct reservation_object *obj, + struct ww_acquire_ctx *ctx) +{ + return ww_mutex_lock(&obj->lock, ctx); +} + +/** + * reservation_object_unlock - unlock the reservation object + * @obj: the reservation object + * + * Unlocks the reservation object following exclusive access.
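 + *
 + * Pairs with reservation_object_lock(). Editor's sketch of a single-object
 + * update ("fence" stands in for the writer's &dma_fence):
 + *
 + *	if (reservation_object_lock(obj, NULL) == 0) {
 + *		reservation_object_add_excl_fence(obj, fence);
 + *		reservation_object_unlock(obj);
 + *	}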
+ */ +static inline void +reservation_object_unlock(struct reservation_object *obj) +{ + ww_mutex_unlock(&obj->lock); +} + +/** * reservation_object_get_excl - get the reservation object's * exclusive fence, with update-side lock held * @obj: the reservation object diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index a5890bf44c0a..9e1bb7fabcde 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -154,6 +154,7 @@ extern "C" { /* Vendor Ids: */ #define DRM_FORMAT_MOD_NONE 0 +#define DRM_FORMAT_MOD_VENDOR_NONE 0 #define DRM_FORMAT_MOD_VENDOR_INTEL 0x01 #define DRM_FORMAT_MOD_VENDOR_AMD 0x02 #define DRM_FORMAT_MOD_VENDOR_NV 0x03 @@ -172,6 +173,16 @@ extern "C" { * authoritative source for all of these. */ +/* + * Linear Layout + * + * Just plain linear layout. Note that this is different from not specifying any + * modifier (e.g. not setting DRM_MODE_FB_MODIFIERS in the DRM_ADDFB2 ioctl), + * which tells the driver to also take driver-internal information into account + * and so might actually result in a tiled framebuffer. + */ +#define DRM_FORMAT_MOD_LINEAR fourcc_mod_code(NONE, 0) + /* Intel framebuffer modifiers */ /* diff --git a/lib/Kconfig b/lib/Kconfig index 260a80e313b9..1788a1f50d28 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -550,4 +550,11 @@ config STACKDEPOT config SBITMAP bool +config PRIME_NUMBERS + tristate "Prime number generator" + default n + help + Provides a helper module to generate prime numbers. Useful for writing + test code, especially when checking multiplication and division. + endmenu diff --git a/lib/Makefile b/lib/Makefile index bc4073a8cd08..bdfc284c1d5a 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -196,6 +196,8 @@ obj-$(CONFIG_ASN1) += asn1_decoder.o obj-$(CONFIG_FONT_SUPPORT) += fonts/ +obj-$(CONFIG_PRIME_NUMBERS) += prime_numbers.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/prime_numbers.c b/lib/prime_numbers.c new file mode 100644 index 000000000000..c9b3c29614aa --- /dev/null +++ b/lib/prime_numbers.c @@ -0,0 +1,314 @@ +#define pr_fmt(fmt) "prime numbers: " fmt "\n" + +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/prime_numbers.h> +#include <linux/slab.h> + +#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long)) + +struct primes { + struct rcu_head rcu; + unsigned long last, sz; + unsigned long primes[]; +}; + +#if BITS_PER_LONG == 64 +static const struct primes small_primes = { + .last = 61, + .sz = 64, + .primes = { + BIT(2) | + BIT(3) | + BIT(5) | + BIT(7) | + BIT(11) | + BIT(13) | + BIT(17) | + BIT(19) | + BIT(23) | + BIT(29) | + BIT(31) | + BIT(37) | + BIT(41) | + BIT(43) | + BIT(47) | + BIT(53) | + BIT(59) | + BIT(61) + } +}; +#elif BITS_PER_LONG == 32 +static const struct primes small_primes = { + .last = 31, + .sz = 32, + .primes = { + BIT(2) | + BIT(3) | + BIT(5) | + BIT(7) | + BIT(11) | + BIT(13) | + BIT(17) | + BIT(19) | + BIT(23) | + BIT(29) | + BIT(31) + } +}; +#else +#error "unhandled BITS_PER_LONG" +#endif + +static DEFINE_MUTEX(lock); +static const struct primes __rcu *primes = RCU_INITIALIZER(&small_primes); + +static unsigned long selftest_max; + +static bool slow_is_prime_number(unsigned long x) +{ + unsigned long y = int_sqrt(x); + + while (y > 1) { + if ((x % y) == 0) + break; + y--; + } + + return y == 1; } + +static unsigned long slow_next_prime_number(unsigned long x) +{ + while (x < ULONG_MAX && !slow_is_prime_number(++x)) + ; + + return x; +} + +static unsigned long clear_multiples(unsigned long
x, + unsigned long *p, + unsigned long start, + unsigned long end) +{ + unsigned long m; + + m = 2 * x; + if (m < start) + m = roundup(start, x); + + while (m < end) { + __clear_bit(m, p); + m += x; + } + + return x; +} + +static bool expand_to_next_prime(unsigned long x) +{ + const struct primes *p; + struct primes *new; + unsigned long sz, y; + + /* Bertrand's Postulate (or Chebyshev's theorem) states that if n > 3, + * there is always at least one prime p between n and 2n - 2. + * Equivalently, if n > 1, then there is always at least one prime p + * such that n < p < 2n. + * + * http://mathworld.wolfram.com/BertrandsPostulate.html + * https://en.wikipedia.org/wiki/Bertrand's_postulate + */ + sz = 2 * x; + if (sz < x) + return false; + + sz = round_up(sz, BITS_PER_LONG); + new = kmalloc(sizeof(*new) + bitmap_size(sz), GFP_KERNEL); + if (!new) + return false; + + mutex_lock(&lock); + p = rcu_dereference_protected(primes, lockdep_is_held(&lock)); + if (x < p->last) { + kfree(new); + goto unlock; + } + + /* Where memory permits, track the primes using the + * Sieve of Eratosthenes. The sieve is to remove all multiples of known + * primes from the set; what remains in the set is therefore prime. + */ + bitmap_fill(new->primes, sz); + bitmap_copy(new->primes, p->primes, p->sz); + for (y = 2UL; y < sz; y = find_next_bit(new->primes, sz, y + 1)) + new->last = clear_multiples(y, new->primes, p->sz, sz); + new->sz = sz; + + BUG_ON(new->last <= x); + + rcu_assign_pointer(primes, new); + if (p != &small_primes) + kfree_rcu((struct primes *)p, rcu); + +unlock: + mutex_unlock(&lock); + return true; +} + +static void free_primes(void) +{ + const struct primes *p; + + mutex_lock(&lock); + p = rcu_dereference_protected(primes, lockdep_is_held(&lock)); + if (p != &small_primes) { + rcu_assign_pointer(primes, &small_primes); + kfree_rcu((struct primes *)p, rcu); + } + mutex_unlock(&lock); +} + +/** + * next_prime_number - return the next prime number + * @x: the starting point for the search + * + * A prime number is an integer greater than 1 that is only divisible by + * itself and 1. The set of prime numbers is computed using the Sieve of + * Eratosthenes (on finding a prime, all multiples of that prime are removed + * from the set) enabling a fast lookup of the next prime number larger than + * @x. If the sieve fails (memory limitation), the search falls back to using + * slow trial-division, up to the value of ULONG_MAX (which is reported as the + * final prime as a sentinel). + * + * Returns: the next prime number larger than @x + */ +unsigned long next_prime_number(unsigned long x) +{ + const struct primes *p; + + rcu_read_lock(); + p = rcu_dereference(primes); + while (x >= p->last) { + rcu_read_unlock(); + + if (!expand_to_next_prime(x)) + return slow_next_prime_number(x); + + rcu_read_lock(); + p = rcu_dereference(primes); + } + x = find_next_bit(p->primes, p->last, x + 1); + rcu_read_unlock(); + + return x; +} +EXPORT_SYMBOL(next_prime_number); + +/** + * is_prime_number - test whether the given number is prime + * @x: the number to test + * + * A prime number is an integer greater than 1 that is only divisible by + * itself and 1. Internally a cache of prime numbers is kept (to speed up + * searching for sequential primes, see next_prime_number()), but if the number + * falls outside of that cache, its primality is tested using trial-division. + * + * Returns: true if @x is prime, false for composite numbers.
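 + *
 + * Editor's example of use together with the iterators from
 + * linux/prime_numbers.h; this prints the primes 2, 3, 5, 7, 11, 13, 17 and
 + * 19 (the module's pr_fmt already appends the newline):
 + *
 + *	unsigned long p;
 + *
 + *	for_each_prime_number(p, 20)
 + *		pr_info("%lu", p);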
+ */ +bool is_prime_number(unsigned long x) +{ + const struct primes *p; + bool result; + + rcu_read_lock(); + p = rcu_dereference(primes); + while (x >= p->sz) { + rcu_read_unlock(); + + if (!expand_to_next_prime(x)) + return slow_is_prime_number(x); + + rcu_read_lock(); + p = rcu_dereference(primes); + } + result = test_bit(x, p->primes); + rcu_read_unlock(); + + return result; +} +EXPORT_SYMBOL(is_prime_number); + +static void dump_primes(void) +{ + const struct primes *p; + char *buf; + + buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + + rcu_read_lock(); + p = rcu_dereference(primes); + + if (buf) + bitmap_print_to_pagebuf(true, buf, p->primes, p->sz); + pr_info("primes.{last=%lu, .sz=%lu, .primes[]=...x%lx} = %s", + p->last, p->sz, p->primes[BITS_TO_LONGS(p->sz) - 1], buf); + + rcu_read_unlock(); + + kfree(buf); +} + +static int selftest(unsigned long max) +{ + unsigned long x, last; + + if (!max) + return 0; + + for (last = 0, x = 2; x < max; x++) { + bool slow = slow_is_prime_number(x); + bool fast = is_prime_number(x); + + if (slow != fast) { + pr_err("inconsistent result for is-prime(%lu): slow=%s, fast=%s!", + x, slow ? "yes" : "no", fast ? "yes" : "no"); + goto err; + } + + if (!slow) + continue; + + if (next_prime_number(last) != x) { + pr_err("incorrect result for next-prime(%lu): expected %lu, got %lu", + last, x, next_prime_number(last)); + goto err; + } + last = x; + } + + pr_info("selftest(%lu) passed, last prime was %lu", x, last); + return 0; + +err: + dump_primes(); + return -EINVAL; +} + +static int __init primes_init(void) +{ + return selftest(selftest_max); +} + +static void __exit primes_exit(void) +{ + free_primes(); +} + +module_init(primes_init); +module_exit(primes_exit); + +module_param_named(selftest, selftest_max, ulong, 0400); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); diff --git a/tools/testing/selftests/drivers/gpu/drm_mm.sh b/tools/testing/selftests/drivers/gpu/drm_mm.sh new file mode 100755 index 000000000000..96dd55c92799 --- /dev/null +++ b/tools/testing/selftests/drivers/gpu/drm_mm.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# Runs API tests for struct drm_mm (DRM range manager) + +if ! /sbin/modprobe -n -q test-drm_mm; then + echo "drivers/gpu/drm_mm: [skip]" + exit 77 +fi + +if /sbin/modprobe -q test-drm_mm; then + /sbin/modprobe -q -r test-drm_mm + echo "drivers/gpu/drm_mm: ok" +else + echo "drivers/gpu/drm_mm: [FAIL]" + exit 1 +fi diff --git a/tools/testing/selftests/lib/prime_numbers.sh b/tools/testing/selftests/lib/prime_numbers.sh new file mode 100755 index 000000000000..da4cbcd766f5 --- /dev/null +++ b/tools/testing/selftests/lib/prime_numbers.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# Checks fast/slow prime_number generation for inconsistencies + +if ! /sbin/modprobe -q -r prime_numbers; then + echo "prime_numbers: [SKIP]" + exit 77 +fi + +if /sbin/modprobe -q prime_numbers selftest=65536; then + /sbin/modprobe -q -r prime_numbers + echo "prime_numbers: ok" +else + echo "prime_numbers: [FAIL]" + exit 1 +fi