From 2b2d7ca7ce25fbec8389e7d85e57742caa47c97d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 10 Dec 2024 10:08:42 +0100 Subject: [PATCH 0001/1627] dma-buf: fix incorrect dma-fence documentation v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There isn't much worse than documentation giving incorrect advice. Grabbing a spinlock while interrupts are disabled usually means that you must also disable interrupts for all other uses of this spinlock. Otherwise, really hard-to-debug issues can occur. So fix that invalid documentation. v2: use Dmitry's suggestion on the documentation Signed-off-by: Christian König Reviewed-by: Simona Vetter (v1) Link: https://patchwork.freedesktop.org/patch/msgid/20250211163109.12200-2-christian.koenig@amd.com --- include/linux/dma-fence.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index e7ad819962e3..52587d390aca 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -169,8 +169,8 @@ struct dma_fence_ops { * implementation know that there is another driver waiting on the * signal (ie. hw->sw case). * - * This function can be called from atomic context, but not - * from irq context, so normal spinlocks can be used. + * This is called with irq's disabled, so only spinlocks which disable + * IRQ's can be used in the code outside of this callback. * * A return value of false indicates the fence already passed, * or some failure occurred that made it impossible to enable From 2ce07fea3cc8b866f7955a7ce1d62b0cc1f74819 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 18 Sep 2024 08:16:57 +0200 Subject: [PATCH 0002/1627] dma-buf/dma-fence: remove unnecessary callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The fence_value_str and timeline_value_str callbacks were just an unnecessary abstraction in the SW sync implementation. The only caller of those callbacks already knew that the fence in question is a timeline_fence. So print the values directly instead of using a redirection. In addition to that, remove the implementations from virtgpu and vgem. As far as I can see, those were never used in the first place. 
Signed-off-by: Christian König Reviewed-by: Simona Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20250211163109.12200-3-christian.koenig@amd.com --- drivers/dma-buf/sw_sync.c | 16 ---------------- drivers/dma-buf/sync_debug.c | 21 ++------------------- drivers/gpu/drm/vgem/vgem_fence.c | 15 --------------- drivers/gpu/drm/virtio/virtgpu_fence.c | 16 ---------------- include/linux/dma-fence.h | 21 --------------------- 5 files changed, 2 insertions(+), 87 deletions(-) diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c index f5905d67dedb..849280ae79a9 100644 --- a/drivers/dma-buf/sw_sync.c +++ b/drivers/dma-buf/sw_sync.c @@ -173,20 +173,6 @@ static bool timeline_fence_signaled(struct dma_fence *fence) return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops); } -static void timeline_fence_value_str(struct dma_fence *fence, - char *str, int size) -{ - snprintf(str, size, "%lld", fence->seqno); -} - -static void timeline_fence_timeline_value_str(struct dma_fence *fence, - char *str, int size) -{ - struct sync_timeline *parent = dma_fence_parent(fence); - - snprintf(str, size, "%d", parent->value); -} - static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline) { struct sync_pt *pt = dma_fence_to_sync_pt(fence); @@ -208,8 +194,6 @@ static const struct dma_fence_ops timeline_fence_ops = { .get_timeline_name = timeline_fence_get_timeline_name, .signaled = timeline_fence_signaled, .release = timeline_fence_release, - .fence_value_str = timeline_fence_value_str, - .timeline_value_str = timeline_fence_timeline_value_str, .set_deadline = timeline_fence_set_deadline, }; diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 237bce21d1e7..270daae7d89a 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -82,25 +82,8 @@ static void sync_print_fence(struct seq_file *s, seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec); } - if (fence->ops->timeline_value_str && - fence->ops->fence_value_str) { - char value[64]; - bool success; - - fence->ops->fence_value_str(fence, value, sizeof(value)); - success = strlen(value); - - if (success) { - seq_printf(s, ": %s", value); - - fence->ops->timeline_value_str(fence, value, - sizeof(value)); - - if (strlen(value)) - seq_printf(s, " / %s", value); - } - } - + seq_printf(s, ": %lld", fence->seqno); + seq_printf(s, " / %d", parent->value); seq_putc(s, '\n'); } diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c index e15754178395..5298d995faa7 100644 --- a/drivers/gpu/drm/vgem/vgem_fence.c +++ b/drivers/gpu/drm/vgem/vgem_fence.c @@ -53,25 +53,10 @@ static void vgem_fence_release(struct dma_fence *base) dma_fence_free(&fence->base); } -static void vgem_fence_value_str(struct dma_fence *fence, char *str, int size) -{ - snprintf(str, size, "%llu", fence->seqno); -} - -static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str, - int size) -{ - snprintf(str, size, "%llu", - dma_fence_is_signaled(fence) ? 
fence->seqno : 0); -} - static const struct dma_fence_ops vgem_fence_ops = { .get_driver_name = vgem_fence_get_driver_name, .get_timeline_name = vgem_fence_get_timeline_name, .release = vgem_fence_release, - - .fence_value_str = vgem_fence_value_str, - .timeline_value_str = vgem_fence_timeline_value_str, }; static void vgem_fence_timeout(struct timer_list *t) diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index f28357dbde35..44c1d8ef3c4d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -49,26 +49,10 @@ static bool virtio_gpu_fence_signaled(struct dma_fence *f) return false; } -static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size) -{ - snprintf(str, size, "[%llu, %llu]", f->context, f->seqno); -} - -static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str, - int size) -{ - struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f); - - snprintf(str, size, "%llu", - (u64)atomic64_read(&fence->drv->last_fence_id)); -} - static const struct dma_fence_ops virtio_gpu_fence_ops = { .get_driver_name = virtio_gpu_get_driver_name, .get_timeline_name = virtio_gpu_get_timeline_name, .signaled = virtio_gpu_fence_signaled, - .fence_value_str = virtio_gpu_fence_value_str, - .timeline_value_str = virtio_gpu_timeline_value_str, }; struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev, diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 52587d390aca..b12776883d14 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h @@ -238,27 +238,6 @@ struct dma_fence_ops { */ void (*release)(struct dma_fence *fence); - /** - * @fence_value_str: - * - * Callback to fill in free-form debug info specific to this fence, like - * the sequence number. - * - * This callback is optional. - */ - void (*fence_value_str)(struct dma_fence *fence, char *str, int size); - - /** - * @timeline_value_str: - * - * Fills in the current value of the timeline as a string, like the - * sequence number. Note that the specific fence passed to this function - * should not matter, drivers should only use it to look up the - * corresponding timeline structures. - */ - void (*timeline_value_str)(struct dma_fence *fence, - char *str, int size); - /** * @set_deadline: * From de68b17d5d0716c9a02b8a6ffa34f47c8f2f7690 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Feb 2025 15:26:16 +0100 Subject: [PATCH 0003/1627] dma-buf: dma-buf: stop mapping sg_tables on attach v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As a workaround to smoothly transit from static to dynamic DMA-buf handling we cached the sg_table on attach if dynamic handling mismatched between exporter and importer. Since Dmitry and Thomas cleaned that up and also documented the lock handling we can drop this workaround now. 
V2: implement Sima's comments Signed-off-by: Christian König Reviewed-by: Simona Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20250211163109.12200-4-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 151 +++++++++++++++----------------------- include/linux/dma-buf.h | 14 ---- 2 files changed, 59 insertions(+), 106 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 5baa83b85515..1f7349b08df8 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -782,7 +782,7 @@ static void mangle_sg_table(struct sg_table *sg_table) /* To catch abuse of the underlying struct page by importers mix * up the bits, but take care to preserve the low SG_ bits to - * not corrupt the sgt. The mixing is undone in __unmap_dma_buf + * not corrupt the sgt. The mixing is undone on unmap * before passing the sgt back to the exporter. */ for_each_sgtable_sg(sg_table, sg, i) @@ -790,29 +790,19 @@ static void mangle_sg_table(struct sg_table *sg_table) #endif } -static struct sg_table *__map_dma_buf(struct dma_buf_attachment *attach, - enum dma_data_direction direction) + +static inline bool +dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) { - struct sg_table *sg_table; - signed long ret; + return !!attach->importer_ops; +} - sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); - if (IS_ERR_OR_NULL(sg_table)) - return sg_table; - - if (!dma_buf_attachment_is_dynamic(attach)) { - ret = dma_resv_wait_timeout(attach->dmabuf->resv, - DMA_RESV_USAGE_KERNEL, true, - MAX_SCHEDULE_TIMEOUT); - if (ret < 0) { - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, - direction); - return ERR_PTR(ret); - } - } - - mangle_sg_table(sg_table); - return sg_table; +static bool +dma_buf_pin_on_map(struct dma_buf_attachment *attach) +{ + return attach->dmabuf->ops->pin && + (!dma_buf_attachment_is_dynamic(attach) || + !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)); } /** @@ -935,48 +925,11 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, list_add(&attach->node, &dmabuf->attachments); dma_resv_unlock(dmabuf->resv); - /* When either the importer or the exporter can't handle dynamic - * mappings we cache the mapping here to avoid issues with the - * reservation object lock. 
- */ - if (dma_buf_attachment_is_dynamic(attach) != - dma_buf_is_dynamic(dmabuf)) { - struct sg_table *sgt; - - dma_resv_lock(attach->dmabuf->resv, NULL); - if (dma_buf_is_dynamic(attach->dmabuf)) { - ret = dmabuf->ops->pin(attach); - if (ret) - goto err_unlock; - } - - sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL); - if (!sgt) - sgt = ERR_PTR(-ENOMEM); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto err_unpin; - } - dma_resv_unlock(attach->dmabuf->resv); - attach->sgt = sgt; - attach->dir = DMA_BIDIRECTIONAL; - } - return attach; err_attach: kfree(attach); return ERR_PTR(ret); - -err_unpin: - if (dma_buf_is_dynamic(attach->dmabuf)) - dmabuf->ops->unpin(attach); - -err_unlock: - dma_resv_unlock(attach->dmabuf->resv); - - dma_buf_detach(dmabuf, attach); - return ERR_PTR(ret); } EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF"); @@ -995,16 +948,6 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, } EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF"); -static void __unmap_dma_buf(struct dma_buf_attachment *attach, - struct sg_table *sg_table, - enum dma_data_direction direction) -{ - /* uses XOR, hence this unmangles */ - mangle_sg_table(sg_table); - - attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); -} - /** * dma_buf_detach - Remove the given attachment from dmabuf's attachments list * @dmabuf: [in] buffer to detach from. @@ -1022,11 +965,12 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) dma_resv_lock(dmabuf->resv, NULL); if (attach->sgt) { + mangle_sg_table(attach->sgt); + attach->dmabuf->ops->unmap_dma_buf(attach, attach->sgt, + attach->dir); - __unmap_dma_buf(attach, attach->sgt, attach->dir); - - if (dma_buf_is_dynamic(attach->dmabuf)) - dmabuf->ops->unpin(attach); + if (dma_buf_pin_on_map(attach)) + dma_buf_unpin(attach); } list_del(&attach->node); @@ -1058,7 +1002,7 @@ int dma_buf_pin(struct dma_buf_attachment *attach) struct dma_buf *dmabuf = attach->dmabuf; int ret = 0; - WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + WARN_ON(!attach->importer_ops); dma_resv_assert_held(dmabuf->resv); @@ -1081,7 +1025,7 @@ void dma_buf_unpin(struct dma_buf_attachment *attach) { struct dma_buf *dmabuf = attach->dmabuf; - WARN_ON(!dma_buf_attachment_is_dynamic(attach)); + WARN_ON(!attach->importer_ops); dma_resv_assert_held(dmabuf->resv); @@ -1115,7 +1059,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) { struct sg_table *sg_table; - int r; + signed long ret; might_sleep(); @@ -1136,29 +1080,42 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, return attach->sgt; } - if (dma_buf_is_dynamic(attach->dmabuf)) { - if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) { - r = attach->dmabuf->ops->pin(attach); - if (r) - return ERR_PTR(r); - } + if (dma_buf_pin_on_map(attach)) { + ret = attach->dmabuf->ops->pin(attach); + /* + * Catch exporters making buffers inaccessible even when + * attachments preventing that exist. + */ + WARN_ON_ONCE(ret == EBUSY); + if (ret) + return ERR_PTR(ret); } - sg_table = __map_dma_buf(attach, direction); + sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction); if (!sg_table) sg_table = ERR_PTR(-ENOMEM); + if (IS_ERR(sg_table)) + goto error_unpin; - if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) && - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) - attach->dmabuf->ops->unpin(attach); + /* + * Importers with static attachments don't wait for fences. 
+ */ + if (!dma_buf_attachment_is_dynamic(attach)) { + ret = dma_resv_wait_timeout(attach->dmabuf->resv, + DMA_RESV_USAGE_KERNEL, true, + MAX_SCHEDULE_TIMEOUT); + if (ret < 0) + goto error_unmap; + } + mangle_sg_table(sg_table); - if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) { + if (attach->dmabuf->ops->cache_sgt_mapping) { attach->sgt = sg_table; attach->dir = direction; } #ifdef CONFIG_DMA_API_DEBUG - if (!IS_ERR(sg_table)) { + { struct scatterlist *sg; u64 addr; int len; @@ -1175,6 +1132,16 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } #endif /* CONFIG_DMA_API_DEBUG */ return sg_table; + +error_unmap: + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); + sg_table = ERR_PTR(ret); + +error_unpin: + if (dma_buf_pin_on_map(attach)) + attach->dmabuf->ops->unpin(attach); + + return sg_table; } EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF"); @@ -1230,11 +1197,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, if (attach->sgt == sg_table) return; - __unmap_dma_buf(attach, sg_table, direction); + mangle_sg_table(sg_table); + attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); - if (dma_buf_is_dynamic(attach->dmabuf) && - !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) - dma_buf_unpin(attach); + if (dma_buf_pin_on_map(attach)) + attach->dmabuf->ops->unpin(attach); } EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF"); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 36216d28d8bd..c54ff2dda8cb 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -583,20 +583,6 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf) return !!dmabuf->ops->pin; } -/** - * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic - * mappings - * @attach: the DMA-buf attachment to check - * - * Returns true if a DMA-buf importer wants to call the map/unmap functions with - * the dma_resv lock held. - */ -static inline bool -dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach) -{ - return !!attach->importer_ops; -} - struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, struct device *dev); struct dma_buf_attachment * From b72f66f22c0e39ae6684c43fead774c13db24e73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Tue, 11 Feb 2025 17:20:53 +0100 Subject: [PATCH 0004/1627] dma-buf: drop caching of sg_tables MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That was purely for the transition from static to dynamic dma-buf handling and can be removed again now. 
Signed-off-by: Christian König Reviewed-by: Simona Vetter Reviewed-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250211163109.12200-5-christian.koenig@amd.com --- drivers/dma-buf/dma-buf.c | 34 -------------------------- drivers/dma-buf/udmabuf.c | 1 - drivers/gpu/drm/drm_prime.c | 1 - drivers/gpu/drm/virtio/virtgpu_prime.c | 1 - include/linux/dma-buf.h | 13 ---------- 5 files changed, 50 deletions(-) diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 1f7349b08df8..0c48d41dd5eb 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -636,10 +636,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) || !exp_info->ops->release)) return ERR_PTR(-EINVAL); - if (WARN_ON(exp_info->ops->cache_sgt_mapping && - (exp_info->ops->pin || exp_info->ops->unpin))) - return ERR_PTR(-EINVAL); - if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin)) return ERR_PTR(-EINVAL); @@ -963,17 +959,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) return; dma_resv_lock(dmabuf->resv, NULL); - - if (attach->sgt) { - mangle_sg_table(attach->sgt); - attach->dmabuf->ops->unmap_dma_buf(attach, attach->sgt, - attach->dir); - - if (dma_buf_pin_on_map(attach)) - dma_buf_unpin(attach); - } list_del(&attach->node); - dma_resv_unlock(dmabuf->resv); if (dmabuf->ops->detach) @@ -1068,18 +1054,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, dma_resv_assert_held(attach->dmabuf->resv); - if (attach->sgt) { - /* - * Two mappings with different directions for the same - * attachment are not allowed. - */ - if (attach->dir != direction && - attach->dir != DMA_BIDIRECTIONAL) - return ERR_PTR(-EBUSY); - - return attach->sgt; - } - if (dma_buf_pin_on_map(attach)) { ret = attach->dmabuf->ops->pin(attach); /* @@ -1109,11 +1083,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } mangle_sg_table(sg_table); - if (attach->dmabuf->ops->cache_sgt_mapping) { - attach->sgt = sg_table; - attach->dir = direction; - } - #ifdef CONFIG_DMA_API_DEBUG { struct scatterlist *sg; @@ -1194,9 +1163,6 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, dma_resv_assert_held(attach->dmabuf->resv); - if (attach->sgt == sg_table) - return; - mangle_sg_table(sg_table); attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction); diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index cc7398cc17d6..2fa2c9135eac 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -285,7 +285,6 @@ static int end_cpu_udmabuf(struct dma_buf *buf, } static const struct dma_buf_ops udmabuf_ops = { - .cache_sgt_mapping = true, .map_dma_buf = map_udmabuf, .unmap_dma_buf = unmap_udmabuf, .release = release_udmabuf, diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index bdb51c8f262e..a3d64f93a225 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -804,7 +804,6 @@ int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma) EXPORT_SYMBOL(drm_gem_dmabuf_mmap); static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { - .cache_sgt_mapping = true, .attach = drm_gem_map_attach, .detach = drm_gem_map_detach, .map_dma_buf = drm_gem_map_dma_buf, diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index fe6a0b018571..c6f3be3cb914 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -75,7 +75,6 @@ 
static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { .ops = { - .cache_sgt_mapping = true, .attach = virtio_dma_buf_attach, .detach = drm_gem_map_detach, .map_dma_buf = virtgpu_gem_map_dma_buf, diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index c54ff2dda8cb..544f8f8c3f44 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -34,15 +34,6 @@ struct dma_buf_attachment; * @vunmap: [optional] unmaps a vmap from the buffer */ struct dma_buf_ops { - /** - * @cache_sgt_mapping: - * - * If true the framework will cache the first mapping made for each - * attachment. This avoids creating mappings for attachments multiple - * times. - */ - bool cache_sgt_mapping; - /** * @attach: * @@ -493,8 +484,6 @@ struct dma_buf_attach_ops { * @dmabuf: buffer for this attachment. * @dev: device attached to the buffer. * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf. - * @sgt: cached mapping. - * @dir: direction of cached mapping. * @peer2peer: true if the importer can handle peer resources without pages. * @priv: exporter specific attachment data. * @importer_ops: importer operations for this attachment, if provided @@ -514,8 +503,6 @@ struct dma_buf_attachment { struct dma_buf *dmabuf; struct device *dev; struct list_head node; - struct sg_table *sgt; - enum dma_data_direction dir; bool peer2peer; const struct dma_buf_attach_ops *importer_ops; void *importer_priv; From 87edca6261c1327977d86c16857e74f4fc7c3ae8 Mon Sep 17 00:00:00 2001 From: Philipp Stanner Date: Wed, 5 Mar 2025 14:05:50 +0100 Subject: [PATCH 0005/1627] drm/sched: Adjust outdated docu for run_job() The documentation for drm_sched_backend_ops.run_job() mentions a certain function called drm_sched_job_recovery(). This function does not exist. What's actually meant is drm_sched_resubmit_jobs(), which is by now also deprecated. Furthermore, the scheduler expects to "inherit" a reference on the fence from the run_job() callback. This, so far, is also not documented. Remove the mention of the removed function. Discourage the behavior of drm_sched_backend_ops.run_job() being called multiple times for the same job. Document the necessity of incrementing the refcount in run_job(). Acked-by: Danilo Krummrich Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250305130551.136682-3-phasta@kernel.org --- include/drm/gpu_scheduler.h | 34 ++++++++++++++++++++++++++++++---- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 50928a7ae98e..6381baae8024 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -410,10 +410,36 @@ struct drm_sched_backend_ops { struct drm_sched_entity *s_entity); /** - * @run_job: Called to execute the job once all of the dependencies - * have been resolved. This may be called multiple times, if - * timedout_job() has happened and drm_sched_job_recovery() - * decides to try it again. + * @run_job: Called to execute the job once all of the dependencies + * have been resolved. + * + * @sched_job: the job to run + * + * The deprecated drm_sched_resubmit_jobs() (called by &struct + * drm_sched_backend_ops.timedout_job) can invoke this again with the + * same parameters. Using this is discouraged because it violates + * dma_fence rules, notably dma_fence_init() has to be called on + * already initialized fences for a second time. 
Moreover, this is + * dangerous because attempts to allocate memory might deadlock with + * memory management code waiting for the reset to complete. + * + * TODO: Document what drivers should do / use instead. + * + * This method is called in a workqueue context - either from the + * submit_wq the driver passed through drm_sched_init(), or, if the + * driver passed NULL, a separate, ordered workqueue the scheduler + * allocated. + * + * Note that the scheduler expects to 'inherit' its own reference to + * this fence from the callback. It does not invoke an extra + * dma_fence_get() on it. Consequently, this callback must take a + * reference for the scheduler, and additional ones for the driver's + * respective needs. + * + * Return: + * * On success: dma_fence the driver must signal once the hardware has + * completed the job ("hardware fence"). + * * On failure: NULL or an ERR_PTR. */ struct dma_fence *(*run_job)(struct drm_sched_job *sched_job); From 72ebc18b34993777bd6473be36cd63f37b3574ba Mon Sep 17 00:00:00 2001 From: Philipp Stanner Date: Wed, 5 Mar 2025 14:05:51 +0100 Subject: [PATCH 0006/1627] drm/sched: Document run_job() refcount hazard drm_sched_backend_ops.run_job() returns a dma_fence for the scheduler. That fence is signalled by the driver once the hardware completed the associated job. The scheduler does not increment the reference count on that fence, but implicitly expects to inherit this fence from run_job(). This is relatively subtle and prone to misunderstandings. This implies that, to keep a reference for itself, a driver needs to call dma_fence_get() in addition to dma_fence_init() in that callback. It's further complicated by the fact that the scheduler even decrements the refcount in drm_sched_run_job_work() since it created a new reference in drm_sched_fence_scheduled(). It does, however, still use its pointer to the fence after calling dma_fence_put() - which is safe because of the aforementioned new reference, but actually still violates the refcounting rules. Move the call to dma_fence_put() to the position behind the last usage of the fence. Suggested-by: Danilo Krummrich Signed-off-by: Philipp Stanner Reviewed-by: Danilo Krummrich Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250305130551.136682-4-phasta@kernel.org --- drivers/gpu/drm/scheduler/sched_main.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index bfea608a7106..53e6aec37b46 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1220,20 +1220,23 @@ static void drm_sched_run_job_work(struct work_struct *w) drm_sched_job_begin(sched_job); trace_drm_run_job(sched_job, entity); + /* + * The run_job() callback must by definition return a fence whose + * refcount has been incremented for the scheduler already. + */ fence = sched->ops->run_job(sched_job); complete_all(&entity->entity_idle); drm_sched_fence_scheduled(s_fence, fence); if (!IS_ERR_OR_NULL(fence)) { - /* Drop for original kref_init of the fence */ - dma_fence_put(fence); - r = dma_fence_add_callback(fence, &sched_job->cb, drm_sched_job_done_cb); if (r == -ENOENT) drm_sched_job_done(sched_job, fence->error); else if (r) DRM_DEV_ERROR(sched->dev, "fence add callback failed (%d)\n", r); + + dma_fence_put(fence); } else { drm_sched_job_done(sched_job, IS_ERR(fence) ? 
PTR_ERR(fence) : 0); From 2eeed61db4550171a32854e4d3fdf974b749c214 Mon Sep 17 00:00:00 2001 From: Philipp Stanner Date: Wed, 5 Mar 2025 14:05:52 +0100 Subject: [PATCH 0007/1627] drm/sched: Update timedout_job()'s documentation drm_sched_backend_ops.timedout_job()'s documentation is outdated. It mentions the deprecated function drm_sched_resubmit_jobs(). Furthermore, it does not point out the important distinction between hardware and firmware schedulers. Since firmware schedulers typically only use one entity per scheduler, timeout handling is significantly more simple because the entity the faulted job came from can just be killed without affecting innocent processes. Update the documentation with that distinction and other details. Reformat the docstring to work to a unified style with the other handles. Acked-by: Danilo Krummrich Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250305130551.136682-5-phasta@kernel.org --- include/drm/gpu_scheduler.h | 78 ++++++++++++++++++++++--------------- 1 file changed, 47 insertions(+), 31 deletions(-) diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 6381baae8024..1a7e377d4cbb 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -383,8 +383,15 @@ struct drm_sched_job { struct xarray dependencies; }; +/** + * enum drm_gpu_sched_stat - the scheduler's status + * + * @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use. + * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded. + * @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore. + */ enum drm_gpu_sched_stat { - DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */ + DRM_GPU_SCHED_STAT_NONE, DRM_GPU_SCHED_STAT_NOMINAL, DRM_GPU_SCHED_STAT_ENODEV, }; @@ -447,43 +454,52 @@ struct drm_sched_backend_ops { * @timedout_job: Called when a job has taken too long to execute, * to trigger GPU recovery. * - * This method is called in a workqueue context. + * @sched_job: The job that has timed out * - * Drivers typically issue a reset to recover from GPU hangs, and this - * procedure usually follows the following workflow: + * Drivers typically issue a reset to recover from GPU hangs. + * This procedure looks very different depending on whether a firmware + * or a hardware scheduler is being used. * - * 1. Stop the scheduler using drm_sched_stop(). This will park the - * scheduler thread and cancel the timeout work, guaranteeing that - * nothing is queued while we reset the hardware queue - * 2. Try to gracefully stop non-faulty jobs (optional) - * 3. Issue a GPU reset (driver-specific) - * 4. Re-submit jobs using drm_sched_resubmit_jobs() - * 5. Restart the scheduler using drm_sched_start(). At that point, new - * jobs can be queued, and the scheduler thread is unblocked + * For a FIRMWARE SCHEDULER, each ring has one scheduler, and each + * scheduler has one entity. Hence, the steps taken typically look as + * follows: + * + * 1. Stop the scheduler using drm_sched_stop(). This will pause the + * scheduler workqueues and cancel the timeout work, guaranteeing + * that nothing is queued while the ring is being removed. + * 2. Remove the ring. The firmware will make sure that the + * corresponding parts of the hardware are resetted, and that other + * rings are not impacted. + * 3. Kill the entity and the associated scheduler. + * + * + * For a HARDWARE SCHEDULER, a scheduler instance schedules jobs from + * one or more entities to one ring. 
This implies that all entities + * associated with the affected scheduler cannot be torn down, because + * this would effectively also affect innocent userspace processes which + * did not submit faulty jobs (for example). + * + * Consequently, the procedure to recover with a hardware scheduler + * should look like this: + * + * 1. Stop all schedulers impacted by the reset using drm_sched_stop(). + * 2. Kill the entity the faulty job stems from. + * 3. Issue a GPU reset on all faulty rings (driver-specific). + * 4. Re-submit jobs on all schedulers impacted by re-submitting them to + * the entities which are still alive. + * 5. Restart all schedulers that were stopped in step #1 using + * drm_sched_start(). * * Note that some GPUs have distinct hardware queues but need to reset * the GPU globally, which requires extra synchronization between the - * timeout handler of the different &drm_gpu_scheduler. One way to - * achieve this synchronization is to create an ordered workqueue - * (using alloc_ordered_workqueue()) at the driver level, and pass this - * queue to drm_sched_init(), to guarantee that timeout handlers are - * executed sequentially. The above workflow needs to be slightly - * adjusted in that case: + * timeout handlers of different schedulers. One way to achieve this + * synchronization is to create an ordered workqueue (using + * alloc_ordered_workqueue()) at the driver level, and pass this queue + * as drm_sched_init()'s @timeout_wq parameter. This will guarantee + * that timeout handlers are executed sequentially. * - * 1. Stop all schedulers impacted by the reset using drm_sched_stop() - * 2. Try to gracefully stop non-faulty jobs on all queues impacted by - * the reset (optional) - * 3. Issue a GPU reset on all faulty queues (driver-specific) - * 4. Re-submit jobs on all schedulers impacted by the reset using - * drm_sched_resubmit_jobs() - * 5. Restart all schedulers that were stopped in step #1 using - * drm_sched_start() + * Return: The scheduler's status, defined by &enum drm_gpu_sched_stat * - * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal, - * and the underlying driver has started or completed recovery. - * - * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer - * available, i.e. has been unplugged. */ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job); From fa0af721bd1f983c5c9c109815a154bd1cb29c75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 29 Jan 2025 16:28:48 +0100 Subject: [PATCH 0008/1627] drm/ttm: test private resv obj on release/destroy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Test the fences in the private dma_resv object instead of the pointer to a potentially shared dma_resv object. This only matters for imported BOs with an SG table since those don't get their dma_resv pointer replaced on release. 
Signed-off-by: Christian König Signed-off-by: James Zhu Reviewed-by: James Zhu Tested-by: James Zhu Link: https://patchwork.freedesktop.org/patch/msgid/20250129152849.15777-1-christian.koenig@amd.com --- drivers/gpu/drm/ttm/ttm_bo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 95b86003c50d..e218a7ce490e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -235,7 +235,7 @@ static void ttm_bo_delayed_delete(struct work_struct *work) bo = container_of(work, typeof(*bo), delayed_delete); - dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_BOOKKEEP, false, + dma_resv_wait_timeout(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP, false, MAX_SCHEDULE_TIMEOUT); dma_resv_lock(bo->base.resv, NULL); ttm_bo_cleanup_memtype_use(bo); @@ -270,7 +270,7 @@ static void ttm_bo_release(struct kref *kref) drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); ttm_mem_io_free(bdev, bo->resource); - if (!dma_resv_test_signaled(bo->base.resv, + if (!dma_resv_test_signaled(&bo->base._resv, DMA_RESV_USAGE_BOOKKEEP) || (want_init_on_free() && (bo->ttm != NULL)) || bo->type == ttm_bo_type_sg || From 41668e792e4606882b67f4febb59672fe68c4e9e Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:31 -0500 Subject: [PATCH 0009/1627] drm/fsl-dcu: move to devm_platform_ioremap_resource() usage Replace platform_get_resource + devm_ioremap_resource with just devm_platform_ioremap_resource() Used Coccinelle to do this change. SmPl patch: @rule_1@ identifier res; expression ioremap_res; identifier pdev; @@ -struct resource *res; ... -res = platform_get_resource(pdev,...); -ioremap_res = devm_ioremap_resource(...); +ioremap_res = devm_platform_ioremap_resource(pdev,0); Cc: Stefan Agner Cc: Alison Wang Reviewed-by: Maxime Ripard Signed-off-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/640851/?series=144073&rev=5 --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 03b076db9381..3bbfc1b56a65 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -260,7 +260,6 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) struct fsl_dcu_drm_device *fsl_dev; struct drm_device *drm; struct device *dev = &pdev->dev; - struct resource *res; void __iomem *base; struct clk *pix_clk_in; char pix_clk_name[32]; @@ -278,8 +277,7 @@ static int fsl_dcu_drm_probe(struct platform_device *pdev) return -ENODEV; fsl_dev->soc = id->data; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(base)) { ret = PTR_ERR(base); return ret; From 9da894756ee1c67c020a80f4037a8a20652337c6 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:32 -0500 Subject: [PATCH 0010/1627] drm/hisilicon: move to devm_platform_ioremap_resource() usage Replace platform_get_resource + devm_ioremap_resource with just devm_platform_ioremap_resource() Used Coccinelle to do this change. SmPl patch: @rule_1@ identifier res; expression ioremap_res; identifier pdev; @@ -struct resource *res; ... 
-res = platform_get_resource(pdev,...); -ioremap_res = devm_ioremap_resource(...); +ioremap_res = devm_platform_ioremap_resource(pdev,0); Cc: Xinliang Liu Cc: Tian Tao Cc: Xinwei Kong Cc: Sumit Semwal Cc: Yongqin Liu Cc: John Stultz Reviewed-by: Maxime Ripard Signed-off-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/640850/?series=144073&rev=5 --- drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 4 +--- drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 2eea9fb0e76b..e80debdc4176 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -825,7 +825,6 @@ static const struct component_ops dsi_ops = { static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) { struct dsi_hw_ctx *ctx = dsi->ctx; - struct resource *res; ctx->pclk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(ctx->pclk)) { @@ -833,8 +832,7 @@ static int dsi_parse_dt(struct platform_device *pdev, struct dw_dsi *dsi) return PTR_ERR(ctx->pclk); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(&pdev->dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) { DRM_ERROR("failed to remap dsi io region\n"); return PTR_ERR(ctx->base); diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index 2eb49177ac42..45c4eb008ad5 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -844,7 +844,6 @@ static struct drm_plane_funcs ade_plane_funcs = { static void *ade_hw_ctx_alloc(struct platform_device *pdev, struct drm_crtc *crtc) { - struct resource *res; struct device *dev = &pdev->dev; struct device_node *np = pdev->dev.of_node; struct ade_hw_ctx *ctx = NULL; @@ -856,8 +855,7 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev, return ERR_PTR(-ENOMEM); } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctx->base = devm_ioremap_resource(dev, res); + ctx->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctx->base)) { DRM_ERROR("failed to remap ade io base\n"); return ERR_PTR(-EIO); From 46babeac0e0856ca7d3c68a28328b55867922fe3 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:33 -0500 Subject: [PATCH 0011/1627] drm/mxsfb: move to devm_platform_ioremap_resource() usage Replace platform_get_resource + devm_ioremap_resource with just devm_platform_ioremap_resource() Used Coccinelle to do this change. SmPl patch: @rule_1@ identifier res; expression ioremap_res; identifier pdev; @@ -struct resource *res; ... 
-res = platform_get_resource(pdev,...); -ioremap_res = devm_ioremap_resource(...); +ioremap_res = devm_platform_ioremap_resource(pdev,0); Cc: Marek Vasut Cc: Stefan Agner Reviewed-by: Maxime Ripard Signed-off-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/640852/?series=144073&rev=5 --- drivers/gpu/drm/mxsfb/lcdif_drv.c | 4 +--- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c index 8ee00f59ca82..fcb2a7517377 100644 --- a/drivers/gpu/drm/mxsfb/lcdif_drv.c +++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c @@ -134,7 +134,6 @@ static int lcdif_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct lcdif_drm_private *lcdif; - struct resource *res; int ret; lcdif = devm_kzalloc(&pdev->dev, sizeof(*lcdif), GFP_KERNEL); @@ -144,8 +143,7 @@ static int lcdif_load(struct drm_device *drm) lcdif->drm = drm; drm->dev_private = lcdif; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - lcdif->base = devm_ioremap_resource(drm->dev, res); + lcdif->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(lcdif->base)) return PTR_ERR(lcdif->base); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 59020862cf65..377d4c4c9979 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -215,7 +215,6 @@ static int mxsfb_load(struct drm_device *drm, { struct platform_device *pdev = to_platform_device(drm->dev); struct mxsfb_drm_private *mxsfb; - struct resource *res; int ret; mxsfb = devm_kzalloc(&pdev->dev, sizeof(*mxsfb), GFP_KERNEL); @@ -226,8 +225,7 @@ static int mxsfb_load(struct drm_device *drm, drm->dev_private = mxsfb; mxsfb->devdata = devdata; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mxsfb->base = devm_ioremap_resource(drm->dev, res); + mxsfb->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mxsfb->base)) return PTR_ERR(mxsfb->base); From fc51acfca9ca2049e0f6bde0ca41f760e081c281 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:36 -0500 Subject: [PATCH 0012/1627] drm/tegra: move to devm_platform_ioremap_resource() usage Replace platform_get_resource + devm_ioremap_resource with just devm_platform_ioremap_resource() Used Coccinelle to do this change. SmPl patch: @rule_1@ identifier res; expression ioremap_res; identifier pdev; @@ -struct resource *res; ... 
-res = platform_get_resource(pdev,...); -ioremap_res = devm_ioremap_resource(...); +ioremap_res = devm_platform_ioremap_resource(pdev,0); Cc: Thierry Reding Cc: Mikko Perttunen Reviewed-by: Maxime Ripard Signed-off-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/640855/?series=144073&rev=5 --- drivers/gpu/drm/tegra/dsi.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c index 9bb077558167..b5089b772267 100644 --- a/drivers/gpu/drm/tegra/dsi.c +++ b/drivers/gpu/drm/tegra/dsi.c @@ -1564,7 +1564,6 @@ static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi) static int tegra_dsi_probe(struct platform_device *pdev) { struct tegra_dsi *dsi; - struct resource *regs; int err; dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL); @@ -1636,8 +1635,7 @@ static int tegra_dsi_probe(struct platform_device *pdev) goto remove; } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dsi->regs = devm_ioremap_resource(&pdev->dev, regs); + dsi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->regs)) { err = PTR_ERR(dsi->regs); goto remove; From 5f7a654b5ed2828e0da8e8331f236854c70f0893 Mon Sep 17 00:00:00 2001 From: Charles Han Date: Wed, 5 Mar 2025 18:30:42 +0800 Subject: [PATCH 0013/1627] drm/imx: legacy-bridge: fix inconsistent indenting warning Fix below inconsistent indenting smatch warning. smatch warnings: drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c:79 devm_imx_drm_legacy_bridge() warn: inconsistent indenting Signed-off-by: Charles Han Reviewed-by: Liu Ying Signed-off-by: Liu Ying Link: https://patchwork.freedesktop.org/patch/msgid/20250305103042.3017-1-hanchunchao@inspur.com --- drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c index 3ebf0b9866de..55a763045812 100644 --- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -76,9 +76,9 @@ struct drm_bridge *devm_imx_drm_legacy_bridge(struct device *dev, imx_bridge->base.ops = DRM_BRIDGE_OP_MODES; imx_bridge->base.type = type; - ret = devm_drm_bridge_add(dev, &imx_bridge->base); - if (ret) - return ERR_PTR(ret); + ret = devm_drm_bridge_add(dev, &imx_bridge->base); + if (ret) + return ERR_PTR(ret); return &imx_bridge->base; } From 8e8d76f62329127b31c64a034b052fb9e30e92af Mon Sep 17 00:00:00 2001 From: Tejas Upadhyay Date: Thu, 6 Mar 2025 18:42:11 +0530 Subject: [PATCH 0014/1627] drm/xe: Release guc ids before cancelling work A GT reset can be occurring in parallel while work is being cancelled in the async call, which can requeue these workers. To avoid that, first release the guc ids and then cancel the work so the workers don't get requeued. 
Fixes: 8ae8a2e8dd21 ("drm/xe: Long running job update") Fixes: 18fbd567e75f ("drm/xe: cancel pending job timer before freeing scheduler") Signed-off-by: Tejas Upadhyay Suggested-by: Matthew Brost Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20250306131211.975503-1-tejas.upadhyay@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_guc_submit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index b95934055f72..31bc2022bfc2 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -1254,11 +1254,11 @@ static void __guc_exec_queue_fini_async(struct work_struct *w) xe_pm_runtime_get(guc_to_xe(guc)); trace_xe_exec_queue_destroy(q); + release_guc_id(guc, q); if (xe_exec_queue_is_lr(q)) cancel_work_sync(&ge->lr_tdr); /* Confirm no work left behind accessing device structures */ cancel_delayed_work_sync(&ge->sched.base.work_tdr); - release_guc_id(guc, q); xe_sched_entity_fini(&ge->entity); xe_sched_fini(&ge->sched); From 9249a900fee4d7bdbe0dd183efbea09fbc8ce409 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Thu, 6 Mar 2025 15:51:55 +0000 Subject: [PATCH 0015/1627] drm/gma500: Remove unused mrst_clock_funcs The mrst_clock_funcs const was added in 2013 by commit ac6113ebb70d ("drm/gma500/mrst: Add SDVO clock calculation") and commented as 'Not used yet'. It's not been used since, so remove it. The helper functions it points to are still used elsewhere. Signed-off-by: Dr. David Alan Gilbert Signed-off-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250306155155.212599-1-linux@treblig.org --- drivers/gpu/drm/gma500/oaktrail_crtc.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index de8ccfe9890f..ea9b41af0867 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -658,10 +658,3 @@ const struct drm_crtc_helper_funcs oaktrail_helper_funcs = { .prepare = gma_crtc_prepare, .commit = gma_crtc_commit, }; - -/* Not used yet */ -const struct gma_clock_funcs mrst_clock_funcs = { - .clock = mrst_lvds_clock, - .limit = mrst_limit, - .pll_is_valid = gma_pll_is_valid, -}; From 2d4d775d11d314a505283cf31ae83460ef26ec70 Mon Sep 17 00:00:00 2001 From: Charles Han Date: Wed, 5 Mar 2025 18:25:40 +0800 Subject: [PATCH 0016/1627] drm: pl111: fix inconsistent indenting warning Fix below inconsistent indenting smatch warning. smatch warnings: drivers/gpu/drm/pl111/pl111_versatile.c:504 pl111_versatile_init() warn: inconsistent indenting Signed-off-by: Charles Han Signed-off-by: Linus Walleij Link: https://patchwork.freedesktop.org/patch/msgid/20250305102540.2815-1-hanchunchao@inspur.com --- drivers/gpu/drm/pl111/pl111_versatile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c index 1e4b28d03f4d..5f460b296c0c 100644 --- a/drivers/gpu/drm/pl111/pl111_versatile.c +++ b/drivers/gpu/drm/pl111/pl111_versatile.c @@ -501,7 +501,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv) * if we find it, it will take precedence. This is on the Integrator/AP * which only has this option for PL110 graphics. 
*/ - if (versatile_clcd_type == INTEGRATOR_CLCD_CM) { + if (versatile_clcd_type == INTEGRATOR_CLCD_CM) { np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match, &clcd_id); if (np) From 6fdbc11502b2fd47206b833ded2e407c84b6658c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:01 +0100 Subject: [PATCH 0017/1627] drm/vkms: Extract vkms_connector header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Up until now, the logic to manage connectors was in vkms_output.c. Since more options will be added to connectors in the future, extract the code to its own file. Refactor, no functional changes. Reviewed-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-2-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/Makefile | 3 +- drivers/gpu/drm/vkms/vkms_connector.c | 50 +++++++++++++++++++++++++++ drivers/gpu/drm/vkms/vkms_connector.h | 17 +++++++++ drivers/gpu/drm/vkms/vkms_output.c | 41 +++------------------- 4 files changed, 73 insertions(+), 38 deletions(-) create mode 100644 drivers/gpu/drm/vkms/vkms_connector.c create mode 100644 drivers/gpu/drm/vkms/vkms_connector.h diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 1b28a6a32948..6b0615c424f2 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -6,6 +6,7 @@ vkms-y := \ vkms_formats.o \ vkms_crtc.o \ vkms_composer.o \ - vkms_writeback.o + vkms_writeback.o \ + vkms_connector.o obj-$(CONFIG_DRM_VKMS) += vkms.o diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c new file mode 100644 index 000000000000..fc97f265dea6 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_connector.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include +#include +#include +#include + +#include "vkms_connector.h" + +static const struct drm_connector_funcs vkms_connector_funcs = { + .fill_modes = drm_helper_probe_single_connector_modes, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vkms_conn_get_modes(struct drm_connector *connector) +{ + int count; + + /* Use the default modes list from DRM */ + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + + return count; +} + +static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { + .get_modes = vkms_conn_get_modes, +}; + +struct drm_connector *vkms_connector_init(struct vkms_device *vkmsdev) +{ + struct drm_device *dev = &vkmsdev->drm; + struct drm_connector *connector; + int ret; + + connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); + if (!connector) + return ERR_PTR(-ENOMEM); + + ret = drmm_connector_init(dev, connector, &vkms_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, NULL); + if (ret) + return ERR_PTR(ret); + + drm_connector_helper_add(connector, &vkms_conn_helper_funcs); + + return connector; +} diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h new file mode 100644 index 000000000000..beb5ebe09155 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_connector.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_CONNECTOR_H_ +#define _VKMS_CONNECTOR_H_ + +#include "vkms_drv.h" + +/** + * vkms_connector_init() - 
Initialize a connector + * @vkmsdev: VKMS device containing the connector + * + * Returns: + * The connector or an error on failure. + */ +struct drm_connector *vkms_connector_init(struct vkms_device *vkmsdev); + +#endif /* _VKMS_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 22f0d678af3a..b01c3e9289d0 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -1,32 +1,8 @@ // SPDX-License-Identifier: GPL-2.0+ +#include "vkms_connector.h" #include "vkms_drv.h" -#include -#include #include -#include - -static const struct drm_connector_funcs vkms_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int vkms_conn_get_modes(struct drm_connector *connector) -{ - int count; - - /* Use the default modes list from DRM */ - count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); - drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); - - return count; -} - -static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { - .get_modes = vkms_conn_get_modes, -}; int vkms_output_init(struct vkms_device *vkmsdev) { @@ -73,21 +49,12 @@ int vkms_output_init(struct vkms_device *vkmsdev) } } - connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); - if (!connector) { - DRM_ERROR("Failed to allocate connector\n"); - return -ENOMEM; - } - - ret = drmm_connector_init(dev, connector, &vkms_connector_funcs, - DRM_MODE_CONNECTOR_VIRTUAL, NULL); - if (ret) { + connector = vkms_connector_init(vkmsdev); + if (IS_ERR(connector)) { DRM_ERROR("Failed to init connector\n"); - return ret; + return PTR_ERR(connector); } - drm_connector_helper_add(connector, &vkms_conn_helper_funcs); - encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL); if (!encoder) { DRM_ERROR("Failed to allocate encoder\n"); From a833c5880a5fbecd71f60efca6ad641e51d0d29e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:02 +0100 Subject: [PATCH 0018/1627] drm/vkms: Create vkms_connector struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Create a structure wrapping the drm_connector. 
Reviewed-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-3-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/vkms_connector.c | 8 ++++---- drivers/gpu/drm/vkms/vkms_connector.h | 11 ++++++++++- drivers/gpu/drm/vkms/vkms_output.c | 4 ++-- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c index fc97f265dea6..ab8b52a84151 100644 --- a/drivers/gpu/drm/vkms/vkms_connector.c +++ b/drivers/gpu/drm/vkms/vkms_connector.c @@ -29,22 +29,22 @@ static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { .get_modes = vkms_conn_get_modes, }; -struct drm_connector *vkms_connector_init(struct vkms_device *vkmsdev) +struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev) { struct drm_device *dev = &vkmsdev->drm; - struct drm_connector *connector; + struct vkms_connector *connector; int ret; connector = drmm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); if (!connector) return ERR_PTR(-ENOMEM); - ret = drmm_connector_init(dev, connector, &vkms_connector_funcs, + ret = drmm_connector_init(dev, &connector->base, &vkms_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL, NULL); if (ret) return ERR_PTR(ret); - drm_connector_helper_add(connector, &vkms_conn_helper_funcs); + drm_connector_helper_add(&connector->base, &vkms_conn_helper_funcs); return connector; } diff --git a/drivers/gpu/drm/vkms/vkms_connector.h b/drivers/gpu/drm/vkms/vkms_connector.h index beb5ebe09155..c9149c1b7af0 100644 --- a/drivers/gpu/drm/vkms/vkms_connector.h +++ b/drivers/gpu/drm/vkms/vkms_connector.h @@ -5,6 +5,15 @@ #include "vkms_drv.h" +/** + * struct vkms_connector - VKMS custom type wrapping around the DRM connector + * + * @drm: Base DRM connector + */ +struct vkms_connector { + struct drm_connector base; +}; + /** * vkms_connector_init() - Initialize a connector * @vkmsdev: VKMS device containing the connector @@ -12,6 +21,6 @@ * Returns: * The connector or an error on failure. */ -struct drm_connector *vkms_connector_init(struct vkms_device *vkmsdev); +struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev); #endif /* _VKMS_CONNECTOR_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index b01c3e9289d0..4b5abe159add 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -7,7 +7,7 @@ int vkms_output_init(struct vkms_device *vkmsdev) { struct drm_device *dev = &vkmsdev->drm; - struct drm_connector *connector; + struct vkms_connector *connector; struct drm_encoder *encoder; struct vkms_output *output; struct vkms_plane *primary, *overlay, *cursor = NULL; @@ -69,7 +69,7 @@ int vkms_output_init(struct vkms_device *vkmsdev) encoder->possible_crtcs = drm_crtc_mask(&output->crtc); /* Attach the encoder and the connector */ - ret = drm_connector_attach_encoder(connector, encoder); + ret = drm_connector_attach_encoder(&connector->base, encoder); if (ret) { DRM_ERROR("Failed to attach connector to encoder\n"); return ret; From 5b5a56d9a2d64e8395dfbaddecb3e5149d7ecae8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:03 +0100 Subject: [PATCH 0019/1627] drm/vkms: Add KUnit test scaffolding MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add the required boilerplate to start creating KUnit test. 
To run the tests: $ ./tools/testing/kunit/kunit.py run \ --kunitconfig=drivers/gpu/drm/vkms/tests Reviewed-by: Louis Chauvet Co-developed-by: Arthur Grillo Signed-off-by: Arthur Grillo Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-4-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/Kconfig | 15 +++++++++++++++ drivers/gpu/drm/vkms/Makefile | 1 + drivers/gpu/drm/vkms/tests/.kunitconfig | 4 ++++ drivers/gpu/drm/vkms/tests/Makefile | 3 +++ drivers/gpu/drm/vkms/tests/vkms_config_test.c | 19 +++++++++++++++++++ 5 files changed, 42 insertions(+) create mode 100644 drivers/gpu/drm/vkms/tests/.kunitconfig create mode 100644 drivers/gpu/drm/vkms/tests/Makefile create mode 100644 drivers/gpu/drm/vkms/tests/vkms_config_test.c diff --git a/drivers/gpu/drm/vkms/Kconfig b/drivers/gpu/drm/vkms/Kconfig index 9def079f685b..3c02f928ffe6 100644 --- a/drivers/gpu/drm/vkms/Kconfig +++ b/drivers/gpu/drm/vkms/Kconfig @@ -14,3 +14,18 @@ config DRM_VKMS a VKMS. If M is selected the module will be called vkms. + +config DRM_VKMS_KUNIT_TEST + tristate "KUnit tests for VKMS" if !KUNIT_ALL_TESTS + depends on DRM_VKMS && KUNIT + default KUNIT_ALL_TESTS + help + This builds unit tests for VKMS. This option is not useful for + distributions or general kernels, but only for kernel + developers working on VKMS. + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in + Documentation/dev-tools/kunit/. + + If in doubt, say "N". diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index 6b0615c424f2..c23eee2f3df4 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -10,3 +10,4 @@ vkms-y := \ vkms_connector.o obj-$(CONFIG_DRM_VKMS) += vkms.o +obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/vkms/tests/.kunitconfig b/drivers/gpu/drm/vkms/tests/.kunitconfig new file mode 100644 index 000000000000..6a2d87068edc --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/.kunitconfig @@ -0,0 +1,4 @@ +CONFIG_KUNIT=y +CONFIG_DRM=y +CONFIG_DRM_VKMS=y +CONFIG_DRM_VKMS_KUNIT_TEST=y diff --git a/drivers/gpu/drm/vkms/tests/Makefile b/drivers/gpu/drm/vkms/tests/Makefile new file mode 100644 index 000000000000..9ded37b67a46 --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += vkms_config_test.o diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c new file mode 100644 index 000000000000..1177e62e19cb --- /dev/null +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include + +MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); + +static struct kunit_case vkms_config_test_cases[] = { + {} +}; + +static struct kunit_suite vkms_config_test_suite = { + .name = "vkms-config", + .test_cases = vkms_config_test_cases, +}; + +kunit_test_suite(vkms_config_test_suite); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Kunit test for vkms config utility"); From d3ae1e394bdc267a1699f7ac2ff3d3a990e791b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:04 +0100 Subject: [PATCH 0020/1627] drm/vkms: Extract vkms_config header MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Creating a new vkms_config structure will 
be more complex once we start adding more options. Extract the vkms_config structure to its own header and source files and add functions to create and delete a vkms_config and to initialize debugfs. Refactor, no functional changes. Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-5-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/Makefile | 3 +- drivers/gpu/drm/vkms/tests/vkms_config_test.c | 13 +++++ drivers/gpu/drm/vkms/vkms_config.c | 50 +++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.h | 47 +++++++++++++++++ drivers/gpu/drm/vkms/vkms_drv.c | 34 +++---------- drivers/gpu/drm/vkms/vkms_drv.h | 15 +----- drivers/gpu/drm/vkms/vkms_output.c | 1 + 7 files changed, 121 insertions(+), 42 deletions(-) create mode 100644 drivers/gpu/drm/vkms/vkms_config.c create mode 100644 drivers/gpu/drm/vkms/vkms_config.h diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile index c23eee2f3df4..d657865e573f 100644 --- a/drivers/gpu/drm/vkms/Makefile +++ b/drivers/gpu/drm/vkms/Makefile @@ -7,7 +7,8 @@ vkms-y := \ vkms_crtc.o \ vkms_composer.o \ vkms_writeback.o \ - vkms_connector.o + vkms_connector.o \ + vkms_config.o obj-$(CONFIG_DRM_VKMS) += vkms.o obj-$(CONFIG_DRM_VKMS_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 1177e62e19cb..a7060504f3dc 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -2,9 +2,22 @@ #include +#include "../vkms_config.h" + MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); +static void vkms_config_test_empty_config(struct kunit *test) +{ + struct vkms_config *config; + + config = vkms_config_create(); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { + KUNIT_CASE(vkms_config_test_empty_config), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c new file mode 100644 index 000000000000..42caa421876e --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -0,0 +1,50 @@ +// SPDX-License-Identifier: GPL-2.0+ + +#include + +#include +#include +#include + +#include "vkms_config.h" + +struct vkms_config *vkms_config_create(void) +{ + struct vkms_config *config; + + config = kzalloc(sizeof(*config), GFP_KERNEL); + if (!config) + return ERR_PTR(-ENOMEM); + + return config; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); + +void vkms_config_destroy(struct vkms_config *config) +{ + kfree(config); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy); + +static int vkms_config_show(struct seq_file *m, void *data) +{ + struct drm_debugfs_entry *entry = m->private; + struct drm_device *dev = entry->dev; + struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + + seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); + seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor); + seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay); + + return 0; +} + +static const struct drm_debugfs_info vkms_config_debugfs_list[] = { + { "vkms_config", vkms_config_show, 0 }, +}; + +void vkms_config_register_debugfs(struct vkms_device *vkms_device) +{ + drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list, + ARRAY_SIZE(vkms_config_debugfs_list)); +} diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h 
new file mode 100644 index 000000000000..ced10f56a812 --- /dev/null +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ + +#ifndef _VKMS_CONFIG_H_ +#define _VKMS_CONFIG_H_ + +#include + +#include "vkms_drv.h" + +/** + * struct vkms_config - General configuration for VKMS driver + * + * @writeback: If true, a writeback buffer can be attached to the CRTC + * @cursor: If true, a cursor plane is created in the VKMS device + * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device + * @dev: Used to store the current VKMS device. Only set when the device is instantiated. + */ +struct vkms_config { + bool writeback; + bool cursor; + bool overlay; + struct vkms_device *dev; +}; + +/** + * vkms_config_create() - Create a new VKMS configuration + * + * Returns: + * The new vkms_config or an error. Call vkms_config_destroy() to free the + * returned configuration. + */ +struct vkms_config *vkms_config_create(void); + +/** + * vkms_config_destroy() - Free a VKMS configuration + * @config: vkms_config to free + */ +void vkms_config_destroy(struct vkms_config *config); + +/** + * vkms_config_register_debugfs() - Register a debugfs file to show the device's + * configuration + * @vkms_device: Device to register + */ +void vkms_config_register_debugfs(struct vkms_device *vkms_device); + +#endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index b6de91134a22..37de0658e6ee 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -27,11 +27,9 @@ #include #include +#include "vkms_config.h" #include "vkms_drv.h" -#include -#include - #define DRIVER_NAME "vkms" #define DRIVER_DESC "Virtual Kernel Mode Setting" #define DRIVER_MAJOR 1 @@ -81,23 +79,6 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_cleanup_planes(dev, old_state); } -static int vkms_config_show(struct seq_file *m, void *data) -{ - struct drm_debugfs_entry *entry = m->private; - struct drm_device *dev = entry->dev; - struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); - - seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); - seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor); - seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay); - - return 0; -} - -static const struct drm_debugfs_info vkms_config_debugfs_list[] = { - { "vkms_config", vkms_config_show, 0 }, -}; - static const struct drm_driver vkms_driver = { .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, .fops = &vkms_driver_fops, @@ -208,8 +189,7 @@ static int vkms_create(struct vkms_config *config) if (ret) goto out_devres; - drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list, - ARRAY_SIZE(vkms_config_debugfs_list)); + vkms_config_register_debugfs(vkms_device); ret = drm_dev_register(&vkms_device->drm, 0); if (ret) @@ -231,9 +211,9 @@ static int __init vkms_init(void) int ret; struct vkms_config *config; - config = kmalloc(sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; + config = vkms_config_create(); + if (IS_ERR(config)) + return PTR_ERR(config); config->cursor = enable_cursor; config->writeback = enable_writeback; @@ -241,7 +221,7 @@ static int __init vkms_init(void) ret = vkms_create(config); if (ret) { - kfree(config); + vkms_config_destroy(config); return ret; } @@ -275,7 +255,7 @@ static void __exit vkms_exit(void) return; vkms_destroy(default_config); - kfree(default_config); + vkms_config_destroy(default_config); } 
module_init(vkms_init); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index abbb652be2b5..af7081c940d6 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -189,20 +189,7 @@ struct vkms_output { spinlock_t composer_lock; }; -/** - * struct vkms_config - General configuration for VKMS driver - * - * @writeback: If true, a writeback buffer can be attached to the CRTC - * @cursor: If true, a cursor plane is created in the VKMS device - * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device - * @dev: Used to store the current VKMS device. Only set when the device is instantiated. - */ -struct vkms_config { - bool writeback; - bool cursor; - bool overlay; - struct vkms_device *dev; -}; +struct vkms_config; /** * struct vkms_device - Description of a VKMS device diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 4b5abe159add..068a7f87ecec 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ +#include "vkms_config.h" #include "vkms_connector.h" #include "vkms_drv.h" #include From 8b059b0c3f721373f45c9d72d0481345e765be86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:05 +0100 Subject: [PATCH 0021/1627] drm/vkms: Move default_config creation to its own function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extract the initialization of the default configuration to a function. Refactor, no functional changes. Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-6-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/tests/vkms_config_test.c | 38 +++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.c | 18 +++++++++ drivers/gpu/drm/vkms/vkms_config.h | 14 +++++++ drivers/gpu/drm/vkms/vkms_drv.c | 6 +-- 4 files changed, 71 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index a7060504f3dc..d8644a1e3e18 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -6,6 +6,12 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); +struct default_config_case { + bool enable_cursor; + bool enable_writeback; + bool enable_overlay; +}; + static void vkms_config_test_empty_config(struct kunit *test) { struct vkms_config *config; @@ -16,8 +22,40 @@ static void vkms_config_test_empty_config(struct kunit *test) vkms_config_destroy(config); } +static struct default_config_case default_config_cases[] = { + { false, false, false }, + { true, false, false }, + { true, true, false }, + { true, false, true }, + { false, true, false }, + { false, true, true }, + { false, false, true }, + { true, true, true }, +}; + +KUNIT_ARRAY_PARAM(default_config, default_config_cases, NULL); + +static void vkms_config_test_default_config(struct kunit *test) +{ + const struct default_config_case *params = test->param_value; + struct vkms_config *config; + + config = vkms_config_default_create(params->enable_cursor, + params->enable_writeback, + params->enable_overlay); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + KUNIT_EXPECT_EQ(test, config->cursor, params->enable_cursor); + KUNIT_EXPECT_EQ(test, config->writeback, 
params->enable_writeback); + KUNIT_EXPECT_EQ(test, config->overlay, params->enable_overlay); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), + KUNIT_CASE_PARAM(vkms_config_test_default_config, + default_config_gen_params), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index 42caa421876e..0af8e6dc0a01 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -20,6 +20,24 @@ struct vkms_config *vkms_config_create(void) } EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); +struct vkms_config *vkms_config_default_create(bool enable_cursor, + bool enable_writeback, + bool enable_overlay) +{ + struct vkms_config *config; + + config = vkms_config_create(); + if (IS_ERR(config)) + return config; + + config->cursor = enable_cursor; + config->writeback = enable_writeback; + config->overlay = enable_overlay; + + return config; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create); + void vkms_config_destroy(struct vkms_config *config) { kfree(config); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index ced10f56a812..d0868750826a 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -31,6 +31,20 @@ struct vkms_config { */ struct vkms_config *vkms_config_create(void); +/** + * vkms_config_default_create() - Create the configuration for the default device + * @enable_cursor: Create or not a cursor plane + * @enable_writeback: Create or not a writeback connector + * @enable_overlay: Create or not overlay planes + * + * Returns: + * The default vkms_config or an error. Call vkms_config_destroy() to free the + * returned configuration. + */ +struct vkms_config *vkms_config_default_create(bool enable_cursor, + bool enable_writeback, + bool enable_overlay); + /** * vkms_config_destroy() - Free a VKMS configuration * @config: vkms_config to free diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 37de0658e6ee..582d5825f42b 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -211,14 +211,10 @@ static int __init vkms_init(void) int ret; struct vkms_config *config; - config = vkms_config_create(); + config = vkms_config_default_create(enable_cursor, enable_writeback, enable_overlay); if (IS_ERR(config)) return PTR_ERR(config); - config->cursor = enable_cursor; - config->writeback = enable_writeback; - config->overlay = enable_overlay; - ret = vkms_create(config); if (ret) { vkms_config_destroy(config); From 969a3a4e2ba30138f065dbf8904798a80a528ca5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:06 +0100 Subject: [PATCH 0022/1627] drm/vkms: Set device name from vkms_config MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to be able to create multiple devices, the device name needs to be unique. Allow to set it in the VKMS configuration. 
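The device name is now fixed when the configuration is created and read back when the platform device is registered. A minimal caller-side sketch of the API added below ("vkms-test" and the function name are only examples; error paths trimmed):

	static int vkms_config_name_example(void)
	{
		struct vkms_config *config;

		/* The name is duplicated with kstrdup_const() and owned by the config */
		config = vkms_config_create("vkms-test");
		if (IS_ERR(config))
			return PTR_ERR(config);

		pr_info("creating device %s\n", vkms_config_get_device_name(config));

		vkms_config_destroy(config);
		return 0;
	}
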
Reviewed-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-7-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/tests/vkms_config_test.c | 7 ++++++- drivers/gpu/drm/vkms/vkms_config.c | 14 ++++++++++++-- drivers/gpu/drm/vkms/vkms_config.h | 18 +++++++++++++++++- drivers/gpu/drm/vkms/vkms_drv.c | 4 +++- drivers/gpu/drm/vkms/vkms_drv.h | 2 ++ 5 files changed, 40 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index d8644a1e3e18..92798590051b 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -15,10 +15,15 @@ struct default_config_case { static void vkms_config_test_empty_config(struct kunit *test) { struct vkms_config *config; + const char *dev_name = "test"; - config = vkms_config_create(); + config = vkms_config_create(dev_name); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + /* The dev_name string and the config have different lifetimes */ + dev_name = NULL; + KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test"); + vkms_config_destroy(config); } diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index 0af8e6dc0a01..9fb08d94a351 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -8,7 +8,7 @@ #include "vkms_config.h" -struct vkms_config *vkms_config_create(void) +struct vkms_config *vkms_config_create(const char *dev_name) { struct vkms_config *config; @@ -16,6 +16,12 @@ struct vkms_config *vkms_config_create(void) if (!config) return ERR_PTR(-ENOMEM); + config->dev_name = kstrdup_const(dev_name, GFP_KERNEL); + if (!config->dev_name) { + kfree(config); + return ERR_PTR(-ENOMEM); + } + return config; } EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); @@ -26,7 +32,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, { struct vkms_config *config; - config = vkms_config_create(); + config = vkms_config_create(DEFAULT_DEVICE_NAME); if (IS_ERR(config)) return config; @@ -40,6 +46,7 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create); void vkms_config_destroy(struct vkms_config *config) { + kfree_const(config->dev_name); kfree(config); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy); @@ -49,7 +56,10 @@ static int vkms_config_show(struct seq_file *m, void *data) struct drm_debugfs_entry *entry = m->private; struct drm_device *dev = entry->dev; struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); + const char *dev_name; + dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config); + seq_printf(m, "dev_name=%s\n", dev_name); seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor); seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index d0868750826a..fcaa909fb2e0 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -10,12 +10,14 @@ /** * struct vkms_config - General configuration for VKMS driver * + * @dev_name: Name of the device * @writeback: If true, a writeback buffer can be attached to the CRTC * @cursor: If true, a cursor plane is created in the VKMS device * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device * @dev: Used to store the current VKMS device. Only set when the device is instantiated. 
*/ struct vkms_config { + const char *dev_name; bool writeback; bool cursor; bool overlay; @@ -24,12 +26,13 @@ struct vkms_config { /** * vkms_config_create() - Create a new VKMS configuration + * @dev_name: Name of the device * * Returns: * The new vkms_config or an error. Call vkms_config_destroy() to free the * returned configuration. */ -struct vkms_config *vkms_config_create(void); +struct vkms_config *vkms_config_create(const char *dev_name); /** * vkms_config_default_create() - Create the configuration for the default device @@ -51,6 +54,19 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, */ void vkms_config_destroy(struct vkms_config *config); +/** + * vkms_config_get_device_name() - Return the name of the device + * @config: Configuration to get the device name from + * + * Returns: + * The device name. Only valid while @config is valid. + */ +static inline const char * +vkms_config_get_device_name(struct vkms_config *config) +{ + return config->dev_name; +} + /** * vkms_config_register_debugfs() - Register a debugfs file to show the device's * configuration diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 582d5825f42b..ba977ef09b2b 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -151,8 +151,10 @@ static int vkms_create(struct vkms_config *config) int ret; struct platform_device *pdev; struct vkms_device *vkms_device; + const char *dev_name; - pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); + dev_name = vkms_config_get_device_name(config); + pdev = platform_device_register_simple(dev_name, -1, NULL, 0); if (IS_ERR(pdev)) return PTR_ERR(pdev); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index af7081c940d6..a74a7fc3a056 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -12,6 +12,8 @@ #include #include +#define DEFAULT_DEVICE_NAME "vkms" + #define XRES_MIN 10 #define YRES_MIN 10 From d1386d721d19f8127c2edd43601693e2856db8dd Mon Sep 17 00:00:00 2001 From: Louis Chauvet Date: Tue, 18 Feb 2025 11:12:07 +0100 Subject: [PATCH 0023/1627] drm/vkms: Add a validation function for VKMS configuration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As the configuration will be used by userspace, add a validator to avoid creating a broken DRM device. For the moment, the function always returns true, but rules will be added in future patches. 
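The intended calling pattern is to validate a configuration before it is turned into a DRM device, as the new check in vkms_output_init() below does. A condensed sketch of that caller-side usage (function name illustrative, error handling trimmed):

	static int vkms_config_validate_example(void)
	{
		struct vkms_config *config;

		config = vkms_config_default_create(false, false, false);
		if (IS_ERR(config))
			return PTR_ERR(config);

		/* Always true for now; later patches add real rules */
		if (!vkms_config_is_valid(config)) {
			vkms_config_destroy(config);
			return -EINVAL;
		}

		vkms_config_destroy(config);
		return 0;
	}
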
Reviewed-by: Louis Chauvet Signed-off-by: Louis Chauvet Co-developed-by: José Expósito Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-8-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- drivers/gpu/drm/vkms/tests/vkms_config_test.c | 2 ++ drivers/gpu/drm/vkms/vkms_config.c | 6 ++++++ drivers/gpu/drm/vkms/vkms_config.h | 10 ++++++++++ drivers/gpu/drm/vkms/vkms_output.c | 3 +++ 4 files changed, 21 insertions(+) diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 92798590051b..6e07139d261c 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -54,6 +54,8 @@ static void vkms_config_test_default_config(struct kunit *test) KUNIT_EXPECT_EQ(test, config->writeback, params->enable_writeback); KUNIT_EXPECT_EQ(test, config->overlay, params->enable_overlay); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + vkms_config_destroy(config); } diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index 9fb08d94a351..d1947537834c 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -51,6 +51,12 @@ void vkms_config_destroy(struct vkms_config *config) } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy); +bool vkms_config_is_valid(const struct vkms_config *config) +{ + return true; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid); + static int vkms_config_show(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index fcaa909fb2e0..31c758631c37 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -67,6 +67,16 @@ vkms_config_get_device_name(struct vkms_config *config) return config->dev_name; } +/** + * vkms_config_is_valid() - Validate a configuration + * @config: Configuration to validate + * + * Returns: + * Whether the configuration is valid or not. + * For example, a configuration without primary planes is not valid. + */ +bool vkms_config_is_valid(const struct vkms_config *config); + /** * vkms_config_register_debugfs() - Register a debugfs file to show the device's * configuration diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 068a7f87ecec..414cc933af41 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -16,6 +16,9 @@ int vkms_output_init(struct vkms_device *vkmsdev) int writeback; unsigned int n; + if (!vkms_config_is_valid(vkmsdev->config)) + return -EINVAL; + /* * Initialize used plane. One primary plane is required to perform the composition. * From bc5b0d5dccf3c842872d63b937a1bb2a6e93d5b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:08 +0100 Subject: [PATCH 0024/1627] drm/vkms: Allow to configure multiple planes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of planes to vkms_config and create as many planes as configured during output initialization. For backwards compatibility, add one primary plane and, if configured, one cursor plane and NUM_OVERLAY_PLANES planes to the default configuration. 
Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-9-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 161 +++++++++++++++++- drivers/gpu/drm/vkms/vkms_config.c | 127 +++++++++++++- drivers/gpu/drm/vkms/vkms_config.h | 75 +++++++- drivers/gpu/drm/vkms/vkms_output.c | 42 ++--- 5 files changed, 370 insertions(+), 36 deletions(-) diff --git a/.clang-format b/.clang-format index fe1aa1a30d40..c585d2a5b395 100644 --- a/.clang-format +++ b/.clang-format @@ -690,6 +690,7 @@ ForEachMacros: - 'v4l2_m2m_for_each_src_buf' - 'v4l2_m2m_for_each_src_buf_safe' - 'virtio_device_for_each_vq' + - 'vkms_config_for_each_plane' - 'while_for_each_ftrace_op' - 'xa_for_each' - 'xa_for_each_marked' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 6e07139d261c..116db01ba8a0 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -6,6 +6,27 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); +static size_t vkms_config_get_num_planes(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg; + size_t count = 0; + + vkms_config_for_each_plane(config, plane_cfg) + count++; + + return count; +} + +static struct vkms_config_plane *get_first_plane(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg; + + vkms_config_for_each_plane(config, plane_cfg) + return plane_cfg; + + return NULL; +} + struct default_config_case { bool enable_cursor; bool enable_writeback; @@ -24,6 +45,10 @@ static void vkms_config_test_empty_config(struct kunit *test) dev_name = NULL; KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test"); + KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + vkms_config_destroy(config); } @@ -44,16 +69,145 @@ static void vkms_config_test_default_config(struct kunit *test) { const struct default_config_case *params = test->param_value; struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + int n_primaries = 0; + int n_cursors = 0; + int n_overlays = 0; config = vkms_config_default_create(params->enable_cursor, params->enable_writeback, params->enable_overlay); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); - KUNIT_EXPECT_EQ(test, config->cursor, params->enable_cursor); KUNIT_EXPECT_EQ(test, config->writeback, params->enable_writeback); - KUNIT_EXPECT_EQ(test, config->overlay, params->enable_overlay); + /* Planes */ + vkms_config_for_each_plane(config, plane_cfg) { + switch (vkms_config_plane_get_type(plane_cfg)) { + case DRM_PLANE_TYPE_PRIMARY: + n_primaries++; + break; + case DRM_PLANE_TYPE_CURSOR: + n_cursors++; + break; + case DRM_PLANE_TYPE_OVERLAY: + n_overlays++; + break; + default: + KUNIT_FAIL_AND_ABORT(test, "Unknown plane type"); + } + } + KUNIT_EXPECT_EQ(test, n_primaries, 1); + KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0); + KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 
8 : 0); + + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_get_planes(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_plane *plane_cfg1, *plane_cfg2; + int n_planes = 0; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_for_each_plane(config, plane_cfg) + n_planes++; + KUNIT_ASSERT_EQ(test, n_planes, 0); + + plane_cfg1 = vkms_config_create_plane(config); + vkms_config_for_each_plane(config, plane_cfg) { + n_planes++; + if (plane_cfg != plane_cfg1) + KUNIT_FAIL(test, "Unexpected plane"); + } + KUNIT_ASSERT_EQ(test, n_planes, 1); + n_planes = 0; + + plane_cfg2 = vkms_config_create_plane(config); + vkms_config_for_each_plane(config, plane_cfg) { + n_planes++; + if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2) + KUNIT_FAIL(test, "Unexpected plane"); + } + KUNIT_ASSERT_EQ(test, n_planes, 2); + n_planes = 0; + + vkms_config_destroy_plane(plane_cfg1); + vkms_config_for_each_plane(config, plane_cfg) { + n_planes++; + if (plane_cfg != plane_cfg2) + KUNIT_FAIL(test, "Unexpected plane"); + } + KUNIT_ASSERT_EQ(test, n_planes, 1); + + vkms_config_destroy(config); +} + +static void vkms_config_test_invalid_plane_number(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + int n; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Invalid: No planes */ + plane_cfg = get_first_plane(config); + vkms_config_destroy_plane(plane_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Too many planes */ + for (n = 0; n <= 32; n++) + vkms_config_create_plane(config); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_valid_plane_type(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + plane_cfg = get_first_plane(config); + vkms_config_destroy_plane(plane_cfg); + + /* Invalid: No primary plane */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Multiple primary planes */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: One primary plane */ + vkms_config_destroy_plane(plane_cfg); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + /* Invalid: Multiple cursor planes */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: One primary and one cursor plane */ + vkms_config_destroy_plane(plane_cfg); KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); vkms_config_destroy(config); @@ -63,6 +217,9 @@ static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), KUNIT_CASE_PARAM(vkms_config_test_default_config, 
default_config_gen_params), + KUNIT_CASE(vkms_config_test_get_planes), + KUNIT_CASE(vkms_config_test_invalid_plane_number), + KUNIT_CASE(vkms_config_test_valid_plane_type), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index d1947537834c..3c3f5cf79058 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -22,6 +22,8 @@ struct vkms_config *vkms_config_create(const char *dev_name) return ERR_PTR(-ENOMEM); } + INIT_LIST_HEAD(&config->planes); + return config; } EXPORT_SYMBOL_IF_KUNIT(vkms_config_create); @@ -31,28 +33,116 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, bool enable_overlay) { struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + int n; config = vkms_config_create(DEFAULT_DEVICE_NAME); if (IS_ERR(config)) return config; - config->cursor = enable_cursor; config->writeback = enable_writeback; - config->overlay = enable_overlay; + + plane_cfg = vkms_config_create_plane(config); + if (IS_ERR(plane_cfg)) + goto err_alloc; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + + if (enable_overlay) { + for (n = 0; n < NUM_OVERLAY_PLANES; n++) { + plane_cfg = vkms_config_create_plane(config); + if (IS_ERR(plane_cfg)) + goto err_alloc; + vkms_config_plane_set_type(plane_cfg, + DRM_PLANE_TYPE_OVERLAY); + } + } + + if (enable_cursor) { + plane_cfg = vkms_config_create_plane(config); + if (IS_ERR(plane_cfg)) + goto err_alloc; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + } return config; + +err_alloc: + vkms_config_destroy(config); + return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create); void vkms_config_destroy(struct vkms_config *config) { + struct vkms_config_plane *plane_cfg, *plane_tmp; + + list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link) + vkms_config_destroy_plane(plane_cfg); + kfree_const(config->dev_name); kfree(config); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy); +static bool valid_plane_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_planes; + + n_planes = list_count_nodes((struct list_head *)&config->planes); + if (n_planes <= 0 || n_planes >= 32) { + drm_info(dev, "The number of planes must be between 1 and 31\n"); + return false; + } + + return true; +} + +static bool valid_plane_type(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? 
&config->dev->drm : NULL; + struct vkms_config_plane *plane_cfg; + bool has_primary_plane = false; + bool has_cursor_plane = false; + + vkms_config_for_each_plane(config, plane_cfg) { + enum drm_plane_type type; + + type = vkms_config_plane_get_type(plane_cfg); + + if (type == DRM_PLANE_TYPE_PRIMARY) { + if (has_primary_plane) { + drm_info(dev, "Multiple primary planes\n"); + return false; + } + + has_primary_plane = true; + } else if (type == DRM_PLANE_TYPE_CURSOR) { + if (has_cursor_plane) { + drm_info(dev, "Multiple cursor planes\n"); + return false; + } + + has_cursor_plane = true; + } + } + + if (!has_primary_plane) { + drm_info(dev, "Primary plane not found\n"); + return false; + } + + return true; +} + bool vkms_config_is_valid(const struct vkms_config *config) { + if (!valid_plane_number(config)) + return false; + + if (!valid_plane_type(config)) + return false; + return true; } EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid); @@ -63,12 +153,17 @@ static int vkms_config_show(struct seq_file *m, void *data) struct drm_device *dev = entry->dev; struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); const char *dev_name; + struct vkms_config_plane *plane_cfg; dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config); seq_printf(m, "dev_name=%s\n", dev_name); seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); - seq_printf(m, "cursor=%d\n", vkmsdev->config->cursor); - seq_printf(m, "overlay=%d\n", vkmsdev->config->overlay); + + vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { + seq_puts(m, "plane:\n"); + seq_printf(m, "\ttype=%d\n", + vkms_config_plane_get_type(plane_cfg)); + } return 0; } @@ -82,3 +177,27 @@ void vkms_config_register_debugfs(struct vkms_device *vkms_device) drm_debugfs_add_files(&vkms_device->drm, vkms_config_debugfs_list, ARRAY_SIZE(vkms_config_debugfs_list)); } + +struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config) +{ + struct vkms_config_plane *plane_cfg; + + plane_cfg = kzalloc(sizeof(*plane_cfg), GFP_KERNEL); + if (!plane_cfg) + return ERR_PTR(-ENOMEM); + + plane_cfg->config = config; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + + list_add_tail(&plane_cfg->link, &config->planes); + + return plane_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane); + +void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg) +{ + list_del(&plane_cfg->link); + kfree(plane_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 31c758631c37..613e98760640 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -3,6 +3,7 @@ #ifndef _VKMS_CONFIG_H_ #define _VKMS_CONFIG_H_ +#include #include #include "vkms_drv.h" @@ -12,18 +13,46 @@ * * @dev_name: Name of the device * @writeback: If true, a writeback buffer can be attached to the CRTC - * @cursor: If true, a cursor plane is created in the VKMS device - * @overlay: If true, NUM_OVERLAY_PLANES will be created for the VKMS device + * @planes: List of planes configured for the device * @dev: Used to store the current VKMS device. Only set when the device is instantiated. */ struct vkms_config { const char *dev_name; bool writeback; - bool cursor; - bool overlay; + struct list_head planes; struct vkms_device *dev; }; +/** + * struct vkms_config_plane + * + * @link: Link to the others planes in vkms_config + * @config: The vkms_config this plane belongs to + * @type: Type of the plane. 
The creator of configuration needs to ensures that + * at least one primary plane is present. + * @plane: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS plane during + * device creation. This pointer is not managed by the configuration and + * must be managed by other means. + */ +struct vkms_config_plane { + struct list_head link; + struct vkms_config *config; + + enum drm_plane_type type; + + /* Internal usage */ + struct vkms_plane *plane; +}; + +/** + * vkms_config_for_each_plane - Iterate over the vkms_config planes + * @config: &struct vkms_config pointer + * @plane_cfg: &struct vkms_config_plane pointer used as cursor + */ +#define vkms_config_for_each_plane(config, plane_cfg) \ + list_for_each_entry((plane_cfg), &(config)->planes, link) + /** * vkms_config_create() - Create a new VKMS configuration * @dev_name: Name of the device @@ -84,4 +113,42 @@ bool vkms_config_is_valid(const struct vkms_config *config); */ void vkms_config_register_debugfs(struct vkms_device *vkms_device); +/** + * vkms_config_create_plane() - Add a new plane configuration + * @config: Configuration to add the plane to + * + * Returns: + * The new plane configuration or an error. Call vkms_config_destroy_plane() to + * free the returned plane configuration. + */ +struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config); + +/** + * vkms_config_destroy_plane() - Remove and free a plane configuration + * @plane_cfg: Plane configuration to destroy + */ +void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg); + +/** + * vkms_config_plane_type() - Return the plane type + * @plane_cfg: Plane to get the type from + */ +static inline enum drm_plane_type +vkms_config_plane_get_type(struct vkms_config_plane *plane_cfg) +{ + return plane_cfg->type; +} + +/** + * vkms_config_plane_set_type() - Set the plane type + * @plane_cfg: Plane to set the type to + * @type: New plane type + */ +static inline void +vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg, + enum drm_plane_type type) +{ + plane_cfg->type = type; +} + #endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 414cc933af41..08ea691db299 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -11,28 +11,29 @@ int vkms_output_init(struct vkms_device *vkmsdev) struct vkms_connector *connector; struct drm_encoder *encoder; struct vkms_output *output; - struct vkms_plane *primary, *overlay, *cursor = NULL; + struct vkms_plane *primary = NULL, *cursor = NULL; + struct vkms_config_plane *plane_cfg; int ret; int writeback; - unsigned int n; if (!vkms_config_is_valid(vkmsdev->config)) return -EINVAL; - /* - * Initialize used plane. One primary plane is required to perform the composition. - * - * The overlay and cursor planes are not mandatory, but can be used to perform complex - * composition. 
- */ - primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY); - if (IS_ERR(primary)) - return PTR_ERR(primary); + vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { + enum drm_plane_type type; - if (vkmsdev->config->cursor) { - cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR); - if (IS_ERR(cursor)) - return PTR_ERR(cursor); + type = vkms_config_plane_get_type(plane_cfg); + + plane_cfg->plane = vkms_plane_init(vkmsdev, type); + if (IS_ERR(plane_cfg->plane)) { + DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); + return PTR_ERR(plane_cfg->plane); + } + + if (type == DRM_PLANE_TYPE_PRIMARY) + primary = plane_cfg->plane; + else if (type == DRM_PLANE_TYPE_CURSOR) + cursor = plane_cfg->plane; } output = vkms_crtc_init(dev, &primary->base, @@ -42,17 +43,6 @@ int vkms_output_init(struct vkms_device *vkmsdev) return PTR_ERR(output); } - if (vkmsdev->config->overlay) { - for (n = 0; n < NUM_OVERLAY_PLANES; n++) { - overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY); - if (IS_ERR(overlay)) { - DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); - return PTR_ERR(overlay); - } - overlay->base.possible_crtcs = drm_crtc_mask(&output->crtc); - } - } - connector = vkms_connector_init(vkmsdev); if (IS_ERR(connector)) { DRM_ERROR("Failed to init connector\n"); From 600df32dac40ad4b1069615a970063f0b15abfda Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:09 +0100 Subject: [PATCH 0025/1627] drm/vkms: Allow to configure multiple CRTCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of CRTCs to vkms_config and helper functions to add and remove as many CRTCs as wanted. For backwards compatibility, add one CRTC to the default configuration. A future patch will allow to attach planes and CRTCs, but for the moment there are no changes in the way the output is configured. 
Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-10-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 83 ++++++++++++++++++- drivers/gpu/drm/vkms/vkms_config.c | 63 +++++++++++++- drivers/gpu/drm/vkms/vkms_config.h | 80 ++++++++++++++++++ 4 files changed, 222 insertions(+), 5 deletions(-) diff --git a/.clang-format b/.clang-format index c585d2a5b395..e7a901c3617d 100644 --- a/.clang-format +++ b/.clang-format @@ -690,6 +690,7 @@ ForEachMacros: - 'v4l2_m2m_for_each_src_buf' - 'v4l2_m2m_for_each_src_buf_safe' - 'virtio_device_for_each_vq' + - 'vkms_config_for_each_crtc' - 'vkms_config_for_each_plane' - 'while_for_each_ftrace_op' - 'xa_for_each' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 116db01ba8a0..104120c91c39 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -27,6 +27,16 @@ static struct vkms_config_plane *get_first_plane(struct vkms_config *config) return NULL; } +static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config) +{ + struct vkms_config_crtc *crtc_cfg; + + vkms_config_for_each_crtc(config, crtc_cfg) + return crtc_cfg; + + return NULL; +} + struct default_config_case { bool enable_cursor; bool enable_writeback; @@ -46,6 +56,7 @@ static void vkms_config_test_empty_config(struct kunit *test) KUNIT_EXPECT_STREQ(test, vkms_config_get_device_name(config), "test"); KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0); + KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -70,6 +81,7 @@ static void vkms_config_test_default_config(struct kunit *test) const struct default_config_case *params = test->param_value; struct vkms_config *config; struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; int n_primaries = 0; int n_cursors = 0; int n_overlays = 0; @@ -79,8 +91,6 @@ static void vkms_config_test_default_config(struct kunit *test) params->enable_overlay); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); - KUNIT_EXPECT_EQ(test, config->writeback, params->enable_writeback); - /* Planes */ vkms_config_for_each_plane(config, plane_cfg) { switch (vkms_config_plane_get_type(plane_cfg)) { @@ -101,6 +111,13 @@ static void vkms_config_test_default_config(struct kunit *test) KUNIT_EXPECT_EQ(test, n_cursors, params->enable_cursor ? 1 : 0); KUNIT_EXPECT_EQ(test, n_overlays, params->enable_overlay ? 
8 : 0); + /* CRTCs */ + KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 1); + + crtc_cfg = get_first_crtc(config); + KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg), + params->enable_writeback); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); vkms_config_destroy(config); @@ -149,6 +166,43 @@ static void vkms_config_test_get_planes(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_get_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 0); + vkms_config_for_each_crtc(config, crtc_cfg) + KUNIT_FAIL(test, "Unexpected CRTC"); + + crtc_cfg1 = vkms_config_create_crtc(config); + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1); + vkms_config_for_each_crtc(config, crtc_cfg) { + if (crtc_cfg != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected CRTC"); + } + + crtc_cfg2 = vkms_config_create_crtc(config); + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2); + vkms_config_for_each_crtc(config, crtc_cfg) { + if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected CRTC"); + } + + vkms_config_destroy_crtc(config, crtc_cfg2); + KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1); + vkms_config_for_each_crtc(config, crtc_cfg) { + if (crtc_cfg != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected CRTC"); + } + + vkms_config_destroy(config); +} + static void vkms_config_test_invalid_plane_number(struct kunit *test) { struct vkms_config *config; @@ -213,13 +267,38 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_invalid_crtc_number(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_crtc *crtc_cfg; + int n; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Invalid: No CRTCs */ + crtc_cfg = get_first_crtc(config); + vkms_config_destroy_crtc(config, crtc_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Too many CRTCs */ + for (n = 0; n <= 32; n++) + vkms_config_create_crtc(config); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), KUNIT_CASE_PARAM(vkms_config_test_default_config, default_config_gen_params), KUNIT_CASE(vkms_config_test_get_planes), + KUNIT_CASE(vkms_config_test_get_crtcs), KUNIT_CASE(vkms_config_test_invalid_plane_number), KUNIT_CASE(vkms_config_test_valid_plane_type), + KUNIT_CASE(vkms_config_test_invalid_crtc_number), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index 3c3f5cf79058..d195db770fae 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -23,6 +23,7 @@ struct vkms_config *vkms_config_create(const char *dev_name) } INIT_LIST_HEAD(&config->planes); + INIT_LIST_HEAD(&config->crtcs); return config; } @@ -34,19 +35,23 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, { struct vkms_config *config; struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; int n; config = vkms_config_create(DEFAULT_DEVICE_NAME); if (IS_ERR(config)) return config; - config->writeback = enable_writeback; - plane_cfg 
= vkms_config_create_plane(config); if (IS_ERR(plane_cfg)) goto err_alloc; vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + crtc_cfg = vkms_config_create_crtc(config); + if (IS_ERR(crtc_cfg)) + goto err_alloc; + vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback); + if (enable_overlay) { for (n = 0; n < NUM_OVERLAY_PLANES; n++) { plane_cfg = vkms_config_create_plane(config); @@ -75,10 +80,14 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_default_create); void vkms_config_destroy(struct vkms_config *config) { struct vkms_config_plane *plane_cfg, *plane_tmp; + struct vkms_config_crtc *crtc_cfg, *crtc_tmp; list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link) vkms_config_destroy_plane(plane_cfg); + list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link) + vkms_config_destroy_crtc(config, crtc_cfg); + kfree_const(config->dev_name); kfree(config); } @@ -135,11 +144,28 @@ static bool valid_plane_type(const struct vkms_config *config) return true; } +static bool valid_crtc_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_crtcs; + + n_crtcs = list_count_nodes((struct list_head *)&config->crtcs); + if (n_crtcs <= 0 || n_crtcs >= 32) { + drm_info(dev, "The number of CRTCs must be between 1 and 31\n"); + return false; + } + + return true; +} + bool vkms_config_is_valid(const struct vkms_config *config) { if (!valid_plane_number(config)) return false; + if (!valid_crtc_number(config)) + return false; + if (!valid_plane_type(config)) return false; @@ -154,10 +180,10 @@ static int vkms_config_show(struct seq_file *m, void *data) struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev); const char *dev_name; struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config); seq_printf(m, "dev_name=%s\n", dev_name); - seq_printf(m, "writeback=%d\n", vkmsdev->config->writeback); vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { seq_puts(m, "plane:\n"); @@ -165,6 +191,12 @@ static int vkms_config_show(struct seq_file *m, void *data) vkms_config_plane_get_type(plane_cfg)); } + vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) { + seq_puts(m, "crtc:\n"); + seq_printf(m, "\twriteback=%d\n", + vkms_config_crtc_get_writeback(crtc_cfg)); + } + return 0; } @@ -201,3 +233,28 @@ void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg) kfree(plane_cfg); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane); + +struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config) +{ + struct vkms_config_crtc *crtc_cfg; + + crtc_cfg = kzalloc(sizeof(*crtc_cfg), GFP_KERNEL); + if (!crtc_cfg) + return ERR_PTR(-ENOMEM); + + crtc_cfg->config = config; + vkms_config_crtc_set_writeback(crtc_cfg, false); + + list_add_tail(&crtc_cfg->link, &config->crtcs); + + return crtc_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc); + +void vkms_config_destroy_crtc(struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + list_del(&crtc_cfg->link); + kfree(crtc_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 613e98760640..978418db84b9 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -14,12 +14,14 @@ * @dev_name: Name of the device * @writeback: If true, a writeback buffer can be attached to the CRTC * @planes: List of planes configured for the device 
+ * @crtcs: List of CRTCs configured for the device * @dev: Used to store the current VKMS device. Only set when the device is instantiated. */ struct vkms_config { const char *dev_name; bool writeback; struct list_head planes; + struct list_head crtcs; struct vkms_device *dev; }; @@ -45,6 +47,27 @@ struct vkms_config_plane { struct vkms_plane *plane; }; +/** + * struct vkms_config_crtc + * + * @link: Link to the others CRTCs in vkms_config + * @config: The vkms_config this CRTC belongs to + * @writeback: If true, a writeback buffer can be attached to the CRTC + * @crtc: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS CRTC during + * device creation. This pointer is not managed by the configuration and + * must be managed by other means. + */ +struct vkms_config_crtc { + struct list_head link; + struct vkms_config *config; + + bool writeback; + + /* Internal usage */ + struct vkms_output *crtc; +}; + /** * vkms_config_for_each_plane - Iterate over the vkms_config planes * @config: &struct vkms_config pointer @@ -53,6 +76,14 @@ struct vkms_config_plane { #define vkms_config_for_each_plane(config, plane_cfg) \ list_for_each_entry((plane_cfg), &(config)->planes, link) +/** + * vkms_config_for_each_crtc - Iterate over the vkms_config CRTCs + * @config: &struct vkms_config pointer + * @crtc_cfg: &struct vkms_config_crtc pointer used as cursor + */ +#define vkms_config_for_each_crtc(config, crtc_cfg) \ + list_for_each_entry((crtc_cfg), &(config)->crtcs, link) + /** * vkms_config_create() - Create a new VKMS configuration * @dev_name: Name of the device @@ -96,6 +127,15 @@ vkms_config_get_device_name(struct vkms_config *config) return config->dev_name; } +/** + * vkms_config_get_num_crtcs() - Return the number of CRTCs in the configuration + * @config: Configuration to get the number of CRTCs from + */ +static inline size_t vkms_config_get_num_crtcs(struct vkms_config *config) +{ + return list_count_nodes(&config->crtcs); +} + /** * vkms_config_is_valid() - Validate a configuration * @config: Configuration to validate @@ -151,4 +191,44 @@ vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg, plane_cfg->type = type; } +/** + * vkms_config_create_crtc() - Add a new CRTC configuration + * @config: Configuration to add the CRTC to + * + * Returns: + * The new CRTC configuration or an error. Call vkms_config_destroy_crtc() to + * free the returned CRTC configuration. 
+ */ +struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config); + +/** + * vkms_config_destroy_crtc() - Remove and free a CRTC configuration + * @config: Configuration to remove the CRTC from + * @crtc_cfg: CRTC configuration to destroy + */ +void vkms_config_destroy_crtc(struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_crtc_get_writeback() - If a writeback connector will be created + * @crtc_cfg: CRTC with or without a writeback connector + */ +static inline bool +vkms_config_crtc_get_writeback(struct vkms_config_crtc *crtc_cfg) +{ + return crtc_cfg->writeback; +} + +/** + * vkms_config_crtc_set_writeback() - If a writeback connector will be created + * @crtc_cfg: Target CRTC + * @writeback: Enable or disable the writeback connector + */ +static inline void +vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg, + bool writeback) +{ + crtc_cfg->writeback = writeback; +} + #endif /* _VKMS_CONFIG_H_ */ From c204bf652a5b9e03bbd420199a326f02c2e5cb65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:10 +0100 Subject: [PATCH 0026/1627] drm/vkms: Allow to attach planes and CRTCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of possible CRTCs to the plane configuration and helpers to attach, detach and get the primary and cursor planes attached to a CRTC. Now that the default configuration has its planes and CRTC correctly attached, configure the output following the configuration. Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-11-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 222 ++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.c | 154 ++++++++++-- drivers/gpu/drm/vkms/vkms_config.h | 59 ++++- drivers/gpu/drm/vkms/vkms_drv.c | 3 +- drivers/gpu/drm/vkms/vkms_output.c | 53 +++-- 6 files changed, 455 insertions(+), 37 deletions(-) diff --git a/.clang-format b/.clang-format index e7a901c3617d..6f944fa39841 100644 --- a/.clang-format +++ b/.clang-format @@ -692,6 +692,7 @@ ForEachMacros: - 'virtio_device_for_each_vq' - 'vkms_config_for_each_crtc' - 'vkms_config_for_each_plane' + - 'vkms_config_plane_for_each_possible_crtc' - 'while_for_each_ftrace_op' - 'xa_for_each' - 'xa_for_each_marked' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 104120c91c39..0997ea924ab7 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -118,6 +118,18 @@ static void vkms_config_test_default_config(struct kunit *test) KUNIT_EXPECT_EQ(test, vkms_config_crtc_get_writeback(crtc_cfg), params->enable_writeback); + vkms_config_for_each_plane(config, plane_cfg) { + struct vkms_config_crtc *possible_crtc; + int n_possible_crtcs = 0; + unsigned long idx = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + KUNIT_EXPECT_PTR_EQ(test, crtc_cfg, possible_crtc); + n_possible_crtcs++; + } + KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1); + } + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); vkms_config_destroy(config); @@ -230,6 +242,8 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) { struct vkms_config *config; struct vkms_config_plane *plane_cfg; + struct 
vkms_config_crtc *crtc_cfg; + int err; config = vkms_config_default_create(false, false, false); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); @@ -237,16 +251,26 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) plane_cfg = get_first_plane(config); vkms_config_destroy_plane(plane_cfg); + crtc_cfg = get_first_crtc(config); + /* Invalid: No primary plane */ plane_cfg = vkms_config_create_plane(config); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); /* Invalid: Multiple primary planes */ plane_cfg = vkms_config_create_plane(config); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + plane_cfg = vkms_config_create_plane(config); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); /* Valid: One primary plane */ @@ -256,14 +280,50 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) /* Invalid: Multiple cursor planes */ plane_cfg = vkms_config_create_plane(config); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + plane_cfg = vkms_config_create_plane(config); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); /* Valid: One primary and one cursor plane */ vkms_config_destroy_plane(plane_cfg); KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + /* Invalid: Second CRTC without primary plane */ + crtc_cfg = vkms_config_create_crtc(config); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: Second CRTC with a primary plane */ + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_valid_plane_possible_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + plane_cfg = get_first_plane(config); + crtc_cfg = get_first_crtc(config); + + /* Invalid: Primary plane without a possible CRTC */ + vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + vkms_config_destroy(config); } @@ -290,6 +350,164 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_attach_different_configs(struct kunit *test) +{ + struct vkms_config *config1, *config2; + struct vkms_config_plane *plane_cfg1, *plane_cfg2; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + int err; + + config1 = vkms_config_create("test1"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config1); + + config2 = vkms_config_create("test2"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config2); + + plane_cfg1 = 
vkms_config_create_plane(config1); + crtc_cfg1 = vkms_config_create_crtc(config1); + + plane_cfg2 = vkms_config_create_plane(config2); + crtc_cfg2 = vkms_config_create_crtc(config2); + + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); + + err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2); + KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1); + KUNIT_EXPECT_NE(test, err, 0); + + vkms_config_destroy(config1); + vkms_config_destroy(config2); +} + +static void vkms_config_test_plane_attach_crtc(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *overlay_cfg; + struct vkms_config_plane *primary_cfg; + struct vkms_config_plane *cursor_cfg; + struct vkms_config_crtc *crtc_cfg; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + overlay_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY); + primary_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY); + cursor_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR); + + crtc_cfg = vkms_config_create_crtc(config); + + /* No primary or cursor planes */ + KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + /* Overlay plane, but no primary or cursor planes */ + err = vkms_config_plane_attach_crtc(overlay_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + /* Primary plane, attaching it twice must fail */ + err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_plane_attach_crtc(primary_cfg, crtc_cfg); + KUNIT_EXPECT_NE(test, err, 0); + KUNIT_EXPECT_PTR_EQ(test, + vkms_config_crtc_primary_plane(config, crtc_cfg), + primary_cfg); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + /* Primary and cursor planes */ + err = vkms_config_plane_attach_crtc(cursor_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_EXPECT_PTR_EQ(test, + vkms_config_crtc_primary_plane(config, crtc_cfg), + primary_cfg); + KUNIT_EXPECT_PTR_EQ(test, + vkms_config_crtc_cursor_plane(config, crtc_cfg), + cursor_cfg); + + /* Detach primary and destroy cursor plane */ + vkms_config_plane_detach_crtc(overlay_cfg, crtc_cfg); + vkms_config_plane_detach_crtc(primary_cfg, crtc_cfg); + vkms_config_destroy_plane(cursor_cfg); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); + KUNIT_EXPECT_NULL(test, vkms_config_crtc_cursor_plane(config, crtc_cfg)); + + vkms_config_destroy(config); +} + +static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg1, *plane_cfg2; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + int n_crtcs = 0; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + plane_cfg1 = vkms_config_create_plane(config); + plane_cfg2 = vkms_config_create_plane(config); + crtc_cfg1 = 
vkms_config_create_crtc(config); + crtc_cfg2 = vkms_config_create_crtc(config); + + /* No possible CRTCs */ + vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Plane 1 attached to CRTC 1 and 2 */ + err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 2); + n_crtcs = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Plane 1 attached to CRTC 1 and plane 2 to CRTC 2 */ + vkms_config_plane_detach_crtc(plane_cfg1, crtc_cfg2); + vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + n_crtcs = 0; + + err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + vkms_config_plane_for_each_possible_crtc(plane_cfg2, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), KUNIT_CASE_PARAM(vkms_config_test_default_config, @@ -298,7 +516,11 @@ static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_get_crtcs), KUNIT_CASE(vkms_config_test_invalid_plane_number), KUNIT_CASE(vkms_config_test_valid_plane_type), + KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs), KUNIT_CASE(vkms_config_test_invalid_crtc_number), + KUNIT_CASE(vkms_config_test_attach_different_configs), + KUNIT_CASE(vkms_config_test_plane_attach_crtc), + KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index d195db770fae..458385413648 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -52,13 +52,20 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, goto err_alloc; vkms_config_crtc_set_writeback(crtc_cfg, enable_writeback); + if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + goto err_alloc; + if (enable_overlay) { for (n = 0; n < NUM_OVERLAY_PLANES; n++) { plane_cfg = vkms_config_create_plane(config); if (IS_ERR(plane_cfg)) goto err_alloc; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + + if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + goto err_alloc; } } @@ -66,7 +73,11 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, plane_cfg = vkms_config_create_plane(config); if (IS_ERR(plane_cfg)) goto err_alloc; + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); + + if (vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg)) + goto err_alloc; } return config; @@ -107,7 +118,8 @@ static bool valid_plane_number(const struct vkms_config *config) return true; } -static bool valid_plane_type(const struct vkms_config *config) +static 
bool valid_planes_for_crtc(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) { struct drm_device *dev = config->dev ? &config->dev->drm : NULL; struct vkms_config_plane *plane_cfg; @@ -115,24 +127,31 @@ static bool valid_plane_type(const struct vkms_config *config) bool has_cursor_plane = false; vkms_config_for_each_plane(config, plane_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; enum drm_plane_type type; type = vkms_config_plane_get_type(plane_cfg); - if (type == DRM_PLANE_TYPE_PRIMARY) { - if (has_primary_plane) { - drm_info(dev, "Multiple primary planes\n"); - return false; - } + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc != crtc_cfg) + continue; - has_primary_plane = true; - } else if (type == DRM_PLANE_TYPE_CURSOR) { - if (has_cursor_plane) { - drm_info(dev, "Multiple cursor planes\n"); - return false; - } + if (type == DRM_PLANE_TYPE_PRIMARY) { + if (has_primary_plane) { + drm_info(dev, "Multiple primary planes\n"); + return false; + } - has_cursor_plane = true; + has_primary_plane = true; + } else if (type == DRM_PLANE_TYPE_CURSOR) { + if (has_cursor_plane) { + drm_info(dev, "Multiple cursor planes\n"); + return false; + } + + has_cursor_plane = true; + } } } @@ -144,6 +163,21 @@ static bool valid_plane_type(const struct vkms_config *config) return true; } +static bool valid_plane_possible_crtcs(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + struct vkms_config_plane *plane_cfg; + + vkms_config_for_each_plane(config, plane_cfg) { + if (xa_empty(&plane_cfg->possible_crtcs)) { + drm_info(dev, "All planes must have at least one possible CRTC\n"); + return false; + } + } + + return true; +} + static bool valid_crtc_number(const struct vkms_config *config) { struct drm_device *dev = config->dev ? 
&config->dev->drm : NULL; @@ -160,15 +194,22 @@ static bool valid_crtc_number(const struct vkms_config *config) bool vkms_config_is_valid(const struct vkms_config *config) { + struct vkms_config_crtc *crtc_cfg; + if (!valid_plane_number(config)) return false; if (!valid_crtc_number(config)) return false; - if (!valid_plane_type(config)) + if (!valid_plane_possible_crtcs(config)) return false; + vkms_config_for_each_crtc(config, crtc_cfg) { + if (!valid_planes_for_crtc(config, crtc_cfg)) + return false; + } + return true; } EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid); @@ -220,6 +261,7 @@ struct vkms_config_plane *vkms_config_create_plane(struct vkms_config *config) plane_cfg->config = config; vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); + xa_init_flags(&plane_cfg->possible_crtcs, XA_FLAGS_ALLOC); list_add_tail(&plane_cfg->link, &config->planes); @@ -229,11 +271,45 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_plane); void vkms_config_destroy_plane(struct vkms_config_plane *plane_cfg) { + xa_destroy(&plane_cfg->possible_crtcs); list_del(&plane_cfg->link); kfree(plane_cfg); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_plane); +int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + u32 crtc_idx = 0; + + if (plane_cfg->config != crtc_cfg->config) + return -EINVAL; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + return -EEXIST; + } + + return xa_alloc(&plane_cfg->possible_crtcs, &crtc_idx, crtc_cfg, + xa_limit_32b, GFP_KERNEL); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_attach_crtc); + +void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + xa_erase(&plane_cfg->possible_crtcs, idx); + } +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_plane_detach_crtc); + struct vkms_config_crtc *vkms_config_create_crtc(struct vkms_config *config) { struct vkms_config_crtc *crtc_cfg; @@ -254,7 +330,57 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_crtc); void vkms_config_destroy_crtc(struct vkms_config *config, struct vkms_config_crtc *crtc_cfg) { + struct vkms_config_plane *plane_cfg; + + vkms_config_for_each_plane(config, plane_cfg) + vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg); + list_del(&crtc_cfg->link); kfree(crtc_cfg); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_crtc); + +/** + * vkms_config_crtc_get_plane() - Return the first attached plane to a CRTC with + * the specific type + * @config: Configuration containing the CRTC and the plane + * @crtc_cfg: Only find planes attached to this CRTC + * @type: Plane type to search + * + * Returns: + * The first plane found attached to @crtc_cfg with the type @type. 
+ */ +static struct vkms_config_plane *vkms_config_crtc_get_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg, + enum drm_plane_type type) +{ + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *possible_crtc; + enum drm_plane_type current_type; + unsigned long idx = 0; + + vkms_config_for_each_plane(config, plane_cfg) { + current_type = vkms_config_plane_get_type(plane_cfg); + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg && current_type == type) + return plane_cfg; + } + } + + return NULL; +} + +struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_PRIMARY); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_primary_plane); + +struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg) +{ + return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 978418db84b9..ad303b34ee03 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -5,6 +5,7 @@ #include #include +#include #include "vkms_drv.h" @@ -12,14 +13,12 @@ * struct vkms_config - General configuration for VKMS driver * * @dev_name: Name of the device - * @writeback: If true, a writeback buffer can be attached to the CRTC * @planes: List of planes configured for the device * @crtcs: List of CRTCs configured for the device * @dev: Used to store the current VKMS device. Only set when the device is instantiated. */ struct vkms_config { const char *dev_name; - bool writeback; struct list_head planes; struct list_head crtcs; struct vkms_device *dev; @@ -32,6 +31,7 @@ struct vkms_config { * @config: The vkms_config this plane belongs to * @type: Type of the plane. The creator of configuration needs to ensures that * at least one primary plane is present. + * @possible_crtcs: Array of CRTCs that can be used with this plane * @plane: Internal usage. This pointer should never be considered as valid. * It can be used to store a temporary reference to a VKMS plane during * device creation. 
This pointer is not managed by the configuration and @@ -42,6 +42,7 @@ struct vkms_config_plane { struct vkms_config *config; enum drm_plane_type type; + struct xarray possible_crtcs; /* Internal usage */ struct vkms_plane *plane; @@ -84,6 +85,16 @@ struct vkms_config_crtc { #define vkms_config_for_each_crtc(config, crtc_cfg) \ list_for_each_entry((crtc_cfg), &(config)->crtcs, link) +/** + * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane + * possible CRTCs + * @plane_cfg: &struct vkms_config_plane pointer + * @idx: Index of the cursor + * @possible_crtc: &struct vkms_config_crtc pointer used as cursor + */ +#define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \ + xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc)) + /** * vkms_config_create() - Create a new VKMS configuration * @dev_name: Name of the device @@ -191,6 +202,22 @@ vkms_config_plane_set_type(struct vkms_config_plane *plane_cfg, plane_cfg->type = type; } +/** + * vkms_config_plane_attach_crtc - Attach a plane to a CRTC + * @plane_cfg: Plane to attach + * @crtc_cfg: CRTC to attach @plane_cfg to + */ +int __must_check vkms_config_plane_attach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_plane_detach_crtc - Detach a plane from a CRTC + * @plane_cfg: Plane to detach + * @crtc_cfg: CRTC to detach @plane_cfg from + */ +void vkms_config_plane_detach_crtc(struct vkms_config_plane *plane_cfg, + struct vkms_config_crtc *crtc_cfg); + /** * vkms_config_create_crtc() - Add a new CRTC configuration * @config: Configuration to add the CRTC to @@ -231,4 +258,32 @@ vkms_config_crtc_set_writeback(struct vkms_config_crtc *crtc_cfg, crtc_cfg->writeback = writeback; } +/** + * vkms_config_crtc_primary_plane() - Return the primary plane for a CRTC + * @config: Configuration containing the CRTC + * @crtc_config: Target CRTC + * + * Note that, if multiple primary planes are found, the first one is returned. + * In this case, the configuration will be invalid. See vkms_config_is_valid(). + * + * Returns: + * The primary plane or NULL if none is assigned yet. + */ +struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_crtc_cursor_plane() - Return the cursor plane for a CRTC + * @config: Configuration containing the CRTC + * @crtc_config: Target CRTC + * + * Note that, if multiple cursor planes are found, the first one is returned. + * In this case, the configuration will be invalid. See vkms_config_is_valid(). + * + * Returns: + * The cursor plane or NULL if none is assigned yet. 
+ */ +struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config, + struct vkms_config_crtc *crtc_cfg); + #endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index ba977ef09b2b..a24d1655f7b8 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -181,7 +181,8 @@ static int vkms_create(struct vkms_config *config) goto out_devres; } - ret = drm_vblank_init(&vkms_device->drm, 1); + ret = drm_vblank_init(&vkms_device->drm, + vkms_config_get_num_crtcs(config)); if (ret) { DRM_ERROR("Failed to vblank\n"); goto out_devres; diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 08ea691db299..f63bc8e3014b 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -10,9 +10,8 @@ int vkms_output_init(struct vkms_device *vkmsdev) struct drm_device *dev = &vkmsdev->drm; struct vkms_connector *connector; struct drm_encoder *encoder; - struct vkms_output *output; - struct vkms_plane *primary = NULL, *cursor = NULL; struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg; int ret; int writeback; @@ -29,18 +28,37 @@ int vkms_output_init(struct vkms_device *vkmsdev) DRM_DEV_ERROR(dev->dev, "Failed to init vkms plane\n"); return PTR_ERR(plane_cfg->plane); } - - if (type == DRM_PLANE_TYPE_PRIMARY) - primary = plane_cfg->plane; - else if (type == DRM_PLANE_TYPE_CURSOR) - cursor = plane_cfg->plane; } - output = vkms_crtc_init(dev, &primary->base, - cursor ? &cursor->base : NULL); - if (IS_ERR(output)) { - DRM_ERROR("Failed to allocate CRTC\n"); - return PTR_ERR(output); + vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) { + struct vkms_config_plane *primary, *cursor; + + primary = vkms_config_crtc_primary_plane(vkmsdev->config, crtc_cfg); + cursor = vkms_config_crtc_cursor_plane(vkmsdev->config, crtc_cfg); + + crtc_cfg->crtc = vkms_crtc_init(dev, &primary->plane->base, + cursor ? 
&cursor->plane->base : NULL); + if (IS_ERR(crtc_cfg->crtc)) { + DRM_ERROR("Failed to allocate CRTC\n"); + return PTR_ERR(crtc_cfg->crtc); + } + + /* Initialize the writeback component */ + if (vkms_config_crtc_get_writeback(crtc_cfg)) { + writeback = vkms_enable_writeback_connector(vkmsdev, crtc_cfg->crtc); + if (writeback) + DRM_ERROR("Failed to init writeback connector\n"); + } + } + + vkms_config_for_each_plane(vkmsdev->config, plane_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) { + plane_cfg->plane->base.possible_crtcs |= + drm_crtc_mask(&possible_crtc->crtc->crtc); + } } connector = vkms_connector_init(vkmsdev); @@ -60,7 +78,9 @@ int vkms_output_init(struct vkms_device *vkmsdev) DRM_ERROR("Failed to init encoder\n"); return ret; } - encoder->possible_crtcs = drm_crtc_mask(&output->crtc); + + vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) + encoder->possible_crtcs = drm_crtc_mask(&crtc_cfg->crtc->crtc); /* Attach the encoder and the connector */ ret = drm_connector_attach_encoder(&connector->base, encoder); @@ -69,13 +89,6 @@ int vkms_output_init(struct vkms_device *vkmsdev) return ret; } - /* Initialize the writeback component */ - if (vkmsdev->config->writeback) { - writeback = vkms_enable_writeback_connector(vkmsdev, output); - if (writeback) - DRM_ERROR("Failed to init writeback connector\n"); - } - drm_mode_config_reset(dev); return ret; From f60a183dc9105e3dc5120d5aa03b294d182965fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:11 +0100 Subject: [PATCH 0027/1627] drm/vkms: Allow to configure multiple encoders MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of encoders to vkms_config and helper functions to add and remove as many encoders as wanted. For backwards compatibility, add one encoder to the default configuration. A future patch will allow to attach encoders and CRTCs, but for the moment there are no changes in the way the output is configured. 
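As a usage sketch only (the wrapper function is hypothetical and not part of this patch; the helpers are the ones introduced here), adding and removing an extra encoder on top of the default configuration looks roughly like this:

	static int example_add_second_encoder(void)
	{
		struct vkms_config *config;
		struct vkms_config_encoder *encoder_cfg;

		/* Default config: one primary plane, one CRTC and, with this
		 * patch, one encoder.
		 */
		config = vkms_config_default_create(false, false, false);
		if (IS_ERR(config))
			return PTR_ERR(config);

		/* Add a second encoder; the encoder count must stay between
		 * 1 and 31, otherwise vkms_config_is_valid() rejects the
		 * configuration.
		 */
		encoder_cfg = vkms_config_create_encoder(config);
		if (IS_ERR(encoder_cfg)) {
			vkms_config_destroy(config);
			return PTR_ERR(encoder_cfg);
		}

		/* Encoders can be removed one by one ... */
		vkms_config_destroy_encoder(config, encoder_cfg);

		/* ... and any remaining ones are freed together with the
		 * configuration.
		 */
		vkms_config_destroy(config);

		return 0;
	}
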
Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-12-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 94 +++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.c | 54 +++++++++++ drivers/gpu/drm/vkms/vkms_config.h | 46 +++++++++ 4 files changed, 195 insertions(+) diff --git a/.clang-format b/.clang-format index 6f944fa39841..c355a2f58eed 100644 --- a/.clang-format +++ b/.clang-format @@ -691,6 +691,7 @@ ForEachMacros: - 'v4l2_m2m_for_each_src_buf_safe' - 'virtio_device_for_each_vq' - 'vkms_config_for_each_crtc' + - 'vkms_config_for_each_encoder' - 'vkms_config_for_each_plane' - 'vkms_config_plane_for_each_possible_crtc' - 'while_for_each_ftrace_op' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 0997ea924ab7..fa8b4f23cb49 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -17,6 +17,17 @@ static size_t vkms_config_get_num_planes(struct vkms_config *config) return count; } +static size_t vkms_config_get_num_encoders(struct vkms_config *config) +{ + struct vkms_config_encoder *encoder_cfg; + size_t count = 0; + + vkms_config_for_each_encoder(config, encoder_cfg) + count++; + + return count; +} + static struct vkms_config_plane *get_first_plane(struct vkms_config *config) { struct vkms_config_plane *plane_cfg; @@ -37,6 +48,16 @@ static struct vkms_config_crtc *get_first_crtc(struct vkms_config *config) return NULL; } +static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config) +{ + struct vkms_config_encoder *encoder_cfg; + + vkms_config_for_each_encoder(config, encoder_cfg) + return encoder_cfg; + + return NULL; +} + struct default_config_case { bool enable_cursor; bool enable_writeback; @@ -57,6 +78,7 @@ static void vkms_config_test_empty_config(struct kunit *test) KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0); KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0); + KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -130,6 +152,9 @@ static void vkms_config_test_default_config(struct kunit *test) KUNIT_EXPECT_EQ(test, n_possible_crtcs, 1); } + /* Encoders */ + KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); vkms_config_destroy(config); @@ -215,6 +240,50 @@ static void vkms_config_test_get_crtcs(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_get_encoders(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + int n_encoders = 0; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_for_each_encoder(config, encoder_cfg) + n_encoders++; + KUNIT_ASSERT_EQ(test, n_encoders, 0); + + encoder_cfg1 = vkms_config_create_encoder(config); + vkms_config_for_each_encoder(config, encoder_cfg) { + n_encoders++; + if (encoder_cfg != encoder_cfg1) + KUNIT_FAIL(test, "Unexpected encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + n_encoders = 0; + + encoder_cfg2 = vkms_config_create_encoder(config); + vkms_config_for_each_encoder(config, encoder_cfg) { + n_encoders++; + if (encoder_cfg != encoder_cfg1 && 
encoder_cfg != encoder_cfg2) + KUNIT_FAIL(test, "Unexpected encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 2); + n_encoders = 0; + + vkms_config_destroy_encoder(config, encoder_cfg2); + vkms_config_for_each_encoder(config, encoder_cfg) { + n_encoders++; + if (encoder_cfg != encoder_cfg1) + KUNIT_FAIL(test, "Unexpected encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + n_encoders = 0; + + vkms_config_destroy(config); +} + static void vkms_config_test_invalid_plane_number(struct kunit *test) { struct vkms_config *config; @@ -350,6 +419,29 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_invalid_encoder_number(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg; + int n; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Invalid: No encoders */ + encoder_cfg = get_first_encoder(config); + vkms_config_destroy_encoder(config, encoder_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Too many encoders */ + for (n = 0; n <= 32; n++) + vkms_config_create_encoder(config); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + static void vkms_config_test_attach_different_configs(struct kunit *test) { struct vkms_config *config1, *config2; @@ -514,10 +606,12 @@ static struct kunit_case vkms_config_test_cases[] = { default_config_gen_params), KUNIT_CASE(vkms_config_test_get_planes), KUNIT_CASE(vkms_config_test_get_crtcs), + KUNIT_CASE(vkms_config_test_get_encoders), KUNIT_CASE(vkms_config_test_invalid_plane_number), KUNIT_CASE(vkms_config_test_valid_plane_type), KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs), KUNIT_CASE(vkms_config_test_invalid_crtc_number), + KUNIT_CASE(vkms_config_test_invalid_encoder_number), KUNIT_CASE(vkms_config_test_attach_different_configs), KUNIT_CASE(vkms_config_test_plane_attach_crtc), KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index 458385413648..db8be054f6f4 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -24,6 +24,7 @@ struct vkms_config *vkms_config_create(const char *dev_name) INIT_LIST_HEAD(&config->planes); INIT_LIST_HEAD(&config->crtcs); + INIT_LIST_HEAD(&config->encoders); return config; } @@ -36,6 +37,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, struct vkms_config *config; struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; int n; config = vkms_config_create(DEFAULT_DEVICE_NAME); @@ -80,6 +82,10 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, goto err_alloc; } + encoder_cfg = vkms_config_create_encoder(config); + if (IS_ERR(encoder_cfg)) + goto err_alloc; + return config; err_alloc: @@ -92,6 +98,7 @@ void vkms_config_destroy(struct vkms_config *config) { struct vkms_config_plane *plane_cfg, *plane_tmp; struct vkms_config_crtc *crtc_cfg, *crtc_tmp; + struct vkms_config_encoder *encoder_cfg, *encoder_tmp; list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link) vkms_config_destroy_plane(plane_cfg); @@ -99,6 +106,9 @@ void vkms_config_destroy(struct vkms_config *config) list_for_each_entry_safe(crtc_cfg, crtc_tmp, &config->crtcs, link) vkms_config_destroy_crtc(config, crtc_cfg); + list_for_each_entry_safe(encoder_cfg, 
encoder_tmp, &config->encoders, link) + vkms_config_destroy_encoder(config, encoder_cfg); + kfree_const(config->dev_name); kfree(config); } @@ -192,6 +202,20 @@ static bool valid_crtc_number(const struct vkms_config *config) return true; } +static bool valid_encoder_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + size_t n_encoders; + + n_encoders = list_count_nodes((struct list_head *)&config->encoders); + if (n_encoders <= 0 || n_encoders >= 32) { + drm_info(dev, "The number of encoders must be between 1 and 31\n"); + return false; + } + + return true; +} + bool vkms_config_is_valid(const struct vkms_config *config) { struct vkms_config_crtc *crtc_cfg; @@ -202,6 +226,9 @@ bool vkms_config_is_valid(const struct vkms_config *config) if (!valid_crtc_number(config)) return false; + if (!valid_encoder_number(config)) + return false; + if (!valid_plane_possible_crtcs(config)) return false; @@ -222,6 +249,7 @@ static int vkms_config_show(struct seq_file *m, void *data) const char *dev_name; struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config); seq_printf(m, "dev_name=%s\n", dev_name); @@ -238,6 +266,9 @@ static int vkms_config_show(struct seq_file *m, void *data) vkms_config_crtc_get_writeback(crtc_cfg)); } + vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) + seq_puts(m, "encoder\n"); + return 0; } @@ -384,3 +415,26 @@ struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config return vkms_config_crtc_get_plane(config, crtc_cfg, DRM_PLANE_TYPE_CURSOR); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_crtc_cursor_plane); + +struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config) +{ + struct vkms_config_encoder *encoder_cfg; + + encoder_cfg = kzalloc(sizeof(*encoder_cfg), GFP_KERNEL); + if (!encoder_cfg) + return ERR_PTR(-ENOMEM); + + encoder_cfg->config = config; + list_add_tail(&encoder_cfg->link, &config->encoders); + + return encoder_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder); + +void vkms_config_destroy_encoder(struct vkms_config *config, + struct vkms_config_encoder *encoder_cfg) +{ + list_del(&encoder_cfg->link); + kfree(encoder_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index ad303b34ee03..024cbed0e439 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -15,12 +15,14 @@ * @dev_name: Name of the device * @planes: List of planes configured for the device * @crtcs: List of CRTCs configured for the device + * @encoders: List of encoders configured for the device * @dev: Used to store the current VKMS device. Only set when the device is instantiated. */ struct vkms_config { const char *dev_name; struct list_head planes; struct list_head crtcs; + struct list_head encoders; struct vkms_device *dev; }; @@ -69,6 +71,24 @@ struct vkms_config_crtc { struct vkms_output *crtc; }; +/** + * struct vkms_config_encoder + * + * @link: Link to the others encoders in vkms_config + * @config: The vkms_config this CRTC belongs to + * @encoder: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS encoder + * during device creation. This pointer is not managed by the + * configuration and must be managed by other means. 
+ */ +struct vkms_config_encoder { + struct list_head link; + struct vkms_config *config; + + /* Internal usage */ + struct drm_encoder *encoder; +}; + /** * vkms_config_for_each_plane - Iterate over the vkms_config planes * @config: &struct vkms_config pointer @@ -85,6 +105,14 @@ struct vkms_config_crtc { #define vkms_config_for_each_crtc(config, crtc_cfg) \ list_for_each_entry((crtc_cfg), &(config)->crtcs, link) +/** + * vkms_config_for_each_encoder - Iterate over the vkms_config encoders + * @config: &struct vkms_config pointer + * @encoder_cfg: &struct vkms_config_encoder pointer used as cursor + */ +#define vkms_config_for_each_encoder(config, encoder_cfg) \ + list_for_each_entry((encoder_cfg), &(config)->encoders, link) + /** * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane * possible CRTCs @@ -286,4 +314,22 @@ struct vkms_config_plane *vkms_config_crtc_primary_plane(const struct vkms_confi struct vkms_config_plane *vkms_config_crtc_cursor_plane(const struct vkms_config *config, struct vkms_config_crtc *crtc_cfg); +/** + * vkms_config_create_encoder() - Add a new encoder configuration + * @config: Configuration to add the encoder to + * + * Returns: + * The new encoder configuration or an error. Call vkms_config_destroy_encoder() + * to free the returned encoder configuration. + */ +struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *config); + +/** + * vkms_config_destroy_encoder() - Remove and free a encoder configuration + * @config: Configuration to remove the encoder from + * @encoder_cfg: Encoder configuration to destroy + */ +void vkms_config_destroy_encoder(struct vkms_config *config, + struct vkms_config_encoder *encoder_cfg); + #endif /* _VKMS_CONFIG_H_ */ From b8776fc9b2863c55193f66e1146a89bbccd2b4e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:12 +0100 Subject: [PATCH 0028/1627] drm/vkms: Allow to attach encoders and CRTCs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of possible CRTCs to the encoder configuration and helpers to attach and detach them. Now that the default configuration has its encoder and CRTC correctly attached, configure the output following the configuration. 
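As an illustrative sketch (again with a hypothetical wrapper function; only the attach/detach helpers are introduced by this patch), wiring an encoder to a CRTC and undoing it works along these lines:

	static int example_attach_encoder(struct vkms_config *config)
	{
		struct vkms_config_crtc *crtc_cfg;
		struct vkms_config_encoder *encoder_cfg;
		int err;

		crtc_cfg = vkms_config_create_crtc(config);
		if (IS_ERR(crtc_cfg))
			return PTR_ERR(crtc_cfg);

		encoder_cfg = vkms_config_create_encoder(config);
		if (IS_ERR(encoder_cfg))
			return PTR_ERR(encoder_cfg);

		/* Attaching the same pair twice returns -EEXIST; attaching
		 * objects that belong to different configurations returns
		 * -EINVAL.
		 */
		err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
		if (err)
			return err;

		/* After detaching, the encoder has no possible CRTC and this
		 * CRTC has no possible encoder, so vkms_config_is_valid()
		 * fails until they are attached again.
		 */
		vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg);

		return 0;
	}

Objects created this way stay linked to the configuration, so anything left over is released later by vkms_config_destroy().
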
Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-13-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 125 ++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.c | 82 ++++++++++++ drivers/gpu/drm/vkms/vkms_config.h | 29 ++++ drivers/gpu/drm/vkms/vkms_output.c | 49 ++++--- 5 files changed, 266 insertions(+), 20 deletions(-) diff --git a/.clang-format b/.clang-format index c355a2f58eed..5d21c0e4edbd 100644 --- a/.clang-format +++ b/.clang-format @@ -693,6 +693,7 @@ ForEachMacros: - 'vkms_config_for_each_crtc' - 'vkms_config_for_each_encoder' - 'vkms_config_for_each_plane' + - 'vkms_config_encoder_for_each_possible_crtc' - 'vkms_config_plane_for_each_possible_crtc' - 'while_for_each_ftrace_op' - 'xa_for_each' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index fa8b4f23cb49..600f563dd0a8 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -312,6 +312,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) struct vkms_config *config; struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; int err; config = vkms_config_default_create(false, false, false); @@ -365,6 +366,9 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) /* Invalid: Second CRTC without primary plane */ crtc_cfg = vkms_config_create_crtc(config); + encoder_cfg = vkms_config_create_encoder(config); + err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg); + KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); /* Valid: Second CRTC with a primary plane */ @@ -442,11 +446,57 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_plane *plane_cfg; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_encoder *encoder_cfg; + int err; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + crtc_cfg1 = get_first_crtc(config); + + /* Invalid: Encoder without a possible CRTC */ + encoder_cfg = vkms_config_create_encoder(config); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: Second CRTC with shared encoder */ + crtc_cfg2 = vkms_config_create_crtc(config); + + plane_cfg = vkms_config_create_plane(config); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); + err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + + err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + /* Invalid: Second CRTC without encoders */ + vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg2); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Valid: First CRTC with 2 possible encoder */ + vkms_config_destroy_plane(plane_cfg); + vkms_config_destroy_crtc(config, crtc_cfg2); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); 
+} + static void vkms_config_test_attach_different_configs(struct kunit *test) { struct vkms_config *config1, *config2; struct vkms_config_plane *plane_cfg1, *plane_cfg2; struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; int err; config1 = vkms_config_create("test1"); @@ -457,20 +507,29 @@ static void vkms_config_test_attach_different_configs(struct kunit *test) plane_cfg1 = vkms_config_create_plane(config1); crtc_cfg1 = vkms_config_create_crtc(config1); + encoder_cfg1 = vkms_config_create_encoder(config1); plane_cfg2 = vkms_config_create_plane(config2); crtc_cfg2 = vkms_config_create_crtc(config2); + encoder_cfg2 = vkms_config_create_encoder(config2); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2); err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2); KUNIT_EXPECT_NE(test, err, 0); err = vkms_config_plane_attach_crtc(plane_cfg2, crtc_cfg1); KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2); + KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1); + KUNIT_EXPECT_NE(test, err, 0); + vkms_config_destroy(config1); vkms_config_destroy(config2); } @@ -600,6 +659,70 @@ static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + int n_crtcs = 0; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + encoder_cfg1 = vkms_config_create_encoder(config); + encoder_cfg2 = vkms_config_create_encoder(config); + crtc_cfg1 = vkms_config_create_crtc(config); + crtc_cfg2 = vkms_config_create_crtc(config); + + /* No possible CRTCs */ + vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Encoder 1 attached to CRTC 1 and 2 */ + err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_encoder_attach_crtc(encoder_cfg1, crtc_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1 && possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 2); + n_crtcs = 0; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + + /* Encoder 1 attached to CRTC 1 and encoder 2 to CRTC 2 */ + vkms_config_encoder_detach_crtc(encoder_cfg1, crtc_cfg2); + vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg1) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + n_crtcs = 0; + + err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg2); + KUNIT_EXPECT_EQ(test, 
err, 0); + vkms_config_encoder_for_each_possible_crtc(encoder_cfg2, idx, possible_crtc) { + n_crtcs++; + if (possible_crtc != crtc_cfg2) + KUNIT_FAIL(test, "Unexpected possible CRTC"); + } + KUNIT_ASSERT_EQ(test, n_crtcs, 1); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), KUNIT_CASE_PARAM(vkms_config_test_default_config, @@ -612,9 +735,11 @@ static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs), KUNIT_CASE(vkms_config_test_invalid_crtc_number), KUNIT_CASE(vkms_config_test_invalid_encoder_number), + KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs), KUNIT_CASE(vkms_config_test_attach_different_configs), KUNIT_CASE(vkms_config_test_plane_attach_crtc), KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), + KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index db8be054f6f4..17262a9c2567 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -86,6 +86,9 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, if (IS_ERR(encoder_cfg)) goto err_alloc; + if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg)) + goto err_alloc; + return config; err_alloc: @@ -216,6 +219,42 @@ static bool valid_encoder_number(const struct vkms_config *config) return true; } +static bool valid_encoder_possible_crtcs(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; + + vkms_config_for_each_encoder(config, encoder_cfg) { + if (xa_empty(&encoder_cfg->possible_crtcs)) { + drm_info(dev, "All encoders must have at least one possible CRTC\n"); + return false; + } + } + + vkms_config_for_each_crtc(config, crtc_cfg) { + bool crtc_has_encoder = false; + + vkms_config_for_each_encoder(config, encoder_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, + idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + crtc_has_encoder = true; + } + } + + if (!crtc_has_encoder) { + drm_info(dev, "All CRTCs must have at least one possible encoder\n"); + return false; + } + } + + return true; +} + bool vkms_config_is_valid(const struct vkms_config *config) { struct vkms_config_crtc *crtc_cfg; @@ -237,6 +276,9 @@ bool vkms_config_is_valid(const struct vkms_config *config) return false; } + if (!valid_encoder_possible_crtcs(config)) + return false; + return true; } EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid); @@ -362,10 +404,14 @@ void vkms_config_destroy_crtc(struct vkms_config *config, struct vkms_config_crtc *crtc_cfg) { struct vkms_config_plane *plane_cfg; + struct vkms_config_encoder *encoder_cfg; vkms_config_for_each_plane(config, plane_cfg) vkms_config_plane_detach_crtc(plane_cfg, crtc_cfg); + vkms_config_for_each_encoder(config, encoder_cfg) + vkms_config_encoder_detach_crtc(encoder_cfg, crtc_cfg); + list_del(&crtc_cfg->link); kfree(crtc_cfg); } @@ -425,6 +471,8 @@ struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *confi return ERR_PTR(-ENOMEM); encoder_cfg->config = config; + xa_init_flags(&encoder_cfg->possible_crtcs, XA_FLAGS_ALLOC); + list_add_tail(&encoder_cfg->link, &config->encoders); return encoder_cfg; @@ -434,7 +482,41 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder); void 
vkms_config_destroy_encoder(struct vkms_config *config, struct vkms_config_encoder *encoder_cfg) { + xa_destroy(&encoder_cfg->possible_crtcs); list_del(&encoder_cfg->link); kfree(encoder_cfg); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_encoder); + +int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + u32 crtc_idx = 0; + + if (encoder_cfg->config != crtc_cfg->config) + return -EINVAL; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + return -EEXIST; + } + + return xa_alloc(&encoder_cfg->possible_crtcs, &crtc_idx, crtc_cfg, + xa_limit_32b, GFP_KERNEL); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_attach_crtc); + +void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg) +{ + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { + if (possible_crtc == crtc_cfg) + xa_erase(&encoder_cfg->possible_crtcs, idx); + } +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 024cbed0e439..3e5b2e407378 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -76,6 +76,7 @@ struct vkms_config_crtc { * * @link: Link to the others encoders in vkms_config * @config: The vkms_config this CRTC belongs to + * @possible_crtcs: Array of CRTCs that can be used with this encoder * @encoder: Internal usage. This pointer should never be considered as valid. * It can be used to store a temporary reference to a VKMS encoder * during device creation. 
This pointer is not managed by the @@ -85,6 +86,8 @@ struct vkms_config_encoder { struct list_head link; struct vkms_config *config; + struct xarray possible_crtcs; + /* Internal usage */ struct drm_encoder *encoder; }; @@ -123,6 +126,16 @@ struct vkms_config_encoder { #define vkms_config_plane_for_each_possible_crtc(plane_cfg, idx, possible_crtc) \ xa_for_each(&(plane_cfg)->possible_crtcs, idx, (possible_crtc)) +/** + * vkms_config_encoder_for_each_possible_crtc - Iterate over the + * vkms_config_encoder possible CRTCs + * @encoder_cfg: &struct vkms_config_encoder pointer + * @idx: Index of the cursor + * @possible_crtc: &struct vkms_config_crtc pointer used as cursor + */ +#define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \ + xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc)) + /** * vkms_config_create() - Create a new VKMS configuration * @dev_name: Name of the device @@ -332,4 +345,20 @@ struct vkms_config_encoder *vkms_config_create_encoder(struct vkms_config *confi void vkms_config_destroy_encoder(struct vkms_config *config, struct vkms_config_encoder *encoder_cfg); +/** + * vkms_config_encoder_attach_crtc - Attach a encoder to a CRTC + * @encoder_cfg: Encoder to attach + * @crtc_cfg: CRTC to attach @encoder_cfg to + */ +int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg); + +/** + * vkms_config_encoder_detach_crtc - Detach a encoder from a CRTC + * @encoder_cfg: Encoder to detach + * @crtc_cfg: CRTC to detach @encoder_cfg from + */ +void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg, + struct vkms_config_crtc *crtc_cfg); + #endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index f63bc8e3014b..8920d6b5d105 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -9,9 +9,9 @@ int vkms_output_init(struct vkms_device *vkmsdev) { struct drm_device *dev = &vkmsdev->drm; struct vkms_connector *connector; - struct drm_encoder *encoder; struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; + struct vkms_config_encoder *encoder_cfg; int ret; int writeback; @@ -61,32 +61,41 @@ int vkms_output_init(struct vkms_device *vkmsdev) } } + vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) { + struct vkms_config_crtc *possible_crtc; + unsigned long idx = 0; + + encoder_cfg->encoder = drmm_kzalloc(dev, sizeof(*encoder_cfg->encoder), GFP_KERNEL); + if (!encoder_cfg->encoder) { + DRM_ERROR("Failed to allocate encoder\n"); + return -ENOMEM; + } + ret = drmm_encoder_init(dev, encoder_cfg->encoder, NULL, + DRM_MODE_ENCODER_VIRTUAL, NULL); + if (ret) { + DRM_ERROR("Failed to init encoder\n"); + return ret; + } + + vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) { + encoder_cfg->encoder->possible_crtcs |= + drm_crtc_mask(&possible_crtc->crtc->crtc); + } + } + connector = vkms_connector_init(vkmsdev); if (IS_ERR(connector)) { DRM_ERROR("Failed to init connector\n"); return PTR_ERR(connector); } - encoder = drmm_kzalloc(dev, sizeof(*encoder), GFP_KERNEL); - if (!encoder) { - DRM_ERROR("Failed to allocate encoder\n"); - return -ENOMEM; - } - ret = drmm_encoder_init(dev, encoder, NULL, - DRM_MODE_ENCODER_VIRTUAL, NULL); - if (ret) { - DRM_ERROR("Failed to init encoder\n"); - return ret; - } - - vkms_config_for_each_crtc(vkmsdev->config, crtc_cfg) - encoder->possible_crtcs = drm_crtc_mask(&crtc_cfg->crtc->crtc); - /* 
Attach the encoder and the connector */ - ret = drm_connector_attach_encoder(&connector->base, encoder); - if (ret) { - DRM_ERROR("Failed to attach connector to encoder\n"); - return ret; + vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) { + ret = drm_connector_attach_encoder(&connector->base, encoder_cfg->encoder); + if (ret) { + DRM_ERROR("Failed to attach connector to encoder\n"); + return ret; + } } drm_mode_config_reset(dev); From da38c72018e28b08197975cec7ffd5e07f189a9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:13 +0100 Subject: [PATCH 0029/1627] drm/vkms: Allow to configure multiple connectors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of connectors to vkms_config and helper functions to add and remove as many connectors as wanted. For backwards compatibility, add one enabled connector to the default configuration. A future patch will allow to attach connectors and encoders, but for the moment there are no changes in the way the output is configured. Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-14-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 95 +++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.c | 54 +++++++++++ drivers/gpu/drm/vkms/vkms_config.h | 44 +++++++++ drivers/gpu/drm/vkms/vkms_connector.c | 11 +++ 5 files changed, 205 insertions(+) diff --git a/.clang-format b/.clang-format index 5d21c0e4edbd..ca49832993c5 100644 --- a/.clang-format +++ b/.clang-format @@ -690,6 +690,7 @@ ForEachMacros: - 'v4l2_m2m_for_each_src_buf' - 'v4l2_m2m_for_each_src_buf_safe' - 'virtio_device_for_each_vq' + - 'vkms_config_for_each_connector' - 'vkms_config_for_each_crtc' - 'vkms_config_for_each_encoder' - 'vkms_config_for_each_plane' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 600f563dd0a8..610bcde3e018 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -28,6 +28,17 @@ static size_t vkms_config_get_num_encoders(struct vkms_config *config) return count; } +static size_t vkms_config_get_num_connectors(struct vkms_config *config) +{ + struct vkms_config_connector *connector_cfg; + size_t count = 0; + + vkms_config_for_each_connector(config, connector_cfg) + count++; + + return count; +} + static struct vkms_config_plane *get_first_plane(struct vkms_config *config) { struct vkms_config_plane *plane_cfg; @@ -58,6 +69,16 @@ static struct vkms_config_encoder *get_first_encoder(struct vkms_config *config) return NULL; } +static struct vkms_config_connector *get_first_connector(struct vkms_config *config) +{ + struct vkms_config_connector *connector_cfg; + + vkms_config_for_each_connector(config, connector_cfg) + return connector_cfg; + + return NULL; +} + struct default_config_case { bool enable_cursor; bool enable_writeback; @@ -79,6 +100,7 @@ static void vkms_config_test_empty_config(struct kunit *test) KUNIT_EXPECT_EQ(test, vkms_config_get_num_planes(config), 0); KUNIT_EXPECT_EQ(test, vkms_config_get_num_crtcs(config), 0); KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 0); + KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 0); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -155,6 +177,9 @@ 
static void vkms_config_test_default_config(struct kunit *test) /* Encoders */ KUNIT_EXPECT_EQ(test, vkms_config_get_num_encoders(config), 1); + /* Connectors */ + KUNIT_EXPECT_EQ(test, vkms_config_get_num_connectors(config), 1); + KUNIT_EXPECT_TRUE(test, vkms_config_is_valid(config)); vkms_config_destroy(config); @@ -284,6 +309,51 @@ static void vkms_config_test_get_encoders(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_get_connectors(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg; + struct vkms_config_connector *connector_cfg1, *connector_cfg2; + int n_connectors = 0; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + vkms_config_for_each_connector(config, connector_cfg) + n_connectors++; + KUNIT_ASSERT_EQ(test, n_connectors, 0); + + connector_cfg1 = vkms_config_create_connector(config); + vkms_config_for_each_connector(config, connector_cfg) { + n_connectors++; + if (connector_cfg != connector_cfg1) + KUNIT_FAIL(test, "Unexpected connector"); + } + KUNIT_ASSERT_EQ(test, n_connectors, 1); + n_connectors = 0; + + connector_cfg2 = vkms_config_create_connector(config); + vkms_config_for_each_connector(config, connector_cfg) { + n_connectors++; + if (connector_cfg != connector_cfg1 && + connector_cfg != connector_cfg2) + KUNIT_FAIL(test, "Unexpected connector"); + } + KUNIT_ASSERT_EQ(test, n_connectors, 2); + n_connectors = 0; + + vkms_config_destroy_connector(connector_cfg2); + vkms_config_for_each_connector(config, connector_cfg) { + n_connectors++; + if (connector_cfg != connector_cfg1) + KUNIT_FAIL(test, "Unexpected connector"); + } + KUNIT_ASSERT_EQ(test, n_connectors, 1); + n_connectors = 0; + + vkms_config_destroy(config); +} + static void vkms_config_test_invalid_plane_number(struct kunit *test) { struct vkms_config *config; @@ -491,6 +561,29 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_invalid_connector_number(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg; + int n; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + /* Invalid: No connectors */ + connector_cfg = get_first_connector(config); + vkms_config_destroy_connector(connector_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + /* Invalid: Too many connectors */ + for (n = 0; n <= 32; n++) + connector_cfg = vkms_config_create_connector(config); + + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + static void vkms_config_test_attach_different_configs(struct kunit *test) { struct vkms_config *config1, *config2; @@ -730,12 +823,14 @@ static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_get_planes), KUNIT_CASE(vkms_config_test_get_crtcs), KUNIT_CASE(vkms_config_test_get_encoders), + KUNIT_CASE(vkms_config_test_get_connectors), KUNIT_CASE(vkms_config_test_invalid_plane_number), KUNIT_CASE(vkms_config_test_valid_plane_type), KUNIT_CASE(vkms_config_test_valid_plane_possible_crtcs), KUNIT_CASE(vkms_config_test_invalid_crtc_number), KUNIT_CASE(vkms_config_test_invalid_encoder_number), KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs), + KUNIT_CASE(vkms_config_test_invalid_connector_number), KUNIT_CASE(vkms_config_test_attach_different_configs), KUNIT_CASE(vkms_config_test_plane_attach_crtc), 
KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index 17262a9c2567..fbbdee6068ce 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -25,6 +25,7 @@ struct vkms_config *vkms_config_create(const char *dev_name) INIT_LIST_HEAD(&config->planes); INIT_LIST_HEAD(&config->crtcs); INIT_LIST_HEAD(&config->encoders); + INIT_LIST_HEAD(&config->connectors); return config; } @@ -38,6 +39,7 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; int n; config = vkms_config_create(DEFAULT_DEVICE_NAME); @@ -89,6 +91,10 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, if (vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg)) goto err_alloc; + connector_cfg = vkms_config_create_connector(config); + if (IS_ERR(connector_cfg)) + goto err_alloc; + return config; err_alloc: @@ -102,6 +108,7 @@ void vkms_config_destroy(struct vkms_config *config) struct vkms_config_plane *plane_cfg, *plane_tmp; struct vkms_config_crtc *crtc_cfg, *crtc_tmp; struct vkms_config_encoder *encoder_cfg, *encoder_tmp; + struct vkms_config_connector *connector_cfg, *connector_tmp; list_for_each_entry_safe(plane_cfg, plane_tmp, &config->planes, link) vkms_config_destroy_plane(plane_cfg); @@ -112,6 +119,9 @@ void vkms_config_destroy(struct vkms_config *config) list_for_each_entry_safe(encoder_cfg, encoder_tmp, &config->encoders, link) vkms_config_destroy_encoder(config, encoder_cfg); + list_for_each_entry_safe(connector_cfg, connector_tmp, &config->connectors, link) + vkms_config_destroy_connector(connector_cfg); + kfree_const(config->dev_name); kfree(config); } @@ -255,6 +265,20 @@ static bool valid_encoder_possible_crtcs(const struct vkms_config *config) return true; } +static bool valid_connector_number(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? 
&config->dev->drm : NULL; + size_t n_connectors; + + n_connectors = list_count_nodes((struct list_head *)&config->connectors); + if (n_connectors <= 0 || n_connectors >= 32) { + drm_info(dev, "The number of connectors must be between 1 and 31\n"); + return false; + } + + return true; +} + bool vkms_config_is_valid(const struct vkms_config *config) { struct vkms_config_crtc *crtc_cfg; @@ -268,6 +292,9 @@ bool vkms_config_is_valid(const struct vkms_config *config) if (!valid_encoder_number(config)) return false; + if (!valid_connector_number(config)) + return false; + if (!valid_plane_possible_crtcs(config)) return false; @@ -292,6 +319,7 @@ static int vkms_config_show(struct seq_file *m, void *data) struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; dev_name = vkms_config_get_device_name((struct vkms_config *)vkmsdev->config); seq_printf(m, "dev_name=%s\n", dev_name); @@ -311,6 +339,9 @@ static int vkms_config_show(struct seq_file *m, void *data) vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) seq_puts(m, "encoder\n"); + vkms_config_for_each_connector(vkmsdev->config, connector_cfg) + seq_puts(m, "connector\n"); + return 0; } @@ -520,3 +551,26 @@ void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg, } } EXPORT_SYMBOL_IF_KUNIT(vkms_config_encoder_detach_crtc); + +struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config) +{ + struct vkms_config_connector *connector_cfg; + + connector_cfg = kzalloc(sizeof(*connector_cfg), GFP_KERNEL); + if (!connector_cfg) + return ERR_PTR(-ENOMEM); + + connector_cfg->config = config; + + list_add_tail(&connector_cfg->link, &config->connectors); + + return connector_cfg; +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector); + +void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg) +{ + list_del(&connector_cfg->link); + kfree(connector_cfg); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 3e5b2e407378..73562c894102 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -16,6 +16,7 @@ * @planes: List of planes configured for the device * @crtcs: List of CRTCs configured for the device * @encoders: List of encoders configured for the device + * @connectors: List of connectors configured for the device * @dev: Used to store the current VKMS device. Only set when the device is instantiated. */ struct vkms_config { @@ -23,6 +24,7 @@ struct vkms_config { struct list_head planes; struct list_head crtcs; struct list_head encoders; + struct list_head connectors; struct vkms_device *dev; }; @@ -92,6 +94,24 @@ struct vkms_config_encoder { struct drm_encoder *encoder; }; +/** + * struct vkms_config_connector + * + * @link: Link to the others connector in vkms_config + * @config: The vkms_config this connector belongs to + * @connector: Internal usage. This pointer should never be considered as valid. + * It can be used to store a temporary reference to a VKMS connector + * during device creation. This pointer is not managed by the + * configuration and must be managed by other means. 
+ */ +struct vkms_config_connector { + struct list_head link; + struct vkms_config *config; + + /* Internal usage */ + struct vkms_connector *connector; +}; + /** * vkms_config_for_each_plane - Iterate over the vkms_config planes * @config: &struct vkms_config pointer @@ -116,6 +136,14 @@ struct vkms_config_encoder { #define vkms_config_for_each_encoder(config, encoder_cfg) \ list_for_each_entry((encoder_cfg), &(config)->encoders, link) +/** + * vkms_config_for_each_connector - Iterate over the vkms_config connectors + * @config: &struct vkms_config pointer + * @connector_cfg: &struct vkms_config_connector pointer used as cursor + */ +#define vkms_config_for_each_connector(config, connector_cfg) \ + list_for_each_entry((connector_cfg), &(config)->connectors, link) + /** * vkms_config_plane_for_each_possible_crtc - Iterate over the vkms_config_plane * possible CRTCs @@ -361,4 +389,20 @@ int __must_check vkms_config_encoder_attach_crtc(struct vkms_config_encoder *enc void vkms_config_encoder_detach_crtc(struct vkms_config_encoder *encoder_cfg, struct vkms_config_crtc *crtc_cfg); +/** + * vkms_config_create_connector() - Add a new connector configuration + * @config: Configuration to add the connector to + * + * Returns: + * The new connector configuration or an error. Call + * vkms_config_destroy_connector() to free the returned connector configuration. + */ +struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *config); + +/** + * vkms_config_destroy_connector() - Remove and free a connector configuration + * @connector_cfg: Connector configuration to destroy + */ +void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg); + #endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_connector.c b/drivers/gpu/drm/vkms/vkms_connector.c index ab8b52a84151..48b10cba322a 100644 --- a/drivers/gpu/drm/vkms/vkms_connector.c +++ b/drivers/gpu/drm/vkms/vkms_connector.c @@ -25,8 +25,19 @@ static int vkms_conn_get_modes(struct drm_connector *connector) return count; } +static struct drm_encoder *vkms_conn_best_encoder(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; +} + static const struct drm_connector_helper_funcs vkms_conn_helper_funcs = { .get_modes = vkms_conn_get_modes, + .best_encoder = vkms_conn_best_encoder, }; struct vkms_connector *vkms_connector_init(struct vkms_device *vkmsdev) From 2c7aafc05c8330be4c5f0092b79843507a5e1023 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jos=C3=A9=20Exp=C3=B3sito?= Date: Tue, 18 Feb 2025 11:12:14 +0100 Subject: [PATCH 0030/1627] drm/vkms: Allow to attach connectors and encoders MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a list of possible encoders to the connector configuration and helpers to attach and detach them. Now that the default configuration has its connector and encoder correctly, configure the output following the configuration. 
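For illustration only (not part of the patch itself), the helpers added in this
series could be combined roughly as follows to build one CRTC/encoder/connector
pipeline. The function name, device name and error handling below are a sketch;
only the vkms_config_*() calls come from the patches:

  static int example_build_output(void)
  {
          struct vkms_config *config;
          struct vkms_config_crtc *crtc_cfg;
          struct vkms_config_encoder *encoder_cfg;
          struct vkms_config_connector *connector_cfg;
          int ret;

          config = vkms_config_create("example");
          if (IS_ERR(config))
                  return PTR_ERR(config);

          crtc_cfg = vkms_config_create_crtc(config);
          encoder_cfg = vkms_config_create_encoder(config);
          connector_cfg = vkms_config_create_connector(config);
          /* IS_ERR() checks on the three calls above elided for brevity. */

          ret = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg);
          if (!ret)
                  ret = vkms_config_connector_attach_encoder(connector_cfg,
                                                             encoder_cfg);

          /* Only valid once every connector has a possible encoder. */
          if (!ret && !vkms_config_is_valid(config))
                  ret = -EINVAL;

          if (ret)
                  vkms_config_destroy(config);
          return ret;
  }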
Reviewed-by: Louis Chauvet Co-developed-by: Louis Chauvet Signed-off-by: Louis Chauvet Signed-off-by: José Expósito Link: https://patchwork.freedesktop.org/patch/msgid/20250218101214.5790-15-jose.exposito89@gmail.com Signed-off-by: Maxime Ripard --- .clang-format | 1 + drivers/gpu/drm/vkms/tests/vkms_config_test.c | 102 ++++++++++++++++++ drivers/gpu/drm/vkms/vkms_config.c | 64 +++++++++++ drivers/gpu/drm/vkms/vkms_config.h | 29 +++++ drivers/gpu/drm/vkms/vkms_output.c | 33 +++--- 5 files changed, 216 insertions(+), 13 deletions(-) diff --git a/.clang-format b/.clang-format index ca49832993c5..7630990aa07a 100644 --- a/.clang-format +++ b/.clang-format @@ -694,6 +694,7 @@ ForEachMacros: - 'vkms_config_for_each_crtc' - 'vkms_config_for_each_encoder' - 'vkms_config_for_each_plane' + - 'vkms_config_connector_for_each_possible_encoder' - 'vkms_config_encoder_for_each_possible_crtc' - 'vkms_config_plane_for_each_possible_crtc' - 'while_for_each_ftrace_op' diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index 610bcde3e018..ff4566cf9925 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -584,12 +584,32 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_valid_connector_possible_encoders(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; + + config = vkms_config_default_create(false, false, false); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + encoder_cfg = get_first_encoder(config); + connector_cfg = get_first_connector(config); + + /* Invalid: Connector without a possible encoder */ + vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg); + KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); + + vkms_config_destroy(config); +} + static void vkms_config_test_attach_different_configs(struct kunit *test) { struct vkms_config *config1, *config2; struct vkms_config_plane *plane_cfg1, *plane_cfg2; struct vkms_config_crtc *crtc_cfg1, *crtc_cfg2; struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + struct vkms_config_connector *connector_cfg1, *connector_cfg2; int err; config1 = vkms_config_create("test1"); @@ -601,10 +621,12 @@ static void vkms_config_test_attach_different_configs(struct kunit *test) plane_cfg1 = vkms_config_create_plane(config1); crtc_cfg1 = vkms_config_create_crtc(config1); encoder_cfg1 = vkms_config_create_encoder(config1); + connector_cfg1 = vkms_config_create_connector(config1); plane_cfg2 = vkms_config_create_plane(config2); crtc_cfg2 = vkms_config_create_crtc(config2); encoder_cfg2 = vkms_config_create_encoder(config2); + connector_cfg2 = vkms_config_create_connector(config2); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2); @@ -612,6 +634,8 @@ static void vkms_config_test_attach_different_configs(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2); err = vkms_config_plane_attach_crtc(plane_cfg1, crtc_cfg2); KUNIT_EXPECT_NE(test, err, 0); @@ -623,6 +647,11 @@ static void vkms_config_test_attach_different_configs(struct kunit *test) err = vkms_config_encoder_attach_crtc(encoder_cfg2, crtc_cfg1); 
KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2); + KUNIT_EXPECT_NE(test, err, 0); + err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg1); + KUNIT_EXPECT_NE(test, err, 0); + vkms_config_destroy(config1); vkms_config_destroy(config2); } @@ -816,6 +845,77 @@ static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test) vkms_config_destroy(config); } +static void vkms_config_test_connector_get_possible_encoders(struct kunit *test) +{ + struct vkms_config *config; + struct vkms_config_connector *connector_cfg1, *connector_cfg2; + struct vkms_config_encoder *encoder_cfg1, *encoder_cfg2; + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + int n_encoders = 0; + int err; + + config = vkms_config_create("test"); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); + + connector_cfg1 = vkms_config_create_connector(config); + connector_cfg2 = vkms_config_create_connector(config); + encoder_cfg1 = vkms_config_create_encoder(config); + encoder_cfg2 = vkms_config_create_encoder(config); + + /* No possible encoders */ + vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, + possible_encoder) + KUNIT_FAIL(test, "Unexpected possible encoder"); + + vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx, + possible_encoder) + KUNIT_FAIL(test, "Unexpected possible encoder"); + + /* Connector 1 attached to encoders 1 and 2 */ + err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg1); + KUNIT_EXPECT_EQ(test, err, 0); + err = vkms_config_connector_attach_encoder(connector_cfg1, encoder_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + + vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, + possible_encoder) { + n_encoders++; + if (possible_encoder != encoder_cfg1 && + possible_encoder != encoder_cfg2) + KUNIT_FAIL(test, "Unexpected possible encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 2); + n_encoders = 0; + + vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx, + possible_encoder) + KUNIT_FAIL(test, "Unexpected possible encoder"); + + /* Connector 1 attached to encoder 1 and connector 2 to encoder 2 */ + vkms_config_connector_detach_encoder(connector_cfg1, encoder_cfg2); + vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, + possible_encoder) { + n_encoders++; + if (possible_encoder != encoder_cfg1) + KUNIT_FAIL(test, "Unexpected possible encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + n_encoders = 0; + + err = vkms_config_connector_attach_encoder(connector_cfg2, encoder_cfg2); + KUNIT_EXPECT_EQ(test, err, 0); + vkms_config_connector_for_each_possible_encoder(connector_cfg2, idx, + possible_encoder) { + n_encoders++; + if (possible_encoder != encoder_cfg2) + KUNIT_FAIL(test, "Unexpected possible encoder"); + } + KUNIT_ASSERT_EQ(test, n_encoders, 1); + + vkms_config_destroy(config); +} + static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_empty_config), KUNIT_CASE_PARAM(vkms_config_test_default_config, @@ -831,10 +931,12 @@ static struct kunit_case vkms_config_test_cases[] = { KUNIT_CASE(vkms_config_test_invalid_encoder_number), KUNIT_CASE(vkms_config_test_valid_encoder_possible_crtcs), KUNIT_CASE(vkms_config_test_invalid_connector_number), + KUNIT_CASE(vkms_config_test_valid_connector_possible_encoders), KUNIT_CASE(vkms_config_test_attach_different_configs), KUNIT_CASE(vkms_config_test_plane_attach_crtc), KUNIT_CASE(vkms_config_test_plane_get_possible_crtcs), 
KUNIT_CASE(vkms_config_test_encoder_get_possible_crtcs), + KUNIT_CASE(vkms_config_test_connector_get_possible_encoders), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_config.c b/drivers/gpu/drm/vkms/vkms_config.c index fbbdee6068ce..a1df5659b0fb 100644 --- a/drivers/gpu/drm/vkms/vkms_config.c +++ b/drivers/gpu/drm/vkms/vkms_config.c @@ -95,6 +95,9 @@ struct vkms_config *vkms_config_default_create(bool enable_cursor, if (IS_ERR(connector_cfg)) goto err_alloc; + if (vkms_config_connector_attach_encoder(connector_cfg, encoder_cfg)) + goto err_alloc; + return config; err_alloc: @@ -279,6 +282,22 @@ static bool valid_connector_number(const struct vkms_config *config) return true; } +static bool valid_connector_possible_encoders(const struct vkms_config *config) +{ + struct drm_device *dev = config->dev ? &config->dev->drm : NULL; + struct vkms_config_connector *connector_cfg; + + vkms_config_for_each_connector(config, connector_cfg) { + if (xa_empty(&connector_cfg->possible_encoders)) { + drm_info(dev, + "All connectors must have at least one possible encoder\n"); + return false; + } + } + + return true; +} + bool vkms_config_is_valid(const struct vkms_config *config) { struct vkms_config_crtc *crtc_cfg; @@ -306,6 +325,9 @@ bool vkms_config_is_valid(const struct vkms_config *config) if (!valid_encoder_possible_crtcs(config)) return false; + if (!valid_connector_possible_encoders(config)) + return false; + return true; } EXPORT_SYMBOL_IF_KUNIT(vkms_config_is_valid); @@ -513,6 +535,11 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_encoder); void vkms_config_destroy_encoder(struct vkms_config *config, struct vkms_config_encoder *encoder_cfg) { + struct vkms_config_connector *connector_cfg; + + vkms_config_for_each_connector(config, connector_cfg) + vkms_config_connector_detach_encoder(connector_cfg, encoder_cfg); + xa_destroy(&encoder_cfg->possible_crtcs); list_del(&encoder_cfg->link); kfree(encoder_cfg); @@ -561,6 +588,7 @@ struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *c return ERR_PTR(-ENOMEM); connector_cfg->config = config; + xa_init_flags(&connector_cfg->possible_encoders, XA_FLAGS_ALLOC); list_add_tail(&connector_cfg->link, &config->connectors); @@ -570,7 +598,43 @@ EXPORT_SYMBOL_IF_KUNIT(vkms_config_create_connector); void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg) { + xa_destroy(&connector_cfg->possible_encoders); list_del(&connector_cfg->link); kfree(connector_cfg); } EXPORT_SYMBOL_IF_KUNIT(vkms_config_destroy_connector); + +int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg) +{ + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + u32 encoder_idx = 0; + + if (connector_cfg->config != encoder_cfg->config) + return -EINVAL; + + vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, + possible_encoder) { + if (possible_encoder == encoder_cfg) + return -EEXIST; + } + + return xa_alloc(&connector_cfg->possible_encoders, &encoder_idx, + encoder_cfg, xa_limit_32b, GFP_KERNEL); +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_attach_encoder); + +void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg) +{ + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; + + vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, + possible_encoder) { + if (possible_encoder == encoder_cfg) + 
xa_erase(&connector_cfg->possible_encoders, idx); + } +} +EXPORT_SYMBOL_IF_KUNIT(vkms_config_connector_detach_encoder); diff --git a/drivers/gpu/drm/vkms/vkms_config.h b/drivers/gpu/drm/vkms/vkms_config.h index 73562c894102..0118e3f99706 100644 --- a/drivers/gpu/drm/vkms/vkms_config.h +++ b/drivers/gpu/drm/vkms/vkms_config.h @@ -99,6 +99,7 @@ struct vkms_config_encoder { * * @link: Link to the others connector in vkms_config * @config: The vkms_config this connector belongs to + * @possible_encoders: Array of encoders that can be used with this connector * @connector: Internal usage. This pointer should never be considered as valid. * It can be used to store a temporary reference to a VKMS connector * during device creation. This pointer is not managed by the @@ -108,6 +109,8 @@ struct vkms_config_connector { struct list_head link; struct vkms_config *config; + struct xarray possible_encoders; + /* Internal usage */ struct vkms_connector *connector; }; @@ -164,6 +167,16 @@ struct vkms_config_connector { #define vkms_config_encoder_for_each_possible_crtc(encoder_cfg, idx, possible_crtc) \ xa_for_each(&(encoder_cfg)->possible_crtcs, idx, (possible_crtc)) +/** + * vkms_config_connector_for_each_possible_encoder - Iterate over the + * vkms_config_connector possible encoders + * @connector_cfg: &struct vkms_config_connector pointer + * @idx: Index of the cursor + * @possible_encoder: &struct vkms_config_encoder pointer used as cursor + */ +#define vkms_config_connector_for_each_possible_encoder(connector_cfg, idx, possible_encoder) \ + xa_for_each(&(connector_cfg)->possible_encoders, idx, (possible_encoder)) + /** * vkms_config_create() - Create a new VKMS configuration * @dev_name: Name of the device @@ -405,4 +418,20 @@ struct vkms_config_connector *vkms_config_create_connector(struct vkms_config *c */ void vkms_config_destroy_connector(struct vkms_config_connector *connector_cfg); +/** + * vkms_config_connector_attach_encoder - Attach a connector to an encoder + * @connector_cfg: Connector to attach + * @encoder_cfg: Encoder to attach @connector_cfg to + */ +int __must_check vkms_config_connector_attach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg); + +/** + * vkms_config_connector_detach_encoder - Detach a connector from an encoder + * @connector_cfg: Connector to detach + * @encoder_cfg: Encoder to detach @connector_cfg from + */ +void vkms_config_connector_detach_encoder(struct vkms_config_connector *connector_cfg, + struct vkms_config_encoder *encoder_cfg); + #endif /* _VKMS_CONFIG_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 8920d6b5d105..8d7ca0cdd79f 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -8,10 +8,10 @@ int vkms_output_init(struct vkms_device *vkmsdev) { struct drm_device *dev = &vkmsdev->drm; - struct vkms_connector *connector; struct vkms_config_plane *plane_cfg; struct vkms_config_crtc *crtc_cfg; struct vkms_config_encoder *encoder_cfg; + struct vkms_config_connector *connector_cfg; int ret; int writeback; @@ -83,22 +83,29 @@ int vkms_output_init(struct vkms_device *vkmsdev) } } - connector = vkms_connector_init(vkmsdev); - if (IS_ERR(connector)) { - DRM_ERROR("Failed to init connector\n"); - return PTR_ERR(connector); - } + vkms_config_for_each_connector(vkmsdev->config, connector_cfg) { + struct vkms_config_encoder *possible_encoder; + unsigned long idx = 0; - /* Attach the encoder and the connector */ - 
vkms_config_for_each_encoder(vkmsdev->config, encoder_cfg) { - ret = drm_connector_attach_encoder(&connector->base, encoder_cfg->encoder); - if (ret) { - DRM_ERROR("Failed to attach connector to encoder\n"); - return ret; + connector_cfg->connector = vkms_connector_init(vkmsdev); + if (IS_ERR(connector_cfg->connector)) { + DRM_ERROR("Failed to init connector\n"); + return PTR_ERR(connector_cfg->connector); + } + + vkms_config_connector_for_each_possible_encoder(connector_cfg, + idx, + possible_encoder) { + ret = drm_connector_attach_encoder(&connector_cfg->connector->base, + possible_encoder->encoder); + if (ret) { + DRM_ERROR("Failed to attach connector to encoder\n"); + return ret; + } } } drm_mode_config_reset(dev); - return ret; + return 0; } From fc858ddf9c68696537cec530d2d48bf6ed06ea92 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Thu, 6 Mar 2025 17:06:43 -0500 Subject: [PATCH 0031/1627] drm/xe/guc_pc: Remove duplicated pc_start call xe_guc_pc_start() was getting called from both xe_uc_init_hw() and from xe_guc_start(). But both are called from do_gt_restart() and only xe_uc_init_hw() is called at initialization. So, let's remove the duplication in the regular gt_restart path. The only place where xe_guc_pc_start() won't get called now is on the gt_reset failure path. However, if gt_reset has failed, it is really unlikely that the PC start will work or is desired. Cc: Vinay Belgaumkar Reviewed-by: Jonathan Cavitt Reviewed-by: Vinay Belgaumkar Link: https://patchwork.freedesktop.org/patch/msgid/20250306220643.1014049-1-rodrigo.vivi@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_guc.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index bc1ff0a4e1e7..bc5714a5b36b 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -1496,14 +1496,6 @@ void xe_guc_stop(struct xe_guc *guc) int xe_guc_start(struct xe_guc *guc) { - if (!IS_SRIOV_VF(guc_to_xe(guc))) { - int err; - - err = xe_guc_pc_start(&guc->pc); - xe_gt_WARN(guc_to_gt(guc), err, "Failed to start GuC PC: %pe\n", - ERR_PTR(err)); - } - return xe_guc_submit_start(guc); } From 70e5043ba85eae199b232e39921abd706b5c1fa4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Fri, 7 Mar 2025 11:01:09 +0100 Subject: [PATCH 0032/1627] drm/xe/userptr: Fix an incorrect assert MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The assert incorrectly checks the total length processed which can in fact be greater than the number of pages. Fix. Fixes: ea3e66d280ce ("drm/xe/hmm: Don't dereference struct page pointers without notifier lock") Cc: Matthew Auld Cc: Matthew Brost Signed-off-by: Thomas Hellström Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20250307100109.21397-1-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/xe/xe_hmm.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_hmm.c b/drivers/gpu/drm/xe/xe_hmm.c index 392102515f3d..c3cc0fa105e8 100644 --- a/drivers/gpu/drm/xe/xe_hmm.c +++ b/drivers/gpu/drm/xe/xe_hmm.c @@ -138,13 +138,17 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range, i += size; if (unlikely(j == st->nents - 1)) { + xe_assert(xe, i >= npages); if (i > npages) size -= (i - npages); + sg_mark_end(sgl); + } else { + xe_assert(xe, i < npages); } + sg_set_page(sgl, page, size << PAGE_SHIFT, 0); } - xe_assert(xe, i == npages); return dma_map_sgtable(dev, st, write ? 
DMA_BIDIRECTIONAL : DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING); From 4570355f8eaa476164cfb7ca959fdbf0cebbc9eb Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 27 Feb 2025 01:35:53 +0000 Subject: [PATCH 0033/1627] drm/nouveau/nvkm: factor out current GSP RPC command policies There can be multiple cases of handling the GSP RPC messages, which are the reply of GSP RPC commands according to the requirement of the callers and the nature of the GSP RPC commands. The current supported reply policies are "callers don't care" and "receive the entire message" according to the requirement of the callers. To introduce a new policy, factor out the current RPC command reply polices. Also, centralize the handling of the reply in a single function. Factor out NVKM_GSP_RPC_REPLY_NOWAIT as "callers don't care" and NVKM_GSP_RPC_REPLY_RECV as "receive the entire message". Introduce a kernel doc to document the policies. Factor out r535_gsp_rpc_handle_reply(). No functional change is intended for small GSP RPC commands. For large GSP commands, the caller decides the policy of how to handle the returned GSP RPC message. Cc: Ben Skeggs Cc: Alexandre Courbot Signed-off-by: Zhi Wang Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20250227013554.8269-2-zhiw@nvidia.com --- Documentation/gpu/nouveau.rst | 3 + .../gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 34 +++++++-- .../gpu/drm/nouveau/nvkm/subdev/bar/r535.c | 2 +- .../gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 75 ++++++++++--------- .../drm/nouveau/nvkm/subdev/instmem/r535.c | 2 +- 5 files changed, 72 insertions(+), 44 deletions(-) diff --git a/Documentation/gpu/nouveau.rst b/Documentation/gpu/nouveau.rst index 0f34131ccc27..b8c801e0068c 100644 --- a/Documentation/gpu/nouveau.rst +++ b/Documentation/gpu/nouveau.rst @@ -27,3 +27,6 @@ GSP Support .. kernel-doc:: drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c :doc: GSP message queue element + +.. kernel-doc:: drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h + :doc: GSP message handling policy diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index 746e126c3ecf..e5fe44589bbd 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -31,6 +31,25 @@ typedef int (*nvkm_gsp_msg_ntfy_func)(void *priv, u32 fn, void *repv, u32 repc); struct nvkm_gsp_event; typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 repc); +/** + * DOC: GSP message handling policy + * + * When sending a GSP RPC command, there can be multiple cases of handling + * the GSP RPC messages, which are the reply of GSP RPC commands, according + * to the requirement of the callers and the nature of the GSP RPC commands. + * + * NVKM_GSP_RPC_REPLY_NOWAIT - If specified, immediately return to the + * caller after the GSP RPC command is issued. + * + * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP + * RPC message after the GSP RPC command is issued. 
+ * + */ +enum nvkm_gsp_rpc_reply_policy { + NVKM_GSP_RPC_REPLY_NOWAIT = 0, + NVKM_GSP_RPC_REPLY_RECV, +}; + struct nvkm_gsp { const struct nvkm_gsp_func *func; struct nvkm_subdev subdev; @@ -188,7 +207,8 @@ struct nvkm_gsp { const struct nvkm_gsp_rm { void *(*rpc_get)(struct nvkm_gsp *, u32 fn, u32 argc); - void *(*rpc_push)(struct nvkm_gsp *, void *argv, bool wait, u32 repc); + void *(*rpc_push)(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy, u32 repc); void (*rpc_done)(struct nvkm_gsp *gsp, void *repv); void *(*rm_ctrl_get)(struct nvkm_gsp_object *, u32 cmd, u32 argc); @@ -255,9 +275,10 @@ nvkm_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc) } static inline void * -nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, bool wait, u32 repc) +nvkm_gsp_rpc_push(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy, u32 repc) { - return gsp->rm->rpc_push(gsp, argv, wait, repc); + return gsp->rm->rpc_push(gsp, argv, policy, repc); } static inline void * @@ -268,13 +289,14 @@ nvkm_gsp_rpc_rd(struct nvkm_gsp *gsp, u32 fn, u32 argc) if (IS_ERR_OR_NULL(argv)) return argv; - return nvkm_gsp_rpc_push(gsp, argv, true, argc); + return nvkm_gsp_rpc_push(gsp, argv, NVKM_GSP_RPC_REPLY_RECV, argc); } static inline int -nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, bool wait) +nvkm_gsp_rpc_wr(struct nvkm_gsp *gsp, void *argv, + enum nvkm_gsp_rpc_reply_policy policy) { - void *repv = nvkm_gsp_rpc_push(gsp, argv, wait, 0); + void *repv = nvkm_gsp_rpc_push(gsp, argv, policy, 0); if (IS_ERR(repv)) return PTR_ERR(repv); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c index 3a30bea30e36..90186f98065c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/r535.c @@ -56,7 +56,7 @@ r535_bar_bar2_update_pde(struct nvkm_gsp *gsp, u64 addr) rpc->info.entryValue = addr ? ((addr >> 4) | 2) : 0; /* PD3 entry format! */ rpc->info.entryLevelShift = 47; //XXX: probably fetch this from mmu! 
- return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } static void diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index db2602e88006..f73dcc3e1c0d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -585,13 +585,34 @@ r535_gsp_rpc_poll(struct nvkm_gsp *gsp, u32 fn) } static void * -r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait, - u32 gsp_rpc_len) +r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, + enum nvkm_gsp_rpc_reply_policy policy, + u32 gsp_rpc_len) +{ + struct nvfw_gsp_rpc *reply; + void *repv = NULL; + + switch (policy) { + case NVKM_GSP_RPC_REPLY_NOWAIT: + break; + case NVKM_GSP_RPC_REPLY_RECV: + reply = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); + if (!IS_ERR_OR_NULL(reply)) + repv = reply->data; + else + repv = reply; + break; + } + + return repv; +} + +static void * +r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) { struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); - struct nvfw_gsp_rpc *msg; u32 fn = rpc->function; - void *repv = NULL; int ret; if (gsp->subdev.debug >= NV_DBG_TRACE) { @@ -605,15 +626,7 @@ r535_gsp_rpc_send(struct nvkm_gsp *gsp, void *payload, bool wait, if (ret) return ERR_PTR(ret); - if (wait) { - msg = r535_gsp_msg_recv(gsp, fn, gsp_rpc_len); - if (!IS_ERR_OR_NULL(msg)) - repv = msg->data; - else - repv = msg; - } - - return repv; + return r535_gsp_rpc_handle_reply(gsp, fn, policy, gsp_rpc_len); } static void @@ -797,7 +810,7 @@ r535_gsp_rpc_rm_free(struct nvkm_gsp_object *object) rpc->params.hRoot = client->object.handle; rpc->params.hObjectParent = 0; rpc->params.hObjectOld = object->handle; - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } static void @@ -815,7 +828,7 @@ r535_gsp_rpc_rm_alloc_push(struct nvkm_gsp_object *object, void *params) struct nvkm_gsp *gsp = object->client->gsp; void *ret = NULL; - rpc = nvkm_gsp_rpc_push(gsp, rpc, true, sizeof(*rpc)); + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, sizeof(*rpc)); if (IS_ERR_OR_NULL(rpc)) return rpc; @@ -876,7 +889,7 @@ r535_gsp_rpc_rm_ctrl_push(struct nvkm_gsp_object *object, void **params, u32 rep struct nvkm_gsp *gsp = object->client->gsp; int ret = 0; - rpc = nvkm_gsp_rpc_push(gsp, rpc, true, repc); + rpc = nvkm_gsp_rpc_push(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV, repc); if (IS_ERR_OR_NULL(rpc)) { *params = NULL; return PTR_ERR(rpc); @@ -948,8 +961,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 payload_size) } static void * -r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, - u32 gsp_rpc_len) +r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, + enum nvkm_gsp_rpc_reply_policy policy, u32 gsp_rpc_len) { struct nvfw_gsp_rpc *rpc = to_gsp_hdr(payload, rpc); struct r535_gsp_msg *msg = to_gsp_hdr(rpc, msg); @@ -967,7 +980,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, rpc->length = sizeof(*rpc) + max_payload_size; msg->checksum = rpc->length; - repv = r535_gsp_rpc_send(gsp, payload, false, 0); + repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0); if (IS_ERR(repv)) goto done; @@ -988,7 +1001,7 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, memcpy(next, payload, size); - repv = r535_gsp_rpc_send(gsp, next, false, 0); + repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); 
if (IS_ERR(repv)) goto done; @@ -997,20 +1010,10 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, bool wait, } /* Wait for reply. */ - rpc = r535_gsp_msg_recv(gsp, fn, payload_size + - sizeof(*rpc)); - if (!IS_ERR_OR_NULL(rpc)) { - if (wait) { - repv = rpc->data; - } else { - nvkm_gsp_rpc_done(gsp, rpc); - repv = NULL; - } - } else { - repv = wait ? rpc : NULL; - } + repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size + + sizeof(*rpc)); } else { - repv = r535_gsp_rpc_send(gsp, payload, wait, gsp_rpc_len); + repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len); } done: @@ -1327,7 +1330,7 @@ r535_gsp_rpc_unloading_guest_driver(struct nvkm_gsp *gsp, bool suspend) rpc->newLevel = NV2080_CTRL_GPU_SET_POWER_STATE_GPU_LEVEL_0; } - return nvkm_gsp_rpc_wr(gsp, rpc, true); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); } enum registry_type { @@ -1684,7 +1687,7 @@ r535_gsp_rpc_set_registry(struct nvkm_gsp *gsp) build_registry(gsp, rpc); - return nvkm_gsp_rpc_wr(gsp, rpc, false); + return nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT); fail: clean_registry(gsp); @@ -1893,7 +1896,7 @@ r535_gsp_rpc_set_system_info(struct nvkm_gsp *gsp) info->pciConfigMirrorSize = 0x001000; r535_gsp_acpi_info(gsp, &info->acpiMethodData); - return nvkm_gsp_rpc_wr(gsp, info, false); + return nvkm_gsp_rpc_wr(gsp, info, NVKM_GSP_RPC_REPLY_NOWAIT); } static int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c index 5f3c9c02a4c0..2789efe9c100 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c @@ -105,7 +105,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; } - ret = nvkm_gsp_rpc_wr(gsp, rpc, true); + ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); if (ret) return ret; From a738fa9105ac2897701ba4067c33e85faa27d1e2 Mon Sep 17 00:00:00 2001 From: Zhi Wang Date: Thu, 27 Feb 2025 01:35:54 +0000 Subject: [PATCH 0034/1627] drm/nouveau/nvkm: introduce new GSP reply policy NVKM_GSP_RPC_REPLY_POLL Some GSP RPC commands need a new reply policy: "caller don't care about the message content but want to make sure a reply is received". To support this case, a new reply policy is introduced. NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY is a large GSP RPC command. The actual required policy is NVKM_GSP_RPC_REPLY_POLL. This can be observed from the dump of the GSP message queue. After the large GSP RPC command is issued, GSP will write only an empty RPC header in the queue as the reply. Without this change, the policy "receiving the entire message" is used for NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY. This causes the timeout of receiving the returned GSP message in the suspend/resume path. Introduce the new reply policy NVKM_GSP_RPC_REPLY_POLL, which waits for the returned GSP message but discards it for the caller. Use the new policy NVKM_GSP_RPC_REPLY_POLL on the GSP RPC command NV_VGPU_MSG_FUNCTION_ALLOC_MEMORY. 
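For illustration only, a rough sketch of how a caller now selects among the
three reply policies; the variable names are illustrative, only the
nvkm_gsp_rpc_wr() call and the policy enum come from these patches:

  /* One of the following, depending on what the caller needs: */

  /* Fire and forget: return to the caller without waiting for a reply. */
  ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_NOWAIT);

  /* Wait for and receive the entire reply message. */
  ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV);

  /*
   * Wait for the reply (for ALLOC_MEMORY only an empty RPC header) and
   * discard it, as fbsr_memlist() does after this patch.
   */
  ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL);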
Fixes: 50f290053d79 ("drm/nouveau: support handling the return of large GSP message") Cc: Danilo Krummrich Cc: Alexandre Courbot Tested-by: Ben Skeggs Signed-off-by: Zhi Wang Signed-off-by: Danilo Krummrich Link: https://patchwork.freedesktop.org/patch/msgid/20250227013554.8269-3-zhiw@nvidia.com --- drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 4 ++++ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c | 3 +++ drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c | 2 +- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h index e5fe44589bbd..1c12854a8550 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h @@ -44,10 +44,14 @@ typedef void (*nvkm_gsp_event_func)(struct nvkm_gsp_event *, void *repv, u32 rep * NVKM_GSP_RPC_REPLY_RECV - If specified, wait and receive the entire GSP * RPC message after the GSP RPC command is issued. * + * NVKM_GSP_RPC_REPLY_POLL - If specified, wait for the specific reply and + * discard the reply before returning to the caller. + * */ enum nvkm_gsp_rpc_reply_policy { NVKM_GSP_RPC_REPLY_NOWAIT = 0, NVKM_GSP_RPC_REPLY_RECV, + NVKM_GSP_RPC_REPLY_POLL, }; struct nvkm_gsp { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index f73dcc3e1c0d..969f6b921fdb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -602,6 +602,9 @@ r535_gsp_rpc_handle_reply(struct nvkm_gsp *gsp, u32 fn, else repv = reply; break; + case NVKM_GSP_RPC_REPLY_POLL: + repv = r535_gsp_msg_recv(gsp, fn, 0); + break; } return repv; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c index 2789efe9c100..35ba1798ee6e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/r535.c @@ -105,7 +105,7 @@ fbsr_memlist(struct nvkm_gsp_device *device, u32 handle, enum nvkm_memory_target rpc->pteDesc.pte_pde[i].pte = (phys >> 12) + i; } - ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_RECV); + ret = nvkm_gsp_rpc_wr(gsp, rpc, NVKM_GSP_RPC_REPLY_POLL); if (ret) return ret; From ce468a7b63f1e4e2b09f951ca0a7c8d402fed746 Mon Sep 17 00:00:00 2001 From: Charles Han Date: Wed, 5 Mar 2025 18:21:07 +0800 Subject: [PATCH 0035/1627] drm/vc4: plane: fix inconsistent indenting warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix below inconsistent indenting smatch warning. 
smatch warnings: drivers/gpu/drm/vc4/vc4_plane.c:2083 vc6_plane_mode_set() warn: inconsistent indenting Signed-off-by: Charles Han Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20250305102107.2595-1-hanchunchao@inspur.com --- drivers/gpu/drm/vc4/vc4_plane.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index c5e84d3494d2..056d344c5411 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -2080,7 +2080,7 @@ static int vc6_plane_mode_set(struct drm_plane *plane, /* HPPF plane 1 */ vc4_dlist_write(vc4_state, kernel); /* VPPF plane 1 */ - vc4_dlist_write(vc4_state, kernel); + vc4_dlist_write(vc4_state, kernel); } } From 4da1fb61e02a783fdd7eb725ea03d897b8ef19ea Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 7 Mar 2025 17:14:28 -0800 Subject: [PATCH 0036/1627] drm/gpusvm: Fix kernel-doc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Due to wrong `.. kernel-doc` directive in Documentation/gpu/rfc/gpusvm.rst the documentation was actually not parsing anything from drivers/gpu/drm/drm_gpusvm.c. This fixes the kernel-doc include and all warnings/errors created when doing so. Cc: Simona Vetter Cc: Dave Airlie Cc: Christian König Cc: dri-devel@lists.freedesktop.org Cc: Matthew Brost Cc: Thomas Hellström Reported-by: Stephen Rothwell Closes: https://lore.kernel.org/intel-xe/20250307195239.57abcd2d@canb.auug.org.au/ Fixes: 99624bdff867 ("drm/gpusvm: Add support for GPU Shared Virtual Memory") Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20250307-fix-svm-kerneldoc-v2-1-03c74b199620@intel.com Signed-off-by: Lucas De Marchi --- Documentation/gpu/rfc/gpusvm.rst | 15 ++-- drivers/gpu/drm/drm_gpusvm.c | 124 +++++++++++++++++-------------- 2 files changed, 79 insertions(+), 60 deletions(-) diff --git a/Documentation/gpu/rfc/gpusvm.rst b/Documentation/gpu/rfc/gpusvm.rst index 073e46065d9c..bcf66a8137a6 100644 --- a/Documentation/gpu/rfc/gpusvm.rst +++ b/Documentation/gpu/rfc/gpusvm.rst @@ -67,14 +67,19 @@ Agreed upon design principles Overview of baseline design =========================== -Baseline design is simple as possible to get a working basline in which can be -built upon. - -.. kernel-doc:: drivers/gpu/drm/xe/drm_gpusvm.c +.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c :doc: Overview + +.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c :doc: Locking - :doc: Migrataion + +.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c + :doc: Migration + +.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c :doc: Partial Unmapping of Ranges + +.. kernel-doc:: drivers/gpu/drm/drm_gpusvm.c :doc: Examples Possible future design features diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index f314f5c4af0f..2451c816edd5 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -23,37 +23,42 @@ * DOC: Overview * * GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering Manager (DRM) - * - * The GPU SVM layer is a component of the DRM framework designed to manage shared - * virtual memory between the CPU and GPU. It enables efficient data exchange and - * processing for GPU-accelerated applications by allowing memory sharing and + * is a component of the DRM framework designed to manage shared virtual memory + * between the CPU and GPU. 
It enables efficient data exchange and processing + * for GPU-accelerated applications by allowing memory sharing and * synchronization between the CPU's and GPU's virtual address spaces. * * Key GPU SVM Components: - * - Notifiers: Notifiers: Used for tracking memory intervals and notifying the - * GPU of changes, notifiers are sized based on a GPU SVM - * initialization parameter, with a recommendation of 512M or - * larger. They maintain a Red-BlacK tree and a list of ranges that - * fall within the notifier interval. Notifiers are tracked within - * a GPU SVM Red-BlacK tree and list and are dynamically inserted - * or removed as ranges within the interval are created or - * destroyed. - * - Ranges: Represent memory ranges mapped in a DRM device and managed - * by GPU SVM. They are sized based on an array of chunk sizes, which - * is a GPU SVM initialization parameter, and the CPU address space. - * Upon GPU fault, the largest aligned chunk that fits within the - * faulting CPU address space is chosen for the range size. Ranges are - * expected to be dynamically allocated on GPU fault and removed on an - * MMU notifier UNMAP event. As mentioned above, ranges are tracked in - * a notifier's Red-Black tree. - * - Operations: Define the interface for driver-specific GPU SVM operations - * such as range allocation, notifier allocation, and - * invalidations. - * - Device Memory Allocations: Embedded structure containing enough information - * for GPU SVM to migrate to / from device memory. - * - Device Memory Operations: Define the interface for driver-specific device - * memory operations release memory, populate pfns, - * and copy to / from device memory. + * + * - Notifiers: + * Used for tracking memory intervals and notifying the GPU of changes, + * notifiers are sized based on a GPU SVM initialization parameter, with a + * recommendation of 512M or larger. They maintain a Red-BlacK tree and a + * list of ranges that fall within the notifier interval. Notifiers are + * tracked within a GPU SVM Red-BlacK tree and list and are dynamically + * inserted or removed as ranges within the interval are created or + * destroyed. + * - Ranges: + * Represent memory ranges mapped in a DRM device and managed by GPU SVM. + * They are sized based on an array of chunk sizes, which is a GPU SVM + * initialization parameter, and the CPU address space. Upon GPU fault, + * the largest aligned chunk that fits within the faulting CPU address + * space is chosen for the range size. Ranges are expected to be + * dynamically allocated on GPU fault and removed on an MMU notifier UNMAP + * event. As mentioned above, ranges are tracked in a notifier's Red-Black + * tree. + * + * - Operations: + * Define the interface for driver-specific GPU SVM operations such as + * range allocation, notifier allocation, and invalidations. + * + * - Device Memory Allocations: + * Embedded structure containing enough information for GPU SVM to migrate + * to / from device memory. + * + * - Device Memory Operations: + * Define the interface for driver-specific device memory operations + * release memory, populate pfns, and copy to / from device memory. * * This layer provides interfaces for allocating, mapping, migrating, and * releasing memory ranges between the CPU and GPU. It handles all core memory @@ -63,14 +68,18 @@ * below. * * Expected Driver Components: - * - GPU page fault handler: Used to create ranges and notifiers based on the - * fault address, optionally migrate the range to - * device memory, and create GPU bindings. 
- * - Garbage collector: Used to unmap and destroy GPU bindings for ranges. - * Ranges are expected to be added to the garbage collector - * upon a MMU_NOTIFY_UNMAP event in notifier callback. - * - Notifier callback: Used to invalidate and DMA unmap GPU bindings for - * ranges. + * + * - GPU page fault handler: + * Used to create ranges and notifiers based on the fault address, + * optionally migrate the range to device memory, and create GPU bindings. + * + * - Garbage collector: + * Used to unmap and destroy GPU bindings for ranges. Ranges are expected + * to be added to the garbage collector upon a MMU_NOTIFY_UNMAP event in + * notifier callback. + * + * - Notifier callback: + * Used to invalidate and DMA unmap GPU bindings for ranges. */ /** @@ -83,9 +92,9 @@ * range RB tree and list, as well as the range's DMA mappings and sequence * number. GPU SVM manages all necessary locking and unlocking operations, * except for the recheck range's pages being valid - * (drm_gpusvm_range_pages_valid) when the driver is committing GPU bindings. This - * lock corresponds to the 'driver->update' lock mentioned in the HMM - * documentation (TODO: Link). Future revisions may transition from a GPU SVM + * (drm_gpusvm_range_pages_valid) when the driver is committing GPU bindings. + * This lock corresponds to the ``driver->update`` lock mentioned in + * Documentation/mm/hmm.rst. Future revisions may transition from a GPU SVM * global lock to a per-notifier lock if finer-grained locking is deemed * necessary. * @@ -102,11 +111,11 @@ * DOC: Migration * * The migration support is quite simple, allowing migration between RAM and - * device memory at the range granularity. For example, GPU SVM currently does not - * support mixing RAM and device memory pages within a range. This means that upon GPU - * fault, the entire range can be migrated to device memory, and upon CPU fault, the - * entire range is migrated to RAM. Mixed RAM and device memory storage within a range - * could be added in the future if required. + * device memory at the range granularity. For example, GPU SVM currently does + * not support mixing RAM and device memory pages within a range. This means + * that upon GPU fault, the entire range can be migrated to device memory, and + * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device + * memory storage within a range could be added in the future if required. * * The reasoning for only supporting range granularity is as follows: it * simplifies the implementation, and range sizes are driver-defined and should @@ -119,11 +128,11 @@ * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting * in MMU_NOTIFY_UNMAP event) presents several challenges, with the main one * being that a subset of the range still has CPU and GPU mappings. If the - * backing store for the range is in device memory, a subset of the backing store has - * references. One option would be to split the range and device memory backing store, - * but the implementation for this would be quite complicated. Given that - * partial unmappings are rare and driver-defined range sizes are relatively - * small, GPU SVM does not support splitting of ranges. + * backing store for the range is in device memory, a subset of the backing + * store has references. One option would be to split the range and device + * memory backing store, but the implementation for this would be quite + * complicated. 
Given that partial unmappings are rare and driver-defined range + * sizes are relatively small, GPU SVM does not support splitting of ranges. * * With no support for range splitting, upon partial unmapping of a range, the * driver is expected to invalidate and destroy the entire range. If the range @@ -144,6 +153,8 @@ * * 1) GPU page fault handler * + * .. code-block:: c + * * int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range) * { * int err = 0; @@ -208,7 +219,9 @@ * return err; * } * - * 2) Garbage Collector. + * 2) Garbage Collector + * + * .. code-block:: c * * void __driver_garbage_collector(struct drm_gpusvm *gpusvm, * struct drm_gpusvm_range *range) @@ -231,7 +244,9 @@ * __driver_garbage_collector(gpusvm, range); * } * - * 3) Notifier callback. + * 3) Notifier callback + * + * .. code-block:: c * * void driver_invalidation(struct drm_gpusvm *gpusvm, * struct drm_gpusvm_notifier *notifier, @@ -499,7 +514,7 @@ drm_gpusvm_notifier_invalidate(struct mmu_interval_notifier *mni, return true; } -/** +/* * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM */ static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = { @@ -2055,7 +2070,6 @@ err_out: /** * drm_gpusvm_range_evict - Evict GPU SVM range - * @pagemap: Pointer to the GPU SVM structure * @range: Pointer to the GPU SVM range to be removed * * This function evicts the specified GPU SVM range. This function will not @@ -2146,8 +2160,8 @@ static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf) return err ? VM_FAULT_SIGBUS : 0; } -/** - * drm_gpusvm_pagemap_ops() - Device page map operations for GPU SVM +/* + * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM */ static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = { .page_free = drm_gpusvm_page_free, From 629067565c8fa0cff7b4ac3dbd18e21e0d185c32 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sat, 8 Mar 2025 23:43:56 +0000 Subject: [PATCH 0037/1627] drm/gma500/psb_intel_modes: Remove unused psb_intel_ddc_probe psb_intel_ddc_probe() was added in 2011 by commit 89c78134cc54 ("gma500: Add Poulsbo support") but has remained unused (probably because drm_get_edid is used instead). Remove it. Signed-off-by: Dr. 
David Alan Gilbert Signed-off-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250308234356.255114-1-linux@treblig.org --- drivers/gpu/drm/gma500/psb_intel_drv.h | 1 - drivers/gpu/drm/gma500/psb_intel_modes.c | 31 ------------------------ 2 files changed, 32 deletions(-) diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h index 9dc9dcd1b09f..979ea8ecf0d5 100644 --- a/drivers/gpu/drm/gma500/psb_intel_drv.h +++ b/drivers/gpu/drm/gma500/psb_intel_drv.h @@ -182,7 +182,6 @@ struct gma_i2c_chan *gma_i2c_create(struct drm_device *dev, const u32 reg, void gma_i2c_destroy(struct gma_i2c_chan *chan); int psb_intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter); -extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter); extern void psb_intel_crtc_init(struct drm_device *dev, int pipe, struct psb_intel_mode_device *mode_dev); diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c index 8be0ec340de5..45b10f30a2a9 100644 --- a/drivers/gpu/drm/gma500/psb_intel_modes.c +++ b/drivers/gpu/drm/gma500/psb_intel_modes.c @@ -11,37 +11,6 @@ #include "psb_intel_drv.h" -/** - * psb_intel_ddc_probe - * @adapter: Associated I2C adaptor - */ -bool psb_intel_ddc_probe(struct i2c_adapter *adapter) -{ - u8 out_buf[] = { 0x0, 0x0 }; - u8 buf[2]; - int ret; - struct i2c_msg msgs[] = { - { - .addr = 0x50, - .flags = 0, - .len = 1, - .buf = out_buf, - }, - { - .addr = 0x50, - .flags = I2C_M_RD, - .len = 1, - .buf = buf, - } - }; - - ret = i2c_transfer(adapter, msgs, 2); - if (ret == 2) - return true; - - return false; -} - /** * psb_intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use From 12ec4f30fcab97747f9df04c0078dbacceb0900e Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sat, 8 Mar 2025 23:44:28 +0000 Subject: [PATCH 0038/1627] drm/gma500: Remove unused psb_mmu_virtual_to_pfn psb_mmu_virtual_to_pfn() was added in 2011 by commit 8c8f1c958ab5 ("gma500: introduce the GTT and MMU handling logic") but hasn't been used. Remove it. Signed-off-by: Dr. 
David Alan Gilbert Signed-off-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250308234428.255164-1-linux@treblig.org --- drivers/gpu/drm/gma500/mmu.c | 41 ------------------------------------ drivers/gpu/drm/gma500/mmu.h | 2 -- 2 files changed, 43 deletions(-) diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index 4d78b33eaa82..e6753282e70e 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c @@ -730,44 +730,3 @@ out: return ret; } - -int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, - unsigned long *pfn) -{ - int ret; - struct psb_mmu_pt *pt; - uint32_t tmp; - spinlock_t *lock = &pd->driver->lock; - - down_read(&pd->driver->sem); - pt = psb_mmu_pt_map_lock(pd, virtual); - if (!pt) { - uint32_t *v; - - spin_lock(lock); - v = kmap_atomic(pd->p); - tmp = v[psb_mmu_pd_index(virtual)]; - kunmap_atomic(v); - spin_unlock(lock); - - if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || - !(pd->invalid_pte & PSB_PTE_VALID)) { - ret = -EINVAL; - goto out; - } - ret = 0; - *pfn = pd->invalid_pte >> PAGE_SHIFT; - goto out; - } - tmp = pt->v[psb_mmu_pt_index(virtual)]; - if (!(tmp & PSB_PTE_VALID)) { - ret = -EINVAL; - } else { - ret = 0; - *pfn = tmp >> PAGE_SHIFT; - } - psb_mmu_pt_unmap_unlock(pt); -out: - up_read(&pd->driver->sem); - return ret; -} diff --git a/drivers/gpu/drm/gma500/mmu.h b/drivers/gpu/drm/gma500/mmu.h index d4b5720ef08e..e6d39703718c 100644 --- a/drivers/gpu/drm/gma500/mmu.h +++ b/drivers/gpu/drm/gma500/mmu.h @@ -71,8 +71,6 @@ extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn, unsigned long address, uint32_t num_pages, int type); -extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual, - unsigned long *pfn); extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context); extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages, unsigned long address, uint32_t num_pages, From 57145afa3326947154c3a890b1118774b55212a0 Mon Sep 17 00:00:00 2001 From: Miguel Ojeda Date: Mon, 3 Mar 2025 10:32:42 +0100 Subject: [PATCH 0039/1627] drm/panic: clean Clippy warning Clippy warns: error: manual implementation of an assign operation --> drivers/gpu/drm/drm_panic_qr.rs:418:25 | 418 | self.carry = self.carry % pow; | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: replace it with: `self.carry %= pow` | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#assign_op_pattern Thus clean it up. 
Fixes: dbed4a797e00 ("drm/panic: Better binary encoding in QR code") Signed-off-by: Miguel Ojeda Reviewed-by: Alice Ryhl Reviewed-by: Jocelyn Falempe Signed-off-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250303093242.1011790-1-ojeda@kernel.org --- drivers/gpu/drm/drm_panic_qr.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index 62cb8a162483..3b0dd59781d4 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -415,7 +415,7 @@ impl Iterator for SegmentIterator<'_> { self.carry_len -= out_len; let pow = u64::pow(10, self.carry_len as u32); let out = (self.carry / pow) as u16; - self.carry = self.carry % pow; + self.carry %= pow; Some((out, NUM_CHARS_BITS[out_len])) } } From 6efda95a66c5651bb30c948d876b7239df9ec384 Mon Sep 17 00:00:00 2001 From: Vignesh Raman Date: Wed, 5 Feb 2025 19:17:47 +0530 Subject: [PATCH 0040/1627] MAINTAINERS: Update drm/ci maintainers Update drm/ci maintainer entries: * Add myself as drm/ci maintainer. * Update Helen's email address. Signed-off-by: Vignesh Raman Acked-by: Helen Koike Acked-by: Daniel Stone Link: https://patchwork.freedesktop.org/patch/msgid/20250205134811.2002718-1-vignesh.raman@collabora.com --- MAINTAINERS | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index d1ac30eae9fa..90e5b92d2382 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8036,7 +8036,8 @@ F: drivers/gpu/drm/ttm/ F: include/drm/ttm/ DRM AUTOMATED TESTING -M: Helen Koike +M: Helen Koike +M: Vignesh Raman L: dri-devel@lists.freedesktop.org S: Maintained T: git https://gitlab.freedesktop.org/drm/misc/kernel.git From a30d96ea656c926f908391dcd67fb6b5e285b1a3 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 6 Mar 2025 14:08:02 -0800 Subject: [PATCH 0041/1627] drm/xe: Remove pointless gen11 assertions xe driver doesn't really work in gen11. Stop asserting for >= 11, as it would likely explode anyway if tried on such platforms. Reviewed-by: Jonathan Cavitt Reviewed-by: Tejas Upadhyay Link: https://patchwork.freedesktop.org/patch/msgid/20250306-drop-gen-v1-1-03683e56006a@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_force_wake.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 4f6784e5abf8..8a5cba22b586 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -49,9 +49,6 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw) fw->gt = gt; spin_lock_init(&fw->lock); - /* Assuming gen11+ so assert this assumption is correct */ - xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); - if (xe->info.graphics_verx100 >= 1270) { init_domain(fw, XE_FW_DOMAIN_ID_GT, FORCEWAKE_GT, @@ -67,9 +64,6 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) { int i, j; - /* Assuming gen11+ so assert this assumption is correct */ - xe_gt_assert(gt, GRAPHICS_VER(gt_to_xe(gt)) >= 11); - if (!xe_gt_is_media_type(gt)) init_domain(fw, XE_FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER, From 89f8d10f981ed1236eeb9705510b877c82cff9a1 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 6 Mar 2025 14:08:03 -0800 Subject: [PATCH 0042/1627] drm/xe: Remove GEN11 prefixes from documentation The registers are already named without the GEN11 prefix. Do the same in the memirq documentation. 
Reviewed-by: Jonathan Cavitt Reviewed-by: Tejas Upadhyay Link: https://patchwork.freedesktop.org/patch/msgid/20250306-drop-gen-v1-2-03683e56006a@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_memirq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_memirq.c b/drivers/gpu/drm/xe/xe_memirq.c index 404fa2a456d5..49c45ec3e83c 100644 --- a/drivers/gpu/drm/xe/xe_memirq.c +++ b/drivers/gpu/drm/xe/xe_memirq.c @@ -86,7 +86,7 @@ static const char *guc_name(struct xe_guc *guc) * This object needs to be 4KiB aligned. * * - _`Interrupt Source Report Page`: this is the equivalent of the - * GEN11_GT_INTR_DWx registers, with each bit in those registers being + * GT_INTR_DWx registers, with each bit in those registers being * mapped to a byte here. The offsets are the same, just bytes instead * of bits. This object needs to be cacheline aligned. * From eb4796d8625902adfd0bc7226306afcde617f7c9 Mon Sep 17 00:00:00 2001 From: Gustavo Sousa Date: Thu, 27 Feb 2025 16:14:25 -0300 Subject: [PATCH 0043/1627] drm/i915/xe3lpd: Map POWER_DOMAIN_AUDIO_PLAYBACK to DC_off In Xe3_LPD, display audio has the core audio logic located in PG0 and per-transcoder logic in the same power well that provides power for the transcoder [1]. For stuff like audio device enumeration, we need to ensure that PG0 is turned on. For playback, we additionally need the transcoder's power well to be enabled. That essentially means that, for audio playback, there isn't a special power well that needs to be enabled, because modeset sequences will ensure that the required power wells are enabled. That said, there might be cases where PG0 could be disabled due to display entering DC6 while the audio driver tries to interact with the graphics driver for stuff like audio device enumeration. We recently hit that kind of scenario, where "aplay -l" was being used to enumerate audio devices on a PTL machine with PSR enabled and no external displays attached. Since intel_audio_component_get_power() uses POWER_DOMAIN_AUDIO_PLAYBACK, make sure to map that power domain to DC_off power well, so that we disable dynamic DC states (which includes DC6) while the audio driver needs display audio power. [1] The core-audio vs per-transcoder logic split is not really new in Xe3_LPD. This is also true for previous display generations. We need to figure out the correct version where this split happened so that we can apply fixes in the current power domain mapping. 
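As a rough illustration of the intended runtime flow (the helper names below are placeholders, not the exact i915 entry points), taking the playback domain now implicitly holds the DC_off well:

    /* Hypothetical sketch: the audio driver asks display for power. */
    void audio_get_display_power(struct intel_display *display)
    {
    	/*
    	 * With POWER_DOMAIN_AUDIO_PLAYBACK listed in the DC_off power
    	 * well, taking it disables dynamic DC states (including DC6)
    	 * and keeps PG0 powered while audio enumerates or plays back.
    	 */
    	display_power_get(display, POWER_DOMAIN_AUDIO_PLAYBACK);
    }

    void audio_put_display_power(struct intel_display *display)
    {
    	display_power_put(display, POWER_DOMAIN_AUDIO_PLAYBACK);
    }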
Bspec: 72519 Reviewed-by: Kai Vehmanen Link: https://patchwork.freedesktop.org/patch/msgid/20250227-xe3lpd-power-domain-audio-playback-v1-1-5765f21da977@intel.com Signed-off-by: Gustavo Sousa --- drivers/gpu/drm/i915/display/intel_display_power_map.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index e80e1fd611ca..ab1163744bc5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1696,6 +1696,7 @@ I915_DECL_PW_DOMAINS(xe3lpd_pwdoms_dc_off, XE3LPD_PW_C_POWER_DOMAINS, XE3LPD_PW_D_POWER_DOMAINS, POWER_DOMAIN_AUDIO_MMIO, + POWER_DOMAIN_AUDIO_PLAYBACK, POWER_DOMAIN_INIT); static const struct i915_power_well_desc xe3lpd_power_wells_dcoff[] = { From 8aa8c2d4214e1771c32101d70740002662d31bb7 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Thu, 6 Mar 2025 20:00:05 -0800 Subject: [PATCH 0044/1627] drm/xe/rtp: Drop sentinels from arg to xe_rtp_process_to_sr() There's a mismatch in the API: while xe_rtp_process_to_sr() processes entries until an entry without name, the active tracking with xe_rtp_process_ctx_enable_active_tracking() needs to use the number of elements. The number of elements is taken everywhere using ARRAY_SIZE(), but that will have one entry too many. This leads to the following warning, as reported by lkp: drivers/gpu/drm/xe/xe_tuning.c: In function 'xe_tuning_dump': >> include/drm/drm_print.h:228:31: warning: '%s' directive argument is null [-Wformat-overflow=] 228 | drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__) | ^~~~~~ drivers/gpu/drm/xe/xe_tuning.c:226:17: note: in expansion of macro 'drm_printf_indent' 226 | drm_printf_indent(p, 1, "%s\n", engine_tunings[idx].name); | ^~~~~~~~~~~~~~~~~ That's because it will still process the last entry when tracking the active tunings. The same issue exists in the WAs. Change xe_rtp_process_to_sr() to also take the number of elements so the empty entry can be removed and the warning should go away. Fixing it on the active-tracking side would be more fragile, as it would need a `- 1` everywhere and would continue to use a different approach for the number of elements. Aside from the warning, it's a non-issue as there would always be enough bits allocated and the last entry would never be active since xe_rtp_process_to_sr() stops on the sentinel.
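For illustration only, a minimal stand-alone sketch (made-up names, not the actual xe RTP types) of why ARRAY_SIZE() on a sentinel-terminated table counts one entry more than the sentinel-based walk:

    struct entry { const char *name; };

    static const struct entry table[] = {
    	{ .name = "a" },
    	{ .name = "b" },
    	{}	/* sentinel: entry without a name */
    };

    /* The sentinel-based walk sees 2 entries... */
    static int count_named(const struct entry *e)
    {
    	int n = 0;

    	for (; e->name; e++)
    		n++;
    	return n;
    }

    /* ...while ARRAY_SIZE(table) == 3, since it includes the sentinel. */

Passing the element count explicitly lets the sentinel entries be dropped, so the processing loop and the active-tracking code agree on the number of entries.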
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202503021906.P2MwAvyK-lkp@intel.com/ Cc: Tvrtko Ursulin Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20250306-fix-print-warning-v1-1-979c3dc03c0d@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/tests/xe_rtp_test.c | 2 +- drivers/gpu/drm/xe/xe_hw_engine.c | 6 ++---- drivers/gpu/drm/xe/xe_reg_whitelist.c | 4 ++-- drivers/gpu/drm/xe/xe_rtp.c | 6 +++++- drivers/gpu/drm/xe/xe_rtp.h | 2 +- drivers/gpu/drm/xe/xe_tuning.c | 12 ++++-------- drivers/gpu/drm/xe/xe_wa.c | 12 +++--------- 7 files changed, 18 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c index 36a3b5420fef..b0254b014fe4 100644 --- a/drivers/gpu/drm/xe/tests/xe_rtp_test.c +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -320,7 +320,7 @@ static void xe_rtp_process_to_sr_tests(struct kunit *test) count_rtp_entries++; xe_rtp_process_ctx_enable_active_tracking(&ctx, &active, count_rtp_entries); - xe_rtp_process_to_sr(&ctx, param->entries, reg_sr); + xe_rtp_process_to_sr(&ctx, param->entries, count_rtp_entries, reg_sr); xa_for_each(®_sr->xa, idx, sre) { if (idx == param->expected_reg.addr) diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index fc447751fe78..223b95de388c 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -400,10 +400,9 @@ xe_hw_engine_setup_default_lrc_state(struct xe_hw_engine *hwe) PREEMPT_GPGPU_THREAD_GROUP_LEVEL)), XE_RTP_ENTRY_FLAG(FOREACH_ENGINE) }, - {} }; - xe_rtp_process_to_sr(&ctx, lrc_setup, &hwe->reg_lrc); + xe_rtp_process_to_sr(&ctx, lrc_setup, ARRAY_SIZE(lrc_setup), &hwe->reg_lrc); } static void @@ -459,10 +458,9 @@ hw_engine_setup_default_state(struct xe_hw_engine *hwe) XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0), CS_PRIORITY_MEM_READ, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, - {} }; - xe_rtp_process_to_sr(&ctx, engine_entries, &hwe->reg_sr); + xe_rtp_process_to_sr(&ctx, engine_entries, ARRAY_SIZE(engine_entries), &hwe->reg_sr); } static const struct engine_info *find_engine_info(enum xe_engine_class class, int instance) diff --git a/drivers/gpu/drm/xe/xe_reg_whitelist.c b/drivers/gpu/drm/xe/xe_reg_whitelist.c index edab5d4e3ba5..23f6c81d9994 100644 --- a/drivers/gpu/drm/xe/xe_reg_whitelist.c +++ b/drivers/gpu/drm/xe/xe_reg_whitelist.c @@ -88,7 +88,6 @@ static const struct xe_rtp_entry_sr register_whitelist[] = { RING_FORCE_TO_NONPRIV_ACCESS_RD | RING_FORCE_TO_NONPRIV_RANGE_4)) }, - {} }; static void whitelist_apply_to_hwe(struct xe_hw_engine *hwe) @@ -137,7 +136,8 @@ void xe_reg_whitelist_process_engine(struct xe_hw_engine *hwe) { struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(hwe); - xe_rtp_process_to_sr(&ctx, register_whitelist, &hwe->reg_whitelist); + xe_rtp_process_to_sr(&ctx, register_whitelist, ARRAY_SIZE(register_whitelist), + &hwe->reg_whitelist); whitelist_apply_to_hwe(hwe); } diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 7a1c78fdfc92..13bb62d3e615 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -237,6 +237,7 @@ static void rtp_mark_active(struct xe_device *xe, * the save-restore argument. * @ctx: The context for processing the table, with one of device, gt or hwe * @entries: Table with RTP definitions + * @n_entries: Number of entries to process, usually ARRAY_SIZE(entries) * @sr: Save-restore struct where matching rules execute the action. 
This can be * viewed as the "coalesced view" of multiple the tables. The bits for each * register set are expected not to collide with previously added entries @@ -247,6 +248,7 @@ static void rtp_mark_active(struct xe_device *xe, */ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, const struct xe_rtp_entry_sr *entries, + size_t n_entries, struct xe_reg_sr *sr) { const struct xe_rtp_entry_sr *entry; @@ -259,7 +261,9 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, if (IS_SRIOV_VF(xe)) return; - for (entry = entries; entry && entry->name; entry++) { + xe_assert(xe, entries); + + for (entry = entries; entry - entries < n_entries; entry++) { bool match = false; if (entry->flags & XE_RTP_ENTRY_FLAG_FOREACH_ENGINE) { diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h index 38b9f13bba5e..4fe736a11c42 100644 --- a/drivers/gpu/drm/xe/xe_rtp.h +++ b/drivers/gpu/drm/xe/xe_rtp.h @@ -430,7 +430,7 @@ void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx, void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, const struct xe_rtp_entry_sr *entries, - struct xe_reg_sr *sr); + size_t n_entries, struct xe_reg_sr *sr); void xe_rtp_process(struct xe_rtp_process_ctx *ctx, const struct xe_rtp_entry *entries); diff --git a/drivers/gpu/drm/xe/xe_tuning.c b/drivers/gpu/drm/xe/xe_tuning.c index 77bc958f5a42..49ddbda7cdef 100644 --- a/drivers/gpu/drm/xe/xe_tuning.c +++ b/drivers/gpu/drm/xe/xe_tuning.c @@ -85,8 +85,6 @@ static const struct xe_rtp_entry_sr gt_tunings[] = { XE_RTP_RULES(MEDIA_VERSION(2000)), XE_RTP_ACTIONS(SET(XE2LPM_SCRATCH3_LBCF, RWFLUSHALLEN)) }, - - {} }; static const struct xe_rtp_entry_sr engine_tunings[] = { @@ -100,7 +98,6 @@ static const struct xe_rtp_entry_sr engine_tunings[] = { ENGINE_CLASS(RENDER)), XE_RTP_ACTIONS(SET(SAMPLER_MODE, INDIRECT_STATE_BASE_ADDR_OVERRIDE)) }, - {} }; static const struct xe_rtp_entry_sr lrc_tunings[] = { @@ -138,8 +135,6 @@ static const struct xe_rtp_entry_sr lrc_tunings[] = { XE_RTP_ACTIONS(FIELD_SET(FF_MODE, VS_HIT_MAX_VALUE_MASK, REG_FIELD_PREP(VS_HIT_MAX_VALUE_MASK, 0x3f))) }, - - {} }; /** @@ -180,7 +175,7 @@ void xe_tuning_process_gt(struct xe_gt *gt) xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->tuning_active.gt, ARRAY_SIZE(gt_tunings)); - xe_rtp_process_to_sr(&ctx, gt_tunings, >->reg_sr); + xe_rtp_process_to_sr(&ctx, gt_tunings, ARRAY_SIZE(gt_tunings), >->reg_sr); } EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_gt); @@ -191,7 +186,8 @@ void xe_tuning_process_engine(struct xe_hw_engine *hwe) xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->tuning_active.engine, ARRAY_SIZE(engine_tunings)); - xe_rtp_process_to_sr(&ctx, engine_tunings, &hwe->reg_sr); + xe_rtp_process_to_sr(&ctx, engine_tunings, ARRAY_SIZE(engine_tunings), + &hwe->reg_sr); } EXPORT_SYMBOL_IF_KUNIT(xe_tuning_process_engine); @@ -210,7 +206,7 @@ void xe_tuning_process_lrc(struct xe_hw_engine *hwe) xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->tuning_active.lrc, ARRAY_SIZE(lrc_tunings)); - xe_rtp_process_to_sr(&ctx, lrc_tunings, &hwe->reg_lrc); + xe_rtp_process_to_sr(&ctx, lrc_tunings, ARRAY_SIZE(lrc_tunings), &hwe->reg_lrc); } void xe_tuning_dump(struct xe_gt *gt, struct drm_printer *p) diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 55eb453f4b1f..a25afb757f70 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -279,8 +279,6 @@ static const struct xe_rtp_entry_sr gt_was[] = { XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), RAMDFTUNIT_CLKGATE_DIS)), 
XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), }, - - {} }; static const struct xe_rtp_entry_sr engine_was[] = { @@ -624,8 +622,6 @@ static const struct xe_rtp_entry_sr engine_was[] = { FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) }, - - {} }; static const struct xe_rtp_entry_sr lrc_was[] = { @@ -825,8 +821,6 @@ static const struct xe_rtp_entry_sr lrc_was[] = { DIS_PARTIAL_AUTOSTRIP | DIS_AUTOSTRIP)) }, - - {} }; static __maybe_unused const struct xe_rtp_entry oob_was[] = { @@ -868,7 +862,7 @@ void xe_wa_process_gt(struct xe_gt *gt) xe_rtp_process_ctx_enable_active_tracking(&ctx, gt->wa_active.gt, ARRAY_SIZE(gt_was)); - xe_rtp_process_to_sr(&ctx, gt_was, >->reg_sr); + xe_rtp_process_to_sr(&ctx, gt_was, ARRAY_SIZE(gt_was), >->reg_sr); } EXPORT_SYMBOL_IF_KUNIT(xe_wa_process_gt); @@ -886,7 +880,7 @@ void xe_wa_process_engine(struct xe_hw_engine *hwe) xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.engine, ARRAY_SIZE(engine_was)); - xe_rtp_process_to_sr(&ctx, engine_was, &hwe->reg_sr); + xe_rtp_process_to_sr(&ctx, engine_was, ARRAY_SIZE(engine_was), &hwe->reg_sr); } /** @@ -903,7 +897,7 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe) xe_rtp_process_ctx_enable_active_tracking(&ctx, hwe->gt->wa_active.lrc, ARRAY_SIZE(lrc_was)); - xe_rtp_process_to_sr(&ctx, lrc_was, &hwe->reg_lrc); + xe_rtp_process_to_sr(&ctx, lrc_was, ARRAY_SIZE(lrc_was), &hwe->reg_lrc); } /** From d945cc876277851053c0cf37927c8d7bd9d0e880 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Fri, 7 Mar 2025 19:56:35 -0500 Subject: [PATCH 0045/1627] drm/xe/pm: Temporarily disable D3Cold on BMG Currently, many instability cases related to D3Cold -> D0 transition on BMG are under investigation. Among them some bad cases where the device is lost after 1 to 3 transitions from D3Cold to D0 on the runtime pm, with pcieport upstream bridge port link retrain failure. In other cases, it works fine, but with some sudden random memory corruptions after D3cold, that could be 0xffff missed ack on GT forcewake or GuC reload related failures. In some other cases though, D3Cold -> D0 works pretty reliably. It looks like it is a combination of GPU cards and Host boards at this point. So, there is no possible/available quirk at this time. This patch disables the D3Cold by default on BMG by reducing the vram_d3cold_threshold to 0. 
Users and developers who want to enable it are still able to do so via $ echo 300 > /sys/bus/pci/devices//vram_d3cold_threshold Fixes: 3adcf970dc7e ("drm/xe/bmg: Drop force_probe requirement") Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4037 Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4395 Link: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4396 Cc: Karthik Poosa Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20250308005636.1475420-1-rodrigo.vivi@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_pm.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 12200be7b43d..7b6b754ad6eb 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -277,6 +277,15 @@ int xe_pm_init_early(struct xe_device *xe) } ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ +static u32 vram_threshold_value(struct xe_device *xe) +{ + /* FIXME: D3Cold temporarily disabled by default on BMG */ + if (xe->info.platform == XE_BATTLEMAGE) + return 0; + + return DEFAULT_VRAM_THRESHOLD; +} + /** * xe_pm_init - Initialize Xe Power Management * @xe: xe device instance @@ -287,6 +296,7 @@ ALLOW_ERROR_INJECTION(xe_pm_init_early, ERRNO); /* See xe_pci_probe() */ */ int xe_pm_init(struct xe_device *xe) { + u32 vram_threshold; int err; /* For now suspend/resume is only allowed with GuC */ @@ -300,7 +310,8 @@ int xe_pm_init(struct xe_device *xe) if (err) return err; - err = xe_pm_set_vram_threshold(xe, DEFAULT_VRAM_THRESHOLD); + vram_threshold = vram_threshold_value(xe); + err = xe_pm_set_vram_threshold(xe, vram_threshold); if (err) return err; } From 1182bc74b39ba3d124b544dab22d5672fae54b67 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Mar 2025 11:13:59 +0000 Subject: [PATCH 0046/1627] drm/xe: Fix MOCS debugfs LNCF readout With only XE_FW_GT taken, LNCF registers read back as all zeroes, leading to a wild goose chase trying to figure out why the register programming is incorrect. Fix it by grabbing XE_FORCEWAKE_ALL for affected platforms. Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20250307111402.26577-2-tvrtko.ursulin@igalia.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_mocs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_mocs.c b/drivers/gpu/drm/xe/xe_mocs.c index 54d199b5cfb2..31dade91a089 100644 --- a/drivers/gpu/drm/xe/xe_mocs.c +++ b/drivers/gpu/drm/xe/xe_mocs.c @@ -781,7 +781,9 @@ void xe_mocs_dump(struct xe_gt *gt, struct drm_printer *p) flags = get_mocs_settings(xe, &table); xe_pm_runtime_get_noresume(xe); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + fw_ref = xe_force_wake_get(gt_to_fw(gt), + flags & HAS_LNCF_MOCS ? + XE_FORCEWAKE_ALL : XE_FW_GT); if (!fw_ref) goto err_fw; From 08ea901d0b8f6ea261d9936e03fa690540af0126 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Mar 2025 11:14:00 +0000 Subject: [PATCH 0047/1627] drm/xe: Fix ring flush invalidation Emit_flush_invalidate() is incorrectly marking the write to LRC_PPHWSP as a GGTT write and also writing an atypical ~0 dword as the payload. Fix it. While at it, drop the unused flags argument. 
Signed-off-by: Tvrtko Ursulin Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250307111402.26577-3-tvrtko.ursulin@igalia.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_ring_ops.c | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index d2f604aa96fa..3d1b4d3d788f 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -111,16 +111,13 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i) return i; } -static int emit_flush_invalidate(u32 flag, u32 *dw, int i) +static int emit_flush_invalidate(u32 *dw, int i) { - dw[i] = MI_FLUSH_DW; - dw[i] |= flag; - dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW | - MI_FLUSH_DW_STORE_INDEX; - - dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; + dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | + MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX; + dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR; + dw[i++] = 0; dw[i++] = 0; - dw[i++] = ~0U; return i; } @@ -413,7 +410,7 @@ static void emit_migration_job_gen12(struct xe_sched_job *job, if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) { /* XXX: Do we need this? Leaving for now. */ dw[i++] = preparser_disable(true); - i = emit_flush_invalidate(0, dw, i); + i = emit_flush_invalidate(dw, i); dw[i++] = preparser_disable(false); } From 52a237e8d6c4abcda40c71268ee6cec75aa62799 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Mar 2025 11:14:01 +0000 Subject: [PATCH 0048/1627] drm/xe: Pass flags directly to emit_flush_imm_ggtt This is more readable than the nameless booleans and will also come handy later. Signed-off-by: Tvrtko Ursulin Reviewed-by: Matt Roper Reviewed-by: Tejas Upadhyay Link: https://patchwork.freedesktop.org/patch/msgid/20250307111402.26577-4-tvrtko.ursulin@igalia.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_ring_ops.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index 3d1b4d3d788f..917fc16de866 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -90,11 +90,10 @@ static int emit_flush_dw(u32 *dw, int i) return i; } -static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb, - u32 *dw, int i) +static int emit_flush_imm_ggtt(u32 addr, u32 value, u32 flags, u32 *dw, int i) { dw[i++] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_IMM_DW | - (invalidate_tlb ? 
MI_INVALIDATE_TLB : 0); + flags; dw[i++] = addr | MI_FLUSH_DW_USE_GTT; dw[i++] = 0; dw[i++] = value; @@ -254,7 +253,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc if (job->ring_ops_flush_tlb) { dw[i++] = preparser_disable(true); i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), - seqno, true, dw, i); + seqno, MI_INVALIDATE_TLB, dw, i); dw[i++] = preparser_disable(false); } else { i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), @@ -270,7 +269,7 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc dw, i); } - i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); + i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i); i = emit_user_interrupt(dw, i); @@ -316,7 +315,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, if (job->ring_ops_flush_tlb) i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), - seqno, true, dw, i); + seqno, MI_INVALIDATE_TLB, dw, i); dw[i++] = preparser_disable(false); @@ -333,7 +332,7 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, dw, i); } - i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i); + i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, 0, dw, i); i = emit_user_interrupt(dw, i); From c36e3442ea1c4c63f9876486dd9091487a77c5f2 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Fri, 7 Mar 2025 11:14:02 +0000 Subject: [PATCH 0049/1627] drm/xe: Use correct type width for alignment in fb pinning code Plane->min_alignment returns an unsigned int so lets use that in the whole relevant call chain. Signed-off-by: Tvrtko Ursulin Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20250307111402.26577-5-tvrtko.ursulin@igalia.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/display/xe_fb_pin.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index 11a6b996d739..3df51cd4a86b 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -81,7 +81,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, struct i915_vma *vma, - u64 physical_alignment) + unsigned int alignment) { struct xe_device *xe = to_xe_device(fb->base.dev); struct xe_tile *tile0 = xe_device_get_root_tile(xe); @@ -107,7 +107,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_BO_FLAG_VRAM0 | XE_BO_FLAG_GGTT | XE_BO_FLAG_PAGETABLE, - physical_alignment); + alignment); else dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, dpt_size, ~0ull, @@ -115,7 +115,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_BO_FLAG_STOLEN | XE_BO_FLAG_GGTT | XE_BO_FLAG_PAGETABLE, - physical_alignment); + alignment); if (IS_ERR(dpt)) dpt = xe_bo_create_pin_map_at_aligned(xe, tile0, NULL, dpt_size, ~0ull, @@ -123,7 +123,7 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | XE_BO_FLAG_PAGETABLE, - physical_alignment); + alignment); if (IS_ERR(dpt)) return PTR_ERR(dpt); @@ -193,7 +193,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, struct i915_vma 
*vma, - u64 physical_alignment) + unsigned int alignment) { struct drm_gem_object *obj = intel_fb_bo(&fb->base); struct xe_bo *bo = gem_to_xe_bo(obj); @@ -276,7 +276,7 @@ out: static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, const struct i915_gtt_view *view, - u64 physical_alignment) + unsigned int alignment) { struct drm_device *dev = fb->base.dev; struct xe_device *xe = to_xe_device(dev); @@ -326,9 +326,9 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, vma->bo = bo; if (intel_fb_uses_dpt(&fb->base)) - ret = __xe_pin_fb_vma_dpt(fb, view, vma, physical_alignment); + ret = __xe_pin_fb_vma_dpt(fb, view, vma, alignment); else - ret = __xe_pin_fb_vma_ggtt(fb, view, vma, physical_alignment); + ret = __xe_pin_fb_vma_ggtt(fb, view, vma, alignment); if (ret) goto err_unpin; @@ -421,7 +421,7 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state, struct i915_vma *vma; struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); - u64 phys_alignment = plane->min_alignment(plane, fb, 0); + unsigned int alignment = plane->min_alignment(plane, fb, 0); if (reuse_vma(new_plane_state, old_plane_state)) return 0; @@ -429,7 +429,7 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state, /* We reject creating !SCANOUT fb's, so this is weird.. */ drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT)); - vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, phys_alignment); + vma = __xe_pin_fb_vma(intel_fb, &new_plane_state->view.gtt, alignment); if (IS_ERR(vma)) return PTR_ERR(vma); From b4b05e53b550a886b4754b87fd0dd2b304579e85 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Fri, 7 Mar 2025 11:03:07 -0500 Subject: [PATCH 0050/1627] drm/xe/guc_pc: Retry and wait longer for GuC PC start In a rare situation of thermal limit during resume, GuC can be slow and run into delays like this: xe 0000:00:02.0: [drm] GT1: excessive init time: 667ms! \ [status = 0x8002F034, timeouts = 0] xe 0000:00:02.0: [drm] GT1: excessive init time: \ [freq = 100MHz (req = 800MHz), before = 100MHz, \ perf_limit_reasons = 0x1C001000] xe 0000:00:02.0: [drm] *ERROR* GT1: GuC PC Start failed ------------[ cut here ]------------ xe 0000:00:02.0: [drm] GT1: Failed to start GuC PC: -EIO When this happens, it entirely blocks the GPU from being used. So, let's retry with a huge timeout in the hope it comes back. Also, let's collect some information on how long it usually takes in situations like this, so perhaps the time can be tuned later. 
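A hedged sketch of the two-stage wait pattern this introduces (placeholder helpers and device type, not the real GuC PC functions shown in the diff below; the 5ms/1000ms values mirror the ones added by the patch):

    #define SHORT_TIMEOUT_MS	5	/* normal-case wait */
    #define EXTENDED_TIMEOUT_MS	1000	/* last-chance wait, start path only */

    /* Stand-in: true if the ready state was reached within timeout_ms. */
    static bool wait_until_running(struct device *dev, unsigned int timeout_ms);

    static int start_with_retry(struct device *dev)
    {
    	if (wait_until_running(dev, SHORT_TIMEOUT_MS))
    		return 0;	/* came up in time, common case */

    	/* Warn, then give the thermally-limited case one long second chance. */
    	dev_warn(dev, "start taking longer than normal\n");

    	if (!wait_until_running(dev, EXTENDED_TIMEOUT_MS))
    		return -EIO;	/* still not running, give up */

    	return 0;	/* slow, but it recovered */
    }

The common path keeps paying only the short timeout; the long wait is entered only after the first one expires, which is why the extended value is used exclusively at start time.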
Cc: Vinay Belgaumkar Cc: Jonathan Cavitt Cc: John Harrison Reviewed-by: Jonathan Cavitt Link: https://patchwork.freedesktop.org/patch/msgid/20250307160307.1093391-1-rodrigo.vivi@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_guc_pc.c | 53 +++++++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 25040efa043f..85215313976c 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -6,6 +6,7 @@ #include "xe_guc_pc.h" #include +#include #include #include @@ -20,6 +21,7 @@ #include "xe_gt.h" #include "xe_gt_idle.h" #include "xe_gt_printk.h" +#include "xe_gt_throttle.h" #include "xe_gt_types.h" #include "xe_guc.h" #include "xe_guc_ct.h" @@ -50,6 +52,9 @@ #define LNL_MERT_FREQ_CAP 800 #define BMG_MERT_FREQ_CAP 2133 +#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ +#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ + /** * DOC: GuC Power Conservation (PC) * @@ -114,9 +119,10 @@ static struct iosys_map *pc_to_maps(struct xe_guc_pc *pc) FIELD_PREP(HOST2GUC_PC_SLPC_REQUEST_MSG_1_EVENT_ARGC, count)) static int wait_for_pc_state(struct xe_guc_pc *pc, - enum slpc_global_state state) + enum slpc_global_state state, + int timeout_ms) { - int timeout_us = 5000; /* rought 5ms, but no need for precision */ + int timeout_us = 1000 * timeout_ms; int slept, wait = 10; xe_device_assert_mem_access(pc_to_xe(pc)); @@ -165,7 +171,8 @@ static int pc_action_query_task_state(struct xe_guc_pc *pc) }; int ret; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) return -EAGAIN; /* Blocking here to ensure the results are ready before reading them */ @@ -188,7 +195,8 @@ static int pc_action_set_param(struct xe_guc_pc *pc, u8 id, u32 value) }; int ret; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) return -EAGAIN; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); @@ -209,7 +217,8 @@ static int pc_action_unset_param(struct xe_guc_pc *pc, u8 id) struct xe_guc_ct *ct = &pc_to_guc(pc)->ct; int ret; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) return -EAGAIN; ret = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); @@ -443,6 +452,15 @@ u32 xe_guc_pc_get_act_freq(struct xe_guc_pc *pc) return freq; } +static u32 get_cur_freq(struct xe_gt *gt) +{ + u32 freq; + + freq = xe_mmio_read32(>->mmio, RPNSWREQ); + freq = REG_FIELD_GET(REQ_RATIO_MASK, freq); + return decode_freq(freq); +} + /** * xe_guc_pc_get_cur_freq - Get Current requested frequency * @pc: The GuC PC @@ -466,10 +484,7 @@ int xe_guc_pc_get_cur_freq(struct xe_guc_pc *pc, u32 *freq) return -ETIMEDOUT; } - *freq = xe_mmio_read32(>->mmio, RPNSWREQ); - - *freq = REG_FIELD_GET(REQ_RATIO_MASK, *freq); - *freq = decode_freq(*freq); + *freq = get_cur_freq(gt); xe_force_wake_put(gt_to_fw(gt), fw_ref); return 0; @@ -1016,6 +1031,7 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) struct xe_gt *gt = pc_to_gt(pc); u32 size = PAGE_ALIGN(sizeof(struct slpc_shared_data)); unsigned int fw_ref; + ktime_t earlier; int ret; xe_gt_assert(gt, xe_device_uc_enabled(xe)); @@ -1040,14 +1056,25 @@ int xe_guc_pc_start(struct xe_guc_pc *pc) memset(pc->bo->vmap.vaddr, 0, size); slpc_shared_data_write(pc, header.size, size); + 
earlier = ktime_get(); ret = pc_action_reset(pc); if (ret) goto out; - if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING)) { - xe_gt_err(gt, "GuC PC Start failed\n"); - ret = -EIO; - goto out; + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_TIMEOUT_MS)) { + xe_gt_warn(gt, "GuC PC start taking longer than normal [freq = %dMHz (req = %dMHz), perf_limit_reasons = 0x%08X]\n", + xe_guc_pc_get_act_freq(pc), get_cur_freq(gt), + xe_gt_throttle_get_limit_reasons(gt)); + + if (wait_for_pc_state(pc, SLPC_GLOBAL_STATE_RUNNING, + SLPC_RESET_EXTENDED_TIMEOUT_MS)) { + xe_gt_err(gt, "GuC PC Start failed: Dynamic GT frequency control and GT sleep states are now disabled.\n"); + goto out; + } + + xe_gt_warn(gt, "GuC PC excessive start time: %lldms", + ktime_ms_delta(ktime_get(), earlier)); } ret = pc_init_freqs(pc); From 9f1e253d789649745db33a205969169033f078c9 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 4 Mar 2025 17:29:12 +0200 Subject: [PATCH 0051/1627] drm/i915/hpd: Track HPD pins instead of ports for HPD pulse events MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Track the HPD pin instead of the corresponding encoder ports for pending short/long HPD pulse events. This is how the pending hotplug events are tracked and there is no reason for tracking the pulse events differently. After this change intel_hpd_trigger_irq() will set the short pulse event pending for all encoders using the given HPD pin. This doesn't change the behavior, as atm in case of multiple (2) encoders sharing the same pin only one will have a pulse handler, so for other encoders without a pulse handler the event is ignored. Also setting the pulse event pending for all encoders using the HPD pin is what happens after an actual HPD IRQ, the effect of calling intel_hpd_trigger_irq() should match this. In a following change this also makes it simpler to block the handling of a short/long pulse event on an HPD pin for all the encoders using this HPD pin. 
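The bookkeeping change itself boils down to keying the pending-pulse bitmasks by HPD pin; a small illustrative sketch (simplified, not the exact i915 code):

    /* One pending bit per HPD pin instead of per encoder port. */
    static u32 long_hpd_pin_mask, short_hpd_pin_mask;

    static void note_hpd_pulse(enum hpd_pin pin, bool long_pulse)
    {
    	if (long_pulse)
    		long_hpd_pin_mask |= BIT(pin);
    	else
    		short_hpd_pin_mask |= BIT(pin);
    }

Because two encoders may share one pin, a pulse recorded this way is naturally seen by every encoder using that pin, matching what happens on a real HPD interrupt.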
Suggested-by: Ville Syrjälä Reviewed-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20250304152917.3407080-2-imre.deak@intel.com --- .../gpu/drm/i915/display/intel_display_core.h | 4 +-- drivers/gpu/drm/i915/display/intel_hotplug.c | 30 +++++++++---------- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index eeb7ae3eaea8..afb2184bf233 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -170,8 +170,8 @@ struct intel_hotplug { u32 retry_bits; struct delayed_work reenable_work; - u32 long_port_mask; - u32 short_port_mask; + u32 long_hpd_pin_mask; + u32 short_hpd_pin_mask; struct work_struct dig_port_work; struct work_struct poll_init_work; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 00d7b1ccf190..9692b5c01aea 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -353,28 +353,28 @@ static void i915_digport_work_func(struct work_struct *work) { struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); - u32 long_port_mask, short_port_mask; + u32 long_hpd_pin_mask, short_hpd_pin_mask; struct intel_encoder *encoder; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); - long_port_mask = dev_priv->display.hotplug.long_port_mask; - dev_priv->display.hotplug.long_port_mask = 0; - short_port_mask = dev_priv->display.hotplug.short_port_mask; - dev_priv->display.hotplug.short_port_mask = 0; + long_hpd_pin_mask = dev_priv->display.hotplug.long_hpd_pin_mask; + dev_priv->display.hotplug.long_hpd_pin_mask = 0; + short_hpd_pin_mask = dev_priv->display.hotplug.short_hpd_pin_mask; + dev_priv->display.hotplug.short_hpd_pin_mask = 0; spin_unlock_irq(&dev_priv->irq_lock); for_each_intel_encoder(&dev_priv->drm, encoder) { struct intel_digital_port *dig_port; - enum port port = encoder->port; + enum hpd_pin pin = encoder->hpd_pin; bool long_hpd, short_hpd; enum irqreturn ret; if (!intel_encoder_has_hpd_pulse(encoder)) continue; - long_hpd = long_port_mask & BIT(port); - short_hpd = short_port_mask & BIT(port); + long_hpd = long_hpd_pin_mask & BIT(pin); + short_hpd = short_hpd_pin_mask & BIT(pin); if (!long_hpd && !short_hpd) continue; @@ -384,7 +384,7 @@ static void i915_digport_work_func(struct work_struct *work) ret = dig_port->hpd_pulse(dig_port, long_hpd); if (ret == IRQ_NONE) { /* fall back to old school hpd */ - old_bits |= BIT(encoder->hpd_pin); + old_bits |= BIT(pin); } } @@ -407,9 +407,10 @@ static void i915_digport_work_func(struct work_struct *work) void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_encoder *encoder = &dig_port->base; spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.short_port_mask |= BIT(dig_port->base.port); + i915->display.hotplug.short_hpd_pin_mask |= BIT(encoder->hpd_pin); spin_unlock_irq(&i915->irq_lock); queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); @@ -557,7 +558,6 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * only the one of them (DP) will have ->hpd_pulse(). 
*/ for_each_intel_encoder(&dev_priv->drm, encoder) { - enum port port = encoder->port; bool long_hpd; pin = encoder->hpd_pin; @@ -577,10 +577,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); - dev_priv->display.hotplug.long_port_mask |= BIT(port); + dev_priv->display.hotplug.long_hpd_pin_mask |= BIT(pin); } else { short_hpd_pulse_mask |= BIT(pin); - dev_priv->display.hotplug.short_port_mask |= BIT(port); + dev_priv->display.hotplug.short_hpd_pin_mask |= BIT(pin); } } @@ -920,8 +920,8 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.long_port_mask = 0; - dev_priv->display.hotplug.short_port_mask = 0; + dev_priv->display.hotplug.long_hpd_pin_mask = 0; + dev_priv->display.hotplug.short_hpd_pin_mask = 0; dev_priv->display.hotplug.event_bits = 0; dev_priv->display.hotplug.retry_bits = 0; From 3b545b216cd1bcc33fb4b826fbfb517f1d8a3a93 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Fri, 7 Mar 2025 11:07:55 -0800 Subject: [PATCH 0052/1627] drm/xe/xe3: Recognize 3DSTATE_COARSE_PIXEL in LRC dumps Xe3 adds a new 3DSTATE_COARSE_PIXEL state instruction as part of the render engine LRC. Ensure we can recognize and report this properly in the LRC dumps. Bspec: 65182, 73415 Reviewed-by: Sai Teja Pottumuttu Link: https://patchwork.freedesktop.org/patch/msgid/20250307190754.678376-2-matthew.d.roper@intel.com Signed-off-by: Matt Roper --- drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h | 1 + drivers/gpu/drm/xe/xe_lrc.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h index 31d28a67ef6a..457881af8af9 100644 --- a/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_gfxpipe_commands.h @@ -137,6 +137,7 @@ #define CMD_3DSTATE_CLIP_MESH GFXPIPE_3D_CMD(0x0, 0x81) #define CMD_3DSTATE_SBE_MESH GFXPIPE_3D_CMD(0x0, 0x82) #define CMD_3DSTATE_CPSIZE_CONTROL_BUFFER GFXPIPE_3D_CMD(0x0, 0x83) +#define CMD_3DSTATE_COARSE_PIXEL GFXPIPE_3D_CMD(0x0, 0x89) #define CMD_3DSTATE_DRAWING_RECTANGLE GFXPIPE_3D_CMD(0x1, 0x0) #define CMD_3DSTATE_CHROMA_KEY GFXPIPE_3D_CMD(0x1, 0x4) diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index df3ceddede07..81def1792664 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1445,6 +1445,7 @@ static int dump_gfxpipe_command(struct drm_printer *p, MATCH3D(3DSTATE_CLIP_MESH); MATCH3D(3DSTATE_SBE_MESH); MATCH3D(3DSTATE_CPSIZE_CONTROL_BUFFER); + MATCH3D(3DSTATE_COARSE_PIXEL); MATCH3D(3DSTATE_DRAWING_RECTANGLE); MATCH3D(3DSTATE_CHROMA_KEY); From 8da8aecf1f2d89c2b8188bcf7aa252ec146ddd12 Mon Sep 17 00:00:00 2001 From: Xin Wang Date: Mon, 3 Mar 2025 08:49:41 +0800 Subject: [PATCH 0053/1627] drm/xe: remove redundant check in xe_vm_create_ioctl() The check for args->extensions is repeated twice in xe_vm_create_ioctl(). This commit removes the redundant check to streamline the code. 
Fixes: 7224788f6756 ("drm/xe: Kill XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS extension") Cc: Rodrigo Vivi Signed-off-by: Xin Wang Reviewed-by: Tejas Upadhyay Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20250303004942.951699-1-x.wang@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_vm.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 22a26aff3a6e..60303998bd61 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -2056,9 +2056,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) return -EINVAL; - if (XE_IOCTL_DBG(xe, args->extensions)) - return -EINVAL; - if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE) flags |= XE_VM_FLAG_SCRATCH_PAGE; if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) From 965544150d1cadf0e8f5bb6c13c19697e46e1429 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Fri, 7 Mar 2025 07:57:38 -0500 Subject: [PATCH 0054/1627] drm/vmwgfx: Refactor cursor handling Refactor cursor handling to make the code maintainable again. Over the last 12 years the svga device improved support for virtualized cursors and at the same time the drm interfaces evolved quite a bit from pre-atomic to current atomic ones. vmwgfx only added new code over the years, instead of adjusting/refactoring the paths. Export the cursor plane handling to its own file. Remove special handling of the legacy cursor support to make it fit within the global cursor plane mechanism. Finally redo dirty tracking because memcmp never worked correctly resulting in the cursor not being properly updated in the guest. Signed-off-by: Zack Rusin Reviewed-by: Maaz Mombasawala Reviewed-by: Martin Krastev Link: https://patchwork.freedesktop.org/patch/msgid/20250307125836.3877138-2-zack.rusin@broadcom.com --- drivers/gpu/drm/vmwgfx/Makefile | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 6 + drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 2 + drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c | 844 ++++++++++++++++++ drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h | 81 ++ drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 27 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 32 +- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 26 +- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 874 +------------------ drivers/gpu/drm/vmwgfx/vmwgfx_kms.h | 71 +- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 10 +- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 63 +- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 10 +- drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 11 +- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 47 +- 15 files changed, 1042 insertions(+), 1064 deletions(-) create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c create mode 100644 drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 46a4ab688a7f..b168fd7fe9b3 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ - vmwgfx_gem.o vmwgfx_vkms.o + vmwgfx_gem.o vmwgfx_vkms.o vmwgfx_cursor_plane.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 9b5b8c1f063b..b7766421d2f5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ 
b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -887,3 +887,9 @@ out: surf = vmw_res_to_srf(res); return surf; } + +s32 vmw_bo_mobid(struct vmw_bo *vbo) +{ + WARN_ON(vbo->tbo.resource->mem_type != VMW_PL_MOB); + return (s32)vbo->tbo.resource->start; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index 11e330c7c7f5..e97cae2365c8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -233,4 +233,6 @@ static inline struct vmw_bo *to_vmw_bo(struct drm_gem_object *gobj) return container_of((gobj), struct vmw_bo, tbo.base); } +s32 vmw_bo_mobid(struct vmw_bo *vbo); + #endif // VMWGFX_BO_H diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c new file mode 100644 index 000000000000..718832b08d96 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.c @@ -0,0 +1,844 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/************************************************************************** + * + * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + * + **************************************************************************/ +#include "vmwgfx_cursor_plane.h" + +#include "vmwgfx_bo.h" +#include "vmwgfx_drv.h" +#include "vmwgfx_kms.h" +#include "vmwgfx_resource_priv.h" +#include "vmw_surface_cache.h" + +#include "drm/drm_atomic.h" +#include "drm/drm_atomic_helper.h" +#include "drm/drm_plane.h" +#include + +#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8 +#define VMW_CURSOR_SNOOP_WIDTH 64 +#define VMW_CURSOR_SNOOP_HEIGHT 64 + +struct vmw_svga_fifo_cmd_define_cursor { + u32 cmd; + SVGAFifoCmdDefineAlphaCursor cursor; +}; + +/** + * vmw_send_define_cursor_cmd - queue a define cursor command + * @dev_priv: the private driver struct + * @image: buffer which holds the cursor image + * @width: width of the mouse cursor image + * @height: height of the mouse cursor image + * @hotspotX: the horizontal position of mouse hotspot + * @hotspotY: the vertical position of mouse hotspot + */ +static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv, + u32 *image, u32 width, u32 height, + u32 hotspotX, u32 hotspotY) +{ + struct vmw_svga_fifo_cmd_define_cursor *cmd; + const u32 image_size = width * height * sizeof(*image); + const u32 cmd_size = sizeof(*cmd) + image_size; + + /* + * Try to reserve fifocmd space and swallow any failures; + * such reservations cannot be left unconsumed for long + * under the risk of clogging other fifocmd users, so + * we treat reservations separtely from the way we treat + * other fallible KMS-atomic resources at prepare_fb + */ + cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); + + if (unlikely(!cmd)) + return; + + memset(cmd, 0, sizeof(*cmd)); + + memcpy(&cmd[1], image, image_size); + + cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR; + cmd->cursor.id = 0; + cmd->cursor.width = width; + cmd->cursor.height = height; + cmd->cursor.hotspotX = hotspotX; + cmd->cursor.hotspotY = hotspotY; + + vmw_cmd_commit_flush(dev_priv, cmd_size); +} + +static void +vmw_cursor_plane_update_legacy(struct vmw_private *vmw, + struct vmw_plane_state *vps) +{ + struct vmw_surface *surface = vmw_user_object_surface(&vps->uo); + s32 hotspot_x = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x; + s32 hotspot_y = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y; + + if (WARN_ON(!surface || !surface->snooper.image)) + return; + + if (vps->cursor.legacy.id != surface->snooper.id) { + 
vmw_send_define_cursor_cmd(vmw, surface->snooper.image, + vps->base.crtc_w, vps->base.crtc_h, + hotspot_x, hotspot_y); + vps->cursor.legacy.id = surface->snooper.id; + } +} + +static enum vmw_cursor_update_type +vmw_cursor_update_type(struct vmw_private *vmw, struct vmw_plane_state *vps) +{ + struct vmw_surface *surface = vmw_user_object_surface(&vps->uo); + + if (surface && surface->snooper.image) + return VMW_CURSOR_UPDATE_LEGACY; + + if (vmw->has_mob) { + if ((vmw->capabilities2 & SVGA_CAP2_CURSOR_MOB) != 0) + return VMW_CURSOR_UPDATE_MOB; + } + + return VMW_CURSOR_UPDATE_NONE; +} + +static void vmw_cursor_update_mob(struct vmw_private *vmw, + struct vmw_plane_state *vps) +{ + SVGAGBCursorHeader *header; + SVGAGBAlphaCursorHeader *alpha_header; + struct vmw_bo *bo = vmw_user_object_buffer(&vps->uo); + u32 *image = vmw_bo_map_and_cache(bo); + const u32 image_size = vps->base.crtc_w * vps->base.crtc_h * sizeof(*image); + + header = vmw_bo_map_and_cache(vps->cursor.mob); + alpha_header = &header->header.alphaHeader; + + memset(header, 0, sizeof(*header)); + + header->type = SVGA_ALPHA_CURSOR; + header->sizeInBytes = image_size; + + alpha_header->hotspotX = vps->cursor.legacy.hotspot_x + vps->base.hotspot_x; + alpha_header->hotspotY = vps->cursor.legacy.hotspot_y + vps->base.hotspot_y; + alpha_header->width = vps->base.crtc_w; + alpha_header->height = vps->base.crtc_h; + + memcpy(header + 1, image, image_size); + vmw_write(vmw, SVGA_REG_CURSOR_MOBID, vmw_bo_mobid(vps->cursor.mob)); + + vmw_bo_unmap(bo); + vmw_bo_unmap(vps->cursor.mob); +} + +static u32 vmw_cursor_mob_size(enum vmw_cursor_update_type update_type, + u32 w, u32 h) +{ + switch (update_type) { + case VMW_CURSOR_UPDATE_LEGACY: + case VMW_CURSOR_UPDATE_NONE: + return 0; + case VMW_CURSOR_UPDATE_MOB: + return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader); + } + return 0; +} + +static void vmw_cursor_mob_destroy(struct vmw_bo **vbo) +{ + if (!(*vbo)) + return; + + ttm_bo_unpin(&(*vbo)->tbo); + vmw_bo_unreference(vbo); +} + +/** + * vmw_cursor_mob_unmap - Unmaps the cursor mobs. + * + * @vps: state of the cursor plane + * + * Returns 0 on success + */ + +static int +vmw_cursor_mob_unmap(struct vmw_plane_state *vps) +{ + int ret = 0; + struct vmw_bo *vbo = vps->cursor.mob; + + if (!vbo || !vbo->map.virtual) + return 0; + + ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); + if (likely(ret == 0)) { + vmw_bo_unmap(vbo); + ttm_bo_unreserve(&vbo->tbo); + } + + return ret; +} + +static void vmw_cursor_mob_put(struct vmw_cursor_plane *vcp, + struct vmw_plane_state *vps) +{ + u32 i; + + if (!vps->cursor.mob) + return; + + vmw_cursor_mob_unmap(vps); + + /* Look for a free slot to return this mob to the cache. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (!vcp->cursor_mobs[i]) { + vcp->cursor_mobs[i] = vps->cursor.mob; + vps->cursor.mob = NULL; + return; + } + } + + /* Cache is full: See if this mob is bigger than an existing mob. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (vcp->cursor_mobs[i]->tbo.base.size < + vps->cursor.mob->tbo.base.size) { + vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]); + vcp->cursor_mobs[i] = vps->cursor.mob; + vps->cursor.mob = NULL; + return; + } + } + + /* Destroy it if it's not worth caching. 
*/ + vmw_cursor_mob_destroy(&vps->cursor.mob); +} + +static int vmw_cursor_mob_get(struct vmw_cursor_plane *vcp, + struct vmw_plane_state *vps) +{ + struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); + u32 size = vmw_cursor_mob_size(vps->cursor.update_type, + vps->base.crtc_w, vps->base.crtc_h); + u32 i; + u32 cursor_max_dim, mob_max_size; + struct vmw_fence_obj *fence = NULL; + int ret; + + if (!dev_priv->has_mob || + (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0) + return -EINVAL; + + mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); + cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION); + + if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim || + vps->base.crtc_h > cursor_max_dim) + return -EINVAL; + + if (vps->cursor.mob) { + if (vps->cursor.mob->tbo.base.size >= size) + return 0; + vmw_cursor_mob_put(vcp, vps); + } + + /* Look for an unused mob in the cache. */ + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { + if (vcp->cursor_mobs[i] && + vcp->cursor_mobs[i]->tbo.base.size >= size) { + vps->cursor.mob = vcp->cursor_mobs[i]; + vcp->cursor_mobs[i] = NULL; + return 0; + } + } + /* Create a new mob if we can't find an existing one. */ + ret = vmw_bo_create_and_populate(dev_priv, size, VMW_BO_DOMAIN_MOB, + &vps->cursor.mob); + + if (ret != 0) + return ret; + + /* Fence the mob creation so we are guarateed to have the mob */ + ret = ttm_bo_reserve(&vps->cursor.mob->tbo, false, false, NULL); + if (ret != 0) + goto teardown; + + ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); + if (ret != 0) { + ttm_bo_unreserve(&vps->cursor.mob->tbo); + goto teardown; + } + + dma_fence_wait(&fence->base, false); + dma_fence_put(&fence->base); + + ttm_bo_unreserve(&vps->cursor.mob->tbo); + + return 0; + +teardown: + vmw_cursor_mob_destroy(&vps->cursor.mob); + return ret; +} + +static void vmw_cursor_update_position(struct vmw_private *dev_priv, + bool show, int x, int y) +{ + const u32 svga_cursor_on = show ? 
SVGA_CURSOR_ON_SHOW + : SVGA_CURSOR_ON_HIDE; + u32 count; + + spin_lock(&dev_priv->cursor_lock); + if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) { + vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x); + vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y); + vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID); + vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on); + vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1); + } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) { + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); + count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); + } else { + vmw_write(dev_priv, SVGA_REG_CURSOR_X, x); + vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y); + vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on); + } + spin_unlock(&dev_priv->cursor_lock); +} + +void vmw_kms_cursor_snoop(struct vmw_surface *srf, + struct ttm_object_file *tfile, + struct ttm_buffer_object *bo, + SVGA3dCmdHeader *header) +{ + struct ttm_bo_kmap_obj map; + unsigned long kmap_offset; + unsigned long kmap_num; + SVGA3dCopyBox *box; + u32 box_count; + void *virtual; + bool is_iomem; + struct vmw_dma_cmd { + SVGA3dCmdHeader header; + SVGA3dCmdSurfaceDMA dma; + } *cmd; + int i, ret; + const struct SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); + const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock; + + cmd = container_of(header, struct vmw_dma_cmd, header); + + /* No snooper installed, nothing to copy */ + if (!srf->snooper.image) + return; + + if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { + DRM_ERROR("face and mipmap for cursors should never != 0\n"); + return; + } + + if (cmd->header.size < 64) { + DRM_ERROR("at least one full copy box must be given\n"); + return; + } + + box = (SVGA3dCopyBox *)&cmd[1]; + box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / + sizeof(SVGA3dCopyBox); + + if (cmd->dma.guest.ptr.offset % PAGE_SIZE || + box->x != 0 || box->y != 0 || box->z != 0 || + box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || + box->d != 1 || box_count != 1 || + box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) { + /* TODO handle none page aligned offsets */ + /* TODO handle more dst & src != 0 */ + /* TODO handle more then one copy */ + DRM_ERROR("Can't snoop dma request for cursor!\n"); + DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", + box->srcx, box->srcy, box->srcz, + box->x, box->y, box->z, + box->w, box->h, box->d, box_count, + cmd->dma.guest.ptr.offset); + return; + } + + kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; + kmap_num = (VMW_CURSOR_SNOOP_HEIGHT * image_pitch) >> PAGE_SHIFT; + + ret = ttm_bo_reserve(bo, true, false, NULL); + if (unlikely(ret != 0)) { + DRM_ERROR("reserve failed\n"); + return; + } + + ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); + if (unlikely(ret != 0)) + goto err_unreserve; + + virtual = ttm_kmap_obj_virtual(&map, &is_iomem); + + if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) { + memcpy(srf->snooper.image, virtual, + VMW_CURSOR_SNOOP_HEIGHT * image_pitch); + } else { + /* Image is unsigned pointer. 
*/ + for (i = 0; i < box->h; i++) + memcpy(srf->snooper.image + i * image_pitch, + virtual + i * cmd->dma.guest.pitch, + box->w * desc->pitchBytesPerBlock); + } + srf->snooper.id++; + + ttm_bo_kunmap(&map); +err_unreserve: + ttm_bo_unreserve(bo); +} + +void vmw_cursor_plane_destroy(struct drm_plane *plane) +{ + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); + u32 i; + + vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0); + + for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) + vmw_cursor_mob_destroy(&vcp->cursor_mobs[i]); + + drm_plane_cleanup(plane); +} + +/** + * vmw_cursor_mob_map - Maps the cursor mobs. + * + * @vps: plane_state + * + * Returns 0 on success + */ + +static int +vmw_cursor_mob_map(struct vmw_plane_state *vps) +{ + int ret; + u32 size = vmw_cursor_mob_size(vps->cursor.update_type, + vps->base.crtc_w, vps->base.crtc_h); + struct vmw_bo *vbo = vps->cursor.mob; + + if (!vbo) + return -EINVAL; + + if (vbo->tbo.base.size < size) + return -EINVAL; + + if (vbo->map.virtual) + return 0; + + ret = ttm_bo_reserve(&vbo->tbo, false, false, NULL); + if (unlikely(ret != 0)) + return -ENOMEM; + + vmw_bo_map_and_cache(vbo); + + ttm_bo_unreserve(&vbo->tbo); + + return 0; +} + +/** + * vmw_cursor_plane_cleanup_fb - Unpins the plane surface + * + * @plane: cursor plane + * @old_state: contains the state to clean up + * + * Unmaps all cursor bo mappings and unpins the cursor surface + * + * Returns 0 on success + */ +void +vmw_cursor_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); + + if (!vmw_user_object_is_null(&vps->uo)) + vmw_user_object_unmap(&vps->uo); + + vmw_cursor_mob_unmap(vps); + vmw_cursor_mob_put(vcp, vps); + + vmw_du_plane_unpin_surf(vps); + vmw_user_object_unref(&vps->uo); +} + +static bool +vmw_cursor_buffer_changed(struct vmw_plane_state *new_vps, + struct vmw_plane_state *old_vps) +{ + struct vmw_bo *new_bo = vmw_user_object_buffer(&new_vps->uo); + struct vmw_bo *old_bo = vmw_user_object_buffer(&old_vps->uo); + struct vmw_surface *surf; + bool dirty = false; + int ret; + + if (new_bo != old_bo) + return true; + + if (new_bo) { + if (!old_bo) { + return true; + } else if (new_bo->dirty) { + vmw_bo_dirty_scan(new_bo); + dirty = vmw_bo_is_dirty(new_bo); + if (dirty) { + surf = vmw_user_object_surface(&new_vps->uo); + if (surf) + vmw_bo_dirty_transfer_to_res(&surf->res); + else + vmw_bo_dirty_clear(new_bo); + } + return dirty; + } else if (new_bo != old_bo) { + /* + * Currently unused because the top exits right away. + * In most cases buffer being different will mean + * that the contents is different. For the few percent + * of cases where that's not true the cost of doing + * the memcmp on all other seems to outweight the + * benefits. Leave the conditional to be able to + * trivially validate it by removing the initial + * if (new_bo != old_bo) at the start. 
+ */ + void *old_image; + void *new_image; + bool changed = false; + struct ww_acquire_ctx ctx; + const u32 size = new_vps->base.crtc_w * + new_vps->base.crtc_h * sizeof(u32); + + ww_acquire_init(&ctx, &reservation_ww_class); + + ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx); + if (ret != 0) { + ww_acquire_fini(&ctx); + return true; + } + + ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx); + if (ret != 0) { + ttm_bo_unreserve(&old_bo->tbo); + ww_acquire_fini(&ctx); + return true; + } + + old_image = vmw_bo_map_and_cache(old_bo); + new_image = vmw_bo_map_and_cache(new_bo); + + if (old_image && new_image && old_image != new_image) + changed = memcmp(old_image, new_image, size) != + 0; + + ttm_bo_unreserve(&new_bo->tbo); + ttm_bo_unreserve(&old_bo->tbo); + + ww_acquire_fini(&ctx); + + return changed; + } + return false; + } + + return false; +} + +static bool +vmw_cursor_plane_changed(struct vmw_plane_state *new_vps, + struct vmw_plane_state *old_vps) +{ + if (old_vps->base.crtc_w != new_vps->base.crtc_w || + old_vps->base.crtc_h != new_vps->base.crtc_h) + return true; + + if (old_vps->base.hotspot_x != new_vps->base.hotspot_x || + old_vps->base.hotspot_y != new_vps->base.hotspot_y) + return true; + + if (old_vps->cursor.legacy.hotspot_x != + new_vps->cursor.legacy.hotspot_x || + old_vps->cursor.legacy.hotspot_y != + new_vps->cursor.legacy.hotspot_y) + return true; + + if (old_vps->base.fb != new_vps->base.fb) + return true; + + return false; +} + +/** + * vmw_cursor_plane_prepare_fb - Readies the cursor by referencing it + * + * @plane: display plane + * @new_state: info on the new plane state, including the FB + * + * Returns 0 on success + */ +int vmw_cursor_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_framebuffer *fb = new_state->fb; + struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(plane->state); + struct vmw_private *vmw = vmw_priv(plane->dev); + struct vmw_bo *bo = NULL; + struct vmw_surface *surface; + int ret = 0; + + if (!vmw_user_object_is_null(&vps->uo)) { + vmw_user_object_unmap(&vps->uo); + vmw_user_object_unref(&vps->uo); + } + + if (fb) { + if (vmw_framebuffer_to_vfb(fb)->bo) { + vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer; + vps->uo.surface = NULL; + } else { + memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo)); + } + vmw_user_object_ref(&vps->uo); + } + + vps->cursor.update_type = vmw_cursor_update_type(vmw, vps); + switch (vps->cursor.update_type) { + case VMW_CURSOR_UPDATE_LEGACY: + surface = vmw_user_object_surface(&vps->uo); + if (!surface || vps->cursor.legacy.id == surface->snooper.id) + vps->cursor.update_type = VMW_CURSOR_UPDATE_NONE; + break; + case VMW_CURSOR_UPDATE_MOB: { + bo = vmw_user_object_buffer(&vps->uo); + if (bo) { + struct ttm_operation_ctx ctx = { false, false }; + + ret = ttm_bo_reserve(&bo->tbo, true, false, NULL); + if (ret != 0) + return -ENOMEM; + + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret != 0) + return -ENOMEM; + + /* + * vmw_bo_pin_reserved also validates, so to skip + * the extra validation use ttm_bo_pin directly + */ + if (!bo->tbo.pin_count) + ttm_bo_pin(&bo->tbo); + + if (vmw_framebuffer_to_vfb(fb)->bo) { + const u32 size = new_state->crtc_w * + new_state->crtc_h * + sizeof(u32); + + (void)vmw_bo_map_and_cache_size(bo, size); + } else { + vmw_bo_map_and_cache(bo); + } + 
ttm_bo_unreserve(&bo->tbo); + } + if (!vmw_user_object_is_null(&vps->uo)) { + if (!vmw_cursor_plane_changed(vps, old_vps) && + !vmw_cursor_buffer_changed(vps, old_vps)) { + vps->cursor.update_type = + VMW_CURSOR_UPDATE_NONE; + } else { + vmw_cursor_mob_get(vcp, vps); + vmw_cursor_mob_map(vps); + } + } + } + break; + case VMW_CURSOR_UPDATE_NONE: + /* do nothing */ + break; + } + + return 0; +} + +/** + * vmw_cursor_plane_atomic_check - check if the new state is okay + * + * @plane: cursor plane + * @state: info on the new plane state + * + * This is a chance to fail if the new cursor state does not fit + * our requirements. + * + * Returns 0 on success + */ +int vmw_cursor_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = + drm_atomic_get_new_plane_state(state, plane); + struct vmw_private *vmw = vmw_priv(plane->dev); + int ret = 0; + struct drm_crtc_state *crtc_state = NULL; + struct vmw_surface *surface = NULL; + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + enum vmw_cursor_update_type update_type; + struct drm_framebuffer *fb = new_state->fb; + + if (new_state->crtc) + crtc_state = drm_atomic_get_new_crtc_state(new_state->state, + new_state->crtc); + + ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, true, + true); + if (ret) + return ret; + + /* Turning off */ + if (!fb) + return 0; + + update_type = vmw_cursor_update_type(vmw, vps); + if (update_type == VMW_CURSOR_UPDATE_LEGACY) { + if (new_state->crtc_w != VMW_CURSOR_SNOOP_WIDTH || + new_state->crtc_h != VMW_CURSOR_SNOOP_HEIGHT) { + drm_warn(&vmw->drm, + "Invalid cursor dimensions (%d, %d)\n", + new_state->crtc_w, new_state->crtc_h); + return -EINVAL; + } + surface = vmw_user_object_surface(&vps->uo); + if (!surface || !surface->snooper.image) { + drm_warn(&vmw->drm, + "surface not suitable for cursor\n"); + return -EINVAL; + } + } + + return 0; +} + +void +vmw_cursor_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_plane_state *old_state = + drm_atomic_get_old_plane_state(state, plane); + struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; + struct vmw_private *dev_priv = vmw_priv(plane->dev); + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); + s32 hotspot_x, hotspot_y, cursor_x, cursor_y; + + /* + * Hide the cursor if the new bo is null + */ + if (vmw_user_object_is_null(&vps->uo)) { + vmw_cursor_update_position(dev_priv, false, 0, 0); + return; + } + + switch (vps->cursor.update_type) { + case VMW_CURSOR_UPDATE_LEGACY: + vmw_cursor_plane_update_legacy(dev_priv, vps); + break; + case VMW_CURSOR_UPDATE_MOB: + vmw_cursor_update_mob(dev_priv, vps); + break; + case VMW_CURSOR_UPDATE_NONE: + /* do nothing */ + break; + } + + /* + * For all update types update the cursor position + */ + cursor_x = new_state->crtc_x + du->set_gui_x; + cursor_y = new_state->crtc_y + du->set_gui_y; + + hotspot_x = vps->cursor.legacy.hotspot_x + new_state->hotspot_x; + hotspot_y = vps->cursor.legacy.hotspot_y + new_state->hotspot_y; + + vmw_cursor_update_position(dev_priv, true, cursor_x + hotspot_x, + cursor_y + hotspot_y); +} + +int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_vmw_cursor_bypass_arg *arg = data; + struct 
vmw_display_unit *du; + struct vmw_plane_state *vps; + struct drm_crtc *crtc; + int ret = 0; + + mutex_lock(&dev->mode_config.mutex); + if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + du = vmw_crtc_to_du(crtc); + vps = vmw_plane_state_to_vps(du->cursor.base.state); + vps->cursor.legacy.hotspot_x = arg->xhot; + vps->cursor.legacy.hotspot_y = arg->yhot; + } + + mutex_unlock(&dev->mode_config.mutex); + return 0; + } + + crtc = drm_crtc_find(dev, file_priv, arg->crtc_id); + if (!crtc) { + ret = -ENOENT; + goto out; + } + + du = vmw_crtc_to_du(crtc); + vps = vmw_plane_state_to_vps(du->cursor.base.state); + vps->cursor.legacy.hotspot_x = arg->xhot; + vps->cursor.legacy.hotspot_y = arg->yhot; + +out: + mutex_unlock(&dev->mode_config.mutex); + + return ret; +} + +void *vmw_cursor_snooper_create(struct drm_file *file_priv, + struct vmw_surface_metadata *metadata) +{ + if (!file_priv->atomic && metadata->scanout && + metadata->num_sizes == 1 && + metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && + metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && + metadata->format == VMW_CURSOR_SNOOP_FORMAT) { + const struct SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); + const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH * + VMW_CURSOR_SNOOP_HEIGHT * + desc->pitchBytesPerBlock; + void *image = kzalloc(cursor_size_bytes, GFP_KERNEL); + + if (!image) { + DRM_ERROR("Failed to allocate cursor_image\n"); + return ERR_PTR(-ENOMEM); + } + return image; + } + return NULL; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h new file mode 100644 index 000000000000..40694925a70e --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cursor_plane.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright (c) 2024-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
+ * + **************************************************************************/ + +#ifndef VMWGFX_CURSOR_PLANE_H +#define VMWGFX_CURSOR_PLANE_H + +#include "device_include/svga3d_cmd.h" +#include "drm/drm_file.h" +#include "drm/drm_fourcc.h" +#include "drm/drm_plane.h" + +#include + +struct SVGA3dCmdHeader; +struct ttm_buffer_object; +struct vmw_bo; +struct vmw_cursor; +struct vmw_private; +struct vmw_surface; +struct vmw_user_object; + +#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base) + +static const u32 __maybe_unused vmw_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +enum vmw_cursor_update_type { + VMW_CURSOR_UPDATE_NONE = 0, + VMW_CURSOR_UPDATE_LEGACY, + VMW_CURSOR_UPDATE_MOB, +}; + +struct vmw_cursor_plane_state { + enum vmw_cursor_update_type update_type; + bool changed; + bool surface_changed; + struct vmw_bo *mob; + struct { + s32 hotspot_x; + s32 hotspot_y; + u32 id; + } legacy; +}; + +/** + * Derived class for cursor plane object + * + * @base DRM plane object + * @cursor.cursor_mobs Cursor mobs available for re-use + */ +struct vmw_cursor_plane { + struct drm_plane base; + + struct vmw_bo *cursor_mobs[3]; +}; + +struct vmw_surface_metadata; +void *vmw_cursor_snooper_create(struct drm_file *file_priv, + struct vmw_surface_metadata *metadata); +void vmw_cursor_cmd_dma_snoop(SVGA3dCmdHeader *header, + struct vmw_surface *srf, + struct ttm_buffer_object *bo); + +void vmw_cursor_plane_destroy(struct drm_plane *plane); + +int vmw_cursor_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state); +void vmw_cursor_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state); +int vmw_cursor_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state); +void vmw_cursor_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state); + +#endif /* VMWGFX_CURSOR_H */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 0f32471c8533..0695a342b1ef 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1,31 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. 
The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * **************************************************************************/ - #include "vmwgfx_drv.h" #include "vmwgfx_bo.h" @@ -1324,9 +1304,6 @@ static void vmw_master_set(struct drm_device *dev, static void vmw_master_drop(struct drm_device *dev, struct drm_file *file_priv) { - struct vmw_private *dev_priv = vmw_priv(dev); - - vmw_kms_legacy_hotspot_clear(dev_priv); } bool vmwgfx_supported(struct vmw_private *vmw) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5275ef632d4b..6fc810632c98 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -1,29 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * **************************************************************************/ #ifndef _VMWGFX_DRV_H_ @@ -100,10 +80,6 @@ #define VMW_RES_SHADER ttm_driver_type4 #define VMW_RES_HT_ORDER 12 -#define VMW_CURSOR_SNOOP_FORMAT SVGA3D_A8R8G8B8 -#define VMW_CURSOR_SNOOP_WIDTH 64 -#define VMW_CURSOR_SNOOP_HEIGHT 64 - #define MKSSTAT_CAPACITY_LOG2 5U #define MKSSTAT_CAPACITY (1U << MKSSTAT_CAPACITY_LOG2) @@ -201,7 +177,7 @@ enum vmw_cmdbuf_res_type { struct vmw_cmdbuf_res_manager; struct vmw_cursor_snooper { - size_t age; + size_t id; uint32_t *image; }; @@ -1050,7 +1026,6 @@ int vmw_kms_init(struct vmw_private *dev_priv); int vmw_kms_close(struct vmw_private *dev_priv); int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv); void vmw_kms_cursor_snoop(struct vmw_surface *srf, struct ttm_object_file *tfile, struct ttm_buffer_object *bo, @@ -1067,7 +1042,6 @@ int vmw_kms_present(struct vmw_private *dev_priv, uint32_t num_clips); int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv); int vmw_kms_suspend(struct drm_device *dev); int vmw_kms_resume(struct drm_device *dev); void vmw_kms_lost_device(struct drm_device *dev); @@ -1393,8 +1367,10 @@ int vmw_mksstat_remove_all(struct vmw_private *dev_priv); DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__) /* Resource dirtying - vmwgfx_page_dirty.c */ +bool vmw_bo_is_dirty(struct vmw_bo *vbo); void vmw_bo_dirty_scan(struct vmw_bo *vbo); int vmw_bo_dirty_add(struct vmw_bo *vbo); +void vmw_bo_dirty_clear(struct vmw_bo *vbo); void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res); void vmw_bo_dirty_clear_res(struct vmw_resource *res); void vmw_bo_dirty_release(struct vmw_bo *vbo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 2e52d73eba48..f8325905388a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1,29 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
* **************************************************************************/ + #include "vmwgfx_binding.h" #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" @@ -4512,8 +4494,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out; - vmw_kms_cursor_post_execbuf(dev_priv); - out: if (in_fence) dma_fence_put(in_fence); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 1912ac1cde6d..05b1c54a070c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1,33 +1,15 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * **************************************************************************/ + #include "vmwgfx_kms.h" #include "vmwgfx_bo.h" +#include "vmwgfx_resource_priv.h" #include "vmwgfx_vkms.h" #include "vmw_surface_cache.h" @@ -59,474 +41,6 @@ void vmw_du_cleanup(struct vmw_display_unit *du) drm_connector_cleanup(&du->connector); } -/* - * Display Unit Cursor functions - */ - -static int vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps); -static void vmw_cursor_update_mob(struct vmw_private *dev_priv, - struct vmw_plane_state *vps, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY); - -struct vmw_svga_fifo_cmd_define_cursor { - u32 cmd; - SVGAFifoCmdDefineAlphaCursor cursor; -}; - -/** - * vmw_send_define_cursor_cmd - queue a define cursor command - * @dev_priv: the private driver struct - * @image: buffer which holds the cursor image - * @width: width of the mouse cursor image - * @height: height of the mouse cursor image - * @hotspotX: the horizontal position of mouse hotspot - * @hotspotY: the vertical position of mouse hotspot - */ -static void vmw_send_define_cursor_cmd(struct vmw_private *dev_priv, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - struct vmw_svga_fifo_cmd_define_cursor *cmd; - const u32 image_size = width * height * sizeof(*image); - const u32 cmd_size = sizeof(*cmd) + image_size; - - /* Try to reserve fifocmd space and swallow any failures; - such reservations cannot be left unconsumed for long - under the risk of clogging other fifocmd users, so - we treat reservations separtely from the way we treat - other fallible KMS-atomic resources at prepare_fb */ - cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); - - if (unlikely(!cmd)) - return; - - memset(cmd, 0, sizeof(*cmd)); - - memcpy(&cmd[1], image, image_size); - - cmd->cmd = SVGA_CMD_DEFINE_ALPHA_CURSOR; - cmd->cursor.id = 0; - cmd->cursor.width = width; - cmd->cursor.height = height; - cmd->cursor.hotspotX = hotspotX; - cmd->cursor.hotspotY = hotspotY; - - vmw_cmd_commit_flush(dev_priv, cmd_size); -} - -/** - * vmw_cursor_update_image - update the cursor image on the provided plane - * @dev_priv: the private driver struct - * @vps: the plane state of the cursor plane - * @image: buffer which holds the cursor image - * @width: width of the mouse cursor image - * @height: height of the mouse cursor image - * @hotspotX: the horizontal position of mouse hotspot - * @hotspotY: the vertical position of mouse hotspot - */ -static void vmw_cursor_update_image(struct vmw_private *dev_priv, - struct vmw_plane_state *vps, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - if (vps->cursor.bo) - vmw_cursor_update_mob(dev_priv, vps, image, - vps->base.crtc_w, vps->base.crtc_h, - hotspotX, hotspotY); - - else - vmw_send_define_cursor_cmd(dev_priv, image, width, height, - hotspotX, hotspotY); -} - - -/** - * vmw_cursor_update_mob - Update cursor vis CursorMob mechanism - * - * Called from inside vmw_du_cursor_plane_atomic_update to actually - * make the cursor-image live. 
- * - * @dev_priv: device to work with - * @vps: the plane state of the cursor plane - * @image: cursor source data to fill the MOB with - * @width: source data width - * @height: source data height - * @hotspotX: cursor hotspot x - * @hotspotY: cursor hotspot Y - */ -static void vmw_cursor_update_mob(struct vmw_private *dev_priv, - struct vmw_plane_state *vps, - u32 *image, u32 width, u32 height, - u32 hotspotX, u32 hotspotY) -{ - SVGAGBCursorHeader *header; - SVGAGBAlphaCursorHeader *alpha_header; - const u32 image_size = width * height * sizeof(*image); - - header = vmw_bo_map_and_cache(vps->cursor.bo); - alpha_header = &header->header.alphaHeader; - - memset(header, 0, sizeof(*header)); - - header->type = SVGA_ALPHA_CURSOR; - header->sizeInBytes = image_size; - - alpha_header->hotspotX = hotspotX; - alpha_header->hotspotY = hotspotY; - alpha_header->width = width; - alpha_header->height = height; - - memcpy(header + 1, image, image_size); - vmw_write(dev_priv, SVGA_REG_CURSOR_MOBID, - vps->cursor.bo->tbo.resource->start); -} - - -static u32 vmw_du_cursor_mob_size(u32 w, u32 h) -{ - return w * h * sizeof(u32) + sizeof(SVGAGBCursorHeader); -} - -/** - * vmw_du_cursor_plane_acquire_image -- Acquire the image data - * @vps: cursor plane state - */ -static u32 *vmw_du_cursor_plane_acquire_image(struct vmw_plane_state *vps) -{ - struct vmw_surface *surf; - - if (vmw_user_object_is_null(&vps->uo)) - return NULL; - - surf = vmw_user_object_surface(&vps->uo); - if (surf && !vmw_user_object_is_mapped(&vps->uo)) - return surf->snooper.image; - - return vmw_user_object_map(&vps->uo); -} - -static bool vmw_du_cursor_plane_has_changed(struct vmw_plane_state *old_vps, - struct vmw_plane_state *new_vps) -{ - void *old_image; - void *new_image; - u32 size; - bool changed; - - if (old_vps->base.crtc_w != new_vps->base.crtc_w || - old_vps->base.crtc_h != new_vps->base.crtc_h) - return true; - - if (old_vps->cursor.hotspot_x != new_vps->cursor.hotspot_x || - old_vps->cursor.hotspot_y != new_vps->cursor.hotspot_y) - return true; - - size = new_vps->base.crtc_w * new_vps->base.crtc_h * sizeof(u32); - - old_image = vmw_du_cursor_plane_acquire_image(old_vps); - new_image = vmw_du_cursor_plane_acquire_image(new_vps); - - changed = false; - if (old_image && new_image && old_image != new_image) - changed = memcmp(old_image, new_image, size) != 0; - - return changed; -} - -static void vmw_du_destroy_cursor_mob(struct vmw_bo **vbo) -{ - if (!(*vbo)) - return; - - ttm_bo_unpin(&(*vbo)->tbo); - vmw_bo_unreference(vbo); -} - -static void vmw_du_put_cursor_mob(struct vmw_cursor_plane *vcp, - struct vmw_plane_state *vps) -{ - u32 i; - - if (!vps->cursor.bo) - return; - - vmw_du_cursor_plane_unmap_cm(vps); - - /* Look for a free slot to return this mob to the cache. */ - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (!vcp->cursor_mobs[i]) { - vcp->cursor_mobs[i] = vps->cursor.bo; - vps->cursor.bo = NULL; - return; - } - } - - /* Cache is full: See if this mob is bigger than an existing mob. */ - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (vcp->cursor_mobs[i]->tbo.base.size < - vps->cursor.bo->tbo.base.size) { - vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); - vcp->cursor_mobs[i] = vps->cursor.bo; - vps->cursor.bo = NULL; - return; - } - } - - /* Destroy it if it's not worth caching. 
*/ - vmw_du_destroy_cursor_mob(&vps->cursor.bo); -} - -static int vmw_du_get_cursor_mob(struct vmw_cursor_plane *vcp, - struct vmw_plane_state *vps) -{ - struct vmw_private *dev_priv = vmw_priv(vcp->base.dev); - u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); - u32 i; - u32 cursor_max_dim, mob_max_size; - struct vmw_fence_obj *fence = NULL; - int ret; - - if (!dev_priv->has_mob || - (dev_priv->capabilities2 & SVGA_CAP2_CURSOR_MOB) == 0) - return -EINVAL; - - mob_max_size = vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE); - cursor_max_dim = vmw_read(dev_priv, SVGA_REG_CURSOR_MAX_DIMENSION); - - if (size > mob_max_size || vps->base.crtc_w > cursor_max_dim || - vps->base.crtc_h > cursor_max_dim) - return -EINVAL; - - if (vps->cursor.bo) { - if (vps->cursor.bo->tbo.base.size >= size) - return 0; - vmw_du_put_cursor_mob(vcp, vps); - } - - /* Look for an unused mob in the cache. */ - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) { - if (vcp->cursor_mobs[i] && - vcp->cursor_mobs[i]->tbo.base.size >= size) { - vps->cursor.bo = vcp->cursor_mobs[i]; - vcp->cursor_mobs[i] = NULL; - return 0; - } - } - /* Create a new mob if we can't find an existing one. */ - ret = vmw_bo_create_and_populate(dev_priv, size, - VMW_BO_DOMAIN_MOB, - &vps->cursor.bo); - - if (ret != 0) - return ret; - - /* Fence the mob creation so we are guarateed to have the mob */ - ret = ttm_bo_reserve(&vps->cursor.bo->tbo, false, false, NULL); - if (ret != 0) - goto teardown; - - ret = vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); - if (ret != 0) { - ttm_bo_unreserve(&vps->cursor.bo->tbo); - goto teardown; - } - - dma_fence_wait(&fence->base, false); - dma_fence_put(&fence->base); - - ttm_bo_unreserve(&vps->cursor.bo->tbo); - return 0; - -teardown: - vmw_du_destroy_cursor_mob(&vps->cursor.bo); - return ret; -} - - -static void vmw_cursor_update_position(struct vmw_private *dev_priv, - bool show, int x, int y) -{ - const uint32_t svga_cursor_on = show ? 
SVGA_CURSOR_ON_SHOW - : SVGA_CURSOR_ON_HIDE; - uint32_t count; - - spin_lock(&dev_priv->cursor_lock); - if (dev_priv->capabilities2 & SVGA_CAP2_EXTRA_REGS) { - vmw_write(dev_priv, SVGA_REG_CURSOR4_X, x); - vmw_write(dev_priv, SVGA_REG_CURSOR4_Y, y); - vmw_write(dev_priv, SVGA_REG_CURSOR4_SCREEN_ID, SVGA3D_INVALID_ID); - vmw_write(dev_priv, SVGA_REG_CURSOR4_ON, svga_cursor_on); - vmw_write(dev_priv, SVGA_REG_CURSOR4_SUBMIT, 1); - } else if (vmw_is_cursor_bypass3_enabled(dev_priv)) { - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, svga_cursor_on); - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); - count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); - } else { - vmw_write(dev_priv, SVGA_REG_CURSOR_X, x); - vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y); - vmw_write(dev_priv, SVGA_REG_CURSOR_ON, svga_cursor_on); - } - spin_unlock(&dev_priv->cursor_lock); -} - -void vmw_kms_cursor_snoop(struct vmw_surface *srf, - struct ttm_object_file *tfile, - struct ttm_buffer_object *bo, - SVGA3dCmdHeader *header) -{ - struct ttm_bo_kmap_obj map; - unsigned long kmap_offset; - unsigned long kmap_num; - SVGA3dCopyBox *box; - unsigned box_count; - void *virtual; - bool is_iomem; - struct vmw_dma_cmd { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA dma; - } *cmd; - int i, ret; - const struct SVGA3dSurfaceDesc *desc = - vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); - const u32 image_pitch = VMW_CURSOR_SNOOP_WIDTH * desc->pitchBytesPerBlock; - - cmd = container_of(header, struct vmw_dma_cmd, header); - - /* No snooper installed, nothing to copy */ - if (!srf->snooper.image) - return; - - if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) { - DRM_ERROR("face and mipmap for cursors should never != 0\n"); - return; - } - - if (cmd->header.size < 64) { - DRM_ERROR("at least one full copy box must be given\n"); - return; - } - - box = (SVGA3dCopyBox *)&cmd[1]; - box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) / - sizeof(SVGA3dCopyBox); - - if (cmd->dma.guest.ptr.offset % PAGE_SIZE || - box->x != 0 || box->y != 0 || box->z != 0 || - box->srcx != 0 || box->srcy != 0 || box->srcz != 0 || - box->d != 1 || box_count != 1 || - box->w > VMW_CURSOR_SNOOP_WIDTH || box->h > VMW_CURSOR_SNOOP_HEIGHT) { - /* TODO handle none page aligned offsets */ - /* TODO handle more dst & src != 0 */ - /* TODO handle more then one copy */ - DRM_ERROR("Can't snoop dma request for cursor!\n"); - DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n", - box->srcx, box->srcy, box->srcz, - box->x, box->y, box->z, - box->w, box->h, box->d, box_count, - cmd->dma.guest.ptr.offset); - return; - } - - kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT; - kmap_num = (VMW_CURSOR_SNOOP_HEIGHT*image_pitch) >> PAGE_SHIFT; - - ret = ttm_bo_reserve(bo, true, false, NULL); - if (unlikely(ret != 0)) { - DRM_ERROR("reserve failed\n"); - return; - } - - ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map); - if (unlikely(ret != 0)) - goto err_unreserve; - - virtual = ttm_kmap_obj_virtual(&map, &is_iomem); - - if (box->w == VMW_CURSOR_SNOOP_WIDTH && cmd->dma.guest.pitch == image_pitch) { - memcpy(srf->snooper.image, virtual, - VMW_CURSOR_SNOOP_HEIGHT*image_pitch); - } else { - /* Image is unsigned pointer. 
*/ - for (i = 0; i < box->h; i++) - memcpy(srf->snooper.image + i * image_pitch, - virtual + i * cmd->dma.guest.pitch, - box->w * desc->pitchBytesPerBlock); - } - - srf->snooper.age++; - - ttm_bo_kunmap(&map); -err_unreserve: - ttm_bo_unreserve(bo); -} - -/** - * vmw_kms_legacy_hotspot_clear - Clear legacy hotspots - * - * @dev_priv: Pointer to the device private struct. - * - * Clears all legacy hotspots. - */ -void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->drm; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - - drm_modeset_lock_all(dev); - drm_for_each_crtc(crtc, dev) { - du = vmw_crtc_to_du(crtc); - - du->hotspot_x = 0; - du->hotspot_y = 0; - } - drm_modeset_unlock_all(dev); -} - -void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) -{ - struct drm_device *dev = &dev_priv->drm; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - - mutex_lock(&dev->mode_config.mutex); - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - du = vmw_crtc_to_du(crtc); - if (!du->cursor_surface || - du->cursor_age == du->cursor_surface->snooper.age || - !du->cursor_surface->snooper.image) - continue; - - du->cursor_age = du->cursor_surface->snooper.age; - vmw_send_define_cursor_cmd(dev_priv, - du->cursor_surface->snooper.image, - VMW_CURSOR_SNOOP_WIDTH, - VMW_CURSOR_SNOOP_HEIGHT, - du->hotspot_x + du->core_hotspot_x, - du->hotspot_y + du->core_hotspot_y); - } - - mutex_unlock(&dev->mode_config.mutex); -} - - -void vmw_du_cursor_plane_destroy(struct drm_plane *plane) -{ - struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); - u32 i; - - vmw_cursor_update_position(vmw_priv(plane->dev), false, 0, 0); - - for (i = 0; i < ARRAY_SIZE(vcp->cursor_mobs); i++) - vmw_du_destroy_cursor_mob(&vcp->cursor_mobs[i]); - - drm_plane_cleanup(plane); -} - void vmw_du_primary_plane_destroy(struct drm_plane *plane) { @@ -574,262 +88,6 @@ vmw_du_plane_cleanup_fb(struct drm_plane *plane, } -/** - * vmw_du_cursor_plane_map_cm - Maps the cursor mobs. - * - * @vps: plane_state - * - * Returns 0 on success - */ - -static int -vmw_du_cursor_plane_map_cm(struct vmw_plane_state *vps) -{ - int ret; - u32 size = vmw_du_cursor_mob_size(vps->base.crtc_w, vps->base.crtc_h); - struct ttm_buffer_object *bo; - - if (!vps->cursor.bo) - return -EINVAL; - - bo = &vps->cursor.bo->tbo; - - if (bo->base.size < size) - return -EINVAL; - - if (vps->cursor.bo->map.virtual) - return 0; - - ret = ttm_bo_reserve(bo, false, false, NULL); - if (unlikely(ret != 0)) - return -ENOMEM; - - vmw_bo_map_and_cache(vps->cursor.bo); - - ttm_bo_unreserve(bo); - - if (unlikely(ret != 0)) - return -ENOMEM; - - return 0; -} - - -/** - * vmw_du_cursor_plane_unmap_cm - Unmaps the cursor mobs. 
- * - * @vps: state of the cursor plane - * - * Returns 0 on success - */ - -static int -vmw_du_cursor_plane_unmap_cm(struct vmw_plane_state *vps) -{ - int ret = 0; - struct vmw_bo *vbo = vps->cursor.bo; - - if (!vbo || !vbo->map.virtual) - return 0; - - ret = ttm_bo_reserve(&vbo->tbo, true, false, NULL); - if (likely(ret == 0)) { - vmw_bo_unmap(vbo); - ttm_bo_unreserve(&vbo->tbo); - } - - return ret; -} - - -/** - * vmw_du_cursor_plane_cleanup_fb - Unpins the plane surface - * - * @plane: cursor plane - * @old_state: contains the state to clean up - * - * Unmaps all cursor bo mappings and unpins the cursor surface - * - * Returns 0 on success - */ -void -vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state); - - if (!vmw_user_object_is_null(&vps->uo)) - vmw_user_object_unmap(&vps->uo); - - vmw_du_cursor_plane_unmap_cm(vps); - vmw_du_put_cursor_mob(vcp, vps); - - vmw_du_plane_unpin_surf(vps); - vmw_user_object_unref(&vps->uo); -} - - -/** - * vmw_du_cursor_plane_prepare_fb - Readies the cursor by referencing it - * - * @plane: display plane - * @new_state: info on the new plane state, including the FB - * - * Returns 0 on success - */ -int -vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_framebuffer *fb = new_state->fb; - struct vmw_cursor_plane *vcp = vmw_plane_to_vcp(plane); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); - struct vmw_bo *bo = NULL; - int ret = 0; - - if (!vmw_user_object_is_null(&vps->uo)) { - vmw_user_object_unmap(&vps->uo); - vmw_user_object_unref(&vps->uo); - } - - if (fb) { - if (vmw_framebuffer_to_vfb(fb)->bo) { - vps->uo.buffer = vmw_framebuffer_to_vfbd(fb)->buffer; - vps->uo.surface = NULL; - } else { - memcpy(&vps->uo, &vmw_framebuffer_to_vfbs(fb)->uo, sizeof(vps->uo)); - } - vmw_user_object_ref(&vps->uo); - } - - bo = vmw_user_object_buffer(&vps->uo); - if (bo) { - struct ttm_operation_ctx ctx = {false, false}; - - ret = ttm_bo_reserve(&bo->tbo, true, false, NULL); - if (ret != 0) - return -ENOMEM; - - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (ret != 0) - return -ENOMEM; - - vmw_bo_pin_reserved(bo, true); - if (vmw_framebuffer_to_vfb(fb)->bo) { - const u32 size = new_state->crtc_w * new_state->crtc_h * sizeof(u32); - - (void)vmw_bo_map_and_cache_size(bo, size); - } else { - vmw_bo_map_and_cache(bo); - } - ttm_bo_unreserve(&bo->tbo); - } - - if (!vmw_user_object_is_null(&vps->uo)) { - vmw_du_get_cursor_mob(vcp, vps); - vmw_du_cursor_plane_map_cm(vps); - } - - return 0; -} - - -void -vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, - plane); - struct drm_crtc *crtc = new_state->crtc ?: old_state->crtc; - struct vmw_private *dev_priv = vmw_priv(crtc->dev); - struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); - struct vmw_plane_state *old_vps = vmw_plane_state_to_vps(old_state); - struct vmw_bo *old_bo = NULL; - struct vmw_bo *new_bo = NULL; - struct ww_acquire_ctx ctx; - s32 hotspot_x, hotspot_y; - int ret; - - hotspot_x = du->hotspot_x + new_state->hotspot_x; - hotspot_y = du->hotspot_y + new_state->hotspot_y; - - 
du->cursor_surface = vmw_user_object_surface(&vps->uo); - - if (vmw_user_object_is_null(&vps->uo)) { - vmw_cursor_update_position(dev_priv, false, 0, 0); - return; - } - - vps->cursor.hotspot_x = hotspot_x; - vps->cursor.hotspot_y = hotspot_y; - - if (du->cursor_surface) - du->cursor_age = du->cursor_surface->snooper.age; - - ww_acquire_init(&ctx, &reservation_ww_class); - - if (!vmw_user_object_is_null(&old_vps->uo)) { - old_bo = vmw_user_object_buffer(&old_vps->uo); - ret = ttm_bo_reserve(&old_bo->tbo, false, false, &ctx); - if (ret != 0) - return; - } - - if (!vmw_user_object_is_null(&vps->uo)) { - new_bo = vmw_user_object_buffer(&vps->uo); - if (old_bo != new_bo) { - ret = ttm_bo_reserve(&new_bo->tbo, false, false, &ctx); - if (ret != 0) { - if (old_bo) { - ttm_bo_unreserve(&old_bo->tbo); - ww_acquire_fini(&ctx); - } - return; - } - } else { - new_bo = NULL; - } - } - if (!vmw_du_cursor_plane_has_changed(old_vps, vps)) { - /* - * If it hasn't changed, avoid making the device do extra - * work by keeping the old cursor active. - */ - struct vmw_cursor_plane_state tmp = old_vps->cursor; - old_vps->cursor = vps->cursor; - vps->cursor = tmp; - } else { - void *image = vmw_du_cursor_plane_acquire_image(vps); - if (image) - vmw_cursor_update_image(dev_priv, vps, image, - new_state->crtc_w, - new_state->crtc_h, - hotspot_x, hotspot_y); - } - - if (new_bo) - ttm_bo_unreserve(&new_bo->tbo); - if (old_bo) - ttm_bo_unreserve(&old_bo->tbo); - - ww_acquire_fini(&ctx); - - du->cursor_x = new_state->crtc_x + du->set_gui_x; - du->cursor_y = new_state->crtc_y + du->set_gui_y; - - vmw_cursor_update_position(dev_priv, true, - du->cursor_x + hotspot_x, - du->cursor_y + hotspot_y); - - du->core_hotspot_x = hotspot_x - du->hotspot_x; - du->core_hotspot_y = hotspot_y - du->hotspot_y; -} - - /** * vmw_du_primary_plane_atomic_check - check if the new state is okay * @@ -873,66 +131,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, return ret; } - -/** - * vmw_du_cursor_plane_atomic_check - check if the new state is okay - * - * @plane: cursor plane - * @state: info on the new plane state - * - * This is a chance to fail if the new cursor state does not fit - * our requirements. 
- * - * Returns 0 on success - */ -int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state) -{ - struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, - plane); - int ret = 0; - struct drm_crtc_state *crtc_state = NULL; - struct vmw_surface *surface = NULL; - struct drm_framebuffer *fb = new_state->fb; - - if (new_state->crtc) - crtc_state = drm_atomic_get_new_crtc_state(new_state->state, - new_state->crtc); - - ret = drm_atomic_helper_check_plane_state(new_state, crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - true, true); - if (ret) - return ret; - - /* Turning off */ - if (!fb) - return 0; - - /* A lot of the code assumes this */ - if (new_state->crtc_w != 64 || new_state->crtc_h != 64) { - DRM_ERROR("Invalid cursor dimensions (%d, %d)\n", - new_state->crtc_w, new_state->crtc_h); - return -EINVAL; - } - - if (!vmw_framebuffer_to_vfb(fb)->bo) { - surface = vmw_user_object_surface(&vmw_framebuffer_to_vfbs(fb)->uo); - - WARN_ON(!surface); - - if (!surface || - (!surface->snooper.image && !surface->res.guest_memory_bo)) { - DRM_ERROR("surface not suitable for cursor\n"); - return -EINVAL; - } - } - - return 0; -} - - int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { @@ -1076,7 +274,7 @@ vmw_du_plane_duplicate_state(struct drm_plane *plane) vps->pinned = 0; vps->cpp = 0; - memset(&vps->cursor, 0, sizeof(vps->cursor)); + vps->cursor.mob = NULL; /* Each ref counted resource needs to be acquired again */ vmw_user_object_ref(&vps->uo); @@ -1221,7 +419,20 @@ static void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer) { struct vmw_framebuffer_surface *vfbs = vmw_framebuffer_to_vfbs(framebuffer); + struct vmw_bo *bo = vmw_user_object_buffer(&vfbs->uo); + struct vmw_surface *surf = vmw_user_object_surface(&vfbs->uo); + if (bo) { + vmw_bo_dirty_release(bo); + /* + * bo->dirty is reference counted so it being NULL + * means that the surface wasn't coherent to begin + * with and so we have to free the dirty tracker + * in the vmw_resource + */ + if (!bo->dirty && surf && surf->res.dirty) + surf->res.func->dirty_free(&surf->res); + } drm_framebuffer_cleanup(framebuffer); vmw_user_object_unref(&vfbs->uo); @@ -1375,6 +586,7 @@ static void vmw_framebuffer_bo_destroy(struct drm_framebuffer *framebuffer) struct vmw_framebuffer_bo *vfbd = vmw_framebuffer_to_vfbd(framebuffer); + vmw_bo_dirty_release(vfbd->buffer); drm_framebuffer_cleanup(framebuffer); vmw_bo_unreference(&vfbd->buffer); @@ -1505,6 +717,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_framebuffer *vfb = NULL; struct vmw_user_object uo = {0}; + struct vmw_bo *bo; + struct vmw_surface *surface; int ret; /* returns either a bo or surface */ @@ -1534,6 +748,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, } err_out: + bo = vmw_user_object_buffer(&uo); + surface = vmw_user_object_surface(&uo); /* vmw_user_object_lookup takes one ref so does new_fb */ vmw_user_object_unref(&uo); @@ -1542,6 +758,14 @@ err_out: return ERR_PTR(ret); } + ttm_bo_reserve(&bo->tbo, false, false, NULL); + ret = vmw_bo_dirty_add(bo); + if (!ret && surface && surface->res.func->dirty_alloc) { + surface->res.coherent = true; + ret = surface->res.func->dirty_alloc(&surface->res); + } + ttm_bo_unreserve(&bo->tbo); + return &vfb->base; } @@ -1974,44 +1198,6 @@ int vmw_kms_close(struct vmw_private *dev_priv) return ret; } -int 
vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) -{ - struct drm_vmw_cursor_bypass_arg *arg = data; - struct vmw_display_unit *du; - struct drm_crtc *crtc; - int ret = 0; - - mutex_lock(&dev->mode_config.mutex); - if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) { - - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - du = vmw_crtc_to_du(crtc); - du->hotspot_x = arg->xhot; - du->hotspot_y = arg->yhot; - } - - mutex_unlock(&dev->mode_config.mutex); - return 0; - } - - crtc = drm_crtc_find(dev, file_priv, arg->crtc_id); - if (!crtc) { - ret = -ENOENT; - goto out; - } - - du = vmw_crtc_to_du(crtc); - - du->hotspot_x = arg->xhot; - du->hotspot_y = arg->yhot; - -out: - mutex_unlock(&dev->mode_config.mutex); - - return ret; -} - int vmw_kms_write_svga(struct vmw_private *vmw_priv, unsigned width, unsigned height, unsigned pitch, unsigned bpp, unsigned depth) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 4eab581883e2..511e29cdb987 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -1,40 +1,21 @@ /* SPDX-License-Identifier: GPL-2.0 OR MIT */ /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * **************************************************************************/ #ifndef VMWGFX_KMS_H_ #define VMWGFX_KMS_H_ +#include "vmwgfx_cursor_plane.h" +#include "vmwgfx_drv.h" + #include #include #include -#include "vmwgfx_drv.h" - /** * struct vmw_du_update_plane - Closure structure for vmw_du_helper_plane_update * @plane: Plane which is being updated. 
@@ -235,16 +216,11 @@ static const uint32_t __maybe_unused vmw_primary_plane_formats[] = { DRM_FORMAT_XRGB1555, }; -static const uint32_t __maybe_unused vmw_cursor_plane_formats[] = { - DRM_FORMAT_ARGB8888, -}; - #define vmw_crtc_state_to_vcs(x) container_of(x, struct vmw_crtc_state, base) #define vmw_plane_state_to_vps(x) container_of(x, struct vmw_plane_state, base) #define vmw_connector_state_to_vcs(x) \ container_of(x, struct vmw_connector_state, base) -#define vmw_plane_to_vcp(x) container_of(x, struct vmw_cursor_plane, base) /** * Derived class for crtc state object @@ -255,11 +231,6 @@ struct vmw_crtc_state { struct drm_crtc_state base; }; -struct vmw_cursor_plane_state { - struct vmw_bo *bo; - s32 hotspot_x; - s32 hotspot_y; -}; /** * Derived class for plane state object @@ -283,7 +254,6 @@ struct vmw_plane_state { /* For CPU Blit */ unsigned int cpp; - bool surf_mapped; struct vmw_cursor_plane_state cursor; }; @@ -317,17 +287,6 @@ struct vmw_connector_state { int gui_y; }; -/** - * Derived class for cursor plane object - * - * @base DRM plane object - * @cursor.cursor_mobs Cursor mobs available for re-use - */ -struct vmw_cursor_plane { - struct drm_plane base; - - struct vmw_bo *cursor_mobs[3]; -}; /** * Base class display unit. @@ -343,17 +302,6 @@ struct vmw_display_unit { struct drm_plane primary; struct vmw_cursor_plane cursor; - struct vmw_surface *cursor_surface; - size_t cursor_age; - - int cursor_x; - int cursor_y; - - int hotspot_x; - int hotspot_y; - s32 core_hotspot_x; - s32 core_hotspot_y; - unsigned unit; /* @@ -403,8 +351,6 @@ struct vmw_display_unit { */ void vmw_du_init(struct vmw_display_unit *du); void vmw_du_cleanup(struct vmw_display_unit *du); -void vmw_du_crtc_save(struct drm_crtc *crtc); -void vmw_du_crtc_restore(struct drm_crtc *crtc); int vmw_du_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t size, @@ -460,19 +406,10 @@ void vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv); /* Universal Plane Helpers */ void vmw_du_primary_plane_destroy(struct drm_plane *plane); -void vmw_du_cursor_plane_destroy(struct drm_plane *plane); /* Atomic Helpers */ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state); -int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane, - struct drm_atomic_state *state); -void vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, - struct drm_atomic_state *state); -int vmw_du_cursor_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state); -void vmw_du_cursor_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state); void vmw_du_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state); void vmw_du_plane_reset(struct drm_plane *plane); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index f0b429525467..c23c9195f0dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -372,7 +372,7 @@ static const struct drm_plane_funcs vmw_ldu_plane_funcs = { static const struct drm_plane_funcs vmw_ldu_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vmw_du_cursor_plane_destroy, + .destroy = vmw_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, @@ -383,10 +383,10 @@ static const struct drm_plane_funcs vmw_ldu_cursor_funcs = { */ static 
const struct drm_plane_helper_funcs vmw_ldu_cursor_plane_helper_funcs = { - .atomic_check = vmw_du_cursor_plane_atomic_check, - .atomic_update = vmw_du_cursor_plane_atomic_update, - .prepare_fb = vmw_du_cursor_plane_prepare_fb, - .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, + .atomic_check = vmw_cursor_plane_atomic_check, + .atomic_update = vmw_cursor_plane_atomic_update, + .prepare_fb = vmw_cursor_plane_prepare_fb, + .cleanup_fb = vmw_cursor_plane_cleanup_fb, }; static const struct diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 74ff2812d66a..7de20e56082c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -1,27 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2019-2023 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. + * Copyright (c) 2019-2025 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. 
* **************************************************************************/ #include "vmwgfx_bo.h" @@ -71,6 +52,11 @@ struct vmw_bo_dirty { unsigned long bitmap[]; }; +bool vmw_bo_is_dirty(struct vmw_bo *vbo) +{ + return vbo->dirty && (vbo->dirty->start < vbo->dirty->end); +} + /** * vmw_bo_dirty_scan_pagetable - Perform a pagetable scan for dirty bits * @vbo: The buffer object to scan @@ -341,6 +327,41 @@ void vmw_bo_dirty_transfer_to_res(struct vmw_resource *res) dirty->end = res_start; } +void vmw_bo_dirty_clear(struct vmw_bo *vbo) +{ + struct vmw_bo_dirty *dirty = vbo->dirty; + pgoff_t start, cur, end; + unsigned long res_start = 0; + unsigned long res_end = vbo->tbo.base.size; + + WARN_ON_ONCE(res_start & ~PAGE_MASK); + res_start >>= PAGE_SHIFT; + res_end = DIV_ROUND_UP(res_end, PAGE_SIZE); + + if (res_start >= dirty->end || res_end <= dirty->start) + return; + + cur = max(res_start, dirty->start); + res_end = max(res_end, dirty->end); + while (cur < res_end) { + unsigned long num; + + start = find_next_bit(&dirty->bitmap[0], res_end, cur); + if (start >= res_end) + break; + + end = find_next_zero_bit(&dirty->bitmap[0], res_end, start + 1); + cur = end + 1; + num = end - start; + bitmap_clear(&dirty->bitmap[0], start, num); + } + + if (res_start <= dirty->start && res_end > dirty->start) + dirty->start = res_end; + if (res_start < dirty->end && res_end >= dirty->end) + dirty->end = res_start; +} + /** * vmw_bo_dirty_clear_res - Clear a resource's dirty region from * its backing mob. diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 32029d80b72b..6149a9c981da 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -764,7 +764,7 @@ static const struct drm_plane_funcs vmw_sou_plane_funcs = { static const struct drm_plane_funcs vmw_sou_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vmw_du_cursor_plane_destroy, + .destroy = vmw_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, @@ -775,10 +775,10 @@ static const struct drm_plane_funcs vmw_sou_cursor_funcs = { */ static const struct drm_plane_helper_funcs vmw_sou_cursor_plane_helper_funcs = { - .atomic_check = vmw_du_cursor_plane_atomic_check, - .atomic_update = vmw_du_cursor_plane_atomic_update, - .prepare_fb = vmw_du_cursor_plane_prepare_fb, - .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, + .atomic_check = vmw_cursor_plane_atomic_check, + .atomic_update = vmw_cursor_plane_atomic_update, + .prepare_fb = vmw_cursor_plane_prepare_fb, + .cleanup_fb = vmw_cursor_plane_cleanup_fb, }; static const struct diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index f5d2ed1b0a72..20aab725e53a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -1482,7 +1482,7 @@ static const struct drm_plane_funcs vmw_stdu_plane_funcs = { static const struct drm_plane_funcs vmw_stdu_cursor_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vmw_du_cursor_plane_destroy, + .destroy = vmw_cursor_plane_destroy, .reset = vmw_du_plane_reset, .atomic_duplicate_state = vmw_du_plane_duplicate_state, .atomic_destroy_state = vmw_du_plane_destroy_state, @@ -1494,10 +1494,10 @@ static const struct drm_plane_funcs vmw_stdu_cursor_funcs = { */ static const struct 
drm_plane_helper_funcs vmw_stdu_cursor_plane_helper_funcs = { - .atomic_check = vmw_du_cursor_plane_atomic_check, - .atomic_update = vmw_du_cursor_plane_atomic_update, - .prepare_fb = vmw_du_cursor_plane_prepare_fb, - .cleanup_fb = vmw_du_cursor_plane_cleanup_fb, + .atomic_check = vmw_cursor_plane_atomic_check, + .atomic_update = vmw_cursor_plane_atomic_update, + .prepare_fb = vmw_cursor_plane_prepare_fb, + .cleanup_fb = vmw_cursor_plane_cleanup_fb, }; static const struct @@ -1584,6 +1584,7 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) } drm_plane_helper_add(&cursor->base, &vmw_stdu_cursor_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(&cursor->base); ret = drm_connector_init(dev, connector, &vmw_stdu_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 5721c74da3e0..1a0a544b1ad0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -1,32 +1,13 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright (c) 2009-2024 Broadcom. All Rights Reserved. The term + * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. 
- * **************************************************************************/ #include "vmwgfx_bo.h" +#include "vmwgfx_cursor_plane.h" #include "vmwgfx_drv.h" #include "vmwgfx_resource_priv.h" #include "vmwgfx_so.h" @@ -818,25 +799,11 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, } } res->guest_memory_size = cur_bo_offset; - if (!file_priv->atomic && - metadata->scanout && - metadata->num_sizes == 1 && - metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && - metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && - metadata->format == VMW_CURSOR_SNOOP_FORMAT) { - const struct SVGA3dSurfaceDesc *desc = - vmw_surface_get_desc(VMW_CURSOR_SNOOP_FORMAT); - const u32 cursor_size_bytes = VMW_CURSOR_SNOOP_WIDTH * - VMW_CURSOR_SNOOP_HEIGHT * - desc->pitchBytesPerBlock; - srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL); - if (!srf->snooper.image) { - DRM_ERROR("Failed to allocate cursor_image\n"); - ret = -ENOMEM; - goto out_no_copy; - } - } else { - srf->snooper.image = NULL; + + srf->snooper.image = vmw_cursor_snooper_create(file_priv, metadata); + if (IS_ERR(srf->snooper.image)) { + ret = PTR_ERR(srf->snooper.image); + goto out_no_copy; } if (drm_is_primary_client(file_priv)) From 171e3a45f42593b74434d740936d1d0dc80ed332 Mon Sep 17 00:00:00 2001 From: Zack Rusin Date: Fri, 7 Mar 2025 07:57:39 -0500 Subject: [PATCH 0055/1627] drm/vmwgfx: Bump the minor version Bump the minor version of vmwgfx in order to detect releases where the cursor issues have been fixed. Cursors created with dumb buffer were broken on vmwgfx. Userspace (e.g. kwin) has workarounds for those issues and often disables hardware cursors on vmwgfx. This allows enabling hardware cursors on vmwgfx again. Signed-off-by: Zack Rusin Reviewed-by: Maaz Mombasawala Reviewed-by: Martin Krastev Link: https://patchwork.freedesktop.org/patch/msgid/20250307125836.3877138-3-zack.rusin@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 6fc810632c98..0dfb88fb19e2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -38,7 +38,7 @@ #define VMWGFX_DRIVER_NAME "vmwgfx" #define VMWGFX_DRIVER_MAJOR 2 -#define VMWGFX_DRIVER_MINOR 20 +#define VMWGFX_DRIVER_MINOR 21 #define VMWGFX_DRIVER_PATCHLEVEL 0 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024) #define VMWGFX_NUM_DISPLAY_UNITS 8 From 0039a3b35b10d9c15d3d26320532ab56cc566750 Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Fri, 28 Feb 2025 14:06:33 -0600 Subject: [PATCH 0056/1627] drm/vmwgfx: Add seqno waiter for sync_files Because sync_files are passive waiters they do not participate in the processing of fences like the traditional vmw_fence_wait IOCTL. If userspace exclusively uses sync_files for synchronization then nothing in the kernel actually processes fence updates as interrupts for fences are masked and ignored if the kernel does not indicate to the SVGA device that there are active waiters. This oversight results in a bug where the entire GUI can freeze waiting on a sync_file that will never be signalled as we've masked the interrupts to signal its completion. This bug is incredibly racy as any process which interacts with the fencing code via the 3D stack can process the stuck fences on behalf of the stuck process causing it to run again. Even a simple app like eglinfo is enough to resume the stuck process. 
Usually this bug is seen at a login screen like GDM because there are no other 3D apps running. By adding a seqno waiter we re-enable interrupt based processing of the dma_fences associated with the sync_file which is signalled as part of a dma_fence_callback. This has likely been broken since it was initially added to the kernel in 2017 but has gone unnoticed until mutter recently started using sync_files heavily over the course of 2024 as part of their explicit sync support. Fixes: c906965dee22 ("drm/vmwgfx: Add export fence to file descriptor support") Signed-off-by: Ian Forbes Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20250228200633.642417-1-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f8325905388a..e831e324e737 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -4068,6 +4068,23 @@ static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, return 0; } +/* + * DMA fence callback to remove a seqno_waiter + */ +struct seqno_waiter_rm_context { + struct dma_fence_cb base; + struct vmw_private *dev_priv; +}; + +static void seqno_waiter_rm_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct seqno_waiter_rm_context *ctx = + container_of(cb, struct seqno_waiter_rm_context, base); + + vmw_seqno_waiter_remove(ctx->dev_priv); + kfree(ctx); +} + int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, @@ -4248,6 +4265,15 @@ int vmw_execbuf_process(struct drm_file *file_priv, } else { /* Link the fence with the FD created earlier */ fd_install(out_fence_fd, sync_file->file); + struct seqno_waiter_rm_context *ctx = + kmalloc(sizeof(*ctx), GFP_KERNEL); + ctx->dev_priv = dev_priv; + vmw_seqno_waiter_add(dev_priv); + if (dma_fence_add_callback(&fence->base, &ctx->base, + seqno_waiter_rm_cb) < 0) { + vmw_seqno_waiter_remove(dev_priv); + kfree(ctx); + } } } From 3282422bf251db541fe07c548ca304130d37d754 Mon Sep 17 00:00:00 2001 From: Keisuke Nishimura Date: Tue, 25 Feb 2025 15:52:23 +0100 Subject: [PATCH 0057/1627] drm/vmwgfx: Add error path for xa_store in vmw_bo_add_detached_resource The xa_store() may fail due to memory allocation failure because there is no guarantee that the index is already used. This fix introduces new paths to handle the error. This patch also aligns the order of function calls by calling vmw_bo_add_detached_resource() before ttm_prime_object_init() in order to allow consistent error handling. 
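(Illustrative note, not part of the original commit message.) xa_store() returns either the entry previously stored at that index or an XA_ERROR()-encoded pointer when its internal allocation fails, and xa_err() turns that into 0 or a negative errno. A minimal sketch of the pattern the fix below adopts:

    int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res)
    {
            /* xa_store() may allocate; on failure it returns an XA_ERROR() pointer */
            void *old = xa_store(&vbo->detached_resources,
                                 (unsigned long)res, res, GFP_KERNEL);

            /* 0 on success, negative errno if the store failed */
            return xa_err(old);
    }
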
Fixes: d6667f0ddf46 ("drm/vmwgfx: Fix handling of dumb buffers") Signed-off-by: Keisuke Nishimura Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20250225145223.34773-1-keisuke.nishimura@inria.fr --- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 16 ++++++++++++++-- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index b7766421d2f5..8832e4de86f1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -848,9 +848,9 @@ void vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo) vmw_bo_placement_set(bo, domain, domain); } -void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) +int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) { - xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL); + return xa_err(xa_store(&vbo->detached_resources, (unsigned long)res, res, GFP_KERNEL)); } void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index e97cae2365c8..8c81ae3f5461 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -141,7 +141,7 @@ void vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem); void vmw_bo_swap_notify(struct ttm_buffer_object *bo); -void vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); +int vmw_bo_add_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); void vmw_bo_del_detached_resource(struct vmw_bo *vbo, struct vmw_resource *res); struct vmw_surface *vmw_bo_surface(struct vmw_bo *vbo); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 1a0a544b1ad0..02ab65cc63ec 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -838,7 +838,12 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, vmw_resource_unreference(&res); goto out_unlock; } - vmw_bo_add_detached_resource(res->guest_memory_bo, res); + + ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } } tmp = vmw_resource_reference(&srf->res); @@ -1637,6 +1642,14 @@ vmw_gb_surface_define_internal(struct drm_device *dev, } + if (res->guest_memory_bo) { + ret = vmw_bo_add_detached_resource(res->guest_memory_bo, res); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + } + tmp = vmw_resource_reference(res); ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, VMW_RES_SURFACE, @@ -1651,7 +1664,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev, rep->handle = user_srf->prime.base.handle; rep->backup_size = res->guest_memory_size; if (res->guest_memory_bo) { - vmw_bo_add_detached_resource(res->guest_memory_bo, res); rep->buffer_map_handle = drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); rep->buffer_size = res->guest_memory_bo->tbo.base.size; From 92b8f062a620b1231cc7aef06be871b88b771123 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:34 -0500 Subject: [PATCH 0058/1627] drm/sprd: move to devm_platform_ioremap_resource() usage Replace platform_get_resource + devm_ioremap with just devm_platform_ioremap_resource() 
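(Illustrative note, not from the original patch.) The behavioural difference that matters for error handling is that devm_platform_ioremap_resource() reports failure with an ERR_PTR() value rather than NULL, so callers switch from NULL checks to IS_ERR()/PTR_ERR(), roughly:

    /* before: two calls, NULL on failure */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res)
            return -EINVAL;
    ctx->base = devm_ioremap(dev, res->start, resource_size(res));
    if (!ctx->base)
            return -EFAULT;

    /* after: one call, ERR_PTR() on failure */
    ctx->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(ctx->base))
            return PTR_ERR(ctx->base);
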
Used Coccinelle to do this change. SmPl patch: @rule_2@ identifier res; expression ioremap; identifier pdev; @@ -struct resource *res; ... -res = platform_get_resource(pdev,...); <... -if (!res) { -... -} ...> -ioremap = devm_ioremap(...); +ioremap = devm_platform_ioremap_resource(pdev,0); v2: Address the return handling properly since the new API returns error pointers and not NULL. Cc: Chunyan Zhang Cc: Dmitry Baryshkov Signed-off-by: Anusha Srivatsa Reviewed-by: Chunyan Zhang (v1) Reviewed-by: Maxime Ripard (v1) Link: https://patchwork.freedesktop.org/patch/640854/?series=144073&rev=5 --- drivers/gpu/drm/sprd/sprd_dpu.c | 13 +++---------- drivers/gpu/drm/sprd/sprd_dsi.c | 13 +++---------- 2 files changed, 6 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/sprd/sprd_dpu.c b/drivers/gpu/drm/sprd/sprd_dpu.c index cb2816985305..a3447622a33c 100644 --- a/drivers/gpu/drm/sprd/sprd_dpu.c +++ b/drivers/gpu/drm/sprd/sprd_dpu.c @@ -784,19 +784,12 @@ static int sprd_dpu_context_init(struct sprd_dpu *dpu, { struct platform_device *pdev = to_platform_device(dev); struct dpu_context *ctx = &dpu->ctx; - struct resource *res; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get I/O resource\n"); - return -EINVAL; - } - - ctx->base = devm_ioremap(dev, res->start, resource_size(res)); - if (!ctx->base) { + ctx->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->base)) { dev_err(dev, "failed to map dpu registers\n"); - return -EFAULT; + return PTR_ERR(ctx->base); } ctx->irq = platform_get_irq(pdev, 0); diff --git a/drivers/gpu/drm/sprd/sprd_dsi.c b/drivers/gpu/drm/sprd/sprd_dsi.c index 8fc26479bb6b..23b0e1dc547a 100644 --- a/drivers/gpu/drm/sprd/sprd_dsi.c +++ b/drivers/gpu/drm/sprd/sprd_dsi.c @@ -901,18 +901,11 @@ static int sprd_dsi_context_init(struct sprd_dsi *dsi, { struct platform_device *pdev = to_platform_device(dev); struct dsi_context *ctx = &dsi->ctx; - struct resource *res; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to get I/O resource\n"); - return -EINVAL; - } - - ctx->base = devm_ioremap(dev, res->start, resource_size(res)); - if (!ctx->base) { + ctx->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(ctx->base)) { drm_err(dsi->drm, "failed to map dsi host registers\n"); - return -ENXIO; + return PTR_ERR(ctx->base); } ctx->regmap = devm_regmap_init(dev, ®map_tst_io, dsi, &byte_config); From 67c4ea8267cf015653610278e0dc36c58e9a7363 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:35 -0500 Subject: [PATCH 0059/1627] drm/sti: move to devm_platform_ioremap_resource() usage Replace platform_get_resource/_byname + devm_ioremap with just devm_platform_ioremap_resource() Used Coccinelle to do this change. SmPl patch: @rule@ identifier res; expression ioremap; identifier pdev; constant mem; expression name; @@ -struct resource *res; ... -res = platform_get_resource_byname(pdev,mem,name); <... -if (!res) { -... -} ...> -ioremap = devm_ioremap(...); +ioremap = devm_platform_ioremap_resource_byname(pdev,name); and @rule_2@ identifier res; expression ioremap; identifier pdev; @@ -struct resource *res; ... -res = platform_get_resource(pdev,...); <... -if (!res) { -... -} ...> -ioremap = devm_ioremap(...); +ioremap = devm_platform_ioremap_resource(pdev,0); v2: Fix compilation error. 
v3: Handle returns properly since the new API return error pointers and not NULL Cc: Raphael Gallais-Pou Cc: Alain Volmat Reviewed-by: Maxime Ripard (v2) Acked-by: Raphael Gallais-Pou (v2) Signed-off-by: Anusha Srivatsa Link: https://patchwork.freedesktop.org/patch/640854/?series=144073&rev=5 --- drivers/gpu/drm/sti/sti_compositor.c | 14 +++----------- drivers/gpu/drm/sti/sti_dvo.c | 14 +++----------- drivers/gpu/drm/sti/sti_hda.c | 13 +++---------- drivers/gpu/drm/sti/sti_hdmi.c | 15 +++------------ drivers/gpu/drm/sti/sti_hqvdp.c | 14 +++----------- drivers/gpu/drm/sti/sti_tvout.c | 14 +++----------- drivers/gpu/drm/sti/sti_vtg.c | 14 +++----------- 7 files changed, 21 insertions(+), 77 deletions(-) diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 063f82d23d80..8c529b0cca8b 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -177,7 +177,6 @@ static int sti_compositor_probe(struct platform_device *pdev) struct device_node *np = dev->of_node; struct device_node *vtg_np; struct sti_compositor *compo; - struct resource *res; unsigned int i; compo = devm_kzalloc(dev, sizeof(*compo), GFP_KERNEL); @@ -194,17 +193,10 @@ static int sti_compositor_probe(struct platform_device *pdev) memcpy(&compo->data, of_match_node(compositor_of_match, np)->data, sizeof(struct sti_compositor_data)); - - /* Get Memory ressources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - DRM_ERROR("Get memory resource failed\n"); - return -ENXIO; - } - compo->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (compo->regs == NULL) { + compo->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(compo->regs)) { DRM_ERROR("Register mapping failed\n"); - return -ENXIO; + return PTR_ERR(compo->regs); } /* Get clock resources */ diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index 4dcddd02629b..74a1eef4674e 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -511,7 +511,6 @@ static int sti_dvo_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sti_dvo *dvo; - struct resource *res; struct device_node *np = dev->of_node; DRM_INFO("%s\n", __func__); @@ -523,16 +522,9 @@ static int sti_dvo_probe(struct platform_device *pdev) } dvo->dev = pdev->dev; - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dvo-reg"); - if (!res) { - DRM_ERROR("Invalid dvo resource\n"); - return -ENOMEM; - } - dvo->regs = devm_ioremap(dev, res->start, - resource_size(res)); - if (!dvo->regs) - return -ENOMEM; + dvo->regs = devm_platform_ioremap_resource_byname(pdev, "dvo-reg"); + if (IS_ERR(dvo->regs)) + return PTR_ERR(dvo->regs); dvo->clk_pix = devm_clk_get(dev, "dvo_pix"); if (IS_ERR(dvo->clk_pix)) { diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 14fdc00d2ba0..eedccdf70833 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -750,16 +750,9 @@ static int sti_hda_probe(struct platform_device *pdev) return -ENOMEM; hda->dev = pdev->dev; - - /* Get resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hda-reg"); - if (!res) { - DRM_ERROR("Invalid hda resource\n"); - return -ENOMEM; - } - hda->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!hda->regs) - return -ENOMEM; + hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg"); + if (IS_ERR(hda->regs)) + return PTR_ERR(hda->regs); res = platform_get_resource_byname(pdev, 
IORESOURCE_MEM, "video-dacs-ctrl"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 164a34d793d8..37b8d619066e 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -1380,7 +1380,6 @@ static int sti_hdmi_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sti_hdmi *hdmi; struct device_node *np = dev->of_node; - struct resource *res; struct device_node *ddc; int ret; @@ -1399,17 +1398,9 @@ static int sti_hdmi_probe(struct platform_device *pdev) } hdmi->dev = pdev->dev; - - /* Get resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hdmi-reg"); - if (!res) { - DRM_ERROR("Invalid hdmi resource\n"); - ret = -ENOMEM; - goto release_adapter; - } - hdmi->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!hdmi->regs) { - ret = -ENOMEM; + hdmi->regs = devm_platform_ioremap_resource_byname(pdev, "hdmi-reg"); + if (IS_ERR(hdmi->regs)) { + ret = PTR_ERR(hdmi->regs); goto release_adapter; } diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 0f658709c9d0..03684062309b 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1356,7 +1356,6 @@ static int sti_hqvdp_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *vtg_np; struct sti_hqvdp *hqvdp; - struct resource *res; DRM_DEBUG_DRIVER("\n"); @@ -1367,17 +1366,10 @@ static int sti_hqvdp_probe(struct platform_device *pdev) } hqvdp->dev = dev; - - /* Get Memory resources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("Get memory resource failed\n"); - return -ENXIO; - } - hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!hqvdp->regs) { + hqvdp->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(hqvdp->regs)) { DRM_ERROR("Register mapping failed\n"); - return -ENXIO; + return PTR_ERR(hqvdp->regs); } /* Get clock resources */ diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index af6c06f448c4..6a464b035de8 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -838,7 +838,6 @@ static int sti_tvout_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *node = dev->of_node; struct sti_tvout *tvout; - struct resource *res; DRM_INFO("%s\n", __func__); @@ -850,16 +849,9 @@ static int sti_tvout_probe(struct platform_device *pdev) return -ENOMEM; tvout->dev = dev; - - /* get memory resources */ - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tvout-reg"); - if (!res) { - DRM_ERROR("Invalid glue resource\n"); - return -ENOMEM; - } - tvout->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!tvout->regs) - return -ENOMEM; + tvout->regs = devm_platform_ioremap_resource_byname(pdev, "tvout-reg"); + if (IS_ERR(tvout->regs)) + return PTR_ERR(tvout->regs); /* get reset resources */ tvout->reset = devm_reset_control_get(dev, "tvout"); diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 5ba469b711b5..ee81691b3203 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -380,23 +380,15 @@ static int vtg_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sti_vtg *vtg; - struct resource *res; int ret; vtg = devm_kzalloc(dev, sizeof(*vtg), GFP_KERNEL); if (!vtg) return -ENOMEM; - - /* Get Memory ressources */ - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - 
DRM_ERROR("Get memory resource failed\n"); - return -ENOMEM; - } - vtg->regs = devm_ioremap(dev, res->start, resource_size(res)); - if (!vtg->regs) { + vtg->regs = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(vtg->regs)) { DRM_ERROR("failed to remap I/O memory\n"); - return -ENOMEM; + return PTR_ERR(vtg->regs); } vtg->irq = platform_get_irq(pdev, 0); From 9e75b6ef407fee5d4ed8021cd7ddd9d6a8f7b0e8 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 4 Mar 2025 16:05:37 -0500 Subject: [PATCH 0060/1627] Documentation: Update the todo Update the Documentation to be more precise. v2: Update for clarity v3: Further details in Todo Cc: Thomas Zimmermann Signed-off-by: Anusha Srivatsa Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/640856/?series=144073&rev=5 --- Documentation/gpu/todo.rst | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 256d0d1cb216..c57777a24e03 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -441,14 +441,15 @@ Contact: Thomas Zimmermann Level: Intermediate -Request memory regions in all drivers -------------------------------------- +Request memory regions in all fbdev drivers +-------------------------------------------- -Go through all drivers and add code to request the memory regions that the -driver uses. This requires adding calls to request_mem_region(), +Old/ancient fbdev drivers do not request their memory properly. +Go through these drivers and add code to request the memory regions +that the driver uses. This requires adding calls to request_mem_region(), pci_request_region() or similar functions. Use helpers for managed cleanup -where possible. - +where possible. Problematic areas include hardware that has exclusive ranges +like VGA. VGA16fb does not request the range as it is expected. Drivers are pretty bad at doing this and there used to be conflicts among DRM and fbdev drivers. Still, it's the correct thing to do. From 4b16619608ff14338b6001acb810506079c49749 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 4 Mar 2025 17:29:13 +0200 Subject: [PATCH 0061/1627] drm/i915/hpd: Let an HPD pin be in the disabled state when handling missed IRQs After suspending and resuming the detection on connectors, HPD IRQs that arrived while the detection was suspended, are handled by scheduling the intel_hotplug::hotplug work for them. All HPD pins must be at this point in either the HPD_ENABLED (set for all pins during driver loading/system resuming) or HPD_MARK_DISABLED (set by IRQ storm detection) state: the HPD_DISABLED state for a pin can be set only from the HPD_MARK_DISABLED state by the hotplug work after a storm detection (enabling polling on the given pin/connector), however the hotplug work won't be scheduled while the detection is suspended. A follow-up change will add support for blocking the HPD IRQ handling on a given HPD pin (without disabling the IRQ generation on it), after which it becomes possible to see a pin in the HPD_DISABLED state when unblocking the IRQ handling (since the blocking could've happened for an already disabled pin). Adjust queue_work_for_missed_irqs() accordingly, so that this function can be reused for unblocking the IRQ handling. 
Reviewed-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20250304152917.3407080-3-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_hotplug.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 9692b5c01aea..3fb5feeefa14 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -980,6 +980,7 @@ static void queue_work_for_missed_irqs(struct drm_i915_private *i915) case HPD_MARK_DISABLED: queue_work = true; break; + case HPD_DISABLED: case HPD_ENABLED: break; default: From 0d77a3e0ea90a7ee25755a94694cdfd822c9db6b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 5 Mar 2025 13:48:19 +0200 Subject: [PATCH 0062/1627] drm/i915/hpd: Add support for blocking the IRQ handling on an HPD pin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add support for blocking the IRQ handling on the HPD pin of a given encoder, handling IRQs that arrived while in the blocked state after unblocking the IRQ handling. This will be used by a follow-up change, which blocks/unblocks the IRQ handling around DP link training. This is similar to the intel_hpd_disable/enable() functionality, by also handling encoders/ports with a pulse handler (i.e. also blocking/unblocking the short/long pulse handling) and handling the IRQs arrived in the blocked state after the handling is unblocked (vs. just dropping such IRQs). v2: - Handle encoders without a port assigned to them. - Fix clearing IRQs from intel_hotplug::short_port_mask. v3: - Rename intel_hpd_suspend/resume() to intel_hpd_block/unblock(). (Jani) - Refer to HPD pins as hpd_pin vs. hpd. - Flush dig_port_work in intel_hpd_block() if any encoder using the HPD pin has a pulse handler. v4: - Fix hpd_pin_has_pulse(), checking the encoder's HPD pin. v5: - Rebase on port->hpd_pin tracking. (Ville) v6: (Jani) - Add hpd_pin_is_blocked() helper. - Use the hpd_pin_mask term for a mask of pins instead of hpd_pins. - Prevent decrementing a 0 refcount in unblock_hpd_pin(). 
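(Illustrative note, not part of the original commit message.) A minimal sketch of how the new helpers are meant to be paired by a caller, such as the DP link-training code later in this series:

    intel_hpd_block(encoder);       /* hpd_pulse handlers are now quiesced */

    /* ... work that must not race with HPD pulse handling,
     * e.g. DPCD accesses during link training ...
     */

    intel_hpd_unblock(encoder);     /* IRQs raised while blocked are handled now */
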
Cc: Jani Nikula Cc: Ville Syrjälä Reviewed-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20250305114820.3523077-1-imre.deak@intel.com --- .../gpu/drm/i915/display/intel_display_core.h | 1 + drivers/gpu/drm/i915/display/intel_hotplug.c | 210 +++++++++++++++--- drivers/gpu/drm/i915/display/intel_hotplug.h | 2 + 3 files changed, 188 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index afb2184bf233..3673275f9061 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -160,6 +160,7 @@ struct intel_hotplug { struct { unsigned long last_jiffies; int count; + int blocked_count; enum { HPD_ENABLED = 0, HPD_DISABLED = 1, diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 3fb5feeefa14..94b4dcf10f58 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -349,19 +349,62 @@ static bool intel_encoder_has_hpd_pulse(struct intel_encoder *encoder) enc_to_dig_port(encoder)->hpd_pulse != NULL; } +static bool hpd_pin_has_pulse(struct intel_display *display, enum hpd_pin pin) +{ + struct intel_encoder *encoder; + + for_each_intel_encoder(display->drm, encoder) { + if (encoder->hpd_pin != pin) + continue; + + if (intel_encoder_has_hpd_pulse(encoder)) + return true; + } + + return false; +} + +static bool hpd_pin_is_blocked(struct intel_display *display, enum hpd_pin pin) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + lockdep_assert_held(&i915->irq_lock); + + return display->hotplug.stats[pin].blocked_count; +} + +static u32 get_blocked_hpd_pin_mask(struct intel_display *display) +{ + enum hpd_pin pin; + u32 hpd_pin_mask = 0; + + for_each_hpd_pin(pin) { + if (hpd_pin_is_blocked(display, pin)) + hpd_pin_mask |= BIT(pin); + } + + return hpd_pin_mask; +} + static void i915_digport_work_func(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, display.hotplug.dig_port_work); + struct intel_display *display = + container_of(work, struct intel_display, hotplug.dig_port_work); + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; u32 long_hpd_pin_mask, short_hpd_pin_mask; struct intel_encoder *encoder; + u32 blocked_hpd_pin_mask; u32 old_bits = 0; spin_lock_irq(&dev_priv->irq_lock); - long_hpd_pin_mask = dev_priv->display.hotplug.long_hpd_pin_mask; - dev_priv->display.hotplug.long_hpd_pin_mask = 0; - short_hpd_pin_mask = dev_priv->display.hotplug.short_hpd_pin_mask; - dev_priv->display.hotplug.short_hpd_pin_mask = 0; + + blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); + long_hpd_pin_mask = hotplug->long_hpd_pin_mask & ~blocked_hpd_pin_mask; + hotplug->long_hpd_pin_mask &= ~long_hpd_pin_mask; + short_hpd_pin_mask = hotplug->short_hpd_pin_mask & ~blocked_hpd_pin_mask; + hotplug->short_hpd_pin_mask &= ~short_hpd_pin_mask; + spin_unlock_irq(&dev_priv->irq_lock); for_each_intel_encoder(&dev_priv->drm, encoder) { @@ -406,14 +449,18 @@ static void i915_digport_work_func(struct work_struct *work) */ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) { - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_display *display = to_intel_display(dig_port); + struct drm_i915_private *i915 = to_i915(display->drm); + struct 
intel_hotplug *hotplug = &display->hotplug; struct intel_encoder *encoder = &dig_port->base; spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.short_hpd_pin_mask |= BIT(encoder->hpd_pin); - spin_unlock_irq(&i915->irq_lock); - queue_work(i915->display.hotplug.dp_wq, &i915->display.hotplug.dig_port_work); + hotplug->short_hpd_pin_mask |= BIT(encoder->hpd_pin); + if (!hpd_pin_is_blocked(display, encoder->hpd_pin)) + queue_work(hotplug->dp_wq, &hotplug->dig_port_work); + + spin_unlock_irq(&i915->irq_lock); } /* @@ -421,9 +468,10 @@ void intel_hpd_trigger_irq(struct intel_digital_port *dig_port) */ static void i915_hotplug_work_func(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - display.hotplug.hotplug_work.work); + struct intel_display *display = + container_of(work, struct intel_display, hotplug.hotplug_work.work); + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; struct drm_connector_list_iter conn_iter; struct intel_connector *connector; u32 changed = 0, retry = 0; @@ -431,16 +479,18 @@ static void i915_hotplug_work_func(struct work_struct *work) u32 hpd_retry_bits; struct drm_connector *first_changed_connector = NULL; int changed_connectors = 0; + u32 blocked_hpd_pin_mask; mutex_lock(&dev_priv->drm.mode_config.mutex); drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); spin_lock_irq(&dev_priv->irq_lock); - hpd_event_bits = dev_priv->display.hotplug.event_bits; - dev_priv->display.hotplug.event_bits = 0; - hpd_retry_bits = dev_priv->display.hotplug.retry_bits; - dev_priv->display.hotplug.retry_bits = 0; + blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); + hpd_event_bits = hotplug->event_bits & ~blocked_hpd_pin_mask; + hotplug->event_bits &= ~hpd_event_bits; + hpd_retry_bits = hotplug->retry_bits & ~blocked_hpd_pin_mask; + hotplug->retry_bits &= ~hpd_retry_bits; /* Enable polling for connectors which had HPD IRQ storms */ intel_hpd_irq_storm_switch_to_polling(dev_priv); @@ -539,6 +589,7 @@ static void i915_hotplug_work_func(struct work_struct *work) void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 pin_mask, u32 long_mask) { + struct intel_display *display = to_intel_display(&dev_priv->drm); struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; @@ -573,7 +624,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, "digital hpd on [ENCODER:%d:%s] - %s\n", encoder->base.base.id, encoder->base.name, long_hpd ? 
"long" : "short"); - queue_dig = true; + + if (!hpd_pin_is_blocked(display, pin)) + queue_dig = true; if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); @@ -617,7 +670,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, } else { dev_priv->display.hotplug.event_bits |= BIT(pin); long_hpd = true; - queue_hp = true; + + if (!hpd_pin_is_blocked(display, pin)) + queue_hp = true; } if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { @@ -915,11 +970,15 @@ static bool cancel_all_detection_work(struct drm_i915_private *i915) void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) { + struct intel_display *display = to_intel_display(&dev_priv->drm); + if (!HAS_DISPLAY(dev_priv)) return; spin_lock_irq(&dev_priv->irq_lock); + drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display)); + dev_priv->display.hotplug.long_hpd_pin_mask = 0; dev_priv->display.hotplug.short_hpd_pin_mask = 0; dev_priv->display.hotplug.event_bits = 0; @@ -966,19 +1025,22 @@ void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) static void queue_work_for_missed_irqs(struct drm_i915_private *i915) { - bool queue_work = false; + struct intel_display *display = to_intel_display(&i915->drm); + struct intel_hotplug *hotplug = &display->hotplug; + bool queue_hp_work = false; + u32 blocked_hpd_pin_mask; enum hpd_pin pin; lockdep_assert_held(&i915->irq_lock); - if (i915->display.hotplug.event_bits || - i915->display.hotplug.retry_bits) - queue_work = true; + blocked_hpd_pin_mask = get_blocked_hpd_pin_mask(display); + if ((hotplug->event_bits | hotplug->retry_bits) & ~blocked_hpd_pin_mask) + queue_hp_work = true; for_each_hpd_pin(pin) { switch (i915->display.hotplug.stats[pin].state) { case HPD_MARK_DISABLED: - queue_work = true; + queue_hp_work = true; break; case HPD_DISABLED: case HPD_ENABLED: @@ -988,10 +1050,108 @@ static void queue_work_for_missed_irqs(struct drm_i915_private *i915) } } - if (queue_work) + if ((hotplug->long_hpd_pin_mask | hotplug->short_hpd_pin_mask) & ~blocked_hpd_pin_mask) + queue_work(hotplug->dp_wq, &hotplug->dig_port_work); + + if (queue_hp_work) queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); } +static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + + lockdep_assert_held(&i915->irq_lock); + + hotplug->stats[pin].blocked_count++; + + return hotplug->stats[pin].blocked_count == 1; +} + +static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + + lockdep_assert_held(&i915->irq_lock); + + if (drm_WARN_ON(display->drm, hotplug->stats[pin].blocked_count == 0)) + return true; + + hotplug->stats[pin].blocked_count--; + + return hotplug->stats[pin].blocked_count == 0; +} + +/** + * intel_hpd_block - Block handling of HPD IRQs on an HPD pin + * @encoder: Encoder to block the HPD handling for + * + * Blocks the handling of HPD IRQs on the HPD pin of @encoder. + * + * On return: + * - It's guaranteed that the blocked encoders' HPD pulse handler + * (via intel_digital_port::hpd_pulse()) is not running. + * - The hotplug event handling (via intel_encoder::hotplug()) of an + * HPD IRQ pending at the time this function is called may be still + * running. 
+ * - Detection on the encoder's connector (via + * drm_connector_helper_funcs::detect_ctx(), + * drm_connector_funcs::detect()) remains allowed, for instance as part of + * userspace connector probing, or DRM core's connector polling. + * + * The call must be followed by calling intel_hpd_unblock(). + * + * Note that the handling of HPD IRQs for another encoder using the same HPD + * pin as that of @encoder will be also blocked. + */ +void intel_hpd_block(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + bool do_flush = false; + + if (encoder->hpd_pin == HPD_NONE) + return; + + spin_lock_irq(&i915->irq_lock); + + if (block_hpd_pin(display, encoder->hpd_pin)) + do_flush = true; + + spin_unlock_irq(&i915->irq_lock); + + if (do_flush && hpd_pin_has_pulse(display, encoder->hpd_pin)) + flush_work(&hotplug->dig_port_work); +} + +/** + * intel_hpd_unblock - Unblock handling of HPD IRQs on an HPD pin + * @encoder: Encoder to unblock the HPD handling for + * + * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was + * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the + * HPD pin while it was blocked will be handled for @encoder and for any + * other encoder sharing the same HPD pin. + */ +void intel_hpd_unblock(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); + + if (encoder->hpd_pin == HPD_NONE) + return; + + spin_lock_irq(&i915->irq_lock); + + if (unblock_hpd_pin(display, encoder->hpd_pin)) + queue_work_for_missed_irqs(i915); + + spin_unlock_irq(&i915->irq_lock); +} + void intel_hpd_enable_detection_work(struct drm_i915_private *i915) { spin_lock_irq(&i915->irq_lock); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index d6986902b054..5f9857136f5e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -28,6 +28,8 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); enum hpd_pin intel_hpd_pin_default(enum port port); bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); +void intel_hpd_block(struct intel_encoder *encoder); +void intel_hpd_unblock(struct intel_encoder *encoder); void intel_hpd_debugfs_register(struct drm_i915_private *i915); void intel_hpd_enable_detection_work(struct drm_i915_private *i915); From 35021b5b15de0c4eceecda9e2dadab2e5e56b7e2 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 4 Mar 2025 17:29:15 +0200 Subject: [PATCH 0063/1627] drm/i915/dp: Fix link training interrupted by a short HPD pulse During Display Port link training the handling of HPD pulses should be prevented, as that handling can interfere with the link training: - Accessing DPCD registers outside the range of link training registers are not allowed by the Standard (see DP Standard v2.1, 3.5.2.16.1, 3.6.6.1). The pulse handler reads the DPRX capability registers, which are outside of the allowed range. - Switching of the LTTPR transparent/non-transparent mode may reset the LTTPRs on the link, thus aborting any ongoing link training. The pulse handler does set the LTTPR mode, thus it could unexpectedly abort the ongoing link training. 
Block/unblock the HPD pulse handling for the duration of the link training to prevent the above DPCD register accesses / LTTPR mode change. Apart from the above scenarios, there are other ways a non-link training DPCD register could be accessed during link training: via the DRM AUX device node, or via DPCD register probing (as performed by drm_dp_dpcd_probe()). These will be addressed by a follow-up change. v2: Rebase on the intel_hpd_suspend/resume -> intel_hpd_block/unblock() rename change. Reviewed-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20250304152917.3407080-5-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_dp_link_training.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 581f1dab618e..5d549ac4de1c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -1124,6 +1124,8 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + intel_dp->link_trained = true; intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); @@ -1134,6 +1136,8 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp, wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n"); } + + intel_hpd_unblock(encoder); } static bool @@ -1616,7 +1620,11 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, * non-transparent mode. During an earlier LTTPR detection this * could've been prevented by an active link. */ - int lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); + int lttpr_count; + + intel_hpd_block(encoder); + + lttpr_count = intel_dp_init_lttpr_and_dprx_caps(intel_dp); if (lttpr_count < 0) /* Still continue with enabling the port and link training. */ From 29c09cf200f736138707857696d1f6db2db0299b Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 5 Mar 2025 13:48:20 +0200 Subject: [PATCH 0064/1627] drm/i915/dp: Queue a link check after link training is complete After link training - both in case of a passing and failing LT result - a work is scheduled to check the link state. This check should take place after the link training is completed by disabling the link training pattern and setting intel_dp::link_trained=true. Atm, the work is scheduled before these steps, which may result in checking the link state too early (and thus not retraining the link as expected). Fix the above by scheduling the link check work after link training is complete. v2: - Add MAX_SEQ_TRAIN_FAILURES instead of open-coding it. 
(Jani) Reviewed-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20250305114820.3523077-2-imre.deak@intel.com --- .../gpu/drm/i915/display/intel_dp_link_training.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 5d549ac4de1c..ded246bbf232 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -56,6 +56,8 @@ lt_dbg(_intel_dp, _dp_phy, "Sink disconnected: " _format, ## __VA_ARGS__); \ } while (0) +#define MAX_SEQ_TRAIN_FAILURES 2 + static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp) { memset(intel_dp->lttpr_common_caps, 0, sizeof(intel_dp->lttpr_common_caps)); @@ -1124,6 +1126,7 @@ intel_dp_128b132b_intra_hop(struct intel_dp *intel_dp, void intel_dp_stop_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp->link_trained = true; @@ -1138,6 +1141,13 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp, } intel_hpd_unblock(encoder); + + if (!display->hotplug.ignore_long_hpd && + intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) { + int delay_ms = intel_dp->link.seq_train_failures ? 0 : 2000; + + intel_encoder_link_check_queue_work(encoder, delay_ms); + } } static bool @@ -1642,7 +1652,6 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, lt_dbg(intel_dp, DP_PHY_DPRX, "Forcing link training failure\n"); } else if (passed) { intel_dp->link.seq_train_failures = 0; - intel_encoder_link_check_queue_work(encoder, 2000); return; } @@ -1665,10 +1674,8 @@ void intel_dp_start_link_train(struct intel_atomic_state *state, return; } - if (intel_dp->link.seq_train_failures < 2) { - intel_encoder_link_check_queue_work(encoder, 0); + if (intel_dp->link.seq_train_failures < MAX_SEQ_TRAIN_FAILURES) return; - } if (intel_dp_schedule_fallback_link_training(state, intel_dp, crtc_state)) return; From 6ace085c453ccdcad34e64eead21eb120270c383 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 4 Mar 2025 17:29:17 +0200 Subject: [PATCH 0065/1627] drm/i915/crt: Use intel_hpd_block/unblock() instead of intel_hpd_disable/enable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit intel_hpd_disable/enable() have the same purpose as intel_hpd_block/unblock(), except that disable/enable will drop any HPD IRQs which were triggered while the HPD was disabled, while block/unblock will handle such IRQs after the IRQ handling is unblocked. Use intel_hpd_block/unblock() for crt as well, by adding a helper to explicitly clear any pending IRQs before unblocking. v2: - Handle encoders without a port assigned to them. - Rebase on change in intel_hpd_suspend() documentation. v3: - Rebase on the suspend/resume -> block/unblock rename change. - Clear the pending events only after all encoders have unblocked the HPD handling. - Clear the short/long port events for all encoders using the given HPD pin. v4: - Rebase on port->hpd_pin tracking. 
(Ville) Cc: Ville Syrjälä Reviewed-by: Jani Nikula Signed-off-by: Imre Deak Link: https://patchwork.freedesktop.org/patch/msgid/20250304152917.3407080-7-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_crt.c | 7 +-- drivers/gpu/drm/i915/display/intel_hotplug.c | 60 +++++++++++--------- drivers/gpu/drm/i915/display/intel_hotplug.h | 3 +- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 76ffb3f8467c..bca91d49cb96 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -532,8 +532,6 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) { struct intel_display *display = to_intel_display(connector->dev); struct intel_crt *crt = intel_attached_crt(to_intel_connector(connector)); - struct drm_i915_private *dev_priv = to_i915(connector->dev); - bool reenable_hpd; u32 adpa; bool ret; u32 save_adpa; @@ -550,7 +548,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) * * Just disable HPD interrupts here to prevent this */ - reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); + intel_hpd_block(&crt->base); save_adpa = adpa = intel_de_read(display, crt->adpa_reg); drm_dbg_kms(display->drm, @@ -577,8 +575,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) drm_dbg_kms(display->drm, "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); - if (reenable_hpd) - intel_hpd_enable(dev_priv, crt->base.hpd_pin); + intel_hpd_clear_and_unblock(&crt->base); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 94b4dcf10f58..c69b1f5fd160 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -996,33 +996,6 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n"); } -bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) -{ - bool ret = false; - - if (pin == HPD_NONE) - return false; - - spin_lock_irq(&dev_priv->irq_lock); - if (dev_priv->display.hotplug.stats[pin].state == HPD_ENABLED) { - dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; - ret = true; - } - spin_unlock_irq(&dev_priv->irq_lock); - - return ret; -} - -void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) -{ - if (pin == HPD_NONE) - return; - - spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; - spin_unlock_irq(&dev_priv->irq_lock); -} - static void queue_work_for_missed_irqs(struct drm_i915_private *i915) { struct intel_display *display = to_intel_display(&i915->drm); @@ -1101,7 +1074,8 @@ static bool unblock_hpd_pin(struct intel_display *display, enum hpd_pin pin) * drm_connector_funcs::detect()) remains allowed, for instance as part of * userspace connector probing, or DRM core's connector polling. * - * The call must be followed by calling intel_hpd_unblock(). + * The call must be followed by calling intel_hpd_unblock(), or + * intel_hpd_clear_and_unblock(). * * Note that the handling of HPD IRQs for another encoder using the same HPD * pin as that of @encoder will be also blocked. 
@@ -1152,6 +1126,36 @@ void intel_hpd_unblock(struct intel_encoder *encoder) spin_unlock_irq(&i915->irq_lock); } +/** + * intel_hpd_clear_and_unblock - Unblock handling of new HPD IRQs on an HPD pin + * @encoder: Encoder to unblock the HPD handling for + * + * Unblock the handling of HPD IRQs on the HPD pin of @encoder, which was + * previously blocked by intel_hpd_block(). Any HPD IRQ raised on the + * HPD pin while it was blocked will be cleared, handling only new IRQs. + */ +void intel_hpd_clear_and_unblock(struct intel_encoder *encoder) +{ + struct intel_display *display = to_intel_display(encoder); + struct drm_i915_private *i915 = to_i915(display->drm); + struct intel_hotplug *hotplug = &display->hotplug; + enum hpd_pin pin = encoder->hpd_pin; + + if (pin == HPD_NONE) + return; + + spin_lock_irq(&i915->irq_lock); + + if (unblock_hpd_pin(display, pin)) { + hotplug->event_bits &= ~BIT(pin); + hotplug->retry_bits &= ~BIT(pin); + hotplug->short_hpd_pin_mask &= ~BIT(pin); + hotplug->long_hpd_pin_mask &= ~BIT(pin); + } + + spin_unlock_irq(&i915->irq_lock); +} + void intel_hpd_enable_detection_work(struct drm_i915_private *i915) { spin_lock_irq(&i915->irq_lock); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index 5f9857136f5e..f189b871904e 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -26,10 +26,9 @@ void intel_hpd_init(struct drm_i915_private *dev_priv); void intel_hpd_init_early(struct drm_i915_private *i915); void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); enum hpd_pin intel_hpd_pin_default(enum port port); -bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); -void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); void intel_hpd_block(struct intel_encoder *encoder); void intel_hpd_unblock(struct intel_encoder *encoder); +void intel_hpd_clear_and_unblock(struct intel_encoder *encoder); void intel_hpd_debugfs_register(struct drm_i915_private *i915); void intel_hpd_enable_detection_work(struct drm_i915_private *i915); From afb7a1d669b7c3f7c0f1678299377d88b57a48f8 Mon Sep 17 00:00:00 2001 From: Vignesh Raman Date: Mon, 17 Feb 2025 11:07:10 +0530 Subject: [PATCH 0066/1627] drm/ci: refactor software-driver stage jobs Move common job configuration for software-driver stage jobs to separate job. 
Acked-by: Helen Koike Reviewed-by: Daniel Stone Link: https://patchwork.freedesktop.org/patch/msgid/20250217053719.442644-2-vignesh.raman@collabora.com Signed-off-by: Vignesh Raman --- drivers/gpu/drm/ci/test.yml | 59 +++++++++++++++---------------------- 1 file changed, 24 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 6a1e059858e5..0eab020a33b9 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -89,6 +89,26 @@ tags: - $RUNNER_TAG +.software-driver: + stage: software-driver + timeout: "1h30m" + rules: + - !reference [.scheduled_pipeline-rules, rules] + - when: on_success + extends: + - .test-gl + tags: + - kvm + script: + - ln -sf $CI_PROJECT_DIR/install /install + - mv install/bzImage /lava-files/bzImage + - mkdir -p /lib/modules + - install/crosvm-runner.sh install/igt_runner.sh + needs: + - debian/x86_64_test-gl + - testing:x86_64 + - igt:x86_64 + .msm-sc7180: extends: - .lava-igt:arm64 @@ -440,47 +460,16 @@ panfrost:g12b: - .panfrost-gpu virtio_gpu:none: - stage: software-driver - timeout: "1h30m" - rules: - - !reference [.scheduled_pipeline-rules, rules] - - when: on_success + extends: + - .software-driver variables: CROSVM_GALLIUM_DRIVER: llvmpipe DRIVER_NAME: virtio_gpu GPU_VERSION: none - extends: - - .test-gl - tags: - - kvm - script: - - ln -sf $CI_PROJECT_DIR/install /install - - mv install/bzImage /lava-files/bzImage - - install/crosvm-runner.sh install/igt_runner.sh - needs: - - debian/x86_64_test-gl - - testing:x86_64 - - igt:x86_64 vkms:none: - stage: software-driver - timeout: "1h30m" - rules: - - !reference [.scheduled_pipeline-rules, rules] - - when: on_success + extends: + - .software-driver variables: DRIVER_NAME: vkms GPU_VERSION: none - extends: - - .test-gl - tags: - - kvm - script: - - ln -sf $CI_PROJECT_DIR/install /install - - mv install/bzImage /lava-files/bzImage - - mkdir -p /lib/modules - - ./install/crosvm-runner.sh ./install/igt_runner.sh - needs: - - debian/x86_64_test-gl - - testing:x86_64 - - igt:x86_64 From 7948fd1b8ea57791a3d5eaf5320ebcea56748d79 Mon Sep 17 00:00:00 2001 From: Vignesh Raman Date: Mon, 17 Feb 2025 11:07:11 +0530 Subject: [PATCH 0067/1627] drm/ci: enable CONFIG_DEBUG_WW_MUTEX_SLOWPATH Enable CONFIG_DEBUG_WW_MUTEX_SLOWPATH for mutex slowpath debugging. Acked-by: Helen Koike Reviewed-by: Daniel Stone Link: https://patchwork.freedesktop.org/patch/msgid/20250217053719.442644-3-vignesh.raman@collabora.com Signed-off-by: Vignesh Raman --- drivers/gpu/drm/ci/build.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/ci/build.yml b/drivers/gpu/drm/ci/build.yml index 274f118533a7..6c0dc10b547c 100644 --- a/drivers/gpu/drm/ci/build.yml +++ b/drivers/gpu/drm/ci/build.yml @@ -67,7 +67,7 @@ testing:arm32: # # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel # becoming too big for their bootloaders. - ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT" + ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH" UPLOAD_TO_MINIO: 1 MERGE_FRAGMENT: arm.config @@ -79,7 +79,7 @@ testing:arm64: # # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel # becoming too big for their bootloaders. 
- ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT" + ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH" UPLOAD_TO_MINIO: 1 MERGE_FRAGMENT: arm64.config @@ -91,7 +91,7 @@ testing:x86_64: # # db410c and db820c don't boot with KASAN_INLINE, probably due to the kernel # becoming too big for their bootloaders. - ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT" + ENABLE_KCONFIGS: "PROVE_LOCKING DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT DEBUG_WW_MUTEX_SLOWPATH" UPLOAD_TO_MINIO: 1 MERGE_FRAGMENT: x86_64.config From 27b6bce72c7721449d924ef987aa835daf015f26 Mon Sep 17 00:00:00 2001 From: Vignesh Raman Date: Mon, 17 Feb 2025 11:07:12 +0530 Subject: [PATCH 0068/1627] drm/ci: enable lockdep detection We have enabled PROVE_LOCKING (which enables LOCKDEP) in drm-ci. This will output warnings when kernel locking errors are encountered and will continue executing tests. To detect if lockdep has been triggered, check the debug_locks value in /proc/lockdep_stats after the tests have run. When debug_locks is 0, it indicates that lockdep has detected issues and turned itself off. Check this value, and if lockdep is detected, exit with an error and configure it as a warning in GitLab CI. GitLab CI ignores exit codes other than 1 by default. Pass the correct exit code with variable FF_USE_NEW_BASH_EVAL_STRATEGY set to true or exit on failure. Also update the documentation. Acked-by: Helen Koike Reviewed-by: Daniel Stone Link: https://patchwork.freedesktop.org/patch/msgid/20250217053719.442644-4-vignesh.raman@collabora.com Signed-off-by: Vignesh Raman --- Documentation/gpu/automated_testing.rst | 4 ++++ drivers/gpu/drm/ci/igt_runner.sh | 11 +++++++++++ drivers/gpu/drm/ci/test.yml | 17 ++++++++++++++--- 3 files changed, 29 insertions(+), 3 deletions(-) diff --git a/Documentation/gpu/automated_testing.rst b/Documentation/gpu/automated_testing.rst index 6d7c6086034d..62aa3ede02a5 100644 --- a/Documentation/gpu/automated_testing.rst +++ b/Documentation/gpu/automated_testing.rst @@ -115,6 +115,10 @@ created (eg. https://gitlab.freedesktop.org/janedoe/linux/-/pipelines) 5. The various jobs will be run and when the pipeline is finished, all jobs should be green unless a regression has been found. +6. Warnings in the pipeline indicate that lockdep +(see Documentation/locking/lockdep-design.rst) issues have been detected +during the tests. + How to update test expectations =============================== diff --git a/drivers/gpu/drm/ci/igt_runner.sh b/drivers/gpu/drm/ci/igt_runner.sh index 68b042e43b7f..2a0599f12c58 100755 --- a/drivers/gpu/drm/ci/igt_runner.sh +++ b/drivers/gpu/drm/ci/igt_runner.sh @@ -85,5 +85,16 @@ deqp-runner junit \ --limit 50 \ --template "See $ARTIFACTS_BASE_URL/results/{{testcase}}.xml" +# Check if /proc/lockdep_stats exists +if [ -f /proc/lockdep_stats ]; then + # If debug_locks is 0, it indicates lockdep is detected and it turns itself off. + debug_locks=$(grep 'debug_locks:' /proc/lockdep_stats | awk '{print $2}') + if [ "$debug_locks" -eq 0 ] && [ "$ret" -eq 0 ]; then + echo "Warning: LOCKDEP issue detected. Please check dmesg logs for more information." 
+ cat /proc/lockdep_stats + ret=101 + fi +fi + cd $oldpath exit $ret diff --git a/drivers/gpu/drm/ci/test.yml b/drivers/gpu/drm/ci/test.yml index 0eab020a33b9..dbc4ff50d8ff 100644 --- a/drivers/gpu/drm/ci/test.yml +++ b/drivers/gpu/drm/ci/test.yml @@ -1,6 +1,14 @@ +.allow_failure_lockdep: + variables: + FF_USE_NEW_BASH_EVAL_STRATEGY: 'true' + allow_failure: + exit_codes: + - 101 + .lava-test: extends: - .container+build-rules + - .allow_failure_lockdep timeout: "1h30m" rules: - !reference [.scheduled_pipeline-rules, rules] @@ -69,6 +77,7 @@ extends: - .baremetal-test-arm64 - .use-debian/baremetal_arm64_test + - .allow_failure_lockdep timeout: "1h30m" rules: - !reference [.scheduled_pipeline-rules, rules] @@ -91,6 +100,8 @@ .software-driver: stage: software-driver + extends: + - .allow_failure_lockdep timeout: "1h30m" rules: - !reference [.scheduled_pipeline-rules, rules] @@ -153,7 +164,7 @@ msm:apq8016: BM_KERNEL_EXTRA_ARGS: clk_ignore_unused RUNNER_TAG: google-freedreno-db410c script: - - ./install/bare-metal/fastboot.sh + - ./install/bare-metal/fastboot.sh || exit $? msm:apq8096: extends: @@ -167,7 +178,7 @@ msm:apq8096: GPU_VERSION: apq8096 RUNNER_TAG: google-freedreno-db820c script: - - ./install/bare-metal/fastboot.sh + - ./install/bare-metal/fastboot.sh || exit $? msm:sdm845: extends: @@ -181,7 +192,7 @@ msm:sdm845: GPU_VERSION: sdm845 RUNNER_TAG: google-freedreno-cheza script: - - ./install/bare-metal/cros-servo.sh + - ./install/bare-metal/cros-servo.sh || exit $? msm:sm8350-hdk: extends: From 2b7970e9a632b1e45ccf52b620079ab9ea5cad1a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:40 +0100 Subject: [PATCH 0069/1627] drm/ast: Replace AST_VIDMEM_SIZE_ with Linux SZ_ constants Ast's AST_VIDMEM_SIZE_ constants enumerate supported video-memory sizes from 8 MiB to 128 MiB. Replace them with Linux' SZ_ constants of the same value. When expanded, the literal values remain the same. The size constant for 128 MiB is unused and the default size is not necessary. Remove both of them. 
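For reference, the replacement relies on the generic size constants from include/linux/sizes.h having exactly the same values as the removed macros; the expansion (shown here only as a sanity check, not as part of the patch) is:

    /* include/linux/sizes.h */
    #define SZ_8M   0x00800000  /* == AST_VIDMEM_SIZE_8M  */
    #define SZ_16M  0x01000000  /* == AST_VIDMEM_SIZE_16M */
    #define SZ_32M  0x02000000  /* == AST_VIDMEM_SIZE_32M */
    #define SZ_64M  0x04000000  /* == AST_VIDMEM_SIZE_64M */
    #define SZ_128M 0x08000000  /* == AST_VIDMEM_SIZE_128M (unused, removed) */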
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-2-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_drv.h | 8 -------- drivers/gpu/drm/ast/ast_mm.c | 9 ++++----- drivers/gpu/drm/ast/ast_post.c | 24 ++++++++++++------------ 3 files changed, 16 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index d2c2605d2728..2c7861835cfb 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -340,14 +340,6 @@ static inline void ast_set_index_reg_mask(struct ast_device *ast, u32 base, u8 i __ast_write8_i_masked(ast->ioregs, base, index, preserve_mask, val); } -#define AST_VIDMEM_SIZE_8M 0x00800000 -#define AST_VIDMEM_SIZE_16M 0x01000000 -#define AST_VIDMEM_SIZE_32M 0x02000000 -#define AST_VIDMEM_SIZE_64M 0x04000000 -#define AST_VIDMEM_SIZE_128M 0x08000000 - -#define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M - struct ast_vbios_stdtable { u8 misc; u8 seq[4]; diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 6dfe6d9777d4..20d833632a01 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -38,20 +38,19 @@ static u32 ast_get_vram_size(struct ast_device *ast) u8 jreg; u32 vram_size; - vram_size = AST_VIDMEM_DEFAULT_SIZE; jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff); switch (jreg & 3) { case 0: - vram_size = AST_VIDMEM_SIZE_8M; + vram_size = SZ_8M; break; case 1: - vram_size = AST_VIDMEM_SIZE_16M; + vram_size = SZ_16M; break; case 2: - vram_size = AST_VIDMEM_SIZE_32M; + vram_size = SZ_32M; break; case 3: - vram_size = AST_VIDMEM_SIZE_64M; + vram_size = SZ_64M; break; } diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 91e85e457bdf..37568cf3822c 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -1075,16 +1075,16 @@ static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *par switch (param->vram_size) { default: - case AST_VIDMEM_SIZE_8M: + case SZ_8M: param->dram_config |= 0x00; break; - case AST_VIDMEM_SIZE_16M: + case SZ_16M: param->dram_config |= 0x04; break; - case AST_VIDMEM_SIZE_32M: + case SZ_32M: param->dram_config |= 0x08; break; - case AST_VIDMEM_SIZE_64M: + case SZ_64M: param->dram_config |= 0x0c; break; } @@ -1446,16 +1446,16 @@ static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *par switch (param->vram_size) { default: - case AST_VIDMEM_SIZE_8M: + case SZ_8M: param->dram_config |= 0x00; break; - case AST_VIDMEM_SIZE_16M: + case SZ_16M: param->dram_config |= 0x04; break; - case AST_VIDMEM_SIZE_32M: + case SZ_32M: param->dram_config |= 0x08; break; - case AST_VIDMEM_SIZE_64M: + case SZ_64M: param->dram_config |= 0x0c; break; } @@ -1635,19 +1635,19 @@ static void ast_post_chip_2300(struct ast_device *ast) switch (temp & 0x0c) { default: case 0x00: - param.vram_size = AST_VIDMEM_SIZE_8M; + param.vram_size = SZ_8M; break; case 0x04: - param.vram_size = AST_VIDMEM_SIZE_16M; + param.vram_size = SZ_16M; break; case 0x08: - param.vram_size = AST_VIDMEM_SIZE_32M; + param.vram_size = SZ_32M; break; case 0x0c: - param.vram_size = AST_VIDMEM_SIZE_64M; + param.vram_size = SZ_64M; break; } From 9f711d1877e052171ae21da4cb831e7184b9872e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:41 +0100 Subject: [PATCH 0070/1627] drm/ast: Add VGACRAA register constants Add register constants for VGACRAA and use them when detecting the size of the VGA 
memory. Aligns the code with the programming manual. Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-3-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_mm.c | 5 +++-- drivers/gpu/drm/ast/ast_reg.h | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 20d833632a01..8d8aac8c0814 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -37,9 +37,10 @@ static u32 ast_get_vram_size(struct ast_device *ast) { u8 jreg; u32 vram_size; + u8 vgacraa; - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xaa, 0xff); - switch (jreg & 3) { + vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa); + switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) { case 0: vram_size = SZ_8M; break; diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index bb2cc1d8b84e..039b93bed19e 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -33,6 +33,7 @@ #define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) #define AST_IO_VGACRA1_MMIO_ENABLED BIT(2) #define AST_IO_VGACRA3_DVO_ENABLED BIT(7) +#define AST_IO_VGACRAA_VGAMEM_SIZE_MASK GENMASK(1, 0) #define AST_IO_VGACRB6_HSYNC_OFF BIT(0) #define AST_IO_VGACRB6_VSYNC_OFF BIT(1) #define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */ From a958c7f13b0b4ffd41384293d307b9d6218fcc87 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:42 +0100 Subject: [PATCH 0071/1627] drm/ast: Add VGACR99 register constants Add register constants for VGACR99 and use them when detecting the size of the VGA memory. Aligns the code with the programming manual. Also replace literal size values with Linux' SZ_ size constants. 
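Purely as an illustration of the new decode (a hypothetical helper, not code from this patch), VGACR99[1:0] selects how much memory is reserved and subtracted from the detected VRAM size:

    /* Hypothetical helper, illustrating the VGACR99 decode used below. */
    static u32 ast_vgamem_reserved_size(u8 vgacr99)
    {
        static const u32 reserved[] = { 0, SZ_1M, SZ_2M, SZ_4M };

        return reserved[vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK];
    }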
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-4-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_mm.c | 13 ++++++------- drivers/gpu/drm/ast/ast_reg.h | 1 + 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 8d8aac8c0814..3d03ef556d0a 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -35,9 +35,8 @@ static u32 ast_get_vram_size(struct ast_device *ast) { - u8 jreg; u32 vram_size; - u8 vgacraa; + u8 vgacr99, vgacraa; vgacraa = ast_get_index_reg(ast, AST_IO_VGACRI, 0xaa); switch (vgacraa & AST_IO_VGACRAA_VGAMEM_SIZE_MASK) { @@ -55,16 +54,16 @@ static u32 ast_get_vram_size(struct ast_device *ast) break; } - jreg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0x99, 0xff); - switch (jreg & 0x03) { + vgacr99 = ast_get_index_reg(ast, AST_IO_VGACRI, 0x99); + switch (vgacr99 & AST_IO_VGACR99_VGAMEM_RSRV_MASK) { case 1: - vram_size -= 0x100000; + vram_size -= SZ_1M; break; case 2: - vram_size -= 0x200000; + vram_size -= SZ_2M; break; case 3: - vram_size -= 0x400000; + vram_size -= SZ_4M; break; } diff --git a/drivers/gpu/drm/ast/ast_reg.h b/drivers/gpu/drm/ast/ast_reg.h index 039b93bed19e..e15adaf3a80e 100644 --- a/drivers/gpu/drm/ast/ast_reg.h +++ b/drivers/gpu/drm/ast/ast_reg.h @@ -30,6 +30,7 @@ #define AST_IO_VGACRI (0x54) #define AST_IO_VGACR80_PASSWORD (0xa8) +#define AST_IO_VGACR99_VGAMEM_RSRV_MASK GENMASK(1, 0) #define AST_IO_VGACRA1_VGAIO_DISABLED BIT(1) #define AST_IO_VGACRA1_MMIO_ENABLED BIT(2) #define AST_IO_VGACRA3_DVO_ENABLED BIT(7) From 4ee3229bbe6cb0e80f1dedcddda8072bb9cc884f Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:43 +0100 Subject: [PATCH 0072/1627] drm/ast: cursor: Add helpers for computing location in video memory The ast drivers stores the cursor image at the end of the video memory. Add helpers to calculate the offset and size. 
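To make the placement concrete, a worked example (illustrative numbers only, assuming the existing 64x64, 2-byte-per-pixel cursor, the 32-byte signature, 16 MiB of VRAM and 4 KiB pages):

    cursor size   = AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE
                  = 64 * (64 * 2) + 32 = 0x2000 + 0x20 = 0x2020 bytes
    cursor offset = PAGE_ALIGN_DOWN(0x1000000 - 0x2020) = 0xffd000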
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-5-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_cursor.c | 21 +++++++++++++++++++-- drivers/gpu/drm/ast/ast_drv.h | 1 + 2 files changed, 20 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index 139ab00dee8f..05e297f30b4e 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -45,6 +45,21 @@ #define AST_HWC_SIGNATURE_HOTSPOTX 0x14 #define AST_HWC_SIGNATURE_HOTSPOTY 0x18 +static unsigned long ast_cursor_vram_size(void) +{ + return AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE; +} + +long ast_cursor_vram_offset(struct ast_device *ast) +{ + unsigned long size = ast_cursor_vram_size(); + + if (size > ast->vram_size) + return -EINVAL; + + return PAGE_ALIGN_DOWN(ast->vram_size - size); +} + static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height) { u32 csum = 0; @@ -276,7 +291,7 @@ int ast_cursor_plane_init(struct ast_device *ast) struct drm_plane *cursor_plane = &ast_plane->base; size_t size; void __iomem *vaddr; - u64 offset; + long offset; int ret; /* @@ -290,7 +305,9 @@ int ast_cursor_plane_init(struct ast_device *ast) return -ENOMEM; vaddr = ast->vram + ast->vram_fb_available - size; - offset = ast->vram_fb_available - size; + offset = ast_cursor_vram_offset(ast); + if (offset < 0) + return offset; ret = ast_plane_init(dev, ast_plane, vaddr, offset, size, 0x01, &ast_cursor_plane_funcs, diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 2c7861835cfb..ec9ec77260e9 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -432,6 +432,7 @@ int ast_vga_output_init(struct ast_device *ast); int ast_sil164_output_init(struct ast_device *ast); /* ast_cursor.c */ +long ast_cursor_vram_offset(struct ast_device *ast); int ast_cursor_plane_init(struct ast_device *ast); /* ast dp501 */ From ca7a8e8efc9cba4f131b65197bae5011d0a7a250 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:44 +0100 Subject: [PATCH 0073/1627] drm/ast: Add helper for computing framebuffer location in video memory The ast driver stores the primary plane's image in the framebuffer memory up to where the cursor is located. Add helpers to calculate the offset and size. 
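Continuing the 16 MiB example from the previous patch (again purely illustrative), the primary plane then spans everything below the cursor:

    fb offset = 0
    fb size   = cursor offset - fb offset = 0xffd000 bytes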
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-6-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_mode.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index c3b950675485..4cac5c7f4547 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -51,6 +51,24 @@ #define AST_LUT_SIZE 256 +static unsigned long ast_fb_vram_offset(void) +{ + return 0; // with shmem, the primary plane is always at offset 0 +} + +static unsigned long ast_fb_vram_size(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + unsigned long offset = ast_fb_vram_offset(); // starts at offset + long cursor_offset = ast_cursor_vram_offset(ast); // ends at cursor offset + + if (cursor_offset < 0) + cursor_offset = ast->vram_size; // no cursor; it's all ours + if (drm_WARN_ON_ONCE(dev, offset > cursor_offset)) + return 0; // cannot legally happen; signal error + return cursor_offset - offset; +} + static inline void ast_load_palette_index(struct ast_device *ast, u8 index, u8 red, u8 green, u8 blue) @@ -609,9 +627,8 @@ static int ast_primary_plane_init(struct ast_device *ast) struct ast_plane *ast_primary_plane = &ast->primary_plane; struct drm_plane *primary_plane = &ast_primary_plane->base; void __iomem *vaddr = ast->vram; - u64 offset = 0; /* with shmem, the primary plane is always at offset 0 */ - unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); - unsigned long size = ast->vram_fb_available - cursor_size; + u64 offset = ast_fb_vram_offset(); + unsigned long size = ast_fb_vram_size(ast); int ret; ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size, @@ -942,7 +959,7 @@ static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev, struct ast_device *ast = to_ast_device(dev); unsigned long fbsize, fbpages, max_fbpages; - max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT; + max_fbpages = ast_fb_vram_size(ast) >> PAGE_SHIFT; fbsize = mode->hdisplay * mode->vdisplay * max_bpp; fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE); From e5f953b8eae7249bf50f86d79f80327621edc2d5 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:45 +0100 Subject: [PATCH 0074/1627] drm/ast: Remove vram_fb_available from struct ast_device Helpers compute the offset and size of the available framebuffer memory. Remove the obsolete field vram_fb_available from struct ast_device. Also define the cursor-signature size next to its only user. 
v2: - initialize plane size Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-7-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_cursor.c | 18 ++++-------------- drivers/gpu/drm/ast/ast_drv.h | 4 ---- drivers/gpu/drm/ast/ast_mm.c | 1 - 3 files changed, 4 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index 05e297f30b4e..cb0c48d47207 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -37,6 +37,7 @@ */ /* define for signature structure */ +#define AST_HWC_SIGNATURE_SIZE SZ_32 #define AST_HWC_SIGNATURE_CHECKSUM 0x00 #define AST_HWC_SIGNATURE_SizeX 0x04 #define AST_HWC_SIGNATURE_SizeY 0x08 @@ -289,25 +290,16 @@ int ast_cursor_plane_init(struct ast_device *ast) struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane; struct ast_plane *ast_plane = &ast_cursor_plane->base; struct drm_plane *cursor_plane = &ast_plane->base; - size_t size; + unsigned long size; void __iomem *vaddr; long offset; int ret; - /* - * Allocate backing storage for cursors. The BOs are permanently - * pinned to the top end of the VRAM. - */ - - size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); - - if (ast->vram_fb_available < size) - return -ENOMEM; - - vaddr = ast->vram + ast->vram_fb_available - size; + size = ast_cursor_vram_size(); offset = ast_cursor_vram_offset(ast); if (offset < 0) return offset; + vaddr = ast->vram + offset; ret = ast_plane_init(dev, ast_plane, vaddr, offset, size, 0x01, &ast_cursor_plane_funcs, @@ -320,7 +312,5 @@ int ast_cursor_plane_init(struct ast_device *ast) drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs); drm_plane_enable_fb_damage_clips(cursor_plane); - ast->vram_fb_available -= size; - return 0; } diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index ec9ec77260e9..d9da2328d46b 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -112,12 +112,9 @@ enum ast_config_mode { #define AST_MAX_HWC_WIDTH 64 #define AST_MAX_HWC_HEIGHT 64 - #define AST_HWC_PITCH (AST_MAX_HWC_WIDTH * SZ_2) #define AST_HWC_SIZE (AST_MAX_HWC_HEIGHT * AST_HWC_PITCH) -#define AST_HWC_SIGNATURE_SIZE 32 - /* * Planes */ @@ -183,7 +180,6 @@ struct ast_device { void __iomem *vram; unsigned long vram_base; unsigned long vram_size; - unsigned long vram_fb_available; struct mutex modeset_lock; /* Protects access to modeset I/O registers in ioregs */ diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 3d03ef556d0a..0bc140319464 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -92,7 +92,6 @@ int ast_mm_init(struct ast_device *ast) ast->vram_base = base; ast->vram_size = vram_size; - ast->vram_fb_available = vram_size; return 0; } From c6a84bc9690afc40b103c5df3cdfb357439cb563 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 5 Mar 2025 17:30:46 +0100 Subject: [PATCH 0075/1627] drm/ast: cursor: Drop page alignment The cursor scanout address requires alignment to a multiple of 8, but does not require page alignment. Change the offset calculation accordingly. Frees up a few more bytes for the primary framebuffer. 
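Using the same illustrative 16 MiB / 4 KiB-page numbers as before, the relaxed alignment moves the cursor up and returns the difference to the primary plane:

    before: PAGE_ALIGN_DOWN(0x1000000 - 0x2020)  = 0xffd000
    after:  ALIGN_DOWN(0x1000000 - 0x2020, SZ_8) = 0xffdfe0   (4064 bytes gained)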
Signed-off-by: Thomas Zimmermann Reviewed-by: Jocelyn Falempe Link: https://patchwork.freedesktop.org/patch/msgid/20250305163207.267650-8-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_cursor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index cb0c48d47207..5ee724bfd682 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -58,7 +58,7 @@ long ast_cursor_vram_offset(struct ast_device *ast) if (size > ast->vram_size) return -EINVAL; - return PAGE_ALIGN_DOWN(ast->vram_size - size); + return ALIGN_DOWN(ast->vram_size - size, SZ_8); } static u32 ast_cursor_calculate_checksum(const void *src, unsigned int width, unsigned int height) From 143ec8d3f93965110689897f0d25165dc9664009 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 7 Mar 2025 09:03:59 +0100 Subject: [PATCH 0076/1627] drm/prime: Support dedicated DMA device for dma-buf imports Importing dma-bufs via PRIME requires a DMA-capable device. Devices on peripheral busses, such as USB, often cannot perform DMA by themselves. Without DMA-capable device PRIME import fails. DRM drivers for USB devices already use a separate DMA device for dma-buf imports. Make the mechanism generally available. Besides the case of USB, there are embedded DRM devices without DMA capability. DMA is performed by a separate controller. DRM drivers should set this accordingly. Add the field dma_dev to struct drm_device to refer to the device's DMA device. For USB this should be the USB controller. Use dma_dev in the PRIME import helpers, if set. v2: - acquire internal reference on dma_dev (Jani) - add DMA-controller usecase to docs (Maxime) Signed-off-by: Thomas Zimmermann Reviewed-by: Jani Nikula Reviewed-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20250307080836.42848-2-tzimmermann@suse.de --- drivers/gpu/drm/drm_drv.c | 21 +++++++++++++++++++ drivers/gpu/drm/drm_prime.c | 2 +- include/drm/drm_device.h | 41 +++++++++++++++++++++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 17fc5dc708f4..c9487bc88624 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -500,6 +500,25 @@ void drm_dev_unplug(struct drm_device *dev) } EXPORT_SYMBOL(drm_dev_unplug); +/** + * drm_dev_set_dma_dev - set the DMA device for a DRM device + * @dev: DRM device + * @dma_dev: DMA device or NULL + * + * Sets the DMA device of the given DRM device. Only required if + * the DMA device is different from the DRM device's parent. After + * calling this function, the DRM device holds a reference on + * @dma_dev. Pass NULL to clear the DMA device. + */ +void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev) +{ + dma_dev = get_device(dma_dev); + + put_device(dev->dma_dev); + dev->dma_dev = dma_dev; +} +EXPORT_SYMBOL(drm_dev_set_dma_dev); + /* * Available recovery methods for wedged device. To be sent along with device * wedged uevent. @@ -654,6 +673,8 @@ static void drm_dev_init_release(struct drm_device *dev, void *res) { drm_fs_inode_free(dev->anon_inode); + put_device(dev->dma_dev); + dev->dma_dev = NULL; put_device(dev->dev); /* Prevent use-after-free in drm_managed_release when debugging is * enabled. Slightly awkward, but can't really be helped. 
*/ diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index a3d64f93a225..4b8c6075e46a 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -997,7 +997,7 @@ EXPORT_SYMBOL(drm_gem_prime_import_dev); struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { - return drm_gem_prime_import_dev(dev, dma_buf, dev->dev); + return drm_gem_prime_import_dev(dev, dma_buf, drm_dev_dma_dev(dev)); } EXPORT_SYMBOL(drm_gem_prime_import); diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h index 6ea54a578cda..e2f894f1b90a 100644 --- a/include/drm/drm_device.h +++ b/include/drm/drm_device.h @@ -64,6 +64,28 @@ struct drm_device { /** @dev: Device structure of bus-device */ struct device *dev; + /** + * @dma_dev: + * + * Device for DMA operations. Only required if the device @dev + * cannot perform DMA by itself. Should be NULL otherwise. Call + * drm_dev_dma_dev() to get the DMA device instead of using this + * field directly. Call drm_dev_set_dma_dev() to set this field. + * + * DRM devices are sometimes bound to virtual devices that cannot + * perform DMA by themselves. Drivers should set this field to the + * respective DMA controller. + * + * Devices on USB and other peripheral busses also cannot perform + * DMA by themselves. The @dma_dev field should point the bus + * controller that does DMA on behalve of such a device. Required + * for importing buffers via dma-buf. + * + * If set, the DRM core automatically releases the reference on the + * device. + */ + struct device *dma_dev; + /** * @managed: * @@ -327,4 +349,23 @@ struct drm_device { struct dentry *debugfs_root; }; +void drm_dev_set_dma_dev(struct drm_device *dev, struct device *dma_dev); + +/** + * drm_dev_dma_dev - returns the DMA device for a DRM device + * @dev: DRM device + * + * Returns the DMA device of the given DRM device. By default, this + * the DRM device's parent. See drm_dev_set_dma_dev(). + * + * Returns: + * A DMA-capable device for the DRM device. + */ +static inline struct device *drm_dev_dma_dev(struct drm_device *dev) +{ + if (dev->dma_dev) + return dev->dma_dev; + return dev->dev; +} + #endif From 7b7af1740108424158c6e1629c66fc8f603bf647 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 7 Mar 2025 09:04:00 +0100 Subject: [PATCH 0077/1627] drm/appletbdrm: Set struct drm_device.dma_dev Set the dma_dev field provided by the DRM device. Required for PRIME dma-buf import. Remove the driver's implementation. 
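The conversion in this and the following USB drivers boils down to the same probe-time snippet, distilled here as a sketch (example_set_usb_dma_dev is a made-up name; the helpers are the real ones from the previous patch):

    /* Illustrative only: resolve the USB DMA device once at probe time. */
    static void example_set_usb_dma_dev(struct drm_device *drm,
                                        struct usb_interface *intf)
    {
        struct device *dma_dev = usb_intf_get_dma_device(intf);

        if (dma_dev) {
            drm_dev_set_dma_dev(drm, dma_dev); /* core takes its own reference */
            put_device(dma_dev);               /* drop the one we got here */
        } else {
            drm_warn(drm, "buffer sharing not supported"); /* not an error */
        }
    }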
Signed-off-by: Thomas Zimmermann Tested-by: Aditya Garg Reviewed-by: Aditya Garg Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250307080836.42848-3-tzimmermann@suse.de --- drivers/gpu/drm/tiny/appletbdrm.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/tiny/appletbdrm.c b/drivers/gpu/drm/tiny/appletbdrm.c index 394c8f9bd41a..703b9a41a086 100644 --- a/drivers/gpu/drm/tiny/appletbdrm.c +++ b/drivers/gpu/drm/tiny/appletbdrm.c @@ -45,7 +45,7 @@ #define APPLETBDRM_BULK_MSG_TIMEOUT 1000 #define drm_to_adev(_drm) container_of(_drm, struct appletbdrm_device, drm) -#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface(adev->dmadev)) +#define adev_to_udev(adev) interface_to_usbdev(to_usb_interface((adev)->drm.dev)) struct appletbdrm_msg_request_header { __le16 unk_00; @@ -123,8 +123,6 @@ struct appletbdrm_fb_request_response { } __packed; struct appletbdrm_device { - struct device *dmadev; - unsigned int in_ep; unsigned int out_ep; @@ -612,22 +610,10 @@ static const struct drm_encoder_funcs appletbdrm_encoder_funcs = { .destroy = drm_encoder_cleanup, }; -static struct drm_gem_object *appletbdrm_driver_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct appletbdrm_device *adev = drm_to_adev(dev); - - if (!adev->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(dev, dma_buf, adev->dmadev); -} - DEFINE_DRM_GEM_FOPS(appletbdrm_drm_fops); static const struct drm_driver appletbdrm_drm_driver = { DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = appletbdrm_driver_gem_prime_import, .name = "appletbdrm", .desc = "Apple Touch Bar DRM Driver", .major = 1, @@ -747,6 +733,7 @@ static int appletbdrm_probe(struct usb_interface *intf, struct device *dev = &intf->dev; struct appletbdrm_device *adev; struct drm_device *drm = NULL; + struct device *dma_dev; int ret; ret = usb_find_common_endpoints(intf->cur_altsetting, &bulk_in, &bulk_out, NULL, NULL); @@ -761,12 +748,19 @@ static int appletbdrm_probe(struct usb_interface *intf, adev->in_ep = bulk_in->bEndpointAddress; adev->out_ep = bulk_out->bEndpointAddress; - adev->dmadev = dev; drm = &adev->drm; usb_set_intfdata(intf, adev); + dma_dev = usb_intf_get_dma_device(intf); + if (dma_dev) { + drm_dev_set_dma_dev(drm, dma_dev); + put_device(dma_dev); + } else { + drm_warn(drm, "buffer sharing not supported"); /* not an error */ + } + ret = appletbdrm_get_information(adev); if (ret) { drm_err(drm, "Failed to get display information\n"); @@ -805,7 +799,6 @@ static void appletbdrm_disconnect(struct usb_interface *intf) struct appletbdrm_device *adev = usb_get_intfdata(intf); struct drm_device *drm = &adev->drm; - put_device(adev->dmadev); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); } From e3d4dfe91bdcb9c2f4ac07ea3885a94b863ba28e Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 7 Mar 2025 09:04:01 +0100 Subject: [PATCH 0078/1627] drm/gm12u320: Set struct drm_device.dma_dev Set the dma_dev field provided by the DRM device. Required for PRIME dma-buf import. Remove the driver's implementation. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250307080836.42848-4-tzimmermann@suse.de --- drivers/gpu/drm/tiny/gm12u320.c | 46 ++++++++++----------------------- 1 file changed, 13 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 41e9bfb2e2ff..fb0004166f4a 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -86,7 +86,6 @@ MODULE_PARM_DESC(eco_mode, "Turn on Eco mode (less bright, more silent)"); struct gm12u320_device { struct drm_device dev; - struct device *dmadev; struct drm_simple_display_pipe pipe; struct drm_connector conn; unsigned char *cmd_buf; @@ -602,22 +601,6 @@ static const uint64_t gm12u320_pipe_modifiers[] = { DRM_FORMAT_MOD_INVALID }; -/* - * FIXME: Dma-buf sharing requires DMA support by the importing device. - * This function is a workaround to make USB devices work as well. - * See todo.rst for how to fix the issue in the dma-buf framework. - */ -static struct drm_gem_object *gm12u320_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct gm12u320_device *gm12u320 = to_gm12u320(dev); - - if (!gm12u320->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(dev, dma_buf, gm12u320->dmadev); -} - DEFINE_DRM_GEM_FOPS(gm12u320_fops); static const struct drm_driver gm12u320_drm_driver = { @@ -630,7 +613,6 @@ static const struct drm_driver gm12u320_drm_driver = { .fops = &gm12u320_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = gm12u320_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, }; @@ -645,6 +627,7 @@ static int gm12u320_usb_probe(struct usb_interface *interface, { struct gm12u320_device *gm12u320; struct drm_device *dev; + struct device *dma_dev; int ret; /* @@ -660,16 +643,20 @@ static int gm12u320_usb_probe(struct usb_interface *interface, return PTR_ERR(gm12u320); dev = &gm12u320->dev; - gm12u320->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); - if (!gm12u320->dmadev) + dma_dev = usb_intf_get_dma_device(interface); + if (dma_dev) { + drm_dev_set_dma_dev(dev, dma_dev); + put_device(dma_dev); + } else { drm_warn(dev, "buffer sharing not supported"); /* not an error */ + } INIT_DELAYED_WORK(&gm12u320->fb_update.work, gm12u320_fb_update_work); mutex_init(&gm12u320->fb_update.lock); ret = drmm_mode_config_init(dev); if (ret) - goto err_put_device; + return ret; dev->mode_config.min_width = GM12U320_USER_WIDTH; dev->mode_config.max_width = GM12U320_USER_WIDTH; @@ -679,15 +666,15 @@ static int gm12u320_usb_probe(struct usb_interface *interface, ret = gm12u320_usb_alloc(gm12u320); if (ret) - goto err_put_device; + return ret; ret = gm12u320_set_ecomode(gm12u320); if (ret) - goto err_put_device; + return ret; ret = gm12u320_conn_init(gm12u320); if (ret) - goto err_put_device; + return ret; ret = drm_simple_display_pipe_init(&gm12u320->dev, &gm12u320->pipe, @@ -697,31 +684,24 @@ static int gm12u320_usb_probe(struct usb_interface *interface, gm12u320_pipe_modifiers, &gm12u320->conn); if (ret) - goto err_put_device; + return ret; drm_mode_config_reset(dev); usb_set_intfdata(interface, dev); ret = drm_dev_register(dev, 0); if (ret) - goto err_put_device; + return ret; drm_client_setup(dev, NULL); return 0; - -err_put_device: - put_device(gm12u320->dmadev); - return ret; } static void gm12u320_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); - struct gm12u320_device *gm12u320 = to_gm12u320(dev); - 
put_device(gm12u320->dmadev); - gm12u320->dmadev = NULL; drm_dev_unplug(dev); drm_atomic_helper_shutdown(dev); } From f5bd9d528ebac41a31919aa41f1a99eccb8917c8 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 7 Mar 2025 09:04:02 +0100 Subject: [PATCH 0079/1627] drm/gud: Set struct drm_device.dma_dev Set the dma_dev field provided by the DRM device. Required for PRIME dma-buf import. Remove the driver's implementation. Signed-off-by: Thomas Zimmermann Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250307080836.42848-5-tzimmermann@suse.de --- drivers/gpu/drm/gud/gud_drv.c | 33 ++++++++---------------------- drivers/gpu/drm/gud/gud_internal.h | 1 - 2 files changed, 9 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index cb405771d6e2..5385a2126e45 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -309,21 +309,6 @@ out: return ret; } -/* - * FIXME: Dma-buf sharing requires DMA support by the importing device. - * This function is a workaround to make USB devices work as well. - * See todo.rst for how to fix the issue in the dma-buf framework. - */ -static struct drm_gem_object *gud_gem_prime_import(struct drm_device *drm, struct dma_buf *dma_buf) -{ - struct gud_device *gdrm = to_gud_device(drm); - - if (!gdrm->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(drm, dma_buf, gdrm->dmadev); -} - static int gud_stats_debugfs(struct seq_file *m, void *data) { struct drm_debugfs_entry *entry = m->private; @@ -376,7 +361,6 @@ static const struct drm_driver gud_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .fops = &gud_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = gud_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, .name = "gud", @@ -434,6 +418,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) size_t max_buffer_size = 0; struct gud_device *gdrm; struct drm_device *drm; + struct device *dma_dev; u8 *formats_dev; u32 *formats; int ret, i; @@ -609,17 +594,19 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) usb_set_intfdata(intf, gdrm); - gdrm->dmadev = usb_intf_get_dma_device(intf); - if (!gdrm->dmadev) - dev_warn(dev, "buffer sharing not supported"); + dma_dev = usb_intf_get_dma_device(intf); + if (dma_dev) { + drm_dev_set_dma_dev(drm, dma_dev); + put_device(dma_dev); + } else { + dev_warn(dev, "buffer sharing not supported"); /* not an error */ + } drm_debugfs_add_file(drm, "stats", gud_stats_debugfs, NULL); ret = drm_dev_register(drm, 0); - if (ret) { - put_device(gdrm->dmadev); + if (ret) return ret; - } drm_kms_helper_poll_init(drm); @@ -638,8 +625,6 @@ static void gud_disconnect(struct usb_interface *interface) drm_kms_helper_poll_fini(drm); drm_dev_unplug(drm); drm_atomic_helper_shutdown(drm); - put_device(gdrm->dmadev); - gdrm->dmadev = NULL; } static int gud_suspend(struct usb_interface *intf, pm_message_t message) diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h index 0d148a6f27aa..d6fb25388722 100644 --- a/drivers/gpu/drm/gud/gud_internal.h +++ b/drivers/gpu/drm/gud/gud_internal.h @@ -16,7 +16,6 @@ struct gud_device { struct drm_device drm; struct drm_simple_display_pipe pipe; - struct device *dmadev; struct work_struct work; u32 flags; const struct drm_format_info *xrgb8888_emulation_format; From edd9231f3af4e580bc6679309bde65cbe10783ca Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 7 
Mar 2025 09:04:03 +0100 Subject: [PATCH 0080/1627] drm/udl: Set struct drm_device.dma_dev Set the dma_dev field provided by the DRM device. Required for PRIME dma-buf import. Remove the driver's implementation. Signed-off-by: Thomas Zimmermann Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250307080836.42848-6-tzimmermann@suse.de --- drivers/gpu/drm/udl/udl_drv.c | 17 ----------------- drivers/gpu/drm/udl/udl_drv.h | 1 - drivers/gpu/drm/udl/udl_main.c | 14 +++++++------- 3 files changed, 7 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 05b3a152cc33..3b56ca2f6eb8 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -49,22 +49,6 @@ static int udl_usb_reset_resume(struct usb_interface *interface) return drm_mode_config_helper_resume(dev); } -/* - * FIXME: Dma-buf sharing requires DMA support by the importing device. - * This function is a workaround to make USB devices work as well. - * See todo.rst for how to fix the issue in the dma-buf framework. - */ -static struct drm_gem_object *udl_driver_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) -{ - struct udl_device *udl = to_udl(dev); - - if (!udl->dmadev) - return ERR_PTR(-ENODEV); - - return drm_gem_prime_import_dev(dev, dma_buf, udl->dmadev); -} - DEFINE_DRM_GEM_FOPS(udl_driver_fops); static const struct drm_driver driver = { @@ -73,7 +57,6 @@ static const struct drm_driver driver = { /* GEM hooks */ .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, - .gem_prime_import = udl_driver_gem_prime_import, DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h index be00dc1d87a1..e67e7e2e6f1f 100644 --- a/drivers/gpu/drm/udl/udl_drv.h +++ b/drivers/gpu/drm/udl/udl_drv.h @@ -51,7 +51,6 @@ struct urb_list { struct udl_device { struct drm_device drm; struct device *dev; - struct device *dmadev; struct drm_plane primary_plane; struct drm_crtc crtc; diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index 3ebe2ce55dfd..cbb0169cc030 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -308,12 +308,17 @@ int udl_init(struct udl_device *udl) { struct drm_device *dev = &udl->drm; int ret = -ENOMEM; + struct device *dma_dev; DRM_DEBUG("\n"); - udl->dmadev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); - if (!udl->dmadev) + dma_dev = usb_intf_get_dma_device(to_usb_interface(dev->dev)); + if (dma_dev) { + drm_dev_set_dma_dev(dev, dma_dev); + put_device(dma_dev); + } else { drm_warn(dev, "buffer sharing not supported"); /* not an error */ + } mutex_init(&udl->gem_lock); @@ -343,18 +348,13 @@ int udl_init(struct udl_device *udl) err: if (udl->urbs.count) udl_free_urb_list(dev); - put_device(udl->dmadev); DRM_ERROR("%d\n", ret); return ret; } int udl_drop_usb(struct drm_device *dev) { - struct udl_device *udl = to_udl(dev); - udl_free_urb_list(dev); - put_device(udl->dmadev); - udl->dmadev = NULL; return 0; } From c8e7b185d45be0915c08b4c91743071e0d2d298a Mon Sep 17 00:00:00 2001 From: Dario Binacchi Date: Wed, 5 Mar 2025 15:09:16 +0100 Subject: [PATCH 0081/1627] drm/mxsfb: Remove generic DRM drivers in probe function Use aperture helpers to remove all generic graphics drivers before loading mxsfb. Makes mxsfb compatible with simpledrm. 
Co-developed-by: Michael Trimarchi Signed-off-by: Michael Trimarchi Signed-off-by: Dario Binacchi Reviewed-by: Thomas Zimmermann Signed-off-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20250305140929.174398-1-dario.binacchi@amarulasolutions.com --- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 377d4c4c9979..c183b1112bc4 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -8,6 +8,7 @@ * Copyright (C) 2008 Embedded Alley Solutions, Inc All Rights Reserved. */ +#include #include #include #include @@ -359,6 +360,15 @@ static int mxsfb_probe(struct platform_device *pdev) if (ret) goto err_free; + /* + * Remove early framebuffers (ie. simplefb). The framebuffer can be + * located anywhere in RAM + */ + ret = aperture_remove_all_conflicting_devices(mxsfb_driver.name); + if (ret) + return dev_err_probe(&pdev->dev, ret, + "can't kick out existing framebuffers\n"); + ret = drm_dev_register(drm, 0); if (ret) goto err_unload; From 36cb24049b911ed83d34441cd2e9adebfc999da8 Mon Sep 17 00:00:00 2001 From: Antonin Godard Date: Tue, 11 Mar 2025 17:40:05 +0100 Subject: [PATCH 0082/1627] dt-bindings: display: simple: Add POWERTIP PH128800T004-ZZA01 panel Add POWERTIP PH128800T004-ZZA01 10.1" LCD-TFT LVDS panel compatible string. Signed-off-by: Antonin Godard Acked-by: "Rob Herring (Arm)" Link: https://patchwork.freedesktop.org/patch/msgid/20250311-add-powertip-ph128800t004-v1-1-7f95e6984cea@bootlin.com Signed-off-by: Louis Chauvet --- .../devicetree/bindings/display/panel/panel-simple.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index b0de4fd6f3d4..b5c8eb4fa2d1 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -246,6 +246,8 @@ properties: - osddisplays,osd070t1718-19ts # One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel - osddisplays,osd101t2045-53ts + # POWERTIP PH128800T004-ZZA01 10.1" WXGA TFT LCD panel + - powertip,ph128800t004-zza01 # POWERTIP PH128800T006-ZHC01 10.1" WXGA TFT LCD panel - powertip,ph128800t006-zhc01 # POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel From 6374a1005f20c1c2f7bbcc1bc735c2be4910a685 Mon Sep 17 00:00:00 2001 From: Antonin Godard Date: Tue, 11 Mar 2025 17:40:06 +0100 Subject: [PATCH 0083/1627] drm/panel: simple: Add POWERTIP PH128800T004-ZZA01 panel entry Add support for the POWERTIP PH128800T004-ZZA01 10.1" (1280x800) LCD-TFT panel. Its panel description is very much like the POWERTIP PH128800T006-ZHC01 configured below this one, only its timings are different. 
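As a quick sanity check of the added timings (not part of the patch), the mode comes out at roughly the expected 60 Hz:

    htotal  = 1280 + 48 + 32 + 80 = 1440
    vtotal  =  800 +  9 +  8 +  6 =  823
    refresh ~ 71,150,000 / (1440 * 823) ~ 60.04 Hz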
Signed-off-by: Antonin Godard Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250311-add-powertip-ph128800t004-v1-2-7f95e6984cea@bootlin.com Signed-off-by: Louis Chauvet --- drivers/gpu/drm/panel/panel-simple.c | 29 ++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 232b03c1a259..6ba600f97aa4 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -3796,6 +3796,32 @@ static const struct panel_desc pda_91_00156_a0 = { .bus_format = MEDIA_BUS_FMT_RGB888_1X24, }; +static const struct drm_display_mode powertip_ph128800t004_zza01_mode = { + .clock = 71150, + .hdisplay = 1280, + .hsync_start = 1280 + 48, + .hsync_end = 1280 + 48 + 32, + .htotal = 1280 + 48 + 32 + 80, + .vdisplay = 800, + .vsync_start = 800 + 9, + .vsync_end = 800 + 9 + 8, + .vtotal = 800 + 9 + 8 + 6, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC, +}; + +static const struct panel_desc powertip_ph128800t004_zza01 = { + .modes = &powertip_ph128800t004_zza01_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 216, + .height = 135, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .bus_flags = DRM_BUS_FLAG_DE_HIGH, + .connector_type = DRM_MODE_CONNECTOR_LVDS, +}; + static const struct drm_display_mode powertip_ph128800t006_zhc01_mode = { .clock = 66500, .hdisplay = 1280, @@ -5153,6 +5179,9 @@ static const struct of_device_id platform_of_match[] = { }, { .compatible = "pda,91-00156-a0", .data = &pda_91_00156_a0, + }, { + .compatible = "powertip,ph128800t004-zza01", + .data = &powertip_ph128800t004_zza01, }, { .compatible = "powertip,ph128800t006-zhc01", .data = &powertip_ph128800t006_zhc01, From 9497c5a0f7c26ff81f11df738a94c6b80f890c0a Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 26 Feb 2025 22:23:52 +0100 Subject: [PATCH 0084/1627] drm/bridge: move bridges_show logic from drm_debugfs.c In preparation to expose more info about bridges in debugfs, which will require more insight into drm_bridge data structures, move the bridges_show code to drm_bridge.c. Suggested-by: Jani Nikula Suggested-by: Dmitry Baryshkov Signed-off-by: Luca Ceresoli Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250226-drm-debugfs-show-all-bridges-v8-1-bb511cc49d83@bootlin.com Signed-off-by: Louis Chauvet --- drivers/gpu/drm/drm_bridge.c | 42 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/drm_debugfs.c | 38 +------------------------------ include/drm/drm_bridge.h | 2 ++ 3 files changed, 45 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index fa2794217a90..3e23e1f0394d 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. 
*/ +#include #include #include #include @@ -1300,6 +1301,47 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif +static int encoder_bridges_show(struct seq_file *m, void *data) +{ + struct drm_encoder *encoder = m->private; + struct drm_printer p = drm_seq_file_printer(m); + struct drm_bridge *bridge; + unsigned int idx = 0; + + drm_for_each_bridge_in_chain(encoder, bridge) { + drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs); + drm_printf(&p, "\ttype: [%d] %s\n", + bridge->type, + drm_get_connector_type_name(bridge->type)); + + if (bridge->of_node) + drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); + + drm_printf(&p, "\tops: [0x%x]", bridge->ops); + if (bridge->ops & DRM_BRIDGE_OP_DETECT) + drm_puts(&p, " detect"); + if (bridge->ops & DRM_BRIDGE_OP_EDID) + drm_puts(&p, " edid"); + if (bridge->ops & DRM_BRIDGE_OP_HPD) + drm_puts(&p, " hpd"); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + drm_puts(&p, " modes"); + if (bridge->ops & DRM_BRIDGE_OP_HDMI) + drm_puts(&p, " hdmi"); + drm_puts(&p, "\n"); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(encoder_bridges); + +void drm_bridge_debugfs_encoder_params(struct dentry *root, + struct drm_encoder *encoder) +{ + /* bridges list */ + debugfs_create_file("bridges", 0444, root, encoder, &encoder_bridges_fops); +} + MODULE_AUTHOR("Ajay Kumar "); MODULE_DESCRIPTION("DRM bridge infrastructure"); MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index 6b2178864c7e..3dfd8b34dceb 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -740,40 +740,6 @@ void drm_debugfs_crtc_remove(struct drm_crtc *crtc) crtc->debugfs_entry = NULL; } -static int bridges_show(struct seq_file *m, void *data) -{ - struct drm_encoder *encoder = m->private; - struct drm_printer p = drm_seq_file_printer(m); - struct drm_bridge *bridge; - unsigned int idx = 0; - - drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs); - drm_printf(&p, "\ttype: [%d] %s\n", - bridge->type, - drm_get_connector_type_name(bridge->type)); - - if (bridge->of_node) - drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); - - drm_printf(&p, "\tops: [0x%x]", bridge->ops); - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - drm_puts(&p, " detect"); - if (bridge->ops & DRM_BRIDGE_OP_EDID) - drm_puts(&p, " edid"); - if (bridge->ops & DRM_BRIDGE_OP_HPD) - drm_puts(&p, " hpd"); - if (bridge->ops & DRM_BRIDGE_OP_MODES) - drm_puts(&p, " modes"); - if (bridge->ops & DRM_BRIDGE_OP_HDMI) - drm_puts(&p, " hdmi"); - drm_puts(&p, "\n"); - } - - return 0; -} -DEFINE_SHOW_ATTRIBUTE(bridges); - void drm_debugfs_encoder_add(struct drm_encoder *encoder) { struct drm_minor *minor = encoder->dev->primary; @@ -789,9 +755,7 @@ void drm_debugfs_encoder_add(struct drm_encoder *encoder) encoder->debugfs_entry = root; - /* bridges list */ - debugfs_create_file("bridges", 0444, root, encoder, - &bridges_fops); + drm_bridge_debugfs_encoder_params(root, encoder); if (encoder->funcs && encoder->funcs->debugfs_init) encoder->funcs->debugfs_init(encoder, root); diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index d4c75d59fa12..bacbc5dbf281 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -1108,4 +1108,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif +void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder); + #endif From 
eff0347e7c228335e9ff64aaf02c66957803af6a Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Wed, 26 Feb 2025 22:23:53 +0100 Subject: [PATCH 0085/1627] drm/debugfs: add top-level 'bridges' file showing all added bridges The global bridges_list holding all the bridges between drm_bridge_add() and drm_bridge_remove() cannot be inspected via debugfs. Add a file showing it. To avoid code duplication, move the code printing a bridge info to a common function. Signed-off-by: Luca Ceresoli Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250226-drm-debugfs-show-all-bridges-v8-2-bb511cc49d83@bootlin.com Signed-off-by: Louis Chauvet --- drivers/gpu/drm/drm_bridge.c | 72 +++++++++++++++++++++++++----------- drivers/gpu/drm/drm_drv.c | 2 + include/drm/drm_bridge.h | 1 + 3 files changed, 53 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 3e23e1f0394d..ea9525ec16b5 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1301,6 +1301,49 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np) EXPORT_SYMBOL(of_drm_find_bridge); #endif +static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, + struct drm_bridge *bridge, + unsigned int idx) +{ + drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); + drm_printf(p, "\ttype: [%d] %s\n", + bridge->type, + drm_get_connector_type_name(bridge->type)); + + if (bridge->of_node) + drm_printf(p, "\tOF: %pOFfc\n", bridge->of_node); + + drm_printf(p, "\tops: [0x%x]", bridge->ops); + if (bridge->ops & DRM_BRIDGE_OP_DETECT) + drm_puts(p, " detect"); + if (bridge->ops & DRM_BRIDGE_OP_EDID) + drm_puts(p, " edid"); + if (bridge->ops & DRM_BRIDGE_OP_HPD) + drm_puts(p, " hpd"); + if (bridge->ops & DRM_BRIDGE_OP_MODES) + drm_puts(p, " modes"); + if (bridge->ops & DRM_BRIDGE_OP_HDMI) + drm_puts(p, " hdmi"); + drm_puts(p, "\n"); +} + +static int allbridges_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct drm_bridge *bridge; + unsigned int idx = 0; + + mutex_lock(&bridge_lock); + + list_for_each_entry(bridge, &bridge_list, list) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++); + + mutex_unlock(&bridge_lock); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(allbridges); + static int encoder_bridges_show(struct seq_file *m, void *data) { struct drm_encoder *encoder = m->private; @@ -1308,33 +1351,18 @@ static int encoder_bridges_show(struct seq_file *m, void *data) struct drm_bridge *bridge; unsigned int idx = 0; - drm_for_each_bridge_in_chain(encoder, bridge) { - drm_printf(&p, "bridge[%u]: %ps\n", idx++, bridge->funcs); - drm_printf(&p, "\ttype: [%d] %s\n", - bridge->type, - drm_get_connector_type_name(bridge->type)); - - if (bridge->of_node) - drm_printf(&p, "\tOF: %pOFfc\n", bridge->of_node); - - drm_printf(&p, "\tops: [0x%x]", bridge->ops); - if (bridge->ops & DRM_BRIDGE_OP_DETECT) - drm_puts(&p, " detect"); - if (bridge->ops & DRM_BRIDGE_OP_EDID) - drm_puts(&p, " edid"); - if (bridge->ops & DRM_BRIDGE_OP_HPD) - drm_puts(&p, " hpd"); - if (bridge->ops & DRM_BRIDGE_OP_MODES) - drm_puts(&p, " modes"); - if (bridge->ops & DRM_BRIDGE_OP_HDMI) - drm_puts(&p, " hdmi"); - drm_puts(&p, "\n"); - } + drm_for_each_bridge_in_chain(encoder, bridge) + drm_bridge_debugfs_show_bridge(&p, bridge, idx++); return 0; } DEFINE_SHOW_ATTRIBUTE(encoder_bridges); +void drm_bridge_debugfs_params(struct dentry *root) +{ + debugfs_create_file("bridges", 0444, root, NULL, &allbridges_fops); +} + void 
drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder) { diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index c9487bc88624..3dc7acd56b1d 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -40,6 +40,7 @@ #include #include +#include #include #include #include @@ -1209,6 +1210,7 @@ static int __init drm_core_init(void) } drm_debugfs_root = debugfs_create_dir("dri", NULL); + drm_bridge_debugfs_params(drm_debugfs_root); ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); if (ret < 0) diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index bacbc5dbf281..b0d86a685a41 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -1108,6 +1108,7 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm, } #endif +void drm_bridge_debugfs_params(struct dentry *root); void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder); #endif From 4383dd88fa77b8489b627125b268c3f1ab934e37 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 3 Mar 2025 18:35:18 +0100 Subject: [PATCH 0086/1627] drm/xe: Add MI_LOAD_REGISTER_REG command definition The MI_LOAD_REGISTER_REG command reads value from a source register location and writes that value to a destination register location. Bspec: 45730, 60233 Signed-off-by: Michal Wajdeczko Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250303173522.1822-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/instructions/xe_mi_commands.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 167fb0f742de..526bad9d4bac 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -61,6 +61,10 @@ #define MI_LOAD_REGISTER_MEM (__MI_INSTR(0x29) | XE_INSTR_NUM_DW(4)) #define MI_LRM_USE_GGTT REG_BIT(22) +#define MI_LOAD_REGISTER_REG (__MI_INSTR(0x2a) | XE_INSTR_NUM_DW(3)) +#define MI_LRR_DST_CS_MMIO REG_BIT(19) +#define MI_LRR_SRC_CS_MMIO REG_BIT(18) + #define MI_COPY_MEM_MEM (__MI_INSTR(0x2e) | XE_INSTR_NUM_DW(5)) #define MI_COPY_MEM_MEM_SRC_GGTT REG_BIT(22) #define MI_COPY_MEM_MEM_DST_GGTT REG_BIT(21) From b823f80bbd63f67c926a3a0c5dcf246fb8736d7b Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 4 Mar 2025 17:23:07 +0100 Subject: [PATCH 0087/1627] drm/xe: Add MI_MATH and ALU instruction definitions The command streamer implements an Arithmetic Logic Unit (ALU) which supports basic arithmetic and logical operations on two 64-bit operands. Access to this ALU is thru the MI_MATH command and sixteen General Purpose Register (GPR) 64-bit registers, which are used as temporary storage. 
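For illustration only, each ALU instruction DWORD packs the opcode into bits 31:20 and the two operands into bits 19:10 and 9:0, so for example:

    CS_ALU_INSTR_LOAD(SRCA, REG0) = (0x080 << 20) | (0x20 << 10) | 0x0 = 0x08008000
    CS_ALU_INSTR_AND              = (0x102 << 20)                      = 0x10200000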
Bspec: 45737, 60236 # MI Bspec: 45525, 60132 # ALU Bspec: 45533, 60309 # GPR Signed-off-by: Michal Wajdeczko Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250304162307.1866-1-michal.wajdeczko@intel.com --- .../gpu/drm/xe/instructions/xe_alu_commands.h | 79 +++++++++++++++++++ .../gpu/drm/xe/instructions/xe_mi_commands.h | 1 + drivers/gpu/drm/xe/regs/xe_engine_regs.h | 4 + 3 files changed, 84 insertions(+) create mode 100644 drivers/gpu/drm/xe/instructions/xe_alu_commands.h diff --git a/drivers/gpu/drm/xe/instructions/xe_alu_commands.h b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h new file mode 100644 index 000000000000..2987b10d3e16 --- /dev/null +++ b/drivers/gpu/drm/xe/instructions/xe_alu_commands.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_ALU_COMMANDS_H_ +#define _XE_ALU_COMMANDS_H_ + +#include "instructions/xe_instr_defs.h" + +/* Instruction Opcodes */ +#define CS_ALU_OPCODE_NOOP 0x000 +#define CS_ALU_OPCODE_FENCE_RD 0x001 +#define CS_ALU_OPCODE_FENCE_WR 0x002 +#define CS_ALU_OPCODE_LOAD 0x080 +#define CS_ALU_OPCODE_LOADINV 0x480 +#define CS_ALU_OPCODE_LOAD0 0x081 +#define CS_ALU_OPCODE_LOAD1 0x481 +#define CS_ALU_OPCODE_LOADIND 0x082 +#define CS_ALU_OPCODE_ADD 0x100 +#define CS_ALU_OPCODE_SUB 0x101 +#define CS_ALU_OPCODE_AND 0x102 +#define CS_ALU_OPCODE_OR 0x103 +#define CS_ALU_OPCODE_XOR 0x104 +#define CS_ALU_OPCODE_SHL 0x105 +#define CS_ALU_OPCODE_SHR 0x106 +#define CS_ALU_OPCODE_SAR 0x107 +#define CS_ALU_OPCODE_STORE 0x180 +#define CS_ALU_OPCODE_STOREINV 0x580 +#define CS_ALU_OPCODE_STOREIND 0x181 + +/* Instruction Operands */ +#define CS_ALU_OPERAND_REG(n) REG_FIELD_PREP(GENMASK(3, 0), (n)) +#define CS_ALU_OPERAND_REG0 0x0 +#define CS_ALU_OPERAND_REG1 0x1 +#define CS_ALU_OPERAND_REG2 0x2 +#define CS_ALU_OPERAND_REG3 0x3 +#define CS_ALU_OPERAND_REG4 0x4 +#define CS_ALU_OPERAND_REG5 0x5 +#define CS_ALU_OPERAND_REG6 0x6 +#define CS_ALU_OPERAND_REG7 0x7 +#define CS_ALU_OPERAND_REG8 0x8 +#define CS_ALU_OPERAND_REG9 0x9 +#define CS_ALU_OPERAND_REG10 0xa +#define CS_ALU_OPERAND_REG11 0xb +#define CS_ALU_OPERAND_REG12 0xc +#define CS_ALU_OPERAND_REG13 0xd +#define CS_ALU_OPERAND_REG14 0xe +#define CS_ALU_OPERAND_REG15 0xf +#define CS_ALU_OPERAND_SRCA 0x20 +#define CS_ALU_OPERAND_SRCB 0x21 +#define CS_ALU_OPERAND_ACCU 0x31 +#define CS_ALU_OPERAND_ZF 0x32 +#define CS_ALU_OPERAND_CF 0x33 +#define CS_ALU_OPERAND_NA 0 /* N/A operand */ + +/* Command Streamer ALU Instructions */ +#define CS_ALU_INSTR(opcode, op1, op2) (REG_FIELD_PREP(GENMASK(31, 20), (opcode)) | \ + REG_FIELD_PREP(GENMASK(19, 10), (op1)) | \ + REG_FIELD_PREP(GENMASK(9, 0), (op2))) + +#define __CS_ALU_INSTR(opcode, op1, op2) CS_ALU_INSTR(CS_ALU_OPCODE_##opcode, \ + CS_ALU_OPERAND_##op1, \ + CS_ALU_OPERAND_##op2) + +#define CS_ALU_INSTR_NOOP __CS_ALU_INSTR(NOOP, NA, NA) +#define CS_ALU_INSTR_LOAD(op1, op2) __CS_ALU_INSTR(LOAD, op1, op2) +#define CS_ALU_INSTR_LOADINV(op1, op2) __CS_ALU_INSTR(LOADINV, op1, op2) +#define CS_ALU_INSTR_LOAD0(op1) __CS_ALU_INSTR(LOAD0, op1, NA) +#define CS_ALU_INSTR_LOAD1(op1) __CS_ALU_INSTR(LOAD1, op1, NA) +#define CS_ALU_INSTR_ADD __CS_ALU_INSTR(ADD, NA, NA) +#define CS_ALU_INSTR_SUB __CS_ALU_INSTR(SUB, NA, NA) +#define CS_ALU_INSTR_AND __CS_ALU_INSTR(AND, NA, NA) +#define CS_ALU_INSTR_OR __CS_ALU_INSTR(OR, NA, NA) +#define CS_ALU_INSTR_XOR __CS_ALU_INSTR(XOR, NA, NA) +#define CS_ALU_INSTR_STORE(op1, op2) __CS_ALU_INSTR(STORE, op1, op2) +#define CS_ALU_INSTR_STOREINV(op1, op2) 
__CS_ALU_INSTR(STOREINV, op1, op2) + +#endif diff --git a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h index 526bad9d4bac..eba582058d55 100644 --- a/drivers/gpu/drm/xe/instructions/xe_mi_commands.h +++ b/drivers/gpu/drm/xe/instructions/xe_mi_commands.h @@ -32,6 +32,7 @@ #define MI_BATCH_BUFFER_END __MI_INSTR(0xA) #define MI_TOPOLOGY_FILTER __MI_INSTR(0xD) #define MI_FORCE_WAKEUP __MI_INSTR(0x1D) +#define MI_MATH(n) (__MI_INSTR(0x1A) | XE_INSTR_NUM_DW((n) + 1)) #define MI_STORE_DATA_IMM __MI_INSTR(0x20) #define MI_SDI_GGTT REG_BIT(22) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 4f372dc2cb89..659cf85fa3d6 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -184,6 +184,10 @@ #define PREEMPT_GPGPU_LEVEL_MASK PREEMPT_GPGPU_LEVEL(1, 1) #define PREEMPT_3D_OBJECT_LEVEL REG_BIT(0) +#define CS_GPR_DATA(base, n) XE_REG((base) + 0x600 + (n) * 4) +#define CS_GPR_REG(base, n) CS_GPR_DATA((base), (n) * 2) +#define CS_GPR_REG_UDW(base, n) CS_GPR_DATA((base), (n) * 2 + 1) + #define VDBOX_CGCTL3F08(base) XE_REG((base) + 0x3f08) #define CG3DDISHRS_CLKGATE_DIS REG_BIT(5) From f2f90989ccff2d010472d47e4e62f7afe8ce67ff Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 3 Mar 2025 18:35:20 +0100 Subject: [PATCH 0088/1627] drm/xe: Avoid reading RMW registers in emit_wa_job MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To allow VFs properly handle LRC WAs, we should postpone doing all RMW register operations and let them be run by the engine itself, since attempt to perform read registers from within the driver will fail on the VF. Use MI_MATH and ALU for that. 
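Per RMW register, the emitted MI_LOAD_REGISTER_REG / MI_LOAD_REGISTER_IMM / MI_MATH sequence makes the command streamer compute what the driver previously computed on the CPU; expressed as plain C purely for illustration:

    /* What the batch computes in GPR0..GPR2 and writes back to the register: */
    new_value = (old_value & ~entry->clr_bits) | entry->set_bits;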
Signed-off-by: Michal Wajdeczko Cc: Michał Winiarski Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250303173522.1822-4-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt.c | 84 ++++++++++++++++++++++++++++---------- 1 file changed, 63 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 10a9e3c72b36..8068b4bc0a09 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -12,8 +12,10 @@ #include +#include "instructions/xe_alu_commands.h" #include "instructions/xe_gfxpipe_commands.h" #include "instructions/xe_mi_commands.h" +#include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" #include "xe_assert.h" #include "xe_bb.h" @@ -176,15 +178,6 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q) return 0; } -/* - * Convert back from encoded value to type-safe, only to be used when reg.mcr - * is true - */ -static struct xe_reg_mcr to_xe_reg_mcr(const struct xe_reg reg) -{ - return (const struct xe_reg_mcr){.__reg.raw = reg.raw }; -} - static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) { struct xe_reg_sr *sr = &q->hwe->reg_lrc; @@ -194,6 +187,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) struct xe_bb *bb; struct dma_fence *fence; long timeout; + int count_rmw = 0; int count = 0; if (q->hwe->class == XE_ENGINE_CLASS_RENDER) @@ -206,30 +200,32 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) if (IS_ERR(bb)) return PTR_ERR(bb); - xa_for_each(&sr->xa, idx, entry) - ++count; + /* count RMW registers as those will be handled separately */ + xa_for_each(&sr->xa, idx, entry) { + if (entry->reg.masked || entry->clr_bits == ~0) + ++count; + else + ++count_rmw; + } + + if (count || count_rmw) + xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name); if (count) { - xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name); + /* emit single LRI with all non RMW regs */ bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count); xa_for_each(&sr->xa, idx, entry) { struct xe_reg reg = entry->reg; - struct xe_reg_mcr reg_mcr = to_xe_reg_mcr(reg); u32 val; - /* - * Skip reading the register if it's not really needed - */ if (reg.masked) val = entry->clr_bits << 16; - else if (entry->clr_bits + 1) - val = (reg.mcr ? 
- xe_gt_mcr_unicast_read_any(gt, reg_mcr) : - xe_mmio_read32(>->mmio, reg)) & (~entry->clr_bits); - else + else if (entry->clr_bits == ~0) val = 0; + else + continue; val |= entry->set_bits; @@ -239,6 +235,52 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) } } + if (count_rmw) { + /* emit MI_MATH for each RMW reg */ + + xa_for_each(&sr->xa, idx, entry) { + if (entry->reg.masked || entry->clr_bits == ~0) + continue; + + bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO; + bb->cs[bb->len++] = entry->reg.addr; + bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + + bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | + MI_LRI_LRM_CS_MMIO; + bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr; + bb->cs[bb->len++] = entry->clr_bits; + bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr; + bb->cs[bb->len++] = entry->set_bits; + + bb->cs[bb->len++] = MI_MATH(8); + bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0); + bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1); + bb->cs[bb->len++] = CS_ALU_INSTR_AND; + bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU); + bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0); + bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2); + bb->cs[bb->len++] = CS_ALU_INSTR_OR; + bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU); + + bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO; + bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + bb->cs[bb->len++] = entry->reg.addr; + + xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n", + entry->reg.addr, entry->clr_bits, entry->set_bits); + } + + /* reset used GPR */ + bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO; + bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + bb->cs[bb->len++] = 0; + bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr; + bb->cs[bb->len++] = 0; + bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr; + bb->cs[bb->len++] = 0; + } + xe_lrc_emit_hwe_state_instructions(q, bb); job = xe_bb_create_job(q, bb); From c19e705ec9815896abed94b929a843a0b3010651 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Mon, 3 Mar 2025 18:35:21 +0100 Subject: [PATCH 0089/1627] drm/xe/vf: Stop applying save-restore MMIOs if VF Currently we are blocking processing of all save-restore rules by the VFs inside the xe_rtp_process_to_sr() function, but we want to unblock that to allow processing of the LRC WA rules. To avoid hitting WARNs about reading an inaccessible registers by the VFs, stop applying save-restore MMIOs action if VF, without relying that SR list will be always empty for the VF. 
Signed-off-by: Michal Wajdeczko Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250303173522.1822-5-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_reg_sr.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c index 9475e3f74958..fc8447a838c4 100644 --- a/drivers/gpu/drm/xe/xe_reg_sr.c +++ b/drivers/gpu/drm/xe/xe_reg_sr.c @@ -173,6 +173,9 @@ void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt) if (xa_empty(&sr->xa)) return; + if (IS_SRIOV_VF(gt_to_xe(gt))) + return; + xe_gt_dbg(gt, "Applying %s save-restore MMIOs\n", sr->name); fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); From 92a5bd302458a1663daaad36994373e2ff0df5be Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 11 Mar 2025 11:52:21 +0100 Subject: [PATCH 0090/1627] drm/xe/vf: Unblock xe_rtp_process_to_sr for VFs In commit 9632dfb0def4 ("drm/xe/vf: Don't run any save-restore RTP actions if VF") we disabled processing of all RTP rules if we were running as a VFs, since many of the RTP actions were trying to access registers unaccessible for VFs. This also included all of LRC WA rules, since some of them were implemented in a way that required RMW pattern. Now, as we can program LRC WAs without accessing such registers from the driver, relying on the MI_MATH instruction instead, we can unblock xe_rtp_process_to_sr() for VFs. Signed-off-by: Michal Wajdeczko Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250311105221.1910-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_rtp.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 13bb62d3e615..29e694bb1219 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -258,9 +258,6 @@ void xe_rtp_process_to_sr(struct xe_rtp_process_ctx *ctx, rtp_get_context(ctx, &hwe, >, &xe); - if (IS_SRIOV_VF(xe)) - return; - xe_assert(xe, entries); for (entry = entries; entry - entries < n_entries; entry++) { From de35cc27fdf35a966f933fa5b611181e83689e1f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 11 Mar 2025 15:01:15 +0100 Subject: [PATCH 0091/1627] drm/xe: Prefer USEC_PER_SEC over MICRO It will be easier to understand the meaning of the flr_timeout value if the USEC_PER_SEC macro is used in the expression. Signed-off-by: Michal Wajdeczko Reviewed-by: Himal Prasad Ghimiray Link: https://patchwork.freedesktop.org/patch/msgid/20250311140115.2042-1-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index a7dd9c7b95e5..35eb001c6c1a 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -514,7 +514,7 @@ static bool xe_driver_flr_disabled(struct xe_device *xe) */ static void __xe_driver_flr(struct xe_device *xe) { - const unsigned int flr_timeout = 3 * MICRO; /* specs recommend a 3s wait */ + const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */ struct xe_mmio *mmio = xe_root_tile_mmio(xe); int ret; From d3414acf4a01c1bb28d9dc9b389862ffcc01aab4 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 11 Mar 2025 14:57:25 +0100 Subject: [PATCH 0092/1627] drm/xe/vf: Don't try Driver-FLR if VF Driver-FLR can't be triggered from the VF driver, so treat it as disabled if VF. 
While around, fix also the message, as it shouldn't be printed just 'once' as we may have many devices. Signed-off-by: Michal Wajdeczko Reviewed-by: Himal Prasad Ghimiray Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250311135726.1998-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_device.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 35eb001c6c1a..2f7d727c9392 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -496,7 +496,15 @@ ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */ static bool xe_driver_flr_disabled(struct xe_device *xe) { - return xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS; + if (IS_SRIOV_VF(xe)) + return true; + + if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) { + drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n"); + return true; + } + + return false; } /* @@ -560,10 +568,8 @@ static void __xe_driver_flr(struct xe_device *xe) static void xe_driver_flr(struct xe_device *xe) { - if (xe_driver_flr_disabled(xe)) { - drm_info_once(&xe->drm, "BIOS Disabled Driver-FLR\n"); + if (xe_driver_flr_disabled(xe)) return; - } __xe_driver_flr(xe); } From f990c11a63bb957ecdde4629492b334e3e42f072 Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 11 Mar 2025 14:57:26 +0100 Subject: [PATCH 0093/1627] drm/xe/vf: Catch all unexpected register reads While we can only mimic read32 for a few GT registers for which the PF shared the values, we shouldn't avoid calling helper code if we try to access non-GT register, as then we miss to trigger a debug warning. For cases where sriov_vf_gt was not set, just use primary_gt instead. Signed-off-by: Michal Wajdeczko Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250311135726.1998-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_mmio.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 70a36e777546..13e06a956ceb 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -204,8 +204,9 @@ void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val) trace_xe_reg_rw(mmio, true, addr, val, sizeof(val)); - if (!reg.vf && mmio->sriov_vf_gt) - xe_gt_sriov_vf_write32(mmio->sriov_vf_gt, reg, val); + if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe)) + xe_gt_sriov_vf_write32(mmio->sriov_vf_gt ?: + mmio->tile->primary_gt, reg, val); else writel(val, mmio->regs + addr); } @@ -218,8 +219,9 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) /* Wa_15015404425 */ mmio_flush_pending_writes(mmio); - if (!reg.vf && mmio->sriov_vf_gt) - val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt, reg); + if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe)) + val = xe_gt_sriov_vf_read32(mmio->sriov_vf_gt ?: + mmio->tile->primary_gt, reg); else val = readl(mmio->regs + addr); From fce3fb7b914bcd19341de8d8eff8bef371c2cddf Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 11 Mar 2025 12:40:41 +0100 Subject: [PATCH 0094/1627] drm/xe/vf: Don't check CTC_MODE[0] if VF Starting from commit 18778b5fdd01 ("drm/xe: Eliminate usage of TIMESTAMP_OVERRIDE") we access the CTC_MODE register only to warn if it has undocumented value. There is no point in doing that on the VF driver. While here, move this check to a helper function. 
Signed-off-by: Michal Wajdeczko Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250311114042.1954-2-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt_clock.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c index 2a958c92d8ea..fca38738e610 100644 --- a/drivers/gpu/drm/xe/xe_gt_clock.c +++ b/drivers/gpu/drm/xe/xe_gt_clock.c @@ -40,11 +40,8 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg) } } -int xe_gt_clock_init(struct xe_gt *gt) +static void check_ctc_mode(struct xe_gt *gt) { - u32 c0 = xe_mmio_read32(>->mmio, RPM_CONFIG0); - u32 freq = 0; - /* * CTC_MODE[0] = 1 is definitely not supported for Xe2 and later * platforms. In theory it could be a valid setting for pre-Xe2 @@ -57,7 +54,17 @@ int xe_gt_clock_init(struct xe_gt *gt) */ if (xe_mmio_read32(>->mmio, CTC_MODE) & CTC_SOURCE_DIVIDE_LOGIC) xe_gt_warn(gt, "CTC_MODE[0] is set; this is unexpected and undocumented\n"); +} +int xe_gt_clock_init(struct xe_gt *gt) +{ + u32 freq; + u32 c0; + + if (!IS_SRIOV_VF(gt_to_xe(gt))) + check_ctc_mode(gt); + + c0 = xe_mmio_read32(>->mmio, RPM_CONFIG0); freq = get_crystal_clock_freq(c0); /* From 459664c98da853527c757edb7e6e529a24fff03f Mon Sep 17 00:00:00 2001 From: Michal Wajdeczko Date: Tue, 11 Mar 2025 12:40:42 +0100 Subject: [PATCH 0095/1627] drm/xe/pf: Drop CTC_MODE from VF runtime register list This register shouldn't be used by the VF drivers. Signed-off-by: Michal Wajdeczko Cc: Matt Roper Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250311114042.1954-3-michal.wajdeczko@intel.com --- drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c index 4efde5f46b43..821cfcc34e6b 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c @@ -112,7 +112,6 @@ static const struct xe_reg tgl_runtime_regs[] = { XELP_GT_SLICE_ENABLE, /* _MMIO(0x9138) */ XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -124,7 +123,6 @@ static const struct xe_reg ats_m_runtime_regs[] = { XELP_GT_GEOMETRY_DSS_ENABLE, /* _MMIO(0x913c) */ GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -136,7 +134,6 @@ static const struct xe_reg pvc_runtime_regs[] = { GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */ - CTC_MODE, /* _MMIO(0xA26C) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -150,7 +147,6 @@ static const struct xe_reg ver_1270_runtime_regs[] = { GT_VEBOX_VDBOX_DISABLE, /* _MMIO(0x9140) */ XEHP_GT_COMPUTE_DSS_ENABLE, /* _MMIO(0x9144) */ XEHPC_GT_COMPUTE_DSS_ENABLE_EXT,/* _MMIO(0x9148) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -167,7 +163,6 @@ static const struct xe_reg ver_2000_runtime_regs[] = { XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; @@ -185,7 +180,6 @@ static const struct xe_reg ver_3000_runtime_regs[] = { 
XE2_GT_COMPUTE_DSS_2, /* _MMIO(0x914c) */ XE2_GT_GEOMETRY_DSS_1, /* _MMIO(0x9150) */ XE2_GT_GEOMETRY_DSS_2, /* _MMIO(0x9154) */ - CTC_MODE, /* _MMIO(0xa26c) */ HUC_KERNEL_LOAD_INFO, /* _MMIO(0xc1dc) */ }; From c67c0fef5d4d1b888275a588f0fb0f6f2755924b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 12 Mar 2025 14:44:00 +0100 Subject: [PATCH 0096/1627] drm/sched: revert "drm_sched_job_cleanup(): correct false doc" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This reverts commit 44d2f310f008613c1dbe5e234c2cf2be90cbbfab. The function drm_sched_job_arm() is indeed the point of no return. The background is that it is nearly impossible for the driver to correctly retract the fence and signal it in the order enforced by the dma_fence framework. The code in drm_sched_job_cleanup() is for the purpose to cleanup after the job was armed through drm_sched_job_arm() *and* processed by the scheduler. We can certainly improve the documentation, but removing the warning is clearly not a good idea. Signed-off-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250312134400.2176393-1-christian.koenig@amd.com --- drivers/gpu/drm/scheduler/sched_main.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 53e6aec37b46..4d4219fbe49d 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1015,13 +1015,11 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency); * Cleans up the resources allocated with drm_sched_job_init(). * * Drivers should call this from their error unwind code if @job is aborted - * before it was submitted to an entity with drm_sched_entity_push_job(). + * before drm_sched_job_arm() is called. * - * Since calling drm_sched_job_arm() causes the job's fences to be initialized, - * it is up to the driver to ensure that fences that were exposed to external - * parties get signaled. drm_sched_job_cleanup() does not ensure this. - * - * This function must also be called in &struct drm_sched_backend_ops.free_job + * After that point of no return @job is committed to be executed by the + * scheduler, and this function should be called from the + * &drm_sched_backend_ops.free_job callback. 
*/ void drm_sched_job_cleanup(struct drm_sched_job *job) { @@ -1032,7 +1030,7 @@ void drm_sched_job_cleanup(struct drm_sched_job *job) /* drm_sched_job_arm() has been called */ dma_fence_put(&job->s_fence->finished); } else { - /* aborted job before arming */ + /* aborted job before committing to run it */ drm_sched_fence_free(job->s_fence); } From f9f087d946266bc5da7c3a17bd8fd9d01969e3cf Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 27 Feb 2025 14:20:32 +0100 Subject: [PATCH 0097/1627] drm: xlnx: zynqmp_dpsub: fix Kconfig dependencies for ASoC The new audio code fails to build when sounds support is in a loadable module but the GPU driver is built-in: x86_64-linux-ld: zynqmp_dp_audio.c:(.text+0x6a8): undefined reference to `devm_snd_soc_register_card' x86_64-linux-ld: drivers/gpu/drm/xlnx/zynqmp_dp_audio.o:(.rodata+0x1bc): undefined reference to `snd_soc_info_volsw' x86_64-linux-ld: drivers/gpu/drm/xlnx/zynqmp_dp_audio.o:(.rodata+0x1f0): undefined reference to `snd_soc_get_volsw' x86_64-linux-ld: drivers/gpu/drm/xlnx/zynqmp_dp_audio.o:(.rodata+0x1f4): undefined reference to `snd_soc_put_volsw' Change the Kconfig dependency to disallow the sound support in this configuration. Fixes: 3ec5c1579305 ("drm: xlnx: zynqmp_dpsub: Add DP audio support") Signed-off-by: Arnd Bergmann Signed-off-by: Tomi Valkeinen Link: https://patchwork.freedesktop.org/patch/msgid/20250227132036.1136600-1-arnd@kernel.org --- drivers/gpu/drm/xlnx/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig index dbecca9bdd54..cfabf5e2a0bb 100644 --- a/drivers/gpu/drm/xlnx/Kconfig +++ b/drivers/gpu/drm/xlnx/Kconfig @@ -22,6 +22,7 @@ config DRM_ZYNQMP_DPSUB_AUDIO bool "ZynqMP DisplayPort Audio Support" depends on DRM_ZYNQMP_DPSUB depends on SND && SND_SOC + depends on SND_SOC=y || DRM_ZYNQMP_DPSUB=m select SND_SOC_GENERIC_DMAENGINE_PCM help Choose this option to enable DisplayPort audio support in the ZynqMP From 10646ddac2917b31c985ceff0e4982c42a9c924b Mon Sep 17 00:00:00 2001 From: Vignesh Raman Date: Fri, 28 Feb 2025 18:56:18 +0530 Subject: [PATCH 0098/1627] drm/ci: fix merge request rules Merge request pipelines were only created when changes were made to drivers/gpu/drm/ci/, causing MRs that didn't touch this path to break. Fix MR pipeline rules to trigger jobs for all changes. Run jobs automatically for marge-bot and scheduled pipelines, but in all other cases run manually. Also remove CI_PROJECT_NAMESPACE checks specific to mesa. 
Fixes: df54f04f2020 ("drm/ci: update gitlab rules") Signed-off-by: Vignesh Raman Reviewed-by: Daniel Stone Signed-off-by: Helen Koike Link: https://patchwork.freedesktop.org/patch/msgid/20250228132620.556079-1-vignesh.raman@collabora.com --- drivers/gpu/drm/ci/gitlab-ci.yml | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/ci/gitlab-ci.yml b/drivers/gpu/drm/ci/gitlab-ci.yml index f04aabe8327c..b06b9e7d3d09 100644 --- a/drivers/gpu/drm/ci/gitlab-ci.yml +++ b/drivers/gpu/drm/ci/gitlab-ci.yml @@ -143,11 +143,11 @@ stages: # Pre-merge pipeline - if: &is-pre-merge $CI_PIPELINE_SOURCE == "merge_request_event" # Push to a branch on a fork - - if: &is-fork-push $CI_PROJECT_NAMESPACE != "mesa" && $CI_PIPELINE_SOURCE == "push" + - if: &is-fork-push $CI_PIPELINE_SOURCE == "push" # nightly pipeline - if: &is-scheduled-pipeline $CI_PIPELINE_SOURCE == "schedule" # pipeline for direct pushes that bypassed the CI - - if: &is-direct-push $CI_PROJECT_NAMESPACE == "mesa" && $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot" + - if: &is-direct-push $CI_PIPELINE_SOURCE == "push" && $GITLAB_USER_LOGIN != "marge-bot" # Rules applied to every job in the pipeline @@ -170,26 +170,15 @@ stages: - !reference [.disable-farm-mr-rules, rules] # Never run immediately after merging, as we just ran everything - !reference [.never-post-merge-rules, rules] - # Build everything in merge pipelines, if any files affecting the pipeline - # were changed + # Build everything in merge pipelines - if: *is-merge-attempt - changes: &all_paths - - drivers/gpu/drm/ci/**/* when: on_success # Same as above, but for pre-merge pipelines - if: *is-pre-merge - changes: - *all_paths when: manual - # Skip everything for pre-merge and merge pipelines which don't change - # anything in the build - - if: *is-merge-attempt - when: never - - if: *is-pre-merge - when: never # Build everything after someone bypassed the CI - if: *is-direct-push - when: on_success + when: manual # Build everything in scheduled pipelines - if: *is-scheduled-pipeline when: on_success From 80bcbdfc8cf51b41fde0a2466a83e057c5a1bf3a Mon Sep 17 00:00:00 2001 From: Francois Dugast Date: Wed, 12 Mar 2025 10:27:49 +0100 Subject: [PATCH 0099/1627] drm/xe/svm: Add stats for SVM page faults Add a new entry in stats to for svm page faults. If CONFIG_DEBUG_FS is enabled, the count can be viewed with per GT stat debugfs file. This is similar to what is already in place for vma page faults. 
Example output: cat /sys/kernel/debug/dri/0/gt0/stats svm_pagefault_count: 6 tlb_inval_count: 78 vma_pagefault_count: 0 vma_pagefault_kb: 0 v2: Fix build with CONFIG_DRM_GPUSVM disabled v3: Update argument in kernel doc Reviewed-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20250312092749.164232-1-francois.dugast@intel.com Signed-off-by: Francois Dugast --- drivers/gpu/drm/xe/xe_gt_pagefault.c | 2 +- drivers/gpu/drm/xe/xe_gt_stats.c | 1 + drivers/gpu/drm/xe/xe_gt_stats_types.h | 1 + drivers/gpu/drm/xe/xe_svm.c | 8 ++++++-- drivers/gpu/drm/xe/xe_svm.h | 7 ++++--- 5 files changed, 13 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index c5ad9a0a89c2..9fa11e837dd1 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -240,7 +240,7 @@ static int handle_pagefault(struct xe_gt *gt, struct pagefault *pf) atomic = access_is_atomic(pf->access_type); if (xe_vma_is_cpu_addr_mirror(vma)) - err = xe_svm_handle_pagefault(vm, vma, gt_to_tile(gt), + err = xe_svm_handle_pagefault(vm, vma, gt, pf->page_addr, atomic); else err = handle_vma_pagefault(gt, vma, atomic); diff --git a/drivers/gpu/drm/xe/xe_gt_stats.c b/drivers/gpu/drm/xe/xe_gt_stats.c index 6155ea354432..30f942671c2b 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats.c +++ b/drivers/gpu/drm/xe/xe_gt_stats.c @@ -27,6 +27,7 @@ void xe_gt_stats_incr(struct xe_gt *gt, const enum xe_gt_stats_id id, int incr) } static const char *const stat_description[__XE_GT_STATS_NUM_IDS] = { + "svm_pagefault_count", "tlb_inval_count", "vma_pagefault_count", "vma_pagefault_kb", diff --git a/drivers/gpu/drm/xe/xe_gt_stats_types.h b/drivers/gpu/drm/xe/xe_gt_stats_types.h index d556771f99d6..be3244d7133c 100644 --- a/drivers/gpu/drm/xe/xe_gt_stats_types.h +++ b/drivers/gpu/drm/xe/xe_gt_stats_types.h @@ -7,6 +7,7 @@ #define _XE_GT_STATS_TYPES_H_ enum xe_gt_stats_id { + XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, XE_GT_STATS_ID_TLB_INVAL, XE_GT_STATS_ID_VMA_PAGEFAULT_COUNT, XE_GT_STATS_ID_VMA_PAGEFAULT_KB, diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 516898e99b26..08617a62ab07 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -4,6 +4,7 @@ */ #include "xe_bo.h" +#include "xe_gt_stats.h" #include "xe_gt_tlb_invalidation.h" #include "xe_migrate.h" #include "xe_module.h" @@ -713,7 +714,7 @@ unlock: * xe_svm_handle_pagefault() - SVM handle page fault * @vm: The VM. * @vma: The CPU address mirror VMA. - * @tile: The tile upon the fault occurred. + * @gt: The gt upon the fault occurred. * @fault_addr: The GPU fault address. * @atomic: The fault atomic access bit. * @@ -723,7 +724,7 @@ unlock: * Return: 0 on success, negative error code on error. 
*/ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, - struct xe_tile *tile, u64 fault_addr, + struct xe_gt *gt, u64 fault_addr, bool atomic) { struct drm_gpusvm_ctx ctx = { @@ -737,12 +738,15 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, struct drm_gpusvm_range *r; struct drm_exec exec; struct dma_fence *fence; + struct xe_tile *tile = gt_to_tile(gt); ktime_t end = 0; int err; lockdep_assert_held_write(&vm->lock); xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma)); + xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1); + retry: /* Always process UNMAPs first so view SVM ranges is current */ err = xe_svm_garbage_collector(vm); diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index e059590e5076..93442738666e 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -12,10 +12,11 @@ #define XE_INTERCONNECT_VRAM DRM_INTERCONNECT_DRIVER struct xe_bo; -struct xe_vram_region; +struct xe_gt; struct xe_tile; struct xe_vm; struct xe_vma; +struct xe_vram_region; /** struct xe_svm_range - SVM range */ struct xe_svm_range { @@ -64,7 +65,7 @@ void xe_svm_fini(struct xe_vm *vm); void xe_svm_close(struct xe_vm *vm); int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, - struct xe_tile *tile, u64 fault_addr, + struct xe_gt *gt, u64 fault_addr, bool atomic); bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end); @@ -102,7 +103,7 @@ void xe_svm_close(struct xe_vm *vm) static inline int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, - struct xe_tile *tile, u64 fault_addr, + struct xe_gt *gt, u64 fault_addr, bool atomic) { return 0; From 5d6c69b712f9cb34063ef32168ce6a12af8acf0c Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Thu, 27 Feb 2025 09:11:06 +0530 Subject: [PATCH 0100/1627] drm/i915/watermark: Check bounds for scaler_users for dsc prefill latency Currently, during the computation of global watermarks, the latency for each scaler user is calculated to compute the DSC prefill latency. At this point, the number of scaler users can exceed the number of supported scalers, which is checked later in intel_atomic_setup_scalers(). This can cause issues when the number of scaler users exceeds the number of supported scalers. While checking for DSC prefill, ensure that the number of scaler users does not exceed the number of supported scalers. 
Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/4341 Fixes: a9b14af999b0 ("drm/i915/dsc: Check if vblank is sufficient for dsc prefill") Cc: Mitul Golani Cc: Ankit Nautiyal Cc: Jani Nikula Signed-off-by: Ankit Nautiyal Reviewed-by: Mitul Golani Link: https://patchwork.freedesktop.org/patch/msgid/20250227034106.1638203-1-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/skl_watermark.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 2d0de1c63308..621e97943542 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -2314,6 +2314,7 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state) static int dsc_prefill_latency(const struct intel_crtc_state *crtc_state) { + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); const struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state; int linetime = DIV_ROUND_UP(1000 * crtc_state->hw.adjusted_mode.htotal, @@ -2323,7 +2324,9 @@ dsc_prefill_latency(const struct intel_crtc_state *crtc_state) crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ? 2 : 1; u32 dsc_prefill_latency = 0; - if (!crtc_state->dsc.compression_enable || !num_scaler_users) + if (!crtc_state->dsc.compression_enable || + !num_scaler_users || + num_scaler_users > crtc->num_scalers) return dsc_prefill_latency; dsc_prefill_latency = DIV_ROUND_UP(15 * linetime * chroma_downscaling_factor, 10); From 03710f3d063d8f4873ef43d030bea375243bcbe4 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:44 +0530 Subject: [PATCH 0101/1627] drm/i915/vrr: Remove unwanted comment MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The comment about fixed average vtotal is incorrect. Remove it. Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-2-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index cac49319026d..106bfaf6649b 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -276,11 +276,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display); - /* - * When panel is VRR capable and userspace has - * not enabled adaptive sync mode then Fixed Average - * Vtotal mode should be enabled. - */ if (crtc_state->uapi.vrr_enabled) { crtc_state->vrr.enable = true; crtc_state->mode_flags |= I915_MODE_FLAG_VRR; From 022d04b355a2771bc3de970a7f14980a716bfe4c Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:45 +0530 Subject: [PATCH 0102/1627] drm/i915:vrr: Separate out functions to compute vmin and vmax MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make helpers to compute vmin and vmax. v2: Make the adjusted mode const (Ville) Use reverse xmas tree order of declarations. 
(Ville) Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-3-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 38 +++++++++++++++++++----- 1 file changed, 30 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 106bfaf6649b..a88b77114867 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -222,6 +222,34 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required) return vtotal; } +static +int intel_vrr_compute_vmin(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode) +{ + const struct drm_display_info *info = &connector->base.display_info; + int vmin; + + vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, + adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq); + vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal); + + return vmin; +} + +static +int intel_vrr_compute_vmax(struct intel_connector *connector, + const struct drm_display_mode *adjusted_mode) +{ + const struct drm_display_info *info = &connector->base.display_info; + int vmax; + + vmax = adjusted_mode->crtc_clock * 1000 / + (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq); + vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal); + + return vmax; +} + void intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct drm_connector_state *conn_state) @@ -232,7 +260,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, struct intel_dp *intel_dp = intel_attached_dp(connector); bool is_edp = intel_dp_is_edp(intel_dp); struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - const struct drm_display_info *info = &connector->base.display_info; int vmin, vmax; /* @@ -253,13 +280,8 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, if (HAS_LRR(display)) crtc_state->update_lrr = true; - vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, - adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq); - vmax = adjusted_mode->crtc_clock * 1000 / - (adjusted_mode->crtc_htotal * info->monitor_range.min_vfreq); - - vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal); - vmax = max_t(int, vmax, adjusted_mode->crtc_vtotal); + vmin = intel_vrr_compute_vmin(connector, adjusted_mode); + vmax = intel_vrr_compute_vmax(connector, adjusted_mode); if (vmin >= vmax) return; From 58f9466c8292f8319158eb4b0f5fcbe89709d499 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:46 +0530 Subject: [PATCH 0103/1627] drm/i915/vrr: Make helpers for cmrr and vrr timings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Separate out functions for computing cmrr and vrr timings. 
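As a rough illustration of what the vmin/vmax helpers introduced in the previous
patch compute (hypothetical panel numbers, not taken from any real EDID), consider
an adjusted mode with crtc_clock = 148500 kHz, crtc_htotal = 2200, crtc_vtotal =
1125 and a 48-120 Hz monitor range:

	vmin = DIV_ROUND_UP(148500 * 1000, 2200 * 120) = 563, raised to crtc_vtotal = 1125
	vmax = 148500 * 1000 / (2200 * 48) = 1406, already above crtc_vtotal

so the VRR timing generator may stretch vtotal anywhere between 1125 and 1406
lines, i.e. roughly from 60 Hz down to 48 Hz.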
Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-4-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 45 +++++++++++++++--------- 1 file changed, 28 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index a88b77114867..db0ea206e26e 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -222,6 +222,30 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required) return vtotal; } +static +void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state) +{ + crtc_state->vrr.enable = true; + crtc_state->cmrr.enable = true; + /* + * TODO: Compute precise target refresh rate to determine + * if video_mode_required should be true. Currently set to + * false due to uncertainty about the precise target + * refresh Rate. + */ + crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false); + crtc_state->vrr.vmin = crtc_state->vrr.vmax; + crtc_state->vrr.flipline = crtc_state->vrr.vmin; + crtc_state->mode_flags |= I915_MODE_FLAG_VRR; +} + +static +void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state) +{ + crtc_state->vrr.enable = true; + crtc_state->mode_flags |= I915_MODE_FLAG_VRR; +} + static int intel_vrr_compute_vmin(struct intel_connector *connector, const struct drm_display_mode *adjusted_mode) @@ -298,23 +322,10 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display); - if (crtc_state->uapi.vrr_enabled) { - crtc_state->vrr.enable = true; - crtc_state->mode_flags |= I915_MODE_FLAG_VRR; - } else if (is_cmrr_frac_required(crtc_state) && is_edp) { - crtc_state->vrr.enable = true; - crtc_state->cmrr.enable = true; - /* - * TODO: Compute precise target refresh rate to determine - * if video_mode_required should be true. Currently set to - * false due to uncertainty about the precise target - * refresh Rate. - */ - crtc_state->vrr.vmax = cmrr_get_vtotal(crtc_state, false); - crtc_state->vrr.vmin = crtc_state->vrr.vmax; - crtc_state->vrr.flipline = crtc_state->vrr.vmin; - crtc_state->mode_flags |= I915_MODE_FLAG_VRR; - } + if (crtc_state->uapi.vrr_enabled) + intel_vrr_compute_vrr_timings(crtc_state); + else if (is_cmrr_frac_required(crtc_state) && is_edp) + intel_vrr_compute_cmrr_timings(crtc_state); if (HAS_AS_SDP(display)) { crtc_state->vrr.vsync_start = From a15b20e5094abd3bed90edfc22bf9ff84ef99e3c Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:47 +0530 Subject: [PATCH 0104/1627] drm/i915/vrr: Disable CMRR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switching between variable and fixed timings is possible as for that we just need to flip between VRR timings. However for CMRR along with the timings, few other bits also need to be changed on the fly, which might cause issues. So disable CMRR for now, till we have variable and fixed timings sorted out. 
Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-5-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index db0ea206e26e..a57659820f4b 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -182,7 +182,8 @@ is_cmrr_frac_required(struct intel_crtc_state *crtc_state) int calculated_refresh_k, actual_refresh_k, pixel_clock_per_line; struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; - if (!HAS_CMRR(display)) + /* Avoid CMRR for now till we have VRR with fixed timings working */ + if (!HAS_CMRR(display) || true) return false; actual_refresh_k = From 27217f9d185666c8bc1449796cd4029ca66d8d3c Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:48 +0530 Subject: [PATCH 0105/1627] drm/i915/vrr: Track vrr.enable only for variable timing MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since CMRR is now disabled, use the flag vrr.enable to tracks if vrr timing generator is used with variable timings. Avoid setting vrr.enable for CMRR and adjust readout to not set vrr.enable when vmax == vmin == flipline (fixed refresh rate timing). v2: Use intel_vrr_vmin_flipline() to account for adjustments required for icl/tgl. (Ville) v3: Add a #TODO for handling I915_MODE_FLAG_VRR better for CMRR. (Ville) Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-6-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index a57659820f4b..7320eb97991f 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -226,7 +226,6 @@ cmrr_get_vtotal(struct intel_crtc_state *crtc_state, bool video_mode_required) static void intel_vrr_compute_cmrr_timings(struct intel_crtc_state *crtc_state) { - crtc_state->vrr.enable = true; crtc_state->cmrr.enable = true; /* * TODO: Compute precise target refresh rate to determine @@ -527,6 +526,14 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); } +static +bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state) +{ + return crtc_state->vrr.flipline && + crtc_state->vrr.flipline == crtc_state->vrr.vmax && + crtc_state->vrr.flipline == intel_vrr_vmin_flipline(crtc_state); +} + void intel_vrr_get_config(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -536,7 +543,6 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) trans_vrr_ctl = intel_de_read(display, TRANS_VRR_CTL(display, cpu_transcoder)); - crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE; if (HAS_CMRR(display)) crtc_state->cmrr.enable = (trans_vrr_ctl & VRR_CTL_CMRR_ENABLE); @@ -576,6 +582,14 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) } } + crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE && + !intel_vrr_is_fixed_rr(crtc_state); + + /* + * #TODO: For Both VRR and CMRR the flag I915_MODE_FLAG_VRR is set for mode_flags. 
+ * Since CMRR is currently disabled, set this flag for VRR for now. + * Need to keep this in mind while re-enabling CMRR. + */ if (crtc_state->vrr.enable) crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } From 1f44247dde98b582d7d6ce7d402facc070fa4506 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:49 +0530 Subject: [PATCH 0106/1627] drm/i915/vrr: Use crtc_vtotal for vmin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To have fixed refresh rate with VRR timing generator the guardband/pipeline full can't be programmed on the fly. So we need to ensure that the values satisfy both the fixed and variable refresh rates. Since we compute these value based on vmin, lets set the vmin to crtc_vtotal for both fixed and variable timings instead of using the current refresh rate based approach. This way the guardband remains sufficient for both cases. v2: Avoid using vblank delay while computing vtotal, as this comes into the picture later. (Ville) Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-7-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 7320eb97991f..e0573e28014b 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -247,17 +247,16 @@ void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state) } static -int intel_vrr_compute_vmin(struct intel_connector *connector, - const struct drm_display_mode *adjusted_mode) +int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state) { - const struct drm_display_info *info = &connector->base.display_info; - int vmin; - - vmin = DIV_ROUND_UP(adjusted_mode->crtc_clock * 1000, - adjusted_mode->crtc_htotal * info->monitor_range.max_vfreq); - vmin = max_t(int, vmin, adjusted_mode->crtc_vtotal); - - return vmin; + /* + * To make fixed rr and vrr work seamless the guardband/pipeline full + * should be set such that it satisfies both the fixed and variable + * timings. + * For this set the vmin as crtc_vtotal. With this we never need to + * change anything to do with the guardband. + */ + return crtc_state->hw.adjusted_mode.crtc_vtotal; } static @@ -304,7 +303,7 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, if (HAS_LRR(display)) crtc_state->update_lrr = true; - vmin = intel_vrr_compute_vmin(connector, adjusted_mode); + vmin = intel_vrr_compute_vmin(crtc_state); vmax = intel_vrr_compute_vmax(connector, adjusted_mode); if (vmin >= vmax) From bef1e60c7087418eea26f85a19dca7e6360857a9 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:50 +0530 Subject: [PATCH 0107/1627] drm/i915/vrr: Prepare for fixed refresh rate timings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we always compute the timings as if vrr is enabled. With this approach the state checker becomes complicated when we introduce fixed refresh rate mode with vrr timing generator. To avoid the complications, instead of always computing vrr timings, we compute vrr timings based on uapi.vrr_enable knob. So when the knob is disabled we always compute vmin=flipline=vmax. v2: Use actual timings without any adjustments while preparing for fixed timings in compute_config. 
(Ville) v3: Avoid setting fixed timings if !vrr_possible(). v4: Move vmin adjustement after all other timings are complete. (Ville) Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä (#v2) Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-8-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 87 ++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index e0573e28014b..622a70e21737 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -246,6 +246,72 @@ void intel_vrr_compute_vrr_timings(struct intel_crtc_state *crtc_state) crtc_state->mode_flags |= I915_MODE_FLAG_VRR; } +/* + * For fixed refresh rate mode Vmin, Vmax and Flipline all are set to + * Vtotal value. + */ +static +int intel_vrr_fixed_rr_vtotal(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + int crtc_vtotal = crtc_state->hw.adjusted_mode.crtc_vtotal; + + if (DISPLAY_VER(display) >= 13) + return crtc_vtotal; + else + return crtc_vtotal - + intel_vrr_real_vblank_delay(crtc_state); +} + +static +int intel_vrr_fixed_rr_vmax(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_fixed_rr_vtotal(crtc_state); +} + +static +int intel_vrr_fixed_rr_vmin(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + return intel_vrr_fixed_rr_vtotal(crtc_state) - + intel_vrr_flipline_offset(display); +} + +static +int intel_vrr_fixed_rr_flipline(const struct intel_crtc_state *crtc_state) +{ + return intel_vrr_fixed_rr_vtotal(crtc_state); +} + +static +void intel_vrr_set_fixed_rr_timings(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (!intel_vrr_possible(crtc_state)) + return; + + intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), + intel_vrr_fixed_rr_vmin(crtc_state) - 1); + intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), + intel_vrr_fixed_rr_vmax(crtc_state) - 1); + intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), + intel_vrr_fixed_rr_flipline(crtc_state) - 1); +} + +static +void intel_vrr_compute_fixed_rr_timings(struct intel_crtc_state *crtc_state) +{ + /* + * For fixed rr, vmin = vmax = flipline. + * vmin is already set to crtc_vtotal set vmax and flipline the same. 
+ */ + crtc_state->vrr.vmax = crtc_state->hw.adjusted_mode.crtc_vtotal; + crtc_state->vrr.flipline = crtc_state->hw.adjusted_mode.crtc_vtotal; +} + static int intel_vrr_compute_vmin(struct intel_crtc_state *crtc_state) { @@ -314,6 +380,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, crtc_state->vrr.flipline = crtc_state->vrr.vmin; + if (crtc_state->uapi.vrr_enabled) + intel_vrr_compute_vrr_timings(crtc_state); + else if (is_cmrr_frac_required(crtc_state) && is_edp) + intel_vrr_compute_cmrr_timings(crtc_state); + else + intel_vrr_compute_fixed_rr_timings(crtc_state); + /* * flipline determines the min vblank length the hardware will * generate, and on ICL/TGL flipline>=vmin+1, hence we reduce @@ -321,11 +394,6 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state, */ crtc_state->vrr.vmin -= intel_vrr_flipline_offset(display); - if (crtc_state->uapi.vrr_enabled) - intel_vrr_compute_vrr_timings(crtc_state); - else if (is_cmrr_frac_required(crtc_state) && is_edp) - intel_vrr_compute_cmrr_timings(crtc_state); - if (HAS_AS_SDP(display)) { crtc_state->vrr.vsync_start = (crtc_state->hw.adjusted_mode.crtc_vtotal - @@ -496,6 +564,13 @@ void intel_vrr_enable(const struct intel_crtc_state *crtc_state) if (!crtc_state->vrr.enable) return; + intel_de_write(display, TRANS_VRR_VMIN(display, cpu_transcoder), + crtc_state->vrr.vmin - 1); + intel_de_write(display, TRANS_VRR_VMAX(display, cpu_transcoder), + crtc_state->vrr.vmax - 1); + intel_de_write(display, TRANS_VRR_FLIPLINE(display, cpu_transcoder), + crtc_state->vrr.flipline - 1); + intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN); @@ -523,6 +598,8 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) TRANS_VRR_STATUS(display, cpu_transcoder), VRR_STATUS_VRR_EN_LIVE, 1000); intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0); + + intel_vrr_set_fixed_rr_timings(old_crtc_state); } static From 2e921e1d47e627e575ac94eca9db81e374b1e409 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Tue, 11 Mar 2025 15:07:51 +0530 Subject: [PATCH 0108/1627] drm/i915/display: Enable MSA Ignore Timing PAR only when in not fixed_rr mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MSA Ignore Timing PAR enable is set in the DP sink when we enable variable refresh rate. Currently for link training we depend on flipline to decide whether we want to ignore the msa timings. With fixed refresh rate we will still fill the flipline in all cases whether panel supports VRR or not. Change the condition for link training to ignore the msa timings if vrr.in_range. v2: Add more documentation and a #TODO for readout of vrr.in_range. 
(Ville) Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311093751.1329043-9-ankit.k.nautiyal@intel.com --- .../gpu/drm/i915/display/intel_dp_link_training.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index ded246bbf232..53480914f239 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -727,8 +727,21 @@ void intel_dp_link_training_set_mode(struct intel_dp *intel_dp, int link_rate, b static void intel_dp_update_downspread_ctrl(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + /* + * Currently, we set the MSA ignore bit based on vrr.in_range. + * We can't really read that out during driver load since we don't have + * the connector information read in yet. So if we do end up doing a + * modeset during initial_commit() we'll clear the MSA ignore bit. + * GOP likely wouldn't have set this bit so after the initial commit, + * if there are no modesets and we enable VRR mode seamlessly + * (without a full modeset), the MSA ignore bit might never get set. + * + * #TODO: Implement readout of vrr.in_range. + * We need fastset support for setting the MSA ignore bit in DPCD, + * especially on the first real commit when clearing the inherited flag. + */ intel_dp_link_training_set_mode(intel_dp, - crtc_state->port_clock, crtc_state->vrr.flipline); + crtc_state->port_clock, crtc_state->vrr.in_range); } void intel_dp_link_training_set_bw(struct intel_dp *intel_dp, From 9377c00cfdb5cfc35dee3f62c52fce96d91464b7 Mon Sep 17 00:00:00 2001 From: Gustavo Sousa Date: Tue, 11 Mar 2025 14:04:50 -0300 Subject: [PATCH 0109/1627] drm/i915/display: Convert intel_bw.c internally to intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update intel_bw.c internally use intel_display. Conversion of the public interface will come as a follow-up. v2: - Prefer intel_uncore_read() for MCHBAR registers. (Ville) v3: - Remove the unnecessary inclusion of intel_de.h after changes from v2. 
(Ville) Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311-xe3lpd-bandwidth-update-v5-1-a95a9d90ad71@intel.com Signed-off-by: Gustavo Sousa --- drivers/gpu/drm/i915/display/intel_bw.c | 416 ++++++++++++------------ 1 file changed, 217 insertions(+), 199 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 048be2872247..6f805af32926 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -39,14 +39,15 @@ struct intel_qgv_info { u8 deinterleave; }; -static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, +static int dg1_mchbar_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 dclk_ratio, dclk_reference; u32 val; - val = intel_uncore_read(&dev_priv->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC); + val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC); dclk_ratio = REG_FIELD_GET(DG1_QCLK_RATIO_MASK, val); if (val & DG1_QCLK_REFERENCE) dclk_reference = 6; /* 6 * 16.666 MHz = 100 MHz */ @@ -54,18 +55,18 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, dclk_reference = 8; /* 8 * 16.666 MHz = 133 MHz */ sp->dclk = DIV_ROUND_UP((16667 * dclk_ratio * dclk_reference) + 500, 1000); - val = intel_uncore_read(&dev_priv->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); + val = intel_uncore_read(&i915->uncore, SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU); if (val & DG1_GEAR_TYPE) sp->dclk *= 2; if (sp->dclk == 0) return -EINVAL; - val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR); + val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR); sp->t_rp = REG_FIELD_GET(DG1_DRAM_T_RP_MASK, val); sp->t_rdpre = REG_FIELD_GET(DG1_DRAM_T_RDPRE_MASK, val); - val = intel_uncore_read(&dev_priv->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH); + val = intel_uncore_read(&i915->uncore, MCHBAR_CH0_CR_TC_PRE_0_0_0_MCHBAR_HIGH); sp->t_rcd = REG_FIELD_GET(DG1_DRAM_T_RCD_MASK, val); sp->t_ras = REG_FIELD_GET(DG1_DRAM_T_RAS_MASK, val); @@ -74,22 +75,23 @@ static int dg1_mchbar_read_qgv_point_info(struct drm_i915_private *dev_priv, return 0; } -static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, +static int icl_pcode_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 val = 0, val2 = 0; u16 dclk; int ret; - ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), &val, &val2); if (ret) return ret; dclk = val & 0xffff; - sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0), + sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(display) >= 12 ? 
500 : 0), 1000); sp->t_rp = (val & 0xff0000) >> 16; sp->t_rcd = (val & 0xff000000) >> 24; @@ -102,14 +104,15 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, return 0; } -static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, - struct intel_psf_gv_point *points) +static int adls_pcode_read_psf_gv_point_info(struct intel_display *display, + struct intel_psf_gv_point *points) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 val = 0; int ret; int i; - ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); if (ret) return ret; @@ -122,10 +125,10 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, return 0; } -static u16 icl_qgv_points_mask(struct drm_i915_private *i915) +static u16 icl_qgv_points_mask(struct intel_display *display) { - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; u16 qgv_points = 0, psf_points = 0; /* @@ -142,18 +145,19 @@ static u16 icl_qgv_points_mask(struct drm_i915_private *i915) return ICL_PCODE_REQ_QGV_PT(qgv_points) | ADLS_PCODE_REQ_PSF_PT(psf_points); } -static bool is_sagv_enabled(struct drm_i915_private *i915, u16 points_mask) +static bool is_sagv_enabled(struct intel_display *display, u16 points_mask) { - return !is_power_of_2(~points_mask & icl_qgv_points_mask(i915) & + return !is_power_of_2(~points_mask & icl_qgv_points_mask(display) & ICL_PCODE_REQ_QGV_PT_MASK); } int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, u32 points_mask) { + struct intel_display *display = &dev_priv->display; int ret; - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) return 0; /* bspec says to keep retrying for at least 1 ms */ @@ -164,27 +168,28 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, 1); if (ret < 0) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to disable qgv points (0x%x) points: 0x%x\n", ret, points_mask); return ret; } - dev_priv->display.sagv.status = is_sagv_enabled(dev_priv, points_mask) ? + display->sagv.status = is_sagv_enabled(display, points_mask) ? 
I915_SAGV_ENABLED : I915_SAGV_DISABLED; return 0; } -static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv, +static int mtl_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 val, val2; u16 dclk; - val = intel_uncore_read(&dev_priv->uncore, + val = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_QGV_POINT_LOW(point)); - val2 = intel_uncore_read(&dev_priv->uncore, + val2 = intel_uncore_read(&i915->uncore, MTL_MEM_SS_INFO_QGV_POINT_HIGH(point)); dclk = REG_FIELD_GET(MTL_DCLK_MASK, val); sp->dclk = DIV_ROUND_CLOSEST(16667 * dclk, 1000); @@ -200,29 +205,30 @@ static int mtl_read_qgv_point_info(struct drm_i915_private *dev_priv, } static int -intel_read_qgv_point_info(struct drm_i915_private *dev_priv, +intel_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { - if (DISPLAY_VER(dev_priv) >= 14) - return mtl_read_qgv_point_info(dev_priv, sp, point); - else if (IS_DG1(dev_priv)) - return dg1_mchbar_read_qgv_point_info(dev_priv, sp, point); + if (DISPLAY_VER(display) >= 14) + return mtl_read_qgv_point_info(display, sp, point); + else if (display->platform.dg1) + return dg1_mchbar_read_qgv_point_info(display, sp, point); else - return icl_pcode_read_qgv_point_info(dev_priv, sp, point); + return icl_pcode_read_qgv_point_info(display, sp, point); } -static int icl_get_qgv_points(struct drm_i915_private *dev_priv, +static int icl_get_qgv_points(struct intel_display *display, struct intel_qgv_info *qi, bool is_y_tile) { - const struct dram_info *dram_info = &dev_priv->dram_info; + struct drm_i915_private *i915 = to_i915(display->drm); + const struct dram_info *dram_info = &i915->dram_info; int i, ret; qi->num_points = dram_info->num_qgv_points; qi->num_psf_points = dram_info->num_psf_gv_points; - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { switch (dram_info->type) { case INTEL_DRAM_DDR4: qi->t_bl = 4; @@ -250,7 +256,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, MISSING_CASE(dram_info->type); return -EINVAL; } - } else if (DISPLAY_VER(dev_priv) >= 12) { + } else if (DISPLAY_VER(display) >= 12) { switch (dram_info->type) { case INTEL_DRAM_DDR4: qi->t_bl = is_y_tile ? 8 : 4; @@ -265,7 +271,7 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->deinterleave = is_y_tile ? 1 : 2; break; case INTEL_DRAM_LPDDR4: - if (IS_ROCKETLAKE(dev_priv)) { + if (display->platform.rocketlake) { qi->t_bl = 8; qi->max_numchannels = 4; qi->channel_width = 32; @@ -284,39 +290,39 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv, qi->max_numchannels = 1; break; } - } else if (DISPLAY_VER(dev_priv) == 11) { - qi->t_bl = dev_priv->dram_info.type == INTEL_DRAM_DDR4 ? 4 : 8; + } else if (DISPLAY_VER(display) == 11) { + qi->t_bl = dram_info->type == INTEL_DRAM_DDR4 ? 
4 : 8; qi->max_numchannels = 1; } - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(display->drm, qi->num_points > ARRAY_SIZE(qi->points))) qi->num_points = ARRAY_SIZE(qi->points); for (i = 0; i < qi->num_points; i++) { struct intel_qgv_point *sp = &qi->points[i]; - ret = intel_read_qgv_point_info(dev_priv, sp, i); + ret = intel_read_qgv_point_info(display, sp, i); if (ret) { - drm_dbg_kms(&dev_priv->drm, "Could not read QGV %d info\n", i); + drm_dbg_kms(display->drm, "Could not read QGV %d info\n", i); return ret; } - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n", i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras, sp->t_rcd, sp->t_rc); } if (qi->num_psf_points > 0) { - ret = adls_pcode_read_psf_gv_point_info(dev_priv, qi->psf_points); + ret = adls_pcode_read_psf_gv_point_info(display, qi->psf_points); if (ret) { - drm_err(&dev_priv->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n"); + drm_err(display->drm, "Failed to read PSF point data; PSF points will not be considered in bandwidth calculations.\n"); qi->num_psf_points = 0; } for (i = 0; i < qi->num_psf_points; i++) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "PSF GV %d: CLK=%d \n", i, qi->psf_points[i].clk); } @@ -398,20 +404,21 @@ static const struct intel_sa_info xe2_hpd_sa_info = { /* Other values not used by simplified algorithm */ }; -static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) +static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_qgv_info qi = {}; bool is_y_tile = true; /* assume y tile may be used */ - int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); + int num_channels = max_t(u8, 1, i915->dram_info.num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw; - int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); + int num_groups = ARRAY_SIZE(display->bw.max); int i, ret; - ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); + ret = icl_get_qgv_points(display, &qi, is_y_tile); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } @@ -422,7 +429,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel qi.deinterleave = DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; + struct intel_bw_info *bi = &display->bw.max[i]; int clpchgroup; int j; @@ -449,7 +456,7 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel bi->deratedbw[j] = min(maxdebw, bw * (100 - sa->derating) / 100); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u\n", i, j, bi->num_planes, bi->deratedbw[j]); } @@ -460,44 +467,45 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel * as it will fail and pointless anyway. 
*/ if (qi.num_points == 1) - dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + display->sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->display.sagv.status = I915_SAGV_ENABLED; + display->sagv.status = I915_SAGV_ENABLED; return 0; } -static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) +static int tgl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_qgv_info qi = {}; - const struct dram_info *dram_info = &dev_priv->dram_info; + const struct dram_info *dram_info = &i915->dram_info; bool is_y_tile = true; /* assume y tile may be used */ - int num_channels = max_t(u8, 1, dev_priv->dram_info.num_channels); + int num_channels = max_t(u8, 1, dram_info->num_channels); int ipqdepth, ipqdepthpch = 16; int dclk_max; int maxdebw, peakbw; int clperchgroup; - int num_groups = ARRAY_SIZE(dev_priv->display.bw.max); + int num_groups = ARRAY_SIZE(display->bw.max); int i, ret; - ret = icl_get_qgv_points(dev_priv, &qi, is_y_tile); + ret = icl_get_qgv_points(display, &qi, is_y_tile); if (ret) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } - if (DISPLAY_VER(dev_priv) < 14 && + if (DISPLAY_VER(display) < 14 && (dram_info->type == INTEL_DRAM_LPDDR4 || dram_info->type == INTEL_DRAM_LPDDR5)) num_channels *= 2; qi.deinterleave = qi.deinterleave ? : DIV_ROUND_UP(num_channels, is_y_tile ? 4 : 2); - if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12) + if (num_channels < qi.max_numchannels && DISPLAY_VER(display) >= 12) qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1); - if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels) - drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels."); + if (DISPLAY_VER(display) >= 12 && num_channels > qi.max_numchannels) + drm_warn(display->drm, "Number of channels exceeds max number of channels."); if (qi.max_numchannels != 0) num_channels = min_t(u8, num_channels, qi.max_numchannels); @@ -514,7 +522,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel clperchgroup = 4 * DIV_ROUND_UP(8, num_channels) * qi.deinterleave; for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &dev_priv->display.bw.max[i]; + struct intel_bw_info *bi = &display->bw.max[i]; struct intel_bw_info *bi_next; int clpchgroup; int j; @@ -522,7 +530,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel clpchgroup = (sa->deburst * qi.deinterleave / num_channels) << i; if (i < num_groups - 1) { - bi_next = &dev_priv->display.bw.max[i + 1]; + bi_next = &display->bw.max[i + 1]; if (clpchgroup < clperchgroup) bi_next->num_planes = (ipqdepth - clpchgroup) / @@ -554,7 +562,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel num_channels * qi.channel_width, 8); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "BW%d / QGV %d: num_planes=%d deratedbw=%u peakbw: %u\n", i, j, bi->num_planes, bi->deratedbw[j], bi->peakbw[j]); @@ -565,7 +573,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel bi->psf_bw[j] = adl_calc_psf_bw(sp->clk); - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "BW%d / PSF GV %d: num_planes=%d bw=%u\n", i, j, bi->num_planes, bi->psf_bw[j]); } @@ -577,17 +585,17 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const 
struct intel * as it will fail and pointless anyway. */ if (qi.num_points == 1) - dev_priv->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + display->sagv.status = I915_SAGV_NOT_CONTROLLED; else - dev_priv->display.sagv.status = I915_SAGV_ENABLED; + display->sagv.status = I915_SAGV_ENABLED; return 0; } -static void dg2_get_bw_info(struct drm_i915_private *i915) +static void dg2_get_bw_info(struct intel_display *display) { - unsigned int deratedbw = IS_DG2_G11(i915) ? 38000 : 50000; - int num_groups = ARRAY_SIZE(i915->display.bw.max); + unsigned int deratedbw = display->platform.dg2_g11 ? 38000 : 50000; + int num_groups = ARRAY_SIZE(display->bw.max); int i; /* @@ -598,7 +606,7 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) * whereas DG2-G11 platforms have 38 GB/s. */ for (i = 0; i < num_groups; i++) { - struct intel_bw_info *bi = &i915->display.bw.max[i]; + struct intel_bw_info *bi = &display->bw.max[i]; bi->num_planes = 1; /* Need only one dummy QGV point per group */ @@ -606,20 +614,21 @@ static void dg2_get_bw_info(struct drm_i915_private *i915) bi->deratedbw[0] = deratedbw; } - i915->display.sagv.status = I915_SAGV_NOT_CONTROLLED; + display->sagv.status = I915_SAGV_NOT_CONTROLLED; } -static int xe2_hpd_get_bw_info(struct drm_i915_private *i915, +static int xe2_hpd_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa) { + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_qgv_info qi = {}; int num_channels = i915->dram_info.num_channels; int peakbw, maxdebw; int ret, i; - ret = icl_get_qgv_points(i915, &qi, true); + ret = icl_get_qgv_points(display, &qi, true); if (ret) { - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "Failed to get memory subsystem information, ignoring bandwidth limits"); return ret; } @@ -631,33 +640,33 @@ static int xe2_hpd_get_bw_info(struct drm_i915_private *i915, const struct intel_qgv_point *point = &qi.points[i]; int bw = num_channels * (qi.channel_width / 8) * point->dclk; - i915->display.bw.max[0].deratedbw[i] = + display->bw.max[0].deratedbw[i] = min(maxdebw, (100 - sa->derating) * bw / 100); - i915->display.bw.max[0].peakbw[i] = bw; + display->bw.max[0].peakbw[i] = bw; - drm_dbg_kms(&i915->drm, "QGV %d: deratedbw=%u peakbw: %u\n", - i, i915->display.bw.max[0].deratedbw[i], - i915->display.bw.max[0].peakbw[i]); + drm_dbg_kms(display->drm, "QGV %d: deratedbw=%u peakbw: %u\n", + i, display->bw.max[0].deratedbw[i], + display->bw.max[0].peakbw[i]); } /* Bandwidth does not depend on # of planes; set all groups the same */ - i915->display.bw.max[0].num_planes = 1; - i915->display.bw.max[0].num_qgv_points = qi.num_points; - for (i = 1; i < ARRAY_SIZE(i915->display.bw.max); i++) - memcpy(&i915->display.bw.max[i], &i915->display.bw.max[0], - sizeof(i915->display.bw.max[0])); + display->bw.max[0].num_planes = 1; + display->bw.max[0].num_qgv_points = qi.num_points; + for (i = 1; i < ARRAY_SIZE(display->bw.max); i++) + memcpy(&display->bw.max[i], &display->bw.max[0], + sizeof(display->bw.max[0])); /* * Xe2_HPD should always have exactly two QGV points representing * battery and plugged-in operation. 
*/ - drm_WARN_ON(&i915->drm, qi.num_points != 2); - i915->display.sagv.status = I915_SAGV_ENABLED; + drm_WARN_ON(display->drm, qi.num_points != 2); + display->sagv.status = I915_SAGV_ENABLED; return 0; } -static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, +static unsigned int icl_max_bw_index(struct intel_display *display, int num_planes, int qgv_point) { int i; @@ -667,9 +676,9 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = 0; i < ARRAY_SIZE(dev_priv->display.bw.max); i++) { + for (i = 0; i < ARRAY_SIZE(display->bw.max); i++) { const struct intel_bw_info *bi = - &dev_priv->display.bw.max[i]; + &display->bw.max[i]; /* * Pcode will not expose all QGV points when @@ -685,7 +694,7 @@ static unsigned int icl_max_bw_index(struct drm_i915_private *dev_priv, return UINT_MAX; } -static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, +static unsigned int tgl_max_bw_index(struct intel_display *display, int num_planes, int qgv_point) { int i; @@ -695,9 +704,9 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, */ num_planes = max(1, num_planes); - for (i = ARRAY_SIZE(dev_priv->display.bw.max) - 1; i >= 0; i--) { + for (i = ARRAY_SIZE(display->bw.max) - 1; i >= 0; i--) { const struct intel_bw_info *bi = - &dev_priv->display.bw.max[i]; + &display->bw.max[i]; /* * Pcode will not expose all QGV points when @@ -713,52 +722,54 @@ static unsigned int tgl_max_bw_index(struct drm_i915_private *dev_priv, return 0; } -static unsigned int adl_psf_bw(struct drm_i915_private *dev_priv, +static unsigned int adl_psf_bw(struct intel_display *display, int psf_gv_point) { const struct intel_bw_info *bi = - &dev_priv->display.bw.max[0]; + &display->bw.max[0]; return bi->psf_bw[psf_gv_point]; } -static unsigned int icl_qgv_bw(struct drm_i915_private *i915, +static unsigned int icl_qgv_bw(struct intel_display *display, int num_active_planes, int qgv_point) { unsigned int idx; - if (DISPLAY_VER(i915) >= 12) - idx = tgl_max_bw_index(i915, num_active_planes, qgv_point); + if (DISPLAY_VER(display) >= 12) + idx = tgl_max_bw_index(display, num_active_planes, qgv_point); else - idx = icl_max_bw_index(i915, num_active_planes, qgv_point); + idx = icl_max_bw_index(display, num_active_planes, qgv_point); - if (idx >= ARRAY_SIZE(i915->display.bw.max)) + if (idx >= ARRAY_SIZE(display->bw.max)) return 0; - return i915->display.bw.max[idx].deratedbw[qgv_point]; + return display->bw.max[idx].deratedbw[qgv_point]; } void intel_bw_init_hw(struct drm_i915_private *dev_priv) { - if (!HAS_DISPLAY(dev_priv)) + struct intel_display *display = &dev_priv->display; + + if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VERx100(dev_priv) >= 1401 && IS_DGFX(dev_priv)) - xe2_hpd_get_bw_info(dev_priv, &xe2_hpd_sa_info); - else if (DISPLAY_VER(dev_priv) >= 14) - tgl_get_bw_info(dev_priv, &mtl_sa_info); - else if (IS_DG2(dev_priv)) - dg2_get_bw_info(dev_priv); - else if (IS_ALDERLAKE_P(dev_priv)) - tgl_get_bw_info(dev_priv, &adlp_sa_info); - else if (IS_ALDERLAKE_S(dev_priv)) - tgl_get_bw_info(dev_priv, &adls_sa_info); - else if (IS_ROCKETLAKE(dev_priv)) - tgl_get_bw_info(dev_priv, &rkl_sa_info); - else if (DISPLAY_VER(dev_priv) == 12) - tgl_get_bw_info(dev_priv, &tgl_sa_info); - else if (DISPLAY_VER(dev_priv) == 11) - icl_get_bw_info(dev_priv, &icl_sa_info); + if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) + xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info); + else if (DISPLAY_VER(display) >= 14) + 
tgl_get_bw_info(display, &mtl_sa_info); + else if (display->platform.dg2) + dg2_get_bw_info(display); + else if (display->platform.alderlake_p) + tgl_get_bw_info(display, &adlp_sa_info); + else if (display->platform.alderlake_s) + tgl_get_bw_info(display, &adls_sa_info); + else if (display->platform.rocketlake) + tgl_get_bw_info(display, &rkl_sa_info); + else if (DISPLAY_VER(display) == 12) + tgl_get_bw_info(display, &tgl_sa_info); + else if (DISPLAY_VER(display) == 11) + icl_get_bw_info(display, &icl_sa_info); } static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_state *crtc_state) @@ -772,8 +783,8 @@ static unsigned int intel_bw_crtc_num_active_planes(const struct intel_crtc_stat static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); unsigned int data_rate = 0; enum plane_id plane_id; @@ -787,7 +798,7 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_ data_rate += crtc_state->data_rate[plane_id]; - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) data_rate += crtc_state->data_rate_y[plane_id]; } @@ -797,37 +808,37 @@ static unsigned int intel_bw_crtc_data_rate(const struct intel_crtc_state *crtc_ /* "Maximum Pipe Read Bandwidth" */ static int intel_bw_crtc_min_cdclk(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc_state); - if (DISPLAY_VER(i915) < 12) + if (DISPLAY_VER(display) < 12) return 0; return DIV_ROUND_UP_ULL(mul_u32_u32(intel_bw_crtc_data_rate(crtc_state), 10), 512); } -static unsigned int intel_bw_num_active_planes(struct drm_i915_private *dev_priv, +static unsigned int intel_bw_num_active_planes(struct intel_display *display, const struct intel_bw_state *bw_state) { unsigned int num_active_planes = 0; enum pipe pipe; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) num_active_planes += bw_state->num_active_planes[pipe]; return num_active_planes; } -static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, +static unsigned int intel_bw_data_rate(struct intel_display *display, const struct intel_bw_state *bw_state) { + struct drm_i915_private *i915 = to_i915(display->drm); unsigned int data_rate = 0; enum pipe pipe; - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) data_rate += bw_state->data_rate[pipe]; - if (DISPLAY_VER(dev_priv) >= 13 && i915_vtd_active(dev_priv)) + if (DISPLAY_VER(display) >= 13 && i915_vtd_active(i915)) data_rate = DIV_ROUND_UP(data_rate * 105, 100); return data_rate; @@ -836,10 +847,10 @@ static unsigned int intel_bw_data_rate(struct drm_i915_private *dev_priv, struct intel_bw_state * intel_atomic_get_old_bw_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_old_global_obj_state(state, &dev_priv->display.bw.obj); + bw_state = intel_atomic_get_old_global_obj_state(state, &display->bw.obj); return to_intel_bw_state(bw_state); } @@ -847,10 +858,10 @@ intel_atomic_get_old_bw_state(struct intel_atomic_state *state) struct intel_bw_state * intel_atomic_get_new_bw_state(struct 
intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_new_global_obj_state(state, &dev_priv->display.bw.obj); + bw_state = intel_atomic_get_new_global_obj_state(state, &display->bw.obj); return to_intel_bw_state(bw_state); } @@ -858,27 +869,27 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state) struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); struct intel_global_state *bw_state; - bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->display.bw.obj); + bw_state = intel_atomic_get_global_obj_state(state, &display->bw.obj); if (IS_ERR(bw_state)) return ERR_CAST(bw_state); return to_intel_bw_state(bw_state); } -static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915, +static unsigned int icl_max_bw_qgv_point_mask(struct intel_display *display, int num_active_planes) { - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; unsigned int max_bw_point = 0; unsigned int max_bw = 0; int i; for (i = 0; i < num_qgv_points; i++) { unsigned int max_data_rate = - icl_qgv_bw(i915, num_active_planes, i); + icl_qgv_bw(display, num_active_planes, i); /* * We need to know which qgv point gives us @@ -897,23 +908,23 @@ static unsigned int icl_max_bw_qgv_point_mask(struct drm_i915_private *i915, return max_bw_point; } -static u16 icl_prepare_qgv_points_mask(struct drm_i915_private *i915, +static u16 icl_prepare_qgv_points_mask(struct intel_display *display, unsigned int qgv_points, unsigned int psf_points) { return ~(ICL_PCODE_REQ_QGV_PT(qgv_points) | - ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(i915); + ADLS_PCODE_REQ_PSF_PT(psf_points)) & icl_qgv_points_mask(display); } -static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915) +static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display) { - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; + unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points; unsigned int max_bw_point_mask = 0; unsigned int max_bw = 0; int i; for (i = 0; i < num_psf_gv_points; i++) { - unsigned int max_data_rate = adl_psf_bw(i915, i); + unsigned int max_data_rate = adl_psf_bw(display, i); if (max_data_rate > max_bw) { max_bw_point_mask = BIT(i); @@ -926,29 +937,31 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct drm_i915_private *i915) return max_bw_point_mask; } -static void icl_force_disable_sagv(struct drm_i915_private *i915, +static void icl_force_disable_sagv(struct intel_display *display, struct intel_bw_state *bw_state) { - unsigned int qgv_points = icl_max_bw_qgv_point_mask(i915, 0); - unsigned int psf_points = icl_max_bw_psf_gv_point_mask(i915); + struct drm_i915_private *i915 = to_i915(display->drm); + unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0); + unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display); - bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915, + bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display, qgv_points, psf_points); - drm_dbg_kms(&i915->drm, "Forcing SAGV disable: mask 0x%x\n", + drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n", 
bw_state->qgv_points_mask); icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask); } -static int mtl_find_qgv_points(struct drm_i915_private *i915, +static int mtl_find_qgv_points(struct intel_display *display, unsigned int data_rate, unsigned int num_active_planes, struct intel_bw_state *new_bw_state) { + struct drm_i915_private *i915 = to_i915(display->drm); unsigned int best_rate = UINT_MAX; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; unsigned int qgv_peak_bw = 0; int i; int ret; @@ -964,7 +977,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, */ if (!intel_can_enable_sagv(i915, new_bw_state)) { new_bw_state->qgv_point_peakbw = U16_MAX; - drm_dbg_kms(&i915->drm, "No SAGV, use UINT_MAX as peak bw."); + drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw."); return 0; } @@ -974,27 +987,27 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, */ for (i = 0; i < num_qgv_points; i++) { unsigned int bw_index = - tgl_max_bw_index(i915, num_active_planes, i); + tgl_max_bw_index(display, num_active_planes, i); unsigned int max_data_rate; - if (bw_index >= ARRAY_SIZE(i915->display.bw.max)) + if (bw_index >= ARRAY_SIZE(display->bw.max)) continue; - max_data_rate = i915->display.bw.max[bw_index].deratedbw[i]; + max_data_rate = display->bw.max[bw_index].deratedbw[i]; if (max_data_rate < data_rate) continue; if (max_data_rate - data_rate < best_rate) { best_rate = max_data_rate - data_rate; - qgv_peak_bw = i915->display.bw.max[bw_index].peakbw[i]; + qgv_peak_bw = display->bw.max[bw_index].peakbw[i]; } - drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n", + drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d qgv_peak_bw: %d\n", i, max_data_rate, data_rate, qgv_peak_bw); } - drm_dbg_kms(&i915->drm, "Matching peaks QGV bw: %d for required data rate: %d\n", + drm_dbg_kms(display->drm, "Matching peaks QGV bw: %d for required data rate: %d\n", qgv_peak_bw, data_rate); /* @@ -1002,7 +1015,7 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, * satisfying the required data rate is found */ if (qgv_peak_bw == 0) { - drm_dbg_kms(&i915->drm, "No QGV points for bw %d for display configuration(%d active planes).\n", + drm_dbg_kms(display->drm, "No QGV points for bw %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } @@ -1013,14 +1026,15 @@ static int mtl_find_qgv_points(struct drm_i915_private *i915, return 0; } -static int icl_find_qgv_points(struct drm_i915_private *i915, +static int icl_find_qgv_points(struct intel_display *display, unsigned int data_rate, unsigned int num_active_planes, const struct intel_bw_state *old_bw_state, struct intel_bw_state *new_bw_state) { - unsigned int num_psf_gv_points = i915->display.bw.max[0].num_psf_gv_points; - unsigned int num_qgv_points = i915->display.bw.max[0].num_qgv_points; + struct drm_i915_private *i915 = to_i915(display->drm); + unsigned int num_psf_gv_points = display->bw.max[0].num_psf_gv_points; + unsigned int num_qgv_points = display->bw.max[0].num_qgv_points; u16 psf_points = 0; u16 qgv_points = 0; int i; @@ -1031,22 +1045,22 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, return ret; for (i = 0; i < num_qgv_points; i++) { - unsigned int max_data_rate = icl_qgv_bw(i915, + unsigned int max_data_rate = icl_qgv_bw(display, num_active_planes, i); if (max_data_rate >= data_rate) qgv_points |= BIT(i); - 
drm_dbg_kms(&i915->drm, "QGV point %d: max bw %d required %d\n", + drm_dbg_kms(display->drm, "QGV point %d: max bw %d required %d\n", i, max_data_rate, data_rate); } for (i = 0; i < num_psf_gv_points; i++) { - unsigned int max_data_rate = adl_psf_bw(i915, i); + unsigned int max_data_rate = adl_psf_bw(display, i); if (max_data_rate >= data_rate) psf_points |= BIT(i); - drm_dbg_kms(&i915->drm, "PSF GV point %d: max bw %d" + drm_dbg_kms(display->drm, "PSF GV point %d: max bw %d" " required %d\n", i, max_data_rate, data_rate); } @@ -1057,14 +1071,14 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * reasons. */ if (qgv_points == 0) { - drm_dbg_kms(&i915->drm, "No QGV points provide sufficient memory" + drm_dbg_kms(display->drm, "No QGV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; } if (num_psf_gv_points > 0 && psf_points == 0) { - drm_dbg_kms(&i915->drm, "No PSF GV points provide sufficient memory" + drm_dbg_kms(display->drm, "No PSF GV points provide sufficient memory" " bandwidth %d for display configuration(%d active planes).\n", data_rate, num_active_planes); return -EINVAL; @@ -1076,8 +1090,8 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * cause. */ if (!intel_can_enable_sagv(i915, new_bw_state)) { - qgv_points = icl_max_bw_qgv_point_mask(i915, num_active_planes); - drm_dbg_kms(&i915->drm, "No SAGV, using single QGV point mask 0x%x\n", + qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes); + drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n", qgv_points); } @@ -1085,7 +1099,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, * We store the ones which need to be masked as that is what PCode * actually accepts as a parameter. 
*/ - new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(i915, + new_bw_state->qgv_points_mask = icl_prepare_qgv_points_mask(display, qgv_points, psf_points); /* @@ -1101,38 +1115,38 @@ static int icl_find_qgv_points(struct drm_i915_private *i915, return 0; } -static int intel_bw_check_qgv_points(struct drm_i915_private *i915, +static int intel_bw_check_qgv_points(struct intel_display *display, const struct intel_bw_state *old_bw_state, struct intel_bw_state *new_bw_state) { - unsigned int data_rate = intel_bw_data_rate(i915, new_bw_state); + unsigned int data_rate = intel_bw_data_rate(display, new_bw_state); unsigned int num_active_planes = - intel_bw_num_active_planes(i915, new_bw_state); + intel_bw_num_active_planes(display, new_bw_state); data_rate = DIV_ROUND_UP(data_rate, 1000); - if (DISPLAY_VER(i915) >= 14) - return mtl_find_qgv_points(i915, data_rate, num_active_planes, + if (DISPLAY_VER(display) >= 14) + return mtl_find_qgv_points(display, data_rate, num_active_planes, new_bw_state); else - return icl_find_qgv_points(i915, data_rate, num_active_planes, + return icl_find_qgv_points(display, data_rate, num_active_planes, old_bw_state, new_bw_state); } -static bool intel_bw_state_changed(struct drm_i915_private *i915, +static bool intel_bw_state_changed(struct intel_display *display, const struct intel_bw_state *old_bw_state, const struct intel_bw_state *new_bw_state) { enum pipe pipe; - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { const struct intel_dbuf_bw *old_crtc_bw = &old_bw_state->dbuf_bw[pipe]; const struct intel_dbuf_bw *new_crtc_bw = &new_bw_state->dbuf_bw[pipe]; enum dbuf_slice slice; - for_each_dbuf_slice(i915, slice) { + for_each_dbuf_slice(display, slice) { if (old_crtc_bw->max_bw[slice] != new_crtc_bw->max_bw[slice] || old_crtc_bw->active_planes[slice] != new_crtc_bw->active_planes[slice]) return true; @@ -1151,7 +1165,8 @@ static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state, const struct skl_ddb_entry *ddb, unsigned int data_rate) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; unsigned int dbuf_mask = skl_ddb_dbuf_slice_mask(i915, ddb); enum dbuf_slice slice; @@ -1160,7 +1175,7 @@ static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state, * The arbiter can only really guarantee an * equal share of the total bw to each plane. 
*/ - for_each_dbuf_slice_in_mask(i915, slice, dbuf_mask) { + for_each_dbuf_slice_in_mask(display, slice, dbuf_mask) { crtc_bw->max_bw[slice] = max(crtc_bw->max_bw[slice], data_rate); crtc_bw->active_planes[slice] |= BIT(plane_id); } @@ -1169,8 +1184,8 @@ static void skl_plane_calc_dbuf_bw(struct intel_bw_state *bw_state, static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[crtc->pipe]; enum plane_id plane_id; @@ -1191,7 +1206,7 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, &crtc_state->wm.skl.plane_ddb[plane_id], crtc_state->data_rate[plane_id]); - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) skl_plane_calc_dbuf_bw(bw_state, crtc, plane_id, &crtc_state->wm.skl.plane_ddb_y[plane_id], crtc_state->data_rate[plane_id]); @@ -1200,13 +1215,13 @@ static void skl_crtc_calc_dbuf_bw(struct intel_bw_state *bw_state, /* "Maximum Data Buffer Bandwidth" */ static int -intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, +intel_bw_dbuf_min_cdclk(struct intel_display *display, const struct intel_bw_state *bw_state) { unsigned int total_max_bw = 0; enum dbuf_slice slice; - for_each_dbuf_slice(i915, slice) { + for_each_dbuf_slice(display, slice) { int num_active_planes = 0; unsigned int max_bw = 0; enum pipe pipe; @@ -1215,7 +1230,7 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, * The arbiter can only really guarantee an * equal share of the total bw to each plane. */ - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { const struct intel_dbuf_bw *crtc_bw = &bw_state->dbuf_bw[pipe]; max_bw = max(crtc_bw->max_bw[slice], max_bw); @@ -1232,12 +1247,13 @@ intel_bw_dbuf_min_cdclk(struct drm_i915_private *i915, int intel_bw_min_cdclk(struct drm_i915_private *i915, const struct intel_bw_state *bw_state) { + struct intel_display *display = &i915->display; enum pipe pipe; int min_cdclk; - min_cdclk = intel_bw_dbuf_min_cdclk(i915, bw_state); + min_cdclk = intel_bw_dbuf_min_cdclk(display, bw_state); - for_each_pipe(i915, pipe) + for_each_pipe(display, pipe) min_cdclk = max(min_cdclk, bw_state->min_cdclk[pipe]); return min_cdclk; @@ -1246,6 +1262,7 @@ int intel_bw_min_cdclk(struct drm_i915_private *i915, int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc) { + struct intel_display *display = to_intel_display(state); struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_bw_state *new_bw_state = NULL; const struct intel_bw_state *old_bw_state = NULL; @@ -1255,7 +1272,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, struct intel_crtc *crtc; int i; - if (DISPLAY_VER(dev_priv) < 9) + if (DISPLAY_VER(display) < 9) return 0; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { @@ -1274,7 +1291,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, if (!old_bw_state) return 0; - if (intel_bw_state_changed(dev_priv, old_bw_state, new_bw_state)) { + if (intel_bw_state_changed(display, old_bw_state, new_bw_state)) { int ret = intel_atomic_lock_global_state(&new_bw_state->base); if (ret) return ret; @@ -1309,7 +1326,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, if (new_min_cdclk <= cdclk_state->bw_min_cdclk) return 0; - drm_dbg_kms(&dev_priv->drm, + 
drm_dbg_kms(display->drm, "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n", new_min_cdclk, cdclk_state->bw_min_cdclk); *need_cdclk_calc = true; @@ -1319,7 +1336,7 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *changed) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); const struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; int i; @@ -1353,7 +1370,7 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan *changed = true; - drm_dbg_kms(&i915->drm, + drm_dbg_kms(display->drm, "[CRTC:%d:%s] data rate %u num active planes %u\n", crtc->base.base.id, crtc->base.name, new_bw_state->data_rate[crtc->pipe], @@ -1365,14 +1382,15 @@ static int intel_bw_check_data_rate(struct intel_atomic_state *state, bool *chan int intel_bw_atomic_check(struct intel_atomic_state *state) { + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); bool changed = false; - struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_bw_state *new_bw_state; const struct intel_bw_state *old_bw_state; int ret; /* FIXME earlier gens need some checks too */ - if (DISPLAY_VER(i915) < 11) + if (DISPLAY_VER(display) < 11) return 0; ret = intel_bw_check_data_rate(state, &changed); @@ -1395,7 +1413,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) if (!changed) return 0; - ret = intel_bw_check_qgv_points(i915, old_bw_state, new_bw_state); + ret = intel_bw_check_qgv_points(display, old_bw_state, new_bw_state); if (ret) return ret; @@ -1407,8 +1425,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state) static void intel_bw_crtc_update(struct intel_bw_state *bw_state, const struct intel_crtc_state *crtc_state) { + struct intel_display *display = to_intel_display(crtc_state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); bw_state->data_rate[crtc->pipe] = intel_bw_crtc_data_rate(crtc_state); @@ -1416,7 +1434,7 @@ static void intel_bw_crtc_update(struct intel_bw_state *bw_state, intel_bw_crtc_num_active_planes(crtc_state); bw_state->force_check_qgv = true; - drm_dbg_kms(&i915->drm, "pipe %c data rate %u num active planes %u\n", + drm_dbg_kms(display->drm, "pipe %c data rate %u num active planes %u\n", pipe_name(crtc->pipe), bw_state->data_rate[crtc->pipe], bw_state->num_active_planes[crtc->pipe]); @@ -1499,8 +1517,8 @@ int intel_bw_init(struct drm_i915_private *i915) * Limit this only if we have SAGV. And for Display version 14 onwards * sagv is handled though pmdemand requests */ - if (intel_has_sagv(i915) && IS_DISPLAY_VER(i915, 11, 13)) - icl_force_disable_sagv(i915, state); + if (intel_has_sagv(i915) && IS_DISPLAY_VER(display, 11, 13)) + icl_force_disable_sagv(display, state); return 0; } From d706998b6da687dcafee2cf6e9712136dafa574a Mon Sep 17 00:00:00 2001 From: Gustavo Sousa Date: Tue, 11 Mar 2025 14:04:51 -0300 Subject: [PATCH 0110/1627] drm/i915/display: Convert intel_bw.c externally to intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already have internal interface for intel_bw.c converted to use intel_display. Now convert the external interface as well. 
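The pattern is the same one already used for the internal interface: the functions take a struct intel_display, and only the spots that still need uncore or DRAM information look the i915 pointer back up. A minimal sketch of that pattern, using a hypothetical helper name (the real conversions are in the diff below):

  static int example_read_info(struct intel_display *display, u32 *val)
  {
          /* recover the i915 pointer only where uncore access is still needed */
          struct drm_i915_private *i915 = to_i915(display->drm);

          *val = intel_uncore_read(&i915->uncore, SA_PERF_STATUS_0_0_0_MCHBAR_PC);

          if (DISPLAY_VER(display) >= 14)
                  drm_dbg_kms(display->drm, "read 0x%x\n", *val);

          return 0;
  }

Callers that only have an atomic state or crtc state derive the display pointer with to_intel_display(), as seen throughout the diff.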
Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250311-xe3lpd-bandwidth-update-v5-2-a95a9d90ad71@intel.com Signed-off-by: Gustavo Sousa --- drivers/gpu/drm/i915/display/intel_bw.c | 25 ++++++++----------- drivers/gpu/drm/i915/display/intel_bw.h | 9 +++---- drivers/gpu/drm/i915/display/intel_cdclk.c | 3 +-- .../drm/i915/display/intel_display_driver.c | 2 +- drivers/gpu/drm/i915/display/skl_watermark.c | 10 +++++--- drivers/gpu/drm/i915/i915_driver.c | 2 +- drivers/gpu/drm/xe/display/xe_display.c | 2 +- 7 files changed, 24 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 6f805af32926..bda080d9ed4c 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -151,17 +151,17 @@ static bool is_sagv_enabled(struct intel_display *display, u16 points_mask) ICL_PCODE_REQ_QGV_PT_MASK); } -int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, +int icl_pcode_restrict_qgv_points(struct intel_display *display, u32 points_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *i915 = to_i915(display->drm); int ret; if (DISPLAY_VER(display) >= 14) return 0; /* bspec says to keep retrying for at least 1 ms */ - ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, + ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, points_mask, ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK, ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE, @@ -747,10 +747,8 @@ static unsigned int icl_qgv_bw(struct intel_display *display, return display->bw.max[idx].deratedbw[qgv_point]; } -void intel_bw_init_hw(struct drm_i915_private *dev_priv) +void intel_bw_init_hw(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - if (!HAS_DISPLAY(display)) return; @@ -940,7 +938,6 @@ static unsigned int icl_max_bw_psf_gv_point_mask(struct intel_display *display) static void icl_force_disable_sagv(struct intel_display *display, struct intel_bw_state *bw_state) { - struct drm_i915_private *i915 = to_i915(display->drm); unsigned int qgv_points = icl_max_bw_qgv_point_mask(display, 0); unsigned int psf_points = icl_max_bw_psf_gv_point_mask(display); @@ -951,7 +948,7 @@ static void icl_force_disable_sagv(struct intel_display *display, drm_dbg_kms(display->drm, "Forcing SAGV disable: mask 0x%x\n", bw_state->qgv_points_mask); - icl_pcode_restrict_qgv_points(i915, bw_state->qgv_points_mask); + icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask); } static int mtl_find_qgv_points(struct intel_display *display, @@ -1244,10 +1241,9 @@ intel_bw_dbuf_min_cdclk(struct intel_display *display, return DIV_ROUND_UP(total_max_bw, 64); } -int intel_bw_min_cdclk(struct drm_i915_private *i915, +int intel_bw_min_cdclk(struct intel_display *display, const struct intel_bw_state *bw_state) { - struct intel_display *display = &i915->display; enum pipe pipe; int min_cdclk; @@ -1263,7 +1259,6 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_bw_state *new_bw_state = NULL; const struct intel_bw_state *old_bw_state = NULL; const struct intel_cdclk_state *cdclk_state; @@ -1297,8 +1292,8 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, return ret; } - old_min_cdclk = intel_bw_min_cdclk(dev_priv, 
old_bw_state); - new_min_cdclk = intel_bw_min_cdclk(dev_priv, new_bw_state); + old_min_cdclk = intel_bw_min_cdclk(display, old_bw_state); + new_min_cdclk = intel_bw_min_cdclk(display, new_bw_state); /* * No need to check against the cdclk state if @@ -1501,9 +1496,9 @@ static const struct intel_global_state_funcs intel_bw_funcs = { .atomic_destroy_state = intel_bw_destroy_state, }; -int intel_bw_init(struct drm_i915_private *i915) +int intel_bw_init(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_bw_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index 3313e4eac4f0..c18126c83d2e 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -12,7 +12,6 @@ #include "intel_display_power.h" #include "intel_global_state.h" -struct drm_i915_private; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -72,14 +71,14 @@ intel_atomic_get_new_bw_state(struct intel_atomic_state *state); struct intel_bw_state * intel_atomic_get_bw_state(struct intel_atomic_state *state); -void intel_bw_init_hw(struct drm_i915_private *dev_priv); -int intel_bw_init(struct drm_i915_private *dev_priv); +void intel_bw_init_hw(struct intel_display *display); +int intel_bw_init(struct intel_display *display); int intel_bw_atomic_check(struct intel_atomic_state *state); -int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv, +int icl_pcode_restrict_qgv_points(struct intel_display *display, u32 points_mask); int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc); -int intel_bw_min_cdclk(struct drm_i915_private *i915, +int intel_bw_min_cdclk(struct intel_display *display, const struct intel_bw_state *bw_state); void intel_bw_update_hw_state(struct intel_display *display); void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 2a8749a0213e..984fd9f98c9f 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2808,7 +2808,6 @@ static int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_stat static int intel_compute_min_cdclk(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_state *cdclk_state = intel_atomic_get_new_cdclk_state(state); const struct intel_bw_state *bw_state; @@ -2836,7 +2835,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) bw_state = intel_atomic_get_new_bw_state(state); if (bw_state) { - min_cdclk = intel_bw_min_cdclk(dev_priv, bw_state); + min_cdclk = intel_bw_min_cdclk(display, bw_state); if (cdclk_state->bw_min_cdclk != min_cdclk) { int ret; diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 31740a677dd8..5ad2f4090a2d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -259,7 +259,7 @@ int intel_display_driver_probe_noirq(struct intel_display *display) if (ret) goto cleanup_vga_client_pw_domain_dmc; - ret = intel_bw_init(i915); + ret = intel_bw_init(display); if (ret) goto cleanup_vga_client_pw_domain_dmc; diff --git 
a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 621e97943542..91ab8537347f 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -249,7 +249,8 @@ static void skl_sagv_post_plane_update(struct intel_atomic_state *state) static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_bw_state *old_bw_state = intel_atomic_get_old_bw_state(state); const struct intel_bw_state *new_bw_state = @@ -276,12 +277,13 @@ static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) * time. Also masking should be done before updating the configuration * and unmasking afterwards. */ - icl_pcode_restrict_qgv_points(i915, new_mask); + icl_pcode_restrict_qgv_points(display, new_mask); } static void icl_sagv_post_plane_update(struct intel_atomic_state *state) { - struct drm_i915_private *i915 = to_i915(state->base.dev); + struct intel_display *display = to_intel_display(state); + struct drm_i915_private *i915 = to_i915(display->drm); const struct intel_bw_state *old_bw_state = intel_atomic_get_old_bw_state(state); const struct intel_bw_state *new_bw_state = @@ -308,7 +310,7 @@ static void icl_sagv_post_plane_update(struct intel_atomic_state *state) * time. Also masking should be done before updating the configuration * and unmasking afterwards. */ - icl_pcode_restrict_qgv_points(i915, new_mask); + icl_pcode_restrict_qgv_points(display, new_mask); } void intel_sagv_pre_plane_update(struct intel_atomic_state *state) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index ce3cc93ea211..6507dcfe4bf5 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -578,7 +578,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) */ intel_dram_detect(dev_priv); - intel_bw_init_hw(dev_priv); + intel_bw_init_hw(display); return 0; diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 4f434f84b2cc..3681aeccea3c 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -147,7 +147,7 @@ int xe_display_init_early(struct xe_device *xe) */ intel_dram_detect(xe); - intel_bw_init_hw(xe); + intel_bw_init_hw(display); intel_display_device_info_runtime_init(display); From 4051c59e2a6a1b3584fa8932361b8f50198e7396 Mon Sep 17 00:00:00 2001 From: Gustavo Sousa Date: Tue, 11 Mar 2025 14:04:52 -0300 Subject: [PATCH 0111/1627] drm/i915/xe3lpd: Update bandwidth parameters Bandwidth parameters for Xe3_LPD have been updated with respect to previous display releases. Encode them into xe3lpd_sa_info and use that new struct. 
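For reference, the derating field is consumed by the existing tgl_get_bw_info() math shown earlier in this file: each raw QGV bandwidth is scaled by (100 - derating) percent and clamped to the derated limit. A small sketch with made-up numbers, not taken from Bspec 68859:

  /*
   * Illustrative only: mirrors the existing
   * min(maxdebw, bw * (100 - sa->derating) / 100) expression.
   */
  static unsigned int example_derated_bw(unsigned int maxdebw, unsigned int bw,
                                         unsigned int derating)
  {
          return min(maxdebw, bw * (100 - derating) / 100);
  }

  /* e.g. example_derated_bw(65000, 60000, 10) == 54000 (MB/s) */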
Bspec: 68859 Reviewed-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20250311-xe3lpd-bandwidth-update-v5-3-a95a9d90ad71@intel.com Signed-off-by: Gustavo Sousa --- drivers/gpu/drm/i915/display/intel_bw.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index bda080d9ed4c..dc7612658a9d 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -404,6 +404,13 @@ static const struct intel_sa_info xe2_hpd_sa_info = { /* Other values not used by simplified algorithm */ }; +static const struct intel_sa_info xe3lpd_sa_info = { + .deburst = 32, + .deprogbwlimit = 65, /* GB/s */ + .displayrtids = 256, + .derating = 10, +}; + static int icl_get_bw_info(struct intel_display *display, const struct intel_sa_info *sa) { struct drm_i915_private *i915 = to_i915(display->drm); @@ -752,7 +759,9 @@ void intel_bw_init_hw(struct intel_display *display) if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) + if (DISPLAY_VER(display) >= 30) + tgl_get_bw_info(display, &xe3lpd_sa_info); + else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info); else if (DISPLAY_VER(display) >= 14) tgl_get_bw_info(display, &mtl_sa_info); From f68429691c938a2e9131faf2ab2a8f0563966ba1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Mon, 3 Mar 2025 11:38:47 +0200 Subject: [PATCH 0112/1627] drm/client: Constify modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The modes used by the client code live on the connectors' mode lists, which are not owned by the client code, and thus it has no business modifying the modes. Mark the modes const to make that fact abundantly clear. 
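In practice the helpers now hand out pointer-to-const, so client code can still walk the connector's mode list and read the modes but can no longer write to them. A minimal sketch of the resulting usage (example function name only, not part of the patch):

  static const struct drm_display_mode *
  example_first_preferred_mode(struct drm_connector *connector)
  {
          const struct drm_display_mode *mode;

          list_for_each_entry(mode, &connector->modes, head) {
                  if (mode->type & DRM_MODE_TYPE_PREFERRED)
                          return mode;    /* reading fields is fine */
          }

          return NULL;    /* writing to *mode would now fail to compile */
  }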
v2: Fix up the kunit test Reviewed-by: Jani Nikula Reviewed-by: Thomas Zimmermann Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250303093847.7698-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_client_modeset.c | 39 ++++++++++--------- .../gpu/drm/tests/drm_client_modeset_test.c | 3 +- 2 files changed, 23 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index aca442c25209..b114d1b8793b 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -117,10 +117,10 @@ drm_client_find_modeset(struct drm_client_dev *client, struct drm_crtc *crtc) return NULL; } -static struct drm_display_mode * +static const struct drm_display_mode * drm_connector_get_tiled_mode(struct drm_connector *connector) { - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->modes, head) { if (mode->hdisplay == connector->tile_h_size && @@ -130,10 +130,10 @@ drm_connector_get_tiled_mode(struct drm_connector *connector) return NULL; } -static struct drm_display_mode * +static const struct drm_display_mode * drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) { - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->modes, head) { if (mode->hdisplay == connector->tile_h_size && @@ -144,10 +144,10 @@ drm_connector_fallback_non_tiled_mode(struct drm_connector *connector) return NULL; } -static struct drm_display_mode * +static const struct drm_display_mode * drm_connector_preferred_mode(struct drm_connector *connector, int width, int height) { - struct drm_display_mode *mode; + const struct drm_display_mode *mode; list_for_each_entry(mode, &connector->modes, head) { if (mode->hdisplay > width || @@ -159,16 +159,18 @@ drm_connector_preferred_mode(struct drm_connector *connector, int width, int hei return NULL; } -static struct drm_display_mode *drm_connector_first_mode(struct drm_connector *connector) +static const struct drm_display_mode * +drm_connector_first_mode(struct drm_connector *connector) { return list_first_entry_or_null(&connector->modes, struct drm_display_mode, head); } -static struct drm_display_mode *drm_connector_pick_cmdline_mode(struct drm_connector *connector) +static const struct drm_display_mode * +drm_connector_pick_cmdline_mode(struct drm_connector *connector) { - struct drm_cmdline_mode *cmdline_mode; - struct drm_display_mode *mode; + const struct drm_cmdline_mode *cmdline_mode; + const struct drm_display_mode *mode; bool prefer_non_interlace; /* @@ -266,13 +268,14 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors, static bool drm_client_target_cloned(struct drm_device *dev, struct drm_connector **connectors, unsigned int connector_count, - struct drm_display_mode **modes, + const struct drm_display_mode **modes, struct drm_client_offset *offsets, bool *enabled, int width, int height) { int count, i, j; bool can_clone = false; - struct drm_display_mode *dmt_mode, *mode; + const struct drm_display_mode *mode; + struct drm_display_mode *dmt_mode; /* only contemplate cloning in the single crtc case */ if (dev->mode_config.num_crtc > 1) @@ -351,7 +354,7 @@ fail: static int drm_client_get_tile_offsets(struct drm_device *dev, struct drm_connector **connectors, unsigned int connector_count, - struct drm_display_mode **modes, + const struct drm_display_mode **modes, struct drm_client_offset *offsets, int 
idx, int h_idx, int v_idx) @@ -386,7 +389,7 @@ static int drm_client_get_tile_offsets(struct drm_device *dev, static bool drm_client_target_preferred(struct drm_device *dev, struct drm_connector **connectors, unsigned int connector_count, - struct drm_display_mode **modes, + const struct drm_display_mode **modes, struct drm_client_offset *offsets, bool *enabled, int width, int height) { @@ -505,7 +508,7 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, struct drm_connector **connectors, unsigned int connector_count, struct drm_crtc **best_crtcs, - struct drm_display_mode **modes, + const struct drm_display_mode **modes, int n, int width, int height) { struct drm_device *dev = client->dev; @@ -580,7 +583,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, struct drm_connector **connectors, unsigned int connector_count, struct drm_crtc **crtcs, - struct drm_display_mode **modes, + const struct drm_display_mode **modes, struct drm_client_offset *offsets, bool *enabled, int width, int height) { @@ -800,7 +803,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, struct drm_client_offset *offsets; unsigned int connector_count = 0; /* points to modes protected by mode_config.mutex */ - struct drm_display_mode **modes; + const struct drm_display_mode **modes; struct drm_crtc **crtcs; int i, ret = 0; bool *enabled; @@ -871,7 +874,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, drm_client_modeset_release(client); for (i = 0; i < connector_count; i++) { - struct drm_display_mode *mode = modes[i]; + const struct drm_display_mode *mode = modes[i]; struct drm_crtc *crtc = crtcs[i]; struct drm_client_offset *offset = &offsets[i]; diff --git a/drivers/gpu/drm/tests/drm_client_modeset_test.c b/drivers/gpu/drm/tests/drm_client_modeset_test.c index 7516f6cb36e4..cd43d2a52a2d 100644 --- a/drivers/gpu/drm/tests/drm_client_modeset_test.c +++ b/drivers/gpu/drm/tests/drm_client_modeset_test.c @@ -88,7 +88,8 @@ static void drm_test_pick_cmdline_res_1920_1080_60(struct kunit *test) struct drm_device *drm = priv->drm; struct drm_connector *connector = &priv->connector; struct drm_cmdline_mode *cmdline_mode = &connector->cmdline_mode; - struct drm_display_mode *expected_mode, *mode; + struct drm_display_mode *expected_mode; + const struct drm_display_mode *mode; const char *cmdline = "1920x1080@60"; int ret; From b218e72b8ac2c11ccab4d41263dd0406d8291c6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:48 +0200 Subject: [PATCH 0113/1627] drm/client: Use array notation for function arguments MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the array notation rather that the pointer notation for function arguments. This makes it clear to the reader that we are in fact dealing with an array rather than a single pointer. Functionally the two are equivalent. 
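To make the equivalence concrete: in a parameter list the array spelling decays to the same pointer type, so the two prototypes below declare the identical function and accept the same callers; only the reader-facing intent differs.

  /* illustrative prototypes only, not part of the patch */
  void example_ptr(struct drm_connector **connectors, unsigned int count);
  void example_arr(struct drm_connector *connectors[], unsigned int count);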
Reviewed-by: Jani Nikula Reviewed-by: Thomas Zimmermann Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_client_modeset.c | 42 ++++++++++++++-------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index b114d1b8793b..bdd4078e62ad 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -239,9 +239,9 @@ static bool drm_connector_enabled(struct drm_connector *connector, bool strict) return enable; } -static void drm_client_connectors_enabled(struct drm_connector **connectors, +static void drm_client_connectors_enabled(struct drm_connector *connectors[], unsigned int connector_count, - bool *enabled) + bool enabled[]) { bool any_enabled = false; struct drm_connector *connector; @@ -266,11 +266,11 @@ static void drm_client_connectors_enabled(struct drm_connector **connectors, } static bool drm_client_target_cloned(struct drm_device *dev, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - const struct drm_display_mode **modes, - struct drm_client_offset *offsets, - bool *enabled, int width, int height) + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], + bool enabled[], int width, int height) { int count, i, j; bool can_clone = false; @@ -352,10 +352,10 @@ fail: } static int drm_client_get_tile_offsets(struct drm_device *dev, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - const struct drm_display_mode **modes, - struct drm_client_offset *offsets, + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], int idx, int h_idx, int v_idx) { @@ -387,11 +387,11 @@ static int drm_client_get_tile_offsets(struct drm_device *dev, } static bool drm_client_target_preferred(struct drm_device *dev, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - const struct drm_display_mode **modes, - struct drm_client_offset *offsets, - bool *enabled, int width, int height) + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], + bool enabled[], int width, int height) { const u64 mask = BIT_ULL(connector_count) - 1; struct drm_connector *connector; @@ -505,10 +505,10 @@ static bool connector_has_possible_crtc(struct drm_connector *connector, } static int drm_client_pick_crtcs(struct drm_client_dev *client, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_crtc **best_crtcs, - const struct drm_display_mode **modes, + struct drm_crtc *best_crtcs[], + const struct drm_display_mode *modes[], int n, int width, int height) { struct drm_device *dev = client->dev; @@ -580,12 +580,12 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, /* Try to read the BIOS display configuration and use it for the initial config */ static bool drm_client_firmware_config(struct drm_client_dev *client, - struct drm_connector **connectors, + struct drm_connector *connectors[], unsigned int connector_count, - struct drm_crtc **crtcs, - const struct drm_display_mode **modes, - struct drm_client_offset *offsets, - bool *enabled, int width, int height) + struct drm_crtc *crtcs[], + const struct drm_display_mode *modes[], + struct drm_client_offset offsets[], + bool enabled[], int width, 
int height) { const int count = min_t(unsigned int, connector_count, BITS_PER_LONG); unsigned long conn_configured, conn_seq, mask; From 7640a1c20f144eb933d981102b60080c8114d1d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:49 +0200 Subject: [PATCH 0114/1627] drm/client: Streamline mode selection debugs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get rid of all the redundant debugs and just wait until the end to print which mode (and of which type) we picked. Reviewed-by: Thomas Zimmermann Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-4-ville.syrjala@linux.intel.com --- drivers/gpu/drm/drm_client_modeset.c | 70 +++++++++++++--------------- 1 file changed, 33 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index bdd4078e62ad..148257287ae4 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -408,6 +408,8 @@ static bool drm_client_target_preferred(struct drm_device *dev, retry: for (i = 0; i < connector_count; i++) { + const char *mode_type; + connector = connectors[i]; if (conn_configured & BIT_ULL(i)) @@ -441,20 +443,20 @@ retry: modes, offsets, i, connector->tile_h_loc, connector->tile_v_loc); } - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n", - connector->base.id, connector->name); - /* got for command line mode first */ + mode_type = "cmdline"; modes[i] = drm_connector_pick_cmdline_mode(connector); + if (!modes[i]) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for preferred mode, tile %d\n", - connector->base.id, connector->name, - connector->tile_group ? connector->tile_group->id : 0); + mode_type = "preferred"; modes[i] = drm_connector_preferred_mode(connector, width, height); } - /* No preferred modes, pick one off the list */ - if (!modes[i]) + + if (!modes[i]) { + mode_type = "first"; modes[i] = drm_connector_first_mode(connector); + } + /* * In case of tiled mode if all tiles not present fallback to * first available non tiled mode. @@ -469,18 +471,22 @@ retry: (connector->tile_h_loc == 0 && connector->tile_v_loc == 0 && !drm_connector_get_tiled_mode(connector))) { - drm_dbg_kms(dev, - "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n", - connector->base.id, connector->name); + mode_type = "non tiled"; modes[i] = drm_connector_fallback_non_tiled_mode(connector); } else { + mode_type = "tiled"; modes[i] = drm_connector_get_tiled_mode(connector); } } - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Found mode %s\n", - connector->base.id, connector->name, - modes[i] ? 
modes[i]->name : "none"); + if (modes[i]) + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] found %s mode: %s\n", + connector->base.id, connector->name, + mode_type, modes[i]->name); + else + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] no mode found\n", + connector->base.id, connector->name); + conn_configured |= BIT_ULL(i); } @@ -627,6 +633,7 @@ retry: struct drm_connector *connector; struct drm_encoder *encoder; struct drm_crtc *new_crtc; + const char *mode_type; connector = connectors[i]; @@ -676,30 +683,22 @@ retry: */ for (j = 0; j < count; j++) { if (crtcs[j] == new_crtc) { - drm_dbg_kms(dev, "fallback: cloned configuration\n"); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n", + connector->base.id, connector->name); goto bail; } } - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for cmdline mode\n", - connector->base.id, connector->name); - - /* go for command line mode first */ + mode_type = "cmdline"; modes[i] = drm_connector_pick_cmdline_mode(connector); - /* try for preferred next */ if (!modes[i]) { - drm_dbg_kms(dev, - "[CONNECTOR:%d:%s] looking for preferred mode, has tile: %s\n", - connector->base.id, connector->name, - str_yes_no(connector->has_tile)); + mode_type = "preferred"; modes[i] = drm_connector_preferred_mode(connector, width, height); } - /* No preferred mode marked by the EDID? Are there any modes? */ - if (!modes[i] && !list_empty(&connector->modes)) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] using first listed mode\n", - connector->base.id, connector->name); + if (!modes[i]) { + mode_type = "first"; modes[i] = drm_connector_first_mode(connector); } @@ -716,28 +715,25 @@ retry: * This is crtc->mode and not crtc->state->mode for the * fastboot check to work correctly. */ - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] looking for current mode\n", - connector->base.id, connector->name); + mode_type = "current"; modes[i] = &connector->state->crtc->mode; } + /* * In case of tiled modes, if all tiles are not present * then fallback to a non tiled mode. */ if (connector->has_tile && num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Falling back to non-tiled mode\n", - connector->base.id, connector->name); + mode_type = "non tiled"; modes[i] = drm_connector_fallback_non_tiled_mode(connector); } crtcs[i] = new_crtc; - drm_dbg_kms(dev, "[CONNECTOR:%d:%s] on [CRTC:%d:%s]: %dx%d%s\n", + drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n", connector->base.id, connector->name, - connector->state->crtc->base.id, - connector->state->crtc->name, - modes[i]->hdisplay, modes[i]->vdisplay, - modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" : ""); + new_crtc->base.id, new_crtc->name, + mode_type, modes[i]->name); fallback = false; conn_configured |= BIT(i); From 3039cc0c0653c6e15130a8719c3237329a954670 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:50 +0200 Subject: [PATCH 0115/1627] drm/client: Make copies of modes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm_client_firmware_config() is currently picking up the current mode of the crtc via the legacy crtc->mode, which is not supposed to be used by atomic drivers at all. We can't simply switch over to the proper crtc->state->mode because we drop the crtc->mutex (which protects crtc->state) before the mode gets used. The most straightforward solution to extend the lifetime of modes[] seems to be to make full copies of the modes. 
And with this we can also undo commit 3eadd887dbac ("drm/client: Fully protect modes[] with dev->mode_config.mutex") as the lifetime of modes[] no longer has anything to do with that lock. v2: Don't try to copy NULL modes v3: Keep storing pointers and use drm_mode_{duplicate,destroy}() Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/drm_client_modeset.c | 62 +++++++++++++++++++++------- 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 148257287ae4..ff034359f063 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -265,6 +265,25 @@ static void drm_client_connectors_enabled(struct drm_connector *connectors[], enabled[i] = drm_connector_enabled(connectors[i], false); } +static void mode_replace(struct drm_device *dev, + const struct drm_display_mode **dst, + const struct drm_display_mode *src) +{ + drm_mode_destroy(dev, (struct drm_display_mode *)*dst); + + *dst = src ? drm_mode_duplicate(dev, src) : NULL; +} + +static void modes_destroy(struct drm_device *dev, + const struct drm_display_mode *modes[], + int count) +{ + int i; + + for (i = 0; i < count; i++) + mode_replace(dev, &modes[i], NULL); +} + static bool drm_client_target_cloned(struct drm_device *dev, struct drm_connector *connectors[], unsigned int connector_count, @@ -296,7 +315,9 @@ static bool drm_client_target_cloned(struct drm_device *dev, for (i = 0; i < connector_count; i++) { if (!enabled[i]) continue; - modes[i] = drm_connector_pick_cmdline_mode(connectors[i]); + + mode_replace(dev, &modes[i], + drm_connector_pick_cmdline_mode(connectors[i])); if (!modes[i]) { can_clone = false; break; @@ -335,7 +356,7 @@ static bool drm_client_target_cloned(struct drm_device *dev, DRM_MODE_MATCH_CLOCK | DRM_MODE_MATCH_FLAGS | DRM_MODE_MATCH_3D_FLAGS)) - modes[i] = mode; + mode_replace(dev, &modes[i], mode); } if (!modes[i]) can_clone = false; @@ -445,16 +466,19 @@ retry: } mode_type = "cmdline"; - modes[i] = drm_connector_pick_cmdline_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_pick_cmdline_mode(connector)); if (!modes[i]) { mode_type = "preferred"; - modes[i] = drm_connector_preferred_mode(connector, width, height); + mode_replace(dev, &modes[i], + drm_connector_preferred_mode(connector, width, height)); } if (!modes[i]) { mode_type = "first"; - modes[i] = drm_connector_first_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_first_mode(connector)); } /* @@ -472,10 +496,12 @@ retry: connector->tile_v_loc == 0 && !drm_connector_get_tiled_mode(connector))) { mode_type = "non tiled"; - modes[i] = drm_connector_fallback_non_tiled_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_fallback_non_tiled_mode(connector)); } else { mode_type = "tiled"; - modes[i] = drm_connector_get_tiled_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_get_tiled_mode(connector)); } } @@ -690,16 +716,19 @@ retry: } mode_type = "cmdline"; - modes[i] = drm_connector_pick_cmdline_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_pick_cmdline_mode(connector)); if (!modes[i]) { mode_type = "preferred"; - modes[i] = drm_connector_preferred_mode(connector, width, height); + mode_replace(dev, &modes[i], + drm_connector_preferred_mode(connector, width, height)); } if (!modes[i]) { mode_type = "first"; - modes[i] = 
drm_connector_first_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_first_mode(connector)); } /* last resort: use current mode */ @@ -716,7 +745,8 @@ retry: * fastboot check to work correctly. */ mode_type = "current"; - modes[i] = &connector->state->crtc->mode; + mode_replace(dev, &modes[i], + &connector->state->crtc->mode); } /* @@ -726,7 +756,8 @@ retry: if (connector->has_tile && num_tiled_conns < connector->num_h_tile * connector->num_v_tile) { mode_type = "non tiled"; - modes[i] = drm_connector_fallback_non_tiled_mode(connector); + mode_replace(dev, &modes[i], + drm_connector_fallback_non_tiled_mode(connector)); } crtcs[i] = new_crtc; @@ -798,7 +829,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int total_modes_count = 0; struct drm_client_offset *offsets; unsigned int connector_count = 0; - /* points to modes protected by mode_config.mutex */ const struct drm_display_mode **modes; struct drm_crtc **crtcs; int i, ret = 0; @@ -850,7 +880,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, if (!drm_client_firmware_config(client, connectors, connector_count, crtcs, modes, offsets, enabled, width, height)) { - memset(modes, 0, connector_count * sizeof(*modes)); + modes_destroy(dev, modes, connector_count); memset(crtcs, 0, connector_count * sizeof(*crtcs)); memset(offsets, 0, connector_count * sizeof(*offsets)); @@ -867,6 +897,8 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, crtcs, modes, 0, width, height); } + mutex_unlock(&dev->mode_config.mutex); + drm_client_modeset_release(client); for (i = 0; i < connector_count; i++) { @@ -901,11 +933,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, modeset->y = offset->y; } } - mutex_unlock(&dev->mode_config.mutex); mutex_unlock(&client->modeset_mutex); out: kfree(crtcs); + modes_destroy(dev, modes, connector_count); kfree(modes); kfree(offsets); kfree(enabled); From 82f9570b3563e9045c70658f474f11a48a21d4b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:51 +0200 Subject: [PATCH 0116/1627] drm/client: Stop using the legacy crtc->mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit crtc->mode is legacy junk and shouldn't really be used with atomic drivers. Most (all?) atomic drivers do end up still calling drm_atomic_helper_update_legacy_modeset_state() at some point, so crtc->mode does still get populated, and this does work for now. But now that the modes[] lifetime issues have been sorted out we can just switch over to the proper crtc->state->mode. v2: Rebase Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-6-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/drm_client_modeset.c | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index ff034359f063..4c64535fb82c 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -733,20 +733,9 @@ retry: /* last resort: use current mode */ if (!modes[i]) { - /* - * IMPORTANT: We want to use the adjusted mode (i.e. - * after the panel fitter upscaling) as the initial - * config, not the input mode, which is what crtc->mode - * usually contains. 
But since our current - * code puts a mode derived from the post-pfit timings - * into crtc->mode this works out correctly. - * - * This is crtc->mode and not crtc->state->mode for the - * fastboot check to work correctly. - */ mode_type = "current"; mode_replace(dev, &modes[i], - &connector->state->crtc->mode); + &new_crtc->state->mode); } /* From 4e5613849ecde8c8c4b0cb4e7bfe25ba5d149020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:52 +0200 Subject: [PATCH 0117/1627] drm/client: s/new_crtc/crtc/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename the 'new_crtc' variable to just 'crtc' in drm_client_firmware_config(). We don't call any of the other stuff in here new or old so this feels out of place. v2: Rebase Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-7-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/drm_client_modeset.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 4c64535fb82c..a0caa2b229dd 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -658,7 +658,7 @@ retry: for (i = 0; i < count; i++) { struct drm_connector *connector; struct drm_encoder *encoder; - struct drm_crtc *new_crtc; + struct drm_crtc *crtc; const char *mode_type; connector = connectors[i]; @@ -700,7 +700,7 @@ retry: num_connectors_enabled++; - new_crtc = connector->state->crtc; + crtc = connector->state->crtc; /* * Make sure we're not trying to drive multiple connectors @@ -708,7 +708,7 @@ retry: * match the BIOS. */ for (j = 0; j < count; j++) { - if (crtcs[j] == new_crtc) { + if (crtcs[j] == crtc) { drm_dbg_kms(dev, "[CONNECTOR:%d:%s] fallback: cloned configuration\n", connector->base.id, connector->name); goto bail; @@ -735,7 +735,7 @@ retry: if (!modes[i]) { mode_type = "current"; mode_replace(dev, &modes[i], - &new_crtc->state->mode); + &crtc->state->mode); } /* @@ -748,11 +748,11 @@ retry: mode_replace(dev, &modes[i], drm_connector_fallback_non_tiled_mode(connector)); } - crtcs[i] = new_crtc; + crtcs[i] = crtc; drm_dbg_kms(dev, "[CONNECTOR::%d:%s] on [CRTC:%d:%s] using %s mode: %s\n", connector->base.id, connector->name, - new_crtc->base.id, new_crtc->name, + crtc->base.id, crtc->name, mode_type, modes[i]->name); fallback = false; From c11acfe20c24f91588f1d9401f79da71a74fc5a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:53 +0200 Subject: [PATCH 0118/1627] drm/client: Move variables to tighter scope MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bunch of variables are only needed inside loops and whatnot. Move them to a tighter scope to make the code less confusing. 
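As a generic sketch of the pattern (made-up function and names, not the actual drm_client code), a local that is only needed by one loop iteration is declared at the top of that loop body instead of at function scope, so its lifetime matches its use:

  static int count_connected(struct drm_connector *connectors[],
                             unsigned int connector_count)
  {
          int n = 0;
          int i;

          for (i = 0; i < connector_count; i++) {
                  /* only needed within this iteration */
                  struct drm_connector *connector = connectors[i];

                  if (connector->status == connector_status_connected)
                          n++;
          }

          return n;
  }
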
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-8-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/drm_client_modeset.c | 33 +++++++++++++++------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index a0caa2b229dd..54cbcaa476e2 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -73,9 +73,10 @@ err_free: static void drm_client_modeset_release(struct drm_client_dev *client) { struct drm_mode_set *modeset; - unsigned int i; drm_client_for_each_modeset(modeset, client) { + unsigned int i; + drm_mode_destroy(client->dev, modeset->mode); modeset->mode = NULL; modeset->fb = NULL; @@ -291,9 +292,8 @@ static bool drm_client_target_cloned(struct drm_device *dev, struct drm_client_offset offsets[], bool enabled[], int width, int height) { - int count, i, j; + int count, i; bool can_clone = false; - const struct drm_display_mode *mode; struct drm_display_mode *dmt_mode; /* only contemplate cloning in the single crtc case */ @@ -313,6 +313,8 @@ static bool drm_client_target_cloned(struct drm_device *dev, /* check the command line or if nothing common pick 1024x768 */ can_clone = true; for (i = 0; i < connector_count; i++) { + int j; + if (!enabled[i]) continue; @@ -347,6 +349,8 @@ static bool drm_client_target_cloned(struct drm_device *dev, goto fail; for (i = 0; i < connector_count; i++) { + const struct drm_display_mode *mode; + if (!enabled[i]) continue; @@ -380,12 +384,12 @@ static int drm_client_get_tile_offsets(struct drm_device *dev, int idx, int h_idx, int v_idx) { - struct drm_connector *connector; int i; int hoffset = 0, voffset = 0; for (i = 0; i < connector_count; i++) { - connector = connectors[i]; + struct drm_connector *connector = connectors[i]; + if (!connector->has_tile) continue; @@ -415,7 +419,6 @@ static bool drm_client_target_preferred(struct drm_device *dev, bool enabled[], int width, int height) { const u64 mask = BIT_ULL(connector_count) - 1; - struct drm_connector *connector; u64 conn_configured = 0; int tile_pass = 0; int num_tiled_conns = 0; @@ -429,9 +432,9 @@ static bool drm_client_target_preferred(struct drm_device *dev, retry: for (i = 0; i < connector_count; i++) { + struct drm_connector *connector = connectors[i]; const char *mode_type; - connector = connectors[i]; if (conn_configured & BIT_ULL(i)) continue; @@ -546,9 +549,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, struct drm_device *dev = client->dev; struct drm_connector *connector; int my_score, best_score, score; - struct drm_crtc **crtcs, *crtc; + struct drm_crtc **crtcs; struct drm_mode_set *modeset; - int o; if (n == connector_count) return 0; @@ -578,7 +580,8 @@ static int drm_client_pick_crtcs(struct drm_client_dev *client, * remaining connectors */ drm_client_for_each_modeset(modeset, client) { - crtc = modeset->crtc; + struct drm_crtc *crtc = modeset->crtc; + int o; if (!connector_has_possible_crtc(connector, crtc)) continue; @@ -622,7 +625,7 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, const int count = min_t(unsigned int, connector_count, BITS_PER_LONG); unsigned long conn_configured, conn_seq, mask; struct drm_device *dev = client->dev; - int i, j; + int i; bool *save_enabled; bool fallback = true, ret = true; int num_connectors_enabled = 0; @@ -656,12 +659,11 @@ static bool drm_client_firmware_config(struct drm_client_dev *client, 
retry: conn_seq = conn_configured; for (i = 0; i < count; i++) { - struct drm_connector *connector; + struct drm_connector *connector = connectors[i]; struct drm_encoder *encoder; struct drm_crtc *crtc; const char *mode_type; - - connector = connectors[i]; + int j; if (conn_configured & BIT(i)) continue; @@ -1239,11 +1241,12 @@ static void drm_client_modeset_dpms_legacy(struct drm_client_dev *client, int dp struct drm_connector *connector; struct drm_mode_set *modeset; struct drm_modeset_acquire_ctx ctx; - int j; int ret; DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret); drm_client_for_each_modeset(modeset, client) { + int j; + if (!modeset->crtc->enabled) continue; From dbe74119ff71c00f2d863a32f72aab2d15e61c39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ville=20Syrj=C3=A4l=C3=A4?= Date: Fri, 28 Feb 2025 23:14:54 +0200 Subject: [PATCH 0119/1627] drm/client: s/unsigned int i/int i/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the 'unsigned int i' footguns with plain old signed int. Avoids accidents if/when someone decides they need to iterate backwards. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250228211454.8138-9-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/drm_client_modeset.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index 54cbcaa476e2..0f9d5ba36c81 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -39,7 +39,7 @@ int drm_client_modeset_create(struct drm_client_dev *client) unsigned int max_connector_count = 1; struct drm_mode_set *modeset; struct drm_crtc *crtc; - unsigned int i = 0; + int i = 0; /* Add terminating zero entry to enable index less iteration */ client->modesets = kcalloc(num_crtc + 1, sizeof(*client->modesets), GFP_KERNEL); @@ -75,7 +75,7 @@ static void drm_client_modeset_release(struct drm_client_dev *client) struct drm_mode_set *modeset; drm_client_for_each_modeset(modeset, client) { - unsigned int i; + int i; drm_mode_destroy(client->dev, modeset->mode); modeset->mode = NULL; @@ -960,7 +960,7 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation) struct drm_plane *plane = modeset->crtc->primary; struct drm_cmdline_mode *cmdline; u64 valid_mask = 0; - unsigned int i; + int i; if (!modeset->num_connectors) return false; From 278469ff569e1082d56b4a7af26fbaecef9fbf3b Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Wed, 12 Mar 2025 10:31:20 -0700 Subject: [PATCH 0120/1627] drm/xe/eustall: Fix a possible pointer dereference after free If devm_add_action_or_reset() isn't successful, xe_eu_stall_fini() is invoked. So, unsuccessful return from devm_add_action_or_reset() shouldn't dereference gt->eu_stall as xe_eu_stall_fini() already frees it. Fix this issue. 
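The pitfall in general form (a minimal sketch with hypothetical names, not the actual xe code): devm_add_action_or_reset() runs the release action itself when it fails to register it, so on failure the caller must not touch the object again.

  #include <linux/device.h>
  #include <linux/mutex.h>
  #include <linux/slab.h>
  #include <linux/workqueue.h>

  struct foo {
          struct workqueue_struct *wq;
          struct mutex lock;
  };

  /* hypothetical release action that tears down and frees the whole object */
  static void foo_fini(void *arg)
  {
          struct foo *foo = arg;

          destroy_workqueue(foo->wq);
          mutex_destroy(&foo->lock);
          kfree(foo);
  }

  static int foo_register(struct device *dev, struct foo *foo)
  {
          int ret;

          ret = devm_add_action_or_reset(dev, foo_fini, foo);
          if (ret) {
                  /* WRONG: foo_fini() has already run, foo is freed here */
                  destroy_workqueue(foo->wq);
                  return ret;
          }

          return 0;
  }

The fix is to simply return the result, since the reset path has already performed the cleanup:

          return devm_add_action_or_reset(dev, foo_fini, foo);
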
Fixes: 9a0b11d4cf3b ("drm/xe/eustall: Add support to init, enable and disable EU stall sampling") Signed-off-by: Harish Chegondi Reviewed-by: Ashutosh Dixit Signed-off-by: Ashutosh Dixit Link: https://patchwork.freedesktop.org/patch/msgid/eae49a414a7314921108e0388810aaee6261ad92.1741800396.git.harish.chegondi@intel.com --- drivers/gpu/drm/xe/xe_eu_stall.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index 88a92baf5c95..f2bb9168967c 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -222,13 +222,7 @@ int xe_eu_stall_init(struct xe_gt *gt) goto exit_free; } - ret = devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt); - if (ret) - goto exit_destroy; - - return 0; -exit_destroy: - destroy_workqueue(gt->eu_stall->buf_ptr_poll_wq); + return devm_add_action_or_reset(xe->drm.dev, xe_eu_stall_fini, gt); exit_free: mutex_destroy(>->eu_stall->stream_lock); kfree(gt->eu_stall); From 878516a9e62cd220379e511d43dcf58df3a6ca9f Mon Sep 17 00:00:00 2001 From: Qasim Ijaz Date: Thu, 13 Mar 2025 16:14:24 +0000 Subject: [PATCH 0121/1627] drm/ttm/tests: fix incorrect assert in ttm_bo_unreserve_bulk() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the ttm_bo_unreserve_bulk() test function, resv is allocated using kunit_kzalloc(), but the subsequent assertion mistakenly verifies the ttm_dev pointer instead of the resv pointer. Fix the assertion to properly verify the resv pointer. Signed-off-by: Qasim Ijaz Link: https://patchwork.freedesktop.org/patch/msgid/20250313161424.10688-1-qasdev00@gmail.com Reviewed-by: Christian König Signed-off-by: Christian König --- drivers/gpu/drm/ttm/tests/ttm_bo_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c index f8f20d2f6174..e08e5a138420 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_test.c @@ -340,7 +340,7 @@ static void ttm_bo_unreserve_bulk(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, ttm_dev); resv = kunit_kzalloc(test, sizeof(*resv), GFP_KERNEL); - KUNIT_ASSERT_NOT_NULL(test, ttm_dev); + KUNIT_ASSERT_NOT_NULL(test, resv); err = ttm_device_kunit_init(priv, ttm_dev, false, false); KUNIT_ASSERT_EQ(test, err, 0); From b5c68869d2f42f864773778b74cccb316d8359fe Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 12 Mar 2025 14:39:16 +0100 Subject: [PATCH 0122/1627] drm/display: hdmi: Create documentation section We have had documentation for the public functions in the HDMI helpers, but those were never referenced anywhere and thus not compiled as part of the doc. Let's add a section. Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250312-drm-hdmi-state-docs-v2-1-6352a5d68d5b@kernel.org Signed-off-by: Maxime Ripard --- Documentation/gpu/drm-kms-helpers.rst | 15 +++++++++++++ .../gpu/drm/display/drm_hdmi_state_helper.c | 21 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst index b4ee25af1702..5139705089f2 100644 --- a/Documentation/gpu/drm-kms-helpers.rst +++ b/Documentation/gpu/drm-kms-helpers.rst @@ -233,6 +233,21 @@ Panel Self Refresh Helper Reference .. kernel-doc:: drivers/gpu/drm/drm_self_refresh_helper.c :export: +HDMI Atomic State Helpers +========================= + +Overview +-------- + +.. 
kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c + :doc: hdmi helpers + +Functions Reference +------------------- + +.. kernel-doc:: drivers/gpu/drm/display/drm_hdmi_state_helper.c + :export: + HDCP Helper Functions Reference =============================== diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c index c205f37da1e1..a61e72e83162 100644 --- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c @@ -9,6 +9,27 @@ #include #include +/** + * DOC: hdmi helpers + * + * These functions contain an implementation of the HDMI specification + * in the form of KMS helpers. + * + * It contains TMDS character rate computation, automatic selection of + * output formats, infoframes generation, etc. + * + * Testing + * ~~~~~~~ + * + * The helpers have unit testing and can be tested using kunit with: + * + * .. code-block:: bash + * + * $ ./tools/testing/kunit/kunit.py run \ + * --kunitconfig=drivers/gpu/drm/tests \ + * drm_atomic_helper_connector_hdmi_* + */ + /** * __drm_atomic_helper_connector_hdmi_reset() - Initializes all HDMI @drm_connector_state resources * @connector: DRM connector From 6df22c6f1823cc1e3ae973a43b88c6d153df35e0 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 12 Mar 2025 14:39:17 +0100 Subject: [PATCH 0123/1627] drm/display: hdmi: Mention Infoframes testing with edid-decode edid-decode recently gained support for checking that infoframes are compliant and match the EDID the monitor exposes. Since the HDMI helpers provide those infoframes in debugfs, it makes it easy to check from userspace that the drivers (and helpers) behave properly. Let's document it. Cc: Hans Verkuil Reviewed-by: Hans Verkuil Link: https://patchwork.freedesktop.org/patch/msgid/20250312-drm-hdmi-state-docs-v2-2-6352a5d68d5b@kernel.org Signed-off-by: Maxime Ripard --- .../gpu/drm/display/drm_hdmi_state_helper.c | 271 ++++++++++++++++++ 1 file changed, 271 insertions(+) diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c index a61e72e83162..8a2472f277f1 100644 --- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c @@ -18,6 +18,277 @@ * It contains TMDS character rate computation, automatic selection of * output formats, infoframes generation, etc. * + * Infoframes Compliance + * ~~~~~~~~~~~~~~~~~~~~~ + * + * Drivers using the helpers will expose the various infoframes + * generated according to the HDMI specification in debugfs. + * + * Compliance can then be tested using ``edid-decode`` from the ``v4l-utils`` project + * (https://git.linuxtv.org/v4l-utils.git/). A sample run would look like: + * + * .. 
code-block:: bash + * + * # edid-decode \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/avi \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdmi \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm \ + * -I /sys/kernel/debug/dri/1/HDMI-A-1/infoframes/spd \ + * /sys/class/drm/card1-HDMI-A-1/edid \ + * -c + * + * edid-decode (hex): + * + * 00 ff ff ff ff ff ff 00 1e 6d f4 5b 1e ef 06 00 + * 07 20 01 03 80 2f 34 78 ea 24 05 af 4f 42 ab 25 + * 0f 50 54 21 08 00 d1 c0 61 40 45 40 01 01 01 01 + * 01 01 01 01 01 01 98 d0 00 40 a1 40 d4 b0 30 20 + * 3a 00 d1 0b 12 00 00 1a 00 00 00 fd 00 3b 3d 1e + * b2 31 00 0a 20 20 20 20 20 20 00 00 00 fc 00 4c + * 47 20 53 44 51 48 44 0a 20 20 20 20 00 00 00 ff + * 00 32 30 37 4e 54 52 4c 44 43 34 33 30 0a 01 46 + * + * 02 03 42 72 23 09 07 07 4d 01 03 04 90 12 13 1f + * 22 5d 5e 5f 60 61 83 01 00 00 6d 03 0c 00 10 00 + * b8 3c 20 00 60 01 02 03 67 d8 5d c4 01 78 80 03 + * e3 0f 00 18 e2 00 6a e3 05 c0 00 e6 06 05 01 52 + * 52 51 11 5d 00 a0 a0 40 29 b0 30 20 3a 00 d1 0b + * 12 00 00 1a 00 00 00 00 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 c3 + * + * ---------------- + * + * Block 0, Base EDID: + * EDID Structure Version & Revision: 1.3 + * Vendor & Product Identification: + * Manufacturer: GSM + * Model: 23540 + * Serial Number: 454430 (0x0006ef1e) + * Made in: week 7 of 2022 + * Basic Display Parameters & Features: + * Digital display + * Maximum image size: 47 cm x 52 cm + * Gamma: 2.20 + * DPMS levels: Standby Suspend Off + * RGB color display + * First detailed timing is the preferred timing + * Color Characteristics: + * Red : 0.6835, 0.3105 + * Green: 0.2587, 0.6679 + * Blue : 0.1445, 0.0585 + * White: 0.3134, 0.3291 + * Established Timings I & II: + * DMT 0x04: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz + * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz + * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz + * Standard Timings: + * DMT 0x52: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz + * DMT 0x10: 1024x768 60.003840 Hz 4:3 48.363 kHz 65.000000 MHz + * DMT 0x09: 800x600 60.316541 Hz 4:3 37.879 kHz 40.000000 MHz + * Detailed Timing Descriptors: + * DTD 1: 2560x2880 59.966580 Hz 8:9 185.417 kHz 534.000000 MHz (465 mm x 523 mm) + * Hfront 48 Hsync 32 Hback 240 Hpol P + * Vfront 3 Vsync 10 Vback 199 Vpol N + * Display Range Limits: + * Monitor ranges (GTF): 59-61 Hz V, 30-178 kHz H, max dotclock 490 MHz + * Display Product Name: 'LG SDQHD' + * Display Product Serial Number: '207NTRLDC430' + * Extension blocks: 1 + * Checksum: 0x46 + * + * ---------------- + * + * Block 1, CTA-861 Extension Block: + * Revision: 3 + * Basic audio support + * Supports YCbCr 4:4:4 + * Supports YCbCr 4:2:2 + * Native detailed modes: 2 + * Audio Data Block: + * Linear PCM: + * Max channels: 2 + * Supported sample rates (kHz): 48 44.1 32 + * Supported sample sizes (bits): 24 20 16 + * Video Data Block: + * VIC 1: 640x480 59.940476 Hz 4:3 31.469 kHz 25.175000 MHz + * VIC 3: 720x480 59.940060 Hz 16:9 31.469 kHz 27.000000 MHz + * VIC 4: 1280x720 60.000000 Hz 16:9 45.000 kHz 74.250000 MHz + * VIC 16: 1920x1080 60.000000 Hz 16:9 67.500 kHz 148.500000 MHz (native) + * VIC 18: 720x576 50.000000 Hz 16:9 31.250 kHz 27.000000 MHz + * VIC 19: 1280x720 50.000000 Hz 16:9 37.500 kHz 74.250000 MHz + * VIC 31: 1920x1080 50.000000 Hz 16:9 56.250 kHz 148.500000 MHz + * VIC 34: 1920x1080 30.000000 Hz 
16:9 33.750 kHz 74.250000 MHz + * VIC 93: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz + * VIC 94: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz + * VIC 95: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz + * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz + * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz + * Speaker Allocation Data Block: + * FL/FR - Front Left/Right + * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: + * Source physical address: 1.0.0.0 + * Supports_AI + * DC_36bit + * DC_30bit + * DC_Y444 + * Maximum TMDS clock: 300 MHz + * Extended HDMI video details: + * HDMI VICs: + * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz + * HDMI VIC 2: 3840x2160 25.000000 Hz 16:9 56.250 kHz 297.000000 MHz + * HDMI VIC 3: 3840x2160 24.000000 Hz 16:9 54.000 kHz 297.000000 MHz + * Vendor-Specific Data Block (HDMI Forum), OUI C4-5D-D8: + * Version: 1 + * Maximum TMDS Character Rate: 600 MHz + * SCDC Present + * Supports 12-bits/component Deep Color 4:2:0 Pixel Encoding + * Supports 10-bits/component Deep Color 4:2:0 Pixel Encoding + * YCbCr 4:2:0 Capability Map Data Block: + * VIC 96: 3840x2160 50.000000 Hz 16:9 112.500 kHz 594.000000 MHz + * VIC 97: 3840x2160 60.000000 Hz 16:9 135.000 kHz 594.000000 MHz + * Video Capability Data Block: + * YCbCr quantization: No Data + * RGB quantization: Selectable (via AVI Q) + * PT scan behavior: Always Underscanned + * IT scan behavior: Always Underscanned + * CE scan behavior: Always Underscanned + * Colorimetry Data Block: + * BT2020YCC + * BT2020RGB + * HDR Static Metadata Data Block: + * Electro optical transfer functions: + * Traditional gamma - SDR luminance range + * SMPTE ST2084 + * Supported static metadata descriptors: + * Static metadata type 1 + * Desired content max luminance: 82 (295.365 cd/m^2) + * Desired content max frame-average luminance: 82 (295.365 cd/m^2) + * Desired content min luminance: 81 (0.298 cd/m^2) + * Detailed Timing Descriptors: + * DTD 2: 2560x2880 29.986961 Hz 8:9 87.592 kHz 238.250000 MHz (465 mm x 523 mm) + * Hfront 48 Hsync 32 Hback 80 Hpol P + * Vfront 3 Vsync 10 Vback 28 Vpol N + * Checksum: 0xc3 Unused space in Extension Block: 43 bytes + * + * ---------------- + * + * edid-decode 1.29.0-5346 + * edid-decode SHA: c363e9aa6d70 2025-03-11 11:41:18 + * + * Warnings: + * + * Block 1, CTA-861 Extension Block: + * IT Video Formats are overscanned by default, but normally this should be underscanned. + * Video Data Block: VIC 1 and the first DTD are not identical. Is this intended? + * Video Data Block: All VICs are in ascending order, and the first (preferred) VIC <= 4, is that intended? + * Video Capability Data Block: Set Selectable YCbCr Quantization to avoid interop issues. + * Video Capability Data Block: S_PT is equal to S_IT and S_CE, so should be set to 0 instead. + * Colorimetry Data Block: Set the sRGB colorimetry bit to avoid interop issues. + * Display Product Serial Number is set, so the Serial Number in the Base EDID should be 0. + * EDID: + * Base EDID: Some timings are out of range of the Monitor Ranges: + * Vertical Freq: 24.000 - 60.317 Hz (Monitor: 59.000 - 61.000 Hz) + * Horizontal Freq: 31.250 - 185.416 kHz (Monitor: 30.000 - 178.000 kHz) + * Maximum Clock: 594.000 MHz (Monitor: 490.000 MHz) + * + * Failures: + * + * Block 1, CTA-861 Extension Block: + * Video Capability Data Block: IT video formats are always underscanned, but bit 7 of Byte 3 of the CTA-861 Extension header is set to overscanned. 
+ * EDID: + * CTA-861: Native progressive timings are a mix of several resolutions. + * + * EDID conformity: FAIL + * + * ================ + * + * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/audio' was empty. + * + * ================ + * + * edid-decode InfoFrame (hex): + * + * 82 02 0d 31 12 28 04 00 00 00 00 00 00 00 00 00 + * 00 + * + * ---------------- + * + * HDMI InfoFrame Checksum: 0x31 + * + * AVI InfoFrame + * Version: 2 + * Length: 13 + * Y: Color Component Sample Format: RGB + * A: Active Format Information Present: Yes + * B: Bar Data Present: Bar Data not present + * S: Scan Information: Composed for an underscanned display + * C: Colorimetry: No Data + * M: Picture Aspect Ratio: 16:9 + * R: Active Portion Aspect Ratio: 8 + * ITC: IT Content: No Data + * EC: Extended Colorimetry: xvYCC601 + * Q: RGB Quantization Range: Limited Range + * SC: Non-Uniform Picture Scaling: No Known non-uniform scaling + * YQ: YCC Quantization Range: Limited Range + * CN: IT Content Type: Graphics + * PR: Pixel Data Repetition Count: 0 + * Line Number of End of Top Bar: 0 + * Line Number of Start of Bottom Bar: 0 + * Pixel Number of End of Left Bar: 0 + * Pixel Number of Start of Right Bar: 0 + * + * ---------------- + * + * AVI InfoFrame conformity: PASS + * + * ================ + * + * edid-decode InfoFrame (hex): + * + * 81 01 05 49 03 0c 00 20 01 + * + * ---------------- + * + * HDMI InfoFrame Checksum: 0x49 + * + * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 + * Version: 1 + * Length: 5 + * HDMI Video Format: HDMI_VIC is present + * HDMI VIC 1: 3840x2160 30.000000 Hz 16:9 67.500 kHz 297.000000 MHz + * + * ---------------- + * + * Vendor-Specific InfoFrame (HDMI), OUI 00-0C-03 conformity: PASS + * + * ================ + * + * InfoFrame of '/sys/kernel/debug/dri/1/HDMI-A-1/infoframes/hdr_drm' was empty. + * + * ================ + * + * edid-decode InfoFrame (hex): + * + * 83 01 19 93 42 72 6f 61 64 63 6f 6d 56 69 64 65 + * 6f 63 6f 72 65 00 00 00 00 00 00 00 09 + * + * ---------------- + * + * HDMI InfoFrame Checksum: 0x93 + * + * Source Product Description InfoFrame + * Version: 1 + * Length: 25 + * Vendor Name: 'Broadcom' + * Product Description: 'Videocore' + * Source Information: PC general + * + * ---------------- + * + * Source Product Description InfoFrame conformity: PASS + * * Testing * ~~~~~~~ * From 9df13c356d0886d174e977406570f75f4012f27b Mon Sep 17 00:00:00 2001 From: Philipp Stanner Date: Thu, 13 Mar 2025 10:30:54 +0100 Subject: [PATCH 0124/1627] drm/sched: Clarify docu concerning drm_sched_job_arm() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The documentation for drm_sched_job_arm() and especially drm_sched_job_cleanup() does not make it very clear why drm_sched_job_arm() is a point of no return, which it indeed is. Make the nature of drm_sched_job_arm() in the docu as clear as possible. Suggested-by: Christian König Reviewed-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250313093053.65001-2-phasta@kernel.org --- drivers/gpu/drm/scheduler/sched_main.c | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 4d4219fbe49d..829579c41c6b 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -828,11 +828,15 @@ EXPORT_SYMBOL(drm_sched_job_init); * * This arms a scheduler job for execution. 
Specifically it initializes the * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv - * or other places that need to track the completion of this job. + * or other places that need to track the completion of this job. It also + * initializes sequence numbers, which are fundamental for fence ordering. * * Refer to drm_sched_entity_push_job() documentation for locking * considerations. * + * Once this function was called, you *must* submit @job with + * drm_sched_entity_push_job(). + * * This can only be called if drm_sched_job_init() succeeded. */ void drm_sched_job_arm(struct drm_sched_job *job) @@ -1017,9 +1021,12 @@ EXPORT_SYMBOL(drm_sched_job_has_dependency); * Drivers should call this from their error unwind code if @job is aborted * before drm_sched_job_arm() is called. * - * After that point of no return @job is committed to be executed by the - * scheduler, and this function should be called from the - * &drm_sched_backend_ops.free_job callback. + * drm_sched_job_arm() is a point of no return since it initializes the fences + * and their sequence number etc. Once that function has been called, you *must* + * submit it with drm_sched_entity_push_job() and cannot simply abort it by + * calling drm_sched_job_cleanup(). + * + * This function should be called in the &drm_sched_backend_ops.free_job callback. */ void drm_sched_job_cleanup(struct drm_sched_job *job) { @@ -1027,10 +1034,15 @@ void drm_sched_job_cleanup(struct drm_sched_job *job) unsigned long index; if (kref_read(&job->s_fence->finished.refcount)) { - /* drm_sched_job_arm() has been called */ + /* The job has been processed by the scheduler, i.e., + * drm_sched_job_arm() and drm_sched_entity_push_job() have + * been called. + */ dma_fence_put(&job->s_fence->finished); } else { - /* aborted job before committing to run it */ + /* The job was aborted before it has been committed to be run; + * notably, drm_sched_job_arm() has not been called. + */ drm_sched_fence_free(job->s_fence); } From ff568d622b0a587e15925b681152bef3e11b5f3f Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Wed, 12 Mar 2025 11:14:24 +0530 Subject: [PATCH 0125/1627] drm/i915/display: Maintain asciibetical order for HAS_* macros Move HAS_* macros to maintain asciibetical order. 
Signed-off-by: Ankit Nautiyal Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20250312054424.1628358-1-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_display_device.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 717286981687..4e9630f65af6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -143,9 +143,11 @@ struct intel_display_platforms { #define HAS_4TILE(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) #define HAS_ASYNC_FLIPS(__display) (DISPLAY_VER(__display) >= 5) +#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13) #define HAS_BIGJOINER(__display) (DISPLAY_VER(__display) >= 11 && HAS_DSC(__display)) #define HAS_CDCLK_CRAWL(__display) (DISPLAY_INFO(__display)->has_cdclk_crawl) #define HAS_CDCLK_SQUASH(__display) (DISPLAY_INFO(__display)->has_cdclk_squash) +#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20) #define HAS_CMTG(__display) (!(__display)->platform.dg2 && DISPLAY_VER(__display) >= 13) #define HAS_CUR_FBC(__display) (!HAS_GMCH(__display) && IS_DISPLAY_VER(__display, 7, 13)) #define HAS_D12_PLANE_MINIMIZATION(__display) ((__display)->platform.rocketlake || (__display)->platform.alderlake_s) @@ -156,9 +158,9 @@ struct intel_display_platforms { #define HAS_DMC_WAKELOCK(__display) (DISPLAY_VER(__display) >= 20) #define HAS_DOUBLE_BUFFERED_M_N(__display) (DISPLAY_VER(__display) >= 9 || (__display)->platform.broadwell) #define HAS_DOUBLE_WIDE(__display) (DISPLAY_VER(__display) < 4) -#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst) #define HAS_DP20(__display) ((__display)->platform.dg2 || DISPLAY_VER(__display) >= 14) #define HAS_DPT(__display) (DISPLAY_VER(__display) >= 13) +#define HAS_DP_MST(__display) (DISPLAY_INFO(__display)->has_dp_mst) #define HAS_DSB(__display) (DISPLAY_INFO(__display)->has_dsb) #define HAS_DSC(__display) (DISPLAY_RUNTIME_INFO(__display)->has_dsc) #define HAS_DSC_MST(__display) (DISPLAY_VER(__display) >= 12 && HAS_DSC(__display)) @@ -166,8 +168,8 @@ struct intel_display_platforms { #define HAS_FBC_DIRTY_RECT(__display) (DISPLAY_VER(__display) >= 30) #define HAS_FPGA_DBG_UNCLAIMED(__display) (DISPLAY_INFO(__display)->has_fpga_dbg) #define HAS_FW_BLC(__display) (DISPLAY_VER(__display) >= 3) -#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4) #define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake) +#define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4) #define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch) #define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx) #define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc) @@ -189,8 +191,6 @@ struct intel_display_platforms { ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \ HAS_DSC(__display)) #define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11) -#define HAS_AS_SDP(__display) (DISPLAY_VER(__display) >= 13) -#define HAS_CMRR(__display) (DISPLAY_VER(__display) >= 20) #define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask)) #define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug) #define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical) From 
c8619f5402cbcccfe58151b53421029852473e4c Mon Sep 17 00:00:00 2001 From: Alyssa Rosenzweig Date: Mon, 10 Mar 2025 15:28:02 -0400 Subject: [PATCH 0126/1627] drm: add modifiers for Apple GPU layouts Apple GPUs support non-linear "GPU-tiled" image layouts. Add modifiers for these layouts. Mesa requires these modifiers to share non-linear buffers across processes, but no other userspace or kernel support is required/expected. These layouts are notably not used for interchange across hardware blocks (e.g. with the display controller). There are other layouts for that but we don't support them either in userspace or kernelspace yet (even downstream), so we don't add modifiers here. Acked-by: Faith Ekstrand Reviewed-by: Sven Peter Link: https://patchwork.freedesktop.org/patch/msgid/20250310-apple-twiddled-modifiers-v4-1-1ccac9544808@rosenzweig.io Signed-off-by: Alyssa Rosenzweig --- include/uapi/drm/drm_fourcc.h | 45 +++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h index e41a3cec6a9e..81202a50dc9e 100644 --- a/include/uapi/drm/drm_fourcc.h +++ b/include/uapi/drm/drm_fourcc.h @@ -422,6 +422,7 @@ extern "C" { #define DRM_FORMAT_MOD_VENDOR_ALLWINNER 0x09 #define DRM_FORMAT_MOD_VENDOR_AMLOGIC 0x0a #define DRM_FORMAT_MOD_VENDOR_MTK 0x0b +#define DRM_FORMAT_MOD_VENDOR_APPLE 0x0c /* add more to the end as needed */ @@ -1494,6 +1495,50 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier) /* alias for the most common tiling format */ #define DRM_FORMAT_MOD_MTK_16L_32S_TILE DRM_FORMAT_MOD_MTK(MTK_FMT_MOD_TILE_16L32S) +/* + * Apple GPU-tiled layouts. + * + * Apple GPUs support nonlinear tilings with optional lossless compression. + * + * GPU-tiled images are divided into 16KiB tiles: + * + * Bytes per pixel Tile size + * --------------- --------- + * 1 128x128 + * 2 128x64 + * 4 64x64 + * 8 64x32 + * 16 32x32 + * + * Tiles are raster-order. Pixels within a tile are interleaved (Morton order). + * + * Compressed images pad the body to 128-bytes and are immediately followed by a + * metadata section. The metadata section rounds the image dimensions to + * powers-of-two and contains 8 bytes for each 16x16 compression subtile. + * Subtiles are interleaved (Morton order). + * + * All images are 128-byte aligned. + * + * These layouts fundamentally do not have meaningful strides. No matter how we + * specify strides for these layouts, userspace unaware of Apple image layouts + * will be unable to use correctly the specified stride for any purpose. + * Userspace aware of the image layouts do not use strides. The most "correct" + * convention would be setting the image stride to 0. Unfortunately, some + * software assumes the stride is at least (width * bytes per pixel). We + * therefore require that stride equals (width * bytes per pixel). Since the + * stride is arbitrary here, we pick the simplest convention. + * + * Although containing two sections, compressed image layouts are treated in + * software as a single plane. This is modelled after AFBC, a similar + * scheme. Attempting to separate the sections to be "explicit" in DRM would + * only generate more confusion, as software does not treat the image this way. 
+ * + * For detailed information on the hardware image layouts, see + * https://docs.mesa3d.org/drivers/asahi.html#image-layouts + */ +#define DRM_FORMAT_MOD_APPLE_GPU_TILED fourcc_mod_code(APPLE, 1) +#define DRM_FORMAT_MOD_APPLE_GPU_TILED_COMPRESSED fourcc_mod_code(APPLE, 2) + /* * AMD modifiers * From 34b1c1c71d375835d589e0929299b8caf0171cf1 Mon Sep 17 00:00:00 2001 From: Mikolaj Wasiak Date: Tue, 4 Mar 2025 09:43:26 +0100 Subject: [PATCH 0127/1627] i915/selftest/igt_mmap: let mmap tests run in kthread When the driver is loaded on a system with NUMA nodes it might be run in a kthread, which makes it impossible to use current->mm in the selftest. This patch allows the selftest to use current->mm by using active_mm. Signed-off-by: Mikolaj Wasiak Reviewed-by: Eugene Kobyak Reviewed-by: Krzysztof Niemiec Reviewed-by: Krzysztof Karas Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/2w6pt2hnemndwmanwhyn3keexa6vtha7rmo6rqoerkmyxhbrh2@ls7lndjpia6z --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 99a9ade73956..2ef2f3db5bcc 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1837,6 +1837,8 @@ static int igt_mmap_revoke(void *arg) int i915_gem_mman_live_selftests(struct drm_i915_private *i915) { + int ret; + bool unuse_mm = false; static const struct i915_subtest tests[] = { SUBTEST(igt_partial_tiling), SUBTEST(igt_smoke_tiling), @@ -1848,5 +1850,15 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_mmap_gpu), }; - return i915_live_subtests(tests, i915); + if (!current->mm) { + kthread_use_mm(current->active_mm); + unuse_mm = true; + } + + ret = i915_live_subtests(tests, i915); + + if (unuse_mm) + kthread_unuse_mm(current->active_mm); + + return ret; } From 73782fc64793f6e6e0118797d3b9044436f2e8e9 Mon Sep 17 00:00:00 2001 From: Mikolaj Wasiak Date: Tue, 11 Mar 2025 10:33:14 +0100 Subject: [PATCH 0128/1627] i915/gt/selftests: Disable lrc_timestamp test This test was designed to isolate a bug in tigerlake and dg2 hardware. The bug was found and fixed in newer generations. Since we won't support any new hardware with this driver, the test should now be turned off in the CI to not pollute it with random failures on previous hardware. For reference, the issue has been discussed here[*]. [*] https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13697 Signed-off-by: Mikolaj Wasiak Reviewed-by: Chris Wilson Reviewed-by: Krzysztof Karas Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/uxxb22n667zb3aic6zs4mr2krv5zavav5v2zjgqnhnabgxgzif@4icszicjakex --- drivers/gpu/drm/i915/gt/selftest_lrc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index e17b8777d21d..8051123108e5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -7,6 +7,7 @@ #include "gem/i915_gem_internal.h" +#include "i915_drv.h" #include "i915_selftest.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" @@ -858,6 +859,14 @@ static int live_lrc_timestamp(void *arg) U32_MAX, }; + /* + * This test was designed to isolate a hardware bug. 
+ * The bug was found and fixed in future generations but + * now the test pollutes our CI on previous generation. + */ + if (GRAPHICS_VER(gt->i915) == 12) + return 0; + /* * We want to verify that the timestamp is saved and restore across * context switches and is monotonic. From 83a0237859bc5a9e0a716e1db8e7fd3cafd63259 Mon Sep 17 00:00:00 2001 From: Chen Ni Date: Wed, 12 Mar 2025 15:34:04 +0800 Subject: [PATCH 0129/1627] accel/qaic: Remove redundant 'flush_workqueue()' calls 'destroy_workqueue()' already drains the queue before destroying it, so there is no need to flush it explicitly. Remove the redundant 'flush_workqueue()' calls. This was generated with coccinelle: @@ expression E; @@ - flush_workqueue(E); destroy_workqueue(E); Signed-off-by: Chen Ni Reviewed-by: Jeff Hugo Signed-off-by: Jeff Hugo Link: https://patchwork.freedesktop.org/patch/msgid/20250312073404.1429992-1-nichen@iscas.ac.cn --- drivers/accel/qaic/qaic_debugfs.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/accel/qaic/qaic_debugfs.c b/drivers/accel/qaic/qaic_debugfs.c index ba0cf2f94732..a991b8198dc4 100644 --- a/drivers/accel/qaic/qaic_debugfs.c +++ b/drivers/accel/qaic/qaic_debugfs.c @@ -240,7 +240,6 @@ static int qaic_bootlog_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_d mhi_unprepare: mhi_unprepare_from_transfer(mhi_dev); destroy_workqueue: - flush_workqueue(qdev->bootlog_wq); destroy_workqueue(qdev->bootlog_wq); out: return ret; @@ -253,7 +252,6 @@ static void qaic_bootlog_mhi_remove(struct mhi_device *mhi_dev) qdev = dev_get_drvdata(&mhi_dev->dev); mhi_unprepare_from_transfer(qdev->bootlog_ch); - flush_workqueue(qdev->bootlog_wq); destroy_workqueue(qdev->bootlog_wq); qdev->bootlog_ch = NULL; } From 28f79ac609de2797cccdd5fa6c4d5ec8bcef92b4 Mon Sep 17 00:00:00 2001 From: Raag Jadav Date: Wed, 12 Mar 2025 14:29:09 +0530 Subject: [PATCH 0130/1627] drm/xe/hwmon: expose fan speed Add hwmon support for fan1_input, fan2_input and fan3_input attributes, which will expose fan speed of respective channels in RPM when supported by hardware. With this in place we can monitor fan speed using lm-sensors tool. v2: Rely on platform checks instead of mailbox error (Aravind, Rodrigo) v3: Introduce has_fan_control flag (Rodrigo) Signed-off-by: Raag Jadav Reviewed-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20250312085909.755073-1-raag.jadav@intel.com Signed-off-by: Rodrigo Vivi --- .../ABI/testing/sysfs-driver-intel-xe-hwmon | 24 ++++ drivers/gpu/drm/xe/regs/xe_pcode_regs.h | 3 + drivers/gpu/drm/xe/xe_device_types.h | 2 + drivers/gpu/drm/xe/xe_hwmon.c | 125 +++++++++++++++++- drivers/gpu/drm/xe/xe_pci.c | 4 + drivers/gpu/drm/xe/xe_pcode_api.h | 3 + 6 files changed, 160 insertions(+), 1 deletion(-) diff --git a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon index 9bce281314df..adbb9bce15a5 100644 --- a/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon +++ b/Documentation/ABI/testing/sysfs-driver-intel-xe-hwmon @@ -124,3 +124,27 @@ Contact: intel-xe@lists.freedesktop.org Description: RO. VRAM temperature in millidegree Celsius. Only supported for particular Intel Xe graphics platforms. + +What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon/fan1_input +Date: March 2025 +KernelVersion: 6.14 +Contact: intel-xe@lists.freedesktop.org +Description: RO. Fan 1 speed in RPM. + + Only supported for particular Intel Xe graphics platforms. 
+ +What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon/fan2_input +Date: March 2025 +KernelVersion: 6.14 +Contact: intel-xe@lists.freedesktop.org +Description: RO. Fan 2 speed in RPM. + + Only supported for particular Intel Xe graphics platforms. + +What: /sys/bus/pci/drivers/xe/.../hwmon/hwmon/fan3_input +Date: March 2025 +KernelVersion: 6.14 +Contact: intel-xe@lists.freedesktop.org +Description: RO. Fan 3 speed in RPM. + + Only supported for particular Intel Xe graphics platforms. diff --git a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h index 8846eb9ce2a4..c7d5d782e3f9 100644 --- a/drivers/gpu/drm/xe/regs/xe_pcode_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_pcode_regs.h @@ -21,6 +21,9 @@ #define BMG_PACKAGE_POWER_SKU XE_REG(0x138098) #define BMG_PACKAGE_POWER_SKU_UNIT XE_REG(0x1380dc) #define BMG_PACKAGE_ENERGY_STATUS XE_REG(0x138120) +#define BMG_FAN_1_SPEED XE_REG(0x138140) +#define BMG_FAN_2_SPEED XE_REG(0x138170) +#define BMG_FAN_3_SPEED XE_REG(0x1381a0) #define BMG_VRAM_TEMPERATURE XE_REG(0x1382c0) #define BMG_PACKAGE_TEMPERATURE XE_REG(0x138434) #define BMG_PACKAGE_RAPL_LIMIT XE_REG(0x138440) diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index fac488942316..a2c0e791b199 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -314,6 +314,8 @@ struct xe_device { u8 has_atomic_enable_pte_bit:1; /** @info.has_device_atomics_on_smem: Supports device atomics on SMEM */ u8 has_device_atomics_on_smem:1; + /** @info.has_fan_control: Device supports fan control */ + u8 has_fan_control:1; /** @info.has_flat_ccs: Whether flat CCS metadata is used */ u8 has_flat_ccs:1; /** @info.has_heci_cscfi: device has heci cscfi */ diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index 48d80ffdf7bb..eb293aec36a0 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -27,6 +28,7 @@ enum xe_hwmon_reg { REG_PKG_POWER_SKU_UNIT, REG_GT_PERF_STATUS, REG_PKG_ENERGY_STATUS, + REG_FAN_SPEED, }; enum xe_hwmon_reg_operation { @@ -42,6 +44,13 @@ enum xe_hwmon_channel { CHANNEL_MAX, }; +enum xe_fan_channel { + FAN_1, + FAN_2, + FAN_3, + FAN_MAX, +}; + /* * SF_* - scale factors for particular quantities according to hwmon spec. 
*/ @@ -61,6 +70,16 @@ struct xe_hwmon_energy_info { long accum_energy; }; +/** + * struct xe_hwmon_fan_info - to cache previous fan reading + */ +struct xe_hwmon_fan_info { + /** @reg_val_prev: previous fan reg val */ + u32 reg_val_prev; + /** @time_prev: previous timestamp */ + u64 time_prev; +}; + /** * struct xe_hwmon - xe hwmon data structure */ @@ -79,6 +98,8 @@ struct xe_hwmon { int scl_shift_time; /** @ei: Energy info for energyN_input */ struct xe_hwmon_energy_info ei[CHANNEL_MAX]; + /** @fi: Fan info for fanN_input */ + struct xe_hwmon_fan_info fi[FAN_MAX]; }; static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg, @@ -144,6 +165,14 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg return PCU_CR_PACKAGE_ENERGY_STATUS; } break; + case REG_FAN_SPEED: + if (channel == FAN_1) + return BMG_FAN_1_SPEED; + else if (channel == FAN_2) + return BMG_FAN_2_SPEED; + else if (channel == FAN_3) + return BMG_FAN_3_SPEED; + break; default: drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg); break; @@ -454,6 +483,7 @@ static const struct hwmon_channel_info * const hwmon_info[] = { HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL), HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL), HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL), + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT), NULL }; @@ -480,6 +510,19 @@ static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval) (uval & POWER_SETUP_I1_DATA_MASK)); } +static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval) +{ + struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe); + + /* Platforms that don't return correct value */ + if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) { + *uval = 2; + return 0; + } + + return xe_pcode_read(root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), uval, NULL); +} + static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel, long *value, u32 scale_factor) { @@ -705,6 +748,75 @@ xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) } } +static umode_t +xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel) +{ + u32 uval; + + if (!hwmon->xe->info.has_fan_control) + return 0; + + switch (attr) { + case hwmon_fan_input: + if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, &uval)) + return 0; + + return channel < uval ? 0444 : 0; + default: + return 0; + } +} + +static int +xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val) +{ + struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); + struct xe_hwmon_fan_info *fi = &hwmon->fi[channel]; + u64 rotations, time_now, time; + u32 reg_val; + int ret = 0; + + mutex_lock(&hwmon->hwmon_lock); + + reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel)); + time_now = get_jiffies_64(); + + /* + * HW register value is accumulated count of pulses from PWM fan with the scale + * of 2 pulses per rotation. + */ + rotations = (reg_val - fi->reg_val_prev) / 2; + + time = jiffies_delta_to_msecs(time_now - fi->time_prev); + if (unlikely(!time)) { + ret = -EAGAIN; + goto unlock; + } + + /* + * Calculate fan speed in RPM by time averaging two subsequent readings in minutes. 
+ * RPM = number of rotations * msecs per minute / time in msecs + */ + *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time); + + fi->reg_val_prev = reg_val; + fi->time_prev = time_now; +unlock: + mutex_unlock(&hwmon->hwmon_lock); + return ret; +} + +static int +xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val) +{ + switch (attr) { + case hwmon_fan_input: + return xe_hwmon_fan_input_read(hwmon, channel, val); + default: + return -EOPNOTSUPP; + } +} + static umode_t xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr, int channel) @@ -730,6 +842,9 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type, case hwmon_energy: ret = xe_hwmon_energy_is_visible(hwmon, attr, channel); break; + case hwmon_fan: + ret = xe_hwmon_fan_is_visible(hwmon, attr, channel); + break; default: ret = 0; break; @@ -765,6 +880,9 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, case hwmon_energy: ret = xe_hwmon_energy_read(hwmon, attr, channel, val); break; + case hwmon_fan: + ret = xe_hwmon_fan_read(hwmon, attr, channel, val); + break; default: ret = -EOPNOTSUPP; break; @@ -842,7 +960,7 @@ static void xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) { struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe); - long energy; + long energy, fan_speed; u64 val_sku_unit = 0; int channel; struct xe_reg pkg_power_sku_unit; @@ -866,6 +984,11 @@ xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon) for (channel = 0; channel < CHANNEL_MAX; channel++) if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel)) xe_hwmon_energy_get(hwmon, channel, &energy); + + /* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */ + for (channel = 0; channel < FAN_MAX; channel++) + if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel)) + xe_hwmon_fan_input_read(hwmon, channel, &fan_speed); } static void xe_hwmon_mutex_destroy(void *arg) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index da9679c8cf26..fc89d744978a 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -62,6 +62,7 @@ struct xe_device_desc { u8 is_dgfx:1; u8 has_display:1; + u8 has_fan_control:1; u8 has_heci_gscfi:1; u8 has_heci_cscfi:1; u8 has_llc:1; @@ -302,6 +303,7 @@ static const struct xe_device_desc dg2_desc = { DG2_FEATURES, .has_display = true, + .has_fan_control = true, }; static const __maybe_unused struct xe_device_desc pvc_desc = { @@ -336,6 +338,7 @@ static const struct xe_device_desc bmg_desc = { PLATFORM(BATTLEMAGE), .dma_mask_size = 46, .has_display = true, + .has_fan_control = true, .has_heci_cscfi = 1, }; @@ -575,6 +578,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.dma_mask_size = desc->dma_mask_size; xe->info.is_dgfx = desc->is_dgfx; + xe->info.has_fan_control = desc->has_fan_control; xe->info.has_heci_gscfi = desc->has_heci_gscfi; xe->info.has_heci_cscfi = desc->has_heci_cscfi; xe->info.has_llc = desc->has_llc; diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h index 2bae9afdbd35..e622ae17f08d 100644 --- a/drivers/gpu/drm/xe/xe_pcode_api.h +++ b/drivers/gpu/drm/xe/xe_pcode_api.h @@ -49,6 +49,9 @@ /* Domain IDs (param2) */ #define PCODE_MBOX_DOMAIN_HBM 0x2 +#define FAN_SPEED_CONTROL 0x7D +#define FSC_READ_NUM_FANS 0x4 + #define PCODE_SCRATCH(x) XE_REG(0x138320 + ((x) * 4)) /* PCODE_SCRATCH0 */ #define AUXINFO_REG_OFFSET REG_GENMASK(17, 15) From 96c85e428ebaeacd2c640eba075479ab92072ccd Mon Sep 
17 00:00:00 2001 From: Vicki Pfau Date: Thu, 13 Mar 2025 14:16:44 -0700 Subject: [PATCH 0131/1627] drm: panel-orientation-quirks: Add ZOTAC Gaming Zone Add a panel orientation quirk for the ZOTAC Gaming Zone handheld gaming device. Signed-off-by: Vicki Pfau Reviewed-by: Hans de Goede Link: https://patchwork.freedesktop.org/patch/msgid/20250313211643.860786-2-vi@endrift.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/drm_panel_orientation_quirks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index c554ad8f246b..7ac0fd5391fe 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -517,6 +517,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LTH17"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* ZOTAC Gaming Zone */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ZOTAC"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "G0A1W"), + }, + .driver_data = (void *)&lcd1080x1920_leftside_up, }, { /* One Mix 2S (generic strings, also match on bios date) */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"), From b6aa4b8b3ebf502898a8164bd7d1aa7d5870c1b9 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Fri, 14 Mar 2025 03:12:22 +0100 Subject: [PATCH 0132/1627] drm/i915/gt: Fix SPDX license format Header files need to declare the SPDX under /* ... */ style comments at the beginning of the file. Signed-off-by: Andi Shyti Reviewed-by: Sebastian Brzezinka Reviewed-by: Krzysztof Karas Link: https://patchwork.freedesktop.org/patch/msgid/20250314021225.11813-3-andi.shyti@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_wopcm.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_wopcm.h b/drivers/gpu/drm/i915/gt/intel_wopcm.h index 17d6aa86008a..d2038b6de5e7 100644 --- a/drivers/gpu/drm/i915/gt/intel_wopcm.h +++ b/drivers/gpu/drm/i915/gt/intel_wopcm.h @@ -1,6 +1,5 @@ +/* SPDX-License-Identifier: MIT */ /* - * SPDX-License-Identifier: MIT - * * Copyright © 2017-2018 Intel Corporation */ From 5bebf804effe3dd85e01a71d6a1fd82f4a1b436f Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Fri, 14 Mar 2025 03:12:23 +0100 Subject: [PATCH 0133/1627] drm/i915/gt: Remove trailing blank lines Remove useless blank lines before and after the brackets. 
Signed-off-by: Andi Shyti Reviewed-by: Sebastian Brzezinka Reviewed-by: Krzysztof Karas Link: https://patchwork.freedesktop.org/patch/msgid/20250314021225.11813-4-andi.shyti@linux.intel.com --- drivers/gpu/drm/i915/gt/intel_gtt.c | 1 - drivers/gpu/drm/i915/gt/intel_lrc.c | 1 - drivers/gpu/drm/i915/gt/intel_mocs.c | 1 - 3 files changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 30b128b1fde7..afbc5c769308 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -176,7 +176,6 @@ static void clear_vm_list(struct list_head *list) i915_vma_destroy_locked(vma); i915_gem_object_put(obj); } - } } diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 51847a846002..c481b56fa67d 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -751,7 +751,6 @@ static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) { - if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55)) /* * Note that the CSFE context has a dummy slot for CMD_BUF_CCTL diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index d791d63d49b4..96e7eb086a49 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -314,7 +314,6 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = { }; static const struct drm_i915_mocs_entry dg1_mocs_table[] = { - /* UC */ MOCS_ENTRY(1, 0, L3_1_UC), /* WB - L3 */ From 5ba97b5925229ab9211c72de7ff40283685876a3 Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Fri, 14 Mar 2025 03:12:24 +0100 Subject: [PATCH 0134/1627] drm/i915/gt: Use proper sleeping functions for timeouts shorter than 20ms msleep is very imprecise for timeouts shorter than 20 milliseconds and most probably will sleep longer. Use uslee_range() instead. 
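As a rough sketch of the substitution pattern applied below (illustrative only; the exact ranges come from the hunks that follow, and a kernel context is assumed):

    /* msleep() rounds the delay up to whole jiffies, so msleep(1) can easily
     * sleep for tens of milliseconds at low HZ.  usleep_range() is hrtimer
     * based and keeps the wait close to the requested window.
     */
    usleep_range(1000, 2000);	/* wanted roughly 1 ms, up to 2 ms is fine */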
Signed-off-by: Andi Shyti Reviewed-by: Sebastian Brzezinka Reviewed-by: Krzysztof Karas Link: https://patchwork.freedesktop.org/patch/msgid/20250314021225.11813-5-andi.shyti@linux.intel.com --- drivers/gpu/drm/i915/gt/selftest_rc6.c | 3 ++- drivers/gpu/drm/i915/gt/selftest_tlb.c | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 27b6d51ef145..1f8bc5ac20c2 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -60,7 +60,8 @@ int live_rc6_manual(void *arg) /* Force RC6 off for starters */ __intel_rc6_disable(rc6); - msleep(1); /* wakeup is not immediate, takes about 100us on icl */ + /* wakeup is not immediate, takes about 100us on icl */ + usleep_range(1000, 2000); res[0] = rc6_residency(rc6); diff --git a/drivers/gpu/drm/i915/gt/selftest_tlb.c b/drivers/gpu/drm/i915/gt/selftest_tlb.c index 3941f2d6fa47..69ed946a39e5 100644 --- a/drivers/gpu/drm/i915/gt/selftest_tlb.c +++ b/drivers/gpu/drm/i915/gt/selftest_tlb.c @@ -143,7 +143,7 @@ pte_tlbinv(struct intel_context *ce, if (ce->engine->class == OTHER_CLASS) msleep(200); else - msleep(10); + usleep_range(10000, 20000); if (va == vb) { if (!i915_request_completed(rq)) { From 2ae485e3d11d34cb70fcfebf149f5f4fce97f089 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 14 Mar 2025 21:38:33 +0100 Subject: [PATCH 0135/1627] drm/i915: Downgrade device register error if injected Commit 8f460e2c78f2 ("drm/i915: Demidlayer driver loading") which introduced manual device registration also added a message that is submitted on device registration failure as an error. If that failure is triggered by error injection test, that's an expected error, but CI still reports it as a bug. Fix it. Suggested-by: Krzysztof Niemiec Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/9820 Cc: Chris Wilson Cc: Daniel Vetter Signed-off-by: Janusz Krzysztofik Reviewed-by: Krzysztof Niemiec Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20250314205202.809563-6-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_driver.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 6507dcfe4bf5..e0dde7c0fa9c 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -635,8 +635,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) /* Reveal our presence to userspace */ if (drm_dev_register(&dev_priv->drm, 0)) { - drm_err(&dev_priv->drm, - "Failed to register driver for userspace access!\n"); + i915_probe_error(dev_priv, + "Failed to register driver for userspace access!\n"); return; } From 4cce01dc3ecdf5fb9ae75723aea7873ef52e4005 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 14 Mar 2025 21:38:34 +0100 Subject: [PATCH 0136/1627] drm/i915: Fix PXP cleanup missing from probe error rewind Commit f67986b0119c04 ("drm/i915/pxp: Promote pxp subsystem to top-level of i915") added PXP initialization to driver probe path, but didn't add a respective PXP cleanup on probe error. 
That lack of cleanup seems harmless as long as PXP is still unused and idle when a probe failure occurs and error rewind path is entered, but as soon as PXP starts consuming device and driver resources keeping them busy, kernel warnings may be triggered when cleaning up resources provided by memory regions, GGTT, GEM and/or VMA cache from the probe error rewind and/or module unload paths because of missing PXP cleanup. That scenario was observed on attempts to fail the probe and enter the rewind path on injection of now ignored error in device registration path. Fix it. Cc: Alan Previn Cc: Daniele Ceraolo Spurio Signed-off-by: Janusz Krzysztofik Reviewed-by: Krzysztof Niemiec Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20250314205202.809563-7-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_driver.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index e0dde7c0fa9c..10d1d4f3c11c 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -845,6 +845,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; out_cleanup_gem: + intel_pxp_fini(i915); i915_gem_suspend(i915); i915_gem_driver_remove(i915); i915_gem_driver_release(i915); From 223038731489c0ed6bdd3498ad187d4286536ce3 Mon Sep 17 00:00:00 2001 From: Janusz Krzysztofik Date: Fri, 14 Mar 2025 21:38:35 +0100 Subject: [PATCH 0137/1627] drm/i915: Fix harmful driver register/unregister asymmetry Starting with commit ec3e00b4ee27 ("drm/i915: stop registering if drm_dev_register() fails"), we return from i915_driver_register() immediately if drm_dev_register() fails, skipping remaining registration steps, and continue only with remaining probe steps. However, the _unregister() counterpart called at driver remove knows nothing about that skip and executes reverts of all those steps. As a consequence, a number of kernel warnings that taint the kernel are triggered: <3> [525.823143] i915 0000:00:02.0: [drm] *ERROR* Failed to register driver for userspace access! ... <4> [525.831069] ------------[ cut here ]------------ <4> [525.831071] i915 0000:00:02.0: [drm] drm_WARN_ON(power_domains->init_wakeref) <4> [525.831095] WARNING: CPU: 6 PID: 3440 at drivers/gpu/drm/i915/display/intel_display_power.c:2074 intel_power_domains_disable+0xc2/0xd0 [i915] ... <4> [525.831328] CPU: 6 UID: 0 PID: 3440 Comm: i915_module_loa Tainted: G U 6.14.0-rc1-CI_DRM_16076-g7a632b6798b6+ #1 ... <4> [525.831334] RIP: 0010:intel_power_domains_disable+0xc2/0xd0 [i915] ... <4> [525.831483] Call Trace: <4> [525.831484] ... <4> [525.831943] i915_driver_remove+0x4b/0x140 [i915] <4> [525.832028] i915_pci_remove+0x1e/0x40 [i915] <4> [525.832099] pci_device_remove+0x3e/0xb0 <4> [525.832103] device_remove+0x40/0x80 <4> [525.832107] device_release_driver_internal+0x215/0x280 ... Moreover, that unexpected PM reference is left untouched (not released) but overwritten, then that triggers another kernel warning at driver release phase: <4> [526.685700] ------------[ cut here ]------------ <4> [526.685706] i915 0000:00:02.0: [drm] i915 raw-wakerefs=1 wakelocks=1 on cleanup <4> [526.685734] WARNING: CPU: 1 PID: 3440 at drivers/gpu/drm/i915/intel_runtime_pm.c:443 intel_runtime_pm_driver_release+0x75/0x90 [i915] ... <4> [526.686090] RIP: 0010:intel_runtime_pm_driver_release+0x75/0x90 [i915] ... <4> [526.686294] Call Trace: <4> [526.686296] ... 
<4> [526.687025] i915_driver_release+0x7e/0xb0 [i915] <4> [526.687243] drm_dev_put.part.0+0x47/0x90 <4> [526.687250] devm_drm_dev_init_release+0x13/0x30 <4> [526.687255] devm_action_release+0x12/0x30 <4> [526.687261] release_nodes+0x3a/0x120 <4> [526.687268] devres_release_all+0x97/0xe0 <4> [526.687277] device_unbind_cleanup+0x12/0x80 <4> [526.687282] device_release_driver_internal+0x23a/0x280 ... A call to intel_power_domains_disable() was already there. It triggers the drm_WARN_ON() when it finds a reference to a wakeref taken on device probe and not released after device registration failure. That wakeref is then left held forever once its handle gets lost overwritten with another wakeref, hence another WARN() is called from intel_runtime_pm_driver_release(). The WARN() triggered by kernfs_remove_by_name_ns() from i915_teardown_sysfs()->i915_gpu_error_sysfs_teardown(), formerly i915_teardown_error_capture(), was also there when the return was added. A call to intel_gt_sysfs_unregister() that triggers the WARN() from kobject_put() was added to intel_gt_driver_unregister() with commit 69d6bf5c3754ff ("drm/i915/gt: Fix memory leaks in per-gt sysfs"). Fix the asymmetry by failing the driver probe on device registration failure and going through rewind paths. For that to work as expected, we apparently need to start the rewind path of i915_driver_register() with drm_dev_unregister(), even if drm_dev_register() returned an error. v5: Drop unsigned keyword from ret variable declaration (Krzysztof), - keep the "Failed to register driver for userspace access" error message (Krzysztof), - split PXP cleanup addition to rewind path out to a separate patch. v4: Switch to taking an error rewind path on device registration failure (Krzysztof, Lucas). v3: Based on Andi's commitment on introducing a flag, try to address Jani's "must find another way" by finding a better place and name for the flag (in hope that's what Jani had on mind), - split into a series of patches and limit the scope of the first (this) one to a minimum of omitting conditionally only those unregister (sub)steps that trigger kernel warnings when not registered. v2: Check in _unregister whether the drm_dev_register has succeeded and skip some of the _unregister() steps. (Andi) Link: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10047 Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10131 Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/10887 Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/12817 Cc: Lucas De Marchi Cc: Chris Wilson Cc: Ashutosh Dixit Cc: Andi Shyti Cc: Krzysztof Niemiec Cc: Jani Nikula Signed-off-by: Janusz Krzysztofik Reviewed-by: Krzysztof Niemiec Reviewed-by: Andi Shyti Signed-off-by: Andi Shyti Link: https://patchwork.freedesktop.org/patch/msgid/20250314205202.809563-8-janusz.krzysztofik@linux.intel.com --- drivers/gpu/drm/i915/i915_driver.c | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 10d1d4f3c11c..cba56cf73b96 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -622,11 +622,12 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) * Perform any steps necessary to make the driver available via kernel * internal or userspace interfaces. 
*/ -static void i915_driver_register(struct drm_i915_private *dev_priv) +static int i915_driver_register(struct drm_i915_private *dev_priv) { struct intel_display *display = &dev_priv->display; struct intel_gt *gt; unsigned int i; + int ret; i915_gem_driver_register(dev_priv); i915_pmu_register(dev_priv); @@ -634,10 +635,14 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) intel_vgpu_register(dev_priv); /* Reveal our presence to userspace */ - if (drm_dev_register(&dev_priv->drm, 0)) { + ret = drm_dev_register(&dev_priv->drm, 0); + if (ret) { i915_probe_error(dev_priv, "Failed to register driver for userspace access!\n"); - return; + drm_dev_unregister(&dev_priv->drm); + i915_pmu_unregister(dev_priv); + i915_gem_driver_unregister(dev_priv); + return ret; } i915_debugfs_register(dev_priv); @@ -660,6 +665,8 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) if (i915_switcheroo_register(dev_priv)) drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n"); + + return 0; } /** @@ -834,7 +841,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto out_cleanup_gem; - i915_driver_register(i915); + ret = i915_driver_register(i915); + if (ret) + goto out_cleanup_gem; enable_rpm_wakeref_asserts(&i915->runtime_pm); From f42c09e614f1bda96f5690be8d0bb273234febbc Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Thu, 23 Jan 2025 14:44:24 -0600 Subject: [PATCH 0138/1627] drm/vmwgfx: Fix dumb buffer leak Dumb buffers were not being freed because the GEM reference that was acquired in gb_surface_define was not dropped like it is in the 2D case. Dropping this ref uncovered a few additional issues with freeing the resources associated with dirty tracking in vmw_bo_free/release. Additionally the TTM object associated with the surface were also leaking which meant that when the ttm_object_file was closed at process exit the destructor unreferenced an already destroyed surface. The solution is to remove the destructor from the vmw_user_surface associated with the dumb_buffer and immediately unreferencing the TTM object which his removes it from the ttm_object_file. This also allows the early return in vmw_user_surface_base_release for the dumb buffer case to be removed as it should no longer occur. The chain of references now has the GEM handle(s) owning the dumb buffer. The GEM handles have a singular GEM reference to the vmw_bo which is dropped when all handles are closed. When the GEM reference count hits zero the vmw_bo is freed which then unreferences the surface via vmw_resource_release in vmw_bo_release. 
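The resulting ownership chain, sketched for clarity (simplified, with names taken from the message above rather than the exact driver code):

    /*
     * GEM handle(s) held by userspace
     *     |  last handle closed -> singular GEM reference dropped
     *     v
     * struct vmw_bo            (refcount hits zero -> vmw_bo_release())
     *     |  vmw_resource_release() called from vmw_bo_release()
     *     v
     * dumb-buffer surface      (TTM destructor removed, freed here)
     */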
Fixes: d6667f0ddf46 ("drm/vmwgfx: Fix handling of dumb buffers") Signed-off-by: Ian Forbes Reviewed-by: Zack Rusin Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20250123204424.836896-1-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 6 ++++-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 18 ++++++++++++------ 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 8832e4de86f1..6d48aacf6d01 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -51,11 +51,13 @@ static void vmw_bo_release(struct vmw_bo *vbo) mutex_lock(&res->dev_priv->cmdbuf_mutex); (void)vmw_resource_reserve(res, false, true); vmw_resource_mob_detach(res); + if (res->dirty) + res->func->dirty_free(res); if (res->coherent) vmw_bo_dirty_release(res->guest_memory_bo); res->guest_memory_bo = NULL; res->guest_memory_offset = 0; - vmw_resource_unreserve(res, false, false, false, NULL, + vmw_resource_unreserve(res, true, false, false, NULL, 0); mutex_unlock(&res->dev_priv->cmdbuf_mutex); } @@ -73,9 +75,9 @@ static void vmw_bo_free(struct ttm_buffer_object *bo) { struct vmw_bo *vbo = to_vmw_bo(&bo->base); - WARN_ON(vbo->dirty); WARN_ON(!RB_EMPTY_ROOT(&vbo->res_tree)); vmw_bo_release(vbo); + WARN_ON(vbo->dirty); kfree(vbo); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a73af8a355fb..c4d5fe5f330f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -273,7 +273,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv, goto out_bad_resource; res = converter->base_obj_to_res(base); - kref_get(&res->kref); + vmw_resource_reference(res); *p_res = res; ret = 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 02ab65cc63ec..a9c14b8389cb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -639,7 +639,7 @@ static void vmw_user_surface_free(struct vmw_resource *res) struct vmw_user_surface *user_srf = container_of(srf, struct vmw_user_surface, srf); - WARN_ON_ONCE(res->dirty); + WARN_ON(res->dirty); if (user_srf->master) drm_master_put(&user_srf->master); kfree(srf->offsets); @@ -670,8 +670,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) * Dumb buffers own the resource and they'll unref the * resource themselves */ - if (res && res->guest_memory_bo && res->guest_memory_bo->is_dumb) - return; + WARN_ON(res && res->guest_memory_bo && res->guest_memory_bo->is_dumb); vmw_resource_unreference(&res); } @@ -2337,12 +2336,19 @@ int vmw_dumb_create(struct drm_file *file_priv, vbo = res->guest_memory_bo; vbo->is_dumb = true; vbo->dumb_surface = vmw_res_to_srf(res); - + drm_gem_object_put(&vbo->tbo.base); + /* + * Unset the user surface dtor since this in not actually exposed + * to userspace. 
The suface is owned via the dumb_buffer's GEM handle + */ + struct vmw_user_surface *usurf = container_of(vbo->dumb_surface, + struct vmw_user_surface, srf); + usurf->prime.base.refcount_release = NULL; err: if (res) vmw_resource_unreference(&res); - if (ret) - ttm_ref_object_base_unref(tfile, arg.rep.handle); + + ttm_ref_object_base_unref(tfile, arg.rep.handle); return ret; } From e95635d776a6bbb9ac46ae7602b9b1b74be42a3e Mon Sep 17 00:00:00 2001 From: Ian Forbes Date: Fri, 31 Jan 2025 14:03:21 -0600 Subject: [PATCH 0139/1627] drm/vmwgfx: Switch to exclusively using GEM references MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we use a combination of TTM and GEM reference counting which is cumbersome. TTM references are used for kernel internal BOs and operations like validation. Simply switching the ttm_bo_(get|put) calls to their GEM equivalents is insufficient as not all BOs are GEM BOs so we must set the GEM vtable for all BOs even if they are not exposed to userspace. Suggested-by: Christian König Signed-off-by: Ian Forbes Reviewed-by: Zack Rusin Signed-off-by: Zack Rusin Link: https://patchwork.freedesktop.org/patch/msgid/20250131200321.193939-1-ian.forbes@broadcom.com --- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4 +--- drivers/gpu/drm/vmwgfx/vmwgfx_gem.c | 18 ++---------------- drivers/gpu/drm/vmwgfx/vmwgfx_mob.c | 3 +-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8 ++++---- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 4 +--- drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 7 +++---- 10 files changed, 18 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 6d48aacf6d01..f031a312c783 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -36,8 +36,7 @@ static void vmw_bo_release(struct vmw_bo *vbo) { struct vmw_resource *res; - WARN_ON(vbo->tbo.base.funcs && - kref_read(&vbo->tbo.base.refcount) != 0); + WARN_ON(kref_read(&vbo->tbo.base.refcount) != 0); vmw_bo_unmap(vbo); xa_destroy(&vbo->detached_resources); @@ -469,6 +468,7 @@ int vmw_bo_create(struct vmw_private *vmw, if (unlikely(ret != 0)) goto out_error; + (*p_bo)->tbo.base.funcs = &vmw_gem_object_funcs; return ret; out_error: *p_bo = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h index 8c81ae3f5461..cf84a163bfcb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h @@ -204,12 +204,12 @@ static inline void vmw_bo_unreference(struct vmw_bo **buf) *buf = NULL; if (tmp_buf) - ttm_bo_put(&tmp_buf->tbo); + drm_gem_object_put(&tmp_buf->tbo.base); } static inline struct vmw_bo *vmw_bo_reference(struct vmw_bo *buf) { - ttm_bo_get(&buf->tbo); + drm_gem_object_get(&buf->tbo.base); return buf; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index a7c07692262b..98331c4c0335 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -432,7 +432,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * for the new COTable. Initially pin the buffer object to make sure * we can use tryreserve without failure. 
*/ - ret = vmw_gem_object_create(dev_priv, &bo_params, &buf); + ret = vmw_bo_create(dev_priv, &bo_params, &buf); if (ret) { DRM_ERROR("Failed initializing new cotable MOB.\n"); goto out_done; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 0dfb88fb19e2..594af8eb04c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -822,9 +822,7 @@ static inline bool vmw_resource_mob_attached(const struct vmw_resource *res) * GEM related functionality - vmwgfx_gem.c */ struct vmw_bo_params; -int vmw_gem_object_create(struct vmw_private *vmw, - struct vmw_bo_params *params, - struct vmw_bo **p_vbo); +extern const struct drm_gem_object_funcs vmw_gem_object_funcs; extern int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index ed5015ced392..026c9b699604 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -140,7 +140,7 @@ static const struct vm_operations_struct vmw_vm_ops = { .close = ttm_bo_vm_close, }; -static const struct drm_gem_object_funcs vmw_gem_object_funcs = { +const struct drm_gem_object_funcs vmw_gem_object_funcs = { .free = vmw_gem_object_free, .open = vmw_gem_object_open, .close = vmw_gem_object_close, @@ -154,20 +154,6 @@ static const struct drm_gem_object_funcs vmw_gem_object_funcs = { .vm_ops = &vmw_vm_ops, }; -int vmw_gem_object_create(struct vmw_private *vmw, - struct vmw_bo_params *params, - struct vmw_bo **p_vbo) -{ - int ret = vmw_bo_create(vmw, params, p_vbo); - - if (ret != 0) - goto out_no_bo; - - (*p_vbo)->tbo.base.funcs = &vmw_gem_object_funcs; -out_no_bo: - return ret; -} - int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, struct drm_file *filp, uint32_t size, @@ -183,7 +169,7 @@ int vmw_gem_object_create_with_handle(struct vmw_private *dev_priv, .pin = false }; - ret = vmw_gem_object_create(dev_priv, ¶ms, p_vbo); + ret = vmw_bo_create(dev_priv, ¶ms, p_vbo); if (ret != 0) goto out_no_bo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 7055cbefc768..d8204d4265d3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -282,8 +282,7 @@ out_no_setup: } vmw_bo_unpin_unlocked(&batch->otable_bo->tbo); - ttm_bo_put(&batch->otable_bo->tbo); - batch->otable_bo = NULL; + vmw_bo_unreference(&batch->otable_bo); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index c4d5fe5f330f..388011696941 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -347,7 +347,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, return 0; } - ret = vmw_gem_object_create(res->dev_priv, &bo_params, &gbo); + ret = vmw_bo_create(res->dev_priv, &bo_params, &gbo); if (unlikely(ret != 0)) goto out_no_bo; @@ -531,9 +531,9 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, } INIT_LIST_HEAD(&val_list); - ttm_bo_get(&res->guest_memory_bo->tbo); val_buf->bo = &res->guest_memory_bo->tbo; val_buf->num_shared = 0; + drm_gem_object_get(&val_buf->bo->base); list_add_tail(&val_buf->head, &val_list); ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); if (unlikely(ret != 0)) @@ -557,7 +557,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, out_no_validate: ttm_eu_backoff_reservation(ticket, &val_list); 
out_no_reserve: - ttm_bo_put(val_buf->bo); + drm_gem_object_put(&val_buf->bo->base); val_buf->bo = NULL; if (guest_memory_dirty) vmw_user_bo_unref(&res->guest_memory_bo); @@ -619,7 +619,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket, INIT_LIST_HEAD(&val_list); list_add_tail(&val_buf->head, &val_list); ttm_eu_backoff_reservation(ticket, &val_list); - ttm_bo_put(val_buf->bo); + drm_gem_object_put(&val_buf->bo->base); val_buf->bo = NULL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 6149a9c981da..5f5f5a94301f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -445,7 +445,7 @@ vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); - ret = vmw_gem_object_create(dev_priv, &bo_params, &vps->uo.buffer); + ret = vmw_bo_create(dev_priv, &bo_params, &vps->uo.buffer); vmw_overlay_resume_all(dev_priv); if (ret) return ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index a9c14b8389cb..7e281c3c6bc5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -830,9 +830,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, .pin = false }; - ret = vmw_gem_object_create(dev_priv, - ¶ms, - &res->guest_memory_bo); + ret = vmw_bo_create(dev_priv, ¶ms, &res->guest_memory_bo); if (unlikely(ret != 0)) { vmw_resource_unreference(&res); goto out_unlock; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index e7625b3f71e0..7ee93e7191c7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -262,9 +262,8 @@ int vmw_validation_add_bo(struct vmw_validation_context *ctx, bo_node->hash.key); } val_buf = &bo_node->base; - val_buf->bo = ttm_bo_get_unless_zero(&vbo->tbo); - if (!val_buf->bo) - return -ESRCH; + vmw_bo_reference(vbo); + val_buf->bo = &vbo->tbo; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &ctx->bo_list); } @@ -656,7 +655,7 @@ void vmw_validation_unref_lists(struct vmw_validation_context *ctx) struct vmw_validation_res_node *val; list_for_each_entry(entry, &ctx->bo_list, base.head) { - ttm_bo_put(entry->base.bo); + drm_gem_object_put(&entry->base.bo->base); entry->base.bo = NULL; } From b96dabdba9b95f71ded50a1c094ee244408b2a8e Mon Sep 17 00:00:00 2001 From: Tomasz Rusinowicz Date: Tue, 18 Feb 2025 11:03:53 +0100 Subject: [PATCH 0140/1627] drm/xe: Fix exporting xe buffers multiple times MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The `struct ttm_resource->placement` contains TTM_PL_FLAG_* flags, but it was incorrectly tested for XE_PL_* flags. This caused xe_dma_buf_pin() to always fail when invoked for the second time. Fix this by checking the `mem_type` field instead. 
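In short, as an illustrative sketch of the check (simplified, the debug print is omitted; the actual hunk is below):

    /* Broken: resource->placement carries TTM_PL_FLAG_* bits, so comparing it
     * against the XE_PL_* memory type XE_PL_TT was effectively always true
     * for a pinned BO, making the second pin attempt fail.
     */
    if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT)
            return -EINVAL;

    /* Fixed: compare the memory type itself via the xe_bo_is_mem_type() helper. */
    if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT))
            return -EINVAL;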
Fixes: 7764222d54b7 ("drm/xe: Disallow pinning dma-bufs in VRAM") Cc: Thomas Hellström Cc: Rodrigo Vivi Cc: Lucas De Marchi Cc: "Thomas Hellström" Cc: Michal Wajdeczko Cc: Matthew Brost Cc: Matthew Auld Cc: Nirmoy Das Cc: Jani Nikula Cc: intel-xe@lists.freedesktop.org Cc: # v6.8+ Signed-off-by: Tomasz Rusinowicz Signed-off-by: Jacek Lawrynowicz Reviewed-by: Matthew Brost Link: https://patchwork.freedesktop.org/patch/msgid/20250218100353.2137964-1-jacek.lawrynowicz@linux.intel.com Signed-off-by: Thomas Hellström --- drivers/gpu/drm/xe/xe_bo.h | 2 -- drivers/gpu/drm/xe/xe_dma_buf.c | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index bda3fdd408da..ec3e4446d027 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -405,7 +405,6 @@ long xe_bo_shrink(struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo, const struct xe_bo_shrink_flags flags, unsigned long *scanned); -#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) /** * xe_bo_is_mem_type - Whether the bo currently resides in the given * TTM memory type @@ -420,4 +419,3 @@ static inline bool xe_bo_is_mem_type(struct xe_bo *bo, u32 mem_type) return bo->ttm.resource->mem_type == mem_type; } #endif -#endif diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index c5b95470fa32..f67803e15a0e 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -58,7 +58,7 @@ static int xe_dma_buf_pin(struct dma_buf_attachment *attach) * 1) Avoid pinning in a placement not accessible to some importers. * 2) Pinning in VRAM requires PIN accounting which is a to-do. */ - if (xe_bo_is_pinned(bo) && bo->ttm.resource->placement != XE_PL_TT) { + if (xe_bo_is_pinned(bo) && !xe_bo_is_mem_type(bo, XE_PL_TT)) { drm_dbg(&xe->drm, "Can't migrate pinned bo for dma-buf pin.\n"); return -EINVAL; } From 880d851a7fe1bbb6d78ec59087f7cdba46292c36 Mon Sep 17 00:00:00 2001 From: Pranav Tyagi Date: Tue, 18 Mar 2025 11:12:50 +0700 Subject: [PATCH 0141/1627] Documentation: vgaarbiter: Fix grammar Correct grammar issues: - Fix "co-exist" subject-verb agreement - Correct plural form of "server" in context of more than one legacy devices - Use passive mood for intro sentence of libpciaccess section Signed-off-by: Pranav Tyagi Reviewed-by: Bagas Sanjaya Link: https://lore.kernel.org/r/20250220164946.18007-1-pranav.tyagi03@gmail.com [Bagas: massage commit message] Signed-off-by: Bagas Sanjaya Link: https://patchwork.freedesktop.org/patch/msgid/20250318041249.20786-2-bagasdotme@gmail.com Signed-off-by: Maxime Ripard --- Documentation/gpu/vgaarbiter.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Documentation/gpu/vgaarbiter.rst b/Documentation/gpu/vgaarbiter.rst index bde3c0afb059..d1e953712cc2 100644 --- a/Documentation/gpu/vgaarbiter.rst +++ b/Documentation/gpu/vgaarbiter.rst @@ -11,9 +11,9 @@ Section 7, Legacy Devices. The Resource Access Control (RAC) module inside the X server [0] existed for the legacy VGA arbitration task (besides other bus management tasks) when more -than one legacy device co-exists on the same machine. But the problem happens +than one legacy device co-exist on the same machine. But the problem happens when these devices are trying to be accessed by different userspace clients -(e.g. two server in parallel). Their address assignments conflict. Moreover, +(e.g. two servers in parallel). Their address assignments conflict. 
Moreover, ideally, being a userspace application, it is not the role of the X server to control bus resources. Therefore an arbitration scheme outside of the X server is needed to control the sharing of these resources. This document introduces @@ -106,7 +106,7 @@ In-kernel interface libpciaccess ------------ -To use the vga arbiter char device it was implemented an API inside the +To use the vga arbiter char device, an API was implemented inside the libpciaccess library. One field was added to struct pci_device (each device on the system):: From 707bd8cceaac1af31d3eeeee166687bdde580fd3 Mon Sep 17 00:00:00 2001 From: Manikandan Muralidharan Date: Thu, 20 Jun 2024 15:28:56 +0530 Subject: [PATCH 0142/1627] MAINTAINERS: update Microchip's Atmel-HLCDC driver maintainers Drop Sam Ravnborg and Boris Brezillon as they are no longer interested in maintaining the drivers. Add myself and Dharma Balasubiramani as the Maintainer and co-maintainer for Microchip's Atmel-HLCDC driver. Thanks for their work. Signed-off-by: Manikandan Muralidharan Acked-by: Sam Ravnborg Acked-by: Boris Brezillon Acked-by: Nicolas Ferre Link: https://patchwork.freedesktop.org/patch/msgid/20240620095856.777390-1-manikandan.m@microchip.com Signed-off-by: Boris Brezillon --- MAINTAINERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 90e5b92d2382..24e4d90a38d1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7707,8 +7707,8 @@ F: drivers/gpu/drm/ci/xfails/meson* F: drivers/gpu/drm/meson/ DRM DRIVERS FOR ATMEL HLCDC -M: Sam Ravnborg -M: Boris Brezillon +M: Manikandan Muralidharan +M: Dharma Balasubiramani L: dri-devel@lists.freedesktop.org S: Supported T: git https://gitlab.freedesktop.org/drm/misc/kernel.git From 98007a0d56b07605c626c9bdb550b5ae5ce71453 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 12:59:55 +0100 Subject: [PATCH 0143/1627] drm/bridge: Add encoder parameter to drm_bridge_funcs.attach The drm_bridge structure contains an encoder pointer that is widely used by bridge drivers. This pattern is largely documented as deprecated in other KMS entities for atomic drivers. However, one of the main use of that pointer is done in attach to just call drm_bridge_attach on the next bridge to add it to the bridge list. While this dereferences the bridge->encoder pointer, it's effectively the same encoder the bridge was being attached to. We can make it more explicit by adding the encoder the bridge is attached to to the list of attach parameters. This also removes the need to dereference bridge->encoder in most drivers. 
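The conversion pattern, shown on a hypothetical "foo" bridge driver (struct foo, bridge_to_foo() and the next_bridge field are placeholders; the real per-driver changes follow in the diff):

    /* Before: the callback had to reach into bridge->encoder. */
    static int foo_attach(struct drm_bridge *bridge,
                          enum drm_bridge_attach_flags flags)
    {
            struct foo *priv = bridge_to_foo(bridge);

            return drm_bridge_attach(bridge->encoder, priv->next_bridge,
                                     bridge, flags);
    }

    /* After: the encoder the bridge is being attached to is passed in explicitly. */
    static int foo_attach(struct drm_bridge *bridge,
                          struct drm_encoder *encoder,
                          enum drm_bridge_attach_flags flags)
    {
            struct foo *priv = bridge_to_foo(bridge);

            return drm_bridge_attach(encoder, priv->next_bridge,
                                     bridge, flags);
    }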
Reviewed-by: Dmitry Baryshkov Reviewed-by: Douglas Anderson Tested-by: Douglas Anderson Tested-by: Luca Ceresoli Reviewed-by: Luca Ceresoli Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-1-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/adp/adp-mipi.c | 3 ++- drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 3 ++- drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 3 ++- drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 3 ++- drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 2 +- drivers/gpu/drm/bridge/analogix/anx7625.c | 3 ++- drivers/gpu/drm/bridge/aux-bridge.c | 3 ++- drivers/gpu/drm/bridge/aux-hpd-bridge.c | 1 + drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c | 3 ++- drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 1 + drivers/gpu/drm/bridge/chipone-icn6211.c | 6 ++++-- drivers/gpu/drm/bridge/chrontel-ch7033.c | 5 +++-- drivers/gpu/drm/bridge/display-connector.c | 1 + drivers/gpu/drm/bridge/fsl-ldb.c | 3 ++- drivers/gpu/drm/bridge/imx/imx-ldb-helper.c | 7 +++---- drivers/gpu/drm/bridge/imx/imx-ldb-helper.h | 2 +- drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c | 3 ++- drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c | 3 ++- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c | 3 ++- drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c | 3 ++- drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c | 3 ++- drivers/gpu/drm/bridge/ite-it6263.c | 7 ++++--- drivers/gpu/drm/bridge/ite-it6505.c | 1 + drivers/gpu/drm/bridge/ite-it66121.c | 3 ++- drivers/gpu/drm/bridge/lontium-lt8912b.c | 3 ++- drivers/gpu/drm/bridge/lontium-lt9211.c | 3 ++- drivers/gpu/drm/bridge/lontium-lt9611.c | 3 ++- drivers/gpu/drm/bridge/lontium-lt9611uxc.c | 3 ++- drivers/gpu/drm/bridge/lvds-codec.c | 3 ++- .../gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 1 + drivers/gpu/drm/bridge/microchip-lvds.c | 3 ++- drivers/gpu/drm/bridge/nwl-dsi.c | 3 ++- drivers/gpu/drm/bridge/nxp-ptn3460.c | 5 +++-- drivers/gpu/drm/bridge/panel.c | 3 ++- drivers/gpu/drm/bridge/parade-ps8622.c | 1 + drivers/gpu/drm/bridge/parade-ps8640.c | 3 ++- drivers/gpu/drm/bridge/samsung-dsim.c | 3 ++- drivers/gpu/drm/bridge/sii902x.c | 5 +++-- drivers/gpu/drm/bridge/sil-sii8620.c | 1 + drivers/gpu/drm/bridge/simple-bridge.c | 5 +++-- drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 3 ++- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 5 +++-- drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c | 5 +++-- drivers/gpu/drm/bridge/tc358762.c | 3 ++- drivers/gpu/drm/bridge/tc358764.c | 3 ++- drivers/gpu/drm/bridge/tc358767.c | 2 ++ drivers/gpu/drm/bridge/tc358768.c | 3 ++- drivers/gpu/drm/bridge/tc358775.c | 3 ++- drivers/gpu/drm/bridge/tda998x_drv.c | 1 + drivers/gpu/drm/bridge/thc63lvd1024.c | 3 ++- drivers/gpu/drm/bridge/ti-dlpc3433.c | 4 ++-- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 3 ++- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 3 ++- drivers/gpu/drm/bridge/ti-tdp158.c | 6 ++++-- drivers/gpu/drm/bridge/ti-tfp410.c | 5 +++-- drivers/gpu/drm/bridge/ti-tpd12s015.c | 3 ++- drivers/gpu/drm/drm_bridge.c | 2 +- drivers/gpu/drm/imx/ipuv3/parallel-display.c | 3 ++- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 5 +++-- drivers/gpu/drm/mcde/mcde_dsi.c | 3 ++- drivers/gpu/drm/mediatek/mtk_dp.c | 3 ++- drivers/gpu/drm/mediatek/mtk_dpi.c | 3 ++- drivers/gpu/drm/mediatek/mtk_dsi.c | 3 ++- drivers/gpu/drm/mediatek/mtk_hdmi.c | 3 ++- drivers/gpu/drm/meson/meson_encoder_cvbs.c | 3 ++- drivers/gpu/drm/meson/meson_encoder_dsi.c | 3 ++- drivers/gpu/drm/meson/meson_encoder_hdmi.c | 3 ++- drivers/gpu/drm/msm/dsi/dsi_manager.c | 3 ++- 
drivers/gpu/drm/omapdrm/dss/dpi.c | 3 ++- drivers/gpu/drm/omapdrm/dss/dsi.c | 3 ++- drivers/gpu/drm/omapdrm/dss/hdmi4.c | 3 ++- drivers/gpu/drm/omapdrm/dss/hdmi5.c | 3 ++- drivers/gpu/drm/omapdrm/dss/sdi.c | 3 ++- drivers/gpu/drm/omapdrm/dss/venc.c | 3 ++- drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c | 3 ++- drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c | 3 ++- drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c | 3 ++- drivers/gpu/drm/stm/lvds.c | 11 +++++------ drivers/gpu/drm/tidss/tidss_encoder.c | 3 ++- drivers/gpu/drm/vc4/vc4_dsi.c | 3 ++- drivers/gpu/drm/xlnx/zynqmp_dp.c | 3 ++- drivers/platform/arm64/acer-aspire1-ec.c | 3 ++- include/drm/drm_bridge.h | 2 +- 83 files changed, 172 insertions(+), 95 deletions(-) diff --git a/drivers/gpu/drm/adp/adp-mipi.c b/drivers/gpu/drm/adp/adp-mipi.c index ad80542b60ed..2b60128e2c69 100644 --- a/drivers/gpu/drm/adp/adp-mipi.c +++ b/drivers/gpu/drm/adp/adp-mipi.c @@ -212,12 +212,13 @@ static const struct mipi_dsi_host_ops adp_dsi_host_ops = { }; static int adp_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct adp_mipi_drv_private *adp = container_of(bridge, struct adp_mipi_drv_private, bridge); - return drm_bridge_attach(bridge->encoder, adp->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, adp->next_bridge, bridge, flags); } static const struct drm_bridge_funcs adp_dsi_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 050dae338ffe..1257009e850c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -948,13 +948,14 @@ static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge, } static int adv7511_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct adv7511 *adv = bridge_to_adv7511(bridge); int ret = 0; if (adv->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, adv->next_bridge, bridge, + ret = drm_bridge_attach(encoder, adv->next_bridge, bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) return ret; diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c index 83d711ee3a2e..a88a33eb5d97 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c @@ -517,6 +517,7 @@ static const struct drm_connector_funcs anx6345_connector_funcs = { }; static int anx6345_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct anx6345 *anx6345 = bridge_to_anx6345(bridge); @@ -553,7 +554,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge, anx6345->connector.polled = DRM_CONNECTOR_POLL_HPD; err = drm_connector_attach_encoder(&anx6345->connector, - bridge->encoder); + encoder); if (err) { DRM_ERROR("Failed to link up connector to encoder: %d\n", err); goto connector_cleanup; diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c index f74694bb9c50..8b4597885614 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c @@ -888,6 +888,7 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = { }; static int anx78xx_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct 
anx78xx *anx78xx = bridge_to_anx78xx(bridge); @@ -924,7 +925,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge, anx78xx->connector.polled = DRM_CONNECTOR_POLL_HPD; err = drm_connector_attach_encoder(&anx78xx->connector, - bridge->encoder); + encoder); if (err) { DRM_ERROR("Failed to link up connector to encoder: %d\n", err); goto connector_cleanup; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 071168aa0c3b..042154e2d8cc 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1113,10 +1113,10 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = { }; static int analogix_dp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct analogix_dp_device *dp = bridge->driver_private; - struct drm_encoder *encoder = dp->encoder; struct drm_connector *connector = NULL; int ret = 0; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 0b97b66de577..0b61e77c0398 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2141,6 +2141,7 @@ static void hdcp_check_work_func(struct work_struct *work) } static int anx7625_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); @@ -2159,7 +2160,7 @@ static int anx7625_bridge_attach(struct drm_bridge *bridge, } if (ctx->pdata.panel_bridge) { - err = drm_bridge_attach(bridge->encoder, + err = drm_bridge_attach(encoder, ctx->pdata.panel_bridge, &ctx->bridge, flags); if (err) diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index 015983c015e5..c179b86d208f 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -86,6 +86,7 @@ struct drm_aux_bridge_data { }; static int drm_aux_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct drm_aux_bridge_data *data; @@ -95,7 +96,7 @@ static int drm_aux_bridge_attach(struct drm_bridge *bridge, data = container_of(bridge, struct drm_aux_bridge_data, bridge); - return drm_bridge_attach(bridge->encoder, data->next_bridge, bridge, + return drm_bridge_attach(encoder, data->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 48f297c78ee6..b3f588b71a7d 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -156,6 +156,7 @@ void drm_aux_hpd_bridge_notify(struct device *dev, enum drm_connector_status sta EXPORT_SYMBOL_GPL(drm_aux_hpd_bridge_notify); static int drm_aux_hpd_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 
0 : -EINVAL; diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index c7a0247e06ad..8f54c034ac4f 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -605,6 +605,7 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi, } static int cdns_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); @@ -617,7 +618,7 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge, return -ENOTSUPP; } - return drm_bridge_attach(bridge->encoder, output->bridge, bridge, + return drm_bridge_attach(encoder, output->bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index 81fad14c2cd5..ae1bd58975ce 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -1726,6 +1726,7 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp) } static int cdns_mhdp_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); diff --git a/drivers/gpu/drm/bridge/chipone-icn6211.c b/drivers/gpu/drm/bridge/chipone-icn6211.c index 81f7c701961f..634c5b030667 100644 --- a/drivers/gpu/drm/bridge/chipone-icn6211.c +++ b/drivers/gpu/drm/bridge/chipone-icn6211.c @@ -580,11 +580,13 @@ static int chipone_dsi_host_attach(struct chipone *icn) return ret; } -static int chipone_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +static int chipone_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { struct chipone *icn = bridge_to_chipone(bridge); - return drm_bridge_attach(bridge->encoder, icn->panel_bridge, bridge, flags); + return drm_bridge_attach(encoder, icn->panel_bridge, bridge, flags); } #define MAX_INPUT_SEL_FORMATS 1 diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c index da17f0978a79..210c45c1efd4 100644 --- a/drivers/gpu/drm/bridge/chrontel-ch7033.c +++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c @@ -268,13 +268,14 @@ static void ch7033_hpd_event(void *arg, enum drm_connector_status status) } static int ch7033_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ch7033_priv *priv = bridge_to_ch7033_priv(bridge); struct drm_connector *connector = &priv->connector; int ret; - ret = drm_bridge_attach(bridge->encoder, priv->next_bridge, bridge, + ret = drm_bridge_attach(encoder, priv->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) return ret; @@ -305,7 +306,7 @@ static int ch7033_bridge_attach(struct drm_bridge *bridge, return ret; } - return drm_connector_attach_encoder(&priv->connector, bridge->encoder); + return drm_connector_attach_encoder(&priv->connector, encoder); } static void ch7033_bridge_detach(struct drm_bridge *bridge) diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 72bc508d4e6e..09c08a53d5bd 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -34,6 +34,7 @@ to_display_connector(struct drm_bridge *bridge) } static int display_connector_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, 
enum drm_bridge_attach_flags flags) { return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index 26ae1ab5237f..72d8f32d48fa 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -113,11 +113,12 @@ static unsigned long fsl_ldb_link_frequency(struct fsl_ldb *fsl_ldb, int clock) } static int fsl_ldb_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct fsl_ldb *fsl_ldb = to_fsl_ldb(bridge); - return drm_bridge_attach(bridge->encoder, fsl_ldb->panel_bridge, + return drm_bridge_attach(encoder, fsl_ldb->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c index 9b5bebbe357d..61347f6ec33d 100644 --- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c +++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.c @@ -104,7 +104,7 @@ void ldb_bridge_disable_helper(struct drm_bridge *bridge) } EXPORT_SYMBOL_GPL(ldb_bridge_disable_helper); -int ldb_bridge_attach_helper(struct drm_bridge *bridge, +int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ldb_channel *ldb_ch = bridge->driver_private; @@ -116,9 +116,8 @@ int ldb_bridge_attach_helper(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, - ldb_ch->next_bridge, bridge, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); + return drm_bridge_attach(encoder, ldb_ch->next_bridge, bridge, + DRM_BRIDGE_ATTACH_NO_CONNECTOR); } EXPORT_SYMBOL_GPL(ldb_bridge_attach_helper); diff --git a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h index a0a5cde27fbc..38a8a54b37a6 100644 --- a/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h +++ b/drivers/gpu/drm/bridge/imx/imx-ldb-helper.h @@ -81,7 +81,7 @@ void ldb_bridge_enable_helper(struct drm_bridge *bridge); void ldb_bridge_disable_helper(struct drm_bridge *bridge); -int ldb_bridge_attach_helper(struct drm_bridge *bridge, +int ldb_bridge_attach_helper(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags); int ldb_init_helper(struct ldb *ldb); diff --git a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c index 55a763045812..f072c6ed39ef 100644 --- a/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c +++ b/drivers/gpu/drm/bridge/imx/imx-legacy-bridge.c @@ -23,7 +23,8 @@ struct imx_legacy_bridge { #define to_imx_legacy_bridge(bridge) container_of(bridge, struct imx_legacy_bridge, base) static int imx_legacy_bridge_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; diff --git a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c index a17433a7c755..8a4fd7d77a8d 100644 --- a/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c +++ b/drivers/gpu/drm/bridge/imx/imx8mp-hdmi-pvi.c @@ -40,11 +40,12 @@ to_imx8mp_hdmi_pvi(struct drm_bridge *bridge) } static int imx8mp_hdmi_pvi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8mp_hdmi_pvi *pvi = to_imx8mp_hdmi_pvi(bridge); - return drm_bridge_attach(bridge->encoder, pvi->next_bridge, + return drm_bridge_attach(encoder, pvi->next_bridge, bridge, flags); } diff --git 
a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c index 1d9529dc7f2a..1f6fd488e703 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-combiner.c @@ -108,6 +108,7 @@ imx8qxp_pc_bridge_mode_valid(struct drm_bridge *bridge, } static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8qxp_pc_channel *ch = bridge->driver_private; @@ -119,7 +120,7 @@ static int imx8qxp_pc_bridge_attach(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, + return drm_bridge_attach(encoder, ch->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c index cd6818db0fd3..e092c9ea99b0 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pixel-link.c @@ -128,6 +128,7 @@ static void imx8qxp_pixel_link_set_mst_addr(struct imx8qxp_pixel_link *pl) } static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8qxp_pixel_link *pl = bridge->driver_private; @@ -138,7 +139,7 @@ static int imx8qxp_pixel_link_bridge_attach(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, + return drm_bridge_attach(encoder, pl->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c index 49dd4f96d52c..da138ab51b3b 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-pxl2dpi.c @@ -48,6 +48,7 @@ struct imx8qxp_pxl2dpi { #define bridge_to_p2d(b) container_of(b, struct imx8qxp_pxl2dpi, bridge) static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx8qxp_pxl2dpi *p2d = bridge->driver_private; @@ -58,7 +59,7 @@ static int imx8qxp_pxl2dpi_bridge_attach(struct drm_bridge *bridge, return -EINVAL; } - return drm_bridge_attach(bridge->encoder, + return drm_bridge_attach(encoder, p2d->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); } diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c index 21152a1c28f7..a3a63a977b0a 100644 --- a/drivers/gpu/drm/bridge/ite-it6263.c +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -665,13 +665,14 @@ it6263_bridge_mode_valid(struct drm_bridge *bridge, } static int it6263_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct it6263 *it = bridge_to_it6263(bridge); struct drm_connector *connector; int ret; - ret = drm_bridge_attach(bridge->encoder, it->next_bridge, bridge, + ret = drm_bridge_attach(encoder, it->next_bridge, bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -679,7 +680,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge, if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) return 0; - connector = drm_bridge_connector_init(bridge->dev, bridge->encoder); + connector = drm_bridge_connector_init(bridge->dev, encoder); if (IS_ERR(connector)) { ret = PTR_ERR(connector); dev_err(it->dev, "failed to initialize bridge connector: %d\n", @@ -687,7 +688,7 @@ static int it6263_bridge_attach(struct drm_bridge *bridge, return ret; } - 
drm_connector_attach_encoder(connector, bridge->encoder); + drm_connector_attach_encoder(connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 8a607558ac89..4e8b1dcba64f 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -3124,6 +3124,7 @@ static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge) } static int it6505_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct it6505 *it6505 = bridge_to_it6505(bridge); diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index b9f90f32145d..7b110ae53291 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -586,6 +586,7 @@ static bool it66121_is_hpd_detect(struct it66121_ctx *ctx) } static int it66121_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); @@ -594,7 +595,7 @@ static int it66121_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); + ret = drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags); if (ret) return ret; diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index 52da204f5740..3e49d855b364 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -543,12 +543,13 @@ exit: } static int lt8912_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt8912 *lt = bridge_to_lt8912(bridge); int ret; - ret = drm_bridge_attach(bridge->encoder, lt->hdmi_port, bridge, + ret = drm_bridge_attach(encoder, lt->hdmi_port, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) { dev_err(lt->dev, "Failed to attach next bridge (%d)\n", ret); diff --git a/drivers/gpu/drm/bridge/lontium-lt9211.c b/drivers/gpu/drm/bridge/lontium-lt9211.c index 0fc5ea18fe6a..9b2dac9bd63c 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9211.c +++ b/drivers/gpu/drm/bridge/lontium-lt9211.c @@ -99,11 +99,12 @@ static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge) } static int lt9211_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt9211 *ctx = bridge_to_lt9211(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + return drm_bridge_attach(encoder, ctx->panel_bridge, &ctx->bridge, flags); } diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index 026803034231..53987e826ccd 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -740,11 +740,12 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611, } static int lt9611_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); - return drm_bridge_attach(bridge->encoder, lt9611->next_bridge, + return drm_bridge_attach(encoder, lt9611->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index f4c3ff1fdc69..20bf1a3c786d 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c 
+++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -280,11 +280,12 @@ static struct mipi_dsi_device *lt9611uxc_attach_dsi(struct lt9611uxc *lt9611uxc, } static int lt9611uxc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); - return drm_bridge_attach(bridge->encoder, lt9611uxc->next_bridge, + return drm_bridge_attach(encoder, lt9611uxc->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c index 389af0233fcd..1646e454e0b0 100644 --- a/drivers/gpu/drm/bridge/lvds-codec.c +++ b/drivers/gpu/drm/bridge/lvds-codec.c @@ -34,11 +34,12 @@ static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge) } static int lvds_codec_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct lvds_codec *lvds_codec = to_lvds_codec(bridge); - return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge, + return drm_bridge_attach(encoder, lvds_codec->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index a47aabf134fd..15a5a1f644fc 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -190,6 +190,7 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id) } static int ge_b850v3_lvds_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct i2c_client *stdp4028_i2c diff --git a/drivers/gpu/drm/bridge/microchip-lvds.c b/drivers/gpu/drm/bridge/microchip-lvds.c index 53dd140a1b8d..1d4ae0097df8 100644 --- a/drivers/gpu/drm/bridge/microchip-lvds.c +++ b/drivers/gpu/drm/bridge/microchip-lvds.c @@ -104,11 +104,12 @@ static void lvds_serialiser_on(struct mchp_lvds *lvds) } static int mchp_lvds_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mchp_lvds *lvds = bridge_to_lvds(bridge); - return drm_bridge_attach(bridge->encoder, lvds->panel_bridge, + return drm_bridge_attach(encoder, lvds->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index d04c62a0cb9f..55912ae11f46 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -910,6 +910,7 @@ static void nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge, } static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct nwl_dsi *dsi = bridge_to_dsi(bridge); @@ -919,7 +920,7 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge, if (IS_ERR(panel_bridge)) return PTR_ERR(panel_bridge); - return drm_bridge_attach(bridge->encoder, panel_bridge, bridge, flags); + return drm_bridge_attach(encoder, panel_bridge, bridge, flags); } static u32 *nwl_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index 27261b2ac9c8..25d7c415478b 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -214,13 +214,14 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = { }; static int ptn3460_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct 
ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge); int ret; /* Let this driver create connector if requested */ - ret = drm_bridge_attach(bridge->encoder, ptn_bridge->panel_bridge, + ret = drm_bridge_attach(encoder, ptn_bridge->panel_bridge, bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -239,7 +240,7 @@ static int ptn3460_bridge_attach(struct drm_bridge *bridge, &ptn3460_connector_helper_funcs); drm_connector_register(&ptn_bridge->connector); drm_connector_attach_encoder(&ptn_bridge->connector, - bridge->encoder); + encoder); drm_helper_hpd_irq_event(ptn_bridge->connector.dev); diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 258c85c83a28..79b009ab9396 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -58,6 +58,7 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = { }; static int panel_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge); @@ -81,7 +82,7 @@ static int panel_bridge_attach(struct drm_bridge *bridge, drm_panel_bridge_set_orientation(connector, bridge); drm_connector_attach_encoder(&panel_bridge->connector, - bridge->encoder); + encoder); if (bridge->dev->registered) { if (connector->funcs->reset) diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 13ada42a5514..8726fefc5c65 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -418,6 +418,7 @@ static void ps8622_post_disable(struct drm_bridge *bridge) } static int ps8622_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge); diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index a42138b33258..2422ff68c104 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -494,6 +494,7 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge, } static int ps8640_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ps8640 *ps_bridge = bridge_to_ps8640(bridge); @@ -518,7 +519,7 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge, } /* Attach the panel-bridge to the dsi bridge */ - ret = drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge, + ret = drm_bridge_attach(encoder, ps_bridge->panel_bridge, &ps_bridge->bridge, flags); if (ret) goto err_bridge_attach; diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index 54de6ed2fae8..55ac6bd5da08 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -1640,11 +1640,12 @@ static void samsung_dsim_mode_set(struct drm_bridge *bridge, } static int samsung_dsim_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct samsung_dsim *dsi = bridge_to_dsi(bridge); - return drm_bridge_attach(bridge->encoder, dsi->out_bridge, bridge, + return drm_bridge_attach(encoder, dsi->out_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 914a2609a685..6d185b7b0c3e 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -416,6 +416,7 @@ out: } static int 
sii902x_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sii902x *sii902x = bridge_to_sii902x(bridge); @@ -424,7 +425,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge, int ret; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) - return drm_bridge_attach(bridge->encoder, sii902x->next_bridge, + return drm_bridge_attach(encoder, sii902x->next_bridge, bridge, flags); drm_connector_helper_add(&sii902x->connector, @@ -452,7 +453,7 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge, if (ret) return ret; - drm_connector_attach_encoder(&sii902x->connector, bridge->encoder); + drm_connector_attach_encoder(&sii902x->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index 28a2e1ee04b2..3af650dc92a1 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -2203,6 +2203,7 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge) } static int sii8620_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sii8620 *ctx = bridge_to_sii8620(bridge); diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index ab0b0e36e97a..70db5b99e5bb 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -103,12 +103,13 @@ static const struct drm_connector_funcs simple_bridge_con_funcs = { }; static int simple_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge); int ret; - ret = drm_bridge_attach(bridge->encoder, sbridge->next_bridge, bridge, + ret = drm_bridge_attach(encoder, sbridge->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -127,7 +128,7 @@ static int simple_bridge_attach(struct drm_bridge *bridge, return ret; } - drm_connector_attach_encoder(&sbridge->connector, bridge->encoder); + drm_connector_attach_encoder(&sbridge->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 0890add5f707..b1cdf806b3c4 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2889,12 +2889,13 @@ static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge, } static int dw_hdmi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dw_hdmi *hdmi = bridge->driver_private; if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) - return drm_bridge_attach(bridge->encoder, hdmi->next_bridge, + return drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags); return dw_hdmi_connector_create(hdmi); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 2b6e70a49f43..b08ada920a50 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -1072,15 +1072,16 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge, } static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge); /* Set the encoder type as caller does not know it */ - bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI; + encoder->encoder_type = 
DRM_MODE_ENCODER_DSI; /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge, + return drm_bridge_attach(encoder, dsi->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c index 5fd7a459efdd..c76f5f2e74d1 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi2.c @@ -870,15 +870,16 @@ dw_mipi_dsi2_bridge_mode_valid(struct drm_bridge *bridge, } static int dw_mipi_dsi2_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dw_mipi_dsi2 *dsi2 = bridge_to_dsi2(bridge); /* Set the encoder type as caller does not know it */ - bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI; + encoder->encoder_type = DRM_MODE_ENCODER_DSI; /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi2->panel_bridge, bridge, + return drm_bridge_attach(encoder, dsi2->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/tc358762.c b/drivers/gpu/drm/bridge/tc358762.c index 49c76027f831..edf01476f2ef 100644 --- a/drivers/gpu/drm/bridge/tc358762.c +++ b/drivers/gpu/drm/bridge/tc358762.c @@ -202,11 +202,12 @@ static void tc358762_enable(struct drm_bridge *bridge, } static int tc358762_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc358762 *ctx = bridge_to_tc358762(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + return drm_bridge_attach(encoder, ctx->panel_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c index 3d3d135b4348..3f76c890fad9 100644 --- a/drivers/gpu/drm/bridge/tc358764.c +++ b/drivers/gpu/drm/bridge/tc358764.c @@ -295,11 +295,12 @@ static void tc358764_pre_enable(struct drm_bridge *bridge) } static int tc358764_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc358764 *ctx = bridge_to_tc358764(bridge); - return drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, ctx->next_bridge, bridge, flags); } static const struct drm_bridge_funcs tc358764_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 39e2d3a7a27d..7e5449fb86a3 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1795,6 +1795,7 @@ static const struct drm_connector_funcs tc_connector_funcs = { }; static int tc_dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); @@ -1807,6 +1808,7 @@ static int tc_dpi_bridge_attach(struct drm_bridge *bridge, } static int tc_edp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24; diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index ec79b0dd0e2c..6db18d1e8824 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -554,6 +554,7 @@ static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = { }; static int tc358768_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); @@ 
-563,7 +564,7 @@ static int tc358768_bridge_attach(struct drm_bridge *bridge, return -ENOTSUPP; } - return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge, + return drm_bridge_attach(encoder, priv->output.bridge, bridge, flags); } diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index c89757bec4e6..13cd48e77d2d 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -589,12 +589,13 @@ static int tc358775_parse_dt(struct device_node *np, struct tc_data *tc) } static int tc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tc_data *tc = bridge_to_tc(bridge); /* Attach the panel-bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, tc->panel_bridge, + return drm_bridge_attach(encoder, tc->panel_bridge, &tc->bridge, flags); } diff --git a/drivers/gpu/drm/bridge/tda998x_drv.c b/drivers/gpu/drm/bridge/tda998x_drv.c index ebc758c72891..9c5bb2a16769 100644 --- a/drivers/gpu/drm/bridge/tda998x_drv.c +++ b/drivers/gpu/drm/bridge/tda998x_drv.c @@ -1365,6 +1365,7 @@ static int tda998x_connector_init(struct tda998x_priv *priv, /* DRM bridge functions */ static int tda998x_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge); diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index bba10cf9b4f9..e2fc78adebcf 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -43,11 +43,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge) } static int thc63_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct thc63_dev *thc63 = to_thc63(bridge); - return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags); + return drm_bridge_attach(encoder, thc63->next, bridge, flags); } static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge, diff --git a/drivers/gpu/drm/bridge/ti-dlpc3433.c b/drivers/gpu/drm/bridge/ti-dlpc3433.c index 85f2a0e74a1c..47638d1c96ec 100644 --- a/drivers/gpu/drm/bridge/ti-dlpc3433.c +++ b/drivers/gpu/drm/bridge/ti-dlpc3433.c @@ -242,12 +242,12 @@ static void dlpc_mode_set(struct drm_bridge *bridge, drm_mode_copy(&dlpc->mode, adjusted_mode); } -static int dlpc_attach(struct drm_bridge *bridge, +static int dlpc_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dlpc *dlpc = bridge_to_dlpc(bridge); - return drm_bridge_attach(bridge->encoder, dlpc->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, dlpc->next_bridge, bridge, flags); } static const struct drm_bridge_funcs dlpc_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 95563aa1b450..7122a3ebd883 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -290,11 +290,12 @@ static struct sn65dsi83 *bridge_to_sn65dsi83(struct drm_bridge *bridge) } static int sn65dsi83_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge); - return drm_bridge_attach(bridge->encoder, ctx->panel_bridge, + return drm_bridge_attach(encoder, ctx->panel_bridge, &ctx->bridge, flags); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c 
b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 01d456b955ab..190929a41abd 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -732,6 +732,7 @@ static int ti_sn_attach_host(struct auxiliary_device *adev, struct ti_sn65dsi86 } static int ti_sn_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); @@ -748,7 +749,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge, * Attach the next bridge. * We never want the next bridge to *also* create a connector. */ - ret = drm_bridge_attach(bridge->encoder, pdata->next_bridge, + ret = drm_bridge_attach(encoder, pdata->next_bridge, &pdata->bridge, flags | DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) goto err_initted_aux; diff --git a/drivers/gpu/drm/bridge/ti-tdp158.c b/drivers/gpu/drm/bridge/ti-tdp158.c index 22316382451f..cca75443f012 100644 --- a/drivers/gpu/drm/bridge/ti-tdp158.c +++ b/drivers/gpu/drm/bridge/ti-tdp158.c @@ -45,11 +45,13 @@ static void tdp158_disable(struct drm_bridge *bridge, regulator_disable(tdp158->vcc); } -static int tdp158_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +static int tdp158_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { struct tdp158 *tdp158 = bridge->driver_private; - return drm_bridge_attach(bridge->encoder, tdp158->next, bridge, flags); + return drm_bridge_attach(encoder, tdp158->next, bridge, flags); } static const struct drm_bridge_funcs tdp158_bridge_funcs = { diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 79ab5da827e1..e15d232ddbac 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -120,12 +120,13 @@ static void tfp410_hpd_callback(void *arg, enum drm_connector_status status) } static int tfp410_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tfp410 *dvi = drm_bridge_to_tfp410(bridge); int ret; - ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge, + ret = drm_bridge_attach(encoder, dvi->next_bridge, bridge, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret < 0) return ret; @@ -159,7 +160,7 @@ static int tfp410_attach(struct drm_bridge *bridge, drm_display_info_set_bus_formats(&dvi->connector.display_info, &dvi->bus_format, 1); - drm_connector_attach_encoder(&dvi->connector, bridge->encoder); + drm_connector_attach_encoder(&dvi->connector, encoder); return 0; } diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index 47b74cb25b14..1c289051a598 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -38,6 +38,7 @@ static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge) } static int tpd12s015_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); @@ -46,7 +47,7 @@ static int tpd12s015_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge, + ret = drm_bridge_attach(encoder, tpd->next_bridge, bridge, flags); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index ea9525ec16b5..5bdce9db4475 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c 
@@ -328,7 +328,7 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, list_add(&bridge->chain_node, &encoder->bridge_chain); if (bridge->funcs->attach) { - ret = bridge->funcs->attach(bridge, flags); + ret = bridge->funcs->attach(bridge, encoder, flags); if (ret < 0) goto err_reset_bridge; } diff --git a/drivers/gpu/drm/imx/ipuv3/parallel-display.c b/drivers/gpu/drm/imx/ipuv3/parallel-display.c index 9e66eb77b1eb..6d8325c76697 100644 --- a/drivers/gpu/drm/imx/ipuv3/parallel-display.c +++ b/drivers/gpu/drm/imx/ipuv3/parallel-display.c @@ -162,11 +162,12 @@ static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge, } static int imx_pd_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge); - return drm_bridge_attach(bridge->encoder, imxpd->next_bridge, bridge, flags); + return drm_bridge_attach(encoder, imxpd->next_bridge, bridge, flags); } static const struct drm_bridge_funcs imx_pd_bridge_funcs = { diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 20b93fff0239..f851e9ffdb28 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -791,11 +791,12 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, } static int ingenic_drm_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { - struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(bridge->encoder); + struct ingenic_drm_bridge *ib = to_ingenic_drm_bridge(encoder); - return drm_bridge_attach(bridge->encoder, ib->next_bridge, + return drm_bridge_attach(encoder, ib->next_bridge, &ib->bridge, flags); } diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index 395449a72f0a..b302d8ec3ad0 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -1048,6 +1048,7 @@ void mcde_dsi_disable(struct drm_bridge *bridge) } static int mcde_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mcde_dsi *d = bridge_to_mcde_dsi(bridge); @@ -1059,7 +1060,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge, } /* Attach the DSI bridge to the output (panel etc) bridge */ - return drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags); + return drm_bridge_attach(encoder, d->bridge_out, bridge, flags); } static const struct drm_bridge_funcs mcde_dsi_bridge_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index 3d4648d2e15f..4523cc0a2db8 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2287,6 +2287,7 @@ static void mtk_dp_poweroff(struct mtk_dp *mtk_dp) } static int mtk_dp_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); @@ -2310,7 +2311,7 @@ static int mtk_dp_bridge_attach(struct drm_bridge *bridge, goto err_aux_register; if (mtk_dp->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, mtk_dp->next_bridge, + ret = drm_bridge_attach(encoder, mtk_dp->next_bridge, &mtk_dp->bridge, flags); if (ret) { drm_warn(mtk_dp->drm_dev, diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 1864eb02dbf5..6b96ed4fc861 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ 
b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -701,6 +701,7 @@ static int mtk_dpi_bridge_atomic_check(struct drm_bridge *bridge, } static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_dpi *dpi = bridge_to_dpi(bridge); @@ -719,7 +720,7 @@ static int mtk_dpi_bridge_attach(struct drm_bridge *bridge, "Failed to get bridge\n"); } - return drm_bridge_attach(bridge->encoder, dpi->next_bridge, + return drm_bridge_attach(encoder, dpi->next_bridge, &dpi->bridge, flags); } diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 0683c2b3ca5b..cd2fbd8487c5 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -807,12 +807,13 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi) } static int mtk_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_dsi *dsi = bridge_to_dsi(bridge); /* Attach the panel or bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi->next_bridge, + return drm_bridge_attach(encoder, dsi->next_bridge, &dsi->bridge, flags); } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index d4ab098e1174..e753b8e2d91b 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1278,6 +1278,7 @@ static const struct drm_edid *mtk_hdmi_bridge_edid_read(struct drm_bridge *bridg } static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); @@ -1290,7 +1291,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge, } if (hdmi->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge, + ret = drm_bridge_attach(encoder, hdmi->next_bridge, bridge, flags); if (ret) return ret; diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c index e79f7c3ce32e..c9678dc68fa1 100644 --- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c +++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c @@ -83,12 +83,13 @@ meson_cvbs_get_mode(const struct drm_display_mode *req_mode) } static int meson_encoder_cvbs_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct meson_encoder_cvbs *meson_encoder_cvbs = bridge_to_meson_encoder_cvbs(bridge); - return drm_bridge_attach(bridge->encoder, meson_encoder_cvbs->next_bridge, + return drm_bridge_attach(encoder, meson_encoder_cvbs->next_bridge, &meson_encoder_cvbs->bridge, flags); } diff --git a/drivers/gpu/drm/meson/meson_encoder_dsi.c b/drivers/gpu/drm/meson/meson_encoder_dsi.c index fe204437bd65..3db518e5f95d 100644 --- a/drivers/gpu/drm/meson/meson_encoder_dsi.c +++ b/drivers/gpu/drm/meson/meson_encoder_dsi.c @@ -33,11 +33,12 @@ struct meson_encoder_dsi { container_of(x, struct meson_encoder_dsi, bridge) static int meson_encoder_dsi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct meson_encoder_dsi *encoder_dsi = bridge_to_meson_encoder_dsi(bridge); - return drm_bridge_attach(bridge->encoder, encoder_dsi->next_bridge, + return drm_bridge_attach(encoder, encoder_dsi->next_bridge, &encoder_dsi->bridge, flags); } diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c index 6d1c9262a2cf..5f02695aafd1 100644 --- 
a/drivers/gpu/drm/meson/meson_encoder_hdmi.c +++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c @@ -49,11 +49,12 @@ struct meson_encoder_hdmi { container_of(x, struct meson_encoder_hdmi, bridge) static int meson_encoder_hdmi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct meson_encoder_hdmi *encoder_hdmi = bridge_to_meson_encoder_hdmi(bridge); - return drm_bridge_attach(bridge->encoder, encoder_hdmi->next_bridge, + return drm_bridge_attach(encoder, encoder_hdmi->next_bridge, &encoder_hdmi->bridge, flags); } diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c index a210b7c9e5ca..895ba9815a65 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_manager.c +++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c @@ -424,12 +424,13 @@ static enum drm_mode_status dsi_mgr_bridge_mode_valid(struct drm_bridge *bridge, } static int dsi_mgr_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { int id = dsi_mgr_bridge_get_id(bridge); struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id); - return drm_bridge_attach(bridge->encoder, msm_dsi->next_bridge, + return drm_bridge_attach(encoder, msm_dsi->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index b17e77f700dd..6eff97a09160 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -420,6 +420,7 @@ static void dpi_init_pll(struct dpi_data *dpi) */ static int dpi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dpi_data *dpi = drm_bridge_to_dpi(bridge); @@ -429,7 +430,7 @@ static int dpi_bridge_attach(struct drm_bridge *bridge, dpi_init_pll(dpi); - return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge, + return drm_bridge_attach(encoder, dpi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 59d20eb8a7e0..35e3e332bdcf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -4617,6 +4617,7 @@ static const struct component_ops dsi_component_ops = { */ static int dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct dsi_data *dsi = drm_bridge_to_dsi(bridge); @@ -4624,7 +4625,7 @@ static int dsi_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge, + return drm_bridge_attach(encoder, dsi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index e1ac447221ee..a3b22952fdc3 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -314,6 +314,7 @@ void hdmi4_core_disable(struct hdmi_core_data *core) */ static int hdmi4_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge); @@ -321,7 +322,7 @@ static int hdmi4_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge, + return drm_bridge_attach(encoder, hdmi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 
fa9904e4c218..0c98444d39a9 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -312,6 +312,7 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi) */ static int hdmi5_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge); @@ -319,7 +320,7 @@ static int hdmi5_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge, + return drm_bridge_attach(encoder, hdmi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index f9ae358e8e52..e78826e4b560 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -128,6 +128,7 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi) */ static int sdi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); @@ -135,7 +136,7 @@ static int sdi_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge, + return drm_bridge_attach(encoder, sdi->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index aaeef603682c..50349518eda1 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -538,6 +538,7 @@ static int venc_get_clocks(struct venc_device *venc) */ static int venc_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct venc_device *venc = drm_bridge_to_venc(bridge); @@ -545,7 +546,7 @@ static int venc_bridge_attach(struct drm_bridge *bridge, if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) return -EINVAL; - return drm_bridge_attach(bridge->encoder, venc->output.next_bridge, + return drm_bridge_attach(encoder, venc->output.next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c index 380a855b832a..a9145253294f 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c @@ -634,6 +634,7 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge, } static int rcar_lvds_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge); @@ -641,7 +642,7 @@ static int rcar_lvds_attach(struct drm_bridge *bridge, if (!lvds->next_bridge) return 0; - return drm_bridge_attach(bridge->encoder, lvds->next_bridge, bridge, + return drm_bridge_attach(encoder, lvds->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index d1e626068065..7ab8be46c7f6 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -799,11 +799,12 @@ static void rcar_mipi_dsi_stop_video(struct rcar_mipi_dsi *dsi) */ static int rcar_mipi_dsi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct rcar_mipi_dsi *dsi = bridge_to_rcar_mipi_dsi(bridge); - return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + return 
drm_bridge_attach(encoder, dsi->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c index 4550c6d84796..96c014449547 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_mipi_dsi.c @@ -523,11 +523,12 @@ err: */ static int rzg2l_mipi_dsi_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct rzg2l_mipi_dsi *dsi = bridge_to_rzg2l_mipi_dsi(bridge); - return drm_bridge_attach(bridge->encoder, dsi->next_bridge, bridge, + return drm_bridge_attach(encoder, dsi->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/stm/lvds.c b/drivers/gpu/drm/stm/lvds.c index 4613e8e3b8fd..a3ae9a93ce66 100644 --- a/drivers/gpu/drm/stm/lvds.c +++ b/drivers/gpu/drm/stm/lvds.c @@ -934,28 +934,27 @@ static const struct drm_connector_funcs lvds_conn_funcs = { .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; -static int lvds_attach(struct drm_bridge *bridge, +static int lvds_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct stm_lvds *lvds = bridge_to_stm_lvds(bridge); struct drm_connector *connector = &lvds->connector; - struct drm_encoder *encoder = bridge->encoder; int ret; - if (!bridge->encoder) { + if (!encoder) { drm_err(bridge->dev, "Parent encoder object not found\n"); return -ENODEV; } /* Set the encoder type as caller does not know it */ - bridge->encoder->encoder_type = DRM_MODE_ENCODER_LVDS; + encoder->encoder_type = DRM_MODE_ENCODER_LVDS; /* No cloning support */ - bridge->encoder->possible_clones = 0; + encoder->possible_clones = 0; /* If we have a next bridge just attach it. */ if (lvds->next_bridge) - return drm_bridge_attach(bridge->encoder, lvds->next_bridge, + return drm_bridge_attach(encoder, lvds->next_bridge, bridge, flags); if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) { diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c index 17a86bed8054..95b4aeff2775 100644 --- a/drivers/gpu/drm/tidss/tidss_encoder.c +++ b/drivers/gpu/drm/tidss/tidss_encoder.c @@ -34,11 +34,12 @@ static inline struct tidss_encoder } static int tidss_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct tidss_encoder *t_enc = bridge_to_tidss_encoder(bridge); - return drm_bridge_attach(bridge->encoder, t_enc->next_bridge, + return drm_bridge_attach(encoder, t_enc->next_bridge, bridge, flags); } diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 779b22efe27b..efc6f6078b02 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -1160,12 +1160,13 @@ static void vc4_dsi_bridge_enable(struct drm_bridge *bridge, } static int vc4_dsi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct vc4_dsi *dsi = bridge_to_vc4_dsi(bridge); /* Attach the panel or bridge to the dsi bridge */ - return drm_bridge_attach(bridge->encoder, dsi->out_bridge, + return drm_bridge_attach(encoder, dsi->out_bridge, &dsi->bridge, flags); } diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c index a6a4a871f197..11d2415fb5a1 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c @@ -1481,6 +1481,7 @@ static void zynqmp_dp_disp_disable(struct zynqmp_dp *dp, */ static int zynqmp_dp_bridge_attach(struct drm_bridge 
*bridge, + struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { struct zynqmp_dp *dp = bridge_to_dp(bridge); @@ -1494,7 +1495,7 @@ static int zynqmp_dp_bridge_attach(struct drm_bridge *bridge, } if (dp->next_bridge) { - ret = drm_bridge_attach(bridge->encoder, dp->next_bridge, + ret = drm_bridge_attach(encoder, dp->next_bridge, bridge, flags); if (ret < 0) goto error; diff --git a/drivers/platform/arm64/acer-aspire1-ec.c b/drivers/platform/arm64/acer-aspire1-ec.c index 2df42406430d..958fe1bf5f85 100644 --- a/drivers/platform/arm64/acer-aspire1-ec.c +++ b/drivers/platform/arm64/acer-aspire1-ec.c @@ -366,7 +366,8 @@ static const struct power_supply_desc aspire_ec_adp_psy_desc = { * USB-C DP Alt mode HPD. */ -static int aspire_ec_bridge_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags) +static int aspire_ec_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) { return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL; } diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index b0d86a685a41..884ff1faa4c8 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -73,7 +73,7 @@ struct drm_bridge_funcs { * * Zero on success, error code on failure. */ - int (*attach)(struct drm_bridge *bridge, + int (*attach)(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags); /** From 93b244866cf641f83130bb08946f84a6fdeeca4c Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 12:59:56 +0100 Subject: [PATCH 0144/1627] drm/bridge: Provide a helper to retrieve current bridge state The current bridge state is accessible from the drm_bridge structure, but since it's fairly indirect it's not easy to figure out. Provide a helper to retrieve it. Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-2-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- include/drm/drm_bridge.h | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 884ff1faa4c8..cdad3b78a195 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -957,6 +957,38 @@ static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np) { } #endif +/** + * drm_bridge_get_current_state() - Get the current bridge state + * @bridge: bridge object + * + * This function must be called with the modeset lock held. + * + * RETURNS: + * + * The current bridge state, or NULL if there is none. + */ +static inline struct drm_bridge_state * +drm_bridge_get_current_state(struct drm_bridge *bridge) +{ + if (!bridge) + return NULL; + + /* + * Only atomic bridges will have bridge->base initialized by + * drm_atomic_private_obj_init(), so we need to make sure we're + * working with one before we try to use the lock. + */ + if (!bridge->funcs || !bridge->funcs->atomic_reset) + return NULL; + + drm_modeset_lock_assert_held(&bridge->base.lock); + + if (!bridge->base.state) + return NULL; + + return drm_priv_to_bridge_state(bridge->base.state); +} + /** * drm_bridge_get_next_bridge() - Get the next bridge in the chain * @bridge: bridge object From 6b4dc0803a362f501047e08caddfeb92a580130d Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 12:59:57 +0100 Subject: [PATCH 0145/1627] drm/tests: Add kunit tests for bridges None of the drm_bridge functions have kunit tests so far.
Let's change that, starting with drm_bridge_get_current_state(). Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-3-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/Kconfig | 1 + drivers/gpu/drm/tests/Makefile | 1 + drivers/gpu/drm/tests/drm_bridge_test.c | 210 ++++++++++++++++++++++++ 3 files changed, 212 insertions(+) create mode 100644 drivers/gpu/drm/tests/drm_bridge_test.c diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index e5b59de28216..9b4061231329 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -74,6 +74,7 @@ config DRM_KUNIT_TEST_HELPERS config DRM_KUNIT_TEST tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS depends on DRM && KUNIT && MMU + select DRM_BRIDGE_CONNECTOR select DRM_BUDDY select DRM_DISPLAY_DP_HELPER select DRM_DISPLAY_HDMI_STATE_HELPER diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 0109bcf7faa5..6691c577d2d4 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -5,6 +5,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \ obj-$(CONFIG_DRM_KUNIT_TEST) += \ drm_atomic_state_test.o \ + drm_bridge_test.o \ drm_buddy_test.o \ drm_cmdline_parser_test.o \ drm_connector_test.o \ diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c new file mode 100644 index 000000000000..c0a05c459d95 --- /dev/null +++ b/drivers/gpu/drm/tests/drm_bridge_test.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kunit test for drm_bridge functions + */ +#include +#include +#include +#include + +#include + +struct drm_bridge_init_priv { + struct drm_device drm; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_encoder encoder; + struct drm_bridge bridge; + struct drm_connector *connector; +}; + +static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = { +}; + +static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = { + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +KUNIT_DEFINE_ACTION_WRAPPER(drm_bridge_remove_wrapper, + drm_bridge_remove, + struct drm_bridge *); + +static int drm_kunit_bridge_add(struct kunit *test, + struct drm_bridge *bridge) +{ + drm_bridge_add(bridge); + + return kunit_add_action_or_reset(test, + drm_bridge_remove_wrapper, + bridge); +} + +static struct drm_bridge_init_priv * +drm_test_bridge_init(struct kunit *test, const struct drm_bridge_funcs *funcs) +{ + struct drm_bridge_init_priv *priv; + struct drm_encoder *enc; + struct drm_bridge *bridge; + struct drm_device *drm; + struct device *dev; + int ret; + + dev = drm_kunit_helper_alloc_device(test); + if (IS_ERR(dev)) + return ERR_CAST(dev); + + priv = drm_kunit_helper_alloc_drm_device(test, dev, + struct drm_bridge_init_priv, drm, + DRIVER_MODESET | DRIVER_ATOMIC); + if (IS_ERR(priv)) + return ERR_CAST(priv); + + drm = &priv->drm; + priv->plane = drm_kunit_helper_create_primary_plane(test, drm, + NULL, + NULL, + NULL, 0, + NULL); + if (IS_ERR(priv->plane)) + return ERR_CAST(priv->plane); + + priv->crtc = drm_kunit_helper_create_crtc(test, drm, + priv->plane, NULL, + NULL, + NULL); + if (IS_ERR(priv->crtc)) + return ERR_CAST(priv->crtc); + + enc = &priv->encoder; + ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ERR_PTR(ret); + + enc->possible_crtcs = 
drm_crtc_mask(priv->crtc); + + bridge = &priv->bridge; + bridge->type = DRM_MODE_CONNECTOR_VIRTUAL; + bridge->funcs = funcs; + + ret = drm_kunit_bridge_add(test, bridge); + if (ret) + return ERR_PTR(ret); + + ret = drm_bridge_attach(enc, bridge, NULL, 0); + if (ret) + return ERR_PTR(ret); + + priv->connector = drm_bridge_connector_init(drm, enc); + if (IS_ERR(priv->connector)) + return ERR_CAST(priv->connector); + + drm_connector_attach_encoder(priv->connector, enc); + + drm_mode_config_reset(drm); + + return priv; +} + +/* + * Test that drm_bridge_get_current_state() returns the last committed + * state for an atomic bridge. + */ +static void drm_test_drm_bridge_get_current_state_atomic(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_bridge_state *curr_bridge_state; + struct drm_bridge_state *bridge_state; + struct drm_atomic_state *state; + struct drm_bridge *bridge; + struct drm_device *drm; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + drm_modeset_acquire_init(&ctx, 0); + + drm = &priv->drm; + state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); + +retry_commit: + bridge = &priv->bridge; + bridge_state = drm_atomic_get_bridge_state(state, bridge); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bridge_state); + + ret = drm_atomic_commit(state); + if (ret == -EDEADLK) { + drm_atomic_state_clear(state); + drm_modeset_backoff(&ctx); + goto retry_commit; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + drm_modeset_acquire_init(&ctx, 0); + +retry_state: + ret = drm_modeset_lock(&bridge->base.lock, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_state; + } + + curr_bridge_state = drm_bridge_get_current_state(bridge); + KUNIT_EXPECT_PTR_EQ(test, curr_bridge_state, bridge_state); + + drm_modeset_unlock(&bridge->base.lock); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +/* + * Test that drm_bridge_get_current_state() returns NULL for a + * non-atomic bridge. + */ +static void drm_test_drm_bridge_get_current_state_legacy(struct kunit *test) +{ + struct drm_bridge_init_priv *priv; + struct drm_bridge *bridge; + + priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + /* + * NOTE: Strictly speaking, we should take the bridge->base.lock + * before calling that function. However, bridge->base is only + * initialized if the bridge is atomic, while we explicitly + * initialize one that isn't there. + * + * In order to avoid unnecessary warnings, let's skip the + * locking. The function would return NULL in all cases anyway, + * so we don't really have any concurrency to worry about. 
+ */ + bridge = &priv->bridge; + KUNIT_EXPECT_NULL(test, drm_bridge_get_current_state(bridge)); +} + +static struct kunit_case drm_bridge_get_current_state_tests[] = { + KUNIT_CASE(drm_test_drm_bridge_get_current_state_atomic), + KUNIT_CASE(drm_test_drm_bridge_get_current_state_legacy), + { } +}; + + +static struct kunit_suite drm_bridge_get_current_state_test_suite = { + .name = "drm_test_bridge_get_current_state", + .test_cases = drm_bridge_get_current_state_tests, +}; + +kunit_test_suite(drm_bridge_get_current_state_test_suite); + +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Kunit test for drm_bridge functions"); +MODULE_LICENSE("GPL"); From a7e4886e06f723045ce52de69491196a08cf14e9 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 12:59:58 +0100 Subject: [PATCH 0146/1627] drm/atomic: Introduce helper to lookup connector by encoder With the bridges switching over to drm_bridge_connector, the direct association between a bridge driver and its connector was lost. This is mitigated for atomic bridge drivers by the fact you can access the encoder, and then call drm_atomic_get_old_connector_for_encoder() or drm_atomic_get_new_connector_for_encoder() with drm_atomic_state. This was also made easier by providing drm_atomic_state directly to all atomic hooks bridges can implement. However, bridge drivers don't have a way to access drm_atomic_state outside of the modeset path, like from the hotplug interrupt path or any interrupt handler. Let's introduce a function to retrieve the connector currently assigned to an encoder, without using drm_atomic_state, to make these drivers' life easier. Reviewed-by: Dmitry Baryshkov Reviewed-by: Simona Vetter Tested-by: Herve Codina Co-developed-by: Simona Vetter Signed-off-by: Simona Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-4-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_atomic.c | 59 ++++++++++++++++++++++++++++++++++++ include/drm/drm_atomic.h | 3 ++ 2 files changed, 62 insertions(+) diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index 9ea2611770f4..0138cf0b8b63 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -933,6 +933,9 @@ EXPORT_SYMBOL(drm_atomic_get_new_private_obj_state); * state). This is especially true in enable hooks because the pipeline has * changed. * + * If you don't have access to the atomic state, see + * drm_atomic_get_connector_for_encoder(). + * * Returns: The old connector connected to @encoder, or NULL if the encoder is * not connected. */ @@ -967,6 +970,9 @@ EXPORT_SYMBOL(drm_atomic_get_old_connector_for_encoder); * attached to @encoder vs ones that do (and to inspect their state). This is * especially true in disable hooks because the pipeline will change. * + * If you don't have access to the atomic state, see + * drm_atomic_get_connector_for_encoder(). + * * Returns: The new connector connected to @encoder, or NULL if the encoder is * not connected. */ @@ -987,6 +993,59 @@ drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_atomic_get_new_connector_for_encoder); +/** + * drm_atomic_get_connector_for_encoder - Get connector currently assigned to an encoder + * @encoder: The encoder to find the connector of + * @ctx: Modeset locking context + * + * This function finds and returns the connector currently assigned to + * an @encoder. 
+ * + * It is similar to the drm_atomic_get_old_connector_for_encoder() and + * drm_atomic_get_new_connector_for_encoder() helpers, but doesn't + * require access to the atomic state. If you have access to it, prefer + * using these. This helper is typically useful in situations where you + * don't have access to the atomic state, like detect, link repair, + * threaded interrupt handlers, or hooks from other frameworks (ALSA, + * CEC, etc.). + * + * Returns: + * The connector connected to @encoder, or an error pointer otherwise. + * When the error is EDEADLK, a deadlock has been detected and the + * sequence must be restarted. + */ +struct drm_connector * +drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_connector_list_iter conn_iter; + struct drm_connector *out_connector = ERR_PTR(-EINVAL); + struct drm_connector *connector; + struct drm_device *dev = encoder->dev; + int ret; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ERR_PTR(ret); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (!connector->state) + continue; + + if (encoder == connector->state->best_encoder) { + out_connector = connector; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return out_connector; +} +EXPORT_SYMBOL(drm_atomic_get_connector_for_encoder); + + /** * drm_atomic_get_old_crtc_for_encoder - Get old crtc for an encoder * @state: Atomic state diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h index 4c673f0698fe..38636a593c9d 100644 --- a/include/drm/drm_atomic.h +++ b/include/drm/drm_atomic.h @@ -625,6 +625,9 @@ drm_atomic_get_old_connector_for_encoder(const struct drm_atomic_state *state, struct drm_connector * drm_atomic_get_new_connector_for_encoder(const struct drm_atomic_state *state, struct drm_encoder *encoder); +struct drm_connector * +drm_atomic_get_connector_for_encoder(const struct drm_encoder *encoder, + struct drm_modeset_acquire_ctx *ctx); struct drm_crtc * drm_atomic_get_old_crtc_for_encoder(struct drm_atomic_state *state, From e4e3de631d148d075cb0b3dc3c4c62f1e70fc46c Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 12:59:59 +0100 Subject: [PATCH 0147/1627] drm/tests: helpers: Create new helper to enable output We'll need the HDMI state tests light_up_connector() function in more tests, so let's promote it to a helper. 
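A minimal sketch of how a test is expected to call the promoted helper, assuming the drm_kunit_helper_enable_crtc_connector() name and signature introduced below; test, drm, crtc, connector and mode are assumed to come from the surrounding test fixture, and the EDEADLK backoff mirrors the pattern the new kunit tests later in this series use:

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	/* enable the CRTC -> connector route with the given mode */
	ret = drm_kunit_helper_enable_crtc_connector(test, drm,
						     crtc, connector,
						     mode, &ctx);
	if (ret == -EDEADLK) {
		/* lock contention: back off and restart the sequence */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	KUNIT_ASSERT_EQ(test, ret, 0);

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);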
Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-5-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/drm_kunit_helpers.c | 61 +++++++++++++++++++++++ include/drm/drm_kunit_helpers.h | 8 +++ 2 files changed, 69 insertions(+) diff --git a/drivers/gpu/drm/tests/drm_kunit_helpers.c b/drivers/gpu/drm/tests/drm_kunit_helpers.c index a4eb68f0decc..14ad8f0a0af1 100644 --- a/drivers/gpu/drm/tests/drm_kunit_helpers.c +++ b/drivers/gpu/drm/tests/drm_kunit_helpers.c @@ -2,6 +2,7 @@ #include #include +#include #include #include #include @@ -271,6 +272,66 @@ drm_kunit_helper_create_crtc(struct kunit *test, } EXPORT_SYMBOL_GPL(drm_kunit_helper_create_crtc); +/** + * drm_kunit_helper_enable_crtc_connector - Enables a CRTC -> Connector output + * @test: The test context object + * @drm: The device to alloc the plane for + * @crtc: The CRTC to enable + * @connector: The Connector to enable + * @mode: The display mode to configure the CRTC with + * @ctx: Locking context + * + * This function creates an atomic update to enable the route from @crtc + * to @connector, with the given @mode. + * + * Returns: + * + * A pointer to the new CRTC, or an ERR_PTR() otherwise. If the error + * returned is EDEADLK, the entire atomic sequence must be restarted. + */ +int drm_kunit_helper_enable_crtc_connector(struct kunit *test, + struct drm_device *drm, + struct drm_crtc *crtc, + struct drm_connector *connector, + const struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_atomic_state *state; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + int ret; + + state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); + if (IS_ERR(state)) + return PTR_ERR(state); + + conn_state = drm_atomic_get_connector_state(state, connector); + if (IS_ERR(conn_state)) + return PTR_ERR(conn_state); + + ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); + if (ret) + return ret; + + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); + if (ret) + return ret; + + crtc_state->enable = true; + crtc_state->active = true; + + ret = drm_atomic_commit(state); + if (ret) + return ret; + + return 0; +} +EXPORT_SYMBOL_GPL(drm_kunit_helper_enable_crtc_connector); + static void kunit_action_drm_mode_destroy(void *ptr) { struct drm_display_mode *mode = ptr; diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h index 11d59ce0bac0..1cda7281f300 100644 --- a/include/drm/drm_kunit_helpers.h +++ b/include/drm/drm_kunit_helpers.h @@ -9,6 +9,7 @@ #include +struct drm_connector; struct drm_crtc_funcs; struct drm_crtc_helper_funcs; struct drm_device; @@ -118,6 +119,13 @@ drm_kunit_helper_create_crtc(struct kunit *test, const struct drm_crtc_funcs *funcs, const struct drm_crtc_helper_funcs *helper_funcs); +int drm_kunit_helper_enable_crtc_connector(struct kunit *test, + struct drm_device *drm, + struct drm_crtc *crtc, + struct drm_connector *connector, + const struct drm_display_mode *mode, + struct drm_modeset_acquire_ctx *ctx); + struct drm_display_mode * drm_kunit_display_mode_from_cea_vic(struct kunit *test, struct drm_device *dev, u8 video_code); From 6a5c0ad7e08e0345653058355ac1cdd0a7786af3 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:00 +0100 Subject: [PATCH 0148/1627] drm/tests: hdmi_state_helpers: Switch to new helper We 
introduced a new helper that supersedes the light_up_connector() function in drm_hdmi_state_helper_test, so let's convert all our tests to it. Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-6-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- .../drm/tests/drm_hdmi_state_helper_test.c | 158 ++++++++++-------- 1 file changed, 92 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c index e97efd3af9ed..7ffd666753b1 100644 --- a/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c +++ b/drivers/gpu/drm/tests/drm_hdmi_state_helper_test.c @@ -55,49 +55,6 @@ static struct drm_display_mode *find_preferred_mode(struct drm_connector *connec return preferred; } -static int light_up_connector(struct kunit *test, - struct drm_device *drm, - struct drm_crtc *crtc, - struct drm_connector *connector, - struct drm_display_mode *mode, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_atomic_state *state; - struct drm_connector_state *conn_state; - struct drm_crtc_state *crtc_state; - int ret; - - state = drm_kunit_helper_atomic_state_alloc(test, drm, ctx); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state); - -retry: - conn_state = drm_atomic_get_connector_state(state, connector); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, conn_state); - - ret = drm_atomic_set_crtc_for_connector(conn_state, crtc); - if (ret == -EDEADLK) { - drm_atomic_state_clear(state); - ret = drm_modeset_backoff(ctx); - if (!ret) - goto retry; - } - KUNIT_EXPECT_EQ(test, ret, 0); - - crtc_state = drm_atomic_get_crtc_state(state, crtc); - KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_state); - - ret = drm_atomic_set_mode_for_crtc(crtc_state, mode); - KUNIT_EXPECT_EQ(test, ret, 0); - - crtc_state->enable = true; - crtc_state->active = true; - - ret = drm_atomic_commit(state); - KUNIT_ASSERT_EQ(test, ret, 0); - - return 0; -} - static int set_connector_edid(struct kunit *test, struct drm_connector *connector, const char *edid, size_t edid_len) { @@ -298,7 +255,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_changed(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -364,7 +324,10 @@ static void drm_test_check_broadcast_rgb_crtc_mode_not_changed(struct kunit *tes drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -432,7 +395,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -489,7 +455,10 @@ static void drm_test_check_broadcast_rgb_auto_cea_mode_vic_1(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); 
KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -547,7 +516,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -606,7 +578,10 @@ static void drm_test_check_broadcast_rgb_full_cea_mode_vic_1(struct kunit *test) KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -666,7 +641,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -725,7 +703,10 @@ static void drm_test_check_broadcast_rgb_limited_cea_mode_vic_1(struct kunit *te KUNIT_ASSERT_NOT_NULL(test, mode); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -789,7 +770,10 @@ static void drm_test_check_output_bpc_crtc_mode_changed(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -865,7 +849,10 @@ static void drm_test_check_output_bpc_crtc_mode_not_changed(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); @@ -941,7 +928,10 @@ static void drm_test_check_output_bpc_dvi(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -988,7 +978,10 @@ static void drm_test_check_tmds_char_rate_rgb_8bpc(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1037,7 +1030,10 @@ static void drm_test_check_tmds_char_rate_rgb_10bpc(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1086,7 +1082,10 @@ static void 
drm_test_check_tmds_char_rate_rgb_12bpc(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); conn_state = conn->state; @@ -1134,7 +1133,10 @@ static void drm_test_check_hdmi_funcs_reject_rate(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); /* You shouldn't be doing that at home. */ @@ -1208,7 +1210,10 @@ static void drm_test_check_max_tmds_rate_bpc_fallback(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1282,7 +1287,10 @@ static void drm_test_check_max_tmds_rate_format_fallback(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1347,7 +1355,10 @@ static void drm_test_check_output_bpc_format_vic_1(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, mode, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + mode, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1414,7 +1425,10 @@ static void drm_test_check_output_bpc_format_driver_rgb_only(struct kunit *test) drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1483,7 +1497,10 @@ static void drm_test_check_output_bpc_format_display_rgb_only(struct kunit *test drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1543,7 +1560,10 @@ static void drm_test_check_output_bpc_format_driver_8bpc_only(struct kunit *test drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1605,7 +1625,10 @@ static void drm_test_check_output_bpc_format_display_8bpc_only(struct kunit *tes drm_modeset_acquire_init(&ctx, 0); - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_EXPECT_EQ(test, ret, 0); conn_state = conn->state; @@ -1645,7 +1668,10 @@ static void drm_test_check_disable_connector(struct kunit *test) drm = &priv->drm; crtc = priv->crtc; - ret = light_up_connector(test, drm, crtc, conn, preferred, &ctx); + ret = drm_kunit_helper_enable_crtc_connector(test, drm, + crtc, conn, + preferred, + &ctx); KUNIT_ASSERT_EQ(test, ret, 0); state = drm_kunit_helper_atomic_state_alloc(test, drm, &ctx); 
From e0c358e2edf5906689ecb46053d755fcaf4339f0 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:01 +0100 Subject: [PATCH 0149/1627] drm/tests: Create tests for drm_atomic We don't have a set of kunit tests for the functions under drm_atomic.h. Let's use the introduction of drm_atomic_get_connector_for_encoder() to create some tests for it and thus create that set. Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-7-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/Makefile | 1 + drivers/gpu/drm/tests/drm_atomic_test.c | 153 ++++++++++++++++++++++++ 2 files changed, 154 insertions(+) create mode 100644 drivers/gpu/drm/tests/drm_atomic_test.c diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 6691c577d2d4..3afd6587df08 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_KUNIT_TEST_HELPERS) += \ drm_kunit_helpers.o obj-$(CONFIG_DRM_KUNIT_TEST) += \ + drm_atomic_test.o \ drm_atomic_state_test.o \ drm_bridge_test.o \ drm_buddy_test.o \ diff --git a/drivers/gpu/drm/tests/drm_atomic_test.c b/drivers/gpu/drm/tests/drm_atomic_test.c new file mode 100644 index 000000000000..ea91bec6569e --- /dev/null +++ b/drivers/gpu/drm/tests/drm_atomic_test.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kunit test for drm_atomic functions + */ +#include +#include +#include +#include +#include +#include + +#include + +struct drm_atomic_test_priv { + struct drm_device drm; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct drm_encoder encoder; + struct drm_connector connector; +}; + +static const struct drm_connector_helper_funcs drm_atomic_init_connector_helper_funcs = { +}; + +static const struct drm_connector_funcs drm_atomic_init_connector_funcs = { + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .reset = drm_atomic_helper_connector_reset, +}; + +static struct drm_atomic_test_priv *create_device(struct kunit *test) +{ + struct drm_atomic_test_priv *priv; + struct drm_connector *connector; + struct drm_encoder *enc; + struct drm_device *drm; + struct drm_plane *plane; + struct drm_crtc *crtc; + struct device *dev; + int ret; + + dev = drm_kunit_helper_alloc_device(test); + if (IS_ERR(dev)) + return ERR_CAST(dev); + + priv = drm_kunit_helper_alloc_drm_device(test, dev, + struct drm_atomic_test_priv, drm, + DRIVER_MODESET | DRIVER_ATOMIC); + if (IS_ERR(priv)) + return ERR_CAST(priv); + + drm = &priv->drm; + plane = drm_kunit_helper_create_primary_plane(test, drm, + NULL, + NULL, + NULL, 0, + NULL); + if (IS_ERR(plane)) + return ERR_CAST(plane); + priv->plane = plane; + + crtc = drm_kunit_helper_create_crtc(test, drm, + plane, NULL, + NULL, + NULL); + if (IS_ERR(crtc)) + return ERR_CAST(crtc); + priv->crtc = crtc; + + enc = &priv->encoder; + ret = drmm_encoder_init(drm, enc, NULL, DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ERR_PTR(ret); + + enc->possible_crtcs = drm_crtc_mask(crtc); + + connector = &priv->connector; + ret = drmm_connector_init(drm, connector, + &drm_atomic_init_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL, + NULL); + if (ret) + return ERR_PTR(ret); + + drm_connector_helper_add(connector, &drm_atomic_init_connector_helper_funcs); + + drm_connector_attach_encoder(connector, enc); + + drm_mode_config_reset(drm); + + return priv; +} + +static void 
drm_test_drm_atomic_get_connector_for_encoder(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_atomic_test_priv *priv; + struct drm_display_mode *mode; + struct drm_connector *curr_connector; + int ret; + + priv = create_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + drm_modeset_acquire_init(&ctx, 0); + +retry_enable: + ret = drm_kunit_helper_enable_crtc_connector(test, &priv->drm, + priv->crtc, &priv->connector, + mode, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_enable; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + drm_modeset_acquire_init(&ctx, 0); + +retry_conn: + curr_connector = drm_atomic_get_connector_for_encoder(&priv->encoder, + &ctx); + if (PTR_ERR(curr_connector) == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_conn; + } + KUNIT_EXPECT_PTR_EQ(test, curr_connector, &priv->connector); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); +} + +static struct kunit_case drm_atomic_get_connector_for_encoder_tests[] = { + KUNIT_CASE(drm_test_drm_atomic_get_connector_for_encoder), + { } +}; + + +static struct kunit_suite drm_atomic_get_connector_for_encoder_test_suite = { + .name = "drm_test_atomic_get_connector_for_encoder", + .test_cases = drm_atomic_get_connector_for_encoder_tests, +}; + +kunit_test_suite(drm_atomic_get_connector_for_encoder_test_suite); + +MODULE_AUTHOR("Maxime Ripard "); +MODULE_DESCRIPTION("Kunit test for drm_atomic functions"); +MODULE_LICENSE("GPL"); From 56ae6212417702f7e456007b2afa834810ed10a6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:02 +0100 Subject: [PATCH 0150/1627] drm/bridge: Add helper to reset bridge pipeline Let's provide an helper to make it easier for bridge drivers to power-cycle their bridge. In order to avoid a circular dependency between that new helper and drm_atomic_helper_reset_crtc(), this new helper will be in a drm_bridge_helper.c file to follow the pattern we have for other objects. Reviewed-by: Herve Codina Reviewed-by: Simona Vetter Tested-by: Herve Codina Co-developed-by: Simona Vetter Signed-off-by: Simona Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-8-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/Makefile | 1 + drivers/gpu/drm/drm_atomic_helper.c | 3 ++ drivers/gpu/drm/drm_bridge_helper.c | 58 +++++++++++++++++++++++++++++ include/drm/drm_bridge_helper.h | 12 ++++++ 4 files changed, 74 insertions(+) create mode 100644 drivers/gpu/drm/drm_bridge_helper.c create mode 100644 include/drm/drm_bridge_helper.h diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 4cd054188faf..5a332f7d3ecc 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -133,6 +133,7 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o drm_kms_helper-y := \ drm_atomic_helper.o \ drm_atomic_state_helper.o \ + drm_bridge_helper.o \ drm_crtc_helper.o \ drm_damage_helper.o \ drm_flip_work.o \ diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 5302ab324898..ee64ca1b1bec 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3409,6 +3409,9 @@ EXPORT_SYMBOL(drm_atomic_helper_disable_all); * This implies a reset of all active components available between the CRTC and * connectors. 
* + * A variant of this function exists with + * drm_bridge_helper_reset_crtc(), dedicated to bridges. + * * NOTE: This relies on resetting &drm_crtc_state.connectors_changed. * For drivers which optimize out unnecessary modesets this will result in * a no-op commit, achieving nothing. diff --git a/drivers/gpu/drm/drm_bridge_helper.c b/drivers/gpu/drm/drm_bridge_helper.c new file mode 100644 index 000000000000..af80d2496194 --- /dev/null +++ b/drivers/gpu/drm/drm_bridge_helper.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include +#include +#include +#include +#include + +/** + * drm_bridge_helper_reset_crtc - Reset the pipeline feeding a bridge + * @bridge: DRM bridge to reset + * @ctx: lock acquisition context + * + * Reset a @bridge pipeline. It will power-cycle all active components + * between the CRTC and connector that bridge is connected to. + * + * As it relies on drm_atomic_helper_reset_crtc(), the same limitations + * apply. + * + * Returns: + * + * 0 on success or a negative error code on failure. If the error + * returned is EDEADLK, the whole atomic sequence must be restarted. + */ +int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge, + struct drm_modeset_acquire_ctx *ctx) +{ + struct drm_connector *connector; + struct drm_encoder *encoder = bridge->encoder; + struct drm_device *dev = encoder->dev; + struct drm_crtc *crtc; + int ret; + + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, ctx); + if (ret) + return ret; + + connector = drm_atomic_get_connector_for_encoder(encoder, ctx); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + goto out; + } + + if (!connector->state) { + ret = -EINVAL; + goto out; + } + + crtc = connector->state->crtc; + ret = drm_atomic_helper_reset_crtc(crtc, ctx); + if (ret) + goto out; + +out: + drm_modeset_unlock(&dev->mode_config.connection_mutex); + return ret; +} +EXPORT_SYMBOL(drm_bridge_helper_reset_crtc); diff --git a/include/drm/drm_bridge_helper.h b/include/drm/drm_bridge_helper.h new file mode 100644 index 000000000000..6c35b479ec2a --- /dev/null +++ b/include/drm/drm_bridge_helper.h @@ -0,0 +1,12 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#ifndef __DRM_BRIDGE_HELPER_H_ +#define __DRM_BRIDGE_HELPER_H_ + +struct drm_bridge; +struct drm_modeset_acquire_ctx; + +int drm_bridge_helper_reset_crtc(struct drm_bridge *bridge, + struct drm_modeset_acquire_ctx *ctx); + +#endif // __DRM_BRIDGE_HELPER_H_ From d4dfff472e3936bd2ff3a61c3830237f8c442e41 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:03 +0100 Subject: [PATCH 0151/1627] drm/tests: bridge: Provide tests for drm_bridge_helper_reset_crtc Let's provide a bunch of kunit tests to make sure drm_bridge_helper_reset_crtc() works as expected. 
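For context, the intended driver-side calling convention, sketched under the assumption of a bridge driver holding its own acquire context (this is the pattern the ti-sn65dsi83 conversion later in this series follows):

	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);

retry:
	/* power-cycle the whole pipeline feeding this bridge */
	ret = drm_bridge_helper_reset_crtc(bridge, &ctx);
	if (ret == -EDEADLK) {
		/* lock contention: back off and restart the sequence */
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);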
Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-9-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/tests/drm_bridge_test.c | 209 +++++++++++++++++++++++- 1 file changed, 208 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tests/drm_bridge_test.c b/drivers/gpu/drm/tests/drm_bridge_test.c index c0a05c459d95..ff88ec2e911c 100644 --- a/drivers/gpu/drm/tests/drm_bridge_test.c +++ b/drivers/gpu/drm/tests/drm_bridge_test.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include @@ -16,12 +17,52 @@ struct drm_bridge_init_priv { struct drm_encoder encoder; struct drm_bridge bridge; struct drm_connector *connector; + unsigned int enable_count; + unsigned int disable_count; }; +static void drm_test_bridge_enable(struct drm_bridge *bridge) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->enable_count++; +} + +static void drm_test_bridge_disable(struct drm_bridge *bridge) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->disable_count++; +} + static const struct drm_bridge_funcs drm_test_bridge_legacy_funcs = { + .enable = drm_test_bridge_enable, + .disable = drm_test_bridge_disable, }; +static void drm_test_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->enable_count++; +} + +static void drm_test_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_bridge_init_priv *priv = + container_of(bridge, struct drm_bridge_init_priv, bridge); + + priv->disable_count++; +} + static const struct drm_bridge_funcs drm_test_bridge_atomic_funcs = { + .atomic_enable = drm_test_bridge_atomic_enable, + .atomic_disable = drm_test_bridge_atomic_disable, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_reset = drm_atomic_helper_bridge_reset, @@ -203,7 +244,173 @@ static struct kunit_suite drm_bridge_get_current_state_test_suite = { .test_cases = drm_bridge_get_current_state_tests, }; -kunit_test_suite(drm_bridge_get_current_state_test_suite); +/* + * Test that an atomic bridge is properly power-cycled when calling + * drm_bridge_helper_reset_crtc(). 
+ */ +static void drm_test_drm_bridge_helper_reset_crtc_atomic(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_display_mode *mode; + struct drm_bridge *bridge; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + drm_modeset_acquire_init(&ctx, 0); + +retry_commit: + ret = drm_kunit_helper_enable_crtc_connector(test, + &priv->drm, priv->crtc, + priv->connector, + mode, + &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_commit; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + bridge = &priv->bridge; + KUNIT_ASSERT_EQ(test, priv->enable_count, 1); + KUNIT_ASSERT_EQ(test, priv->disable_count, 0); + + drm_modeset_acquire_init(&ctx, 0); + +retry_reset: + ret = drm_bridge_helper_reset_crtc(bridge, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_reset; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + KUNIT_EXPECT_EQ(test, priv->enable_count, 2); + KUNIT_EXPECT_EQ(test, priv->disable_count, 1); +} + +/* + * Test that calling drm_bridge_helper_reset_crtc() on a disabled atomic + * bridge will fail and not call the enable / disable callbacks + */ +static void drm_test_drm_bridge_helper_reset_crtc_atomic_disabled(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_display_mode *mode; + struct drm_bridge *bridge; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_atomic_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + bridge = &priv->bridge; + KUNIT_ASSERT_EQ(test, priv->enable_count, 0); + KUNIT_ASSERT_EQ(test, priv->disable_count, 0); + + drm_modeset_acquire_init(&ctx, 0); + +retry_reset: + ret = drm_bridge_helper_reset_crtc(bridge, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_reset; + } + KUNIT_EXPECT_LT(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + KUNIT_EXPECT_EQ(test, priv->enable_count, 0); + KUNIT_EXPECT_EQ(test, priv->disable_count, 0); +} + +/* + * Test that a non-atomic bridge is properly power-cycled when calling + * drm_bridge_helper_reset_crtc(). 
+ */ +static void drm_test_drm_bridge_helper_reset_crtc_legacy(struct kunit *test) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_bridge_init_priv *priv; + struct drm_display_mode *mode; + struct drm_bridge *bridge; + int ret; + + priv = drm_test_bridge_init(test, &drm_test_bridge_legacy_funcs); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, priv); + + mode = drm_kunit_display_mode_from_cea_vic(test, &priv->drm, 16); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mode); + + drm_modeset_acquire_init(&ctx, 0); + +retry_commit: + ret = drm_kunit_helper_enable_crtc_connector(test, + &priv->drm, priv->crtc, + priv->connector, + mode, + &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_commit; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + bridge = &priv->bridge; + KUNIT_ASSERT_EQ(test, priv->enable_count, 1); + KUNIT_ASSERT_EQ(test, priv->disable_count, 0); + + drm_modeset_acquire_init(&ctx, 0); + +retry_reset: + ret = drm_bridge_helper_reset_crtc(bridge, &ctx); + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry_reset; + } + KUNIT_ASSERT_EQ(test, ret, 0); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + KUNIT_EXPECT_EQ(test, priv->enable_count, 2); + KUNIT_EXPECT_EQ(test, priv->disable_count, 1); +} + +static struct kunit_case drm_bridge_helper_reset_crtc_tests[] = { + KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic), + KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_atomic_disabled), + KUNIT_CASE(drm_test_drm_bridge_helper_reset_crtc_legacy), + { } +}; + +static struct kunit_suite drm_bridge_helper_reset_crtc_test_suite = { + .name = "drm_test_bridge_helper_reset_crtc", + .test_cases = drm_bridge_helper_reset_crtc_tests, +}; + +kunit_test_suites( + &drm_bridge_get_current_state_test_suite, + &drm_bridge_helper_reset_crtc_test_suite, +); MODULE_AUTHOR("Maxime Ripard "); MODULE_DESCRIPTION("Kunit test for drm_bridge functions"); From e17fadff7ab9b1536af8120d161e6c0a450961ed Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:04 +0100 Subject: [PATCH 0152/1627] drm/bridge: ti-sn65dsi83: Switch to drm_bridge_helper_reset_crtc Now that we have a helper for bridge drivers to call to reset the output pipeline, let's use it. Reviewed-by: Dmitry Baryshkov Reviewed-by: Herve Codina Tested-by: Herve Codina Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-10-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/bridge/ti-sn65dsi83.c | 28 +++++++++++---------------- 1 file changed, 11 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 7122a3ebd883..53cc4cfb0c88 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -40,7 +40,7 @@ #include #include -#include /* DRM_MODESET_LOCK_ALL_BEGIN() needs drm_drv_uses_atomic_modeset() */ +#include #include #include #include @@ -371,7 +371,6 @@ static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx) static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83) { - struct drm_device *dev = sn65dsi83->bridge.dev; struct drm_modeset_acquire_ctx ctx; int err; @@ -386,26 +385,21 @@ static int sn65dsi83_reset_pipe(struct sn65dsi83 *sn65dsi83) * Keep the lock during the whole operation to be atomic. 
*/ - DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, err); - - if (!sn65dsi83->bridge.encoder->crtc) { - /* - * No CRTC attached -> No CRTC active outputs to reset - * This can happen when the SN65DSI83 is reset. Simply do - * nothing without returning any errors. - */ - err = 0; - goto end; - } + drm_modeset_acquire_init(&ctx, 0); dev_warn(sn65dsi83->dev, "reset the pipe\n"); - err = drm_atomic_helper_reset_crtc(sn65dsi83->bridge.encoder->crtc, &ctx); +retry: + err = drm_bridge_helper_reset_crtc(&sn65dsi83->bridge, &ctx); + if (err == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } -end: - DRM_MODESET_LOCK_ALL_END(dev, ctx, err); + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); - return err; + return 0; } static void sn65dsi83_reset_work(struct work_struct *ws) From ba6c94d51a87bb4f1faacd3bfa33af57ea9b84eb Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:05 +0100 Subject: [PATCH 0153/1627] drm/bridge: Introduce drm_bridge_is_atomic() helper We test for whether the bridge is atomic in several places in the source code, so let's consolidate them. Suggested-by: Dmitry Baryshkov Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-11-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/drm_bridge.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index 5bdce9db4475..ef98e21dc593 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -281,6 +281,11 @@ static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = { .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state, }; +static bool drm_bridge_is_atomic(struct drm_bridge *bridge) +{ + return bridge->funcs->atomic_reset != NULL; +} + /** * drm_bridge_attach - attach the bridge to an encoder's chain * @@ -333,7 +338,7 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, goto err_reset_bridge; } - if (bridge->funcs->atomic_reset) { + if (drm_bridge_is_atomic(bridge)) { struct drm_bridge_state *state; state = bridge->funcs->atomic_reset(bridge); @@ -378,7 +383,7 @@ void drm_bridge_detach(struct drm_bridge *bridge) if (WARN_ON(!bridge->dev)) return; - if (bridge->funcs->atomic_reset) + if (drm_bridge_is_atomic(bridge)) drm_atomic_private_obj_fini(&bridge->base); if (bridge->funcs->detach) From 68c98e227a960c567530b2c4c6765fdeab993e3b Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:06 +0100 Subject: [PATCH 0154/1627] drm/bridge: cdns-csi: Switch to atomic helpers The Cadence DSI driver follows the drm_encoder->crtc pointer that is deprecated and shouldn't be used by atomic drivers. Fortunately, the atomic hooks provide the drm_atomic_state and we can access our current CRTC from that, going from the bridge to its encoder, to its connector, and to its CRTC. Let's convert this bridge driver to atomic so we can get rid of the drm_encoder->crtc dereference. 
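The conversion pattern shared by the bridge patches below, sketched as a hypothetical my_bridge_atomic_enable() hook: the adjusted mode is looked up through the drm_atomic_state passed to the atomic hook instead of dereferencing the deprecated encoder->crtc pointer:

static void my_bridge_atomic_enable(struct drm_bridge *bridge,
				    struct drm_atomic_state *state)
{
	struct drm_connector *connector;
	struct drm_connector_state *conn_state;
	struct drm_crtc_state *crtc_state;
	const struct drm_display_mode *mode;

	/* bridge -> encoder -> new connector -> connector state -> crtc state */
	connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
	conn_state = drm_atomic_get_new_connector_state(state, connector);
	crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
	mode = &crtc_state->adjusted_mode;

	/* ... program the hardware for "mode" ... */
}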
Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-12-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- .../gpu/drm/bridge/cadence/cdns-dsi-core.c | 31 +++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c index 8f54c034ac4f..99d43944fb8f 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-dsi-core.c @@ -656,7 +656,8 @@ cdns_dsi_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void cdns_dsi_bridge_disable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); @@ -676,7 +677,8 @@ static void cdns_dsi_bridge_disable(struct drm_bridge *bridge) pm_runtime_put(dsi->base.dev); } -static void cdns_dsi_bridge_post_disable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); @@ -753,13 +755,17 @@ static void cdns_dsi_init_link(struct cdns_dsi *dsi) dsi->link_initialized = true; } -static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); struct cdns_dsi_output *output = &dsi->output; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; struct drm_display_mode *mode; struct phy_configure_opts_mipi_dphy *phy_cfg = &output->phy_opts.mipi_dphy; + struct drm_connector *connector; unsigned long tx_byte_period; struct cdns_dsi_cfg dsi_cfg; u32 tmp, reg_wakeup, div; @@ -771,7 +777,10 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) if (dsi->platform_ops && dsi->platform_ops->enable) dsi->platform_ops->enable(dsi); - mode = &bridge->encoder->crtc->state->adjusted_mode; + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + conn_state = drm_atomic_get_new_connector_state(state, connector); + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + mode = &crtc_state->adjusted_mode; nlanes = output->dev->lanes; WARN_ON_ONCE(cdns_dsi_check_conf(dsi, mode, &dsi_cfg, false)); @@ -893,7 +902,8 @@ static void cdns_dsi_bridge_enable(struct drm_bridge *bridge) writel(tmp, dsi->regs + MCTL_MAIN_EN); } -static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge) +static void cdns_dsi_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge); struct cdns_dsi *dsi = input_to_dsi(input); @@ -908,10 +918,13 @@ static void cdns_dsi_bridge_pre_enable(struct drm_bridge *bridge) static const struct drm_bridge_funcs cdns_dsi_bridge_funcs = { .attach = cdns_dsi_bridge_attach, .mode_valid = cdns_dsi_bridge_mode_valid, - .disable = cdns_dsi_bridge_disable, - .pre_enable = cdns_dsi_bridge_pre_enable, - .enable = cdns_dsi_bridge_enable, - .post_disable = cdns_dsi_bridge_post_disable, + .atomic_disable = cdns_dsi_bridge_atomic_disable, + .atomic_pre_enable = cdns_dsi_bridge_atomic_pre_enable, + 
.atomic_enable = cdns_dsi_bridge_atomic_enable, + .atomic_post_disable = cdns_dsi_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, }; static int cdns_dsi_attach(struct mipi_dsi_host *host, From ae875180318a51b2812295e8948162108f5e46ef Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:07 +0100 Subject: [PATCH 0155/1627] drm/bridge: tc358775: Switch to atomic commit The tc358775 driver follows the drm_encoder->crtc pointer that is deprecated and shouldn't be used by atomic drivers. Fortunately, the atomic hooks provide the drm_atomic_state and we can access our current CRTC from that, going from the bridge to its encoder, to its connector, and to its CRTC. Let's convert this bridge driver to atomic so we can get rid of the drm_encoder->crtc dereference. Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-13-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/bridge/tc358775.c | 42 ++++++++++++++----------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358775.c b/drivers/gpu/drm/bridge/tc358775.c index 13cd48e77d2d..1b10e6ee1724 100644 --- a/drivers/gpu/drm/bridge/tc358775.c +++ b/drivers/gpu/drm/bridge/tc358775.c @@ -286,7 +286,8 @@ static inline struct tc_data *bridge_to_tc(struct drm_bridge *b) return container_of(b, struct tc_data, bridge); } -static void tc_bridge_pre_enable(struct drm_bridge *bridge) +static void tc_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc_data *tc = bridge_to_tc(bridge); struct device *dev = &tc->dsi->dev; @@ -309,7 +310,8 @@ static void tc_bridge_pre_enable(struct drm_bridge *bridge) usleep_range(10, 20); } -static void tc_bridge_post_disable(struct drm_bridge *bridge) +static void tc_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc_data *tc = bridge_to_tc(bridge); struct device *dev = &tc->dsi->dev; @@ -368,30 +370,21 @@ static void d2l_write(struct i2c_client *i2c, u16 addr, u32 val) ret, addr); } -/* helper function to access bus_formats */ -static struct drm_connector *get_connector(struct drm_encoder *encoder) -{ - struct drm_device *dev = encoder->dev; - struct drm_connector *connector; - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) - if (connector->encoder == encoder) - return connector; - - return NULL; -} - -static void tc_bridge_enable(struct drm_bridge *bridge) +static void tc_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc_data *tc = bridge_to_tc(bridge); u32 hback_porch, hsync_len, hfront_porch, hactive, htime1, htime2; u32 vback_porch, vsync_len, vfront_porch, vactive, vtime1, vtime2; u32 val = 0; u16 dsiclk, clkdiv, byteclk, t1, t2, t3, vsdelay; - struct drm_display_mode *mode; - struct drm_connector *connector = get_connector(bridge->encoder); - - mode = &bridge->encoder->crtc->state->adjusted_mode; + struct drm_connector *connector = + drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; 
hback_porch = mode->htotal - mode->hsync_end; hsync_len = mode->hsync_end - mode->hsync_start; @@ -601,10 +594,13 @@ static int tc_bridge_attach(struct drm_bridge *bridge, static const struct drm_bridge_funcs tc_bridge_funcs = { .attach = tc_bridge_attach, - .pre_enable = tc_bridge_pre_enable, - .enable = tc_bridge_enable, + .atomic_pre_enable = tc_bridge_atomic_pre_enable, + .atomic_enable = tc_bridge_atomic_enable, .mode_valid = tc_mode_valid, - .post_disable = tc_bridge_post_disable, + .atomic_post_disable = tc_bridge_atomic_post_disable, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, }; static int tc_attach_host(struct tc_data *tc) From 9c77154b71ad936227d0c407835854604c532700 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:08 +0100 Subject: [PATCH 0156/1627] drm/bridge: tc358768: Stop disabling when failing to enable The tc358768 bridge driver, if enabling it fails, tries to disable it. This is pretty uncommon in bridge drivers, and also stands in the way for further reworks. Worse, since pre_enable and enable aren't expected to fail, disable and post_disable might be called twice: once to handle the failure, and once to actually disable the bridge. Since post_disable uses regulators and clocks, this would lead to enable count imbalances. In order to prevent that imbalance, and to allow further reworks, let's drop the calls to disable and post_disable, but keep the warning to let users know about what's going on. Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-14-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/bridge/tc358768.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index 6db18d1e8824..6b65ba8aed86 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -1077,11 +1077,8 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) tc358768_write(priv, TC358768_DSI_CONFW, val); ret = tc358768_clear_error(priv); - if (ret) { + if (ret) dev_err(dev, "Bridge pre_enable failed: %d\n", ret); - tc358768_bridge_disable(bridge); - tc358768_bridge_post_disable(bridge); - } } static void tc358768_bridge_enable(struct drm_bridge *bridge) @@ -1101,11 +1098,8 @@ static void tc358768_bridge_enable(struct drm_bridge *bridge) tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6)); ret = tc358768_clear_error(priv); - if (ret) { + if (ret) dev_err(priv->dev, "Bridge enable failed: %d\n", ret); - tc358768_bridge_disable(bridge); - tc358768_bridge_post_disable(bridge); - } } #define MAX_INPUT_SEL_FORMATS 1 From 070bac234bc6e8e925f87bb5a5ab1a98b890fce8 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:09 +0100 Subject: [PATCH 0157/1627] drm/bridge: tc358768: Convert to atomic helpers The tc358768 driver follows the drm_encoder->crtc pointer that is deprecated and shouldn't be used by atomic drivers. Fortunately, the atomic hooks provide the drm_atomic_state and we can access our current CRTC from that, going from the bridge to its encoder, to its connector, and to its CRTC. Let's convert this bridge driver to atomic so we can get rid of the drm_encoder->crtc dereference. 
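The resulting drm_bridge_funcs layout after such a conversion, sketched for a hypothetical driver (the my_bridge_* hooks are placeholders; the three atomic state helpers are the stock ones used throughout this series, and having .atomic_reset set is what makes drm_bridge_attach() create a bridge state for the bridge):

static const struct drm_bridge_funcs my_bridge_funcs = {
	.attach = my_bridge_attach,
	.mode_valid = my_bridge_mode_valid,
	.atomic_pre_enable = my_bridge_atomic_pre_enable,
	.atomic_enable = my_bridge_atomic_enable,
	.atomic_disable = my_bridge_atomic_disable,
	.atomic_post_disable = my_bridge_atomic_post_disable,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
};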
Reviewed-by: Dmitry Baryshkov Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-15-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/bridge/tc358768.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c index 6b65ba8aed86..063f217a17b6 100644 --- a/drivers/gpu/drm/bridge/tc358768.c +++ b/drivers/gpu/drm/bridge/tc358768.c @@ -581,7 +581,8 @@ tc358768_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static void tc358768_bridge_disable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); int ret; @@ -603,7 +604,8 @@ static void tc358768_bridge_disable(struct drm_bridge *bridge) dev_warn(priv->dev, "Software disable failed: %d\n", ret); } -static void tc358768_bridge_post_disable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); @@ -683,13 +685,17 @@ static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val) return (u32)div_u64(m, n); } -static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); struct mipi_dsi_device *dsi_dev = priv->output.dev; unsigned long mode_flags = dsi_dev->mode_flags; u32 val, val2, lptxcnt, hact, data_type; s32 raw_val; + struct drm_crtc_state *crtc_state; + struct drm_connector_state *conn_state; + struct drm_connector *connector; const struct drm_display_mode *mode; u32 hsbyteclk_ps, dsiclk_ps, ui_ps; u32 dsiclk, hsbyteclk; @@ -720,7 +726,10 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) return; } - mode = &bridge->encoder->crtc->state->adjusted_mode; + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + conn_state = drm_atomic_get_new_connector_state(state, connector); + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + mode = &crtc_state->adjusted_mode; ret = tc358768_setup_pll(priv, mode); if (ret) { dev_err(dev, "PLL setup failed: %d\n", ret); @@ -1081,7 +1090,8 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge) dev_err(dev, "Bridge pre_enable failed: %d\n", ret); } -static void tc358768_bridge_enable(struct drm_bridge *bridge) +static void tc358768_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) { struct tc358768_priv *priv = bridge_to_tc358768(bridge); int ret; @@ -1161,10 +1171,10 @@ static const struct drm_bridge_funcs tc358768_bridge_funcs = { .attach = tc358768_bridge_attach, .mode_valid = tc358768_bridge_mode_valid, .mode_fixup = tc358768_mode_fixup, - .pre_enable = tc358768_bridge_pre_enable, - .enable = tc358768_bridge_enable, - .disable = tc358768_bridge_disable, - .post_disable = tc358768_bridge_post_disable, + .atomic_pre_enable = tc358768_bridge_atomic_pre_enable, + .atomic_enable = tc358768_bridge_atomic_enable, + .atomic_disable = tc358768_bridge_atomic_disable, + .atomic_post_disable = tc358768_bridge_atomic_post_disable, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, From 
f24d1d4a7a425e67551ca8d86a89df7102766ac9 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 13 Mar 2025 13:00:10 +0100 Subject: [PATCH 0158/1627] drm/bridge: ti-sn65dsi86: Remove drm_encoder->crtc use The TI sn65dsi86 driver follows the drm_encoder->crtc pointer that is deprecated and shouldn't be used by atomic drivers. Fortunately, the atomic hooks provide the drm_atomic_state and we can access our current CRTC from that, going from the bridge to its encoder, to its connector, and to its CRTC. This bridge driver uses the atomic hooks already, but dereferences the drm_encoder->crtc pointer in functions that don't have access to it. Let's rework the driver to pass the state where needed, and remove the need for the drm_encoder->crtc dereference. Reviewed-by: Douglas Anderson Tested-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20250313-bridge-connector-v6-16-511c54a604fb@kernel.org Signed-off-by: Maxime Ripard --- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 55 ++++++++++++++++++--------- 1 file changed, 38 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 190929a41abd..fd68ad2e2718 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -243,11 +243,26 @@ static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata, regmap_bulk_write(pdata->regmap, reg, buf, ARRAY_SIZE(buf)); } -static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata) +static struct drm_display_mode * +get_new_adjusted_display_mode(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct drm_connector *connector = + drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, conn_state->crtc); + + return &crtc_state->adjusted_mode; +} + +static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { u32 bit_rate_khz, clk_freq_khz; struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); bit_rate_khz = mode->clock * mipi_dsi_pixel_format_to_bpp(pdata->dsi->format); @@ -274,7 +289,8 @@ static const u32 ti_sn_bridge_dsiclk_lut[] = { 460800000, }; -static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { int i; u32 refclk_rate; @@ -287,7 +303,7 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut); clk_prepare_enable(pdata->refclk); } else { - refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000; + refclk_rate = ti_sn_bridge_get_dsi_freq(pdata, state) * 1000; refclk_lut = ti_sn_bridge_dsiclk_lut; refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut); } @@ -311,12 +327,13 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata) pdata->pwm_refclk_freq = ti_sn_bridge_refclk_lut[i]; } -static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata) +static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { mutex_lock(&pdata->comms_mutex); /* configure bridge ref_clk */ - ti_sn_bridge_set_refclk_freq(pdata); + ti_sn_bridge_set_refclk_freq(pdata, state); /* * HPD on this bridge chip is a bit useless. 
This is an eDP bridge @@ -376,7 +393,7 @@ static int __maybe_unused ti_sn65dsi86_resume(struct device *dev) * clock so reading early doesn't work. */ if (pdata->refclk) - ti_sn65dsi86_enable_comms(pdata); + ti_sn65dsi86_enable_comms(pdata, NULL); return ret; } @@ -822,12 +839,13 @@ static void ti_sn_bridge_atomic_disable(struct drm_bridge *bridge, regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0); } -static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { unsigned int bit_rate_mhz, clk_freq_mhz; unsigned int val; struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); /* set DSIA clk frequency */ bit_rate_mhz = (mode->clock / 1000) * @@ -857,12 +875,14 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = { 0, 1620, 2160, 2430, 2700, 3240, 4320, 5400 }; -static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, unsigned int bpp) +static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state, + unsigned int bpp) { unsigned int bit_rate_khz, dp_rate_mhz; unsigned int i; struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); /* Calculate minimum bit rate based on our pixel clock. */ bit_rate_khz = mode->clock * bpp; @@ -961,10 +981,11 @@ static unsigned int ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata) return valid_rates; } -static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata) +static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata, + struct drm_atomic_state *state) { struct drm_display_mode *mode = - &pdata->bridge.encoder->crtc->state->adjusted_mode; + get_new_adjusted_display_mode(&pdata->bridge, state); u8 hsync_polarity = 0, vsync_polarity = 0; if (mode->flags & DRM_MODE_FLAG_NHSYNC) @@ -1106,7 +1127,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge, pdata->ln_polrs << LN_POLRS_OFFSET); /* set dsi clk frequency value */ - ti_sn_bridge_set_dsi_rate(pdata); + ti_sn_bridge_set_dsi_rate(pdata, state); /* * The SN65DSI86 only supports ASSR Display Authentication method and @@ -1141,7 +1162,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge, valid_rates = ti_sn_bridge_read_valid_rates(pdata); /* Train until we run out of rates */ - for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, bpp); + for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata, state, bpp); dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); dp_rate_idx++) { if (!(valid_rates & BIT(dp_rate_idx))) @@ -1157,7 +1178,7 @@ static void ti_sn_bridge_atomic_enable(struct drm_bridge *bridge, } /* config video parameters */ - ti_sn_bridge_set_video_timings(pdata); + ti_sn_bridge_set_video_timings(pdata, state); /* enable video stream */ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, @@ -1172,7 +1193,7 @@ static void ti_sn_bridge_atomic_pre_enable(struct drm_bridge *bridge, pm_runtime_get_sync(pdata->dev); if (!pdata->refclk) - ti_sn65dsi86_enable_comms(pdata); + ti_sn65dsi86_enable_comms(pdata, state); /* td7: min 100 us after enable before DSI data */ usleep_range(100, 110); From 97e81f78d3cbf061a809bbb8180a5b96395b8e03 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Sat, 15 Mar 2025 20:01:43 +0800 Subject: [PATCH 0159/1627] drm/i915/display: Fix build 
error without DRM_FBDEV_EMULATION MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In file included from <command-line>: ./drivers/gpu/drm/i915/display/intel_fbdev.h: In function ‘intel_fbdev_framebuffer’: ./drivers/gpu/drm/i915/display/intel_fbdev.h:32:16: error: ‘NULL’ undeclared (first use in this function) 32 | return NULL; | ^~~~ ./drivers/gpu/drm/i915/display/intel_fbdev.h:1:1: note: ‘NULL’ is defined in header ‘<stddef.h>’; did you forget to ‘#include <stddef.h>’? +++ |+#include <stddef.h> 1 | /* SPDX-License-Identifier: MIT */ ./drivers/gpu/drm/i915/display/intel_fbdev.h:32:16: note: each undeclared identifier is reported only once for each function it appears in 32 | return NULL; | ^~~~ Build fails if CONFIG_DRM_FBDEV_EMULATION is n; add the missing header file. Fixes: 9fa154f40eb6 ("drm/{i915,xe}: Run DRM default client setup") Signed-off-by: Yue Haibing Acked-by: Thomas Zimmermann Link: https://lore.kernel.org/r/20250315120143.2344958-1-yuehaibing@huawei.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_fbdev.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h index ca2c8c438f02..89bad3a2b01a 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev.h @@ -6,6 +6,8 @@ #ifndef __INTEL_FBDEV_H__ #define __INTEL_FBDEV_H__ +#include + struct drm_fb_helper; struct drm_fb_helper_surface_size; struct drm_i915_private; From 7c53ff050ba88bb37eed3e17f2bb8ec592d83302 Mon Sep 17 00:00:00 2001 From: Vinay Belgaumkar Date: Thu, 20 Mar 2025 10:51:23 -0700 Subject: [PATCH 0160/1627] drm/xe: Apply Wa_16023105232 The WA requires the KMD to disable DOP clock gating during a semaphore wait, and also to ensure that the idle delay for every CS is lower than the idle wait time in the PWRCTX_MAXCNT register. Default values for these registers already comply with this restriction.
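A minimal sketch of the check this implies (not the driver code itself; the helper name is made up here, and the unit factors of 8 * timestamp_base picoseconds per IDLEDLY step and 640 ns per PWRCTX_MAXCNT step are taken from the patch below):

static bool idledly_below_maxcnt(u32 idledly_steps, u32 maxcnt_steps, u32 timestamp_base)
{
	/* IDLEDLY counts in units of 8 * timestamp_base picoseconds */
	u64 idledly_ns = div_u64((u64)idledly_steps * 8 * timestamp_base, 1000); /* div_u64() from linux/math64.h */
	/* PWRCTX_MAXCNT IDLE_WAIT_TIME counts in units of 640 ns */
	u64 maxcnt_ns = (u64)maxcnt_steps * 640;

	/* the WA is satisfied while the idle delay stays below the idle wait time */
	return idledly_ns < maxcnt_ns;
}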
v2: Store timestamp_base in gt info and other comments (Daniele) v3: Skip WA check for VF v4: Review comments (Matt Roper) v5: Cleanup the clock functions and use reg_field_get (Matt Roper) v6: Fix checkpatch issue v7: Fix CI issue Cc: Matt Roper Reviewed-by: Matt Roper Reviewed-by: Daniele Ceraolo Spurio Signed-off-by: Vinay Belgaumkar Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20250320175123.3026754-1-vinay.belgaumkar@intel.com --- drivers/gpu/drm/xe/regs/xe_engine_regs.h | 4 +++ drivers/gpu/drm/xe/xe_gt_clock.c | 39 ++++++++++++++++-------- drivers/gpu/drm/xe/xe_gt_types.h | 2 ++ drivers/gpu/drm/xe/xe_hw_engine.c | 33 ++++++++++++++++++++ drivers/gpu/drm/xe/xe_wa.c | 6 ++++ drivers/gpu/drm/xe/xe_wa_oob.rules | 2 ++ 6 files changed, 74 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h index 659cf85fa3d6..da713634d6a0 100644 --- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h @@ -130,6 +130,10 @@ #define RING_EXECLIST_STATUS_LO(base) XE_REG((base) + 0x234) #define RING_EXECLIST_STATUS_HI(base) XE_REG((base) + 0x234 + 4) +#define RING_IDLEDLY(base) XE_REG((base) + 0x23c) +#define INHIBIT_SWITCH_UNTIL_PREEMPTED REG_BIT(31) +#define IDLE_DELAY REG_GENMASK(20, 0) + #define RING_CONTEXT_CONTROL(base) XE_REG((base) + 0x244, XE_REG_OPTION_MASKED) #define CTX_CTRL_PXP_ENABLE REG_BIT(10) #define CTX_CTRL_OAC_CONTEXT_ENABLE REG_BIT(8) diff --git a/drivers/gpu/drm/xe/xe_gt_clock.c b/drivers/gpu/drm/xe/xe_gt_clock.c index fca38738e610..4f011d1573c6 100644 --- a/drivers/gpu/drm/xe/xe_gt_clock.c +++ b/drivers/gpu/drm/xe/xe_gt_clock.c @@ -16,27 +16,42 @@ #include "xe_macros.h" #include "xe_mmio.h" -static u32 get_crystal_clock_freq(u32 rpm_config_reg) +#define f19_2_mhz 19200000 +#define f24_mhz 24000000 +#define f25_mhz 25000000 +#define f38_4_mhz 38400000 +#define ts_base_83 83333 +#define ts_base_52 52083 +#define ts_base_80 80000 + +static void read_crystal_clock(struct xe_gt *gt, u32 rpm_config_reg, u32 *freq, + u32 *timestamp_base) { - const u32 f19_2_mhz = 19200000; - const u32 f24_mhz = 24000000; - const u32 f25_mhz = 25000000; - const u32 f38_4_mhz = 38400000; u32 crystal_clock = REG_FIELD_GET(RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK, rpm_config_reg); switch (crystal_clock) { case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: - return f24_mhz; + *freq = f24_mhz; + *timestamp_base = ts_base_83; + return; case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: - return f19_2_mhz; + *freq = f19_2_mhz; + *timestamp_base = ts_base_52; + return; case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: - return f38_4_mhz; + *freq = f38_4_mhz; + *timestamp_base = ts_base_52; + return; case RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: - return f25_mhz; + *freq = f25_mhz; + *timestamp_base = ts_base_80; + return; default: - XE_WARN_ON("NOT_POSSIBLE"); - return 0; + xe_gt_warn(gt, "Invalid crystal clock frequency: %u", crystal_clock); + *freq = 0; + *timestamp_base = 0; + return; } } @@ -65,7 +80,7 @@ int xe_gt_clock_init(struct xe_gt *gt) check_ctc_mode(gt); c0 = xe_mmio_read32(&gt->mmio, RPM_CONFIG0); - freq = get_crystal_clock_freq(c0); + read_crystal_clock(gt, c0, &freq, &gt->info.timestamp_base); /* * Now figure out how the command stream's timestamp diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index e3cfb026ac88..7def0959da35 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -121,6 +121,8 @@ struct xe_gt { enum
xe_gt_type type; /** @info.reference_clock: clock frequency */ u32 reference_clock; + /** @info.timestamp_base: GT timestamp base */ + u32 timestamp_base; /** * @info.engine_mask: mask of engines present on GT. Some of * them may be reserved in runtime and not available for user. diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 223b95de388c..8c05fd30b7df 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -8,7 +8,9 @@ #include #include +#include #include +#include "regs/xe_engine_regs.h" #include "regs/xe_gt_regs.h" @@ -21,6 +23,7 @@ #include "xe_gsc.h" #include "xe_gt.h" #include "xe_gt_ccs_mode.h" +#include "xe_gt_clock.h" #include "xe_gt_printk.h" #include "xe_gt_mcr.h" #include "xe_gt_topology.h" @@ -564,6 +567,33 @@ static void hw_engine_init_early(struct xe_gt *gt, struct xe_hw_engine *hwe, xe_reg_whitelist_process_engine(hwe); } +static void adjust_idledly(struct xe_hw_engine *hwe) +{ + struct xe_gt *gt = hwe->gt; + u32 idledly, maxcnt; + u32 idledly_units_ps = 8 * gt->info.timestamp_base; + u32 maxcnt_units_ns = 640; + bool inhibit_switch = 0; + + if (!IS_SRIOV_VF(gt_to_xe(hwe->gt)) && XE_WA(gt, 16023105232)) { + idledly = xe_mmio_read32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base)); + maxcnt = xe_mmio_read32(&gt->mmio, RING_PWRCTX_MAXCNT(hwe->mmio_base)); + + inhibit_switch = idledly & INHIBIT_SWITCH_UNTIL_PREEMPTED; + idledly = REG_FIELD_GET(IDLE_DELAY, idledly); + idledly = DIV_ROUND_CLOSEST(idledly * idledly_units_ps, 1000); + maxcnt = REG_FIELD_GET(IDLE_WAIT_TIME, maxcnt); + maxcnt *= maxcnt_units_ns; + + if (xe_gt_WARN_ON(gt, idledly >= maxcnt || inhibit_switch)) { + idledly = DIV_ROUND_CLOSEST(((maxcnt - 1) * maxcnt_units_ns), + idledly_units_ps); + idledly = DIV_ROUND_CLOSEST(idledly, 1000); + xe_mmio_write32(&gt->mmio, RING_IDLEDLY(hwe->mmio_base), idledly); + } + } +} + static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, enum xe_hw_engine_id id) { @@ -604,6 +634,9 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe, if (xe->info.has_usm && hwe->class == XE_ENGINE_CLASS_COPY) gt->usm.reserved_bcs_instance = hwe->instance; + /* Ensure IDLEDLY is lower than MAXCNT */ + adjust_idledly(hwe); + return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe); err_hwsp: diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index a25afb757f70..24f644c0a673 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -622,6 +622,12 @@ static const struct xe_rtp_entry_sr engine_was[] = { FUNC(xe_rtp_match_first_render_or_compute)), XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, RES_CHK_SPR_DIS)) }, + { XE_RTP_NAME("16023105232"), + XE_RTP_RULES(MEDIA_VERSION_RANGE(1301, 3000), OR, + GRAPHICS_VERSION_RANGE(2001, 3001)), + XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE, + XE_RTP_ACTION_FLAG(ENGINE_BASE))) + }, }; static const struct xe_rtp_entry_sr lrc_was[] = { diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules index e0c5fa460487..0c738af24f7c 100644 --- a/drivers/gpu/drm/xe/xe_wa_oob.rules +++ b/drivers/gpu/drm/xe/xe_wa_oob.rules @@ -53,3 +53,5 @@ no_media_l3 MEDIA_VERSION(3000) GRAPHICS_VERSION_RANGE(1270, 1274) 1508761755 GRAPHICS_VERSION(1255) GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0) +16023105232 GRAPHICS_VERSION_RANGE(2001, 3001) + MEDIA_VERSION_RANGE(1301, 3000) From 49567c41766f0b389ec8ec97775420d279675004 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:45:57 +0200
Subject: [PATCH 0161/1627] drm/i915/color: prefer display->platform. checks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This let's us drop the dependency on i915_drv.h. Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/d57fd6444c512b3cc35c0e216c86eeb95124eead.1742481923.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_color.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index cfe14162231d..bbf6df7ebb95 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -22,7 +22,7 @@ * */ -#include "i915_drv.h" +#include "i915_utils.h" #include "i9xx_plane_regs.h" #include "intel_color.h" #include "intel_color_regs.h" @@ -405,14 +405,13 @@ static void icl_read_csc(struct intel_crtc_state *crtc_state) static bool ilk_limited_range(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(display->drm); /* icl+ have dedicated output CSC */ if (DISPLAY_VER(display) >= 11) return false; /* pre-hsw have TRANSCONF_COLOR_RANGE_SELECT */ - if (DISPLAY_VER(display) < 7 || IS_IVYBRIDGE(i915)) + if (DISPLAY_VER(display) < 7 || display->platform.ivybridge) return false; return crtc_state->limited_color_range; @@ -516,7 +515,6 @@ static void ilk_csc_convert_ctm(const struct intel_crtc_state *crtc_state, static void ilk_assign_csc(struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct drm_i915_private *i915 = to_i915(display->drm); bool limited_color_range = ilk_csc_limited_range(crtc_state); if (crtc_state->hw.ctm) { @@ -538,7 +536,7 @@ static void ilk_assign_csc(struct intel_crtc_state *crtc_state) * LUT is needed but CSC is not we need to load an * identity matrix. */ - drm_WARN_ON(display->drm, !IS_GEMINILAKE(i915)); + drm_WARN_ON(display->drm, !display->platform.geminilake); ilk_csc_copy(display, &crtc_state->csc, &ilk_csc_matrix_identity); } else { @@ -3983,12 +3981,10 @@ int intel_color_init(struct intel_display *display) void intel_color_init_hooks(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (HAS_GMCH(display)) { - if (IS_CHERRYVIEW(i915)) + if (display->platform.cherryview) display->funcs.color = &chv_color_funcs; - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) display->funcs.color = &vlv_color_funcs; else if (DISPLAY_VER(display) >= 4) display->funcs.color = &i965_color_funcs; @@ -4005,7 +4001,7 @@ void intel_color_init_hooks(struct intel_display *display) display->funcs.color = &skl_color_funcs; else if (DISPLAY_VER(display) == 8) display->funcs.color = &bdw_color_funcs; - else if (IS_HASWELL(i915)) + else if (display->platform.haswell) display->funcs.color = &hsw_color_funcs; else if (DISPLAY_VER(display) == 7) display->funcs.color = &ivb_color_funcs; From 3e1966e2db6fbeb124cb32a0c57202662781a617 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:45:58 +0200 Subject: [PATCH 0162/1627] drm/i915/connector: convert intel_connector.c to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. 
Convert as much as possible of intel_connector.c to struct intel_display. i915_inject_probe_failure() remains the only call that requires i915 pointer. Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/398e3210459a65f74e78f2d34584cda6eea6a99b.1742481923.git.jani.nikula@intel.com --- .../gpu/drm/i915/display/intel_connector.c | 45 ++++++++++--------- 1 file changed, 23 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index e42357bd9e80..6c81c9f2fd09 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -31,8 +31,10 @@ #include #include "i915_drv.h" +#include "i915_utils.h" #include "intel_backlight.h" #include "intel_connector.h" +#include "intel_display_core.h" #include "intel_display_debugfs.h" #include "intel_display_types.h" #include "intel_hdcp.h" @@ -154,13 +156,14 @@ void intel_connector_destroy(struct drm_connector *connector) int intel_connector_register(struct drm_connector *connector) { struct intel_connector *intel_connector = to_intel_connector(connector); + struct drm_i915_private *i915 = to_i915(connector->dev); int ret; ret = intel_backlight_device_register(intel_connector); if (ret) goto err; - if (i915_inject_probe_failure(to_i915(connector->dev))) { + if (i915_inject_probe_failure(i915)) { ret = -EFAULT; goto err_backlight; } @@ -204,10 +207,10 @@ bool intel_connector_get_hw_state(struct intel_connector *connector) enum pipe intel_connector_get_pipe(struct intel_connector *connector) { - struct drm_device *dev = connector->base.dev; + struct intel_display *display = to_intel_display(connector); - drm_WARN_ON(dev, - !drm_modeset_is_locked(&dev->mode_config.connection_mutex)); + drm_WARN_ON(display->drm, + !drm_modeset_is_locked(&display->drm->mode_config.connection_mutex)); if (!connector->base.state->crtc) return INVALID_PIPE; @@ -264,20 +267,19 @@ static const struct drm_prop_enum_list force_audio_names[] = { void intel_attach_force_audio_property(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(connector->dev); struct drm_property *prop; - prop = dev_priv->display.properties.force_audio; + prop = display->properties.force_audio; if (prop == NULL) { - prop = drm_property_create_enum(dev, 0, - "audio", - force_audio_names, - ARRAY_SIZE(force_audio_names)); + prop = drm_property_create_enum(display->drm, 0, + "audio", + force_audio_names, + ARRAY_SIZE(force_audio_names)); if (prop == NULL) return; - dev_priv->display.properties.force_audio = prop; + display->properties.force_audio = prop; } drm_object_attach_property(&connector->base, prop, 0); } @@ -291,20 +293,19 @@ static const struct drm_prop_enum_list broadcast_rgb_names[] = { void intel_attach_broadcast_rgb_property(struct drm_connector *connector) { - struct drm_device *dev = connector->dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(connector->dev); struct drm_property *prop; - prop = dev_priv->display.properties.broadcast_rgb; + prop = display->properties.broadcast_rgb; if (prop == NULL) { - prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, - "Broadcast RGB", - broadcast_rgb_names, - ARRAY_SIZE(broadcast_rgb_names)); + prop = drm_property_create_enum(display->drm, DRM_MODE_PROP_ENUM, + "Broadcast RGB", + 
broadcast_rgb_names, + ARRAY_SIZE(broadcast_rgb_names)); if (prop == NULL) return; - dev_priv->display.properties.broadcast_rgb = prop; + display->properties.broadcast_rgb = prop; } drm_object_attach_property(&connector->base, prop, 0); @@ -336,14 +337,14 @@ intel_attach_dp_colorspace_property(struct drm_connector *connector) void intel_attach_scaling_mode_property(struct drm_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_display *display = to_intel_display(connector->dev); u32 scaling_modes; scaling_modes = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN); /* On GMCH platforms borders are only possible on the LVDS port */ - if (!HAS_GMCH(i915) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS) + if (!HAS_GMCH(display) || connector->connector_type == DRM_MODE_CONNECTOR_LVDS) scaling_modes |= BIT(DRM_MODE_SCALE_CENTER); drm_connector_attach_scaling_mode_property(connector, scaling_modes); From 4cd502aa7ef9fff33fa2d801735709d606a11ee2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:45:59 +0200 Subject: [PATCH 0163/1627] drm/i915/hotplug: convert intel_hotplug.[ch] to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert as much as possible of intel_hotplug.[ch] to struct intel_display. Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/cf382dbfacf1445b26fbe1e7c011e7a3ea6e1594.1742481923.git.jani.nikula@intel.com --- .../drm/i915/display/intel_display_debugfs.c | 2 +- .../drm/i915/display/intel_display_driver.c | 14 +- .../i915/display/intel_display_power_well.c | 6 +- .../drm/i915/display/intel_display_reset.c | 4 +- drivers/gpu/drm/i915/display/intel_dp.c | 2 +- drivers/gpu/drm/i915/display/intel_hotplug.c | 332 ++++++++++-------- drivers/gpu/drm/i915/display/intel_hotplug.h | 26 +- .../gpu/drm/i915/display/intel_hotplug_irq.c | 24 +- drivers/gpu/drm/i915/i915_driver.c | 14 +- drivers/gpu/drm/i915/i915_irq.c | 3 +- drivers/gpu/drm/xe/display/xe_display.c | 26 +- 11 files changed, 240 insertions(+), 213 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index fdedf65bee53..f42b5a69eed5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -844,7 +844,7 @@ void intel_display_debugfs_register(struct intel_display *display) intel_dmc_debugfs_register(display); intel_dp_test_debugfs_register(display); intel_fbc_debugfs_register(display); - intel_hpd_debugfs_register(i915); + intel_hpd_debugfs_register(display); intel_opregion_debugfs_register(display); intel_psr_debugfs_register(display); intel_wm_debugfs_register(i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 5ad2f4090a2d..4035482a2e1b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -315,11 +315,9 @@ static void set_display_access(struct intel_display *display, */ void intel_display_driver_enable_user_access(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - set_display_access(display, true, NULL); - intel_hpd_enable_detection_work(i915); + intel_hpd_enable_detection_work(display); } /** @@ -341,9 +339,7 
@@ void intel_display_driver_enable_user_access(struct intel_display *display) */ void intel_display_driver_disable_user_access(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - intel_hpd_disable_detection_work(i915); + intel_hpd_disable_detection_work(display); set_display_access(display, false, current); } @@ -524,7 +520,7 @@ int intel_display_driver_probe(struct intel_display *display) intel_overlay_setup(display); /* Only enable hotplug handling once the fbdev is fully set up. */ - intel_hpd_init(i915); + intel_hpd_init(display); skl_watermark_ipc_init(i915); @@ -558,7 +554,7 @@ void intel_display_driver_register(struct intel_display *display) * fbdev->async_cookie. */ drm_kms_helper_poll_init(display->drm); - intel_hpd_poll_disable(i915); + intel_hpd_poll_disable(display); intel_fbdev_setup(i915); @@ -600,7 +596,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display) * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. */ - intel_hpd_poll_fini(i915); + intel_hpd_poll_fini(display); intel_unregister_dsm_handler(); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 8ec87ffd87d2..daf2a0cbb157 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -1236,8 +1236,8 @@ static void vlv_display_power_well_init(struct intel_display *display) if (display->power.domains.initializing) return; - intel_hpd_init(dev_priv); - intel_hpd_poll_disable(dev_priv); + intel_hpd_init(display); + intel_hpd_poll_disable(display); /* Re-enable the ADPA, if we have one */ for_each_intel_encoder(display->drm, encoder) { @@ -1265,7 +1265,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display) /* Prevent us from re-enabling polling on accident in late suspend */ if (!display->drm->dev->power.is_suspended) - intel_hpd_poll_enable(dev_priv); + intel_hpd_poll_enable(display); } static void vlv_display_power_well_enable(struct intel_display *display, diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c index 1f2798404f2c..1dbd3e841df3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_reset.c +++ b/drivers/gpu/drm/i915/display/intel_display_reset.c @@ -107,14 +107,14 @@ void intel_display_reset_finish(struct intel_display *display, bool test_only) intel_display_driver_init_hw(display); intel_clock_gating_init(i915); intel_cx0_pll_power_save_wa(display); - intel_hpd_init(i915); + intel_hpd_init(display); ret = __intel_display_driver_resume(display, state, ctx); if (ret) drm_err(display->drm, "Restoring old state failed with %i\n", ret); - intel_hpd_poll_disable(i915); + intel_hpd_poll_disable(display); } drm_atomic_state_put(state); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a236b5fc7a3d..7d074770d793 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -6117,7 +6117,7 @@ static void intel_dp_oob_hotplug_event(struct drm_connector *connector, spin_unlock_irq(&i915->irq_lock); if (need_work) - intel_hpd_schedule_detection(i915); + intel_hpd_schedule_detection(display); } static const struct drm_connector_funcs intel_dp_connector_funcs = { diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c 
b/drivers/gpu/drm/i915/display/intel_hotplug.c index c69b1f5fd160..9bde28ce1979 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -118,7 +118,7 @@ intel_connector_hpd_pin(struct intel_connector *connector) /** * intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin - * @dev_priv: private driver data pointer + * @display: display device * @pin: the pin to gather stats on * @long_hpd: whether the HPD IRQ was long or short * @@ -127,13 +127,13 @@ intel_connector_hpd_pin(struct intel_connector *connector) * responsible for further action. * * The number of IRQs that are allowed within @HPD_STORM_DETECT_PERIOD is - * stored in @dev_priv->display.hotplug.hpd_storm_threshold which defaults to + * stored in @display->hotplug.hpd_storm_threshold which defaults to * @HPD_STORM_DEFAULT_THRESHOLD. Long IRQs count as +10 to this threshold, and * short IRQs count as +1. If this threshold is exceeded, it's considered an * IRQ storm and the IRQ state is set to @HPD_MARK_DISABLED. * * By default, most systems will only count long IRQs towards - * &dev_priv->display.hotplug.hpd_storm_threshold. However, some older systems also + * &display->hotplug.hpd_storm_threshold. However, some older systems also * suffer from short IRQ storms and must also track these. Because short IRQ * storms are naturally caused by sideband interactions with DP MST devices, * short IRQ detection is only enabled for systems without DP MST support. @@ -145,10 +145,10 @@ intel_connector_hpd_pin(struct intel_connector *connector) * * Return true if an IRQ storm was detected on @pin. */ -static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, +static bool intel_hpd_irq_storm_detect(struct intel_display *display, enum hpd_pin pin, bool long_hpd) { - struct intel_hotplug *hpd = &dev_priv->display.hotplug; + struct intel_hotplug *hpd = &display->hotplug; unsigned long start = hpd->stats[pin].last_jiffies; unsigned long end = start + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD); const int increment = long_hpd ? 
10 : 1; @@ -156,7 +156,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, bool storm = false; if (!threshold || - (!long_hpd && !dev_priv->display.hotplug.hpd_short_storm_enabled)) + (!long_hpd && !display->hotplug.hpd_short_storm_enabled)) return false; if (!time_in_range(jiffies, start, end)) { @@ -167,11 +167,11 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, hpd->stats[pin].count += increment; if (hpd->stats[pin].count > threshold) { hpd->stats[pin].state = HPD_MARK_DISABLED; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "HPD interrupt storm detected on PIN %d\n", pin); storm = true; } else { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Received HPD interrupt on PIN %d - cnt: %d\n", pin, hpd->stats[pin].count); @@ -180,56 +180,65 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv, return storm; } -static bool detection_work_enabled(struct drm_i915_private *i915) +static bool detection_work_enabled(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - return i915->display.hotplug.detection_work_enabled; + return display->hotplug.detection_work_enabled; } static bool -mod_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) +mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - if (!detection_work_enabled(i915)) + if (!detection_work_enabled(display)) return false; return mod_delayed_work(i915->unordered_wq, work, delay); } static bool -queue_delayed_detection_work(struct drm_i915_private *i915, struct delayed_work *work, int delay) +queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - if (!detection_work_enabled(i915)) + if (!detection_work_enabled(display)) return false; return queue_delayed_work(i915->unordered_wq, work, delay); } static bool -queue_detection_work(struct drm_i915_private *i915, struct work_struct *work) +queue_detection_work(struct intel_display *display, struct work_struct *work) { + struct drm_i915_private *i915 = to_i915(display->drm); + lockdep_assert_held(&i915->irq_lock); - if (!detection_work_enabled(i915)) + if (!detection_work_enabled(display)) return false; return queue_work(i915->unordered_wq, work); } static void -intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) +intel_hpd_irq_storm_switch_to_polling(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; bool hpd_disabled = false; lockdep_assert_held(&dev_priv->irq_lock); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; @@ -238,15 +247,15 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->display.hotplug.stats[pin].state != HPD_MARK_DISABLED) + display->hotplug.stats[pin].state != HPD_MARK_DISABLED) continue; - drm_info(&dev_priv->drm, + drm_info(display->drm, "HPD interrupt storm detected on connector %s: " "switching from 
hotplug detection to polling\n", connector->base.name); - dev_priv->display.hotplug.stats[pin].state = HPD_DISABLED; + display->hotplug.stats[pin].state = HPD_DISABLED; connector->base.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; hpd_disabled = true; @@ -255,18 +264,18 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv) /* Enable polling and queue hotplug re-enabling. */ if (hpd_disabled) { - drm_kms_helper_poll_reschedule(&dev_priv->drm); - mod_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.reenable_work, + drm_kms_helper_poll_reschedule(display->drm); + mod_delayed_detection_work(display, + &display->hotplug.reenable_work, msecs_to_jiffies(HPD_STORM_REENABLE_DELAY)); } } static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, typeof(*dev_priv), - display.hotplug.reenable_work.work); + struct intel_display *display = + container_of(work, typeof(*display), hotplug.reenable_work.work); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; intel_wakeref_t wakeref; @@ -276,15 +285,15 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) spin_lock_irq(&dev_priv->irq_lock); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { pin = intel_connector_hpd_pin(connector); if (pin == HPD_NONE || - dev_priv->display.hotplug.stats[pin].state != HPD_DISABLED) + display->hotplug.stats[pin].state != HPD_DISABLED) continue; if (connector->base.polled != connector->polled) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Reenabling HPD on connector %s\n", connector->base.name); connector->base.polled = connector->polled; @@ -292,8 +301,8 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) drm_connector_list_iter_end(&conn_iter); for_each_hpd_pin(pin) { - if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) - dev_priv->display.hotplug.stats[pin].state = HPD_ENABLED; + if (display->hotplug.stats[pin].state == HPD_DISABLED) + display->hotplug.stats[pin].state = HPD_ENABLED; } intel_hpd_irq_setup(dev_priv); @@ -407,7 +416,7 @@ static void i915_digport_work_func(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(display->drm, encoder) { struct intel_digital_port *dig_port; enum hpd_pin pin = encoder->hpd_pin; bool long_hpd, short_hpd; @@ -433,9 +442,9 @@ static void i915_digport_work_func(struct work_struct *work) if (old_bits) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.event_bits |= old_bits; - queue_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.hotplug_work, 0); + display->hotplug.event_bits |= old_bits; + queue_delayed_detection_work(display, + &display->hotplug.hotplug_work, 0); spin_unlock_irq(&dev_priv->irq_lock); } } @@ -481,8 +490,8 @@ static void i915_hotplug_work_func(struct work_struct *work) int changed_connectors = 0; u32 blocked_hpd_pin_mask; - mutex_lock(&dev_priv->drm.mode_config.mutex); - drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n"); + mutex_lock(&display->drm->mode_config.mutex); + drm_dbg_kms(display->drm, "running encoder hotplug functions\n"); spin_lock_irq(&dev_priv->irq_lock); @@ -493,18 +502,18 @@ static void i915_hotplug_work_func(struct 
work_struct *work) hotplug->retry_bits &= ~hpd_retry_bits; /* Enable polling for connectors which had HPD IRQ storms */ - intel_hpd_irq_storm_switch_to_polling(dev_priv); + intel_hpd_irq_storm_switch_to_polling(display); spin_unlock_irq(&dev_priv->irq_lock); /* Skip calling encode hotplug handlers if ignore long HPD set*/ - if (dev_priv->display.hotplug.ignore_long_hpd) { - drm_dbg_kms(&dev_priv->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + if (display->hotplug.ignore_long_hpd) { + drm_dbg_kms(display->drm, "Ignore HPD flag on - skip encoder hotplug handlers\n"); + mutex_unlock(&display->drm->mode_config.mutex); return; } - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; u32 hpd_bit; @@ -523,7 +532,7 @@ static void i915_hotplug_work_func(struct work_struct *work) else connector->hotplug_retries++; - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Connector %s (pin %i) received hotplug event. (retry %d)\n", connector->base.name, pin, connector->hotplug_retries); @@ -546,12 +555,12 @@ static void i915_hotplug_work_func(struct work_struct *work) } } drm_connector_list_iter_end(&conn_iter); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (changed_connectors == 1) drm_kms_helper_connector_hotplug_event(first_changed_connector); else if (changed_connectors > 0) - drm_kms_helper_hotplug_event(&dev_priv->drm); + drm_kms_helper_hotplug_event(display->drm); if (first_changed_connector) drm_connector_put(first_changed_connector); @@ -560,10 +569,10 @@ static void i915_hotplug_work_func(struct work_struct *work) retry &= ~changed; if (retry) { spin_lock_irq(&dev_priv->irq_lock); - dev_priv->display.hotplug.retry_bits |= retry; + display->hotplug.retry_bits |= retry; - mod_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.hotplug_work, + mod_delayed_detection_work(display, + &display->hotplug.hotplug_work, msecs_to_jiffies(HPD_RETRY_DELAY)); spin_unlock_irq(&dev_priv->irq_lock); } @@ -572,7 +581,7 @@ static void i915_hotplug_work_func(struct work_struct *work) /** * intel_hpd_irq_handler - main hotplug irq handler - * @dev_priv: drm_i915_private + * @display: display device * @pin_mask: a mask of hpd pins that have triggered the irq * @long_mask: a mask of hpd pins that may be long hpd pulses * @@ -586,10 +595,10 @@ static void i915_hotplug_work_func(struct work_struct *work) * Here, we do hotplug irq storm detection and mitigation, and pass further * processing to appropriate bottom halves. */ -void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, +void intel_hpd_irq_handler(struct intel_display *display, u32 pin_mask, u32 long_mask) { - struct intel_display *display = to_intel_display(&dev_priv->drm); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; bool storm_detected = false; bool queue_dig = false, queue_hp = false; @@ -608,7 +617,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * as each pin may have up to two encoders (HDMI and DP) and * only the one of them (DP) will have ->hpd_pulse(). 
*/ - for_each_intel_encoder(&dev_priv->drm, encoder) { + for_each_intel_encoder(display->drm, encoder) { bool long_hpd; pin = encoder->hpd_pin; @@ -620,7 +629,7 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, long_hpd = long_mask & BIT(pin); - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "digital hpd on [ENCODER:%d:%s] - %s\n", encoder->base.base.id, encoder->base.name, long_hpd ? "long" : "short"); @@ -630,10 +639,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (long_hpd) { long_hpd_pulse_mask |= BIT(pin); - dev_priv->display.hotplug.long_hpd_pin_mask |= BIT(pin); + display->hotplug.long_hpd_pin_mask |= BIT(pin); } else { short_hpd_pulse_mask |= BIT(pin); - dev_priv->display.hotplug.short_hpd_pin_mask |= BIT(pin); + display->hotplug.short_hpd_pin_mask |= BIT(pin); } } @@ -644,20 +653,20 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (!(BIT(pin) & pin_mask)) continue; - if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) { + if (display->hotplug.stats[pin].state == HPD_DISABLED) { /* * On GMCH platforms the interrupt mask bits only * prevent irq generation, not the setting of the * hotplug bits itself. So only WARN about unexpected * interrupts on saner platforms. */ - drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv), + drm_WARN_ONCE(display->drm, !HAS_GMCH(display), "Received HPD interrupt on pin %d although disabled\n", pin); continue; } - if (dev_priv->display.hotplug.stats[pin].state != HPD_ENABLED) + if (display->hotplug.stats[pin].state != HPD_ENABLED) continue; /* @@ -668,15 +677,15 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, if (((short_hpd_pulse_mask | long_hpd_pulse_mask) & BIT(pin))) { long_hpd = long_hpd_pulse_mask & BIT(pin); } else { - dev_priv->display.hotplug.event_bits |= BIT(pin); + display->hotplug.event_bits |= BIT(pin); long_hpd = true; if (!hpd_pin_is_blocked(display, pin)) queue_hp = true; } - if (intel_hpd_irq_storm_detect(dev_priv, pin, long_hpd)) { - dev_priv->display.hotplug.event_bits &= ~BIT(pin); + if (intel_hpd_irq_storm_detect(display, pin, long_hpd)) { + display->hotplug.event_bits &= ~BIT(pin); storm_detected = true; queue_hp = true; } @@ -696,17 +705,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * deadlock. */ if (queue_dig) - queue_work(dev_priv->display.hotplug.dp_wq, &dev_priv->display.hotplug.dig_port_work); + queue_work(display->hotplug.dp_wq, &display->hotplug.dig_port_work); if (queue_hp) - queue_delayed_detection_work(dev_priv, - &dev_priv->display.hotplug.hotplug_work, 0); + queue_delayed_detection_work(display, + &display->hotplug.hotplug_work, 0); spin_unlock(&dev_priv->irq_lock); } /** * intel_hpd_init - initializes and enables hpd support - * @dev_priv: i915 device instance + * @display: display device instance * * This function enables the hotplug support. It requires that interrupts have * already been enabled with intel_irq_init_hw(). From this point on hotplug and @@ -718,16 +727,17 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, * * Also see: intel_hpd_poll_enable() and intel_hpd_poll_disable(). 
*/ -void intel_hpd_init(struct drm_i915_private *dev_priv) +void intel_hpd_init(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); int i; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; for_each_hpd_pin(i) { - dev_priv->display.hotplug.stats[i].count = 0; - dev_priv->display.hotplug.stats[i].state = HPD_ENABLED; + display->hotplug.stats[i].count = 0; + display->hotplug.stats[i].state = HPD_ENABLED; } /* @@ -739,19 +749,19 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) spin_unlock_irq(&dev_priv->irq_lock); } -static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915) +static void i915_hpd_poll_detect_connectors(struct intel_display *display) { struct drm_connector_list_iter conn_iter; struct intel_connector *connector; struct intel_connector *first_changed_connector = NULL; int changed = 0; - mutex_lock(&i915->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); - if (!i915->drm.mode_config.poll_enabled) + if (!display->drm->mode_config.poll_enabled) goto out; - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (!(connector->base.polled & DRM_CONNECTOR_POLL_HPD)) continue; @@ -769,7 +779,7 @@ static void i915_hpd_poll_detect_connectors(struct drm_i915_private *i915) drm_connector_list_iter_end(&conn_iter); out: - mutex_unlock(&i915->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!changed) return; @@ -777,25 +787,24 @@ out: if (changed == 1) drm_kms_helper_connector_hotplug_event(&first_changed_connector->base); else - drm_kms_helper_hotplug_event(&i915->drm); + drm_kms_helper_hotplug_event(display->drm); drm_connector_put(&first_changed_connector->base); } static void i915_hpd_poll_init_work(struct work_struct *work) { - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, - display.hotplug.poll_init_work); - struct intel_display *display = &dev_priv->display; + struct intel_display *display = + container_of(work, typeof(*display), hotplug.poll_init_work); + struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; intel_wakeref_t wakeref; bool enabled; - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); - enabled = READ_ONCE(dev_priv->display.hotplug.poll_enabled); + enabled = READ_ONCE(display->hotplug.poll_enabled); /* * Prevent taking a power reference from this sequence of * i915_hpd_poll_init_work() -> drm_helper_hpd_irq_event() -> @@ -805,14 +814,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work) if (!enabled) { wakeref = intel_display_power_get(display, POWER_DOMAIN_DISPLAY_CORE); - drm_WARN_ON(&dev_priv->drm, - READ_ONCE(dev_priv->display.hotplug.poll_enabled)); - cancel_work(&dev_priv->display.hotplug.poll_init_work); + drm_WARN_ON(display->drm, + READ_ONCE(display->hotplug.poll_enabled)); + cancel_work(&display->hotplug.poll_init_work); } spin_lock_irq(&dev_priv->irq_lock); - drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { enum hpd_pin pin; @@ -820,7 +829,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) if (pin == HPD_NONE) continue; - if (dev_priv->display.hotplug.stats[pin].state == HPD_DISABLED) + if 
(display->hotplug.stats[pin].state == HPD_DISABLED) continue; connector->base.polled = connector->polled; @@ -834,16 +843,16 @@ static void i915_hpd_poll_init_work(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); if (enabled) - drm_kms_helper_poll_reschedule(&dev_priv->drm); + drm_kms_helper_poll_reschedule(display->drm); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); /* * We might have missed any hotplugs that happened while we were * in the middle of disabling polling */ if (!enabled) { - i915_hpd_poll_detect_connectors(dev_priv); + i915_hpd_poll_detect_connectors(display); intel_display_power_put(display, POWER_DOMAIN_DISPLAY_CORE, @@ -853,7 +862,7 @@ static void i915_hpd_poll_init_work(struct work_struct *work) /** * intel_hpd_poll_enable - enable polling for connectors with hpd - * @dev_priv: i915 device instance + * @display: display device instance * * This function enables polling for all connectors which support HPD. * Under certain conditions HPD may not be functional. On most Intel GPUs, @@ -867,15 +876,14 @@ static void i915_hpd_poll_init_work(struct work_struct *work) * * Also see: intel_hpd_init() and intel_hpd_poll_disable(). */ -void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) +void intel_hpd_poll_enable(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - if (!HAS_DISPLAY(dev_priv) || - !intel_display_device_enabled(display)) + if (!HAS_DISPLAY(display) || !intel_display_device_enabled(display)) return; - WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, true); + WRITE_ONCE(display->hotplug.poll_enabled, true); /* * We might already be holding dev->mode_config.mutex, so do this in a @@ -884,14 +892,14 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * this worker anyway */ spin_lock_irq(&dev_priv->irq_lock); - queue_detection_work(dev_priv, - &dev_priv->display.hotplug.poll_init_work); + queue_detection_work(display, + &display->hotplug.poll_init_work); spin_unlock_irq(&dev_priv->irq_lock); } /** * intel_hpd_poll_disable - disable polling for connectors with hpd - * @dev_priv: i915 device instance + * @display: display device instance * * This function disables polling for all connectors which support HPD. * Under certain conditions HPD may not be functional. On most Intel GPUs, @@ -908,26 +916,28 @@ void intel_hpd_poll_enable(struct drm_i915_private *dev_priv) * * Also see: intel_hpd_init() and intel_hpd_poll_enable(). */ -void intel_hpd_poll_disable(struct drm_i915_private *dev_priv) +void intel_hpd_poll_disable(struct intel_display *display) { - if (!HAS_DISPLAY(dev_priv)) + struct drm_i915_private *dev_priv = to_i915(display->drm); + + if (!HAS_DISPLAY(display)) return; - WRITE_ONCE(dev_priv->display.hotplug.poll_enabled, false); + WRITE_ONCE(display->hotplug.poll_enabled, false); spin_lock_irq(&dev_priv->irq_lock); - queue_detection_work(dev_priv, - &dev_priv->display.hotplug.poll_init_work); + queue_detection_work(display, + &display->hotplug.poll_init_work); spin_unlock_irq(&dev_priv->irq_lock); } -void intel_hpd_poll_fini(struct drm_i915_private *i915) +void intel_hpd_poll_fini(struct intel_display *display) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; /* Kill all the work that may have been queued by hpd. 
*/ - drm_connector_list_iter_begin(&i915->drm, &conn_iter); + drm_connector_list_iter_begin(display->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { intel_connector_cancel_modeset_retry_work(connector); intel_hdcp_cancel_works(connector); @@ -935,70 +945,70 @@ void intel_hpd_poll_fini(struct drm_i915_private *i915) drm_connector_list_iter_end(&conn_iter); } -void intel_hpd_init_early(struct drm_i915_private *i915) +void intel_hpd_init_early(struct intel_display *display) { - INIT_DELAYED_WORK(&i915->display.hotplug.hotplug_work, + INIT_DELAYED_WORK(&display->hotplug.hotplug_work, i915_hotplug_work_func); - INIT_WORK(&i915->display.hotplug.dig_port_work, i915_digport_work_func); - INIT_WORK(&i915->display.hotplug.poll_init_work, i915_hpd_poll_init_work); - INIT_DELAYED_WORK(&i915->display.hotplug.reenable_work, + INIT_WORK(&display->hotplug.dig_port_work, i915_digport_work_func); + INIT_WORK(&display->hotplug.poll_init_work, i915_hpd_poll_init_work); + INIT_DELAYED_WORK(&display->hotplug.reenable_work, intel_hpd_irq_storm_reenable_work); - i915->display.hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; + display->hotplug.hpd_storm_threshold = HPD_STORM_DEFAULT_THRESHOLD; /* If we have MST support, we want to avoid doing short HPD IRQ storm * detection, as short HPD storms will occur as a natural part of * sideband messaging with MST. * On older platforms however, IRQ storms can occur with both long and * short pulses, as seen on some G4x systems. */ - i915->display.hotplug.hpd_short_storm_enabled = !HAS_DP_MST(i915); + display->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(display); } -static bool cancel_all_detection_work(struct drm_i915_private *i915) +static bool cancel_all_detection_work(struct intel_display *display) { bool was_pending = false; - if (cancel_delayed_work_sync(&i915->display.hotplug.hotplug_work)) + if (cancel_delayed_work_sync(&display->hotplug.hotplug_work)) was_pending = true; - if (cancel_work_sync(&i915->display.hotplug.poll_init_work)) + if (cancel_work_sync(&display->hotplug.poll_init_work)) was_pending = true; - if (cancel_delayed_work_sync(&i915->display.hotplug.reenable_work)) + if (cancel_delayed_work_sync(&display->hotplug.reenable_work)) was_pending = true; return was_pending; } -void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) +void intel_hpd_cancel_work(struct intel_display *display) { - struct intel_display *display = to_intel_display(&dev_priv->drm); + struct drm_i915_private *dev_priv = to_i915(display->drm); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; spin_lock_irq(&dev_priv->irq_lock); drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display)); - dev_priv->display.hotplug.long_hpd_pin_mask = 0; - dev_priv->display.hotplug.short_hpd_pin_mask = 0; - dev_priv->display.hotplug.event_bits = 0; - dev_priv->display.hotplug.retry_bits = 0; + display->hotplug.long_hpd_pin_mask = 0; + display->hotplug.short_hpd_pin_mask = 0; + display->hotplug.event_bits = 0; + display->hotplug.retry_bits = 0; spin_unlock_irq(&dev_priv->irq_lock); - cancel_work_sync(&dev_priv->display.hotplug.dig_port_work); + cancel_work_sync(&display->hotplug.dig_port_work); /* * All other work triggered by hotplug events should be canceled by * now. 
*/ - if (cancel_all_detection_work(dev_priv)) - drm_dbg_kms(&dev_priv->drm, "Hotplug detection work still active\n"); + if (cancel_all_detection_work(display)) + drm_dbg_kms(display->drm, "Hotplug detection work still active\n"); } -static void queue_work_for_missed_irqs(struct drm_i915_private *i915) +static void queue_work_for_missed_irqs(struct intel_display *display) { - struct intel_display *display = to_intel_display(&i915->drm); + struct drm_i915_private *i915 = to_i915(display->drm); struct intel_hotplug *hotplug = &display->hotplug; bool queue_hp_work = false; u32 blocked_hpd_pin_mask; @@ -1011,7 +1021,7 @@ static void queue_work_for_missed_irqs(struct drm_i915_private *i915) queue_hp_work = true; for_each_hpd_pin(pin) { - switch (i915->display.hotplug.stats[pin].state) { + switch (display->hotplug.stats[pin].state) { case HPD_MARK_DISABLED: queue_hp_work = true; break; @@ -1019,7 +1029,7 @@ static void queue_work_for_missed_irqs(struct drm_i915_private *i915) case HPD_ENABLED: break; default: - MISSING_CASE(i915->display.hotplug.stats[pin].state); + MISSING_CASE(display->hotplug.stats[pin].state); } } @@ -1027,7 +1037,7 @@ static void queue_work_for_missed_irqs(struct drm_i915_private *i915) queue_work(hotplug->dp_wq, &hotplug->dig_port_work); if (queue_hp_work) - queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); + queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0); } static bool block_hpd_pin(struct intel_display *display, enum hpd_pin pin) @@ -1121,7 +1131,7 @@ void intel_hpd_unblock(struct intel_encoder *encoder) spin_lock_irq(&i915->irq_lock); if (unblock_hpd_pin(display, encoder->hpd_pin)) - queue_work_for_missed_irqs(i915); + queue_work_for_missed_irqs(display); spin_unlock_irq(&i915->irq_lock); } @@ -1156,30 +1166,35 @@ void intel_hpd_clear_and_unblock(struct intel_encoder *encoder) spin_unlock_irq(&i915->irq_lock); } -void intel_hpd_enable_detection_work(struct drm_i915_private *i915) +void intel_hpd_enable_detection_work(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.detection_work_enabled = true; - queue_work_for_missed_irqs(i915); + display->hotplug.detection_work_enabled = true; + queue_work_for_missed_irqs(display); spin_unlock_irq(&i915->irq_lock); } -void intel_hpd_disable_detection_work(struct drm_i915_private *i915) +void intel_hpd_disable_detection_work(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); + spin_lock_irq(&i915->irq_lock); - i915->display.hotplug.detection_work_enabled = false; + display->hotplug.detection_work_enabled = false; spin_unlock_irq(&i915->irq_lock); - cancel_all_detection_work(i915); + cancel_all_detection_work(display); } -bool intel_hpd_schedule_detection(struct drm_i915_private *i915) +bool intel_hpd_schedule_detection(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); unsigned long flags; bool ret; spin_lock_irqsave(&i915->irq_lock, flags); - ret = queue_delayed_detection_work(i915, &i915->display.hotplug.hotplug_work, 0); + ret = queue_delayed_detection_work(display, &display->hotplug.hotplug_work, 0); spin_unlock_irqrestore(&i915->irq_lock, flags); return ret; @@ -1188,14 +1203,15 @@ bool intel_hpd_schedule_detection(struct drm_i915_private *i915) static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; - struct intel_hotplug *hotplug = 
&dev_priv->display.hotplug; + struct intel_display *display = &dev_priv->display; + struct intel_hotplug *hotplug = &display->hotplug; /* Synchronize with everything first in case there's been an HPD * storm, but we haven't finished handling it in the kernel yet */ intel_synchronize_irq(dev_priv); - flush_work(&dev_priv->display.hotplug.dig_port_work); - flush_delayed_work(&dev_priv->display.hotplug.hotplug_work); + flush_work(&display->hotplug.dig_port_work); + flush_delayed_work(&display->hotplug.hotplug_work); seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold); seq_printf(m, "Detected: %s\n", @@ -1210,7 +1226,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - struct intel_hotplug *hotplug = &dev_priv->display.hotplug; + struct intel_display *display = &dev_priv->display; + struct intel_hotplug *hotplug = &display->hotplug; unsigned int new_threshold; int i; char *newline; @@ -1235,11 +1252,11 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, return -EINVAL; if (new_threshold > 0) - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Setting HPD storm detection threshold to %d\n", new_threshold); else - drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n"); + drm_dbg_kms(display->drm, "Disabling HPD storm detection\n"); spin_lock_irq(&dev_priv->irq_lock); hotplug->hpd_storm_threshold = new_threshold; @@ -1249,7 +1266,7 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->display.hotplug.reenable_work); + flush_delayed_work(&display->hotplug.reenable_work); return len; } @@ -1271,9 +1288,10 @@ static const struct file_operations i915_hpd_storm_ctl_fops = { static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = m->private; + struct intel_display *display = &dev_priv->display; seq_printf(m, "Enabled: %s\n", - str_yes_no(dev_priv->display.hotplug.hpd_short_storm_enabled)); + str_yes_no(display->hotplug.hpd_short_storm_enabled)); return 0; } @@ -1291,7 +1309,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *dev_priv = m->private; - struct intel_hotplug *hotplug = &dev_priv->display.hotplug; + struct intel_display *display = &dev_priv->display; + struct intel_hotplug *hotplug = &display->hotplug; char *newline; char tmp[16]; int i; @@ -1312,11 +1331,11 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, /* Reset to the "default" state for this system */ if (strcmp(tmp, "reset") == 0) - new_state = !HAS_DP_MST(dev_priv); + new_state = !HAS_DP_MST(display); else if (kstrtobool(tmp, &new_state) != 0) return -EINVAL; - drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n", + drm_dbg_kms(display->drm, "%sabling HPD short storm detection\n", new_state ? 
"En" : "Dis"); spin_lock_irq(&dev_priv->irq_lock); @@ -1327,7 +1346,7 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, spin_unlock_irq(&dev_priv->irq_lock); /* Re-enable hpd immediately if we were in an irq storm */ - flush_delayed_work(&dev_priv->display.hotplug.reenable_work); + flush_delayed_work(&display->hotplug.reenable_work); return len; } @@ -1341,14 +1360,15 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = { .write = i915_hpd_short_storm_ctl_write, }; -void intel_hpd_debugfs_register(struct drm_i915_private *i915) +void intel_hpd_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = i915->drm.primary; + struct drm_minor *minor = display->drm->primary; + struct drm_i915_private *i915 = to_i915(display->drm); debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, i915, &i915_hpd_storm_ctl_fops); debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, i915, &i915_hpd_short_storm_ctl_fops); debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, - &i915->display.hotplug.ignore_long_hpd); + &display->hotplug.ignore_long_hpd); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index f189b871904e..edc41c9d3d65 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -8,31 +8,31 @@ #include -struct drm_i915_private; +enum port; struct intel_connector; struct intel_digital_port; +struct intel_display; struct intel_encoder; -enum port; -void intel_hpd_poll_enable(struct drm_i915_private *dev_priv); -void intel_hpd_poll_disable(struct drm_i915_private *dev_priv); -void intel_hpd_poll_fini(struct drm_i915_private *i915); +void intel_hpd_poll_enable(struct intel_display *display); +void intel_hpd_poll_disable(struct intel_display *display); +void intel_hpd_poll_fini(struct intel_display *display); enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, struct intel_connector *connector); -void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, +void intel_hpd_irq_handler(struct intel_display *display, u32 pin_mask, u32 long_mask); void intel_hpd_trigger_irq(struct intel_digital_port *dig_port); -void intel_hpd_init(struct drm_i915_private *dev_priv); -void intel_hpd_init_early(struct drm_i915_private *i915); -void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); +void intel_hpd_init(struct intel_display *display); +void intel_hpd_init_early(struct intel_display *display); +void intel_hpd_cancel_work(struct intel_display *display); enum hpd_pin intel_hpd_pin_default(enum port port); void intel_hpd_block(struct intel_encoder *encoder); void intel_hpd_unblock(struct intel_encoder *encoder); void intel_hpd_clear_and_unblock(struct intel_encoder *encoder); -void intel_hpd_debugfs_register(struct drm_i915_private *i915); +void intel_hpd_debugfs_register(struct intel_display *display); -void intel_hpd_enable_detection_work(struct drm_i915_private *i915); -void intel_hpd_disable_detection_work(struct drm_i915_private *i915); -bool intel_hpd_schedule_detection(struct drm_i915_private *i915); +void intel_hpd_enable_detection_work(struct intel_display *display); +void intel_hpd_disable_detection_work(struct intel_display *display); +bool intel_hpd_schedule_detection(struct intel_display *display); #endif /* __INTEL_HOTPLUG_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 
2137ac7b882a..e6320838df59 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -472,7 +472,7 @@ void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) dev_priv->display.hotplug.hpd, i9xx_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } if ((IS_G4X(dev_priv) || @@ -483,6 +483,7 @@ void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) { + struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; /* @@ -509,7 +510,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) dev_priv->display.hotplug.pch_hpd, pch_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) @@ -543,7 +544,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", hotplug_trigger, pin_mask, long_mask); - intel_hpd_irq_handler(i915, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } if (trigger_aux) @@ -587,7 +588,7 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) } if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_ICP) intel_gmbus_irq_handler(display); @@ -624,7 +625,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) } if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); if (pch_iir & SDE_GMBUS_CPT) intel_gmbus_irq_handler(display); @@ -632,6 +633,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) { + struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); @@ -641,11 +643,12 @@ void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) dev_priv->display.hotplug.hpd, ilk_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) { + struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); @@ -655,11 +658,12 @@ void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) dev_priv->display.hotplug.hpd, bxt_port_hotplug_long_detect); - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); } void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) { + struct intel_display *display = &dev_priv->display; u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; u32 trigger_tbt = iir & GEN11_DE_TBT_HOTPLUG_MASK; @@ -687,7 +691,7 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } if (pin_mask) - 
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); + intel_hpd_irq_handler(display, pin_mask, long_mask); else drm_err(&dev_priv->drm, "Unexpected DE HPD interrupt 0x%08x\n", iir); @@ -1467,9 +1471,11 @@ void intel_hpd_irq_setup(struct drm_i915_private *i915) void intel_hotplug_irq_init(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; + intel_hpd_init_pins(i915); - intel_hpd_init_early(i915); + intel_hpd_init_early(display); if (HAS_GMCH(i915)) { if (I915_HAS_HOTPLUG(i915)) diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index cba56cf73b96..fcb5a6adf570 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -991,7 +991,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_dp_mst_suspend(display); intel_irq_suspend(i915); - intel_hpd_cancel_work(i915); + intel_hpd_cancel_work(display); if (HAS_DISPLAY(i915)) intel_display_driver_suspend_access(display); @@ -1074,7 +1074,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_display_driver_suspend(display); intel_irq_suspend(dev_priv); - intel_hpd_cancel_work(dev_priv); + intel_hpd_cancel_work(display); if (HAS_DISPLAY(dev_priv)) intel_display_driver_suspend_access(display); @@ -1237,7 +1237,7 @@ static int i915_drm_resume(struct drm_device *dev) if (HAS_DISPLAY(dev_priv)) intel_display_driver_resume_access(display); - intel_hpd_init(dev_priv); + intel_hpd_init(display); intel_display_driver_resume(display); @@ -1245,7 +1245,7 @@ static int i915_drm_resume(struct drm_device *dev) intel_display_driver_enable_user_access(display); drm_kms_helper_poll_enable(dev); } - intel_hpd_poll_disable(dev_priv); + intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -1585,7 +1585,7 @@ static int intel_runtime_suspend(struct device *kdev) assert_forcewakes_inactive(&dev_priv->uncore); if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) - intel_hpd_poll_enable(dev_priv); + intel_hpd_poll_enable(display); drm_dbg(&dev_priv->drm, "Device suspended\n"); return 0; @@ -1643,8 +1643,8 @@ static int intel_runtime_resume(struct device *kdev) * everyone else do it here. 
*/ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { - intel_hpd_init(dev_priv); - intel_hpd_poll_disable(dev_priv); + intel_hpd_init(display); + intel_hpd_poll_disable(display); } skl_watermark_ipc_update(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 37ca4a35daf2..de53615571be 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1280,6 +1280,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv) */ void intel_irq_uninstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; int irq = to_pci_dev(dev_priv->drm.dev)->irq; if (drm_WARN_ON(&dev_priv->drm, !dev_priv->irqs_enabled)) @@ -1289,7 +1290,7 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) free_irq(irq, dev_priv); - intel_hpd_cancel_work(dev_priv); + intel_hpd_cancel_work(display); dev_priv->irqs_enabled = false; } diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 3681aeccea3c..7dc24bd7f9d0 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -173,7 +173,7 @@ static void xe_display_fini(void *arg) struct xe_device *xe = arg; struct intel_display *display = &xe->display; - intel_hpd_poll_fini(xe); + intel_hpd_poll_fini(display); intel_hdcp_component_fini(display); intel_audio_deinit(display); } @@ -314,7 +314,7 @@ static void xe_display_enable_d3cold(struct xe_device *xe) intel_dmc_suspend(display); if (has_display(xe)) - intel_hpd_poll_enable(xe); + intel_hpd_poll_enable(display); } static void xe_display_disable_d3cold(struct xe_device *xe) @@ -331,10 +331,10 @@ static void xe_display_disable_d3cold(struct xe_device *xe) intel_display_driver_init_hw(display); - intel_hpd_init(xe); + intel_hpd_init(display); if (has_display(xe)) - intel_hpd_poll_disable(xe); + intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -364,7 +364,7 @@ void xe_display_pm_suspend(struct xe_device *xe) xe_display_flush_cleanup_work(xe); - intel_hpd_cancel_work(xe); + intel_hpd_cancel_work(display); if (has_display(xe)) { intel_display_driver_suspend_access(display); @@ -394,7 +394,7 @@ void xe_display_pm_shutdown(struct xe_device *xe) xe_display_flush_cleanup_work(xe); intel_dp_mst_suspend(display); - intel_hpd_cancel_work(xe); + intel_hpd_cancel_work(display); if (has_display(xe)) intel_display_driver_suspend_access(display); @@ -409,6 +409,8 @@ void xe_display_pm_shutdown(struct xe_device *xe) void xe_display_pm_runtime_suspend(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; @@ -417,7 +419,7 @@ void xe_display_pm_runtime_suspend(struct xe_device *xe) return; } - intel_hpd_poll_enable(xe); + intel_hpd_poll_enable(display); } void xe_display_pm_suspend_late(struct xe_device *xe) @@ -491,7 +493,7 @@ void xe_display_pm_resume(struct xe_device *xe) if (has_display(xe)) intel_display_driver_resume_access(display); - intel_hpd_init(xe); + intel_hpd_init(display); if (has_display(xe)) { intel_display_driver_resume(display); @@ -500,7 +502,7 @@ void xe_display_pm_resume(struct xe_device *xe) } if (has_display(xe)) - intel_hpd_poll_disable(xe); + intel_hpd_poll_disable(display); intel_opregion_resume(display); @@ -511,6 +513,8 @@ void xe_display_pm_resume(struct xe_device *xe) void xe_display_pm_runtime_resume(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; @@ -519,8 +523,8 @@ void 
xe_display_pm_runtime_resume(struct xe_device *xe) return; } - intel_hpd_init(xe); - intel_hpd_poll_disable(xe); + intel_hpd_init(display); + intel_hpd_poll_disable(display); skl_watermark_ipc_update(xe); } From 8e0f3bb5085d000f232672c1c62988ce598d1eda Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:46:00 +0200 Subject: [PATCH 0164/1627] drm/i915/hotplug: convert hotplug debugfs to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pass struct intel_display as the cookie to debugfs functions. Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/b1cbf64d366ca97005f9b139e85d8a32b460623a.1742481923.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_hotplug.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 9bde28ce1979..7683b3ce124d 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -1202,8 +1202,8 @@ bool intel_hpd_schedule_detection(struct intel_display *display) static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = m->private; - struct intel_display *display = &dev_priv->display; + struct intel_display *display = m->private; + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_hotplug *hotplug = &display->hotplug; /* Synchronize with everything first in case there's been an HPD @@ -1225,8 +1225,8 @@ static ssize_t i915_hpd_storm_ctl_write(struct file *file, loff_t *offp) { struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct intel_display *display = &dev_priv->display; + struct intel_display *display = m->private; + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_hotplug *hotplug = &display->hotplug; unsigned int new_threshold; int i; @@ -1287,8 +1287,7 @@ static const struct file_operations i915_hpd_storm_ctl_fops = { static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data) { - struct drm_i915_private *dev_priv = m->private; - struct intel_display *display = &dev_priv->display; + struct intel_display *display = m->private; seq_printf(m, "Enabled: %s\n", str_yes_no(display->hotplug.hpd_short_storm_enabled)); @@ -1308,8 +1307,8 @@ static ssize_t i915_hpd_short_storm_ctl_write(struct file *file, size_t len, loff_t *offp) { struct seq_file *m = file->private_data; - struct drm_i915_private *dev_priv = m->private; - struct intel_display *display = &dev_priv->display; + struct intel_display *display = m->private; + struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_hotplug *hotplug = &display->hotplug; char *newline; char tmp[16]; @@ -1363,12 +1362,11 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = { void intel_hpd_debugfs_register(struct intel_display *display) { struct drm_minor *minor = display->drm->primary; - struct drm_i915_private *i915 = to_i915(display->drm); debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, - i915, &i915_hpd_storm_ctl_fops); + display, &i915_hpd_storm_ctl_fops); debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, - i915, &i915_hpd_short_storm_ctl_fops); + display, &i915_hpd_short_storm_ctl_fops); debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, 
&display->hotplug.ignore_long_hpd); } From 79e23d576c7b970c5addb4a4ab0a0b7c25cb57ff Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:46:01 +0200 Subject: [PATCH 0165/1627] drm/i915/hotplug: convert hotplug irq handling to intel_de_*() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All the registers handled here are display registers. Switch from intel_uncore_*() to intel_de_*() functions. Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/cd1149b3ebcb7a9f73830b99957f09e468cd5fd9.1742481923.git.jani.nikula@intel.com --- .../gpu/drm/i915/display/intel_hotplug_irq.c | 205 ++++++++++-------- 1 file changed, 114 insertions(+), 91 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index e6320838df59..f24c65478742 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -183,11 +183,12 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, u32 mask, u32 bits) { + struct intel_display *display = &dev_priv->display; + lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, bits & ~mask); - intel_uncore_rmw(&dev_priv->uncore, PORT_HOTPLUG_EN(dev_priv), mask, - bits); + intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits); } /** @@ -415,6 +416,7 @@ static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; u32 hotplug_status = 0, hotplug_status_mask; int i; @@ -435,21 +437,20 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) * bits can itself generate a new hotplug interrupt :( */ for (i = 0; i < 10; i++) { - u32 tmp = intel_uncore_read(&dev_priv->uncore, - PORT_HOTPLUG_STAT(dev_priv)) & hotplug_status_mask; + u32 tmp = intel_de_read(display, + PORT_HOTPLUG_STAT(display)) & hotplug_status_mask; if (tmp == 0) return hotplug_status; hotplug_status |= tmp; - intel_uncore_write(&dev_priv->uncore, - PORT_HOTPLUG_STAT(dev_priv), - hotplug_status); + intel_de_write(display, PORT_HOTPLUG_STAT(display), + hotplug_status); } drm_WARN_ONCE(&dev_priv->drm, 1, "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", - intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT(dev_priv))); + intel_de_read(display, PORT_HOTPLUG_STAT(display))); return hotplug_status; } @@ -492,7 +493,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) * zero. Not acking leads to "The master control interrupt lied (SDE)!" * errors. 
*/ - dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); + dig_hotplug_reg = intel_de_read(display, PCH_PORT_HOTPLUG); if (!hotplug_trigger) { u32 mask = PORTA_HOTPLUG_STATUS_MASK | PORTD_HOTPLUG_STATUS_MASK | @@ -501,7 +502,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) dig_hotplug_reg &= ~mask; } - intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); + intel_de_write(display, PCH_PORT_HOTPLUG, dig_hotplug_reg); if (!hotplug_trigger) return; @@ -567,7 +568,7 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) /* Locking due to DSI native GPIO sequences */ spin_lock(&dev_priv->irq_lock); - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0); spin_unlock(&dev_priv->irq_lock); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, @@ -579,7 +580,7 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (tc_hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, @@ -605,7 +606,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, @@ -616,7 +617,7 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (hotplug2_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, @@ -636,7 +637,7 @@ void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, @@ -651,7 +652,7 @@ void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, @@ -671,7 +672,7 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) if (trigger_tc) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, 0, 0); + dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, @@ -682,7 +683,7 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) if (trigger_tbt) { u32 dig_hotplug_reg; - dig_hotplug_reg = intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, 0, 0); + dig_hotplug_reg = 
intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, @@ -741,23 +742,25 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder) static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec). * The pulse duration bits are reserved on LPT+. */ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); } static void ibx_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, - ibx_hotplug_mask(encoder->hpd_pin), - ibx_hotplug_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + ibx_hotplug_mask(encoder->hpd_pin), + ibx_hotplug_enables(encoder)); } static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -812,34 +815,38 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) { - intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_DDI, - intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); + struct intel_display *display = &dev_priv->display; + + intel_de_rmw(display, SHOTPLUG_CTL_DDI, + intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); } static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_DDI, - icp_ddi_hotplug_mask(encoder->hpd_pin), - icp_ddi_hotplug_enables(encoder)); + intel_de_rmw(display, SHOTPLUG_CTL_DDI, + icp_ddi_hotplug_mask(encoder->hpd_pin), + icp_ddi_hotplug_enables(encoder)); } static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { - intel_uncore_rmw(&dev_priv->uncore, SHOTPLUG_CTL_TC, - intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); + struct intel_display *display = &dev_priv->display; + + intel_de_rmw(display, SHOTPLUG_CTL_TC, + intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); } static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, SHOTPLUG_CTL_TC, - icp_tc_hotplug_mask(encoder->hpd_pin), - icp_tc_hotplug_enables(encoder)); + intel_de_rmw(display, SHOTPLUG_CTL_TC, + icp_tc_hotplug_mask(encoder->hpd_pin), + icp_tc_hotplug_enables(encoder)); } static void icp_hpd_enable_detection(struct intel_encoder *encoder) @@ -850,6 +857,7 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder) static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) { + struct intel_display *display = 
&dev_priv->display; u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); @@ -859,7 +867,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) * We reduce the value to 250us to be able to detect SHPD when an external display * is connected. This is also expected of us as stated in DP1.4a Table 3-4. */ - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); + intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -889,11 +897,12 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder) static void dg1_hpd_invert(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | INVERT_DDIC_HPD | INVERT_DDID_HPD); - intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, 0, val); + intel_de_rmw(display, SOUTH_CHICKEN1, 0, val); } static void dg1_hpd_enable_detection(struct intel_encoder *encoder) @@ -912,34 +921,38 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { - intel_uncore_rmw(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, - intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); + struct intel_display *display = &dev_priv->display; + + intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, + intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); } static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, GEN11_TC_HOTPLUG_CTL, - gen11_hotplug_mask(encoder->hpd_pin), - gen11_hotplug_enables(encoder)); + intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, + gen11_hotplug_mask(encoder->hpd_pin), + gen11_hotplug_enables(encoder)); } static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) { - intel_uncore_rmw(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, - intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); + struct intel_display *display = &dev_priv->display; + + intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, + intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); } static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, GEN11_TBT_HOTPLUG_CTL, - gen11_hotplug_mask(encoder->hpd_pin), - gen11_hotplug_enables(encoder)); + intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, + gen11_hotplug_mask(encoder->hpd_pin), + gen11_hotplug_enables(encoder)); } static void gen11_hpd_enable_detection(struct intel_encoder *encoder) @@ -955,14 +968,15 @@ static void gen11_hpd_enable_detection(struct intel_encoder *encoder) static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); - intel_uncore_rmw(&dev_priv->uncore, GEN11_DE_HPD_IMR, hotplug_irqs, 
- ~enabled_irqs & hotplug_irqs); - intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); + intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs, + ~enabled_irqs & hotplug_irqs); + intel_de_posting_read(display, GEN11_DE_HPD_IMR); gen11_tc_hpd_detection_setup(dev_priv); gen11_tbt_hpd_detection_setup(dev_priv); @@ -1141,6 +1155,7 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) { + struct intel_display *display = &i915->display; u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd); @@ -1148,7 +1163,7 @@ static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs, ~enabled_irqs & hotplug_irqs); - intel_uncore_posting_read(&i915->uncore, PICAINTERRUPT_IMR); + intel_de_posting_read(display, PICAINTERRUPT_IMR); xelpdp_pica_hpd_detection_setup(i915); @@ -1196,48 +1211,52 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder) static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(dev_priv)) { - intel_uncore_rmw(&dev_priv->uncore, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, - CHASSIS_CLK_REQ_DURATION(0xf)); + intel_de_rmw(display, SOUTH_CHICKEN1, CHASSIS_CLK_REQ_DURATION_MASK, + CHASSIS_CLK_REQ_DURATION(0xf)); } /* Enable digital hotplug on the PCH */ - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG2, - intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask), - intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); + intel_de_rmw(display, PCH_PORT_HOTPLUG2, + intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask), + intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); } static void spt_hpd_enable_detection(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *i915 = to_i915(encoder->base.dev); /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(i915)) { - intel_uncore_rmw(&i915->uncore, SOUTH_CHICKEN1, - CHASSIS_CLK_REQ_DURATION_MASK, - CHASSIS_CLK_REQ_DURATION(0xf)); + intel_de_rmw(display, SOUTH_CHICKEN1, + CHASSIS_CLK_REQ_DURATION_MASK, + CHASSIS_CLK_REQ_DURATION(0xf)); } - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, - spt_hotplug_mask(encoder->hpd_pin), - spt_hotplug_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + spt_hotplug_mask(encoder->hpd_pin), + spt_hotplug_enables(encoder)); - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG2, - spt_hotplug2_mask(encoder->hpd_pin), - spt_hotplug2_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG2, + spt_hotplug2_mask(encoder->hpd_pin), + spt_hotplug2_enables(encoder)); } static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; u32 hotplug_irqs, enabled_irqs; if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); enabled_irqs = intel_hpd_enabled_irqs(dev_priv, 
dev_priv->display.hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); @@ -1271,23 +1290,25 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder) static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + /* * Enable digital hotplug on the CPU, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) * The pulse duration bits are reserved on HSW+. */ - intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, - intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); + intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, + intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); } static void ilk_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, - ilk_hotplug_mask(encoder->hpd_pin), - ilk_hotplug_enables(encoder)); + intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, + ilk_hotplug_mask(encoder->hpd_pin), + ilk_hotplug_enables(encoder)); ibx_hpd_enable_detection(encoder); } @@ -1350,18 +1371,20 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder) static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) { - intel_uncore_rmw(&dev_priv->uncore, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); + struct intel_display *display = &dev_priv->display; + + intel_de_rmw(display, PCH_PORT_HOTPLUG, + intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask), + intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); } static void bxt_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_uncore_rmw(&i915->uncore, PCH_PORT_HOTPLUG, - bxt_hotplug_mask(encoder->hpd_pin), - bxt_hotplug_enables(encoder)); + intel_de_rmw(display, PCH_PORT_HOTPLUG, + bxt_hotplug_mask(encoder->hpd_pin), + bxt_hotplug_enables(encoder)); } static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) From 1e40b20ed47646eea52d8521b8b2ff0bdc716106 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:46:02 +0200 Subject: [PATCH 0166/1627] drm/i915/hotplug: convert intel_hotplug_irq.[ch] to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert as much as possible of intel_hotplug_irq.[ch] to struct intel_display. 
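For illustration only (not part of the diff below), the conversion generally follows one pattern: a function that used to take struct drm_i915_private now takes struct intel_display, reads display state directly through it, and derives the legacy i915 pointer with to_i915(display->drm) only where a not-yet-converted interface still needs it. The example_hpd_irq_setup() name in this sketch is hypothetical; it simply mirrors the shape of the converted ibx_hpd_irq_setup() further down:

static void example_hpd_irq_setup(struct intel_display *display)
{
	struct drm_i915_private *i915 = to_i915(display->drm);
	u32 hotplug_irqs, enabled_irqs;

	/* display state is reached directly through @display */
	enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd);
	hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd);

	/* interfaces that are not converted yet still take the i915 pointer */
	ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs);
}
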
Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/8ddf27ea31b543f88c5f124f029c2eaa06a9aae7.1742481923.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_crt.c | 4 +- .../gpu/drm/i915/display/intel_display_irq.c | 33 +- drivers/gpu/drm/i915/display/intel_hotplug.c | 6 +- .../gpu/drm/i915/display/intel_hotplug_irq.c | 474 +++++++++--------- .../gpu/drm/i915/display/intel_hotplug_irq.h | 28 +- drivers/gpu/drm/i915/i915_irq.c | 18 +- 6 files changed, 276 insertions(+), 287 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index bca91d49cb96..a7f360f89410 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -606,7 +606,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) for (i = 0; i < tries ; i++) { /* turn on the FORCE_DETECT */ - i915_hotplug_interrupt_update(dev_priv, + i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, CRT_HOTPLUG_FORCE_DETECT); /* wait for FORCE_DETECT to go off */ @@ -624,7 +624,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) intel_de_write(display, PORT_HOTPLUG_STAT(display), CRT_HOTPLUG_INT_STATUS); - i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0); + i915_hotplug_interrupt_update(display, CRT_HOTPLUG_FORCE_DETECT, 0); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index aa23bb817805..b37bcb8fb2e8 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -675,7 +675,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; - ibx_hpd_irq_handler(dev_priv, hotplug_trigger); + ibx_hpd_irq_handler(display, hotplug_trigger); if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> @@ -812,7 +812,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; - ibx_hpd_irq_handler(dev_priv, hotplug_trigger); + ibx_hpd_irq_handler(display, hotplug_trigger); if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> @@ -901,7 +901,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; if (hotplug_trigger) - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + ilk_hpd_irq_handler(display, hotplug_trigger); if (de_iir & DE_AUX_CHANNEL_A) intel_dp_aux_irq_handler(display); @@ -953,7 +953,7 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; if (hotplug_trigger) - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + ilk_hpd_irq_handler(display, hotplug_trigger); if (de_iir & DE_ERR_INT_IVB) ivb_err_int_handler(dev_priv); @@ -1382,7 +1382,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) iir = intel_de_read(display, GEN11_DE_HPD_IIR); if (iir) { intel_de_write(display, GEN11_DE_HPD_IIR, iir); - gen11_hpd_irq_handler(dev_priv, iir); + gen11_hpd_irq_handler(display, iir); } else { drm_err_ratelimited(&dev_priv->drm, "The master control interrupt lied, (DE HPD)!\n"); @@ -1405,14 +1405,14 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 
master_ctl) u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { - bxt_hpd_irq_handler(dev_priv, hotplug_trigger); + bxt_hpd_irq_handler(display, hotplug_trigger); found = true; } } else if (IS_BROADWELL(dev_priv)) { u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { - ilk_hpd_irq_handler(dev_priv, hotplug_trigger); + ilk_hpd_irq_handler(display, hotplug_trigger); found = true; } } @@ -1498,12 +1498,12 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); if (iir) { if (pica_iir) - xelpdp_pica_irq_handler(dev_priv, pica_iir); + xelpdp_pica_irq_handler(display, pica_iir); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_handler(dev_priv, iir); + icp_irq_handler(display, iir); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - spt_irq_handler(dev_priv, iir); + spt_irq_handler(display, iir); else cpt_irq_handler(dev_priv, iir); } else { @@ -1904,7 +1904,7 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) gen2_error_reset(to_intel_uncore(display->drm), VLV_ERROR_REGS); - i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); + i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0); intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0); i9xx_pipestat_irq_reset(dev_priv); @@ -1924,7 +1924,7 @@ void i9xx_display_irq_reset(struct drm_i915_private *i915) struct intel_display *display = &i915->display; if (I915_HAS_HOTPLUG(i915)) { - i915_hotplug_interrupt_update(i915, 0xffffffff, 0); + i915_hotplug_interrupt_update(display, 0xffffffff, 0); intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0); } @@ -2348,10 +2348,11 @@ void dg1_de_irq_postinstall(struct drm_i915_private *i915) void intel_display_irq_init(struct drm_i915_private *i915) { - i915->drm.vblank_disable_immediate = true; + struct intel_display *display = &i915->display; - intel_hotplug_irq_init(i915); + display->drm->vblank_disable_immediate = true; - INIT_WORK(&i915->display.irq.vblank_dc_work, - intel_display_vblank_dc_work); + intel_hotplug_irq_init(display); + + INIT_WORK(&display->irq.vblank_dc_work, intel_display_vblank_dc_work); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 7683b3ce124d..fcc3f546cb97 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -305,7 +305,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) display->hotplug.stats[pin].state = HPD_ENABLED; } - intel_hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(display); spin_unlock_irq(&dev_priv->irq_lock); @@ -696,7 +696,7 @@ void intel_hpd_irq_handler(struct intel_display *display, * happens later in our hotplug work. */ if (storm_detected) - intel_hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(display); /* * Our hotplug handler can grab modeset locks (by calling down into the @@ -745,7 +745,7 @@ void intel_hpd_init(struct intel_display *display) * just to make the assert_spin_locked checks happy. 
*/ spin_lock_irq(&dev_priv->irq_lock); - intel_hpd_irq_setup(dev_priv); + intel_hpd_irq_setup(display); spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index f24c65478742..e44ae6acc55f 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -131,30 +131,31 @@ static const u32 hpd_mtp[HPD_NUM_PINS] = { [HPD_PORT_TC4] = SDE_TC_HOTPLUG_ICP(HPD_PORT_TC4), }; -static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) +static void intel_hpd_init_pins(struct intel_display *display) { - struct intel_hotplug *hpd = &dev_priv->display.hotplug; + struct drm_i915_private *dev_priv = to_i915(display->drm); + struct intel_hotplug *hpd = &display->hotplug; - if (HAS_GMCH(dev_priv)) { - if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || - IS_CHERRYVIEW(dev_priv)) + if (HAS_GMCH(display)) { + if (display->platform.g4x || display->platform.valleyview || + display->platform.cherryview) hpd->hpd = hpd_status_g4x; else hpd->hpd = hpd_status_i915; return; } - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) hpd->hpd = hpd_xelpdp; - else if (DISPLAY_VER(dev_priv) >= 11) + else if (DISPLAY_VER(display) >= 11) hpd->hpd = hpd_gen11; - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if (display->platform.geminilake || display->platform.broxton) hpd->hpd = hpd_bxt; - else if (DISPLAY_VER(dev_priv) == 9) + else if (DISPLAY_VER(display) == 9) hpd->hpd = NULL; /* no north HPD on SKL */ - else if (DISPLAY_VER(dev_priv) >= 8) + else if (DISPLAY_VER(display) >= 8) hpd->hpd = hpd_bdw; - else if (DISPLAY_VER(dev_priv) >= 7) + else if (DISPLAY_VER(display) >= 7) hpd->hpd = hpd_ivb; else hpd->hpd = hpd_ilk; @@ -180,20 +181,20 @@ static void intel_hpd_init_pins(struct drm_i915_private *dev_priv) } /* For display hotplug interrupt */ -void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, +void i915_hotplug_interrupt_update_locked(struct intel_display *display, u32 mask, u32 bits) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, bits & ~mask); + drm_WARN_ON(display->drm, bits & ~mask); intel_de_rmw(display, PORT_HOTPLUG_EN(display), mask, bits); } /** * i915_hotplug_interrupt_update - update hotplug interrupt enable - * @dev_priv: driver private + * @display: display device * @mask: bits to update * @bits: bits to enable * NOTE: the HPD enable bits are modified both inside and outside @@ -203,12 +204,14 @@ void i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, * held already, this function acquires the lock itself. A non-locking * version is also available. */ -void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv, +void i915_hotplug_interrupt_update(struct intel_display *display, u32 mask, u32 bits) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + spin_lock_irq(&dev_priv->irq_lock); - i915_hotplug_interrupt_update_locked(dev_priv, mask, bits); + i915_hotplug_interrupt_update_locked(display, mask, bits); spin_unlock_irq(&dev_priv->irq_lock); } @@ -340,7 +343,7 @@ static bool i9xx_port_hotplug_long_detect(enum hpd_pin pin, u32 val) * * Note that the caller is expected to zero out the masks initially. 
*/ -static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, +static void intel_get_hpd_pins(struct intel_display *display, u32 *pin_mask, u32 *long_mask, u32 hotplug_trigger, u32 dig_hotplug_reg, const u32 hpd[HPD_NUM_PINS], @@ -360,37 +363,37 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv, *long_mask |= BIT(pin); } - drm_dbg(&dev_priv->drm, - "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", - hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); + drm_dbg_kms(display->drm, + "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask); } -static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv, +static u32 intel_hpd_enabled_irqs(struct intel_display *display, const u32 hpd[HPD_NUM_PINS]) { struct intel_encoder *encoder; u32 enabled_irqs = 0; - for_each_intel_encoder(&dev_priv->drm, encoder) - if (dev_priv->display.hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) + for_each_intel_encoder(display->drm, encoder) + if (display->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) enabled_irqs |= hpd[encoder->hpd_pin]; return enabled_irqs; } -static u32 intel_hpd_hotplug_irqs(struct drm_i915_private *dev_priv, +static u32 intel_hpd_hotplug_irqs(struct intel_display *display, const u32 hpd[HPD_NUM_PINS]) { struct intel_encoder *encoder; u32 hotplug_irqs = 0; - for_each_intel_encoder(&dev_priv->drm, encoder) + for_each_intel_encoder(display->drm, encoder) hotplug_irqs |= hpd[encoder->hpd_pin]; return hotplug_irqs; } -static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915, +static u32 intel_hpd_hotplug_mask(struct intel_display *display, hotplug_mask_func hotplug_mask) { enum hpd_pin pin; @@ -402,26 +405,25 @@ static u32 intel_hpd_hotplug_mask(struct drm_i915_private *i915, return hotplug; } -static u32 intel_hpd_hotplug_enables(struct drm_i915_private *i915, +static u32 intel_hpd_hotplug_enables(struct intel_display *display, hotplug_enables_func hotplug_enables) { struct intel_encoder *encoder; u32 hotplug = 0; - for_each_intel_encoder(&i915->drm, encoder) + for_each_intel_encoder(display->drm, encoder) hotplug |= hotplug_enables(encoder); return hotplug; } -u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) +u32 i9xx_hpd_irq_ack(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 hotplug_status = 0, hotplug_status_mask; int i; - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (display->platform.g4x || + display->platform.valleyview || display->platform.cherryview) hotplug_status_mask = HOTPLUG_INT_STATUS_G4X | DP_AUX_CHANNEL_MASK_INT_STATUS_G4X; else @@ -448,43 +450,41 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) hotplug_status); } - drm_WARN_ONCE(&dev_priv->drm, 1, + drm_WARN_ONCE(display->drm, 1, "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", intel_de_read(display, PORT_HOTPLUG_STAT(display))); return hotplug_status; } -void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status) +void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status) { - struct intel_display *display = &dev_priv->display; u32 pin_mask = 0, long_mask = 0; u32 hotplug_trigger; - if (IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + if (display->platform.g4x || + display->platform.valleyview || display->platform.cherryview) hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; else hotplug_trigger = 
hotplug_status & HOTPLUG_INT_STATUS_I915; if (hotplug_trigger) { - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, hotplug_trigger, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, i9xx_port_hotplug_long_detect); intel_hpd_irq_handler(display, pin_mask, long_mask); } - if ((IS_G4X(dev_priv) || - IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if ((display->platform.g4x || + display->platform.valleyview || display->platform.cherryview) && hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) intel_dp_aux_irq_handler(display); } -void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger) { - struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; /* @@ -506,44 +506,43 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) if (!hotplug_trigger) return; - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, pch_port_hotplug_long_detect); intel_hpd_irq_handler(display, pin_mask, long_mask); } -void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) +void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir) { - struct intel_display *display = &i915->display; enum hpd_pin pin; u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK); u32 trigger_aux = iir & XELPDP_AUX_TC_MASK; u32 pin_mask = 0, long_mask = 0; - if (DISPLAY_VER(i915) >= 20) + if (DISPLAY_VER(display) >= 20) trigger_aux |= iir & XE2LPD_AUX_DDI_MASK; for (pin = HPD_PORT_TC1; pin <= HPD_PORT_TC4; pin++) { u32 val; - if (!(i915->display.hotplug.hpd[pin] & hotplug_trigger)) + if (!(display->hotplug.hpd[pin] & hotplug_trigger)) continue; pin_mask |= BIT(pin); - val = intel_de_read(i915, XELPDP_PORT_HOTPLUG_CTL(pin)); - intel_de_write(i915, XELPDP_PORT_HOTPLUG_CTL(pin), val); + val = intel_de_read(display, XELPDP_PORT_HOTPLUG_CTL(pin)); + intel_de_write(display, XELPDP_PORT_HOTPLUG_CTL(pin), val); if (val & (XELPDP_DP_ALT_HPD_LONG_DETECT | XELPDP_TBT_HPD_LONG_DETECT)) long_mask |= BIT(pin); } if (pin_mask) { - drm_dbg(&i915->drm, - "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", - hotplug_trigger, pin_mask, long_mask); + drm_dbg_kms(display->drm, + "pica hotplug event received, stat 0x%08x, pins 0x%08x, long 0x%08x\n", + hotplug_trigger, pin_mask, long_mask); intel_hpd_irq_handler(display, pin_mask, long_mask); } @@ -552,13 +551,13 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir) intel_dp_aux_irq_handler(display); if (!pin_mask && !trigger_aux) - drm_err(&i915->drm, + drm_err(display->drm, "Unexpected DE HPD/AUX interrupt 0x%08x\n", iir); } -void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +void icp_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_HOTPLUG_MASK_ICP; u32 tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_MASK_ICP; u32 pin_mask = 0, long_mask = 0; @@ -571,9 +570,9 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_DDI, 0, 0); spin_unlock(&dev_priv->irq_lock); - 
intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, icp_ddi_port_hotplug_long_detect); } @@ -582,9 +581,9 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) dig_hotplug_reg = intel_de_rmw(display, SHOTPLUG_CTL_TC, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, icp_tc_port_hotplug_long_detect); } @@ -595,9 +594,8 @@ void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_gmbus_irq_handler(display); } -void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +void spt_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & ~SDE_PORTE_HOTPLUG_SPT; u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; @@ -608,9 +606,9 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, spt_port_hotplug_long_detect); } @@ -619,9 +617,9 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG2, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.pch_hpd, + display->hotplug.pch_hpd, spt_port_hotplug2_long_detect); } @@ -632,39 +630,36 @@ void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_gmbus_irq_handler(display); } -void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger) { - struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, ilk_port_hotplug_long_detect); intel_hpd_irq_handler(display, pin_mask, long_mask); } -void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger) +void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger) { - struct intel_display *display = &dev_priv->display; u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; dig_hotplug_reg = intel_de_rmw(display, PCH_PORT_HOTPLUG, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, bxt_port_hotplug_long_detect); intel_hpd_irq_handler(display, pin_mask, long_mask); } -void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +void gen11_hpd_irq_handler(struct intel_display *display, u32 iir) { - struct intel_display *display = &dev_priv->display; u32 pin_mask = 0, long_mask = 0; u32 trigger_tc = iir & GEN11_DE_TC_HOTPLUG_MASK; u32 trigger_tbt = iir & 
GEN11_DE_TBT_HOTPLUG_MASK; @@ -674,9 +669,9 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) dig_hotplug_reg = intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, gen11_port_hotplug_long_detect); } @@ -685,16 +680,16 @@ void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) dig_hotplug_reg = intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, 0, 0); - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, + intel_get_hpd_pins(display, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, - dev_priv->display.hotplug.hpd, + display->hotplug.hpd, gen11_port_hotplug_long_detect); } if (pin_mask) intel_hpd_irq_handler(display, pin_mask, long_mask); else - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unexpected DE HPD interrupt 0x%08x\n", iir); } @@ -740,18 +735,16 @@ static u32 ibx_hotplug_enables(struct intel_encoder *encoder) } } -static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void ibx_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - /* * Enable digital hotplug on the PCH, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec). * The pulse duration bits are reserved on LPT+. */ intel_de_rmw(display, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, ibx_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables)); + intel_hpd_hotplug_mask(display, ibx_hotplug_mask), + intel_hpd_hotplug_enables(display, ibx_hotplug_enables)); } static void ibx_hpd_enable_detection(struct intel_encoder *encoder) @@ -763,16 +756,17 @@ static void ibx_hpd_enable_detection(struct intel_encoder *encoder) ibx_hotplug_enables(encoder)); } -static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void ibx_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - ibx_hpd_detection_setup(dev_priv); + ibx_hpd_detection_setup(display); } static u32 icp_ddi_hotplug_mask(enum hpd_pin hpd_pin) @@ -813,13 +807,11 @@ static u32 icp_tc_hotplug_enables(struct intel_encoder *encoder) return icp_tc_hotplug_mask(encoder->hpd_pin); } -static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void icp_ddi_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - intel_de_rmw(display, SHOTPLUG_CTL_DDI, - intel_hpd_hotplug_mask(dev_priv, icp_ddi_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables)); + intel_hpd_hotplug_mask(display, icp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(display, icp_ddi_hotplug_enables)); } static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder) @@ -831,13 +823,11 @@ static void icp_ddi_hpd_enable_detection(struct intel_encoder *encoder) icp_ddi_hotplug_enables(encoder)); } -static void icp_tc_hpd_detection_setup(struct drm_i915_private 
*dev_priv) +static void icp_tc_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - intel_de_rmw(display, SHOTPLUG_CTL_TC, - intel_hpd_hotplug_mask(dev_priv, icp_tc_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables)); + intel_hpd_hotplug_mask(display, icp_tc_hotplug_mask), + intel_hpd_hotplug_enables(display, icp_tc_hotplug_enables)); } static void icp_tc_hpd_enable_detection(struct intel_encoder *encoder) @@ -855,13 +845,13 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder) icp_tc_hpd_enable_detection(encoder); } -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void icp_hpd_irq_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); /* * We reduce the value to 250us to be able to detect SHPD when an external display @@ -871,8 +861,8 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - icp_ddi_hpd_detection_setup(dev_priv); - icp_tc_hpd_detection_setup(dev_priv); + icp_ddi_hpd_detection_setup(display); + icp_tc_hpd_detection_setup(display); } static u32 gen11_hotplug_mask(enum hpd_pin hpd_pin) @@ -895,9 +885,8 @@ static u32 gen11_hotplug_enables(struct intel_encoder *encoder) return gen11_hotplug_mask(encoder->hpd_pin); } -static void dg1_hpd_invert(struct drm_i915_private *i915) +static void dg1_hpd_invert(struct intel_display *display) { - struct intel_display *display = &i915->display; u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | INVERT_DDIC_HPD | @@ -907,25 +896,23 @@ static void dg1_hpd_invert(struct drm_i915_private *i915) static void dg1_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - dg1_hpd_invert(i915); + dg1_hpd_invert(display); icp_hpd_enable_detection(encoder); } -static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void dg1_hpd_irq_setup(struct intel_display *display) { - dg1_hpd_invert(dev_priv); - icp_hpd_irq_setup(dev_priv); + dg1_hpd_invert(display); + icp_hpd_irq_setup(display); } -static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void gen11_tc_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - intel_de_rmw(display, GEN11_TC_HOTPLUG_CTL, - intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); + intel_hpd_hotplug_mask(display, gen11_hotplug_mask), + intel_hpd_hotplug_enables(display, gen11_hotplug_enables)); } static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder) @@ -937,13 +924,11 @@ static void gen11_tc_hpd_enable_detection(struct intel_encoder *encoder) gen11_hotplug_enables(encoder)); } -static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void gen11_tbt_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - 
intel_de_rmw(display, GEN11_TBT_HOTPLUG_CTL, - intel_hpd_hotplug_mask(dev_priv, gen11_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables)); + intel_hpd_hotplug_mask(display, gen11_hotplug_mask), + intel_hpd_hotplug_enables(display, gen11_hotplug_enables)); } static void gen11_tbt_hpd_enable_detection(struct intel_encoder *encoder) @@ -966,23 +951,23 @@ static void gen11_hpd_enable_detection(struct intel_encoder *encoder) icp_hpd_enable_detection(encoder); } -static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void gen11_hpd_irq_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); intel_de_rmw(display, GEN11_DE_HPD_IMR, hotplug_irqs, ~enabled_irqs & hotplug_irqs); intel_de_posting_read(display, GEN11_DE_HPD_IMR); - gen11_tc_hpd_detection_setup(dev_priv); - gen11_tbt_hpd_detection_setup(dev_priv); + gen11_tc_hpd_detection_setup(display); + gen11_tbt_hpd_detection_setup(display); if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv); + icp_hpd_irq_setup(display); } static u32 mtp_ddi_hotplug_mask(enum hpd_pin hpd_pin) @@ -1019,39 +1004,39 @@ static u32 mtp_tc_hotplug_enables(struct intel_encoder *encoder) return mtp_tc_hotplug_mask(encoder->hpd_pin); } -static void mtp_ddi_hpd_detection_setup(struct drm_i915_private *i915) +static void mtp_ddi_hpd_detection_setup(struct intel_display *display) { - intel_de_rmw(i915, SHOTPLUG_CTL_DDI, - intel_hpd_hotplug_mask(i915, mtp_ddi_hotplug_mask), - intel_hpd_hotplug_enables(i915, mtp_ddi_hotplug_enables)); + intel_de_rmw(display, SHOTPLUG_CTL_DDI, + intel_hpd_hotplug_mask(display, mtp_ddi_hotplug_mask), + intel_hpd_hotplug_enables(display, mtp_ddi_hotplug_enables)); } static void mtp_ddi_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_DDI, mtp_ddi_hotplug_mask(encoder->hpd_pin), mtp_ddi_hotplug_enables(encoder)); } -static void mtp_tc_hpd_detection_setup(struct drm_i915_private *i915) +static void mtp_tc_hpd_detection_setup(struct intel_display *display) { - intel_de_rmw(i915, SHOTPLUG_CTL_TC, - intel_hpd_hotplug_mask(i915, mtp_tc_hotplug_mask), - intel_hpd_hotplug_enables(i915, mtp_tc_hotplug_enables)); + intel_de_rmw(display, SHOTPLUG_CTL_TC, + intel_hpd_hotplug_mask(display, mtp_tc_hotplug_mask), + intel_hpd_hotplug_enables(display, mtp_tc_hotplug_enables)); } static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - intel_de_rmw(i915, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_DDI, mtp_tc_hotplug_mask(encoder->hpd_pin), mtp_tc_hotplug_enables(encoder)); } -static void mtp_hpd_invert(struct drm_i915_private *i915) +static void mtp_hpd_invert(struct intel_display *display) { u32 val = (INVERT_DDIA_HPD | INVERT_DDIB_HPD | @@ -1062,49 +1047,51 @@ static void mtp_hpd_invert(struct drm_i915_private *i915) 
INVERT_TC4_HPD | INVERT_DDID_HPD_MTP | INVERT_DDIE_HPD); - intel_de_rmw(i915, SOUTH_CHICKEN1, 0, val); + intel_de_rmw(display, SOUTH_CHICKEN1, 0, val); } static void mtp_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - mtp_hpd_invert(i915); + mtp_hpd_invert(display); mtp_ddi_hpd_enable_detection(encoder); mtp_tc_hpd_enable_detection(encoder); } -static void mtp_hpd_irq_setup(struct drm_i915_private *i915) +static void mtp_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); /* * Use 250us here to align with the DP1.4a(Table 3-4) spec as to what the * SHPD_FILTER_CNT value should be. */ - intel_de_write(i915, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); + intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); - mtp_hpd_invert(i915); + mtp_hpd_invert(display); ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); - mtp_ddi_hpd_detection_setup(i915); - mtp_tc_hpd_detection_setup(i915); + mtp_ddi_hpd_detection_setup(display); + mtp_tc_hpd_detection_setup(display); } -static void xe2lpd_sde_hpd_irq_setup(struct drm_i915_private *i915) +static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *i915 = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); - mtp_ddi_hpd_detection_setup(i915); - mtp_tc_hpd_detection_setup(i915); + mtp_ddi_hpd_detection_setup(display); + mtp_tc_hpd_detection_setup(display); } static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin) @@ -1112,7 +1099,7 @@ static bool is_xelpdp_pica_hpd_pin(enum hpd_pin hpd_pin) return hpd_pin >= HPD_PORT_TC1 && hpd_pin <= HPD_PORT_TC4; } -static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915, +static void _xelpdp_pica_hpd_detection_setup(struct intel_display *display, enum hpd_pin hpd_pin, bool enable) { u32 mask = XELPDP_TBT_HOTPLUG_ENABLE | @@ -1121,18 +1108,18 @@ static void _xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915, if (!is_xelpdp_pica_hpd_pin(hpd_pin)) return; - intel_de_rmw(i915, XELPDP_PORT_HOTPLUG_CTL(hpd_pin), + intel_de_rmw(display, XELPDP_PORT_HOTPLUG_CTL(hpd_pin), mask, enable ? 
mask : 0); } static void xelpdp_pica_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - _xelpdp_pica_hpd_detection_setup(i915, encoder->hpd_pin, true); + _xelpdp_pica_hpd_detection_setup(display, encoder->hpd_pin, true); } -static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915) +static void xelpdp_pica_hpd_detection_setup(struct intel_display *display) { struct intel_encoder *encoder; u32 available_pins = 0; @@ -1140,11 +1127,11 @@ static void xelpdp_pica_hpd_detection_setup(struct drm_i915_private *i915) BUILD_BUG_ON(BITS_PER_TYPE(available_pins) < HPD_NUM_PINS); - for_each_intel_encoder(&i915->drm, encoder) + for_each_intel_encoder(display->drm, encoder) available_pins |= BIT(encoder->hpd_pin); for_each_hpd_pin(pin) - _xelpdp_pica_hpd_detection_setup(i915, pin, available_pins & BIT(pin)); + _xelpdp_pica_hpd_detection_setup(display, pin, available_pins & BIT(pin)); } static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) @@ -1153,24 +1140,24 @@ static void xelpdp_hpd_enable_detection(struct intel_encoder *encoder) mtp_hpd_enable_detection(encoder); } -static void xelpdp_hpd_irq_setup(struct drm_i915_private *i915) +static void xelpdp_hpd_irq_setup(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(i915, i915->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(i915, i915->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - intel_de_rmw(i915, PICAINTERRUPT_IMR, hotplug_irqs, + intel_de_rmw(display, PICAINTERRUPT_IMR, hotplug_irqs, ~enabled_irqs & hotplug_irqs); intel_de_posting_read(display, PICAINTERRUPT_IMR); - xelpdp_pica_hpd_detection_setup(i915); + xelpdp_pica_hpd_detection_setup(display); if (INTEL_PCH_TYPE(i915) >= PCH_LNL) - xe2lpd_sde_hpd_irq_setup(i915); + xe2lpd_sde_hpd_irq_setup(display); else if (INTEL_PCH_TYPE(i915) >= PCH_MTL) - mtp_hpd_irq_setup(i915); + mtp_hpd_irq_setup(display); } static u32 spt_hotplug_mask(enum hpd_pin hpd_pin) @@ -1209,9 +1196,9 @@ static u32 spt_hotplug2_enables(struct intel_encoder *encoder) return spt_hotplug2_mask(encoder->hpd_pin); } -static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void spt_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(dev_priv)) { @@ -1221,12 +1208,12 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) /* Enable digital hotplug on the PCH */ intel_de_rmw(display, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, spt_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables)); + intel_hpd_hotplug_mask(display, spt_hotplug_mask), + intel_hpd_hotplug_enables(display, spt_hotplug_enables)); intel_de_rmw(display, PCH_PORT_HOTPLUG2, - intel_hpd_hotplug_mask(dev_priv, spt_hotplug2_mask), - intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables)); + intel_hpd_hotplug_mask(display, spt_hotplug2_mask), + intel_hpd_hotplug_enables(display, spt_hotplug2_enables)); } static void spt_hpd_enable_detection(struct intel_encoder *encoder) @@ -1250,20 
+1237,20 @@ static void spt_hpd_enable_detection(struct intel_encoder *encoder) spt_hotplug2_enables(encoder)); } -static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void spt_hpd_irq_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.pch_hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - spt_hpd_detection_setup(dev_priv); + spt_hpd_detection_setup(display); } static u32 ilk_hotplug_mask(enum hpd_pin hpd_pin) @@ -1288,18 +1275,16 @@ static u32 ilk_hotplug_enables(struct intel_encoder *encoder) } } -static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void ilk_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - /* * Enable digital hotplug on the CPU, and configure the DP short pulse * duration to 2ms (which is the minimum in the Display Port spec) * The pulse duration bits are reserved on HSW+. */ intel_de_rmw(display, DIGITAL_PORT_HOTPLUG_CNTRL, - intel_hpd_hotplug_mask(dev_priv, ilk_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables)); + intel_hpd_hotplug_mask(display, ilk_hotplug_mask), + intel_hpd_hotplug_enables(display, ilk_hotplug_enables)); } static void ilk_hpd_enable_detection(struct intel_encoder *encoder) @@ -1313,21 +1298,22 @@ static void ilk_hpd_enable_detection(struct intel_encoder *encoder) ibx_hpd_enable_detection(encoder); } -static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void ilk_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - if (DISPLAY_VER(dev_priv) >= 8) + if (DISPLAY_VER(display) >= 8) bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); else ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); - ilk_hpd_detection_setup(dev_priv); + ilk_hpd_detection_setup(display); - ibx_hpd_irq_setup(dev_priv); + ibx_hpd_irq_setup(display); } static u32 bxt_hotplug_mask(enum hpd_pin hpd_pin) @@ -1369,13 +1355,11 @@ static u32 bxt_hotplug_enables(struct intel_encoder *encoder) } } -static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) +static void bxt_hpd_detection_setup(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - intel_de_rmw(display, PCH_PORT_HOTPLUG, - intel_hpd_hotplug_mask(dev_priv, bxt_hotplug_mask), - intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables)); + intel_hpd_hotplug_mask(display, bxt_hotplug_mask), + intel_hpd_hotplug_enables(display, bxt_hotplug_enables)); } static void bxt_hpd_enable_detection(struct intel_encoder *encoder) @@ -1387,42 +1371,44 @@ static void 
bxt_hpd_enable_detection(struct intel_encoder *encoder) bxt_hotplug_enables(encoder)); } -static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void bxt_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->display.hotplug.hpd); - hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->display.hotplug.hpd); + enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); + hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); - bxt_hpd_detection_setup(dev_priv); + bxt_hpd_detection_setup(display); } -static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915) +static void g45_hpd_peg_band_gap_wa(struct intel_display *display) { /* * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written * 0xd. Failure to do so will result in spurious interrupts being * generated on the port when a cable is not attached. */ - intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd); + intel_de_rmw(display, PEG_BAND_GAP_DATA, 0xf, 0xd); } static void i915_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin]; - if (IS_G45(i915)) - g45_hpd_peg_band_gap_wa(i915); + if (display->platform.g45) + g45_hpd_peg_band_gap_wa(display); /* HPD sense and interrupt enable are one and the same */ - i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en); + i915_hotplug_interrupt_update(display, hotplug_en, hotplug_en); } -static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void i915_hpd_irq_setup(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_en; lockdep_assert_held(&dev_priv->irq_lock); @@ -1431,20 +1417,20 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) * Note HDMI and DP share hotplug bits. Enable bits are the same for all * generations. */ - hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915); + hotplug_en = intel_hpd_enabled_irqs(display, hpd_mask_i915); /* * Programming the CRT detection parameters tends to generate a spurious * hotplug event about three seconds later. So just do it once. 
*/ - if (IS_G4X(dev_priv)) + if (display->platform.g4x) hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; - if (IS_G45(dev_priv)) - g45_hpd_peg_band_gap_wa(dev_priv); + if (display->platform.g45) + g45_hpd_peg_band_gap_wa(display); /* Ignore TV since it's buggy */ - i915_hotplug_interrupt_update_locked(dev_priv, + i915_hotplug_interrupt_update_locked(display, HOTPLUG_INT_EN_MASK | CRT_HOTPLUG_VOLTAGE_COMPARE_MASK | CRT_HOTPLUG_ACTIVATION_PERIOD_64, @@ -1453,7 +1439,7 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv) struct intel_hotplug_funcs { /* Enable HPD sense and interrupts for all present encoders */ - void (*hpd_irq_setup)(struct drm_i915_private *i915); + void (*hpd_irq_setup)(struct intel_display *display); /* Enable HPD sense for a single encoder */ void (*hpd_enable_detection)(struct intel_encoder *encoder); }; @@ -1476,49 +1462,49 @@ HPD_FUNCS(ilk); void intel_hpd_enable_detection(struct intel_encoder *encoder) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (i915->display.funcs.hotplug) - i915->display.funcs.hotplug->hpd_enable_detection(encoder); + if (display->funcs.hotplug) + display->funcs.hotplug->hpd_enable_detection(encoder); } -void intel_hpd_irq_setup(struct drm_i915_private *i915) +void intel_hpd_irq_setup(struct intel_display *display) { - if ((IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) && - !i915->display.irq.vlv_display_irqs_enabled) + if ((display->platform.valleyview || display->platform.cherryview) && + !display->irq.vlv_display_irqs_enabled) return; - if (i915->display.funcs.hotplug) - i915->display.funcs.hotplug->hpd_irq_setup(i915); + if (display->funcs.hotplug) + display->funcs.hotplug->hpd_irq_setup(display); } -void intel_hotplug_irq_init(struct drm_i915_private *i915) +void intel_hotplug_irq_init(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); - intel_hpd_init_pins(i915); + intel_hpd_init_pins(display); intel_hpd_init_early(display); - if (HAS_GMCH(i915)) { - if (I915_HAS_HOTPLUG(i915)) - i915->display.funcs.hotplug = &i915_hpd_funcs; + if (HAS_GMCH(display)) { + if (I915_HAS_HOTPLUG(display)) + display->funcs.hotplug = &i915_hpd_funcs; } else { if (HAS_PCH_DG2(i915)) - i915->display.funcs.hotplug = &icp_hpd_funcs; + display->funcs.hotplug = &icp_hpd_funcs; else if (HAS_PCH_DG1(i915)) - i915->display.funcs.hotplug = &dg1_hpd_funcs; - else if (DISPLAY_VER(i915) >= 14) - i915->display.funcs.hotplug = &xelpdp_hpd_funcs; - else if (DISPLAY_VER(i915) >= 11) - i915->display.funcs.hotplug = &gen11_hpd_funcs; - else if (IS_GEMINILAKE(i915) || IS_BROXTON(i915)) - i915->display.funcs.hotplug = &bxt_hpd_funcs; + display->funcs.hotplug = &dg1_hpd_funcs; + else if (DISPLAY_VER(display) >= 14) + display->funcs.hotplug = &xelpdp_hpd_funcs; + else if (DISPLAY_VER(display) >= 11) + display->funcs.hotplug = &gen11_hpd_funcs; + else if (display->platform.geminilake || display->platform.broxton) + display->funcs.hotplug = &bxt_hpd_funcs; else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) - i915->display.funcs.hotplug = &icp_hpd_funcs; + display->funcs.hotplug = &icp_hpd_funcs; else if (INTEL_PCH_TYPE(i915) >= PCH_SPT) - i915->display.funcs.hotplug = &spt_hpd_funcs; + display->funcs.hotplug = &spt_hpd_funcs; else - i915->display.funcs.hotplug = &ilk_hpd_funcs; + display->funcs.hotplug = &ilk_hpd_funcs; } } diff --git 
a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h index e4db752df096..9063bb02a2e9 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.h @@ -8,28 +8,28 @@ #include -struct drm_i915_private; +struct intel_display; struct intel_encoder; -u32 i9xx_hpd_irq_ack(struct drm_i915_private *i915); +u32 i9xx_hpd_irq_ack(struct intel_display *display); -void i9xx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_status); -void ibx_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); -void ilk_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); -void gen11_hpd_irq_handler(struct drm_i915_private *i915, u32 iir); -void bxt_hpd_irq_handler(struct drm_i915_private *i915, u32 hotplug_trigger); -void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir); -void icp_irq_handler(struct drm_i915_private *i915, u32 pch_iir); -void spt_irq_handler(struct drm_i915_private *i915, u32 pch_iir); +void i9xx_hpd_irq_handler(struct intel_display *display, u32 hotplug_status); +void ibx_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger); +void ilk_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger); +void gen11_hpd_irq_handler(struct intel_display *display, u32 iir); +void bxt_hpd_irq_handler(struct intel_display *display, u32 hotplug_trigger); +void xelpdp_pica_irq_handler(struct intel_display *display, u32 iir); +void icp_irq_handler(struct intel_display *display, u32 pch_iir); +void spt_irq_handler(struct intel_display *display, u32 pch_iir); -void i915_hotplug_interrupt_update_locked(struct drm_i915_private *i915, +void i915_hotplug_interrupt_update_locked(struct intel_display *display, u32 mask, u32 bits); -void i915_hotplug_interrupt_update(struct drm_i915_private *i915, +void i915_hotplug_interrupt_update(struct intel_display *display, u32 mask, u32 bits); void intel_hpd_enable_detection(struct intel_encoder *encoder); -void intel_hpd_irq_setup(struct drm_i915_private *i915); +void intel_hpd_irq_setup(struct intel_display *display); -void intel_hotplug_irq_init(struct drm_i915_private *i915); +void intel_hotplug_irq_init(struct intel_display *display); #endif /* __INTEL_HOTPLUG_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index de53615571be..3b05eb3f9cbc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -277,7 +277,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir); if (iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_ack(display, &eir, &dpinvgtt); @@ -306,7 +306,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) gen6_rps_irq_handler(&to_gt(dev_priv)->rps, pm_iir); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_handler(display, eir, dpinvgtt); @@ -367,7 +367,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) gen8_gt_irq_handler(to_gt(dev_priv), master_ctl); if (iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_ack(display, &eir, 
&dpinvgtt); @@ -392,7 +392,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_handler(display, eir, dpinvgtt); @@ -952,6 +952,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) static irqreturn_t i915_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; + struct intel_display *display = &dev_priv->display; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -974,7 +975,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (I915_HAS_HOTPLUG(dev_priv) && iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); /* Call regardless, as some status bits might not be * signalled in IIR */ @@ -992,7 +993,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); @@ -1075,6 +1076,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) static irqreturn_t i965_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; + struct intel_display *display = &dev_priv->display; irqreturn_t ret = IRQ_NONE; if (!intel_irqs_enabled(dev_priv)) @@ -1096,7 +1098,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; if (iir & I915_DISPLAY_PORT_INTERRUPT) - hotplug_status = i9xx_hpd_irq_ack(dev_priv); + hotplug_status = i9xx_hpd_irq_ack(display); /* Call regardless, as some status bits might not be * signalled in IIR */ @@ -1119,7 +1121,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) i9xx_error_irq_handler(dev_priv, eir, eir_stuck); if (hotplug_status) - i9xx_hpd_irq_handler(dev_priv, hotplug_status); + i9xx_hpd_irq_handler(display, hotplug_status); i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); From 007232f685e622b9c8813809222aea2f2610760f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:46:03 +0200 Subject: [PATCH 0167/1627] drm/i915/irq: convert intel_display_irq.[ch] interfaces to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert the external interfaces of intel_display_irq.[ch] to struct intel_display. 
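The conversion pattern is the same throughout: the exported entry point takes
struct intel_display *, register access goes through the display pointer, and
anything that still lives in struct drm_i915_private (the irq lock, runtime PM)
is derived locally via to_i915(display->drm). As a rough sketch of the shape,
using an illustrative function name (foo_display_irq_enable is not a function
added by this patch):

    /*
     * Sketch only: enable display interrupt bits with the new calling
     * convention. Register access uses the display handle; the irq_lock
     * still belongs to the core i915 struct, so derive it on demand.
     */
    static void foo_display_irq_enable(struct intel_display *display, u32 bits)
    {
            struct drm_i915_private *i915 = to_i915(display->drm);

            lockdep_assert_held(&i915->irq_lock);

            /* clearing bits in the IMR unmasks (enables) those interrupts */
            intel_de_rmw(display, DEIMR, bits, 0);
            intel_de_posting_read(display, DEIMR);
    }

Callers that are crtc or encoder based pick up the display pointer with
to_intel_display(), as the vblank hooks converted below do, instead of going
through to_i915() first.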
Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/83b552154761d2790d8c774707e8d7612037bdf5.1742481923.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/i9xx_plane.c | 24 ++- .../drm/i915/display/intel_display_driver.c | 2 +- .../gpu/drm/i915/display/intel_display_irq.c | 204 +++++++++--------- .../gpu/drm/i915/display/intel_display_irq.h | 75 ++++--- .../i915/display/intel_display_power_well.c | 12 +- .../drm/i915/display/intel_fifo_underrun.c | 27 +-- .../gpu/drm/i915/display/intel_hotplug_irq.c | 22 +- drivers/gpu/drm/i915/display/intel_pipe_crc.c | 3 +- drivers/gpu/drm/i915/display/intel_tv.c | 4 +- .../drm/i915/display/skl_universal_plane.c | 6 +- drivers/gpu/drm/i915/gt/intel_rps.c | 6 +- drivers/gpu/drm/i915/i915_irq.c | 98 +++++---- drivers/gpu/drm/xe/display/xe_display.c | 12 +- 13 files changed, 263 insertions(+), 232 deletions(-) diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 013295f66d56..5e8344fdfc28 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -630,84 +630,92 @@ vlv_primary_async_flip(struct intel_dsb *dsb, static void bdw_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_enable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); + bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); spin_unlock_irq(&i915->irq_lock); } static void bdw_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_disable_pipe_irq(i915, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); + bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_PRIMARY_FLIP_DONE); spin_unlock_irq(&i915->irq_lock); } static void ivb_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); + ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ivb_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_disable_display_irq(i915, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); + ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE_IVB(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ilk_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_enable_display_irq(i915, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); + ilk_enable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void ilk_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); spin_lock_irq(&i915->irq_lock); - ilk_disable_display_irq(i915, 
DE_PLANE_FLIP_DONE(plane->i9xx_plane)); + ilk_disable_display_irq(display, DE_PLANE_FLIP_DONE(plane->i9xx_plane)); spin_unlock_irq(&i915->irq_lock); } static void vlv_primary_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - i915_enable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); + i915_enable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); spin_unlock_irq(&i915->irq_lock); } static void vlv_primary_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - i915_disable_pipestat(i915, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); + i915_disable_pipestat(display, pipe, PLANE_FLIP_DONE_INT_STATUS_VLV); spin_unlock_irq(&i915->irq_lock); } diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 4035482a2e1b..e4fbf1e5b2bb 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -193,7 +193,7 @@ void intel_display_driver_early_probe(struct intel_display *display) mutex_init(&display->pps.mutex); mutex_init(&display->hdcp.hdcp_mutex); - intel_display_irq_init(i915); + intel_display_irq_init(display); intel_dkl_phy_init(display); intel_color_init_hooks(display); intel_init_cdclk_hooks(display); diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index b37bcb8fb2e8..68f903c35978 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -125,14 +125,14 @@ intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) /** * ilk_update_display_irq - update DEIMR - * @dev_priv: driver private + * @display: display device * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void ilk_update_display_irq(struct drm_i915_private *dev_priv, +void ilk_update_display_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -150,26 +150,26 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, } } -void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits) +void ilk_enable_display_irq(struct intel_display *display, u32 bits) { - ilk_update_display_irq(i915, bits, bits); + ilk_update_display_irq(display, bits, bits); } -void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits) +void ilk_disable_display_irq(struct intel_display *display, u32 bits) { - ilk_update_display_irq(i915, bits, 0); + ilk_update_display_irq(display, bits, 0); } /** * bdw_update_port_irq - update DE port interrupt - * @dev_priv: driver private + * @display: display device * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void bdw_update_port_irq(struct drm_i915_private *dev_priv, +void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = 
to_i915(display->drm); u32 new_val; u32 old_val; @@ -194,16 +194,16 @@ void bdw_update_port_irq(struct drm_i915_private *dev_priv, /** * bdw_update_pipe_irq - update DE pipe interrupt - * @dev_priv: driver private + * @display: display device * @pipe: pipe whose interrupt to update * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, +static void bdw_update_pipe_irq(struct intel_display *display, enum pipe pipe, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); @@ -224,29 +224,29 @@ static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, } } -void bdw_enable_pipe_irq(struct drm_i915_private *i915, +void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits) { - bdw_update_pipe_irq(i915, pipe, bits, bits); + bdw_update_pipe_irq(display, pipe, bits, bits); } -void bdw_disable_pipe_irq(struct drm_i915_private *i915, +void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits) { - bdw_update_pipe_irq(i915, pipe, bits, 0); + bdw_update_pipe_irq(display, pipe, bits, 0); } /** * ibx_display_interrupt_update - update SDEIMR - * @dev_priv: driver private + * @display: display device * @interrupt_mask: mask of interrupt bits to update * @enabled_irq_mask: mask of interrupt bits to enable */ -void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, +void ibx_display_interrupt_update(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 sdeimr = intel_de_read(display, SDEIMR); sdeimr &= ~interrupt_mask; @@ -263,14 +263,14 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, intel_de_posting_read(display, SDEIMR); } -void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits) +void ibx_enable_display_interrupt(struct intel_display *display, u32 bits) { - ibx_display_interrupt_update(i915, bits, bits); + ibx_display_interrupt_update(display, bits, bits); } -void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits) +void ibx_disable_display_interrupt(struct intel_display *display, u32 bits) { - ibx_display_interrupt_update(i915, bits, 0); + ibx_display_interrupt_update(display, bits, 0); } u32 i915_pipestat_enable_mask(struct intel_display *display, @@ -318,10 +318,10 @@ out: return enable_mask; } -void i915_enable_pipestat(struct drm_i915_private *dev_priv, +void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); i915_reg_t reg = PIPESTAT(dev_priv, pipe); u32 enable_mask; @@ -342,10 +342,10 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv, intel_de_posting_read(display, reg); } -void i915_disable_pipestat(struct drm_i915_private *dev_priv, +void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); i915_reg_t reg = PIPESTAT(dev_priv, pipe); u32 enable_mask; @@ -381,11 +381,11 @@ static bool i915_has_legacy_blc_interrupt(struct intel_display *display) /** * 
i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion - * @dev_priv: i915 device private + * @display: display device */ -void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) +void i915_enable_asle_pipestat(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); if (!intel_opregion_asle_present(display)) return; @@ -395,9 +395,9 @@ void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv) spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); + i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); if (DISPLAY_VER(dev_priv) >= 4) - i915_enable_pipestat(dev_priv, PIPE_A, + i915_enable_pipestat(display, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); @@ -524,10 +524,10 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) } } -void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, +void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; spin_lock(&dev_priv->irq_lock); @@ -592,10 +592,10 @@ void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, spin_unlock(&dev_priv->irq_lock); } -void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, +void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); bool blc_event = false; enum pipe pipe; @@ -617,10 +617,10 @@ void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_opregion_asle_intr(display); } -void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, +void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); bool blc_event = false; enum pipe pipe; @@ -645,10 +645,10 @@ void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv, intel_gmbus_irq_handler(display); } -void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv, +void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; for_each_pipe(dev_priv, pipe) { @@ -894,9 +894,9 @@ static void ilk_gtt_fault_irq_handler(struct intel_display *display) } } -void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) +void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; @@ -946,9 +946,9 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) gen5_rps_irq_handler(&to_gt(dev_priv)->rps); } -void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir) +void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; u32 hotplug_trigger = de_iir & 
DE_DP_A_HOTPLUG_IVB; @@ -1359,9 +1359,9 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i intel_de_write(display, PICAINTERRUPT_IER, pica_ier); } -void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) +void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 iir; enum pipe pipe; @@ -1517,9 +1517,8 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } } -u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) +u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl) { - struct intel_display *display = &i915->display; u32 iir; if (!(master_ctl & GEN11_GU_MISC_IRQ)) @@ -1532,17 +1531,15 @@ u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl) return iir; } -void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir) +void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir) { - struct intel_display *display = &i915->display; - if (iir & GEN11_GU_MISC_GSE) intel_opregion_asle_intr(display); } -void gen11_display_irq_handler(struct drm_i915_private *i915) +void gen11_display_irq_handler(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 disp_ctl; disable_rpm_wakeref_asserts(&i915->runtime_pm); @@ -1553,7 +1550,7 @@ void gen11_display_irq_handler(struct drm_i915_private *i915) disp_ctl = intel_de_read(display, GEN11_DISPLAY_INT_CTL); intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0); - gen8_de_irq_handler(i915, disp_ctl); + gen8_de_irq_handler(display, disp_ctl); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); enable_rpm_wakeref_asserts(&i915->runtime_pm); @@ -1585,8 +1582,10 @@ static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915) _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } -void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable) +void i915gm_irq_cstate_wa(struct intel_display *display, bool enable) { + struct drm_i915_private *i915 = to_i915(display->drm); + spin_lock_irq(&i915->drm.vblank_time_lock); if (enable) @@ -1599,12 +1598,13 @@ void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable) int i8xx_enable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + i915_enable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); return 0; @@ -1612,12 +1612,13 @@ int i8xx_enable_vblank(struct drm_crtc *crtc) void i8xx_disable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, PIPE_VBLANK_INTERRUPT_STATUS); + i915_disable_pipestat(display, pipe, PIPE_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1641,12 +1642,13 @@ void i915gm_disable_vblank(struct drm_crtc *crtc) int 
i965_enable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_enable_pipestat(dev_priv, pipe, + i915_enable_pipestat(display, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); @@ -1655,18 +1657,20 @@ int i965_enable_vblank(struct drm_crtc *crtc) void i965_disable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - i915_disable_pipestat(dev_priv, pipe, + i915_disable_pipestat(display, pipe, PIPE_START_VBLANK_INTERRUPT_STATUS); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } int ilk_enable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; @@ -1674,7 +1678,7 @@ int ilk_enable_vblank(struct drm_crtc *crtc) DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_enable_display_irq(dev_priv, bit); + ilk_enable_display_irq(display, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Even though there is no DMC, frame counter can get stuck when @@ -1688,6 +1692,7 @@ int ilk_enable_vblank(struct drm_crtc *crtc) void ilk_disable_vblank(struct drm_crtc *crtc) { + struct intel_display *display = to_intel_display(crtc->dev); struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; @@ -1695,7 +1700,7 @@ void ilk_disable_vblank(struct drm_crtc *crtc) DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - ilk_disable_display_irq(dev_priv, bit); + ilk_disable_display_irq(display, bit); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } @@ -1753,7 +1758,7 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) schedule_work(&display->irq.vblank_dc_work); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); /* Even if there is no DMC, frame counter can get stuck when @@ -1777,7 +1782,7 @@ void bdw_disable_vblank(struct drm_crtc *_crtc) return; spin_lock_irqsave(&dev_priv->irq_lock, irqflags); - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); + bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_VBLANK); spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); if (crtc->block_dc_for_vblank && --display->irq.vblank_wa_num_pipes == 0) @@ -1913,15 +1918,17 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) dev_priv->irq_mask = ~0u; } -void vlv_display_irq_reset(struct drm_i915_private *dev_priv) +void vlv_display_irq_reset(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + if (dev_priv->display.irq.vlv_display_irqs_enabled) _vlv_display_irq_reset(dev_priv); } -void i9xx_display_irq_reset(struct drm_i915_private *i915) +void i9xx_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private 
*i915 = to_i915(display->drm); if (I915_HAS_HOTPLUG(i915)) { i915_hotplug_interrupt_update(display, 0xffffffff, 0); @@ -1937,9 +1944,9 @@ static u32 vlv_error_mask(void) return VLV_ERROR_PAGE_TABLE; } -void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) +void vlv_display_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 pipestat_mask; u32 enable_mask; enum pipe pipe; @@ -1961,9 +1968,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); for_each_pipe(dev_priv, pipe) - i915_enable_pipestat(dev_priv, pipe, pipestat_mask); + i915_enable_pipestat(display, pipe, pipestat_mask); enable_mask = I915_DISPLAY_PORT_INTERRUPT | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | @@ -1983,9 +1990,9 @@ void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) intel_display_irq_regs_init(display, VLV_IRQ_REGS, dev_priv->irq_mask, enable_mask); } -void gen8_display_irq_reset(struct drm_i915_private *dev_priv) +void gen8_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; if (!HAS_DISPLAY(dev_priv)) @@ -2003,9 +2010,9 @@ void gen8_display_irq_reset(struct drm_i915_private *dev_priv) intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS); } -void gen11_display_irq_reset(struct drm_i915_private *dev_priv) +void gen11_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); @@ -2054,10 +2061,10 @@ void gen11_display_irq_reset(struct drm_i915_private *dev_priv) intel_display_irq_regs_reset(display, SDE_IRQ_REGS); } -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, +void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | gen8_de_pipe_flip_done_mask(dev_priv); enum pipe pipe; @@ -2077,10 +2084,10 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, spin_unlock_irq(&dev_priv->irq_lock); } -void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, +void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); @@ -2128,8 +2135,10 @@ static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); } -void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) +void valleyview_enable_display_irqs(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + lockdep_assert_held(&dev_priv->irq_lock); if (dev_priv->display.irq.vlv_display_irqs_enabled) @@ -2139,12 +2148,14 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) if (intel_irqs_enabled(dev_priv)) 
{ _vlv_display_irq_reset(dev_priv); - vlv_display_irq_postinstall(dev_priv); + vlv_display_irq_postinstall(display); } } -void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) +void valleyview_disable_display_irqs(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + lockdep_assert_held(&dev_priv->irq_lock); if (!dev_priv->display.irq.vlv_display_irqs_enabled) @@ -2156,9 +2167,10 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) _vlv_display_irq_reset(dev_priv); } -void ilk_de_irq_postinstall(struct drm_i915_private *i915) +void ilk_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); + u32 display_mask, extra_mask; if (DISPLAY_VER(i915) >= 7) { @@ -2201,9 +2213,9 @@ void ilk_de_irq_postinstall(struct drm_i915_private *i915) static void mtp_irq_postinstall(struct drm_i915_private *i915); static void icp_irq_postinstall(struct drm_i915_private *i915); -void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) +void gen8_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | GEN8_PIPE_CDCLK_CRC_DONE; @@ -2323,33 +2335,27 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); } -void gen11_de_irq_postinstall(struct drm_i915_private *dev_priv) +void gen11_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - gen8_de_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(display); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); } -void dg1_de_irq_postinstall(struct drm_i915_private *i915) +void dg1_de_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &i915->display; - - if (!HAS_DISPLAY(i915)) + if (!HAS_DISPLAY(display)) return; - gen8_de_irq_postinstall(i915); + gen8_de_irq_postinstall(display); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); } -void intel_display_irq_init(struct drm_i915_private *i915) +void intel_display_irq_init(struct intel_display *display) { - struct intel_display *display = &i915->display; - display->drm->vblank_disable_immediate = true; intel_hotplug_irq_init(display); diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.h b/drivers/gpu/drm/i915/display/intel_display_irq.h index d9867cd0a220..f72727768351 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.h +++ b/drivers/gpu/drm/i915/display/intel_display_irq.h @@ -12,28 +12,27 @@ enum pipe; struct drm_crtc; -struct drm_i915_private; struct intel_display; -void valleyview_enable_display_irqs(struct drm_i915_private *i915); -void valleyview_disable_display_irqs(struct drm_i915_private *i915); +void valleyview_enable_display_irqs(struct intel_display *display); +void valleyview_disable_display_irqs(struct intel_display *display); -void ilk_update_display_irq(struct drm_i915_private *i915, +void ilk_update_display_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask); -void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits); -void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits); +void 
ilk_enable_display_irq(struct intel_display *display, u32 bits); +void ilk_disable_display_irq(struct intel_display *display, u32 bits); -void bdw_update_port_irq(struct drm_i915_private *i915, u32 interrupt_mask, u32 enabled_irq_mask); -void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); -void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits); +void bdw_update_port_irq(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask); +void bdw_enable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits); +void bdw_disable_pipe_irq(struct intel_display *display, enum pipe pipe, u32 bits); -void ibx_display_interrupt_update(struct drm_i915_private *i915, +void ibx_display_interrupt_update(struct intel_display *display, u32 interrupt_mask, u32 enabled_irq_mask); -void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits); -void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits); +void ibx_enable_display_interrupt(struct intel_display *display, u32 bits); +void ibx_disable_display_interrupt(struct intel_display *display, u32 bits); -void gen8_irq_power_well_post_enable(struct drm_i915_private *i915, u8 pipe_mask); -void gen8_irq_power_well_pre_disable(struct drm_i915_private *i915, u8 pipe_mask); +void gen8_irq_power_well_post_enable(struct intel_display *display, u8 pipe_mask); +void gen8_irq_power_well_pre_disable(struct intel_display *display, u8 pipe_mask); int i8xx_enable_vblank(struct drm_crtc *crtc); int i915gm_enable_vblank(struct drm_crtc *crtc); @@ -46,41 +45,41 @@ void i965_disable_vblank(struct drm_crtc *crtc); void ilk_disable_vblank(struct drm_crtc *crtc); void bdw_disable_vblank(struct drm_crtc *crtc); -void ivb_display_irq_handler(struct drm_i915_private *i915, u32 de_iir); -void ilk_display_irq_handler(struct drm_i915_private *i915, u32 de_iir); -void gen8_de_irq_handler(struct drm_i915_private *i915, u32 master_ctl); -void gen11_display_irq_handler(struct drm_i915_private *i915); +void ivb_display_irq_handler(struct intel_display *display, u32 de_iir); +void ilk_display_irq_handler(struct intel_display *display, u32 de_iir); +void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl); +void gen11_display_irq_handler(struct intel_display *display); -u32 gen11_gu_misc_irq_ack(struct drm_i915_private *i915, const u32 master_ctl); -void gen11_gu_misc_irq_handler(struct drm_i915_private *i915, const u32 iir); +u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl); +void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir); -void i9xx_display_irq_reset(struct drm_i915_private *i915); -void vlv_display_irq_reset(struct drm_i915_private *i915); -void gen8_display_irq_reset(struct drm_i915_private *i915); -void gen11_display_irq_reset(struct drm_i915_private *i915); +void i9xx_display_irq_reset(struct intel_display *display); +void vlv_display_irq_reset(struct intel_display *display); +void gen8_display_irq_reset(struct intel_display *display); +void gen11_display_irq_reset(struct intel_display *display); -void vlv_display_irq_postinstall(struct drm_i915_private *i915); -void ilk_de_irq_postinstall(struct drm_i915_private *i915); -void gen8_de_irq_postinstall(struct drm_i915_private *i915); -void gen11_de_irq_postinstall(struct drm_i915_private *i915); -void dg1_de_irq_postinstall(struct drm_i915_private *i915); +void vlv_display_irq_postinstall(struct intel_display *display); +void ilk_de_irq_postinstall(struct 
intel_display *display); +void gen8_de_irq_postinstall(struct intel_display *display); +void gen11_de_irq_postinstall(struct intel_display *display); +void dg1_de_irq_postinstall(struct intel_display *display); u32 i915_pipestat_enable_mask(struct intel_display *display, enum pipe pipe); -void i915_enable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); -void i915_disable_pipestat(struct drm_i915_private *i915, enum pipe pipe, u32 status_mask); -void i915_enable_asle_pipestat(struct drm_i915_private *i915); +void i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask); +void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask); +void i915_enable_asle_pipestat(struct intel_display *display); -void i9xx_pipestat_irq_ack(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void i9xx_pipestat_irq_ack(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); -void i915_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); -void i965_pipestat_irq_handler(struct drm_i915_private *i915, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); -void valleyview_pipestat_irq_handler(struct drm_i915_private *i915, u32 pipe_stats[I915_MAX_PIPES]); +void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]); +void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]); void vlv_display_error_irq_ack(struct intel_display *display, u32 *eir, u32 *dpinvgtt); void vlv_display_error_irq_handler(struct intel_display *display, u32 eir, u32 dpinvgtt); -void intel_display_irq_init(struct drm_i915_private *i915); +void intel_display_irq_init(struct intel_display *display); -void i915gm_irq_cstate_wa(struct drm_i915_private *i915, bool enable); +void i915gm_irq_cstate_wa(struct intel_display *display, bool enable); #endif /* __INTEL_DISPLAY_IRQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index daf2a0cbb157..b03a95ef64da 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -186,22 +186,18 @@ int intel_power_well_refcount(struct i915_power_well *power_well) static void hsw_power_well_post_enable(struct intel_display *display, u8 irq_pipe_mask, bool has_vga) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (has_vga) intel_vga_reset_io_mem(display); if (irq_pipe_mask) - gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); + gen8_irq_power_well_post_enable(display, irq_pipe_mask); } static void hsw_power_well_pre_disable(struct intel_display *display, u8 irq_pipe_mask) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (irq_pipe_mask) - gen8_irq_power_well_pre_disable(dev_priv, irq_pipe_mask); + gen8_irq_power_well_pre_disable(display, irq_pipe_mask); } #define ICL_AUX_PW_TO_PHY(pw_idx) \ @@ -1226,7 +1222,7 @@ static void vlv_display_power_well_init(struct intel_display *display) vlv_init_display_clock_gating(display); spin_lock_irq(&dev_priv->irq_lock); - valleyview_enable_display_irqs(dev_priv); + valleyview_enable_display_irqs(display); spin_unlock_irq(&dev_priv->irq_lock); /* @@ -1255,7 +1251,7 @@ static void vlv_display_power_well_deinit(struct intel_display *display) struct 
drm_i915_private *dev_priv = to_i915(display->drm); spin_lock_irq(&dev_priv->irq_lock); - valleyview_disable_display_irqs(dev_priv); + valleyview_disable_display_irqs(display); spin_unlock_irq(&dev_priv->irq_lock); /* make sure we're done processing display irqs */ diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c index 7a8fbff39be0..3f47a3beb2e6 100644 --- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c +++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c @@ -136,14 +136,13 @@ static void i9xx_set_fifo_underrun_reporting(struct intel_display *display, static void ilk_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : DE_PIPEB_FIFO_UNDERRUN; if (enable) - ilk_enable_display_irq(dev_priv, bit); + ilk_enable_display_irq(display, bit); else - ilk_disable_display_irq(dev_priv, bit); + ilk_disable_display_irq(display, bit); } static void ivb_check_fifo_underruns(struct intel_crtc *crtc) @@ -169,7 +168,6 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pipe, bool enable, bool old) { - struct drm_i915_private *dev_priv = to_i915(display->drm); if (enable) { intel_de_write(display, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); @@ -177,9 +175,9 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display, if (!ivb_can_enable_err_int(display)) return; - ilk_enable_display_irq(dev_priv, DE_ERR_INT_IVB); + ilk_enable_display_irq(display, DE_ERR_INT_IVB); } else { - ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB); + ilk_disable_display_irq(display, DE_ERR_INT_IVB); if (old && intel_de_read(display, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) { @@ -193,26 +191,23 @@ static void ivb_set_fifo_underrun_reporting(struct intel_display *display, static void bdw_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pipe, bool enable) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (enable) - bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_enable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN); else - bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); + bdw_disable_pipe_irq(display, pipe, GEN8_PIPE_FIFO_UNDERRUN); } static void ibx_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pch_transcoder, bool enable) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 bit = (pch_transcoder == PIPE_A) ? 
SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; if (enable) - ibx_enable_display_interrupt(dev_priv, bit); + ibx_enable_display_interrupt(display, bit); else - ibx_disable_display_interrupt(dev_priv, bit); + ibx_disable_display_interrupt(display, bit); } static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc) @@ -240,8 +235,6 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display, enum pipe pch_transcoder, bool enable, bool old) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (enable) { intel_de_write(display, SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)); @@ -249,9 +242,9 @@ static void cpt_set_fifo_underrun_reporting(struct intel_display *display, if (!cpt_can_enable_serr_int(display)) return; - ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT); + ibx_enable_display_interrupt(display, SDE_ERROR_CPT); } else { - ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT); + ibx_disable_display_interrupt(display, SDE_ERROR_CPT); if (old && intel_de_read(display, SERR_INT) & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) { diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index e44ae6acc55f..1bcff3a47745 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -758,13 +758,12 @@ static void ibx_hpd_enable_detection(struct intel_encoder *encoder) static void ibx_hpd_irq_setup(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); ibx_hpd_detection_setup(display); } @@ -847,7 +846,6 @@ static void icp_hpd_enable_detection(struct intel_encoder *encoder) static void icp_hpd_irq_setup(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); @@ -859,7 +857,7 @@ static void icp_hpd_irq_setup(struct intel_display *display) */ intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); icp_ddi_hpd_detection_setup(display); icp_tc_hpd_detection_setup(display); @@ -1061,7 +1059,6 @@ static void mtp_hpd_enable_detection(struct intel_encoder *encoder) static void mtp_hpd_irq_setup(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); @@ -1074,7 +1071,7 @@ static void mtp_hpd_irq_setup(struct intel_display *display) intel_de_write(display, SHPD_FILTER_CNT, SHPD_FILTER_CNT_250); mtp_hpd_invert(display); - ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); mtp_ddi_hpd_detection_setup(display); mtp_tc_hpd_detection_setup(display); @@ -1082,13 +1079,12 @@ static void mtp_hpd_irq_setup(struct intel_display *display) static void xe2lpd_sde_hpd_irq_setup(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; 
enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); - ibx_display_interrupt_update(i915, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); mtp_ddi_hpd_detection_setup(display); mtp_tc_hpd_detection_setup(display); @@ -1248,7 +1244,7 @@ static void spt_hpd_irq_setup(struct intel_display *display) enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.pch_hpd); - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); + ibx_display_interrupt_update(display, hotplug_irqs, enabled_irqs); spt_hpd_detection_setup(display); } @@ -1300,16 +1296,15 @@ static void ilk_hpd_enable_detection(struct intel_encoder *encoder) static void ilk_hpd_irq_setup(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); if (DISPLAY_VER(display) >= 8) - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + bdw_update_port_irq(display, hotplug_irqs, enabled_irqs); else - ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); + ilk_update_display_irq(display, hotplug_irqs, enabled_irqs); ilk_hpd_detection_setup(display); @@ -1373,13 +1368,12 @@ static void bxt_hpd_enable_detection(struct intel_encoder *encoder) static void bxt_hpd_irq_setup(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); u32 hotplug_irqs, enabled_irqs; enabled_irqs = intel_hpd_enabled_irqs(display, display->hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(display, display->hotplug.hpd); - bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); + bdw_update_port_irq(display, hotplug_irqs, enabled_irqs); bxt_hpd_detection_setup(display); } diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c index 10e26c3db946..65f60615f387 100644 --- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c +++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c @@ -281,6 +281,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source, static void intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) { + struct intel_display *display = to_intel_display(crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *pipe_config; struct drm_atomic_state *state; @@ -288,7 +289,7 @@ intel_crtc_crc_setup_workarounds(struct intel_crtc *crtc, bool enable) int ret; if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) - i915gm_irq_cstate_wa(dev_priv, enable); + i915gm_irq_cstate_wa(display, enable); drm_modeset_acquire_init(&ctx, 0); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index 5dbe857ea85b..2e3f3f0207e8 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -1594,7 +1594,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv, /* Disable TV interrupts around load detect or we'll recurse */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irq(&dev_priv->irq_lock); - i915_disable_pipestat(dev_priv, 0, + i915_disable_pipestat(display, 0, PIPE_HOTPLUG_INTERRUPT_STATUS | PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); @@ -1669,7 +1669,7 @@ 
intel_tv_detect_type(struct intel_tv *intel_tv, /* Restore interrupt config */ if (connector->polled & DRM_CONNECTOR_POLL_HPD) { spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, 0, + i915_enable_pipestat(display, 0, PIPE_HOTPLUG_INTERRUPT_STATUS | PIPE_HOTPLUG_TV_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 70e550539bb2..8739195aba69 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -2689,22 +2689,24 @@ static const struct drm_plane_funcs tgl_plane_funcs = { static void skl_plane_enable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_enable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); + bdw_enable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); spin_unlock_irq(&i915->irq_lock); } static void skl_plane_disable_flip_done(struct intel_plane *plane) { + struct intel_display *display = to_intel_display(plane); struct drm_i915_private *i915 = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; spin_lock_irq(&i915->irq_lock); - bdw_disable_pipe_irq(i915, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); + bdw_disable_pipe_irq(display, pipe, GEN9_PIPE_PLANE_FLIP_DONE(plane->id)); spin_unlock_irq(&i915->irq_lock); } diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index fa304ea088e4..4991a63a24e4 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -550,6 +550,7 @@ static unsigned int init_emon(struct intel_uncore *uncore) static bool gen5_rps_enable(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_display *display = &i915->display; struct intel_uncore *uncore = rps_to_uncore(rps); u8 fstart, vstart; u32 rgvmodectl; @@ -608,7 +609,7 @@ static bool gen5_rps_enable(struct intel_rps *rps) rps->ips.last_time2 = ktime_get_raw_ns(); spin_lock(&i915->irq_lock); - ilk_enable_display_irq(i915, DE_PCU_EVENT); + ilk_enable_display_irq(display, DE_PCU_EVENT); spin_unlock(&i915->irq_lock); spin_unlock_irq(&mchdev_lock); @@ -621,13 +622,14 @@ static bool gen5_rps_enable(struct intel_rps *rps) static void gen5_rps_disable(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_display *display = &i915->display; struct intel_uncore *uncore = rps_to_uncore(rps); u16 rgvswctl; spin_lock_irq(&mchdev_lock); spin_lock(&i915->irq_lock); - ilk_disable_display_irq(i915, DE_PCU_EVENT); + ilk_disable_display_irq(display, DE_PCU_EVENT); spin_unlock(&i915->irq_lock); rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 3b05eb3f9cbc..ba3afc7e38ac 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -284,7 +284,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT)) @@ -311,7 +311,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) if (iir & I915_MASTER_ERROR_INTERRUPT) 
vlv_display_error_irq_handler(display, eir, dpinvgtt); - valleyview_pipestat_irq_handler(dev_priv, pipe_stats); + valleyview_pipestat_irq_handler(display, pipe_stats); } while (0); pmu_irq_stats(dev_priv, ret); @@ -374,7 +374,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & (I915_LPE_PIPE_A_INTERRUPT | I915_LPE_PIPE_B_INTERRUPT | @@ -397,7 +397,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) if (iir & I915_MASTER_ERROR_INTERRUPT) vlv_display_error_irq_handler(display, eir, dpinvgtt); - valleyview_pipestat_irq_handler(dev_priv, pipe_stats); + valleyview_pipestat_irq_handler(display, pipe_stats); } while (0); pmu_irq_stats(dev_priv, ret); @@ -418,6 +418,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) static irqreturn_t ilk_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; + struct intel_display *display = &i915->display; void __iomem * const regs = intel_uncore_regs(&i915->uncore); u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; @@ -458,9 +459,9 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) if (de_iir) { raw_reg_write(regs, DEIIR, de_iir); if (DISPLAY_VER(i915) >= 7) - ivb_display_irq_handler(i915, de_iir); + ivb_display_irq_handler(display, de_iir); else - ilk_display_irq_handler(i915, de_iir); + ilk_display_irq_handler(display, de_iir); ret = IRQ_HANDLED; } @@ -506,6 +507,7 @@ static inline void gen8_master_intr_enable(void __iomem * const regs) static irqreturn_t gen8_irq_handler(int irq, void *arg) { struct drm_i915_private *dev_priv = arg; + struct intel_display *display = &dev_priv->display; void __iomem * const regs = intel_uncore_regs(&dev_priv->uncore); u32 master_ctl; @@ -524,7 +526,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & ~GEN8_GT_IRQS) { disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - gen8_de_irq_handler(dev_priv, master_ctl); + gen8_de_irq_handler(display, master_ctl); enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); } @@ -556,6 +558,7 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) static irqreturn_t gen11_irq_handler(int irq, void *arg) { struct drm_i915_private *i915 = arg; + struct intel_display *display = &i915->display; void __iomem * const regs = intel_uncore_regs(&i915->uncore); struct intel_gt *gt = to_gt(i915); u32 master_ctl; @@ -575,13 +578,13 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) /* IRQs are synced during runtime_suspend, we don't require a wakeref */ if (master_ctl & GEN11_DISPLAY_IRQ) - gen11_display_irq_handler(i915); + gen11_display_irq_handler(display); - gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl); gen11_master_intr_enable(regs); - gen11_gu_misc_irq_handler(i915, gu_misc_iir); + gen11_gu_misc_irq_handler(display, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -613,6 +616,7 @@ static inline void dg1_master_intr_enable(void __iomem * const regs) static irqreturn_t dg1_irq_handler(int irq, void *arg) { struct drm_i915_private * const i915 = arg; + struct intel_display *display = &i915->display; struct intel_gt *gt = to_gt(i915); void __iomem * const regs = intel_uncore_regs(gt->uncore); u32 master_tile_ctl, master_ctl; @@ -641,13 
+645,13 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) gen11_gt_irq_handler(gt, master_ctl); if (master_ctl & GEN11_DISPLAY_IRQ) - gen11_display_irq_handler(i915); + gen11_display_irq_handler(display); - gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl); + gu_misc_iir = gen11_gu_misc_irq_ack(display, master_ctl); dg1_master_intr_enable(regs); - gen11_gu_misc_irq_handler(i915, gu_misc_iir); + gen11_gu_misc_irq_handler(display, gu_misc_iir); pmu_irq_stats(i915, IRQ_HANDLED); @@ -691,24 +695,27 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) static void valleyview_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); gen5_gt_irq_reset(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_reset(dev_priv); + vlv_display_irq_reset(display); spin_unlock_irq(&dev_priv->irq_lock); } static void gen8_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; gen8_master_intr_disable(intel_uncore_regs(uncore)); gen8_gt_irq_reset(to_gt(dev_priv)); - gen8_display_irq_reset(dev_priv); + gen8_display_irq_reset(display); gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); if (HAS_PCH_SPLIT(dev_priv)) @@ -718,13 +725,14 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; gen11_master_intr_disable(intel_uncore_regs(&dev_priv->uncore)); gen11_gt_irq_reset(gt); - gen11_display_irq_reset(dev_priv); + gen11_display_irq_reset(display); gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); @@ -732,6 +740,7 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) static void dg1_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; struct intel_gt *gt; unsigned int i; @@ -741,7 +750,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) for_each_gt(gt, dev_priv, i) gen11_gt_irq_reset(gt); - gen11_display_irq_reset(dev_priv); + gen11_display_irq_reset(display); gen2_irq_reset(uncore, GEN11_GU_MISC_IRQ_REGS); gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); @@ -751,6 +760,7 @@ static void dg1_irq_reset(struct drm_i915_private *dev_priv) static void cherryview_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; intel_uncore_write(uncore, GEN8_MASTER_IRQ, 0); @@ -761,23 +771,27 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) gen2_irq_reset(uncore, GEN8_PCU_IRQ_REGS); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_reset(dev_priv); + vlv_display_irq_reset(display); spin_unlock_irq(&dev_priv->irq_lock); } static void ilk_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen5_gt_irq_postinstall(to_gt(dev_priv)); - ilk_de_irq_postinstall(dev_priv); + ilk_de_irq_postinstall(display); } static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen5_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - 
vlv_display_irq_postinstall(dev_priv); + vlv_display_irq_postinstall(display); spin_unlock_irq(&dev_priv->irq_lock); intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); @@ -786,20 +800,23 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) static void gen8_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen8_gt_irq_postinstall(to_gt(dev_priv)); - gen8_de_irq_postinstall(dev_priv); + gen8_de_irq_postinstall(display); gen8_master_intr_enable(intel_uncore_regs(&dev_priv->uncore)); } static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_gt *gt = to_gt(dev_priv); struct intel_uncore *uncore = gt->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; gen11_gt_irq_postinstall(gt); - gen11_de_irq_postinstall(dev_priv); + gen11_de_irq_postinstall(display); gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); @@ -809,6 +826,7 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 gu_misc_masked = GEN11_GU_MISC_GSE; struct intel_gt *gt; @@ -819,7 +837,7 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) gen2_irq_init(uncore, GEN11_GU_MISC_IRQ_REGS, ~gu_misc_masked, gu_misc_masked); - dg1_de_irq_postinstall(dev_priv); + dg1_de_irq_postinstall(display); dg1_master_intr_enable(intel_uncore_regs(uncore)); intel_uncore_posting_read(uncore, DG1_MSTR_TILE_INTR); @@ -827,10 +845,12 @@ static void dg1_irq_postinstall(struct drm_i915_private *dev_priv) static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; + gen8_gt_irq_postinstall(to_gt(dev_priv)); spin_lock_irq(&dev_priv->irq_lock); - vlv_display_irq_postinstall(dev_priv); + vlv_display_irq_postinstall(display); spin_unlock_irq(&dev_priv->irq_lock); intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); @@ -900,9 +920,10 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, static void i915_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; - i9xx_display_irq_reset(dev_priv); + i9xx_display_irq_reset(display); gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); @@ -911,6 +932,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv) static void i915_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; @@ -942,11 +964,11 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); - i915_enable_asle_pipestat(dev_priv); + i915_enable_asle_pipestat(display); } static irqreturn_t i915_irq_handler(int irq, void *arg) @@ -979,7 +1001,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); @@ -995,7 +1017,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (hotplug_status) i9xx_hpd_irq_handler(display, hotplug_status); - i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); + i915_pipestat_irq_handler(display, iir, pipe_stats); } while (0); pmu_irq_stats(dev_priv, ret); @@ -1007,9 +1029,10 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) static void i965_irq_reset(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; - i9xx_display_irq_reset(dev_priv); + i9xx_display_irq_reset(display); gen2_error_reset(uncore, GEN2_ERROR_REGS); gen2_irq_reset(uncore, GEN2_IRQ_REGS); @@ -1037,6 +1060,7 @@ static u32 i965_error_mask(struct drm_i915_private *i915) static void i965_irq_postinstall(struct drm_i915_private *dev_priv) { + struct intel_display *display = &dev_priv->display; struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; @@ -1065,12 +1089,12 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) /* Interrupt setup is already guaranteed to be single-threaded, this is * just to make the assert_spin_locked check happy. 
*/ spin_lock_irq(&dev_priv->irq_lock); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); - i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); + i915_enable_pipestat(display, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); spin_unlock_irq(&dev_priv->irq_lock); - i915_enable_asle_pipestat(dev_priv); + i915_enable_asle_pipestat(display); } static irqreturn_t i965_irq_handler(int irq, void *arg) @@ -1102,7 +1126,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) /* Call regardless, as some status bits might not be * signalled in IIR */ - i9xx_pipestat_irq_ack(dev_priv, iir, pipe_stats); + i9xx_pipestat_irq_ack(display, iir, pipe_stats); if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); @@ -1123,7 +1147,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (hotplug_status) i9xx_hpd_irq_handler(display, hotplug_status); - i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); + i965_pipestat_irq_handler(display, iir, pipe_stats); } while (0); pmu_irq_stats(dev_priv, IRQ_HANDLED); diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index 7dc24bd7f9d0..6bd08810f1dd 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -229,11 +229,13 @@ void xe_display_driver_remove(struct xe_device *xe) void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; if (master_ctl & DISPLAY_IRQ) - gen11_display_irq_handler(xe); + gen11_display_irq_handler(display); } void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) @@ -249,19 +251,23 @@ void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) void xe_display_irq_reset(struct xe_device *xe) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; - gen11_display_irq_reset(xe); + gen11_display_irq_reset(display); } void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) { + struct intel_display *display = &xe->display; + if (!xe->info.probe_display) return; if (gt->info.id == XE_GT0) - gen11_de_irq_postinstall(xe); + gen11_de_irq_postinstall(display); } static bool suspend_to_idle(void) From d22168b68632a427c795da27b739b65eeb85473d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:46:04 +0200 Subject: [PATCH 0168/1627] drm/i915/irq: convert rest of intel_display_irq.[ch] to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert as much as possible of intel_display_irq.[ch] to struct intel_display. 
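As an illustration only (not part of this patch), the conversion follows the
pattern already visible in the preceding changes: the entry points take
struct intel_display, and the i915 pointer is derived via
to_i915(display->drm) only where non-display state (for example the
irq_lock) is still needed. A minimal sketch, using
vlv_display_irq_postinstall() as the example:

	/* before: i915 pointer passed in, display pointer derived from it */
	void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
	{
		struct intel_display *display = &dev_priv->display;
		/* ... interrupt setup unchanged ... */
	}

	/* after: display pointer passed in, i915 pointer derived only where needed */
	void vlv_display_irq_postinstall(struct intel_display *display)
	{
		struct drm_i915_private *dev_priv = to_i915(display->drm);
		/* ... interrupt setup unchanged ... */
	}

Callers that already hold a struct intel_display (e.g. the power well and
hotplug code) can then drop their local to_i915() lookups entirely.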
Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/b6e281875278ad84772938f81129fde6065b2745.1742481923.git.jani.nikula@intel.com --- .../gpu/drm/i915/display/intel_display_irq.c | 527 ++++++++---------- 1 file changed, 247 insertions(+), 280 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 68f903c35978..6f78fe6de06a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -115,9 +115,8 @@ static void intel_pipe_fault_irq_handler(struct intel_display *display, } static void -intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) +intel_handle_vblank(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); drm_crtc_handle_vblank(&crtc->base); @@ -136,14 +135,14 @@ void ilk_update_display_irq(struct intel_display *display, u32 new_val; lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); new_val = dev_priv->irq_mask; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != dev_priv->irq_mask && - !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { + !drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) { dev_priv->irq_mask = new_val; intel_de_write(display, DEIMR, dev_priv->irq_mask); intel_de_posting_read(display, DEIMR); @@ -175,9 +174,9 @@ void bdw_update_port_irq(struct intel_display *display, lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; old_val = intel_de_read(display, GEN8_DE_PORT_IMR); @@ -208,17 +207,17 @@ static void bdw_update_pipe_irq(struct intel_display *display, lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; - new_val = dev_priv->display.irq.de_irq_mask[pipe]; + new_val = display->irq.de_irq_mask[pipe]; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); - if (new_val != dev_priv->display.irq.de_irq_mask[pipe]) { - dev_priv->display.irq.de_irq_mask[pipe] = new_val; + if (new_val != display->irq.de_irq_mask[pipe]) { + display->irq.de_irq_mask[pipe] = new_val; intel_de_write(display, GEN8_DE_PIPE_IMR(pipe), display->irq.de_irq_mask[pipe]); intel_de_posting_read(display, GEN8_DE_PIPE_IMR(pipe)); } @@ -252,11 +251,11 @@ void ibx_display_interrupt_update(struct intel_display *display, sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); - drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask); + drm_WARN_ON(display->drm, enabled_irq_mask & ~interrupt_mask); lockdep_assert_held(&dev_priv->irq_lock); - if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) + if (drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv))) return; intel_de_write(display, SDEIMR, sdeimr); @@ -322,20 +321,20 @@ void 
i915_enable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask) { struct drm_i915_private *dev_priv = to_i915(display->drm); - i915_reg_t reg = PIPESTAT(dev_priv, pipe); + i915_reg_t reg = PIPESTAT(display, pipe); u32 enable_mask; - drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: status_mask=0x%x\n", pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); + drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); - if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) + if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == status_mask) return; - dev_priv->display.irq.pipestat_irq_mask[pipe] |= status_mask; + display->irq.pipestat_irq_mask[pipe] |= status_mask; enable_mask = i915_pipestat_enable_mask(display, pipe); intel_de_write(display, reg, enable_mask | status_mask); @@ -346,20 +345,20 @@ void i915_disable_pipestat(struct intel_display *display, enum pipe pipe, u32 status_mask) { struct drm_i915_private *dev_priv = to_i915(display->drm); - i915_reg_t reg = PIPESTAT(dev_priv, pipe); + i915_reg_t reg = PIPESTAT(display, pipe); u32 enable_mask; - drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, + drm_WARN_ONCE(display->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK, "pipe %c: status_mask=0x%x\n", pipe_name(pipe), status_mask); lockdep_assert_held(&dev_priv->irq_lock); - drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)); + drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); - if ((dev_priv->display.irq.pipestat_irq_mask[pipe] & status_mask) == 0) + if ((display->irq.pipestat_irq_mask[pipe] & status_mask) == 0) return; - dev_priv->display.irq.pipestat_irq_mask[pipe] &= ~status_mask; + display->irq.pipestat_irq_mask[pipe] &= ~status_mask; enable_mask = i915_pipestat_enable_mask(display, pipe); intel_de_write(display, reg, enable_mask | status_mask); @@ -368,15 +367,13 @@ void i915_disable_pipestat(struct intel_display *display, static bool i915_has_legacy_blc_interrupt(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - if (IS_I85X(i915)) + if (display->platform.i85x) return true; - if (IS_PINEVIEW(i915)) + if (display->platform.pineview) return true; - return IS_DISPLAY_VER(display, 3, 4) && IS_MOBILE(i915); + return IS_DISPLAY_VER(display, 3, 4) && display->platform.mobile; } /** @@ -396,7 +393,7 @@ void i915_enable_asle_pipestat(struct intel_display *display) spin_lock_irq(&dev_priv->irq_lock); i915_enable_pipestat(display, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); - if (DISPLAY_VER(dev_priv) >= 4) + if (DISPLAY_VER(display) >= 4) i915_enable_pipestat(display, PIPE_A, PIPE_LEGACY_BLC_EVENT_STATUS); @@ -404,13 +401,12 @@ void i915_enable_asle_pipestat(struct intel_display *display) } #if IS_ENABLED(CONFIG_DEBUG_FS) -static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void display_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) { - struct intel_display *display = &dev_priv->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc; u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 }; @@ -427,7 +423,7 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, * don't trust that one either. 
*/ if (pipe_crc->skipped <= 0 || - (DISPLAY_VER(dev_priv) >= 8 && pipe_crc->skipped == 1)) { + (DISPLAY_VER(display) >= 8 && pipe_crc->skipped == 1)) { pipe_crc->skipped++; spin_unlock(&pipe_crc->lock); return; @@ -440,20 +436,19 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, } #else static inline void -display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +display_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe, u32 crc0, u32 crc1, u32 crc2, u32 crc3, u32 crc4) {} #endif -static void flip_done_handler(struct drm_i915_private *i915, +static void flip_done_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &i915->display; struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); - spin_lock(&i915->drm.event_lock); + spin_lock(&display->drm->event_lock); if (crtc->flip_done_event) { trace_intel_crtc_flip_done(crtc); @@ -461,25 +456,21 @@ static void flip_done_handler(struct drm_i915_private *i915, crtc->flip_done_event = NULL; } - spin_unlock(&i915->drm.event_lock); + spin_unlock(&display->drm->event_lock); } -static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void hsw_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; - - display_pipe_crc_irq_handler(dev_priv, pipe, + display_pipe_crc_irq_handler(display, pipe, intel_de_read(display, PIPE_CRC_RES_HSW(pipe)), 0, 0, 0, 0); } -static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void ivb_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; - - display_pipe_crc_irq_handler(dev_priv, pipe, + display_pipe_crc_irq_handler(display, pipe, intel_de_read(display, PIPE_CRC_RES_1_IVB(pipe)), intel_de_read(display, PIPE_CRC_RES_2_IVB(pipe)), intel_de_read(display, PIPE_CRC_RES_3_IVB(pipe)), @@ -487,40 +478,38 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, intel_de_read(display, PIPE_CRC_RES_5_IVB(pipe))); } -static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, +static void i9xx_pipe_crc_irq_handler(struct intel_display *display, enum pipe pipe) { - struct intel_display *display = &dev_priv->display; u32 res1, res2; - if (DISPLAY_VER(dev_priv) >= 3) - res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(dev_priv, pipe)); + if (DISPLAY_VER(display) >= 3) + res1 = intel_de_read(display, PIPE_CRC_RES_RES1_I915(display, pipe)); else res1 = 0; - if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) - res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(dev_priv, pipe)); + if (DISPLAY_VER(display) >= 5 || display->platform.g4x) + res2 = intel_de_read(display, PIPE_CRC_RES_RES2_G4X(display, pipe)); else res2 = 0; - display_pipe_crc_irq_handler(dev_priv, pipe, - intel_de_read(display, PIPE_CRC_RES_RED(dev_priv, pipe)), - intel_de_read(display, PIPE_CRC_RES_GREEN(dev_priv, pipe)), - intel_de_read(display, PIPE_CRC_RES_BLUE(dev_priv, pipe)), + display_pipe_crc_irq_handler(display, pipe, + intel_de_read(display, PIPE_CRC_RES_RED(display, pipe)), + intel_de_read(display, PIPE_CRC_RES_GREEN(display, pipe)), + intel_de_read(display, PIPE_CRC_RES_BLUE(display, pipe)), res1, res2); } -static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) +static void i9xx_pipestat_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + 
for_each_pipe(display, pipe) { intel_de_write(display, - PIPESTAT(dev_priv, pipe), + PIPESTAT(display, pipe), PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); - dev_priv->display.irq.pipestat_irq_mask[pipe] = 0; + display->irq.pipestat_irq_mask[pipe] = 0; } } @@ -532,13 +521,13 @@ void i9xx_pipestat_irq_ack(struct intel_display *display, spin_lock(&dev_priv->irq_lock); - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && - !dev_priv->display.irq.vlv_display_irqs_enabled) { + if ((display->platform.valleyview || display->platform.cherryview) && + !display->irq.vlv_display_irqs_enabled) { spin_unlock(&dev_priv->irq_lock); return; } - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { i915_reg_t reg; u32 status_mask, enable_mask, iir_bit = 0; @@ -566,12 +555,12 @@ void i9xx_pipestat_irq_ack(struct intel_display *display, break; } if (iir & iir_bit) - status_mask |= dev_priv->display.irq.pipestat_irq_mask[pipe]; + status_mask |= display->irq.pipestat_irq_mask[pipe]; if (!status_mask) continue; - reg = PIPESTAT(dev_priv, pipe); + reg = PIPESTAT(display, pipe); pipe_stats[pipe] = intel_de_read(display, reg) & status_mask; enable_mask = i915_pipestat_enable_mask(display, pipe); @@ -595,19 +584,18 @@ void i9xx_pipestat_irq_ack(struct intel_display *display, void i915_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = to_i915(display->drm); bool blc_event = false; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(display, pipe); @@ -620,19 +608,18 @@ void i915_pipestat_irq_handler(struct intel_display *display, void i965_pipestat_irq_handler(struct intel_display *display, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = to_i915(display->drm); bool blc_event = false; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) blc_event = true; if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(display, pipe); @@ -648,18 +635,17 @@ void i965_pipestat_irq_handler(struct intel_display *display, void valleyview_pipestat_irq_handler(struct intel_display *display, u32 pipe_stats[I915_MAX_PIPES]) { - struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) - flip_done_handler(dev_priv, pipe); + flip_done_handler(display, pipe); if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + 
i9xx_pipe_crc_irq_handler(display, pipe); if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) intel_cpu_fifo_underrun_irq_handler(display, pipe); @@ -669,9 +655,8 @@ void valleyview_pipestat_irq_handler(struct intel_display *display, intel_gmbus_irq_handler(display); } -static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +static void ibx_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; @@ -680,7 +665,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (pch_iir & SDE_AUDIO_POWER_MASK) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> SDE_AUDIO_POWER_SHIFT); - drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n", + drm_dbg(display->drm, "PCH audio power change on port %d\n", port_name(port)); } @@ -691,26 +676,26 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_HDCP_MASK) - drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n"); + drm_dbg(display->drm, "PCH HDCP audio interrupt\n"); if (pch_iir & SDE_AUDIO_TRANS_MASK) - drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n"); + drm_dbg(display->drm, "PCH transcoder audio interrupt\n"); if (pch_iir & SDE_POISON) - drm_err(&dev_priv->drm, "PCH poison interrupt\n"); + drm_err(display->drm, "PCH poison interrupt\n"); if (pch_iir & SDE_FDI_MASK) { - for_each_pipe(dev_priv, pipe) - drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + for_each_pipe(display, pipe) + drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), intel_de_read(display, FDI_RX_IIR(pipe))); } if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) - drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n"); + drm_dbg(display->drm, "PCH transcoder CRC done interrupt\n"); if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "PCH transcoder CRC error interrupt\n"); if (pch_iir & SDE_TRANSA_FIFO_UNDER) @@ -753,14 +738,13 @@ static const struct pipe_fault_handler ivb_pipe_fault_handlers[] = { {} }; -static void ivb_err_int_handler(struct drm_i915_private *dev_priv) +static void ivb_err_int_handler(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 err_int = intel_de_read(display, GEN7_ERR_INT); enum pipe pipe; if (err_int & ERR_INT_POISON) - drm_err(&dev_priv->drm, "Poison interrupt\n"); + drm_err(display->drm, "Poison interrupt\n"); if (err_int & ERR_INT_INVALID_GTT_PTE) drm_err_ratelimited(display->drm, "Invalid GTT PTE\n"); @@ -768,17 +752,17 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv) if (err_int & ERR_INT_INVALID_PTE_DATA) drm_err_ratelimited(display->drm, "Invalid PTE data\n"); - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { u32 fault_errors; if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(display, pipe); if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { - if (IS_IVYBRIDGE(dev_priv)) - ivb_pipe_crc_irq_handler(dev_priv, pipe); + if (display->platform.ivybridge) + ivb_pipe_crc_irq_handler(display, pipe); else - hsw_pipe_crc_irq_handler(dev_priv, pipe); + hsw_pipe_crc_irq_handler(display, pipe); } fault_errors = err_int & ivb_err_int_pipe_fault_mask(pipe); @@ -790,25 +774,23 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv) intel_de_write(display, GEN7_ERR_INT, err_int); } -static void 
cpt_serr_int_handler(struct drm_i915_private *dev_priv) +static void cpt_serr_int_handler(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 serr_int = intel_de_read(display, SERR_INT); enum pipe pipe; if (serr_int & SERR_INT_POISON) - drm_err(&dev_priv->drm, "PCH poison interrupt\n"); + drm_err(display->drm, "PCH poison interrupt\n"); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) intel_pch_fifo_underrun_irq_handler(display, pipe); intel_de_write(display, SERR_INT, serr_int); } -static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) +static void cpt_irq_handler(struct intel_display *display, u32 pch_iir) { - struct intel_display *display = &dev_priv->display; enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; @@ -817,7 +799,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> SDE_AUDIO_POWER_SHIFT_CPT); - drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n", + drm_dbg(display->drm, "PCH audio power change on port %c\n", port_name(port)); } @@ -828,20 +810,20 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) intel_gmbus_irq_handler(display); if (pch_iir & SDE_AUDIO_CP_REQ_CPT) - drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n"); + drm_dbg(display->drm, "Audio CP request interrupt\n"); if (pch_iir & SDE_AUDIO_CP_CHG_CPT) - drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n"); + drm_dbg(display->drm, "Audio CP change interrupt\n"); if (pch_iir & SDE_FDI_MASK_CPT) { - for_each_pipe(dev_priv, pipe) - drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", + for_each_pipe(display, pipe) + drm_dbg(display->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), intel_de_read(display, FDI_RX_IIR(pipe))); } if (pch_iir & SDE_ERROR_CPT) - cpt_serr_int_handler(dev_priv); + cpt_serr_int_handler(display); } static u32 ilk_gtt_fault_pipe_fault_mask(enum pipe pipe) @@ -910,23 +892,23 @@ void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) intel_opregion_asle_intr(display); if (de_iir & DE_POISON) - drm_err(&dev_priv->drm, "Poison interrupt\n"); + drm_err(display->drm, "Poison interrupt\n"); if (de_iir & DE_GTT_FAULT) ilk_gtt_fault_irq_handler(display); - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (de_iir & DE_PIPE_VBLANK(pipe)) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (de_iir & DE_PLANE_FLIP_DONE(pipe)) - flip_done_handler(dev_priv, pipe); + flip_done_handler(display, pipe); if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) intel_cpu_fifo_underrun_irq_handler(display, pipe); if (de_iir & DE_PIPE_CRC_DONE(pipe)) - i9xx_pipe_crc_irq_handler(dev_priv, pipe); + i9xx_pipe_crc_irq_handler(display, pipe); } /* check event from PCH */ @@ -934,15 +916,15 @@ void ilk_display_irq_handler(struct intel_display *display, u32 de_iir) u32 pch_iir = intel_de_read(display, SDEIIR); if (HAS_PCH_CPT(dev_priv)) - cpt_irq_handler(dev_priv, pch_iir); + cpt_irq_handler(display, pch_iir); else - ibx_irq_handler(dev_priv, pch_iir); + ibx_irq_handler(display, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ intel_de_write(display, SDEIIR, pch_iir); } - if (DISPLAY_VER(dev_priv) == 5 && de_iir & DE_PCU_EVENT) + if (DISPLAY_VER(display) == 5 && de_iir & DE_PCU_EVENT) gen5_rps_irq_handler(&to_gt(dev_priv)->rps); } @@ -956,12 +938,12 
@@ void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) ilk_hpd_irq_handler(display, hotplug_trigger); if (de_iir & DE_ERR_INT_IVB) - ivb_err_int_handler(dev_priv); + ivb_err_int_handler(display); if (de_iir & DE_EDP_PSR_INT_HSW) { struct intel_encoder *encoder; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); u32 psr_iir; @@ -977,35 +959,35 @@ void ivb_display_irq_handler(struct intel_display *display, u32 de_iir) if (de_iir & DE_GSE_IVB) intel_opregion_asle_intr(display); - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { if (de_iir & DE_PIPE_VBLANK_IVB(pipe)) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) - flip_done_handler(dev_priv, pipe); + flip_done_handler(display, pipe); } /* check event from PCH */ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { u32 pch_iir = intel_de_read(display, SDEIIR); - cpt_irq_handler(dev_priv, pch_iir); + cpt_irq_handler(display, pch_iir); /* clear PCH hotplug event before clear CPU irq */ intel_de_write(display, SDEIIR, pch_iir); } } -static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) +static u32 gen8_de_port_aux_mask(struct intel_display *display) { u32 mask; - if (DISPLAY_VER(dev_priv) >= 20) + if (DISPLAY_VER(display) >= 20) return 0; - else if (DISPLAY_VER(dev_priv) >= 14) + else if (DISPLAY_VER(display) >= 14) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB; - else if (DISPLAY_VER(dev_priv) >= 13) + else if (DISPLAY_VER(display) >= 13) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | TGL_DE_PORT_AUX_DDIC | @@ -1015,7 +997,7 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) TGL_DE_PORT_AUX_USBC2 | TGL_DE_PORT_AUX_USBC3 | TGL_DE_PORT_AUX_USBC4; - else if (DISPLAY_VER(dev_priv) >= 12) + else if (DISPLAY_VER(display) >= 12) return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | TGL_DE_PORT_AUX_DDIC | @@ -1027,12 +1009,12 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) TGL_DE_PORT_AUX_USBC6; mask = GEN8_AUX_CHANNEL_A; - if (DISPLAY_VER(dev_priv) >= 9) + if (DISPLAY_VER(display) >= 9) mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; - if (DISPLAY_VER(dev_priv) == 11) { + if (DISPLAY_VER(display) == 11) { mask |= ICL_AUX_CHANNEL_F; mask |= ICL_AUX_CHANNEL_E; } @@ -1040,10 +1022,8 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) return mask; } -static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) +static u32 gen8_de_pipe_fault_mask(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; - if (DISPLAY_VER(display) >= 14) return MTL_PIPEDMC_ATS_FAULT | MTL_PLANE_ATS_FAULT | @@ -1195,15 +1175,14 @@ gen8_pipe_fault_handlers(struct intel_display *display) return bdw_pipe_fault_handlers; } -static void intel_pmdemand_irq_handler(struct drm_i915_private *dev_priv) +static void intel_pmdemand_irq_handler(struct intel_display *display) { - wake_up_all(&dev_priv->display.pmdemand.waitqueue); + wake_up_all(&display->pmdemand.waitqueue); } static void -gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) +gen8_de_misc_irq_handler(struct intel_display *display, u32 iir) { - struct intel_display *display = &dev_priv->display; bool found = false; if (HAS_DBUF_OVERLAP_DETECTION(display)) { @@ -1213,20 +1192,20 @@ gen8_de_misc_irq_handler(struct drm_i915_private 
*dev_priv, u32 iir) } } - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { if (iir & (XELPDP_PMDEMAND_RSP | XELPDP_PMDEMAND_RSPTOUT_ERR)) { if (iir & XELPDP_PMDEMAND_RSPTOUT_ERR) - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "Error waiting for Punit PM Demand Response\n"); - intel_pmdemand_irq_handler(dev_priv); + intel_pmdemand_irq_handler(display); found = true; } if (iir & XELPDP_RM_TIMEOUT) { u32 val = intel_de_read(display, RM_TIMEOUT_REG_CAPTURE); - drm_warn(&dev_priv->drm, "Register Access Timeout = 0x%x\n", val); + drm_warn(display->drm, "Register Access Timeout = 0x%x\n", val); found = true; } } else if (iir & GEN8_DE_MISC_GSE) { @@ -1239,12 +1218,12 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) u32 psr_iir; i915_reg_t iir_reg; - for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { + for_each_intel_encoder_with_psr(display->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - if (DISPLAY_VER(dev_priv) >= 12) - iir_reg = TRANS_PSR_IIR(dev_priv, - intel_dp->psr.transcoder); + if (DISPLAY_VER(display) >= 12) + iir_reg = TRANS_PSR_IIR(display, + intel_dp->psr.transcoder); else iir_reg = EDP_PSR_IIR; @@ -1256,19 +1235,18 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) intel_psr_irq_handler(intel_dp, psr_iir); /* prior GEN12 only have one EDP PSR */ - if (DISPLAY_VER(dev_priv) < 12) + if (DISPLAY_VER(display) < 12) break; } } if (!found) - drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir); + drm_err(display->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir); } -static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, +static void gen11_dsi_te_interrupt_handler(struct intel_display *display, u32 te_trigger) { - struct intel_display *display = &dev_priv->display; enum pipe pipe = INVALID_PIPE; enum transcoder dsi_trans; enum port port; @@ -1278,7 +1256,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, * Incase of dual link, TE comes from DSI_1 * this is to check if dual link is enabled */ - val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(dev_priv, TRANSCODER_DSI_0)); + val = intel_de_read(display, TRANS_DDI_FUNC_CTL2(display, TRANSCODER_DSI_0)); val &= PORT_SYNC_MODE_ENABLE; /* @@ -1294,12 +1272,12 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, val = val & OP_MODE_MASK; if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { - drm_err(&dev_priv->drm, "DSI trancoder not configured in command mode\n"); + drm_err(display->drm, "DSI trancoder not configured in command mode\n"); return; } /* Get PIPE for handling VBLANK event */ - val = intel_de_read(display, TRANS_DDI_FUNC_CTL(dev_priv, dsi_trans)); + val = intel_de_read(display, TRANS_DDI_FUNC_CTL(display, dsi_trans)); switch (val & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: pipe = PIPE_A; @@ -1311,28 +1289,28 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, pipe = PIPE_C; break; default: - drm_err(&dev_priv->drm, "Invalid PIPE\n"); + drm_err(display->drm, "Invalid PIPE\n"); return; } - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); /* clear TE in dsi IIR */ port = (te_trigger & DSI1_TE) ? 
PORT_B : PORT_A; intel_de_rmw(display, DSI_INTR_IDENT_REG(port), 0, 0); } -static u32 gen8_de_pipe_flip_done_mask(struct drm_i915_private *i915) +static u32 gen8_de_pipe_flip_done_mask(struct intel_display *display) { - if (DISPLAY_VER(i915) >= 9) + if (DISPLAY_VER(display) >= 9) return GEN9_PIPE_PLANE1_FLIP_DONE; else return GEN8_PIPE_PRIMARY_FLIP_DONE; } -static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_iir, u32 *pica_iir) +static void gen8_read_and_ack_pch_irqs(struct intel_display *display, u32 *pch_iir, u32 *pica_iir) { - struct intel_display *display = &i915->display; + struct drm_i915_private *i915 = to_i915(display->drm); u32 pica_ier = 0; *pica_iir = 0; @@ -1346,7 +1324,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i * their flags both in the PICA and SDE IIR. */ if (*pch_iir & SDE_PICAINTERRUPT) { - drm_WARN_ON(&i915->drm, INTEL_PCH_TYPE(i915) < PCH_MTL); + drm_WARN_ON(display->drm, INTEL_PCH_TYPE(i915) < PCH_MTL); pica_ier = intel_de_rmw(display, PICAINTERRUPT_IER, ~0, 0); *pica_iir = intel_de_read(display, PICAINTERRUPT_IIR); @@ -1365,26 +1343,26 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) u32 iir; enum pipe pipe; - drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DISPLAY(dev_priv)); + drm_WARN_ON_ONCE(display->drm, !HAS_DISPLAY(display)); if (master_ctl & GEN8_DE_MISC_IRQ) { iir = intel_de_read(display, GEN8_DE_MISC_IIR); if (iir) { intel_de_write(display, GEN8_DE_MISC_IIR, iir); - gen8_de_misc_irq_handler(dev_priv, iir); + gen8_de_misc_irq_handler(display, iir); } else { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied (DE MISC)!\n"); } } - if (DISPLAY_VER(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { + if (DISPLAY_VER(display) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { iir = intel_de_read(display, GEN11_DE_HPD_IIR); if (iir) { intel_de_write(display, GEN11_DE_HPD_IIR, iir); gen11_hpd_irq_handler(display, iir); } else { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied, (DE HPD)!\n"); } } @@ -1396,19 +1374,19 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) intel_de_write(display, GEN8_DE_PORT_IIR, iir); - if (iir & gen8_de_port_aux_mask(dev_priv)) { + if (iir & gen8_de_port_aux_mask(display)) { intel_dp_aux_irq_handler(display); found = true; } - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { u32 hotplug_trigger = iir & BXT_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { bxt_hpd_irq_handler(display, hotplug_trigger); found = true; } - } else if (IS_BROADWELL(dev_priv)) { + } else if (display->platform.broadwell) { u32 hotplug_trigger = iir & BDW_DE_PORT_HOTPLUG_MASK; if (hotplug_trigger) { @@ -1417,31 +1395,31 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) } } - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && + if ((display->platform.geminilake || display->platform.broxton) && (iir & BXT_DE_PORT_GMBUS)) { intel_gmbus_irq_handler(display); found = true; } - if (DISPLAY_VER(dev_priv) >= 11) { + if (DISPLAY_VER(display) >= 11) { u32 te_trigger = iir & (DSI0_TE | DSI1_TE); if (te_trigger) { - gen11_dsi_te_interrupt_handler(dev_priv, te_trigger); + gen11_dsi_te_interrupt_handler(display, te_trigger); found = true; } } if (!found) - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "Unexpected DE Port 
interrupt\n"); } else { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied (DE PORT)!\n"); } } - for_each_pipe(dev_priv, pipe) { + for_each_pipe(display, pipe) { u32 fault_errors; if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) @@ -1449,7 +1427,7 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) iir = intel_de_read(display, GEN8_DE_PIPE_IIR(pipe)); if (!iir) { - drm_err_ratelimited(&dev_priv->drm, + drm_err_ratelimited(display->drm, "The master control interrupt lied (DE PIPE)!\n"); continue; } @@ -1457,29 +1435,29 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) intel_de_write(display, GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK) - intel_handle_vblank(dev_priv, pipe); + intel_handle_vblank(display, pipe); - if (iir & gen8_de_pipe_flip_done_mask(dev_priv)) - flip_done_handler(dev_priv, pipe); + if (iir & gen8_de_pipe_flip_done_mask(display)) + flip_done_handler(display, pipe); - if (HAS_DSB(dev_priv)) { + if (HAS_DSB(display)) { if (iir & GEN12_DSB_INT(INTEL_DSB_0)) - intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_0); + intel_dsb_irq_handler(display, pipe, INTEL_DSB_0); if (iir & GEN12_DSB_INT(INTEL_DSB_1)) - intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_1); + intel_dsb_irq_handler(display, pipe, INTEL_DSB_1); if (iir & GEN12_DSB_INT(INTEL_DSB_2)) - intel_dsb_irq_handler(&dev_priv->display, pipe, INTEL_DSB_2); + intel_dsb_irq_handler(display, pipe, INTEL_DSB_2); } if (iir & GEN8_PIPE_CDCLK_CRC_DONE) - hsw_pipe_crc_irq_handler(dev_priv, pipe); + hsw_pipe_crc_irq_handler(display, pipe); if (iir & GEN8_PIPE_FIFO_UNDERRUN) intel_cpu_fifo_underrun_irq_handler(display, pipe); - fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv); + fault_errors = iir & gen8_de_pipe_fault_mask(display); if (fault_errors) intel_pipe_fault_irq_handler(display, gen8_pipe_fault_handlers(display), @@ -1495,7 +1473,7 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) * scheme also closed the SDE interrupt handling race we've seen * on older pch-split platforms. But this needs testing. */ - gen8_read_and_ack_pch_irqs(dev_priv, &iir, &pica_iir); + gen8_read_and_ack_pch_irqs(display, &iir, &pica_iir); if (iir) { if (pica_iir) xelpdp_pica_irq_handler(display, pica_iir); @@ -1505,13 +1483,13 @@ void gen8_de_irq_handler(struct intel_display *display, u32 master_ctl) else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) spt_irq_handler(display, iir); else - cpt_irq_handler(dev_priv, iir); + cpt_irq_handler(display, iir); } else { /* * Like on previous PCH there seems to be something * fishy going on with forwarding PCH interrupts. */ - drm_dbg(&dev_priv->drm, + drm_dbg(display->drm, "The master control interrupt lied (SDE)!\n"); } } @@ -1556,10 +1534,9 @@ void gen11_display_irq_handler(struct intel_display *display) enable_rpm_wakeref_asserts(&i915->runtime_pm); } -static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915) +static void i915gm_irq_cstate_wa_enable(struct intel_display *display) { - struct intel_display *display = &i915->display; - lockdep_assert_held(&i915->drm.vblank_time_lock); + lockdep_assert_held(&display->drm->vblank_time_lock); /* * Vblank/CRC interrupts fail to wake the device up from C2+. @@ -1567,33 +1544,30 @@ static void i915gm_irq_cstate_wa_enable(struct drm_i915_private *i915) * the problem. There is a small power cost so we do this * only when vblank/CRC interrupts are actually enabled. 
*/ - if (i915->display.irq.vblank_enabled++ == 0) + if (display->irq.vblank_enabled++ == 0) intel_de_write(display, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } -static void i915gm_irq_cstate_wa_disable(struct drm_i915_private *i915) +static void i915gm_irq_cstate_wa_disable(struct intel_display *display) { - struct intel_display *display = &i915->display; - lockdep_assert_held(&i915->drm.vblank_time_lock); + lockdep_assert_held(&display->drm->vblank_time_lock); - if (--i915->display.irq.vblank_enabled == 0) + if (--display->irq.vblank_enabled == 0) intel_de_write(display, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } void i915gm_irq_cstate_wa(struct intel_display *display, bool enable) { - struct drm_i915_private *i915 = to_i915(display->drm); - - spin_lock_irq(&i915->drm.vblank_time_lock); + spin_lock_irq(&display->drm->vblank_time_lock); if (enable) - i915gm_irq_cstate_wa_enable(i915); + i915gm_irq_cstate_wa_enable(display); else - i915gm_irq_cstate_wa_disable(i915); + i915gm_irq_cstate_wa_disable(display); - spin_unlock_irq(&i915->drm.vblank_time_lock); + spin_unlock_irq(&display->drm->vblank_time_lock); } int i8xx_enable_vblank(struct drm_crtc *crtc) @@ -1624,20 +1598,20 @@ void i8xx_disable_vblank(struct drm_crtc *crtc) int i915gm_enable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->dev); + struct intel_display *display = to_intel_display(crtc->dev); - i915gm_irq_cstate_wa_enable(i915); + i915gm_irq_cstate_wa_enable(display); return i8xx_enable_vblank(crtc); } void i915gm_disable_vblank(struct drm_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->dev); + struct intel_display *display = to_intel_display(crtc->dev); i8xx_disable_vblank(crtc); - i915gm_irq_cstate_wa_disable(i915); + i915gm_irq_cstate_wa_disable(display); } int i965_enable_vblank(struct drm_crtc *crtc) @@ -1674,7 +1648,7 @@ int ilk_enable_vblank(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? + u32 bit = DISPLAY_VER(display) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -1684,7 +1658,7 @@ int ilk_enable_vblank(struct drm_crtc *crtc) /* Even though there is no DMC, frame counter can get stuck when * PSR is active as no frames are generated. */ - if (HAS_PSR(dev_priv)) + if (HAS_PSR(display)) drm_crtc_vblank_restore(crtc); return 0; @@ -1696,7 +1670,7 @@ void ilk_disable_vblank(struct drm_crtc *crtc) struct drm_i915_private *dev_priv = to_i915(crtc->dev); enum pipe pipe = to_intel_crtc(crtc)->pipe; unsigned long irqflags; - u32 bit = DISPLAY_VER(dev_priv) >= 7 ? + u32 bit = DISPLAY_VER(display) >= 7 ? DE_PIPE_VBLANK_IVB(pipe) : DE_PIPE_VBLANK(pipe); spin_lock_irqsave(&dev_priv->irq_lock, irqflags); @@ -1764,7 +1738,7 @@ int bdw_enable_vblank(struct drm_crtc *_crtc) /* Even if there is no DMC, frame counter can get stuck when * PSR is active as no frames are generated, so check only for PSR. 
*/ - if (HAS_PSR(dev_priv)) + if (HAS_PSR(display)) drm_crtc_vblank_restore(&crtc->base); return 0; @@ -1897,11 +1871,11 @@ void vlv_display_error_irq_handler(struct intel_display *display, vlv_page_table_error_irq_handler(display, dpinvgtt); } -static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) +static void _vlv_display_irq_reset(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV); else intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_VLV); @@ -1910,9 +1884,9 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) VLV_ERROR_REGS); i915_hotplug_interrupt_update_locked(display, 0xffffffff, 0); - intel_de_rmw(display, PORT_HOTPLUG_STAT(dev_priv), 0, 0); + intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0); - i9xx_pipestat_irq_reset(dev_priv); + i9xx_pipestat_irq_reset(display); intel_display_irq_regs_reset(display, VLV_IRQ_REGS); dev_priv->irq_mask = ~0u; @@ -1920,22 +1894,18 @@ static void _vlv_display_irq_reset(struct drm_i915_private *dev_priv) void vlv_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - - if (dev_priv->display.irq.vlv_display_irqs_enabled) - _vlv_display_irq_reset(dev_priv); + if (display->irq.vlv_display_irqs_enabled) + _vlv_display_irq_reset(display); } void i9xx_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - if (I915_HAS_HOTPLUG(i915)) { + if (I915_HAS_HOTPLUG(display)) { i915_hotplug_interrupt_update(display, 0xffffffff, 0); - intel_de_rmw(display, PORT_HOTPLUG_STAT(i915), 0, 0); + intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0); } - i9xx_pipestat_irq_reset(i915); + i9xx_pipestat_irq_reset(display); } static u32 vlv_error_mask(void) @@ -1951,10 +1921,10 @@ void vlv_display_irq_postinstall(struct intel_display *display) u32 enable_mask; enum pipe pipe; - if (!dev_priv->display.irq.vlv_display_irqs_enabled) + if (!display->irq.vlv_display_irqs_enabled) return; - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) intel_de_write(display, DPINVGTT, DPINVGTT_STATUS_MASK_CHV | DPINVGTT_EN_MASK_CHV); @@ -1969,7 +1939,7 @@ void vlv_display_irq_postinstall(struct intel_display *display) pipestat_mask = PIPE_CRC_DONE_INTERRUPT_STATUS; i915_enable_pipestat(display, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) i915_enable_pipestat(display, pipe, pipestat_mask); enable_mask = I915_DISPLAY_PORT_INTERRUPT | @@ -1979,11 +1949,11 @@ void vlv_display_irq_postinstall(struct intel_display *display) I915_LPE_PIPE_B_INTERRUPT | I915_MASTER_ERROR_INTERRUPT; - if (IS_CHERRYVIEW(dev_priv)) + if (display->platform.cherryview) enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | I915_LPE_PIPE_C_INTERRUPT; - drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u); + drm_WARN_ON(display->drm, dev_priv->irq_mask != ~0u); dev_priv->irq_mask = ~enable_mask; @@ -1992,16 +1962,15 @@ void vlv_display_irq_postinstall(struct intel_display *display) void gen8_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); enum pipe pipe; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; intel_de_write(display, EDP_PSR_IMR, 0xffffffff); intel_de_write(display, EDP_PSR_IIR, 
0xffffffff); - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); @@ -2017,15 +1986,15 @@ void gen11_display_irq_reset(struct intel_display *display) u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; intel_de_write(display, GEN11_DISPLAY_INT_CTL, 0); - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { enum transcoder trans; - for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { + for_each_cpu_transcoder_masked(display, trans, trans_mask) { enum intel_display_power_domain domain; domain = POWER_DOMAIN_TRANSCODER(trans); @@ -2033,10 +2002,10 @@ void gen11_display_irq_reset(struct intel_display *display) continue; intel_de_write(display, - TRANS_PSR_IMR(dev_priv, trans), + TRANS_PSR_IMR(display, trans), 0xffffffff); intel_de_write(display, - TRANS_PSR_IIR(dev_priv, trans), + TRANS_PSR_IIR(display, trans), 0xffffffff); } } else { @@ -2044,7 +2013,7 @@ void gen11_display_irq_reset(struct intel_display *display) intel_de_write(display, EDP_PSR_IIR, 0xffffffff); } - for_each_pipe(dev_priv, pipe) + for_each_pipe(display, pipe) if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); @@ -2052,7 +2021,7 @@ void gen11_display_irq_reset(struct intel_display *display) intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS); intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS); - if (DISPLAY_VER(dev_priv) >= 14) + if (DISPLAY_VER(display) >= 14) intel_display_irq_regs_reset(display, PICAINTERRUPT_IRQ_REGS); else intel_display_irq_regs_reset(display, GEN11_DE_HPD_IRQ_REGS); @@ -2066,7 +2035,7 @@ void gen8_irq_power_well_post_enable(struct intel_display *display, { struct drm_i915_private *dev_priv = to_i915(display->drm); u32 extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | - gen8_de_pipe_flip_done_mask(dev_priv); + gen8_de_pipe_flip_done_mask(display); enum pipe pipe; spin_lock_irq(&dev_priv->irq_lock); @@ -2076,10 +2045,10 @@ void gen8_irq_power_well_post_enable(struct intel_display *display, return; } - for_each_pipe_masked(dev_priv, pipe, pipe_mask) + for_each_pipe_masked(display, pipe, pipe_mask) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - dev_priv->display.irq.de_irq_mask[pipe], - ~dev_priv->display.irq.de_irq_mask[pipe] | extra_ier); + display->irq.de_irq_mask[pipe], + ~display->irq.de_irq_mask[pipe] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } @@ -2097,7 +2066,7 @@ void gen8_irq_power_well_pre_disable(struct intel_display *display, return; } - for_each_pipe_masked(dev_priv, pipe, pipe_mask) + for_each_pipe_masked(display, pipe, pipe_mask) intel_display_irq_regs_reset(display, GEN8_DE_PIPE_IRQ_REGS(pipe)); spin_unlock_irq(&dev_priv->irq_lock); @@ -2117,9 +2086,9 @@ void gen8_irq_power_well_pre_disable(struct intel_display *display, * to avoid races with the irq handler, assuming we have MSI. Shared legacy * interrupts could still race. 
*/ -static void ibx_irq_postinstall(struct drm_i915_private *dev_priv) +static void ibx_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); u32 mask; if (HAS_PCH_NOP(dev_priv)) @@ -2141,13 +2110,13 @@ void valleyview_enable_display_irqs(struct intel_display *display) lockdep_assert_held(&dev_priv->irq_lock); - if (dev_priv->display.irq.vlv_display_irqs_enabled) + if (display->irq.vlv_display_irqs_enabled) return; - dev_priv->display.irq.vlv_display_irqs_enabled = true; + display->irq.vlv_display_irqs_enabled = true; if (intel_irqs_enabled(dev_priv)) { - _vlv_display_irq_reset(dev_priv); + _vlv_display_irq_reset(display); vlv_display_irq_postinstall(display); } } @@ -2158,13 +2127,13 @@ void valleyview_disable_display_irqs(struct intel_display *display) lockdep_assert_held(&dev_priv->irq_lock); - if (!dev_priv->display.irq.vlv_display_irqs_enabled) + if (!display->irq.vlv_display_irqs_enabled) return; - dev_priv->display.irq.vlv_display_irqs_enabled = false; + display->irq.vlv_display_irqs_enabled = false; if (intel_irqs_enabled(dev_priv)) - _vlv_display_irq_reset(dev_priv); + _vlv_display_irq_reset(display); } void ilk_de_irq_postinstall(struct intel_display *display) @@ -2173,7 +2142,7 @@ void ilk_de_irq_postinstall(struct intel_display *display) u32 display_mask, extra_mask; - if (DISPLAY_VER(i915) >= 7) { + if (DISPLAY_VER(display) >= 7) { display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | DE_AUX_CHANNEL_A_IVB); extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | @@ -2194,59 +2163,59 @@ void ilk_de_irq_postinstall(struct intel_display *display) DE_DP_A_HOTPLUG); } - if (IS_HASWELL(i915)) { + if (display->platform.haswell) { intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR); display_mask |= DE_EDP_PSR_INT_HSW; } - if (IS_IRONLAKE_M(i915)) + if (display->platform.ironlake && display->platform.mobile) extra_mask |= DE_PCU_EVENT; i915->irq_mask = ~display_mask; - ibx_irq_postinstall(i915); + ibx_irq_postinstall(display); intel_display_irq_regs_init(display, DE_IRQ_REGS, i915->irq_mask, display_mask | extra_mask); } -static void mtp_irq_postinstall(struct drm_i915_private *i915); -static void icp_irq_postinstall(struct drm_i915_private *i915); +static void mtp_irq_postinstall(struct intel_display *display); +static void icp_irq_postinstall(struct intel_display *display); void gen8_de_irq_postinstall(struct intel_display *display) { struct drm_i915_private *dev_priv = to_i915(display->drm); - u32 de_pipe_masked = gen8_de_pipe_fault_mask(dev_priv) | + u32 de_pipe_masked = gen8_de_pipe_fault_mask(display) | GEN8_PIPE_CDCLK_CRC_DONE; u32 de_pipe_enables; - u32 de_port_masked = gen8_de_port_aux_mask(dev_priv); + u32 de_port_masked = gen8_de_port_aux_mask(display); u32 de_port_enables; u32 de_misc_masked = GEN8_DE_EDP_PSR; u32 trans_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C) | BIT(TRANSCODER_D); enum pipe pipe; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VER(dev_priv) >= 14) - mtp_irq_postinstall(dev_priv); + if (DISPLAY_VER(display) >= 14) + mtp_irq_postinstall(display); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_postinstall(dev_priv); + icp_irq_postinstall(display); else if (HAS_PCH_SPLIT(dev_priv)) - ibx_irq_postinstall(dev_priv); + ibx_irq_postinstall(display); - if (DISPLAY_VER(dev_priv) < 11) + if (DISPLAY_VER(display) < 11) de_misc_masked |= GEN8_DE_MISC_GSE; - if 
(IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) de_port_masked |= BXT_DE_PORT_GMBUS; - if (DISPLAY_VER(dev_priv) >= 14) { + if (DISPLAY_VER(display) >= 14) { de_misc_masked |= XELPDP_PMDEMAND_RSPTOUT_ERR | XELPDP_PMDEMAND_RSP | XELPDP_RM_TIMEOUT; - } else if (DISPLAY_VER(dev_priv) >= 11) { + } else if (DISPLAY_VER(display) >= 11) { enum port port; if (intel_bios_is_dsi_present(display, &port)) @@ -2256,25 +2225,25 @@ void gen8_de_irq_postinstall(struct intel_display *display) if (HAS_DBUF_OVERLAP_DETECTION(display)) de_misc_masked |= XE2LPD_DBUF_OVERLAP_DETECTED; - if (HAS_DSB(dev_priv)) + if (HAS_DSB(display)) de_pipe_masked |= GEN12_DSB_INT(INTEL_DSB_0) | GEN12_DSB_INT(INTEL_DSB_1) | GEN12_DSB_INT(INTEL_DSB_2); de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN | - gen8_de_pipe_flip_done_mask(dev_priv); + gen8_de_pipe_flip_done_mask(display); de_port_enables = de_port_masked; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK; - else if (IS_BROADWELL(dev_priv)) + else if (display->platform.broadwell) de_port_enables |= BDW_DE_PORT_HOTPLUG_MASK; - if (DISPLAY_VER(dev_priv) >= 12) { + if (DISPLAY_VER(display) >= 12) { enum transcoder trans; - for_each_cpu_transcoder_masked(dev_priv, trans, trans_mask) { + for_each_cpu_transcoder_masked(display, trans, trans_mask) { enum intel_display_power_domain domain; domain = POWER_DOMAIN_TRANSCODER(trans); @@ -2282,19 +2251,19 @@ void gen8_de_irq_postinstall(struct intel_display *display) continue; intel_display_irq_regs_assert_irr_is_zero(display, - TRANS_PSR_IIR(dev_priv, trans)); + TRANS_PSR_IIR(display, trans)); } } else { intel_display_irq_regs_assert_irr_is_zero(display, EDP_PSR_IIR); } - for_each_pipe(dev_priv, pipe) { - dev_priv->display.irq.de_irq_mask[pipe] = ~de_pipe_masked; + for_each_pipe(display, pipe) { + display->irq.de_irq_mask[pipe] = ~de_pipe_masked; if (intel_display_power_is_enabled(display, POWER_DOMAIN_PIPE(pipe))) intel_display_irq_regs_init(display, GEN8_DE_PIPE_IRQ_REGS(pipe), - dev_priv->display.irq.de_irq_mask[pipe], + display->irq.de_irq_mask[pipe], de_pipe_enables); } @@ -2303,7 +2272,7 @@ void gen8_de_irq_postinstall(struct intel_display *display) intel_display_irq_regs_init(display, GEN8_DE_MISC_IRQ_REGS, ~de_misc_masked, de_misc_masked); - if (IS_DISPLAY_VER(dev_priv, 11, 13)) { + if (IS_DISPLAY_VER(display, 11, 13)) { u32 de_hpd_masked = 0; u32 de_hpd_enables = GEN11_DE_TC_HOTPLUG_MASK | GEN11_DE_TBT_HOTPLUG_MASK; @@ -2313,9 +2282,8 @@ void gen8_de_irq_postinstall(struct intel_display *display) } } -static void mtp_irq_postinstall(struct drm_i915_private *i915) +static void mtp_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &i915->display; u32 sde_mask = SDE_GMBUS_ICP | SDE_PICAINTERRUPT; u32 de_hpd_mask = XELPDP_AUX_TC_MASK; u32 de_hpd_enables = de_hpd_mask | XELPDP_DP_ALT_HOTPLUG_MASK | @@ -2327,9 +2295,8 @@ static void mtp_irq_postinstall(struct drm_i915_private *i915) intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~sde_mask, 0xffffffff); } -static void icp_irq_postinstall(struct drm_i915_private *dev_priv) +static void icp_irq_postinstall(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; u32 mask = SDE_GMBUS_ICP; intel_display_irq_regs_init(display, SDE_IRQ_REGS, ~mask, 0xffffffff); From 09b9563e54415d51c7a3cb35e127e42a5419a0e5 Mon Sep 
17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 16:46:05 +0200 Subject: [PATCH 0169/1627] drm/i915/display: rename I915_HAS_HOTPLUG() to HAS_HOTPLUG MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Most of the other display feature check macros are just HAS_. Follow suit with hotplug check. Reviewed-by: Uma Shankar Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/c386ef007ae8bdda1bb9b1b353b1cd2957897842.1742481923.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_crt.c | 6 +++--- drivers/gpu/drm/i915/display/intel_display_device.h | 2 +- drivers/gpu/drm/i915/display/intel_display_irq.c | 2 +- drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 2 +- drivers/gpu/drm/i915/display/intel_sdvo.c | 2 +- drivers/gpu/drm/i915/i915_irq.c | 4 ++-- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index a7f360f89410..cca22d2402e8 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -877,7 +877,7 @@ intel_crt_detect(struct drm_connector *connector, wakeref = intel_display_power_get(display, encoder->power_domain); - if (I915_HAS_HOTPLUG(display)) { + if (HAS_HOTPLUG(display)) { /* We can not rely on the HPD pin always being correctly wired * up, for example many KVM do not pass it through, and so * only trust an assertion that the monitor is connected. @@ -901,7 +901,7 @@ intel_crt_detect(struct drm_connector *connector, * broken monitor (without edid) to work behind a broken kvm (that fails * to have the right resistors for HP detection) needs to fix this up. * For now just bail out. */ - if (I915_HAS_HOTPLUG(display)) { + if (HAS_HOTPLUG(display)) { status = connector_status_disconnected; goto out; } @@ -1081,7 +1081,7 @@ void intel_crt_init(struct intel_display *display) crt->base.power_domain = POWER_DOMAIN_PORT_CRT; - if (I915_HAS_HOTPLUG(display) && + if (HAS_HOTPLUG(display) && !dmi_check_system(intel_spurious_crt_detect)) { crt->base.hpd_pin = HPD_CRT; crt->base.hotplug = intel_encoder_hotplug; diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 4e9630f65af6..368b0d3417c2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -171,6 +171,7 @@ struct intel_display_platforms { #define HAS_GMBUS_BURST_READ(__display) (DISPLAY_VER(__display) >= 10 || (__display)->platform.kabylake) #define HAS_GMBUS_IRQ(__display) (DISPLAY_VER(__display) >= 4) #define HAS_GMCH(__display) (DISPLAY_INFO(__display)->has_gmch) +#define HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug) #define HAS_HW_SAGV_WM(__display) (DISPLAY_VER(__display) >= 13 && !(__display)->platform.dgfx) #define HAS_IPC(__display) (DISPLAY_INFO(__display)->has_ipc) #define HAS_IPS(__display) ((__display)->platform.haswell_ult || (__display)->platform.broadwell) @@ -192,7 +193,6 @@ struct intel_display_platforms { HAS_DSC(__display)) #define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11) #define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask)) -#define I915_HAS_HOTPLUG(__display) (DISPLAY_INFO(__display)->has_hotplug) #define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical) #define SUPPORTS_TV(__display) (DISPLAY_INFO(__display)->supports_tv) diff --git 
a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 6f78fe6de06a..d9f9b9f78abb 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -1900,7 +1900,7 @@ void vlv_display_irq_reset(struct intel_display *display) void i9xx_display_irq_reset(struct intel_display *display) { - if (I915_HAS_HOTPLUG(display)) { + if (HAS_HOTPLUG(display)) { i915_hotplug_interrupt_update(display, 0xffffffff, 0); intel_de_rmw(display, PORT_HOTPLUG_STAT(display), 0, 0); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 1bcff3a47745..2463e61e7802 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -1481,7 +1481,7 @@ void intel_hotplug_irq_init(struct intel_display *display) intel_hpd_init_early(display); if (HAS_GMCH(display)) { - if (I915_HAS_HOTPLUG(display)) + if (HAS_HOTPLUG(display)) display->funcs.hotplug = &i915_hpd_funcs; } else { if (HAS_PCH_DG2(i915)) diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 6e2d9929b4d7..757b9ce7e3b1 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2036,7 +2036,7 @@ static u16 intel_sdvo_get_hotplug_support(struct intel_sdvo *intel_sdvo) struct intel_display *display = to_intel_display(&intel_sdvo->base); u16 hotplug; - if (!I915_HAS_HOTPLUG(display)) + if (!HAS_HOTPLUG(display)) return 0; /* diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index ba3afc7e38ac..c1f938a1da44 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -954,7 +954,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) enable_mask |= I915_ASLE_INTERRUPT; } - if (I915_HAS_HOTPLUG(dev_priv)) { + if (HAS_HOTPLUG(dev_priv)) { dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PORT_INTERRUPT; } @@ -995,7 +995,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; - if (I915_HAS_HOTPLUG(dev_priv) && + if (HAS_HOTPLUG(dev_priv) && iir & I915_DISPLAY_PORT_INTERRUPT) hotplug_status = i9xx_hpd_irq_ack(display); From 336c0eaed2bde68b592769e9cd9c12d76d2b4578 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 17:03:55 +0200 Subject: [PATCH 0170/1627] drm/i915/display: add display specific runtime PM wrappers Add display specific wrappers around the i915 and xe dedicated runtime PM interfaces. There are no conversions here, just the wrappers. Implement with_intel_display_rpm() without needing to provide a local variable, which neatly narrows the scope and hides the type of the wakeref cookie. 
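For illustration only (not part of the change itself), the intended usage is
either the scoped form, where the wakeref cookie never appears in the caller,
or the explicit get/put pair when the reference must outlive a single scope;
the register and value below are placeholders, not code from this series:

	/* scoped form: the macro acquires and releases the wakeref */
	with_intel_display_rpm(display)
		intel_de_write(display, SOME_REG, val);

	/* explicit form: the cookie is held across a longer span */
	struct ref_tracker *wakeref = intel_display_rpm_get(display);
	/* ... hardware access ... */
	intel_display_rpm_put(display, wakeref);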
Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/086b312367fa0fbd8de92e9764117aa7ff4a8cc5.1742483007.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile | 1 + .../gpu/drm/i915/display/intel_display_rpm.c | 68 ++++++++++++++++++ .../gpu/drm/i915/display/intel_display_rpm.h | 37 ++++++++++ drivers/gpu/drm/xe/Makefile | 1 + drivers/gpu/drm/xe/display/xe_display_rpm.c | 71 +++++++++++++++++++ 5 files changed, 178 insertions(+) create mode 100644 drivers/gpu/drm/i915/display/intel_display_rpm.c create mode 100644 drivers/gpu/drm/i915/display/intel_display_rpm.h create mode 100644 drivers/gpu/drm/xe/display/xe_display_rpm.c diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index ed05b131ed3a..c8fc271b33b7 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -247,6 +247,7 @@ i915-y += \ display/intel_display_power_map.o \ display/intel_display_power_well.o \ display/intel_display_reset.o \ + display/intel_display_rpm.o \ display/intel_display_rps.o \ display/intel_display_snapshot.o \ display/intel_display_wa.o \ diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.c b/drivers/gpu/drm/i915/display/intel_display_rpm.c new file mode 100644 index 000000000000..48da67dd0136 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rpm.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include "i915_drv.h" +#include "intel_display_rpm.h" +#include "intel_runtime_pm.h" + +static struct intel_runtime_pm *display_to_rpm(struct intel_display *display) +{ + struct drm_i915_private *i915 = to_i915(display->drm); + + return &i915->runtime_pm; +} + +struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display) +{ + return intel_runtime_pm_get_raw(display_to_rpm(display)); +} + +void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref) +{ + intel_runtime_pm_put_raw(display_to_rpm(display), wakeref); +} + +struct ref_tracker *intel_display_rpm_get(struct intel_display *display) +{ + return intel_runtime_pm_get(display_to_rpm(display)); +} + +struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display) +{ + return intel_runtime_pm_get_if_in_use(display_to_rpm(display)); +} + +struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display) +{ + return intel_runtime_pm_get_noresume(display_to_rpm(display)); +} + +void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref) +{ + intel_runtime_pm_put(display_to_rpm(display), wakeref); +} + +void intel_display_rpm_put_unchecked(struct intel_display *display) +{ + intel_runtime_pm_put_unchecked(display_to_rpm(display)); +} + +bool intel_display_rpm_suspended(struct intel_display *display) +{ + return intel_runtime_pm_suspended(display_to_rpm(display)); +} + +void assert_display_rpm_held(struct intel_display *display) +{ + assert_rpm_wakelock_held(display_to_rpm(display)); +} + +void intel_display_rpm_assert_block(struct intel_display *display) +{ + disable_rpm_wakeref_asserts(display_to_rpm(display)); +} + +void intel_display_rpm_assert_unblock(struct intel_display *display) +{ + enable_rpm_wakeref_asserts(display_to_rpm(display)); +} diff --git a/drivers/gpu/drm/i915/display/intel_display_rpm.h b/drivers/gpu/drm/i915/display/intel_display_rpm.h new file mode 100644 index 000000000000..6ef48515f84b --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_display_rpm.h @@ -0,0 +1,37 @@ +/* 
SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __INTEL_DISPLAY_RPM__ +#define __INTEL_DISPLAY_RPM__ + +#include + +struct intel_display; +struct ref_tracker; + +struct ref_tracker *intel_display_rpm_get(struct intel_display *display); +void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref); + +#define __with_intel_display_rpm(__display, __wakeref) \ + for (struct ref_tracker *(__wakeref) = intel_display_rpm_get(__display); (__wakeref); \ + intel_display_rpm_put((__display), (__wakeref)), (__wakeref) = NULL) + +#define with_intel_display_rpm(__display) \ + __with_intel_display_rpm((__display), __UNIQUE_ID(wakeref)) + +/* Only for special cases. */ +bool intel_display_rpm_suspended(struct intel_display *display); + +void assert_display_rpm_held(struct intel_display *display); +void intel_display_rpm_assert_block(struct intel_display *display); +void intel_display_rpm_assert_unblock(struct intel_display *display); + +/* Only for display power implementation. */ +struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display); +void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref); + +struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display); +struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display); +void intel_display_rpm_put_unchecked(struct intel_display *display); + +#endif /* __INTEL_DISPLAY_RPM__ */ diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index 5ce65ccb3c08..4a99568605bd 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -181,6 +181,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ display/intel_fbdev_fb.o \ display/xe_display.o \ display/xe_display_misc.o \ + display/xe_display_rpm.o \ display/xe_display_rps.o \ display/xe_display_wa.o \ display/xe_dsb_buffer.o \ diff --git a/drivers/gpu/drm/xe/display/xe_display_rpm.c b/drivers/gpu/drm/xe/display/xe_display_rpm.c new file mode 100644 index 000000000000..1955153aadba --- /dev/null +++ b/drivers/gpu/drm/xe/display/xe_display_rpm.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: MIT +/* Copyright © 2025 Intel Corporation */ + +#include "intel_display_rpm.h" +#include "xe_device_types.h" +#include "xe_pm.h" + +static struct xe_device *display_to_xe(struct intel_display *display) +{ + return container_of(display, struct xe_device, display); +} + +struct ref_tracker *intel_display_rpm_get_raw(struct intel_display *display) +{ + return intel_display_rpm_get(display); +} + +void intel_display_rpm_put_raw(struct intel_display *display, struct ref_tracker *wakeref) +{ + intel_display_rpm_put(display, wakeref); +} + +struct ref_tracker *intel_display_rpm_get(struct intel_display *display) +{ + return xe_pm_runtime_resume_and_get(display_to_xe(display)) ? INTEL_WAKEREF_DEF : NULL; +} + +struct ref_tracker *intel_display_rpm_get_if_in_use(struct intel_display *display) +{ + return xe_pm_runtime_get_if_in_use(display_to_xe(display)) ? 
INTEL_WAKEREF_DEF : NULL; +} + +struct ref_tracker *intel_display_rpm_get_noresume(struct intel_display *display) +{ + xe_pm_runtime_get_noresume(display_to_xe(display)); + + return INTEL_WAKEREF_DEF; +} + +void intel_display_rpm_put(struct intel_display *display, struct ref_tracker *wakeref) +{ + if (wakeref) + xe_pm_runtime_put(display_to_xe(display)); +} + +void intel_display_rpm_put_unchecked(struct intel_display *display) +{ + xe_pm_runtime_put(display_to_xe(display)); +} + +bool intel_display_rpm_suspended(struct intel_display *display) +{ + struct xe_device *xe = display_to_xe(display); + + return pm_runtime_suspended(xe->drm.dev); +} + +void assert_display_rpm_held(struct intel_display *display) +{ + /* FIXME */ +} + +void intel_display_rpm_assert_block(struct intel_display *display) +{ + /* FIXME */ +} + +void intel_display_rpm_assert_unblock(struct intel_display *display) +{ + /* FIXME */ +} From e1de63b84cf0e621e69b0accaed4d5504e01b7ff Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 17:03:56 +0200 Subject: [PATCH 0171/1627] drm/i915/display: conversions to with_intel_display_rpm() Convert all with_intel_runtime_pm() uses to with_intel_display_rpm(). Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/888566433ca5f31b3fa3c0a192fd495d86c2f201.1742483007.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_backlight.c | 5 ++--- drivers/gpu/drm/i915/display/intel_bios.c | 6 +++--- drivers/gpu/drm/i915/display/intel_hdcp.c | 5 ++--- drivers/gpu/drm/i915/display/skl_watermark.c | 9 +++++---- 4 files changed, 12 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 178dc6c8de80..4f3fa966c537 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -16,6 +16,7 @@ #include "intel_backlight_regs.h" #include "intel_connector.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dp_aux_backlight.h" #include "intel_dsi_dcs_backlight.h" @@ -901,11 +902,9 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd) { struct intel_connector *connector = bl_get_data(bd); struct intel_display *display = to_intel_display(connector); - struct drm_i915_private *i915 = to_i915(connector->base.dev); - intel_wakeref_t wakeref; int ret = 0; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + with_intel_display_rpm(display) { u32 hw_level; drm_modeset_lock(&display->drm->mode_config.connection_mutex, NULL); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index a8d08d7d82b3..fabfcf2caa69 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -37,6 +37,7 @@ #include "i915_drv.h" #include "intel_display.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_gmbus.h" @@ -3115,7 +3116,6 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display { struct drm_i915_private *i915 = to_i915(display->drm); const struct vbt_header *vbt = NULL; - intel_wakeref_t wakeref; vbt = firmware_get_vbt(display, sizep); @@ -3127,11 +3127,11 @@ static const struct vbt_header *intel_bios_get_vbt(struct intel_display *display * through MMIO or PCI mapping */ if (!vbt && IS_DGFX(i915)) - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_display_rpm(display) vbt = 
oprom_get_vbt(display, intel_rom_spi(i915), sizep, "SPI flash"); if (!vbt) - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_display_rpm(display) vbt = oprom_get_vbt(display, intel_rom_pci(i915), sizep, "PCI ROM"); return vbt; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 1bf424a822f3..72a43ef6e4d2 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -22,6 +22,7 @@ #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_hdcp_gsc.h" @@ -334,9 +335,7 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, static bool hdcp_key_loadable(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); enum i915_power_well_id id; - intel_wakeref_t wakeref; bool enabled = false; /* @@ -349,7 +348,7 @@ static bool hdcp_key_loadable(struct intel_display *display) id = SKL_DISP_PW_1; /* PG1 (power well #1) needs to be enabled */ - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_display_rpm(display) enabled = intel_display_power_well_is_enabled(display, id); /* diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 91ab8537347f..a6af5e4ba4d4 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -19,6 +19,7 @@ #include "intel_de.h" #include "intel_display.h" #include "intel_display_power.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fixed.h" @@ -4057,7 +4058,7 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file, { struct seq_file *m = file->private_data; struct drm_i915_private *i915 = m->private; - intel_wakeref_t wakeref; + struct intel_display *display = &i915->display; bool enable; int ret; @@ -4065,11 +4066,11 @@ static ssize_t skl_watermark_ipc_status_write(struct file *file, if (ret < 0) return ret; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + with_intel_display_rpm(display) { if (!skl_watermark_ipc_enabled(i915) && enable) - drm_info(&i915->drm, + drm_info(display->drm, "Enabling IPC: WM will be proper only after next commit\n"); - i915->display.wm.ipc_enabled = enable; + display->wm.ipc_enabled = enable; skl_watermark_ipc_update(i915); } From 31630f39e7a5f6186f25e08e502b8d1c775635d3 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 17:03:57 +0200 Subject: [PATCH 0172/1627] drm/i915/display: use display runtime PM interfaces for for atomic state Convert intel_atomic_commit() and intel_atomic_commit_tail() to use display runtime PM interfaces. Also convert the wakeref member type to struct ref_tracker *, which is the same as intel_wakeref_t, but without the typedef. 
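The resulting pattern in the commit path is roughly the following (a sketch
condensed from the hunks below, with error handling elided):

	state->wakeref = intel_display_rpm_get(display);
	/* ... prepare and commit the atomic state ... */
	intel_display_rpm_put(display, state->wakeref);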
Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/2682fa92089ab87429eef4d45f931839f0d32077.1742483007.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 12 ++++++------ drivers/gpu/drm/i915/display/intel_display_types.h | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3afb85fe8536..b852ffe94a10 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -73,6 +73,7 @@ #include "intel_de.h" #include "intel_display_driver.h" #include "intel_display_power.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" @@ -7229,7 +7230,7 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, static void intel_atomic_commit_tail(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct drm_i915_private *dev_priv = to_i915(display->drm); + struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm); struct intel_crtc_state *new_crtc_state, *old_crtc_state; struct intel_crtc *crtc; struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {}; @@ -7443,7 +7444,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) * toggling overhead at and above 60 FPS. */ intel_display_power_put_async_delay(display, POWER_DOMAIN_DC_OFF, wakeref, 17); - intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); + intel_display_rpm_put(display, state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -7515,10 +7516,9 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, { struct intel_display *display = to_intel_display(dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); - struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + state->wakeref = intel_display_rpm_get(display); /* * The intel_legacy_cursor_update() fast path takes care @@ -7552,7 +7552,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, if (ret) { drm_dbg_atomic(display->drm, "Preparing state failed with %i\n", ret); - intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); + intel_display_rpm_put(display, state->wakeref); return ret; } @@ -7562,7 +7562,7 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, if (ret) { drm_atomic_helper_unprepare_planes(dev, &state->base); - intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref); + intel_display_rpm_put(display, state->wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 99a6fd2900b9..7d9cc430a6b8 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -581,7 +581,7 @@ struct dpll { struct intel_atomic_state { struct drm_atomic_state base; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; struct __intel_global_objs_state *global_objs; int num_global_objs; From 4d3408328af05a6d9399e14c7505cce0b2f2e3b9 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 17:03:58 +0200 Subject: [PATCH 0173/1627] drm/i915/display: convert to display runtime PM interfaces Convert i915 runtime PM interfaces to display runtime PM interfaces all 
over the place in display code. Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/494d0bd0348e4aa99560f1aed21aaaff31706c44.1742483007.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/hsw_ips.c | 8 ++++---- .../drm/i915/display/intel_display_debugfs.c | 17 +++++++---------- .../gpu/drm/i915/display/intel_display_irq.c | 6 +++--- drivers/gpu/drm/i915/display/intel_dmc.c | 9 +++++---- drivers/gpu/drm/i915/display/intel_dp.c | 5 ++--- drivers/gpu/drm/i915/display/intel_dpt.c | 7 ++++--- drivers/gpu/drm/i915/display/intel_dsb.c | 17 +++++++++-------- drivers/gpu/drm/i915/display/intel_fb_pin.c | 7 ++++--- drivers/gpu/drm/i915/display/intel_fbc.c | 8 ++++---- drivers/gpu/drm/i915/display/intel_fbdev.c | 11 +++++++---- drivers/gpu/drm/i915/display/intel_hotplug.c | 7 ++++--- drivers/gpu/drm/i915/display/intel_psr.c | 17 ++++++----------- 12 files changed, 59 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 674a0e5f0858..4307e2ed03d9 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -10,6 +10,7 @@ #include "i915_reg.h" #include "intel_color_regs.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_pcode.h" @@ -344,10 +345,9 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_crtc *crtc = m->private; struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); seq_printf(m, "Enabled by kernel parameter: %s\n", str_yes_no(display->params.enable_ips)); @@ -361,7 +361,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index f42b5a69eed5..4c784bb7e14b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -24,6 +24,7 @@ #include "intel_display_debugfs_params.h" #include "intel_display_power.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dp.h" @@ -580,13 +581,12 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) static int i915_display_info(struct seq_file *m, void *unused) { struct intel_display *display = node_to_intel_display(m->private); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_crtc *crtc; struct drm_connector *connector; struct drm_connector_list_iter conn_iter; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); drm_modeset_lock_all(display->drm); @@ -605,7 +605,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_modeset_unlock_all(display->drm); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } @@ -690,14 +690,11 @@ static bool intel_lpsp_power_well_enabled(struct intel_display *display, enum i915_power_well_id power_well_id) { - 
struct drm_i915_private *i915 = to_i915(display->drm); - intel_wakeref_t wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - is_enabled = intel_display_power_well_is_enabled(display, - power_well_id); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + with_intel_display_rpm(display) + is_enabled = intel_display_power_well_is_enabled(display, + power_well_id); return is_enabled; } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index d9f9b9f78abb..d2a35e3630b1 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -14,6 +14,7 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_irq.h" +#include "intel_display_rpm.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_dmc_wl.h" @@ -1517,10 +1518,9 @@ void gen11_gu_misc_irq_handler(struct intel_display *display, const u32 iir) void gen11_display_irq_handler(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); u32 disp_ctl; - disable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_display_rpm_assert_block(display); /* * GEN11_DISPLAY_INT_CTL has same format as GEN8_MASTER_IRQ * for the display related bits. @@ -1531,7 +1531,7 @@ void gen11_display_irq_handler(struct intel_display *display) gen8_de_irq_handler(display, disp_ctl); intel_de_write(display, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); - enable_rpm_wakeref_asserts(&i915->runtime_pm); + intel_display_rpm_assert_unblock(display); } static void i915gm_irq_cstate_wa_enable(struct intel_display *display) diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index fa6944e55d95..eb6b47ba0870 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -28,6 +28,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" #include "intel_step.h" @@ -595,7 +596,7 @@ void intel_dmc_load_program(struct intel_display *display) disable_all_event_handlers(display); - assert_rpm_wakelock_held(&i915->runtime_pm); + assert_display_rpm_held(display); preempt_disable(); @@ -1237,13 +1238,13 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) struct intel_display *display = m->private; struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc *dmc = display_to_dmc(display); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; if (!HAS_DMC(display)) return -ENODEV; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); seq_printf(m, "DMC initialized: %s\n", str_yes_no(dmc)); seq_printf(m, "fw loaded: %s\n", @@ -1299,7 +1300,7 @@ out: intel_de_read(display, DMC_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", intel_de_read(display, DMC_HTP_SKL)); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 7d074770d793..e3821ccfabe3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -62,6 +62,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_driver.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include 
"intel_dp.h" #include "intel_dp_aux.h" @@ -87,7 +88,6 @@ #include "intel_pfit.h" #include "intel_pps.h" #include "intel_psr.h" -#include "intel_runtime_pm.h" #include "intel_quirks.h" #include "intel_tc.h" #include "intel_vdsc.h" @@ -6144,13 +6144,12 @@ enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd) { struct intel_display *display = to_intel_display(dig_port); - struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); struct intel_dp *intel_dp = &dig_port->dp; u8 dpcd[DP_RECEIVER_CAP_SIZE]; if (dig_port->base.type == INTEL_OUTPUT_EDP && (long_hpd || - intel_runtime_pm_suspended(&i915->runtime_pm) || + intel_display_rpm_suspended(display) || !intel_pps_have_panel_power_or_vdd(intel_dp))) { /* * vdd off can generate a long/short pulse on eDP which diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index 0d8ebe38226e..43bd97e4f589 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -9,6 +9,7 @@ #include "gt/gen8_ppgtt.h" #include "i915_drv.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" @@ -127,7 +128,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm, struct drm_i915_private *i915 = vm->i915; struct intel_display *display = &i915->display; struct i915_dpt *dpt = i915_vm_to_dpt(vm); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; struct i915_vma *vma; void __iomem *iomem; struct i915_gem_ww_ctx ww; @@ -137,7 +138,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm, if (i915_gem_object_is_stolen(dpt->obj)) pin_flags |= PIN_MAPPABLE; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); atomic_inc(&display->restore.pending_fb_pin); for_i915_gem_ww(&ww, err, true) { @@ -169,7 +170,7 @@ struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm, dpt->obj->mm.dirty = true; atomic_dec(&display->restore.pending_fb_pin); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return err ? 
ERR_PTR(err) : vma; } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 9fc4003d1579..0ddcdedf5453 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -11,6 +11,7 @@ #include "i915_reg.h" #include "intel_crtc.h" #include "intel_de.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dsb.h" #include "intel_dsb_buffer.h" @@ -795,22 +796,22 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, enum intel_dsb_id dsb_id, unsigned int max_cmds) { - struct drm_i915_private *i915 = to_i915(state->base.dev); - intel_wakeref_t wakeref; + struct intel_display *display = to_intel_display(state); + struct ref_tracker *wakeref; struct intel_dsb *dsb; unsigned int size; - if (!HAS_DSB(i915)) + if (!HAS_DSB(display)) return NULL; - if (!i915->display.params.enable_dsb) + if (!display->params.enable_dsb) return NULL; dsb = kzalloc(sizeof(*dsb), GFP_KERNEL); if (!dsb) goto out; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); /* ~1 qword per instruction, full cachelines */ size = ALIGN(max_cmds * 8, CACHELINE_BYTES); @@ -818,7 +819,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size)) goto out_put_rpm; - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); dsb->id = dsb_id; dsb->crtc = crtc; @@ -831,10 +832,10 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state, return dsb; out_put_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); kfree(dsb); out: - drm_info_once(&i915->drm, + drm_info_once(display->drm, "[CRTC:%d:%s] DSB %d queue setup failed, will fallback to MMIO for display HW programming\n", crtc->base.base.id, crtc->base.name, dsb_id); diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 30ac9b089ad6..c648ab8a93d7 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -12,6 +12,7 @@ #include "i915_drv.h" #include "intel_atomic_plane.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" @@ -117,7 +118,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, struct drm_i915_private *dev_priv = to_i915(dev); struct drm_gem_object *_obj = intel_fb_bo(fb); struct drm_i915_gem_object *obj = to_intel_bo(_obj); - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; struct i915_gem_ww_ctx ww; struct i915_vma *vma; unsigned int pinctl; @@ -136,7 +137,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. 
*/ - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); atomic_inc(&display->restore.pending_fb_pin); @@ -215,7 +216,7 @@ err: vma = ERR_PTR(ret); atomic_dec(&display->restore.pending_fb_pin); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return vma; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index b6978135e8ad..4f9b4fc526ea 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -55,6 +55,7 @@ #include "intel_cdclk.h" #include "intel_de.h" #include "intel_display_device.h" +#include "intel_display_rpm.h" #include "intel_display_trace.h" #include "intel_display_types.h" #include "intel_display_wa.h" @@ -2120,13 +2121,12 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_fbc *fbc = m->private; struct intel_display *display = fbc->display; - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_plane *plane; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; drm_modeset_lock_all(display->drm); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_display_rpm_get(display); mutex_lock(&fbc->lock); if (fbc->active) { @@ -2151,7 +2151,7 @@ static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); drm_modeset_unlock_all(display->drm); diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index adc19d5607de..369f46286e95 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -50,6 +50,7 @@ #include "i915_drv.h" #include "i915_vma.h" #include "intel_bo.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" @@ -213,7 +214,8 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, struct intel_framebuffer *fb = ifbdev->fb; struct drm_device *dev = helper->dev; struct drm_i915_private *dev_priv = to_i915(dev); - intel_wakeref_t wakeref; + struct intel_display *display = to_intel_display(dev); + struct ref_tracker *wakeref; struct fb_info *info; struct i915_vma *vma; unsigned long flags = 0; @@ -247,7 +249,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, sizes->fb_height = fb->base.height; } - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); /* Pin the GGTT vma for our access via info->screen_base. 
* This also validates that any existing fb inherited from the @@ -299,14 +301,15 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; out_unpin: intel_fb_unpin_vma(vma, flags); out_unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); + return ret; } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index fcc3f546cb97..dce9cde03d70 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -30,6 +30,7 @@ #include "i915_irq.h" #include "intel_connector.h" #include "intel_display_power.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -278,10 +279,10 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) struct drm_i915_private *dev_priv = to_i915(display->drm); struct drm_connector_list_iter conn_iter; struct intel_connector *connector; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; enum hpd_pin pin; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); spin_lock_irq(&dev_priv->irq_lock); @@ -309,7 +310,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); } static enum intel_hotplug_state diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 4e938bad808c..50a22cd8d84a 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -36,6 +36,7 @@ #include "intel_ddi.h" #include "intel_de.h" #include "intel_display_irq.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dp.h" #include "intel_dp_aux.h" @@ -3728,10 +3729,9 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp, static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; struct intel_psr *psr = &intel_dp->psr; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; bool enabled; u32 val, psr2_ctl; @@ -3740,7 +3740,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) if (!(psr->sink_support || psr->sink_panel_replay_support)) return 0; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get(display); mutex_lock(&psr->lock); intel_psr_print_mode(intel_dp, m); @@ -3822,7 +3822,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp) unlock: mutex_unlock(&psr->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); return 0; } @@ -3853,9 +3853,7 @@ static int i915_edp_psr_debug_set(void *data, u64 val) { struct intel_display *display = data; - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_encoder *encoder; - intel_wakeref_t wakeref; int ret = -ENODEV; if (!HAS_PSR(display)) @@ -3866,12 +3864,9 @@ i915_edp_psr_debug_set(void *data, u64 val) drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val); - wakeref = 
intel_runtime_pm_get(&dev_priv->runtime_pm); - // TODO: split to each transcoder's PSR debug state - ret = intel_psr_debug_set(intel_dp, val); - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + with_intel_display_rpm(display) + ret = intel_psr_debug_set(intel_dp, val); } return ret; From b5de8f445a5f0b9ea1504f89900702d67d2ece2b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 17:03:59 +0200 Subject: [PATCH 0174/1627] drm/i915/power: convert to display runtime PM interfaces Finish the conversions to display specific runtime PM interfaces in the power code. Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/b08a074d466a966b7f0fda9ef35c8ef81d180ebb.1742483007.git.jani.nikula@intel.com --- .../drm/i915/display/intel_display_power.c | 63 ++++++++----------- .../i915/display/intel_display_power_well.c | 4 +- 2 files changed, 30 insertions(+), 37 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index f7171e6932dc..adeb4408eb49 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -16,6 +16,7 @@ #include "intel_display_power.h" #include "intel_display_power_map.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_mchbar_regs.h" @@ -204,7 +205,7 @@ static bool __intel_display_power_is_enabled(struct intel_display *display, struct i915_power_well *power_well; bool is_enabled; - if (pm_runtime_suspended(display->drm->dev)) + if (intel_display_rpm_suspended(display)) return false; is_enabled = true; @@ -455,7 +456,6 @@ static bool intel_display_power_grab_async_put_ref(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; struct intel_power_domain_mask async_put_mask; bool ret = false; @@ -473,8 +473,8 @@ intel_display_power_grab_async_put_ref(struct intel_display *display, goto out_verify; cancel_async_put_work(power_domains, false); - intel_runtime_pm_put_raw(&dev_priv->runtime_pm, - fetch_and_zero(&power_domains->async_put_wakeref)); + intel_display_rpm_put_raw(display, + fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: verify_async_put_domains_state(power_domains); @@ -512,9 +512,10 @@ __intel_display_power_get_domain(struct intel_display *display, intel_wakeref_t intel_display_power_get(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + struct ref_tracker *wakeref; + + wakeref = intel_display_rpm_get(display); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(display, domain); @@ -539,12 +540,11 @@ intel_wakeref_t intel_display_power_get_if_enabled(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); + wakeref = intel_display_rpm_get_if_in_use(display); if (!wakeref) return NULL; @@ -560,7 +560,7 @@ 
intel_display_power_get_if_enabled(struct intel_display *display, mutex_unlock(&power_domains->lock); if (!is_enabled) { - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); wakeref = NULL; } @@ -623,12 +623,10 @@ release_async_put_domains(struct i915_power_domains *power_domains, struct intel_display *display = container_of(power_domains, struct intel_display, power.domains); - struct drm_i915_private *dev_priv = to_i915(display->drm); - struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; - intel_wakeref_t wakeref; + struct ref_tracker *wakeref; - wakeref = intel_runtime_pm_get_noresume(rpm); + wakeref = intel_display_rpm_get_noresume(display); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. */ @@ -636,7 +634,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, __intel_display_power_put_domain(display, domain); } - intel_runtime_pm_put(rpm, wakeref); + intel_display_rpm_put(display, wakeref); } static void @@ -644,11 +642,10 @@ intel_display_power_put_async_work(struct work_struct *work) { struct intel_display *display = container_of(work, struct intel_display, power.domains.async_put_work.work); - struct drm_i915_private *dev_priv = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; - intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); - intel_wakeref_t old_work_wakeref = NULL; + struct ref_tracker *new_work_wakeref, *old_work_wakeref = NULL; + + new_work_wakeref = intel_display_rpm_get_raw(display); mutex_lock(&power_domains->lock); @@ -688,9 +685,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (old_work_wakeref) - intel_runtime_pm_put_raw(rpm, old_work_wakeref); + intel_display_rpm_put_raw(display, old_work_wakeref); if (new_work_wakeref) - intel_runtime_pm_put_raw(rpm, new_work_wakeref); + intel_display_rpm_put_raw(display, new_work_wakeref); } /** @@ -711,10 +708,10 @@ void __intel_display_power_put_async(struct intel_display *display, intel_wakeref_t wakeref, int delay_ms) { - struct drm_i915_private *i915 = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; - struct intel_runtime_pm *rpm = &i915->runtime_pm; - intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); + struct ref_tracker *work_wakeref; + + work_wakeref = intel_display_rpm_get_raw(display); delay_ms = delay_ms >= 0 ? 
delay_ms : 100; @@ -746,9 +743,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(rpm, work_wakeref); + intel_display_rpm_put_raw(display, work_wakeref); - intel_runtime_pm_put(rpm, wakeref); + intel_display_rpm_put(display, wakeref); } /** @@ -765,7 +762,6 @@ out_verify: */ void intel_display_power_flush_work(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct i915_power_domains *power_domains = &display->power.domains; struct intel_power_domain_mask async_put_mask; intel_wakeref_t work_wakeref; @@ -786,7 +782,7 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); + intel_display_rpm_put_raw(display, work_wakeref); } /** @@ -824,10 +820,8 @@ void intel_display_power_put(struct intel_display *display, enum intel_display_power_domain domain, intel_wakeref_t wakeref) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - __intel_display_power_put(display, domain); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); } #else /** @@ -846,10 +840,8 @@ void intel_display_power_put(struct intel_display *display, void intel_display_power_put_unchecked(struct intel_display *display, enum intel_display_power_domain domain) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - __intel_display_power_put(display, domain); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); + intel_display_rpm_put_unchecked(display); } #endif @@ -1979,7 +1971,6 @@ void intel_power_domains_init_hw(struct intel_display *display, bool resume) */ void intel_power_domains_driver_remove(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); intel_wakeref_t wakeref __maybe_unused = fetch_and_zero(&display->power.domains.init_wakeref); @@ -1993,7 +1984,7 @@ void intel_power_domains_driver_remove(struct intel_display *display) intel_power_domains_verify_state(display); /* Keep the power well enabled, but cancel its rpm wakeref. */ - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_display_rpm_put(display, wakeref); } /** diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index b03a95ef64da..751e49b880d6 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -13,6 +13,7 @@ #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_power_well.h" +#include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dkl_phy.h" #include "intel_dkl_phy_regs.h" @@ -812,7 +813,8 @@ static void assert_can_enable_dc5(struct intel_display *display) (intel_de_read(display, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5), "DC5 already programmed to be enabled.\n"); - assert_rpm_wakelock_held(&dev_priv->runtime_pm); + + assert_display_rpm_held(display); assert_dmc_loaded(display); } From f5c3bcd23afb4b2888c8f6eaa608b8a0c70091e4 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Mar 2025 17:04:00 +0200 Subject: [PATCH 0175/1627] drm/xe/compat: remove intel_runtime_pm.h Now that all display code has been converted to display specific runtime PM interfaces, there's no need for the compat header anymore. 
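The scoped form used in these conversions (with_intel_display_rpm(display), seen above in intel_lpsp_power_well_enabled() and i915_edp_psr_debug_set()) relies on the same for-loop trick as the with_intel_runtime_pm() macro deleted below. As a rough sketch of how such a one-argument guard can be built (the name and internal variable are illustrative only, not the actual intel_display_rpm.h definition):

  #define with_display_rpm_sketch(display) \
          for (struct ref_tracker *wf__ = intel_display_rpm_get(display); \
               wf__; \
               intel_display_rpm_put((display), wf__), wf__ = NULL)

The guarded statement runs once with the wakeref held, and the put executes on normal exit or continue; a bare break or return would skip it, which is why callers keep the protected section to a single statement.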
Reviewed-by: Rodrigo Vivi Signed-off-by: Jani Nikula Link: https://lore.kernel.org/r/037ed1f38c96715c76514e9eb7069b896ce06ba1.1742483007.git.jani.nikula@intel.com --- .../gpu/drm/xe/compat-i915-headers/i915_drv.h | 1 - .../xe/compat-i915-headers/intel_runtime_pm.h | 76 ------------------- 2 files changed, 77 deletions(-) delete mode 100644 drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index dfec5108d2c3..f89bd5e3520d 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -13,7 +13,6 @@ #include #include "i915_utils.h" -#include "intel_runtime_pm.h" #include "xe_device.h" /* for xe_device_has_flat_ccs() */ #include "xe_device_types.h" diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h deleted file mode 100644 index 274042bff1be..000000000000 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h +++ /dev/null @@ -1,76 +0,0 @@ -/* SPDX-License-Identifier: MIT */ -/* - * Copyright © 2023 Intel Corporation - */ - -#ifndef __INTEL_RUNTIME_PM_H__ -#define __INTEL_RUNTIME_PM_H__ - -#include "intel_wakeref.h" -#include "xe_device_types.h" -#include "xe_pm.h" - -#define intel_runtime_pm xe_runtime_pm - -static inline void disable_rpm_wakeref_asserts(void *rpm) -{ -} - -static inline void enable_rpm_wakeref_asserts(void *rpm) -{ -} - -static inline bool -intel_runtime_pm_suspended(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - return pm_runtime_suspended(xe->drm.dev); -} - -static inline intel_wakeref_t intel_runtime_pm_get(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - return xe_pm_runtime_resume_and_get(xe) ? INTEL_WAKEREF_DEF : NULL; -} - -static inline intel_wakeref_t intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - return xe_pm_runtime_get_if_in_use(xe) ? INTEL_WAKEREF_DEF : NULL; -} - -static inline intel_wakeref_t intel_runtime_pm_get_noresume(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - xe_pm_runtime_get_noresume(xe); - - return INTEL_WAKEREF_DEF; -} - -static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm) -{ - struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm); - - xe_pm_runtime_put(xe); -} - -static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, intel_wakeref_t wakeref) -{ - if (wakeref) - intel_runtime_pm_put_unchecked(pm); -} - -#define intel_runtime_pm_get_raw intel_runtime_pm_get -#define intel_runtime_pm_put_raw intel_runtime_pm_put -#define assert_rpm_wakelock_held(x) do { } while (0) -#define assert_rpm_raw_wakeref_held(x) do { } while (0) - -#define with_intel_runtime_pm(rpm, wf) \ - for ((wf) = intel_runtime_pm_get(rpm); (wf); \ - intel_runtime_pm_put((rpm), (wf)), (wf) = NULL) - -#endif From f3e08e98bf408a44182b0e3e946521a0e9b5482f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Fri, 21 Mar 2025 14:37:09 +0100 Subject: [PATCH 0176/1627] drm/xe: Simplify pinned bo iteration MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce and use a helper to iterate over the various pinned bo lists. 
There are a couple of slight functional changes: 1) GGTT maps are now performed with the bo locked. 2) If the per-bo callback fails, keep the bo on the original list. v2: - Skip unrelated change in xe_bo.c Cc: Matthew Auld Signed-off-by: Thomas Hellström Reviewed-by: Matthew Auld Link: https://patchwork.freedesktop.org/patch/msgid/20250321133709.75327-1-thomas.hellstrom@linux.intel.com --- drivers/gpu/drm/xe/xe_bo_evict.c | 209 ++++++++++++------------------- 1 file changed, 82 insertions(+), 127 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index 6a40eedd9db1..1eeb3910450b 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -10,6 +10,44 @@ #include "xe_ggtt.h" #include "xe_tile.h" +typedef int (*xe_pinned_fn)(struct xe_bo *bo); + +static int xe_bo_apply_to_pinned(struct xe_device *xe, + struct list_head *pinned_list, + struct list_head *new_list, + const xe_pinned_fn pinned_fn) +{ + LIST_HEAD(still_in_list); + struct xe_bo *bo; + int ret = 0; + + spin_lock(&xe->pinned.lock); + while (!ret) { + bo = list_first_entry_or_null(pinned_list, typeof(*bo), + pinned_link); + if (!bo) + break; + xe_bo_get(bo); + list_move_tail(&bo->pinned_link, &still_in_list); + spin_unlock(&xe->pinned.lock); + + xe_bo_lock(bo, false); + ret = pinned_fn(bo); + if (ret && pinned_list != new_list) { + spin_lock(&xe->pinned.lock); + list_move(&bo->pinned_link, pinned_list); + spin_unlock(&xe->pinned.lock); + } + xe_bo_unlock(bo); + xe_bo_put(bo); + spin_lock(&xe->pinned.lock); + } + list_splice_tail(&still_in_list, new_list); + spin_unlock(&xe->pinned.lock); + + return ret; +} + /** * xe_bo_evict_all - evict all BOs from VRAM * @@ -27,9 +65,7 @@ int xe_bo_evict_all(struct xe_device *xe) { struct ttm_device *bdev = &xe->ttm; - struct xe_bo *bo; struct xe_tile *tile; - struct list_head still_in_list; u32 mem_type; u8 id; int ret; @@ -57,34 +93,9 @@ int xe_bo_evict_all(struct xe_device *xe) } } - /* Pinned user memory in VRAM */ - INIT_LIST_HEAD(&still_in_list); - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.external_vram, - typeof(*bo), pinned_link); - if (!bo) - break; - xe_bo_get(bo); - list_move_tail(&bo->pinned_link, &still_in_list); - spin_unlock(&xe->pinned.lock); - - xe_bo_lock(bo, false); - ret = xe_bo_evict_pinned(bo); - xe_bo_unlock(bo); - xe_bo_put(bo); - if (ret) { - spin_lock(&xe->pinned.lock); - list_splice_tail(&still_in_list, - &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); - return ret; - } - - spin_lock(&xe->pinned.lock); - } - list_splice_tail(&still_in_list, &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external_vram, + &xe->pinned.external_vram, + xe_bo_evict_pinned); /* * Wait for all user BO to be evicted as those evictions depend on the @@ -93,26 +104,42 @@ int xe_bo_evict_all(struct xe_device *xe) for_each_tile(tile, xe, id) xe_tile_migrate_wait(tile); - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.kernel_bo_present, - typeof(*bo), pinned_link); - if (!bo) - break; - xe_bo_get(bo); - list_move_tail(&bo->pinned_link, &xe->pinned.evicted); - spin_unlock(&xe->pinned.lock); + if (ret) + return ret; - xe_bo_lock(bo, false); - ret = xe_bo_evict_pinned(bo); - xe_bo_unlock(bo); - xe_bo_put(bo); - if (ret) - return ret; + return xe_bo_apply_to_pinned(xe, &xe->pinned.kernel_bo_present, + &xe->pinned.evicted, + xe_bo_evict_pinned); +} - spin_lock(&xe->pinned.lock); 
+static int xe_bo_restore_and_map_ggtt(struct xe_bo *bo) +{ + struct xe_device *xe = xe_bo_device(bo); + int ret; + + ret = xe_bo_restore_pinned(bo); + if (ret) + return ret; + + if (bo->flags & XE_BO_FLAG_GGTT) { + struct xe_tile *tile; + u8 id; + + for_each_tile(tile, xe_bo_device(bo), id) { + if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) + continue; + + mutex_lock(&tile->mem.ggtt->lock); + xe_ggtt_map_bo(tile->mem.ggtt, bo); + mutex_unlock(&tile->mem.ggtt->lock); + } } - spin_unlock(&xe->pinned.lock); + + /* + * We expect validate to trigger a move VRAM and our move code + * should setup the iosys map. + */ + xe_assert(xe, !iosys_map_is_null(&bo->vmap)); return 0; } @@ -130,54 +157,9 @@ int xe_bo_evict_all(struct xe_device *xe) */ int xe_bo_restore_kernel(struct xe_device *xe) { - struct xe_bo *bo; - int ret; - - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.evicted, - typeof(*bo), pinned_link); - if (!bo) - break; - xe_bo_get(bo); - list_move_tail(&bo->pinned_link, &xe->pinned.kernel_bo_present); - spin_unlock(&xe->pinned.lock); - - xe_bo_lock(bo, false); - ret = xe_bo_restore_pinned(bo); - xe_bo_unlock(bo); - if (ret) { - xe_bo_put(bo); - return ret; - } - - if (bo->flags & XE_BO_FLAG_GGTT) { - struct xe_tile *tile; - u8 id; - - for_each_tile(tile, xe, id) { - if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) - continue; - - mutex_lock(&tile->mem.ggtt->lock); - xe_ggtt_map_bo(tile->mem.ggtt, bo); - mutex_unlock(&tile->mem.ggtt->lock); - } - } - - /* - * We expect validate to trigger a move VRAM and our move code - * should setup the iosys map. - */ - xe_assert(xe, !iosys_map_is_null(&bo->vmap)); - - xe_bo_put(bo); - - spin_lock(&xe->pinned.lock); - } - spin_unlock(&xe->pinned.lock); - - return 0; + return xe_bo_apply_to_pinned(xe, &xe->pinned.evicted, + &xe->pinned.kernel_bo_present, + xe_bo_restore_and_map_ggtt); } /** @@ -192,47 +174,20 @@ int xe_bo_restore_kernel(struct xe_device *xe) */ int xe_bo_restore_user(struct xe_device *xe) { - struct xe_bo *bo; struct xe_tile *tile; - struct list_head still_in_list; - u8 id; - int ret; + int ret, id; if (!IS_DGFX(xe)) return 0; /* Pinned user memory in VRAM should be validated on resume */ - INIT_LIST_HEAD(&still_in_list); - spin_lock(&xe->pinned.lock); - for (;;) { - bo = list_first_entry_or_null(&xe->pinned.external_vram, - typeof(*bo), pinned_link); - if (!bo) - break; - list_move_tail(&bo->pinned_link, &still_in_list); - xe_bo_get(bo); - spin_unlock(&xe->pinned.lock); - - xe_bo_lock(bo, false); - ret = xe_bo_restore_pinned(bo); - xe_bo_unlock(bo); - xe_bo_put(bo); - if (ret) { - spin_lock(&xe->pinned.lock); - list_splice_tail(&still_in_list, - &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); - return ret; - } - - spin_lock(&xe->pinned.lock); - } - list_splice_tail(&still_in_list, &xe->pinned.external_vram); - spin_unlock(&xe->pinned.lock); + ret = xe_bo_apply_to_pinned(xe, &xe->pinned.external_vram, + &xe->pinned.external_vram, + xe_bo_restore_pinned); /* Wait for restore to complete */ for_each_tile(tile, xe, id) xe_tile_migrate_wait(tile); - return 0; + return ret; } From 75584c8213d341ddd5b7c72abf822e62f4b3ab27 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 7 Mar 2025 10:13:21 -0800 Subject: [PATCH 0177/1627] drm/xe/uc: Remove static from loop variable The `entries` variable is used to loop through the array - it's supposed to be const, but not static. 
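As a standalone illustration (generic C, not driver code) of why static is the wrong qualifier for a per-call cursor: static only changes the pointer's storage duration, giving one zero-initialized pointer object for the lifetime of the module, while the const that matters qualifies the data being pointed at.

  static const int table[] = { 1, 2, 3 };

  int sum_with_static_cursor(void)
  {
          static const int *p;    /* one persistent pointer object; it points at
                                   * const data but is itself writable and lives
                                   * on after every call for no reason */
          int s = 0;

          for (p = table; p < table + 3; p++)
                  s += *p;
          return s;
  }

  int sum_with_auto_cursor(void)
  {
          const int *p;           /* fresh automatic cursor each call: all a
                                   * lookup loop needs */
          int s = 0;

          for (p = table; p < table + 3; p++)
                  s += *p;
          return s;
  }

Both functions compute the same result; the difference is purely where the cursor lives, which is what the hunk below corrects.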
Reviewed-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20250307-xe-per-gt-fw-v1-1-459574d76400@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_uc_fw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index fb0eda3d5682..b553079ae3d6 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -222,8 +222,8 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) [XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) }, [XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) }, }; - static const struct uc_fw_entry *entries; enum xe_platform p = xe->info.platform; + const struct uc_fw_entry *entries; u32 count; int i; From 613256e67cfd836a521a60b315772b3d31fb017d Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 7 Mar 2025 10:13:22 -0800 Subject: [PATCH 0178/1627] drm/xe/uc: Add support for different firmware files on each GT The different GTs on a device can be very different. Right now for all platforms the same firmware is loaded in each GT, however future platforms may benefit from loading a different file depending on the GT type. Based on previous patch by John Harrison . Reviewed-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20250307-xe-per-gt-fw-v1-2-459574d76400@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_uc_fw.c | 88 +++++++++++++++++++++-------------- 1 file changed, 52 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index b553079ae3d6..4a16d3c40ea9 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -92,6 +92,8 @@ struct uc_fw_entry { enum xe_platform platform; + enum xe_gt_type gt_type; + struct { const char *path; u16 major; @@ -106,32 +108,37 @@ struct fw_blobs_by_type { u32 count; }; -#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(BATTLEMAGE, major_ver(xe, guc, bmg, 70, 29, 2)) \ - fw_def(LUNARLAKE, major_ver(xe, guc, lnl, 70, 29, 2)) \ - fw_def(METEORLAKE, major_ver(i915, guc, mtl, 70, 29, 2)) \ - fw_def(DG2, major_ver(i915, guc, dg2, 70, 29, 2)) \ - fw_def(DG1, major_ver(i915, guc, dg1, 70, 29, 2)) \ - fw_def(ALDERLAKE_N, major_ver(i915, guc, tgl, 70, 29, 2)) \ - fw_def(ALDERLAKE_P, major_ver(i915, guc, adlp, 70, 29, 2)) \ - fw_def(ALDERLAKE_S, major_ver(i915, guc, tgl, 70, 29, 2)) \ - fw_def(ROCKETLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) \ - fw_def(TIGERLAKE, major_ver(i915, guc, tgl, 70, 29, 2)) +/* + * Add an "ANY" define just to convey the meaning it's given here. 
+ */ +#define XE_GT_TYPE_ANY XE_GT_TYPE_UNINITIALIZED + +#define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ + fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 29, 2)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 29, 2)) \ + fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 29, 2)) \ + fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 29, 2)) \ + fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 29, 2)) \ + fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 29, 2)) \ + fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 29, 2)) \ + fw_def(ALDERLAKE_S, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 29, 2)) \ + fw_def(ROCKETLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 29, 2)) \ + fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 29, 2)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ - fw_def(BATTLEMAGE, no_ver(xe, huc, bmg)) \ - fw_def(LUNARLAKE, no_ver(xe, huc, lnl)) \ - fw_def(METEORLAKE, no_ver(i915, huc_gsc, mtl)) \ - fw_def(DG1, no_ver(i915, huc, dg1)) \ - fw_def(ALDERLAKE_P, no_ver(i915, huc, tgl)) \ - fw_def(ALDERLAKE_S, no_ver(i915, huc, tgl)) \ - fw_def(ROCKETLAKE, no_ver(i915, huc, tgl)) \ - fw_def(TIGERLAKE, no_ver(i915, huc, tgl)) + fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \ + fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \ + fw_def(DG1, GT_TYPE_ANY, no_ver(i915, huc, dg1)) \ + fw_def(ALDERLAKE_P, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \ + fw_def(ALDERLAKE_S, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \ + fw_def(ROCKETLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) \ + fw_def(TIGERLAKE, GT_TYPE_ANY, no_ver(i915, huc, tgl)) /* for the GSC FW we match the compatibility version and not the release one */ #define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \ - fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \ - fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0)) + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, gsc, lnl, 104, 1, 0)) \ + fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, gsc, mtl, 102, 1, 0)) #define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \ __stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin" @@ -159,12 +166,13 @@ struct fw_blobs_by_type { a, b, c } /* All blobs need to be declared via MODULE_FIRMWARE() */ -#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \ +#define XE_UC_MODULE_FIRMWARE(platform__, gt_type__, fw_filename) \ MODULE_FIRMWARE(fw_filename); -#define XE_UC_FW_ENTRY(platform__, entry__) \ +#define XE_UC_FW_ENTRY(platform__, gt_type__, entry__) \ { \ .platform = XE_ ## platform__, \ + .gt_type = XE_ ## gt_type__, \ entry__, \ }, @@ -222,30 +230,38 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw) [XE_UC_FW_TYPE_HUC] = { entries_huc, ARRAY_SIZE(entries_huc) }, [XE_UC_FW_TYPE_GSC] = { entries_gsc, ARRAY_SIZE(entries_gsc) }, }; + struct xe_gt *gt = uc_fw_to_gt(uc_fw); enum xe_platform p = xe->info.platform; const struct uc_fw_entry *entries; u32 count; int i; - xe_assert(xe, uc_fw->type < ARRAY_SIZE(blobs_all)); + xe_gt_assert(gt, uc_fw->type < ARRAY_SIZE(blobs_all)); + xe_gt_assert(gt, gt->info.type != XE_GT_TYPE_UNINITIALIZED); + entries = blobs_all[uc_fw->type].entries; count = blobs_all[uc_fw->type].count; for (i = 0; i < count && p <= entries[i].platform; i++) { - if (p == entries[i].platform) { - uc_fw->path = entries[i].path; - uc_fw->versions.wanted.major = entries[i].major; - uc_fw->versions.wanted.minor = 
entries[i].minor; - uc_fw->versions.wanted.patch = entries[i].patch; - uc_fw->full_ver_required = entries[i].full_ver_required; + if (p != entries[i].platform) + continue; - if (uc_fw->type == XE_UC_FW_TYPE_GSC) - uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY; - else - uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE; + if (entries[i].gt_type != XE_GT_TYPE_ANY && + entries[i].gt_type != gt->info.type) + continue; - break; - } + uc_fw->path = entries[i].path; + uc_fw->versions.wanted.major = entries[i].major; + uc_fw->versions.wanted.minor = entries[i].minor; + uc_fw->versions.wanted.patch = entries[i].patch; + uc_fw->full_ver_required = entries[i].full_ver_required; + + if (uc_fw->type == XE_UC_FW_TYPE_GSC) + uc_fw->versions.wanted_type = XE_UC_FW_VER_COMPATIBILITY; + else + uc_fw->versions.wanted_type = XE_UC_FW_VER_RELEASE; + + break; } } From 86b5e0dbba07438de91dd81095464c6c4aa7a372 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 14 Mar 2025 06:48:58 -0700 Subject: [PATCH 0179/1627] drm/xe: Move survivability back to xe Commit d40f275d96e8 ("drm/xe: Move survivability entirely to xe_pci") moved the survivability handling to be done entirely in the xe_pci layer. However there are some issues with that approach: 1) Survivability mode needs at least the mmio initialized, otherwise it can't really read a register to decide if it should enter that state 2) SR-IOV mode should be initialized, otherwise it's not possible to check if it's VF Besides, as pointed by Riana the check for xe_survivability_mode_enable() was wrong in xe_pci_probe() since it's not a bool return. Fix that by moving the initialization to be entirely in the xe_device layer, with the correct dependencies handled: only after mmio and sriov initialization, and not triggering it on error from wait_for_lmem_ready(). This restores the trigger behavior before that commit. The xe_pci layer now only checks for "is it enabled?", like it's doing in xe_pci_suspend()/xe_pci_remove(), etc. 
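To make the bool-return problem concrete, here is the xe_pci_probe() check condensed before and after (a paraphrase of the diff below, not additional code):

  /* before: xe_survivability_mode_enable() returns 0 on success or a
   * negative errno, so using it as a boolean treats success as "not
   * enabled" and an error as "enabled" */
  if (xe_survivability_mode_required(xe) &&
      xe_survivability_mode_enable(xe))
          return 0;

  /* after: enabling is attempted inside xe_device_probe_early() once MMIO
   * and SR-IOV state are known; the PCI layer only asks whether the
   * device actually ended up in survivability mode */
  if (xe_survivability_mode_is_enabled(xe))
          return 0;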
Cc: Riana Tauro Fixes: d40f275d96e8 ("drm/xe: Move survivability entirely to xe_pci") Reviewed-by: Riana Tauro Link: https://patchwork.freedesktop.org/patch/msgid/20250314-fix-survivability-v5-1-fdb3559ea965@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_device.c | 17 +++++++++++++++-- drivers/gpu/drm/xe/xe_pci.c | 16 +++++++--------- drivers/gpu/drm/xe/xe_survivability_mode.c | 19 ++++++++++++------- drivers/gpu/drm/xe/xe_survivability_mode.h | 1 - 4 files changed, 34 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index b2f656b2a563..0e8805f93468 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -53,6 +53,7 @@ #include "xe_pxp.h" #include "xe_query.h" #include "xe_shrinker.h" +#include "xe_survivability_mode.h" #include "xe_sriov.h" #include "xe_tile.h" #include "xe_ttm_stolen_mgr.h" @@ -711,8 +712,20 @@ int xe_device_probe_early(struct xe_device *xe) sriov_update_device_info(xe); err = xe_pcode_probe_early(xe); - if (err) - return err; + if (err) { + int save_err = err; + + /* + * Try to leave device in survivability mode if device is + * possible, but still return the previous error for error + * propagation + */ + err = xe_survivability_mode_enable(xe); + if (err) + return err; + + return save_err; + } err = wait_for_lmem_ready(xe); if (err) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index fc89d744978a..88138a3fcf50 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -807,16 +807,14 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) return err; err = xe_device_probe_early(xe); - - /* - * In Boot Survivability mode, no drm card is exposed and driver is - * loaded with bare minimum to allow for firmware to be flashed through - * mei. If early probe fails, check if survivability mode is flagged by - * HW to be enabled. In that case enable it and return success. - */ if (err) { - if (xe_survivability_mode_required(xe) && - xe_survivability_mode_enable(xe)) + /* + * In Boot Survivability mode, no drm card is exposed and driver + * is loaded with bare minimum to allow for firmware to be + * flashed through mei. If early probe failed, but it managed to + * enable survivability mode, return success. + */ + if (xe_survivability_mode_is_enabled(xe)) return 0; return err; diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c index d939ce70e6fa..7b1ec643f0f0 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.c +++ b/drivers/gpu/drm/xe/xe_survivability_mode.c @@ -178,15 +178,16 @@ bool xe_survivability_mode_is_enabled(struct xe_device *xe) return xe->survivability.mode; } -/** - * xe_survivability_mode_required - checks if survivability mode is required - * @xe: xe device instance +/* + * survivability_mode_requested - check if it's possible to enable + * survivability mode and that was requested by firmware * - * This function reads the boot status from Pcode + * This function reads the boot status from Pcode. * - * Return: true if boot status indicates failure, false otherwise + * Return: true if platform support is available and boot status indicates + * failure, false otherwise. 
*/ -bool xe_survivability_mode_required(struct xe_device *xe) +static bool survivability_mode_requested(struct xe_device *xe) { struct xe_survivability *survivability = &xe->survivability; struct xe_mmio *mmio = xe_root_tile_mmio(xe); @@ -208,7 +209,8 @@ bool xe_survivability_mode_required(struct xe_device *xe) * * Initialize survivability information and enable survivability mode * - * Return: 0 for success, negative error code otherwise. + * Return: 0 if survivability mode is enabled or not requested; negative error + * code otherwise. */ int xe_survivability_mode_enable(struct xe_device *xe) { @@ -216,6 +218,9 @@ int xe_survivability_mode_enable(struct xe_device *xe) struct xe_survivability_info *info; struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + if (!survivability_mode_requested(xe)) + return 0; + survivability->size = MAX_SCRATCH_MMIO; info = devm_kcalloc(xe->drm.dev, survivability->size, sizeof(*info), diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.h b/drivers/gpu/drm/xe/xe_survivability_mode.h index f4df5f9025ce..d7e64885570d 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.h +++ b/drivers/gpu/drm/xe/xe_survivability_mode.h @@ -12,6 +12,5 @@ struct xe_device; int xe_survivability_mode_enable(struct xe_device *xe); bool xe_survivability_mode_is_enabled(struct xe_device *xe); -bool xe_survivability_mode_required(struct xe_device *xe); #endif /* _XE_SURVIVABILITY_MODE_H_ */ From 14efa739ca70514e8b923a02b5bcb42511dd1ee8 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 14 Mar 2025 06:54:26 -0700 Subject: [PATCH 0180/1627] drm/xe: Set survivability mode before heci init Commit d40f275d96e8 ("drm/xe: Move survivability entirely to xe_pci") tried to follow the logic: initialize everything needed and if everything succeeds, set the flag that it's enabled. While it fixed some corner cases of those calls failing, it was wrong for setting the flag after the call to xe_heci_gsc_init(): that function does a different initialization for survivability mode. Fix that and add comments about this being done on purpose. Suggested-by: Riana Tauro Fixes: d40f275d96e8 ("drm/xe: Move survivability entirely to xe_pci") Reviewed-by: Riana Tauro Link: https://patchwork.freedesktop.org/patch/msgid/20250314-fix-survivability-v5-2-fdb3559ea965@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_survivability_mode.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c index 7b1ec643f0f0..cb813b337fd3 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.c +++ b/drivers/gpu/drm/xe/xe_survivability_mode.c @@ -155,13 +155,21 @@ static int enable_survivability_mode(struct pci_dev *pdev) if (ret) return ret; + /* Make sure xe_heci_gsc_init() knows about survivability mode */ + survivability->mode = true; + ret = xe_heci_gsc_init(xe); - if (ret) + if (ret) { + /* + * But if it fails, device can't enter survivability + * so move it back for correct error handling + */ + survivability->mode = false; return ret; + } xe_vsec_init(xe); - survivability->mode = true; dev_err(dev, "In Survivability Mode\n"); return 0; From 676da6ba5bdc1997910ac1cc901cbf16bc6f6d97 Mon Sep 17 00:00:00 2001 From: Lucas De Marchi Date: Fri, 14 Mar 2025 06:54:27 -0700 Subject: [PATCH 0181/1627] drm/xe: Allow to inject error in early probe Allow to test if driver behaves correctly when xe_pcode_probe_early() fails. 
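For context: ALLOW_ERROR_INJECTION() comes from <linux/error-injection.h> and marks a function as an injection point for the fail_function fault-injection framework (CONFIG_FUNCTION_ERROR_INJECTION plus CONFIG_FAIL_FUNCTION, driven through debugfs), which can then force the function to return a chosen negative errno. A minimal sketch of the annotation, with my_probe_step() as a made-up stand-in rather than xe code:

  #include <linux/errno.h>
  #include <linux/error-injection.h>

  int my_probe_step(void)
  {
          /* normally succeeds; with fail_function armed for this symbol it
           * returns the injected errno instead */
          return 0;
  }
  ALLOW_ERROR_INJECTION(my_probe_step, ERRNO);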
Note that this is not sufficient for testing survivability mode as it's still required to read the hw to check for errors, which doesn't happen on an injected failure. To complete the early probe coverage, allow injection in the other functions as well: xe_mmio_probe_early() and xe_device_probe_early(). Reviewed-by: Francois Dugast Link: https://patchwork.freedesktop.org/patch/msgid/20250314-fix-survivability-v5-3-fdb3559ea965@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/xe_device.c | 1 + drivers/gpu/drm/xe/xe_mmio.c | 1 + drivers/gpu/drm/xe/xe_pcode.c | 2 ++ 3 files changed, 4 insertions(+) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 0e8805f93468..1ffb7d1f6be6 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -735,6 +735,7 @@ int xe_device_probe_early(struct xe_device *xe) return 0; } +ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */ static int probe_has_flat_ccs(struct xe_device *xe) { diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 13e06a956ceb..096c38cc51c8 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -138,6 +138,7 @@ int xe_mmio_probe_early(struct xe_device *xe) return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe); } +ALLOW_ERROR_INJECTION(xe_mmio_probe_early, ERRNO); /* See xe_pci_probe() */ /** * xe_mmio_init() - Initialize an MMIO instance diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index 9333ce776a6e..cf955b3ed52c 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ b/drivers/gpu/drm/xe/xe_pcode.c @@ -7,6 +7,7 @@ #include #include +#include #include @@ -323,3 +324,4 @@ int xe_pcode_probe_early(struct xe_device *xe) { return xe_pcode_ready(xe, false); } +ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */ From 76dbd0973c555037931d2ed055a4a69e592caad4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 17 Mar 2025 22:01:09 -0300 Subject: [PATCH 0182/1627] drm/v3d: Associate a V3D tech revision to all supported devices MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The V3D driver currently determines the GPU tech version (33, 41...) by reading a register. This approach has worked so far since this information wasn’t needed before powering on the GPU. V3D 7.1 introduces new registers that must be written to power on the GPU, requiring us to know the V3D version beforehand. To address this, associate each supported SoC with the corresponding VideoCore GPU version as part of the device data. To prevent possible mistakes, add an assertion to verify that the version specified in the device data matches the one reported by the hardware. If there is a mismatch, the kernel will trigger a warning. With the goal of maintaining consistency around the driver, use `enum v3d_gen` to assign values to `v3d->ver` and for comparisons with other V3D generations. Note that all mentions of unsupported or non-existing V3D generations (such as V3D 4.0) were removed by this commit and replaced with supported generations without functional changes. 
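The plumbing this uses is the standard OF match-data idiom: the generation is stashed in the match table's .data pointer and recovered in probe before any register access is possible. A generic, self-contained sketch (all names here are invented, not v3d code):

  #include <linux/of.h>
  #include <linux/of_device.h>
  #include <linux/platform_device.h>

  enum my_gen { MY_GEN_A = 1, MY_GEN_B };

  static const struct of_device_id my_of_match[] = {
          { .compatible = "vendor,soc-a", .data = (void *)MY_GEN_A },
          { .compatible = "vendor,soc-b", .data = (void *)MY_GEN_B },
          { /* sentinel */ }
  };

  static int my_probe(struct platform_device *pdev)
  {
          enum my_gen gen = (uintptr_t)of_device_get_match_data(&pdev->dev);

          /* gen is usable before the hardware is powered up; once the device
           * is alive, a register read can cross-check it (the v3d patch
           * WARN_ONs on a mismatch) */
          return gen ? 0 : -ENODEV;
  }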
Reviewed-by: Iago Toral Quiroga Reviewed-by: Stefan Wahren Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20250317-v3d-gpu-reset-fixes-v6-1-f3ee7717ed17@igalia.com --- drivers/gpu/drm/v3d/v3d_debugfs.c | 116 +++++++++++++++--------------- drivers/gpu/drm/v3d/v3d_drv.c | 22 ++++-- drivers/gpu/drm/v3d/v3d_drv.h | 11 ++- drivers/gpu/drm/v3d/v3d_gem.c | 10 +-- drivers/gpu/drm/v3d/v3d_irq.c | 6 +- drivers/gpu/drm/v3d/v3d_perfmon.c | 4 +- drivers/gpu/drm/v3d/v3d_sched.c | 6 +- 7 files changed, 96 insertions(+), 79 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_debugfs.c b/drivers/gpu/drm/v3d/v3d_debugfs.c index 76816f2551c1..7e789e181af0 100644 --- a/drivers/gpu/drm/v3d/v3d_debugfs.c +++ b/drivers/gpu/drm/v3d/v3d_debugfs.c @@ -21,74 +21,74 @@ struct v3d_reg_def { }; static const struct v3d_reg_def v3d_hub_reg_defs[] = { - REGDEF(33, 42, V3D_HUB_AXICFG), - REGDEF(33, 71, V3D_HUB_UIFCFG), - REGDEF(33, 71, V3D_HUB_IDENT0), - REGDEF(33, 71, V3D_HUB_IDENT1), - REGDEF(33, 71, V3D_HUB_IDENT2), - REGDEF(33, 71, V3D_HUB_IDENT3), - REGDEF(33, 71, V3D_HUB_INT_STS), - REGDEF(33, 71, V3D_HUB_INT_MSK_STS), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_HUB_AXICFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_UIFCFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT0), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT1), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT2), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_IDENT3), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_HUB_INT_MSK_STS), - REGDEF(33, 71, V3D_MMU_CTL), - REGDEF(33, 71, V3D_MMU_VIO_ADDR), - REGDEF(33, 71, V3D_MMU_VIO_ID), - REGDEF(33, 71, V3D_MMU_DEBUG_INFO), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_CTL), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ADDR), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_VIO_ID), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_MMU_DEBUG_INFO), - REGDEF(71, 71, V3D_GMP_STATUS(71)), - REGDEF(71, 71, V3D_GMP_CFG(71)), - REGDEF(71, 71, V3D_GMP_VIO_ADDR(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_STATUS(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_CFG(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_GMP_VIO_ADDR(71)), }; static const struct v3d_reg_def v3d_gca_reg_defs[] = { - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN), - REGDEF(33, 33, V3D_GCA_SAFE_SHUTDOWN_ACK), + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN), + REGDEF(V3D_GEN_33, V3D_GEN_33, V3D_GCA_SAFE_SHUTDOWN_ACK), }; static const struct v3d_reg_def v3d_core_reg_defs[] = { - REGDEF(33, 71, V3D_CTL_IDENT0), - REGDEF(33, 71, V3D_CTL_IDENT1), - REGDEF(33, 71, V3D_CTL_IDENT2), - REGDEF(33, 71, V3D_CTL_MISCCFG), - REGDEF(33, 71, V3D_CTL_INT_STS), - REGDEF(33, 71, V3D_CTL_INT_MSK_STS), - REGDEF(33, 71, V3D_CLE_CT0CS), - REGDEF(33, 71, V3D_CLE_CT0CA), - REGDEF(33, 71, V3D_CLE_CT0EA), - REGDEF(33, 71, V3D_CLE_CT1CS), - REGDEF(33, 71, V3D_CLE_CT1CA), - REGDEF(33, 71, V3D_CLE_CT1EA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT0), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT1), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_IDENT2), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_MISCCFG), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CTL_INT_MSK_STS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0CA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT0EA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1CA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_CLE_CT1EA), - REGDEF(33, 71, V3D_PTB_BPCA), - REGDEF(33, 71, 
V3D_PTB_BPCS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCA), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_PTB_BPCS), - REGDEF(33, 42, V3D_GMP_STATUS(33)), - REGDEF(33, 42, V3D_GMP_CFG(33)), - REGDEF(33, 42, V3D_GMP_VIO_ADDR(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_STATUS(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_CFG(33)), + REGDEF(V3D_GEN_33, V3D_GEN_42, V3D_GMP_VIO_ADDR(33)), - REGDEF(33, 71, V3D_ERR_FDBGO), - REGDEF(33, 71, V3D_ERR_FDBGB), - REGDEF(33, 71, V3D_ERR_FDBGS), - REGDEF(33, 71, V3D_ERR_STAT), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGO), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGB), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_FDBGS), + REGDEF(V3D_GEN_33, V3D_GEN_71, V3D_ERR_STAT), }; static const struct v3d_reg_def v3d_csd_reg_defs[] = { - REGDEF(41, 71, V3D_CSD_STATUS), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG0(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG1(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG2(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG3(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG4(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG5(41)), - REGDEF(41, 42, V3D_CSD_CURRENT_CFG6(41)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG0(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG1(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG2(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG3(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG4(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG5(71)), - REGDEF(71, 71, V3D_CSD_CURRENT_CFG6(71)), - REGDEF(71, 71, V3D_V7_CSD_CURRENT_CFG7), + REGDEF(V3D_GEN_41, V3D_GEN_71, V3D_CSD_STATUS), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG0(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG1(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG2(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG3(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG4(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG5(41)), + REGDEF(V3D_GEN_41, V3D_GEN_42, V3D_CSD_CURRENT_CFG6(41)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG0(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG1(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG2(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG3(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG4(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG5(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_CSD_CURRENT_CFG6(71)), + REGDEF(V3D_GEN_71, V3D_GEN_71, V3D_V7_CSD_CURRENT_CFG7), }; static int v3d_v3d_debugfs_regs(struct seq_file *m, void *unused) @@ -164,7 +164,7 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) str_yes_no(ident2 & V3D_HUB_IDENT2_WITH_MMU)); seq_printf(m, "TFU: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TFU)); - if (v3d->ver <= 42) { + if (v3d->ver <= V3D_GEN_42) { seq_printf(m, "TSY: %s\n", str_yes_no(ident1 & V3D_HUB_IDENT1_WITH_TSY)); } @@ -196,11 +196,11 @@ static int v3d_v3d_debugfs_ident(struct seq_file *m, void *unused) seq_printf(m, " QPUs: %d\n", nslc * qups); seq_printf(m, " Semaphores: %d\n", V3D_GET_FIELD(ident1, V3D_IDENT1_NSEM)); - if (v3d->ver <= 42) { + if (v3d->ver <= V3D_GEN_42) { seq_printf(m, " BCG int: %d\n", (ident2 & V3D_IDENT2_BCG_INT) != 0); } - if (v3d->ver < 40) { + if (v3d->ver < V3D_GEN_41) { seq_printf(m, " Override TMU: %d\n", (misccfg & V3D_MISCCFG_OVRTMUOUT) != 0); } @@ -234,7 +234,7 @@ static int v3d_measure_clock(struct seq_file *m, void *unused) int core = 0; int measure_ms = 1000; - if (v3d->ver >= 40) { + if (v3d->ver >= V3D_GEN_41) { int cycle_count_reg = V3D_PCTR_CYCLE_COUNT(v3d->ver); V3D_CORE_WRITE(core, 
V3D_V4_PCTR_0_SRC_0_3, V3D_SET_FIELD_VER(cycle_count_reg, diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 852015214e97..aa68be8fe86b 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -92,7 +93,7 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, args->value = 1; return 0; case DRM_V3D_PARAM_SUPPORTS_PERFMON: - args->value = (v3d->ver >= 40); + args->value = (v3d->ver >= V3D_GEN_41); return 0; case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT: args->value = 1; @@ -254,10 +255,10 @@ static const struct drm_driver v3d_drm_driver = { }; static const struct of_device_id v3d_of_match[] = { - { .compatible = "brcm,2711-v3d" }, - { .compatible = "brcm,2712-v3d" }, - { .compatible = "brcm,7268-v3d" }, - { .compatible = "brcm,7278-v3d" }, + { .compatible = "brcm,2711-v3d", .data = (void *)V3D_GEN_42 }, + { .compatible = "brcm,2712-v3d", .data = (void *)V3D_GEN_71 }, + { .compatible = "brcm,7268-v3d", .data = (void *)V3D_GEN_33 }, + { .compatible = "brcm,7278-v3d", .data = (void *)V3D_GEN_41 }, {}, }; MODULE_DEVICE_TABLE(of, v3d_of_match); @@ -274,6 +275,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct drm_device *drm; struct v3d_dev *v3d; + enum v3d_gen gen; int ret; u32 mmu_debug; u32 ident1, ident3; @@ -287,6 +289,9 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, drm); + gen = (uintptr_t)of_device_get_match_data(dev); + v3d->ver = gen; + ret = map_regs(v3d, &v3d->hub_regs, "hub"); if (ret) return ret; @@ -316,6 +321,11 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) ident1 = V3D_READ(V3D_HUB_IDENT1); v3d->ver = (V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_TVER) * 10 + V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_REV)); + /* Make sure that the V3D tech version retrieved from the HW is equal + * to the one advertised by the device tree. + */ + WARN_ON(v3d->ver != gen); + v3d->cores = V3D_GET_FIELD(ident1, V3D_HUB_IDENT1_NCORES); WARN_ON(v3d->cores > 1); /* multicore not yet implemented */ @@ -340,7 +350,7 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) } } - if (v3d->ver < 41) { + if (v3d->ver < V3D_GEN_41) { ret = map_regs(v3d, &v3d->gca_regs, "gca"); if (ret) goto clk_disable; diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 9deaefa0f95b..de4a9e18f6a9 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -94,11 +94,18 @@ struct v3d_perfmon { u64 values[] __counted_by(ncounters); }; +enum v3d_gen { + V3D_GEN_33 = 33, + V3D_GEN_41 = 41, + V3D_GEN_42 = 42, + V3D_GEN_71 = 71, +}; + struct v3d_dev { struct drm_device drm; /* Short representation (e.g. 33, 41) of the V3D tech version */ - int ver; + enum v3d_gen ver; /* Short representation (e.g. 5, 6) of the V3D tech revision */ int rev; @@ -199,7 +206,7 @@ to_v3d_dev(struct drm_device *dev) static inline bool v3d_has_csd(struct v3d_dev *v3d) { - return v3d->ver >= 41; + return v3d->ver >= V3D_GEN_41; } #define v3d_to_pdev(v3d) to_platform_device((v3d)->drm.dev) diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index b1e681630ded..1ea6d3832c22 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -25,7 +25,7 @@ v3d_init_core(struct v3d_dev *v3d, int core) * type. 
If you want the default behavior, you can still put * "2" in the indirect texture state's output_type field. */ - if (v3d->ver < 40) + if (v3d->ver < V3D_GEN_41) V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT); /* Whenever we flush the L2T cache, we always want to flush @@ -58,7 +58,7 @@ v3d_idle_axi(struct v3d_dev *v3d, int core) static void v3d_idle_gca(struct v3d_dev *v3d) { - if (v3d->ver >= 41) + if (v3d->ver >= V3D_GEN_41) return; V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN); @@ -132,13 +132,13 @@ v3d_reset(struct v3d_dev *v3d) static void v3d_flush_l3(struct v3d_dev *v3d) { - if (v3d->ver < 41) { + if (v3d->ver < V3D_GEN_41) { u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL); V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH); - if (v3d->ver < 33) { + if (v3d->ver < V3D_GEN_33) { V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL, gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH); } @@ -151,7 +151,7 @@ v3d_flush_l3(struct v3d_dev *v3d) static void v3d_invalidate_l2c(struct v3d_dev *v3d, int core) { - if (v3d->ver > 32) + if (v3d->ver >= V3D_GEN_33) return; V3D_CORE_WRITE(core, V3D_CTL_L2CACTL, diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 72b6a119412f..29f63f572d35 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -143,7 +143,7 @@ v3d_irq(int irq, void *arg) /* We shouldn't be triggering these if we have GMP in * always-allowed mode. */ - if (v3d->ver < 71 && (intsts & V3D_INT_GMPV)) + if (v3d->ver < V3D_GEN_71 && (intsts & V3D_INT_GMPV)) dev_err(v3d->drm.dev, "GMP violation\n"); /* V3D 4.2 wires the hub and core IRQs together, so if we & @@ -200,7 +200,7 @@ v3d_hub_irq(int irq, void *arg) V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL)); - if (v3d->ver >= 41) { + if (v3d->ver >= V3D_GEN_41) { axi_id = axi_id >> 5; if (axi_id < ARRAY_SIZE(v3d41_axi_ids)) client = v3d41_axi_ids[axi_id]; @@ -217,7 +217,7 @@ v3d_hub_irq(int irq, void *arg) status = IRQ_HANDLED; } - if (v3d->ver >= 71 && (intsts & V3D_V7_HUB_INT_GMPV)) { + if (v3d->ver >= V3D_GEN_71 && (intsts & V3D_V7_HUB_INT_GMPV)) { dev_err(v3d->drm.dev, "GMP Violation\n"); status = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c index 3ebda2fa46fc..9a3fe5255874 100644 --- a/drivers/gpu/drm/v3d/v3d_perfmon.c +++ b/drivers/gpu/drm/v3d/v3d_perfmon.c @@ -200,10 +200,10 @@ void v3d_perfmon_init(struct v3d_dev *v3d) const struct v3d_perf_counter_desc *counters = NULL; unsigned int max = 0; - if (v3d->ver >= 71) { + if (v3d->ver >= V3D_GEN_71) { counters = v3d_v71_performance_counters; max = ARRAY_SIZE(v3d_v71_performance_counters); - } else if (v3d->ver >= 42) { + } else if (v3d->ver >= V3D_GEN_42) { counters = v3d_v42_performance_counters; max = ARRAY_SIZE(v3d_v42_performance_counters); } diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 80466ce8c7df..34bd2b14f932 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -345,11 +345,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) V3D_WRITE(V3D_TFU_ICA(v3d->ver), job->args.ica); V3D_WRITE(V3D_TFU_IUA(v3d->ver), job->args.iua); V3D_WRITE(V3D_TFU_IOA(v3d->ver), job->args.ioa); - if (v3d->ver >= 71) + if (v3d->ver >= V3D_GEN_71) V3D_WRITE(V3D_V7_TFU_IOC, job->args.v71.ioc); V3D_WRITE(V3D_TFU_IOS(v3d->ver), job->args.ios); V3D_WRITE(V3D_TFU_COEF0(v3d->ver), job->args.coef[0]); - if (v3d->ver >= 71 || (job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { + if (v3d->ver >= V3D_GEN_71 || 
(job->args.coef[0] & V3D_TFU_COEF0_USECOEF)) { V3D_WRITE(V3D_TFU_COEF1(v3d->ver), job->args.coef[1]); V3D_WRITE(V3D_TFU_COEF2(v3d->ver), job->args.coef[2]); V3D_WRITE(V3D_TFU_COEF3(v3d->ver), job->args.coef[3]); @@ -395,7 +395,7 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) * * XXX: Set the CFG7 register */ - if (v3d->ver >= 71) + if (v3d->ver >= V3D_GEN_71) V3D_CORE_WRITE(0, V3D_V7_CSD_QUEUED_CFG7, 0); /* CFG0 write kicks off the job. */ From 38712c5281ac5f6f27058b825ca62ae69f2e2451 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 17 Mar 2025 22:01:10 -0300 Subject: [PATCH 0183/1627] dt-bindings: gpu: v3d: Add per-compatible register restrictions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to enforce per-SoC register rules, add per-compatible restrictions. For example, V3D 3.3 (used in brcm,7268-v3d) has a cache controller (GCA), which is not present in other V3D generations. Declaring these differences helps ensure the DTB accurately reflect the hardware design. The example was using an incorrect order for the register names. This commit corrects that by enforcing the order established in the register items description. Reviewed-by: Krzysztof Kozlowski Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20250317-v3d-gpu-reset-fixes-v6-2-f3ee7717ed17@igalia.com --- .../devicetree/bindings/gpu/brcm,bcm-v3d.yaml | 86 ++++++++++++++++--- 1 file changed, 73 insertions(+), 13 deletions(-) diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml index dc078ceeca9a..6a1a09031983 100644 --- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml +++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml @@ -22,20 +22,12 @@ properties: - brcm,7278-v3d reg: - items: - - description: hub register (required) - - description: core0 register (required) - - description: GCA cache controller register (if GCA controller present) - - description: bridge register (if no external reset controller) minItems: 2 + maxItems: 4 reg-names: - items: - - const: hub - - const: core0 - - enum: [ bridge, gca ] - - enum: [ bridge, gca ] minItems: 2 + maxItems: 4 interrupts: items: @@ -58,6 +50,74 @@ required: - reg-names - interrupts +allOf: + - if: + properties: + compatible: + contains: + const: brcm,2711-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + reg-names: + items: + - const: hub + - const: core0 + - if: + properties: + compatible: + contains: + const: brcm,2712-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + reg-names: + items: + - const: hub + - const: core0 + - if: + properties: + compatible: + contains: + const: brcm,7268-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + - description: GCA cache controller register + - description: bridge register + reg-names: + items: + - const: hub + - const: core0 + - const: gca + - const: bridge + - if: + properties: + compatible: + contains: + const: brcm,7278-v3d + then: + properties: + reg: + items: + - description: hub register + - description: core0 register + - description: bridge register + reg-names: + items: + - const: hub + - const: core0 + - const: bridge + additionalProperties: false examples: @@ -66,9 +126,9 @@ examples: compatible = "brcm,7268-v3d"; reg = <0xf1200000 0x4000>, <0xf1208000 0x4000>, - 
<0xf1204000 0x100>, - <0xf1204100 0x100>; - reg-names = "hub", "core0", "bridge", "gca"; + <0xf1204100 0x100>, + <0xf1204000 0x100>; + reg-names = "hub", "core0", "gca", "bridge"; interrupts = <0 78 4>, <0 77 4>; }; From e29671ae9714d2c37cd5165a2e928cc2fdd829c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 17 Mar 2025 22:01:11 -0300 Subject: [PATCH 0184/1627] dt-bindings: gpu: v3d: Add SMS register to BCM2712 compatible MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit V3D 7.1 exposes a new register block, called V3D_SMS. As BCM2712 has a V3D 7.1 core, add a new register item to its compatible. Similar to the GCA, which is specific for V3D 3.3, SMS should only be added for V3D 7.1 variants (such as brcm,2712-v3d). Acked-by: Krzysztof Kozlowski Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20250317-v3d-gpu-reset-fixes-v6-3-f3ee7717ed17@igalia.com --- Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml index 6a1a09031983..dd2cc63c9a51 100644 --- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml +++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml @@ -77,10 +77,12 @@ allOf: items: - description: hub register - description: core0 register + - description: SMS state manager register reg-names: items: - const: hub - const: core0 + - const: sms - if: properties: compatible: From b1cd1d738e8d98296a01768ba217f2f2ea5dd7b8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 17 Mar 2025 22:01:12 -0300 Subject: [PATCH 0185/1627] dt-bindings: gpu: v3d: Add V3D driver maintainer as DT maintainer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As established in commit 89d04995f76c ("MAINTAINERS: Drop Emma Anholt from all M lines."), Emma is no longer active in the Linux kernel and dropped the V3D maintainership. Therefore, remove Emma as one of the DT maintainers and add the current V3D driver maintainer. Acked-by: Emma Anholt Acked-by: Rob Herring (Arm) Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20250317-v3d-gpu-reset-fixes-v6-4-f3ee7717ed17@igalia.com --- Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml index dd2cc63c9a51..43c6d2d72456 100644 --- a/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml +++ b/Documentation/devicetree/bindings/gpu/brcm,bcm-v3d.yaml @@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml# title: Broadcom V3D GPU maintainers: - - Eric Anholt + - Maíra Canal - Nicolas Saenz Julienne properties: From 1bdf2ccc351ce73ec5fcc0fa82eb6959b30f34c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ma=C3=ADra=20Canal?= Date: Mon, 17 Mar 2025 22:01:13 -0300 Subject: [PATCH 0186/1627] drm/v3d: Use V3D_SMS registers for power on/off and reset on V3D 7.x MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In addition to the standard reset controller, V3D 7.x requires configuring the V3D_SMS registers for proper power on/off and reset. Add the new registers to `v3d_regs.h` and ensure they are properly configured during device probing, removal, and reset. 
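For orientation, the power-up handshake the diff below implements boils down to clearing the power-off request in V3D_SMS_TEE_CS and polling the state field until the block reports idle; power-off is the mirror image (write V3D_SMS_POWER_OFF and wait for V3D_SMS_POWER_OFF_STATE). A minimal open-coded sketch using the register and field names this patch introduces (illustrative only and assuming <linux/io.h>/<linux/delay.h>; the driver itself uses the V3D_SMS_READ/WRITE macros and wait_for() rather than this loop):

static int v3d_sms_power_up_sketch(void __iomem *sms_regs)
{
	int timeout = 100;

	/* Clear the power-off request in the TEE control/status register. */
	writel(V3D_SMS_CLEAR_POWER_OFF, sms_regs + V3D_SMS_TEE_CS);

	/* Poll the state field until the SMS block reports idle. */
	while (timeout--) {
		u32 cs = readl(sms_regs + V3D_SMS_TEE_CS);

		if (((cs & V3D_SMS_STATE_MASK) >> V3D_SMS_STATE_SHIFT) == V3D_SMS_IDLE)
			return 0;
		usleep_range(10, 20);
	}

	return -ETIMEDOUT;
}
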
This change fixes GPU reset issues on the Raspberry Pi 5 (BCM2712). Without exposing these registers, a GPU reset causes the GPU to hang, stopping any further job execution and freezing the desktop GUI. The same issue occurs when unloading and loading the v3d driver. Link: https://github.com/raspberrypi/linux/issues/6660 Reviewed-by: Iago Toral Quiroga Signed-off-by: Maíra Canal Link: https://patchwork.freedesktop.org/patch/msgid/20250317-v3d-gpu-reset-fixes-v6-5-f3ee7717ed17@igalia.com --- drivers/gpu/drm/v3d/v3d_drv.c | 40 ++++++++++++++++++++++++++++++++++ drivers/gpu/drm/v3d/v3d_drv.h | 11 ++++++++++ drivers/gpu/drm/v3d/v3d_gem.c | 17 +++++++++++++++ drivers/gpu/drm/v3d/v3d_regs.h | 26 ++++++++++++++++++++++ 4 files changed, 94 insertions(+) diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index aa68be8fe86b..5e997ae8bc9c 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -263,6 +263,36 @@ static const struct of_device_id v3d_of_match[] = { }; MODULE_DEVICE_TABLE(of, v3d_of_match); +static void +v3d_idle_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_CLEAR_POWER_OFF); + + if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS), + V3D_SMS_STATE) == V3D_SMS_IDLE), 100)) { + DRM_ERROR("Failed to power up SMS\n"); + } + + v3d_reset_sms(v3d); +} + +static void +v3d_power_off_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_TEE_CS, V3D_SMS_POWER_OFF); + + if (wait_for((V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_TEE_CS), + V3D_SMS_STATE) == V3D_SMS_POWER_OFF_STATE), 100)) { + DRM_ERROR("Failed to power off SMS\n"); + } +} + static int map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name) { @@ -300,6 +330,12 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) if (ret) return ret; + if (v3d->ver >= V3D_GEN_71) { + ret = map_regs(v3d, &v3d->sms_regs, "sms"); + if (ret) + return ret; + } + v3d->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(v3d->clk)) return dev_err_probe(dev, PTR_ERR(v3d->clk), "Failed to get V3D clock\n"); @@ -310,6 +346,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) return ret; } + v3d_idle_sms(v3d); + mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); mask = DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); ret = dma_set_mask_and_coherent(dev, mask); @@ -410,6 +448,8 @@ static void v3d_platform_drm_remove(struct platform_device *pdev) dma_free_wc(v3d->drm.dev, 4096, v3d->mmu_scratch, v3d->mmu_scratch_paddr); + v3d_power_off_sms(v3d); + clk_disable_unprepare(v3d->clk); } diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index de4a9e18f6a9..b51f0b648a08 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -118,6 +118,7 @@ struct v3d_dev { void __iomem *core_regs[3]; void __iomem *bridge_regs; void __iomem *gca_regs; + void __iomem *sms_regs; struct clk *clk; struct reset_control *reset; @@ -268,6 +269,15 @@ to_v3d_fence(struct dma_fence *fence) #define V3D_GCA_READ(offset) readl(v3d->gca_regs + offset) #define V3D_GCA_WRITE(offset, val) writel(val, v3d->gca_regs + offset) +#define V3D_SMS_IDLE 0x0 +#define V3D_SMS_ISOLATING_FOR_RESET 0xa +#define V3D_SMS_RESETTING 0xb +#define V3D_SMS_ISOLATING_FOR_POWER_OFF 0xc +#define V3D_SMS_POWER_OFF_STATE 0xd + +#define V3D_SMS_READ(offset) readl(v3d->sms_regs + (offset)) +#define V3D_SMS_WRITE(offset, val) writel(val, v3d->sms_regs + (offset)) + #define V3D_CORE_READ(core, 
offset) readl(v3d->core_regs[core] + offset) #define V3D_CORE_WRITE(core, offset, val) writel(val, v3d->core_regs[core] + offset) @@ -546,6 +556,7 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); /* v3d_gem.c */ int v3d_gem_init(struct drm_device *dev); void v3d_gem_destroy(struct drm_device *dev); +void v3d_reset_sms(struct v3d_dev *v3d); void v3d_reset(struct v3d_dev *v3d); void v3d_invalidate_caches(struct v3d_dev *v3d); void v3d_clean_caches(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 1ea6d3832c22..d7d16da78db3 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -104,6 +104,22 @@ v3d_reset_v3d(struct v3d_dev *v3d) v3d_init_hw_state(v3d); } +void +v3d_reset_sms(struct v3d_dev *v3d) +{ + if (v3d->ver < V3D_GEN_71) + return; + + V3D_SMS_WRITE(V3D_SMS_REE_CS, V3D_SET_FIELD(0x4, V3D_SMS_STATE)); + + if (wait_for(!(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS), + V3D_SMS_STATE) == V3D_SMS_ISOLATING_FOR_RESET) && + !(V3D_GET_FIELD(V3D_SMS_READ(V3D_SMS_REE_CS), + V3D_SMS_STATE) == V3D_SMS_RESETTING), 100)) { + DRM_ERROR("Failed to wait for SMS reset\n"); + } +} + void v3d_reset(struct v3d_dev *v3d) { @@ -119,6 +135,7 @@ v3d_reset(struct v3d_dev *v3d) v3d_idle_axi(v3d, 0); v3d_idle_gca(v3d); + v3d_reset_sms(v3d); v3d_reset_v3d(v3d); v3d_mmu_set_page_table(v3d); diff --git a/drivers/gpu/drm/v3d/v3d_regs.h b/drivers/gpu/drm/v3d/v3d_regs.h index 6da3c69082bd..c1870265eaee 100644 --- a/drivers/gpu/drm/v3d/v3d_regs.h +++ b/drivers/gpu/drm/v3d/v3d_regs.h @@ -515,4 +515,30 @@ # define V3D_ERR_VPAERGS BIT(1) # define V3D_ERR_VPAEABB BIT(0) +#define V3D_SMS_REE_CS 0x00000 +#define V3D_SMS_TEE_CS 0x00400 +# define V3D_SMS_INTERRUPT BIT(31) +# define V3D_SMS_POWER_OFF BIT(30) +# define V3D_SMS_CLEAR_POWER_OFF BIT(29) +# define V3D_SMS_LOCK BIT(28) +# define V3D_SMS_CLEAR_LOCK BIT(27) +# define V3D_SMS_SVP_MODE_EXIT BIT(26) +# define V3D_SMS_CLEAR_SVP_MODE_EXIT BIT(25) +# define V3D_SMS_SVP_MODE_ENTER BIT(24) +# define V3D_SMS_CLEAR_SVP_MODE_ENTER BIT(23) +# define V3D_SMS_THEIR_MODE_EXIT BIT(22) +# define V3D_SMS_THEIR_MODE_ENTER BIT(21) +# define V3D_SMS_OUR_MODE_EXIT BIT(20) +# define V3D_SMS_CLEAR_OUR_MODE_EXIT BIT(19) +# define V3D_SMS_SEQ_PC_MASK V3D_MASK(16, 10) +# define V3D_SMS_SEQ_PC_SHIFT 10 +# define V3D_SMS_HUBCORE_STATUS_MASK V3D_MASK(9, 8) +# define V3D_SMS_HUBCORE_STATUS_SHIFT 8 +# define V3D_SMS_NEW_MODE_MASK V3D_MASK(7, 6) +# define V3D_SMS_NEW_MODE_SHIFT 6 +# define V3D_SMS_OLD_MODE_MASK V3D_MASK(5, 4) +# define V3D_SMS_OLD_MODE_SHIFT 4 +# define V3D_SMS_STATE_MASK V3D_MASK(3, 0) +# define V3D_SMS_STATE_SHIFT 0 + #endif /* V3D_REGS_H */ From 689582882802cd64986c1eb584c9f5184d67f0cf Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Sun, 23 Mar 2025 19:41:03 +0800 Subject: [PATCH 0187/1627] drm/xe: Fix unmet direct dependencies warning WARNING: unmet direct dependencies detected for FB_IOMEM_HELPERS Depends on [n]: HAS_IOMEM [=y] && FB_CORE [=n] Selected by [m]: - DRM_XE_DISPLAY [=y] && HAS_IOMEM [=y] && DRM [=m] && DRM_XE [=m] && DRM_XE [=m]=m [=m] && HAS_IOPORT [=y] DRM_XE_DISPLAY requires FB_IOMEM_HELPERS, but the dependency FB_CORE is missing, selecting FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION is set as other drm drivers. 
Fixes: 44e694958b95 ("drm/xe/display: Implement display support") Signed-off-by: Yue Haibing Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20250323114103.1960511-1-yuehaibing@huawei.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/xe/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 7d7995196702..5c2f459a2925 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -53,7 +53,7 @@ config DRM_XE config DRM_XE_DISPLAY bool "Enable display support" depends on DRM_XE && DRM_XE=m && HAS_IOPORT - select FB_IOMEM_HELPERS + select FB_IOMEM_HELPERS if DRM_FBDEV_EMULATION select I2C select I2C_ALGOBIT default y From 8c6c3d207549d517638d31ee99d59f2bc16823ca Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Thu, 6 Mar 2025 18:28:40 +0100 Subject: [PATCH 0188/1627] drm/bridge: imx8qxp-ldb: cleanup return value 'ret' can only be 0 at this point, being preceded by a 'if (ret) return ret;'. So return 0 for clarity. Signed-off-by: Luca Ceresoli Reviewed-by: Liu Ying Signed-off-by: Liu Ying Link: https://patchwork.freedesktop.org/patch/msgid/20250306-drm-two-ldb-improvements-v1-1-f139d768b92c@bootlin.com --- drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c index 3cb484773ddf..d4f3492ca5ab 100644 --- a/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c +++ b/drivers/gpu/drm/bridge/imx/imx8qxp-ldb.c @@ -662,7 +662,7 @@ static int imx8qxp_ldb_probe(struct platform_device *pdev) ldb_add_bridge_helper(ldb, &imx8qxp_ldb_bridge_funcs); - return ret; + return 0; } static void imx8qxp_ldb_remove(struct platform_device *pdev) From 616299b6669ff66400c7341720f2dbf4b1fa81d1 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Thu, 6 Mar 2025 18:28:41 +0100 Subject: [PATCH 0189/1627] drm/bridge: fsl-ldb: make warning message more informative This warning notifies a clock was set to an inaccurate value. Modify the string to also show the clock name. While doing that also rewrap the entire function call. Signed-off-by: Luca Ceresoli Acked-by: Liu Ying Signed-off-by: Liu Ying Link: https://patchwork.freedesktop.org/patch/msgid/20250306-drm-two-ldb-improvements-v1-2-f139d768b92c@bootlin.com --- drivers/gpu/drm/bridge/fsl-ldb.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/bridge/fsl-ldb.c b/drivers/gpu/drm/bridge/fsl-ldb.c index 72d8f32d48fa..2cb6dfc7a6d3 100644 --- a/drivers/gpu/drm/bridge/fsl-ldb.c +++ b/drivers/gpu/drm/bridge/fsl-ldb.c @@ -181,9 +181,9 @@ static void fsl_ldb_atomic_enable(struct drm_bridge *bridge, configured_link_freq = clk_get_rate(fsl_ldb->clk); if (configured_link_freq != requested_link_freq) - dev_warn(fsl_ldb->dev, "Configured LDB clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n", - configured_link_freq, - requested_link_freq); + dev_warn(fsl_ldb->dev, + "Configured %pC clock (%lu Hz) does not match requested LVDS clock: %lu Hz\n", + fsl_ldb->clk, configured_link_freq, requested_link_freq); clk_prepare_enable(fsl_ldb->clk); From c63d00e388f2240c732bf1c89dc48bc8ff98089d Mon Sep 17 00:00:00 2001 From: Suraj Kandpal Date: Fri, 28 Feb 2025 20:55:32 +0530 Subject: [PATCH 0190/1627] drm/i915/vdsc: Use the DSC config tables for DSI panels Some DSI panel vendors end up hardcoding PPS params because of which it does not listen to the params sent from the source. 
We use the default config tables for DSI panels when using DSC 1.1 rather than calculate our own rc parameters. --v2 -Use intel_crtc_has_type [Jani] --v4 -Use a function to check Mipi dsi dsc 1.1 condition [Ankit] -Add documentation for using this condition [Ankit] -Rebase --v5 -Pass only the crtc_state [Jani] -Fixup the comment [Jani] -Check for dsc major version [Jani] -Use co-developed-by tag [Jani] --v6 -Add more definition of the issue and solution in the comment [Ankit] Closes: https://gitlab.freedesktop.org/drm/i915/kernel/-/issues/13719 Co-developed-by: William Tseng Signed-off-by: Suraj Kandpal Reviewed-by: Ankit Nautiyal Link: https://patchwork.freedesktop.org/patch/msgid/20250228152531.403026-1-suraj.kandpal@intel.com --- drivers/gpu/drm/i915/display/intel_vdsc.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 3ed64c17bdff..470c170897e5 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -259,6 +259,15 @@ static int intel_dsc_slice_dimensions_valid(struct intel_crtc_state *pipe_config return 0; } +static bool is_dsi_dsc_1_1(struct intel_crtc_state *crtc_state) +{ + struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; + + return vdsc_cfg->dsc_version_major == 1 && + vdsc_cfg->dsc_version_minor == 1 && + intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI); +} + int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(pipe_config); @@ -317,8 +326,19 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config) * From XE_LPD onwards we supports compression bpps in steps of 1 * upto uncompressed bpp-1, hence add calculations for all the rc * parameters + * + * We don't want to calculate all rc parameters when the panel + * is MIPI DSI and it's using DSC 1.1. The reason being that some + * DSI panels vendors have hardcoded PPS params in the VBT causing + * the parameters sent from the source which are derived through + * interpolation to differ from the params the panel expects. + * This causes a noise in the display. + * Furthermore for DSI panels we are currently using bits_per_pixel + * (compressed bpp) hardcoded from VBT, (unlike other encoders where we + * find the optimum compressed bpp) so dont need to rely on interpolation, + * as we can get the required rc parameters from the tables. */ - if (DISPLAY_VER(display) >= 13) { + if (DISPLAY_VER(display) >= 13 && !is_dsi_dsc_1_1(pipe_config)) { calculate_rc_params(vdsc_cfg); } else { if ((compressed_bpp == 8 || From fe8fd8af6de6bc76506f739b9fd1acb1837cfa02 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Sat, 22 Mar 2025 10:13:44 +0530 Subject: [PATCH 0191/1627] drm/i915/display: Add fixed_rr to crtc_state dump MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add fixed refresh rate mode in crtc_state dump. VRR Timing Generator is running in fixed refresh rate mode when vrr.vmin = vrr.vmax = vrr.flipline. v2: s/fixed_rr/fixed rr for consistency with the other stuff. 
(Ville) Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250322044345.3827137-2-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 3 ++- drivers/gpu/drm/i915/display/intel_vrr.c | 1 - drivers/gpu/drm/i915/display/intel_vrr.h | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c index 599ddce96371..0f0fad329b89 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c +++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c @@ -294,8 +294,9 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config, pipe_config->hw.adjusted_mode.crtc_vdisplay, pipe_config->framestart_delay, pipe_config->msa_timing_delay); - drm_printf(&p, "vrr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n", + drm_printf(&p, "vrr: %s, fixed rr: %s, vmin: %d, vmax: %d, flipline: %d, pipeline full: %d, guardband: %d vsync start: %d, vsync end: %d\n", str_yes_no(pipe_config->vrr.enable), + str_yes_no(intel_vrr_is_fixed_rr(pipe_config)), pipe_config->vrr.vmin, pipe_config->vrr.vmax, pipe_config->vrr.flipline, pipe_config->vrr.pipeline_full, pipe_config->vrr.guardband, pipe_config->vrr.vsync_start, pipe_config->vrr.vsync_end); diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index 622a70e21737..aa65a6933ddb 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -602,7 +602,6 @@ void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state) intel_vrr_set_fixed_rr_timings(old_crtc_state); } -static bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state) { return crtc_state->vrr.flipline && diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h index 514822577e8a..65d2b0eead51 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.h +++ b/drivers/gpu/drm/i915/display/intel_vrr.h @@ -35,5 +35,6 @@ int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state); int intel_vrr_vmax_vblank_start(const struct intel_crtc_state *crtc_state); int intel_vrr_vmin_vblank_start(const struct intel_crtc_state *crtc_state); int intel_vrr_vblank_delay(const struct intel_crtc_state *crtc_state); +bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state); #endif /* __INTEL_VRR_H__ */ From 0ead88112bf69da4ee54d3f26e21258f00640865 Mon Sep 17 00:00:00 2001 From: Ankit Nautiyal Date: Sat, 22 Mar 2025 10:13:45 +0530 Subject: [PATCH 0192/1627] drm/i915/vrr: Avoid reading vrr.enable based on fixed_rr check MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, vrr.enable is intended only for variable refresh rate timings. At this point, we do not set fixed refresh rate timings, but the GOP can, which creates a problem during the readback of vrr.enable. The GOP enables the VRR timing generator with fixed timings, while the driver only recognizes the VRR timing generator as enabled with variable timings. This discrepancy causes an issue due to the fixed refresh rate check during readback. Since the VRR timing generator is enabled and we do not support fixed timings, the readback should set vrr.enable so that the driver can disable the VRR timing generator. However, the current check does not allow this. 
Therefore, remove the fixed refresh rate check during readback. Fixes: 27217f9d1856 ("drm/i915/vrr: Track vrr.enable only for variable timing") Cc: Ankit Nautiyal Cc: Ville Syrjälä Cc: Jani Nikula Signed-off-by: Ankit Nautiyal Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20250322044345.3827137-3-ankit.k.nautiyal@intel.com --- drivers/gpu/drm/i915/display/intel_vrr.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c index aa65a6933ddb..6bdcdfed4b9b 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr.c +++ b/drivers/gpu/drm/i915/display/intel_vrr.c @@ -657,8 +657,7 @@ void intel_vrr_get_config(struct intel_crtc_state *crtc_state) } } - crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE && - !intel_vrr_is_fixed_rr(crtc_state); + crtc_state->vrr.enable = trans_vrr_ctl & VRR_CTL_VRR_ENABLE; /* * #TODO: For Both VRR and CMRR the flag I915_MODE_FLAG_VRR is set for mode_flags. From 88c1f9a4d36de61f87cc52aac670020b13d1ccaa Mon Sep 17 00:00:00 2001 From: Mohammed Thasleem Date: Fri, 21 Mar 2025 18:07:07 +0530 Subject: [PATCH 0193/1627] drm/i915/dmc: Create debugfs entry for dc6 counter Starting from MTL we don't have a platform agnostic way to validate DC6 state due to dc6 counter has been removed to validate DC state. The goal is to validate that the display HW can reach the DC6 power state. There is no HW DC6 residency counter (and there wasn't such a counter earlier either), so an alternative way is required. According to the HW team the display driver has programmed everything correctly in order to allow the DC6 power state if the DC5 power state is reached (indicated by the HW DC5 residency counter incrementing) and DC6 is enabled by the driver. Driver could take a snapshot of the DC5 residency counter right after it enables DC6 (dc5_residency_start) and increment the SW DC6 residency counter right before it disables DC6 or when user space reads the DC6 counter. So the driver would update the counter at these two points in the following way: dc6_residency_counter += dc5_current_count - dc5_start_count v2: Update the discription. (Imre) Read dc5 count during dc6 enable and disable then and update dc6 residency counter. (Imre) Remove variable from dmc structure. (Jani) Updated the subject title. v3: Add i915_power_domains lock to updated dc6 count in debugfs. (Imre) Use flags to check dc6 enable/disable states. (Imre) Move the display version check and counter read/update to a helper. (Imre) Resize the variable length. (Rodrigo) Use old dc6 debugfs entry for every platform. (Rodrigo) v4: Remove superfluous whitespace. (Jani) Read DMC registers in intel_dmc.c (Jani) Rename dc6_en_dis to dc6_enabled and change its type to bool. (Jani) Rename update_dc6_count and move it to intel_dmc.c (Jani) Rename dc6_en_dis to start_tracking. (Imre) Have lock for dc6 state read aswelll. (Imre) Keep the existing way print 'DC5 -> DC6 count' along with new 'DC6 Allowed Count' print. (Imre) Add counters in intel_dmc struct. (Imre) Have interface to return dc6 allowed count. (Imre) Rename dc6_count to dc6_allowed_count. (Rodrigo) v5: Rename counters and move in to dc6_allowed structure. (Imre) Order declaration lines in decreasing line length. (Imre) Update start_tacking logic. (Imre) Move get couner inside lock and DISPLAY_VER code to helper. (Imre) v6: Change intel_dmc_get_dc6_allowed_count return type to bool. (Imre) Update debugfs print to better allien with old print. 
(Imre) Remove braces at if/else for signle line statements. (Imre) v7: Remove in line variable declaration. (Imre) v8: Rebase the changes. Signed-off-by: Mohammed Thasleem Reviewed-by: Imre Deak Signed-off-by: Ankit Nautiyal Link: https://patchwork.freedesktop.org/patch/msgid/20250321123707.287745-1-mohammed.thasleem@intel.com --- .../i915/display/intel_display_power_well.c | 11 +++- drivers/gpu/drm/i915/display/intel_dmc.c | 50 ++++++++++++++++++- drivers/gpu/drm/i915/display/intel_dmc.h | 1 + 3 files changed, 60 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 751e49b880d6..b9b4359751cc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -749,8 +749,9 @@ void gen9_sanitize_dc_state(struct intel_display *display) void gen9_set_dc_state(struct intel_display *display, u32 state) { struct i915_power_domains *power_domains = &display->power.domains; - u32 val; + bool dc6_was_enabled, enable_dc6; u32 mask; + u32 val; if (!HAS_DISPLAY(display)) return; @@ -769,11 +770,19 @@ void gen9_set_dc_state(struct intel_display *display, u32 state) drm_err(display->drm, "DC state mismatch (0x%x -> 0x%x)\n", power_domains->dc_state, val & mask); + enable_dc6 = state & DC_STATE_EN_UPTO_DC6; + dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6; + if (!dc6_was_enabled && enable_dc6) + intel_dmc_update_dc6_allowed_count(display, true); + val &= ~mask; val |= state; gen9_write_dc_state(display, val); + if (!enable_dc6 && dc6_was_enabled) + intel_dmc_update_dc6_allowed_count(display, false); + power_domains->dc_state = val & mask; } diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index eb6b47ba0870..98f80a6c63e8 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -29,6 +29,7 @@ #include "i915_reg.h" #include "intel_de.h" #include "intel_display_rpm.h" +#include "intel_display_power_well.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" #include "intel_step.h" @@ -58,6 +59,10 @@ struct intel_dmc { const char *fw_path; u32 max_fw_size; /* bytes */ u32 version; + struct { + u32 dc5_start; + u32 count; + } dc6_allowed; struct dmc_fw_info { u32 mmio_count; i915_reg_t mmioaddr[20]; @@ -1233,6 +1238,44 @@ void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct DMC_VERSION_MINOR(snapshot->version)); } +void intel_dmc_update_dc6_allowed_count(struct intel_display *display, + bool start_tracking) +{ + struct intel_dmc *dmc = display_to_dmc(display); + u32 dc5_cur_count; + + if (DISPLAY_VER(dmc->display) < 14) + return; + + dc5_cur_count = intel_de_read(dmc->display, DG1_DMC_DEBUG_DC5_COUNT); + + if (!start_tracking) + dmc->dc6_allowed.count += dc5_cur_count - dmc->dc6_allowed.dc5_start; + + dmc->dc6_allowed.dc5_start = dc5_cur_count; +} + +static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *count) +{ + struct i915_power_domains *power_domains = &display->power.domains; + struct intel_dmc *dmc = display_to_dmc(display); + bool dc6_enabled; + + if (DISPLAY_VER(display) < 14) + return false; + + mutex_lock(&power_domains->lock); + dc6_enabled = intel_de_read(display, DC_STATE_EN) & + DC_STATE_EN_UPTO_DC6; + if (dc6_enabled) + intel_dmc_update_dc6_allowed_count(display, false); + + *count = dmc->dc6_allowed.count; + mutex_unlock(&power_domains->lock); + + return true; +} + static int 
intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) { struct intel_display *display = m->private; @@ -1240,6 +1283,7 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) struct intel_dmc *dmc = display_to_dmc(display); struct ref_tracker *wakeref; i915_reg_t dc5_reg, dc6_reg = INVALID_MMIO_REG; + u32 dc6_allowed_count; if (!HAS_DMC(display)) return -ENODEV; @@ -1288,7 +1332,11 @@ static int intel_dmc_debugfs_status_show(struct seq_file *m, void *unused) } seq_printf(m, "DC3 -> DC5 count: %d\n", intel_de_read(display, dc5_reg)); - if (i915_mmio_reg_valid(dc6_reg)) + + if (intel_dmc_get_dc6_allowed_count(display, &dc6_allowed_count)) + seq_printf(m, "DC5 -> DC6 allowed count: %d\n", + dc6_allowed_count); + else if (i915_mmio_reg_valid(dc6_reg)) seq_printf(m, "DC5 -> DC6 count: %d\n", intel_de_read(display, dc6_reg)); diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index 44cecef98e73..c78426eb4cd5 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -26,6 +26,7 @@ void intel_dmc_debugfs_register(struct intel_display *display); struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *display); void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p); +void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking); void assert_dmc_loaded(struct intel_display *display); From ff9cb6d2035c586ea7c8f1754d4409eec7a2d26d Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 3 Mar 2025 15:52:56 +0100 Subject: [PATCH 0194/1627] drm/udl: Unregister device before cleaning up on disconnect Disconnecting a DisplayLink device results in the following kernel error messages [ 93.041748] [drm:udl_urb_completion [udl]] *ERROR* udl_urb_completion - nonzero write bulk status received: -115 [ 93.055299] [drm:udl_submit_urb [udl]] *ERROR* usb_submit_urb error fffffffe [ 93.065363] [drm:udl_urb_completion [udl]] *ERROR* udl_urb_completion - nonzero write bulk status received: -115 [ 93.078207] [drm:udl_submit_urb [udl]] *ERROR* usb_submit_urb error fffffffe coming from KMS poll helpers. Shutting down poll helpers runs them one final time when the USB device is already gone. Run drm_dev_unplug() first in udl's USB disconnect handler. Udl's polling code already handles disconnects gracefully if the device has been marked as unplugged. Signed-off-by: Thomas Zimmermann Fixes: b1a981bd5576 ("drm/udl: drop drm_driver.release hook") Cc: dri-devel@lists.freedesktop.org Cc: # v5.8+ Reviewed-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250303145604.62962-2-tzimmermann@suse.de --- drivers/gpu/drm/udl/udl_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 3b56ca2f6eb8..9a66a1a6781f 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -110,9 +110,9 @@ static void udl_usb_disconnect(struct usb_interface *interface) { struct drm_device *dev = usb_get_intfdata(interface); + drm_dev_unplug(dev); drm_kms_helper_poll_fini(dev); udl_drop_usb(dev); - drm_dev_unplug(dev); } /* From 695a7f1c11355bbb50986423f11096421a466078 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 3 Mar 2025 15:52:57 +0100 Subject: [PATCH 0195/1627] drm/udl: Switch poll helpers to managed cleanup Call drmm_kms_helper_poll_init() to set up managed cleanup for connector polling. 
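The general pattern is that the managed variant is called once from modeset init and the explicit drm_kms_helper_poll_fini() disappears from the remove/disconnect path, since the cleanup now runs when the drm_device is released. A hedged sketch of that pattern (example_modeset_init() is a made-up driver hook, not udl code):

static int example_modeset_init(struct drm_device *dev)
{
	int ret;

	ret = drmm_mode_config_init(dev);
	if (ret)
		return ret;

	/* ... create planes, CRTCs, encoders and connectors ... */

	drm_mode_config_reset(dev);
	drmm_kms_helper_poll_init(dev); /* poll cleanup is now device-managed */

	return 0;
}
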
Signed-off-by: Thomas Zimmermann Reviewed-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250303145604.62962-3-tzimmermann@suse.de --- drivers/gpu/drm/udl/udl_drv.c | 1 - drivers/gpu/drm/udl/udl_main.c | 2 -- drivers/gpu/drm/udl/udl_modeset.c | 1 + 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index 9a66a1a6781f..d1bc3f165b27 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -111,7 +111,6 @@ static void udl_usb_disconnect(struct usb_interface *interface) struct drm_device *dev = usb_get_intfdata(interface); drm_dev_unplug(dev); - drm_kms_helper_poll_fini(dev); udl_drop_usb(dev); } diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c index cbb0169cc030..48260a821b8d 100644 --- a/drivers/gpu/drm/udl/udl_main.c +++ b/drivers/gpu/drm/udl/udl_main.c @@ -341,8 +341,6 @@ int udl_init(struct udl_device *udl) if (ret) goto err; - drm_kms_helper_poll_init(dev); - return 0; err: diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c index bbb04f98886a..3b65e93ea0ae 100644 --- a/drivers/gpu/drm/udl/udl_modeset.c +++ b/drivers/gpu/drm/udl/udl_modeset.c @@ -535,6 +535,7 @@ int udl_modeset_init(struct drm_device *dev) return ret; drm_mode_config_reset(dev); + drmm_kms_helper_poll_init(dev); return 0; } From f878af62c06c3e0f2b94f6bafd040400d8cfa4d9 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Mon, 3 Mar 2025 15:52:58 +0100 Subject: [PATCH 0196/1627] drm/probe-helper: Do not fail from drmm_kms_helper_poll_init() Failing to set up connector polling is not significant enough to fail device probing. Print a warning and return nothing from the init helper. This only affects the managed init function. The unmanaged init already never fails with an error. Signed-off-by: Thomas Zimmermann Reviewed-by: Patrik Jakobsson Link: https://patchwork.freedesktop.org/patch/msgid/20250303145604.62962-4-tzimmermann@suse.de --- drivers/gpu/drm/ast/ast_mode.c | 5 +---- drivers/gpu/drm/drm_probe_helper.c | 11 ++++++----- include/drm/drm_probe_helper.h | 2 +- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 4cac5c7f4547..c6df32278bcf 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -1035,10 +1035,7 @@ int ast_mode_config_init(struct ast_device *ast) return ret; drm_mode_config_reset(dev); - - ret = drmm_kms_helper_poll_init(dev); - if (ret) - return ret; + drmm_kms_helper_poll_init(dev); return 0; } diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index 7ba16323e7c2..6b3541159c0f 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -958,15 +958,16 @@ static void drm_kms_helper_poll_init_release(struct drm_device *dev, void *res) * cleaned up when the DRM device goes away. * * See drm_kms_helper_poll_init() for more information. - * - * Returns: - * 0 on success, or a negative errno code otherwise. 
*/ -int drmm_kms_helper_poll_init(struct drm_device *dev) +void drmm_kms_helper_poll_init(struct drm_device *dev) { + int ret; + drm_kms_helper_poll_init(dev); - return drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev); + ret = drmm_add_action_or_reset(dev, drm_kms_helper_poll_init_release, dev); + if (ret) + drm_warn(dev, "Connector status will not be updated, error %d\n", ret); } EXPORT_SYMBOL(drmm_kms_helper_poll_init); diff --git a/include/drm/drm_probe_helper.h b/include/drm/drm_probe_helper.h index d6ce7b218b77..840ae5f798c2 100644 --- a/include/drm/drm_probe_helper.h +++ b/include/drm/drm_probe_helper.h @@ -17,7 +17,7 @@ int drm_helper_probe_detect(struct drm_connector *connector, struct drm_modeset_acquire_ctx *ctx, bool force); -int drmm_kms_helper_poll_init(struct drm_device *dev); +void drmm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_init(struct drm_device *dev); void drm_kms_helper_poll_fini(struct drm_device *dev); bool drm_helper_hpd_irq_event(struct drm_device *dev); From 8e623137f112eb86ad949e3bcb6c0e5ae11a092a Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 24 Mar 2025 09:26:28 +0000 Subject: [PATCH 0197/1627] drm: Move some options to separate new Kconfig MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move some options out into a new debug specific kconfig file in order to make things a bit cleaner. Signed-off-by: Tvrtko Ursulin Cc: Christian König Cc: Danilo Krummrich Cc: Matthew Brost Cc: Philipp Stanner Acked-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250324092633.49746-2-tvrtko.ursulin@igalia.com --- drivers/gpu/drm/Kconfig | 110 ++-------------------------------- drivers/gpu/drm/Kconfig.debug | 104 ++++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+), 105 deletions(-) create mode 100644 drivers/gpu/drm/Kconfig.debug diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 9b4061231329..1c6fa662d6ed 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -26,6 +26,11 @@ menuconfig DRM details. You should also select and configure AGP (/dev/agpgart) support if it is available for your platform. +menu "DRM debugging options" +depends on DRM +source "drivers/gpu/drm/Kconfig.debug" +endmenu + if DRM config DRM_MIPI_DBI @@ -37,66 +42,6 @@ config DRM_MIPI_DSI bool depends on DRM -config DRM_DEBUG_MM - bool "Insert extra checks and debug info into the DRM range managers" - default n - depends on DRM - depends on STACKTRACE_SUPPORT - select STACKDEPOT - help - Enable allocation tracking of memory manager and leak detection on - shutdown. - - Recommended for driver developers only. - - If in doubt, say "N". - -config DRM_USE_DYNAMIC_DEBUG - bool "use dynamic debug to implement drm.debug" - default n - depends on BROKEN - depends on DRM - depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE - depends on JUMP_LABEL - help - Use dynamic-debug to avoid drm_debug_enabled() runtime overheads. - Due to callsite counts in DRM drivers (~4k in amdgpu) and 56 - bytes per callsite, the .data costs can be substantial, and - are therefore configurable. - -config DRM_KUNIT_TEST_HELPERS - tristate - depends on DRM && KUNIT - select DRM_KMS_HELPER - help - KUnit Helpers for KMS drivers. 
- -config DRM_KUNIT_TEST - tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS - depends on DRM && KUNIT && MMU - select DRM_BRIDGE_CONNECTOR - select DRM_BUDDY - select DRM_DISPLAY_DP_HELPER - select DRM_DISPLAY_HDMI_STATE_HELPER - select DRM_DISPLAY_HELPER - select DRM_EXEC - select DRM_EXPORT_FOR_TESTS if m - select DRM_GEM_SHMEM_HELPER - select DRM_KUNIT_TEST_HELPERS - select DRM_LIB_RANDOM - select PRIME_NUMBERS - default KUNIT_ALL_TESTS - help - This builds unit tests for DRM. This option is not useful for - distributions or general kernels, but only for kernel - developers working on DRM and associated drivers. - - For more information on KUnit and unit tests in general, - please refer to the KUnit documentation in - Documentation/dev-tools/kunit/. - - If in doubt, say "N". - config DRM_KMS_HELPER tristate depends on DRM @@ -248,23 +193,6 @@ config DRM_TTM GPU memory types. Will be enabled automatically if a device driver uses it. -config DRM_TTM_KUNIT_TEST - tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS - default n - depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST) - select DRM_TTM - select DRM_BUDDY - select DRM_EXPORT_FOR_TESTS if m - select DRM_KUNIT_TEST_HELPERS - default KUNIT_ALL_TESTS - help - Enables unit tests for TTM, a GPU memory manager subsystem used - to manage memory buffers. This option is mostly useful for kernel - developers. It depends on (UML || COMPILE_TEST) since no other driver - which uses TTM can be loaded while running the tests. - - If in doubt, say "N". - config DRM_EXEC tristate depends on DRM @@ -466,9 +394,6 @@ config DRM_HYPERV If M is selected the module will be called hyperv_drm. -config DRM_EXPORT_FOR_TESTS - bool - # Separate option as not all DRM drivers use it config DRM_PANEL_BACKLIGHT_QUIRKS tristate @@ -481,31 +406,6 @@ config DRM_PRIVACY_SCREEN bool default n -config DRM_WERROR - bool "Compile the drm subsystem with warnings as errors" - depends on DRM && EXPERT - depends on !WERROR - default n - help - A kernel build should not cause any compiler warnings, and this - enables the '-Werror' flag to enforce that rule in the drm subsystem. - - The drm subsystem enables more warnings than the kernel default, so - this config option is disabled by default. - - If in doubt, say N. - -config DRM_HEADER_TEST - bool "Ensure DRM headers are self-contained and pass kernel-doc" - depends on DRM && EXPERT - default n - help - Ensure the DRM subsystem headers both under drivers/gpu/drm and - include/drm compile, are self-contained, have header guards, and have - no kernel-doc warnings. - - If in doubt, say N. - endif # Separate option because drm_panel_orientation_quirks.c is shared with fbdev diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug new file mode 100644 index 000000000000..ddc080f37850 --- /dev/null +++ b/drivers/gpu/drm/Kconfig.debug @@ -0,0 +1,104 @@ +config DRM_USE_DYNAMIC_DEBUG + bool "use dynamic debug to implement drm.debug" + default n + depends on BROKEN + depends on DRM + depends on DYNAMIC_DEBUG || DYNAMIC_DEBUG_CORE + depends on JUMP_LABEL + help + Use dynamic-debug to avoid drm_debug_enabled() runtime overheads. + Due to callsite counts in DRM drivers (~4k in amdgpu) and 56 + bytes per callsite, the .data costs can be substantial, and + are therefore configurable. 
+ +config DRM_WERROR + bool "Compile the drm subsystem with warnings as errors" + depends on DRM && EXPERT + depends on !WERROR + default n + help + A kernel build should not cause any compiler warnings, and this + enables the '-Werror' flag to enforce that rule in the drm subsystem. + + The drm subsystem enables more warnings than the kernel default, so + this config option is disabled by default. + + If in doubt, say N. + +config DRM_HEADER_TEST + bool "Ensure DRM headers are self-contained and pass kernel-doc" + depends on DRM && EXPERT + default n + help + Ensure the DRM subsystem headers both under drivers/gpu/drm and + include/drm compile, are self-contained, have header guards, and have + no kernel-doc warnings. + + If in doubt, say N. + +config DRM_DEBUG_MM + bool "Insert extra checks and debug info into the DRM range managers" + default n + depends on DRM + depends on STACKTRACE_SUPPORT + select STACKDEPOT + help + Enable allocation tracking of memory manager and leak detection on + shutdown. + + Recommended for driver developers only. + + If in doubt, say "N". + +config DRM_KUNIT_TEST_HELPERS + tristate + depends on DRM && KUNIT + select DRM_KMS_HELPER + help + KUnit Helpers for KMS drivers. + +config DRM_KUNIT_TEST + tristate "KUnit tests for DRM" if !KUNIT_ALL_TESTS + depends on DRM && KUNIT && MMU + select DRM_BRIDGE_CONNECTOR + select DRM_BUDDY + select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_HDMI_STATE_HELPER + select DRM_DISPLAY_HELPER + select DRM_EXEC + select DRM_EXPORT_FOR_TESTS if m + select DRM_GEM_SHMEM_HELPER + select DRM_KUNIT_TEST_HELPERS + select DRM_LIB_RANDOM + select PRIME_NUMBERS + default KUNIT_ALL_TESTS + help + This builds unit tests for DRM. This option is not useful for + distributions or general kernels, but only for kernel + developers working on DRM and associated drivers. + + For more information on KUnit and unit tests in general, + please refer to the KUnit documentation in + Documentation/dev-tools/kunit/. + + If in doubt, say "N". + +config DRM_TTM_KUNIT_TEST + tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS + default n + depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST) + select DRM_TTM + select DRM_BUDDY + select DRM_EXPORT_FOR_TESTS if m + select DRM_KUNIT_TEST_HELPERS + default KUNIT_ALL_TESTS + help + Enables unit tests for TTM, a GPU memory manager subsystem used + to manage memory buffers. This option is mostly useful for kernel + developers. It depends on (UML || COMPILE_TEST) since no other driver + which uses TTM can be loaded while running the tests. + + If in doubt, say "N". + +config DRM_EXPORT_FOR_TESTS + bool From 5a99350794fec11faaed8a4b36a6931e696f672f Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 24 Mar 2025 09:26:29 +0000 Subject: [PATCH 0198/1627] drm/sched: Add scheduler unit testing infrastructure and some basic tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement a mock scheduler backend and add some basic test to exercise the core scheduler code paths. Mock backend (kind of like a very simple mock GPU) can either process jobs by tests manually advancing the "timeline" job at a time, or alternatively jobs can be configured with a time duration in which case they get completed asynchronously from the unit test code. Core scheduler classes are subclassed to support this mock implementation. The tests added are just a few simple submission patterns. 
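To give a feel for how the mock backend is driven, a minimal KUnit case could look roughly like the sketch below. It only uses entry points visible in this patch (drm_mock_sched_new(), drm_mock_sched_entity_new(), drm_mock_sched_job_new(), the job's duration_us/done fields, drm_mock_sched_entity_free() and drm_mock_sched_fini()) plus the regular drm_sched_job_arm()/drm_sched_entity_push_job() core API; the actual test cases live in tests_basic.c and may use dedicated helpers instead:

static void drm_sched_basic_submit_sketch(struct kunit *test)
{
	struct drm_mock_scheduler *sched = drm_mock_sched_new(test);
	struct drm_mock_sched_entity *entity =
		drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL, sched);
	struct drm_mock_sched_job *job = drm_mock_sched_job_new(test, entity);
	unsigned long remaining;

	/* Let the mock "hardware" complete the job ~1ms after run_job(). */
	job->duration_us = 1000;

	drm_sched_job_arm(&job->base);
	drm_sched_entity_push_job(&job->base);

	/* job->done is completed from the mock timeline once the job signals. */
	remaining = wait_for_completion_timeout(&job->done, HZ);
	KUNIT_EXPECT_NE(test, remaining, 0UL);

	drm_mock_sched_entity_free(entity);
	drm_mock_sched_fini(sched);
}
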
Signed-off-by: Tvrtko Ursulin Suggested-by: Philipp Stanner Cc: Christian König Cc: Danilo Krummrich Cc: Matthew Brost Cc: Philipp Stanner Acked-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250324092633.49746-3-tvrtko.ursulin@igalia.com --- drivers/gpu/drm/Kconfig.debug | 12 + drivers/gpu/drm/scheduler/.kunitconfig | 12 + drivers/gpu/drm/scheduler/Makefile | 2 + drivers/gpu/drm/scheduler/tests/Makefile | 7 + .../gpu/drm/scheduler/tests/mock_scheduler.c | 354 ++++++++++++++++++ drivers/gpu/drm/scheduler/tests/sched_tests.h | 224 +++++++++++ drivers/gpu/drm/scheduler/tests/tests_basic.c | 198 ++++++++++ 7 files changed, 809 insertions(+) create mode 100644 drivers/gpu/drm/scheduler/.kunitconfig create mode 100644 drivers/gpu/drm/scheduler/tests/Makefile create mode 100644 drivers/gpu/drm/scheduler/tests/mock_scheduler.c create mode 100644 drivers/gpu/drm/scheduler/tests/sched_tests.h create mode 100644 drivers/gpu/drm/scheduler/tests/tests_basic.c diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug index ddc080f37850..c493743e8aca 100644 --- a/drivers/gpu/drm/Kconfig.debug +++ b/drivers/gpu/drm/Kconfig.debug @@ -100,5 +100,17 @@ config DRM_TTM_KUNIT_TEST If in doubt, say "N". +config DRM_SCHED_KUNIT_TEST + tristate "KUnit tests for the DRM scheduler" if !KUNIT_ALL_TESTS + select DRM_SCHED + depends on DRM && KUNIT + default KUNIT_ALL_TESTS + help + Choose this option to build unit tests for the DRM scheduler. + + Recommended for driver developers only. + + If in doubt, say "N". + config DRM_EXPORT_FOR_TESTS bool diff --git a/drivers/gpu/drm/scheduler/.kunitconfig b/drivers/gpu/drm/scheduler/.kunitconfig new file mode 100644 index 000000000000..cece53609fcf --- /dev/null +++ b/drivers/gpu/drm/scheduler/.kunitconfig @@ -0,0 +1,12 @@ +CONFIG_KUNIT=y +CONFIG_DRM=y +CONFIG_DRM_SCHED_KUNIT_TEST=y +CONFIG_EXPERT=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_ATOMIC_SLEEP=y +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +CONFIG_LOCKDEP=y +CONFIG_DEBUG_LOCKDEP=y +CONFIG_DEBUG_LIST=y diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile index 53863621829f..6e13e4c63e9d 100644 --- a/drivers/gpu/drm/scheduler/Makefile +++ b/drivers/gpu/drm/scheduler/Makefile @@ -23,3 +23,5 @@ gpu-sched-y := sched_main.o sched_fence.o sched_entity.o obj-$(CONFIG_DRM_SCHED) += gpu-sched.o + +obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += tests/ diff --git a/drivers/gpu/drm/scheduler/tests/Makefile b/drivers/gpu/drm/scheduler/tests/Makefile new file mode 100644 index 000000000000..5bf707bad373 --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 + +drm-sched-tests-y := \ + mock_scheduler.o \ + tests_basic.o + +obj-$(CONFIG_DRM_SCHED_KUNIT_TEST) += drm-sched-tests.o diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c new file mode 100644 index 000000000000..d039f873cc11 --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Valve Corporation */ + +#include "sched_tests.h" + +/* + * Here we implement the mock "GPU" (or the scheduler backend) which is used by + * the DRM scheduler unit tests in order to exercise the core functionality. + * + * Test cases are implemented in a separate file. 
+ */ + +/** + * drm_mock_sched_entity_new - Create a new mock scheduler entity + * + * @test: KUnit test owning the entity + * @priority: Scheduling priority + * @sched: Mock scheduler on which the entity can be scheduled + * + * Returns: New mock scheduler entity with allocation managed by the test + */ +struct drm_mock_sched_entity * +drm_mock_sched_entity_new(struct kunit *test, + enum drm_sched_priority priority, + struct drm_mock_scheduler *sched) +{ + struct drm_mock_sched_entity *entity; + struct drm_gpu_scheduler *drm_sched; + int ret; + + entity = kunit_kzalloc(test, sizeof(*entity), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, entity); + + drm_sched = &sched->base; + ret = drm_sched_entity_init(&entity->base, + priority, + &drm_sched, 1, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + entity->test = test; + + return entity; +} + +/** + * drm_mock_sched_entity_free - Destroys a mock scheduler entity + * + * @entity: Entity to destroy + * + * To be used from the test cases once done with the entity. + */ +void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity) +{ + drm_sched_entity_destroy(&entity->base); +} + +static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job) +{ + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(job->base.sched); + + lockdep_assert_held(&sched->lock); + + job->flags |= DRM_MOCK_SCHED_JOB_DONE; + list_move_tail(&job->link, &sched->done_list); + dma_fence_signal(&job->hw_fence); + complete(&job->done); +} + +static enum hrtimer_restart +drm_mock_sched_job_signal_timer(struct hrtimer *hrtimer) +{ + struct drm_mock_sched_job *job = + container_of(hrtimer, typeof(*job), timer); + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(job->base.sched); + struct drm_mock_sched_job *next; + ktime_t now = ktime_get(); + unsigned long flags; + LIST_HEAD(signal); + + spin_lock_irqsave(&sched->lock, flags); + list_for_each_entry_safe(job, next, &sched->job_list, link) { + if (!job->duration_us) + break; + + if (ktime_before(now, job->finish_at)) + break; + + sched->hw_timeline.cur_seqno = job->hw_fence.seqno; + drm_mock_sched_job_complete(job); + } + spin_unlock_irqrestore(&sched->lock, flags); + + return HRTIMER_NORESTART; +} + +/** + * drm_mock_sched_job_new - Create a new mock scheduler job + * + * @test: KUnit test owning the job + * @entity: Scheduler entity of the job + * + * Returns: New mock scheduler job with allocation managed by the test + */ +struct drm_mock_sched_job * +drm_mock_sched_job_new(struct kunit *test, + struct drm_mock_sched_entity *entity) +{ + struct drm_mock_sched_job *job; + int ret; + + job = kunit_kzalloc(test, sizeof(*job), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, job); + + ret = drm_sched_job_init(&job->base, + &entity->base, + 1, + NULL); + KUNIT_ASSERT_EQ(test, ret, 0); + + job->test = test; + + init_completion(&job->done); + spin_lock_init(&job->lock); + INIT_LIST_HEAD(&job->link); + hrtimer_init(&job->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + job->timer.function = drm_mock_sched_job_signal_timer; + + return job; +} + +static const char *drm_mock_sched_hw_fence_driver_name(struct dma_fence *fence) +{ + return "drm_mock_sched"; +} + +static const char * +drm_mock_sched_hw_fence_timeline_name(struct dma_fence *fence) +{ + struct drm_mock_sched_job *job = + container_of(fence, typeof(*job), hw_fence); + + return (const char *)job->base.sched->name; +} + +static void drm_mock_sched_hw_fence_release(struct dma_fence *fence) +{ + struct drm_mock_sched_job *job = + container_of(fence, 
typeof(*job), hw_fence); + + hrtimer_cancel(&job->timer); + + /* Containing job is freed by the kunit framework */ +} + +static const struct dma_fence_ops drm_mock_sched_hw_fence_ops = { + .get_driver_name = drm_mock_sched_hw_fence_driver_name, + .get_timeline_name = drm_mock_sched_hw_fence_timeline_name, + .release = drm_mock_sched_hw_fence_release, +}; + +static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(sched_job->sched); + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + + dma_fence_init(&job->hw_fence, + &drm_mock_sched_hw_fence_ops, + &job->lock, + sched->hw_timeline.context, + atomic_inc_return(&sched->hw_timeline.next_seqno)); + + dma_fence_get(&job->hw_fence); /* Reference for the job_list */ + + spin_lock_irq(&sched->lock); + if (job->duration_us) { + ktime_t prev_finish_at = 0; + + if (!list_empty(&sched->job_list)) { + struct drm_mock_sched_job *prev = + list_last_entry(&sched->job_list, typeof(*prev), + link); + + prev_finish_at = prev->finish_at; + } + + if (!prev_finish_at) + prev_finish_at = ktime_get(); + + job->finish_at = ktime_add_us(prev_finish_at, job->duration_us); + } + list_add_tail(&job->link, &sched->job_list); + if (job->finish_at) + hrtimer_start(&job->timer, job->finish_at, HRTIMER_MODE_ABS); + spin_unlock_irq(&sched->lock); + + return &job->hw_fence; +} + +static enum drm_gpu_sched_stat +mock_sched_timedout_job(struct drm_sched_job *sched_job) +{ + return DRM_GPU_SCHED_STAT_ENODEV; +} + +static void mock_sched_free_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_scheduler *sched = + drm_sched_to_mock_sched(sched_job->sched); + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + unsigned long flags; + + /* Remove from the scheduler done list. */ + spin_lock_irqsave(&sched->lock, flags); + list_del(&job->link); + spin_unlock_irqrestore(&sched->lock, flags); + dma_fence_put(&job->hw_fence); + + drm_sched_job_cleanup(sched_job); + + /* Mock job itself is freed by the kunit framework. */ +} + +static const struct drm_sched_backend_ops drm_mock_scheduler_ops = { + .run_job = mock_sched_run_job, + .timedout_job = mock_sched_timedout_job, + .free_job = mock_sched_free_job +}; + +/** + * drm_mock_sched_new - Create a new mock scheduler + * + * @test: KUnit test owning the job + * + * Returns: New mock scheduler with allocation managed by the test + */ +struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test) +{ + struct drm_sched_init_args args = { + .ops = &drm_mock_scheduler_ops, + .num_rqs = DRM_SCHED_PRIORITY_COUNT, + .credit_limit = U32_MAX, + .hang_limit = 1, + .timeout = MAX_SCHEDULE_TIMEOUT, + .name = "drm-mock-scheduler", + }; + struct drm_mock_scheduler *sched; + int ret; + + sched = kunit_kzalloc(test, sizeof(*sched), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, sched); + + ret = drm_sched_init(&sched->base, &args); + KUNIT_ASSERT_EQ(test, ret, 0); + + sched->test = test; + sched->hw_timeline.context = dma_fence_context_alloc(1); + atomic_set(&sched->hw_timeline.next_seqno, 0); + INIT_LIST_HEAD(&sched->job_list); + INIT_LIST_HEAD(&sched->done_list); + spin_lock_init(&sched->lock); + + return sched; +} + +/** + * drm_mock_sched_fini - Destroys a mock scheduler + * + * @sched: Scheduler to destroy + * + * To be used from the test cases once done with the scheduler. 
+ */ +void drm_mock_sched_fini(struct drm_mock_scheduler *sched) +{ + struct drm_mock_sched_job *job, *next; + unsigned long flags; + LIST_HEAD(list); + + drm_sched_wqueue_stop(&sched->base); + + /* Force complete all unfinished jobs. */ + spin_lock_irqsave(&sched->lock, flags); + list_for_each_entry_safe(job, next, &sched->job_list, link) + list_move_tail(&job->link, &list); + spin_unlock_irqrestore(&sched->lock, flags); + + list_for_each_entry(job, &list, link) + hrtimer_cancel(&job->timer); + + spin_lock_irqsave(&sched->lock, flags); + list_for_each_entry_safe(job, next, &list, link) + drm_mock_sched_job_complete(job); + spin_unlock_irqrestore(&sched->lock, flags); + + /* + * Free completed jobs and jobs not yet processed by the DRM scheduler + * free worker. + */ + spin_lock_irqsave(&sched->lock, flags); + list_for_each_entry_safe(job, next, &sched->done_list, link) + list_move_tail(&job->link, &list); + spin_unlock_irqrestore(&sched->lock, flags); + + list_for_each_entry_safe(job, next, &list, link) + mock_sched_free_job(&job->base); + + drm_sched_fini(&sched->base); +} + +/** + * drm_mock_sched_advance - Advances the mock scheduler timeline + * + * @sched: Scheduler timeline to advance + * @num: By how many jobs to advance + * + * Advancing the scheduler timeline by a number of seqnos will trigger + * signalling of the hardware fences and unlinking the jobs from the internal + * scheduler tracking. + * + * This can be used from test cases which want complete control of the simulated + * job execution timing. For example submitting one job with no set duration + * would never complete it before test cases advances the timeline by one. + */ +unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched, + unsigned int num) +{ + struct drm_mock_sched_job *job, *next; + unsigned int found = 0; + unsigned long flags; + LIST_HEAD(signal); + + spin_lock_irqsave(&sched->lock, flags); + if (WARN_ON_ONCE(sched->hw_timeline.cur_seqno + num < + sched->hw_timeline.cur_seqno)) + goto unlock; + sched->hw_timeline.cur_seqno += num; + list_for_each_entry_safe(job, next, &sched->job_list, link) { + if (sched->hw_timeline.cur_seqno < job->hw_fence.seqno) + break; + + drm_mock_sched_job_complete(job); + found++; + } +unlock: + spin_unlock_irqrestore(&sched->lock, flags); + + return found; +} + +MODULE_DESCRIPTION("DRM mock scheduler and tests"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h new file mode 100644 index 000000000000..31aaba3443fa --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2025 Valve Corporation */ + +#ifndef _SCHED_TESTS_H_ +#define _SCHED_TESTS_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * DOC: Mock DRM scheduler data structures + * + * drm_mock_* data structures are used to implement a mock "GPU". + * + * They subclass the core DRM scheduler objects and add their data on top, which + * enables tracking the submitted jobs and simulating their execution with the + * attributes as specified by the test case. 
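+ *
+ * Jobs are completed either by the test case manually advancing the mock
+ * "hardware" timeline with drm_mock_sched_advance(), or asynchronously once a
+ * simulated execution time configured with
+ * drm_mock_sched_job_set_duration_us() has elapsed.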
+ */ + +/** + * struct drm_mock_scheduler - implements a trivial mock GPU execution engine + * + * @base: DRM scheduler base class + * @test: Backpointer to owning the kunit test case + * @lock: Lock to protect the simulated @hw_timeline, @job_list and @done_list + * @job_list: List of jobs submitted to the mock GPU + * @done_list: List of jobs completed by the mock GPU + * @hw_timeline: Simulated hardware timeline has a @context, @next_seqno and + * @cur_seqno for implementing a struct dma_fence signaling the + * simulated job completion. + * + * Trivial mock GPU execution engine tracks submitted jobs and enables + * completing them strictly in submission order. + */ +struct drm_mock_scheduler { + struct drm_gpu_scheduler base; + + struct kunit *test; + + spinlock_t lock; + struct list_head job_list; + struct list_head done_list; + + struct { + u64 context; + atomic_t next_seqno; + unsigned int cur_seqno; + } hw_timeline; +}; + +/** + * struct drm_mock_sched_entity - implements a mock GPU sched entity + * + * @base: DRM scheduler entity base class + * @test: Backpointer to owning the kunit test case + * + * Mock GPU sched entity is used by the test cases to submit jobs to the mock + * scheduler. + */ +struct drm_mock_sched_entity { + struct drm_sched_entity base; + + struct kunit *test; +}; + +/** + * struct drm_mock_sched_job - implements a mock GPU job + * + * @base: DRM sched job base class + * @done: Completion signaling job completion. + * @flags: Flags designating job state. + * @link: List head element used by job tracking by the drm_mock_scheduler + * @timer: Timer used for simulating job execution duration + * @duration_us: Simulated job duration in micro seconds, or zero if in manual + * timeline advance mode + * @finish_at: Absolute time when the jobs with set duration will complete + * @lock: Lock used for @hw_fence + * @hw_fence: Fence returned to DRM scheduler as the hardware fence + * @test: Backpointer to owning the kunit test case + * + * Mock GPU sched job is used by the test cases to submit jobs to the mock + * scheduler. 
+ */ +struct drm_mock_sched_job { + struct drm_sched_job base; + + struct completion done; + +#define DRM_MOCK_SCHED_JOB_DONE 0x1 + unsigned long flags; + + struct list_head link; + struct hrtimer timer; + + unsigned int duration_us; + ktime_t finish_at; + + spinlock_t lock; + struct dma_fence hw_fence; + + struct kunit *test; +}; + +static inline struct drm_mock_scheduler * +drm_sched_to_mock_sched(struct drm_gpu_scheduler *sched) +{ + return container_of(sched, struct drm_mock_scheduler, base); +}; + +static inline struct drm_mock_sched_entity * +drm_sched_entity_to_mock_entity(struct drm_sched_entity *sched_entity) +{ + return container_of(sched_entity, struct drm_mock_sched_entity, base); +}; + +static inline struct drm_mock_sched_job * +drm_sched_job_to_mock_job(struct drm_sched_job *sched_job) +{ + return container_of(sched_job, struct drm_mock_sched_job, base); +}; + +struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test); +void drm_mock_sched_fini(struct drm_mock_scheduler *sched); +unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched, + unsigned int num); + +struct drm_mock_sched_entity * +drm_mock_sched_entity_new(struct kunit *test, + enum drm_sched_priority priority, + struct drm_mock_scheduler *sched); +void drm_mock_sched_entity_free(struct drm_mock_sched_entity *entity); + +struct drm_mock_sched_job * +drm_mock_sched_job_new(struct kunit *test, + struct drm_mock_sched_entity *entity); + +/** + * drm_mock_sched_job_submit - Arm and submit a job in one go + * + * @job: Job to arm and submit + */ +static inline void drm_mock_sched_job_submit(struct drm_mock_sched_job *job) +{ + drm_sched_job_arm(&job->base); + drm_sched_entity_push_job(&job->base); +} + +/** + * drm_mock_sched_job_set_duration_us - Set a job duration + * + * @job: Job to set the duration for + * @duration_us: Duration in micro seconds + * + * Jobs with duration set will be automatically completed by the mock scheduler + * as the timeline progresses, unless a job without a set duration is + * encountered in the timelime in which case calling drm_mock_sched_advance() + * will be required to bump the timeline. 
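+ *
+ * Illustrative use, assuming a job obtained from drm_mock_sched_job_new():
+ *
+ *     drm_mock_sched_job_set_duration_us(job, 1000);
+ *     drm_mock_sched_job_submit(job);
+ *     drm_mock_sched_job_wait_finished(job, HZ);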
+ */ +static inline void +drm_mock_sched_job_set_duration_us(struct drm_mock_sched_job *job, + unsigned int duration_us) +{ + job->duration_us = duration_us; +} + +/** + * drm_mock_sched_job_is_finished - Check if a job is finished + * + * @job: Job to check + * + * Returns: true if finished + */ +static inline bool +drm_mock_sched_job_is_finished(struct drm_mock_sched_job *job) +{ + return job->flags & DRM_MOCK_SCHED_JOB_DONE; +} + +/** + * drm_mock_sched_job_wait_finished - Wait until a job is finished + * + * @job: Job to wait for + * @timeout: Wait time in jiffies + * + * Returns: true if finished within the timeout provided, otherwise false + */ +static inline bool +drm_mock_sched_job_wait_finished(struct drm_mock_sched_job *job, long timeout) +{ + if (job->flags & DRM_MOCK_SCHED_JOB_DONE) + return true; + + return wait_for_completion_timeout(&job->done, timeout) != 0; +} + +/** + * drm_mock_sched_job_wait_scheduled - Wait until a job is scheduled + * + * @job: Job to wait for + * @timeout: Wait time in jiffies + * + * Returns: true if scheduled within the timeout provided, otherwise false + */ +static inline bool +drm_mock_sched_job_wait_scheduled(struct drm_mock_sched_job *job, long timeout) +{ + KUNIT_ASSERT_EQ(job->test, job->flags & DRM_MOCK_SCHED_JOB_DONE, 0); + + return dma_fence_wait_timeout(&job->base.s_fence->scheduled, + false, + timeout) != 0; +} + +#endif diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c new file mode 100644 index 000000000000..c06672e13cf6 --- /dev/null +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2025 Valve Corporation */ + +#include "sched_tests.h" + +/* + * DRM scheduler basic tests should check the basic functional correctness of + * the scheduler, including some very light smoke testing. More targeted tests, + * for example focusing on testing specific bugs and other more complicated test + * scenarios, should be implemented in separate source units. + */ + +static int drm_sched_basic_init(struct kunit *test) +{ + test->priv = drm_mock_sched_new(test); + + return 0; +} + +static void drm_sched_basic_exit(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + + drm_mock_sched_fini(sched); +} + +static void drm_sched_basic_submit(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + unsigned int i; + bool done; + + /* + * Submit one job to the scheduler and verify that it gets scheduled + * and completed only when the mock hw backend processes it. 
+ */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, HZ / 2); + KUNIT_ASSERT_FALSE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); +} + +struct drm_sched_basic_params { + const char *description; + unsigned int queue_depth; + unsigned int num_entities; + unsigned int job_us; + bool dep_chain; +}; + +static const struct drm_sched_basic_params drm_sched_basic_cases[] = { + { + .description = "A queue of jobs in a single entity", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 1, + }, + { + .description = "A chain of dependent jobs across multiple entities", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 1, + .dep_chain = true, + }, + { + .description = "Multiple independent job queues", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 4, + }, + { + .description = "Multiple inter-dependent job queues", + .queue_depth = 100, + .job_us = 1000, + .num_entities = 4, + .dep_chain = true, + }, +}; + +static void +drm_sched_basic_desc(const struct drm_sched_basic_params *params, char *desc) +{ + strscpy(desc, params->description, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(drm_sched_basic, drm_sched_basic_cases, drm_sched_basic_desc); + +static void drm_sched_basic_test(struct kunit *test) +{ + const struct drm_sched_basic_params *params = test->param_value; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job, *prev = NULL; + struct drm_mock_sched_entity **entity; + unsigned int i, cur_ent = 0; + bool done; + + entity = kunit_kcalloc(test, params->num_entities, sizeof(*entity), + GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, entity); + + for (i = 0; i < params->num_entities; i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + for (i = 0; i < params->queue_depth; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= params->num_entities; + drm_mock_sched_job_set_duration_us(job, params->job_us); + if (params->dep_chain && prev) + drm_sched_job_add_dependency(&job->base, + dma_fence_get(&prev->base.s_fence->finished)); + drm_mock_sched_job_submit(job); + prev = job; + } + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + for (i = 0; i < params->num_entities; i++) + drm_mock_sched_entity_free(entity[i]); +} + +static void drm_sched_basic_entity_cleanup(struct kunit *test) +{ + struct drm_mock_sched_job *job, *mid, *prev = NULL; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity[4]; + const unsigned int qd = 100; + unsigned int i, cur_ent = 0; + bool done; + + /* + * Submit a queue of jobs across different entities with an explicit + * chain of dependencies between them and trigger entity cleanup while + * the queue is still being processed. 
+ */ + + for (i = 0; i < ARRAY_SIZE(entity); i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + if (prev) + drm_sched_job_add_dependency(&job->base, + dma_fence_get(&prev->base.s_fence->finished)); + drm_mock_sched_job_submit(job); + if (i == qd / 2) + mid = job; + prev = job; + } + + done = drm_mock_sched_job_wait_finished(mid, HZ); + KUNIT_ASSERT_TRUE(test, done); + + /* Exit with half of the queue still pending to be executed. */ + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static struct kunit_case drm_sched_basic_tests[] = { + KUNIT_CASE(drm_sched_basic_submit), + KUNIT_CASE_PARAM(drm_sched_basic_test, drm_sched_basic_gen_params), + KUNIT_CASE(drm_sched_basic_entity_cleanup), + {} +}; + +static struct kunit_suite drm_sched_basic = { + .name = "drm_sched_basic_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_basic_tests, +}; + +kunit_test_suite(drm_sched_basic); From 53e65974924ec3e66b2cdf71780f089b338fed33 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 24 Mar 2025 09:26:30 +0000 Subject: [PATCH 0199/1627] drm/sched: Add a simple timeout test MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a very simple timeout test which submits a single job and verifies that the timeout handling will run if the backend failed to complete the job in time. Signed-off-by: Tvrtko Ursulin Cc: Christian König Cc: Danilo Krummrich Cc: Matthew Brost Cc: Philipp Stanner Acked-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250324092633.49746-4-tvrtko.ursulin@igalia.com --- .../gpu/drm/scheduler/tests/mock_scheduler.c | 11 +++- drivers/gpu/drm/scheduler/tests/sched_tests.h | 4 +- drivers/gpu/drm/scheduler/tests/tests_basic.c | 64 ++++++++++++++++++- 3 files changed, 73 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c index d039f873cc11..61efc96e6e41 100644 --- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c +++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c @@ -203,7 +203,11 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job) static enum drm_gpu_sched_stat mock_sched_timedout_job(struct drm_sched_job *sched_job) { - return DRM_GPU_SCHED_STAT_ENODEV; + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + + job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT; + + return DRM_GPU_SCHED_STAT_NOMINAL; } static void mock_sched_free_job(struct drm_sched_job *sched_job) @@ -234,17 +238,18 @@ static const struct drm_sched_backend_ops drm_mock_scheduler_ops = { * drm_mock_sched_new - Create a new mock scheduler * * @test: KUnit test owning the job + * @timeout: Job timeout to set * * Returns: New mock scheduler with allocation managed by the test */ -struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test) +struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout) { struct drm_sched_init_args args = { .ops = &drm_mock_scheduler_ops, .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = U32_MAX, .hang_limit = 1, - .timeout = MAX_SCHEDULE_TIMEOUT, + .timeout = timeout, .name = "drm-mock-scheduler", }; struct drm_mock_scheduler *sched; diff --git 
a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h index 31aaba3443fa..27caf8285fb7 100644 --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -97,6 +97,7 @@ struct drm_mock_sched_job { struct completion done; #define DRM_MOCK_SCHED_JOB_DONE 0x1 +#define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2 unsigned long flags; struct list_head link; @@ -129,7 +130,8 @@ drm_sched_job_to_mock_job(struct drm_sched_job *sched_job) return container_of(sched_job, struct drm_mock_sched_job, base); }; -struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test); +struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, + long timeout); void drm_mock_sched_fini(struct drm_mock_scheduler *sched); unsigned int drm_mock_sched_advance(struct drm_mock_scheduler *sched, unsigned int num); diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c index c06672e13cf6..0e1fa4767b0d 100644 --- a/drivers/gpu/drm/scheduler/tests/tests_basic.c +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -12,7 +12,7 @@ static int drm_sched_basic_init(struct kunit *test) { - test->priv = drm_mock_sched_new(test); + test->priv = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); return 0; } @@ -24,6 +24,13 @@ static void drm_sched_basic_exit(struct kunit *test) drm_mock_sched_fini(sched); } +static int drm_sched_timeout_init(struct kunit *test) +{ + test->priv = drm_mock_sched_new(test, HZ); + + return 0; +} + static void drm_sched_basic_submit(struct kunit *test) { struct drm_mock_scheduler *sched = test->priv; @@ -195,4 +202,57 @@ static struct kunit_suite drm_sched_basic = { .test_cases = drm_sched_basic_tests, }; -kunit_test_suite(drm_sched_basic); +static void drm_sched_basic_timeout(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + bool done; + + /* + * Submit a single job against a scheduler with the timeout configured + * and verify that the timeout handling will run if the backend fails + * to complete it in time. + */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, HZ / 2); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, + 0); + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, + DRM_MOCK_SCHED_JOB_TIMEDOUT); + + drm_mock_sched_entity_free(entity); +} + +static struct kunit_case drm_sched_timeout_tests[] = { + KUNIT_CASE(drm_sched_basic_timeout), + {} +}; + +static struct kunit_suite drm_sched_timeout = { + .name = "drm_sched_basic_timeout_tests", + .init = drm_sched_timeout_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_timeout_tests, +}; + +kunit_test_suites(&drm_sched_basic, + &drm_sched_timeout); From 7b765cda7ae96c13930285224d1cf8d7f4bb6027 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 24 Mar 2025 09:26:31 +0000 Subject: [PATCH 0200/1627] drm/sched: Add basic priority tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add some basic tests for exercising entity priority handling. 
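For orientation, the tests take roughly the following shape (a condensed
sketch, not the exact code): one entity is created per priority level, a
queue of short jobs is spread across them round-robin, and the second test
additionally keeps rotating entity priorities at runtime until the queue
drains:

        for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++)
                entity[p] = drm_mock_sched_entity_new(test, p, sched);

        /* ... submit the job queue round-robin across the entities ... */

        drm_sched_entity_set_priority(&entity[cur_ent]->base,
                                      (entity[cur_ent]->base.priority + 1) %
                                      DRM_SCHED_PRIORITY_COUNT);
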
Signed-off-by: Tvrtko Ursulin Cc: Christian König Cc: Danilo Krummrich Cc: Matthew Brost Cc: Philipp Stanner Acked-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250324092633.49746-5-tvrtko.ursulin@igalia.com --- drivers/gpu/drm/scheduler/tests/tests_basic.c | 95 ++++++++++++++++++- 1 file changed, 94 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c index 0e1fa4767b0d..10378b7ca457 100644 --- a/drivers/gpu/drm/scheduler/tests/tests_basic.c +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2025 Valve Corporation */ +#include + #include "sched_tests.h" /* @@ -254,5 +256,96 @@ static struct kunit_suite drm_sched_timeout = { .test_cases = drm_sched_timeout_tests, }; +static void drm_sched_priorities(struct kunit *test) +{ + struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT]; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job; + const unsigned int qd = 100; + unsigned int i, cur_ent = 0; + enum drm_sched_priority p; + bool done; + + /* + * Submit a bunch of jobs against entities configured with different + * priorities. + */ + + BUILD_BUG_ON(DRM_SCHED_PRIORITY_KERNEL > DRM_SCHED_PRIORITY_LOW); + BUILD_BUG_ON(ARRAY_SIZE(entity) != DRM_SCHED_PRIORITY_COUNT); + + for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++) + entity[p] = drm_mock_sched_entity_new(test, p, sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static void drm_sched_change_priority(struct kunit *test) +{ + struct drm_mock_sched_entity *entity[DRM_SCHED_PRIORITY_COUNT]; + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_job *job; + const unsigned int qd = 1000; + unsigned int i, cur_ent = 0; + enum drm_sched_priority p; + + /* + * Submit a bunch of jobs against entities configured with different + * priorities and while waiting for them to complete, periodically keep + * changing their priorities. + * + * We set up the queue-depth (qd) and job duration so the priority + * changing loop has some time to interact with submissions to the + * backend and job completions as they progress. 
+ */ + + for (p = DRM_SCHED_PRIORITY_KERNEL; p <= DRM_SCHED_PRIORITY_LOW; p++) + entity[p] = drm_mock_sched_entity_new(test, p, sched); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + do { + drm_sched_entity_set_priority(&entity[cur_ent]->base, + (entity[cur_ent]->base.priority + 1) % + DRM_SCHED_PRIORITY_COUNT); + cur_ent++; + cur_ent %= ARRAY_SIZE(entity); + usleep_range(200, 500); + } while (!drm_mock_sched_job_is_finished(job)); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); +} + +static struct kunit_case drm_sched_priority_tests[] = { + KUNIT_CASE(drm_sched_priorities), + KUNIT_CASE(drm_sched_change_priority), + {} +}; + +static struct kunit_suite drm_sched_priority = { + .name = "drm_sched_basic_priority_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_priority_tests, +}; + kunit_test_suites(&drm_sched_basic, - &drm_sched_timeout); + &drm_sched_timeout, + &drm_sched_priority); From c85fc5db76e51693e8ce9b700f32d88e04e5081c Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 24 Mar 2025 09:26:32 +0000 Subject: [PATCH 0201/1627] drm/sched: Add a basic test for modifying entities scheduler list MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a basic test for exercising modifying the entities scheduler list at runtime. Signed-off-by: Tvrtko Ursulin Cc: Christian König Cc: Danilo Krummrich Cc: Matthew Brost Cc: Philipp Stanner Acked-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250324092633.49746-6-tvrtko.ursulin@igalia.com --- drivers/gpu/drm/scheduler/tests/tests_basic.c | 69 ++++++++++++++++++- 1 file changed, 68 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c index 10378b7ca457..996cac00bb52 100644 --- a/drivers/gpu/drm/scheduler/tests/tests_basic.c +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -346,6 +346,73 @@ static struct kunit_suite drm_sched_priority = { .test_cases = drm_sched_priority_tests, }; +static void drm_sched_test_modify_sched(struct kunit *test) +{ + unsigned int i, cur_ent = 0, cur_sched = 0; + struct drm_mock_sched_entity *entity[13]; + struct drm_mock_scheduler *sched[3]; + struct drm_mock_sched_job *job; + const unsigned int qd = 1000; + + /* + * Submit a bunch of jobs against entities configured with different + * schedulers and while waiting for them to complete, periodically keep + * changing schedulers associated with each entity. + * + * We set up the queue-depth (qd) and job duration so the sched modify + * loop has some time to interact with submissions to the backend and + * job completions as they progress. + * + * For the number of schedulers and entities we use primes in order to + * perturb the entity->sched assignments with less of a regular pattern. 
+ */ + + for (i = 0; i < ARRAY_SIZE(sched); i++) + sched[i] = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + entity[i] = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched[i % ARRAY_SIZE(sched)]); + + for (i = 0; i < qd; i++) { + job = drm_mock_sched_job_new(test, entity[cur_ent++]); + cur_ent %= ARRAY_SIZE(entity); + drm_mock_sched_job_set_duration_us(job, 1000); + drm_mock_sched_job_submit(job); + } + + do { + struct drm_gpu_scheduler *modify; + + usleep_range(200, 500); + cur_ent++; + cur_ent %= ARRAY_SIZE(entity); + cur_sched++; + cur_sched %= ARRAY_SIZE(sched); + modify = &sched[cur_sched]->base; + drm_sched_entity_modify_sched(&entity[cur_ent]->base, &modify, + 1); + } while (!drm_mock_sched_job_is_finished(job)); + + for (i = 0; i < ARRAY_SIZE(entity); i++) + drm_mock_sched_entity_free(entity[i]); + + for (i = 0; i < ARRAY_SIZE(sched); i++) + drm_mock_sched_fini(sched[i]); +} + +static struct kunit_case drm_sched_modify_sched_tests[] = { + KUNIT_CASE(drm_sched_test_modify_sched), + {} +}; + +static struct kunit_suite drm_sched_modify_sched = { + .name = "drm_sched_basic_modify_sched_tests", + .test_cases = drm_sched_modify_sched_tests, +}; + kunit_test_suites(&drm_sched_basic, &drm_sched_timeout, - &drm_sched_priority); + &drm_sched_priority, + &drm_sched_modify_sched); From 909bda2206a698be88d0351910446932a9843d58 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Mon, 24 Mar 2025 09:26:33 +0000 Subject: [PATCH 0202/1627] drm/sched: Add a basic test for checking credit limit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a basic test for checking whether scheduler respects the configured credit limit. Signed-off-by: Tvrtko Ursulin Cc: Christian König Cc: Danilo Krummrich Cc: Matthew Brost Cc: Philipp Stanner Acked-by: Christian König Signed-off-by: Philipp Stanner Link: https://patchwork.freedesktop.org/patch/msgid/20250324092633.49746-7-tvrtko.ursulin@igalia.com --- drivers/gpu/drm/scheduler/tests/tests_basic.c | 60 ++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c index 996cac00bb52..7230057e0594 100644 --- a/drivers/gpu/drm/scheduler/tests/tests_basic.c +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -412,7 +412,65 @@ static struct kunit_suite drm_sched_modify_sched = { .test_cases = drm_sched_modify_sched_tests, }; +static void drm_sched_test_credits(struct kunit *test) +{ + struct drm_mock_sched_entity *entity; + struct drm_mock_scheduler *sched; + struct drm_mock_sched_job *job[2]; + bool done; + int i; + + /* + * Check that the configured credit limit is respected. 
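+ *
+ * The credit limit is forced to 1 below, so the scheduler may only have a
+ * single job in flight: the second submitted job must not be scheduled
+ * until the first one has been completed by advancing the mock timeline.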
+ */ + + sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + sched->base.credit_limit = 1; + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + + job[0] = drm_mock_sched_job_new(test, entity); + job[1] = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job[0]); + drm_mock_sched_job_submit(job[1]); + + done = drm_mock_sched_job_wait_scheduled(job[0], HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_scheduled(job[1], HZ); + KUNIT_ASSERT_FALSE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_scheduled(job[1], HZ); + KUNIT_ASSERT_TRUE(test, done); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job[1], HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); + drm_mock_sched_fini(sched); +} + +static struct kunit_case drm_sched_credits_tests[] = { + KUNIT_CASE(drm_sched_test_credits), + {} +}; + +static struct kunit_suite drm_sched_credits = { + .name = "drm_sched_basic_credits_tests", + .test_cases = drm_sched_credits_tests, +}; + kunit_test_suites(&drm_sched_basic, &drm_sched_timeout, &drm_sched_priority, - &drm_sched_modify_sched); + &drm_sched_modify_sched, + &drm_sched_credits); From ccdb96cc7186c51045d707503d87cd220c5fed26 Mon Sep 17 00:00:00 2001 From: Vinod Govindapillai Date: Fri, 21 Mar 2025 11:45:28 +0200 Subject: [PATCH 0203/1627] drm/i915/fbc: keep FBC disabled if selective update is on in xe2lpd MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit FBC was disabled in case PSR2 selective update in display 12 to 14 as part of a wa. From xe2lpd onwards there is a logic to be implemented to decide between FBC and selective update. Until that logic is implemented keep FBC disabled in case selective update is enabled. v1: updated patch description and some explanation and todo Signed-off-by: Vinod Govindapillai Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20250321094529.197397-2-vinod.govindapillai@intel.com --- drivers/gpu/drm/i915/display/intel_fbc.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 4f9b4fc526ea..d75e3c7eaa44 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1465,13 +1465,15 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * Recommendation is to keep this combination disabled * Bspec: 50422 HSD: 14010260002 * - * In Xe3, PSR2 selective fetch and FBC dirty rect feature cannot - * coexist. So if PSR2 selective fetch is supported then mark that - * FBC is not supported. - * TODO: Need a logic to decide between PSR2 and FBC Dirty rect + * TODO: Implement a logic to select between PSR2 selective fetch and + * FBC based on Bspec: 68881 in xe2lpd onwards. + * + * As we still see some strange underruns in those platforms while + * disabling PSR2, keep FBC disabled in case of selective update is on + * until the selection logic is implemented. 
*/ - if ((IS_DISPLAY_VER(display, 12, 14) || HAS_FBC_DIRTY_RECT(display)) && - crtc_state->has_sel_update && !crtc_state->has_panel_replay) { + if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update && + !crtc_state->has_panel_replay) { plane_state->no_fbc_reason = "PSR2 enabled"; return 0; } From 11938353bfbfaf1acad000bac45adc296748f2f1 Mon Sep 17 00:00:00 2001 From: Vinod Govindapillai Date: Fri, 21 Mar 2025 11:45:29 +0200 Subject: [PATCH 0204/1627] drm/i915/fbc: update the panel_replay dependency in fbc wa's MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There are two panel_replay scenarios fbc wa need to be aware of, panel replay with and without selective update capability. Panel replay without selective update don't have any fbc wa. So keep the fbc psr1 wa as it is. The current fbc psr2 wa is mainly about selective fetch and we need to apply the fbc wa if selective fetch is on - irrespective of panel replay. Hence we can't exclude panel replay from the fbc psr2 wa. v1: keep panel_replay exclusion in PSR1 case (Jouni) Patch description updated Bspec: 66624, 50442 Signed-off-by: Vinod Govindapillai Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20250321094529.197397-3-vinod.govindapillai@intel.com --- drivers/gpu/drm/i915/display/intel_fbc.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index d75e3c7eaa44..ea3123874cbf 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1472,9 +1472,8 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, * disabling PSR2, keep FBC disabled in case of selective update is on * until the selection logic is implemented. */ - if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update && - !crtc_state->has_panel_replay) { - plane_state->no_fbc_reason = "PSR2 enabled"; + if (DISPLAY_VER(display) >= 12 && crtc_state->has_sel_update) { + plane_state->no_fbc_reason = "Selective update enabled"; return 0; } From ac7759c74a602688c77519f056bd83ab657a73a3 Mon Sep 17 00:00:00 2001 From: Francois Dugast Date: Fri, 14 Mar 2025 11:50:50 +0100 Subject: [PATCH 0205/1627] drm/xe/hw_engine_class_sysfs: Allow to inject error during probe Allow fault injection in a function used during initialization by xe_hw_engine_class_sysfs_init() so that its error handling can be tested. 
Signed-off-by: Francois Dugast Reviewed-by: Tejas Upadhyay Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20250314105050.636983-1-francois.dugast@intel.com Signed-off-by: Rodrigo Vivi --- drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c index b53e8d2accdb..e238c0e9fdd0 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_class_sysfs.c @@ -558,6 +558,7 @@ err_object: kobject_put(kobj); return err; } +ALLOW_ERROR_INJECTION(xe_add_hw_engine_class_defaults, ERRNO); /* See xe_pci_probe() */ static void xe_hw_engine_sysfs_kobj_release(struct kobject *kobj) { From 1d1f7b15cb9c11974cebfd39da51dc69b8cb31ff Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Sat, 15 Mar 2025 21:15:11 +0100 Subject: [PATCH 0206/1627] drm/bridge: ti-sn65dsi86: make use of debugfs_init callback Do not create a custom directory in debugfs-root, but use the debugfs_init callback to create a custom directory at the given place for the bridge. The new directory layout looks like this on a Renesas GrayHawk-Single with a R-Car V4M SoC: /sys/kernel/debug/dri/feb00000.display/DP-1/1-002c Signed-off-by: Wolfram Sang Reviewed-by: Dmitry Baryshkov Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20250315201651.7339-2-wsa+renesas@sang-engineering.com --- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 40 +++++++-------------------- 1 file changed, 10 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index fd68ad2e2718..c2bdc7e57ac7 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -440,36 +440,8 @@ static int status_show(struct seq_file *s, void *data) return 0; } - DEFINE_SHOW_ATTRIBUTE(status); -static void ti_sn65dsi86_debugfs_remove(void *data) -{ - debugfs_remove_recursive(data); -} - -static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata) -{ - struct device *dev = pdata->dev; - struct dentry *debugfs; - int ret; - - debugfs = debugfs_create_dir(dev_name(dev), NULL); - - /* - * We might get an error back if debugfs wasn't enabled in the kernel - * so let's just silently return upon failure. 
- */ - if (IS_ERR_OR_NULL(debugfs)) - return; - - ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs); - if (ret) - return; - - debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); -} - /* ----------------------------------------------------------------------------- * Auxiliary Devices (*not* AUX) */ @@ -1238,6 +1210,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge, return drm_edid_read_ddc(connector, &pdata->aux.ddc); } +static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + struct dentry *debugfs; + + debugfs = debugfs_create_dir(dev_name(pdata->dev), root); + debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); +} + static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .attach = ti_sn_bridge_attach, .detach = ti_sn_bridge_detach, @@ -1251,6 +1232,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .atomic_reset = drm_atomic_helper_bridge_reset, .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .debugfs_init = ti_sn65dsi86_debugfs_init, }; static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, @@ -1959,8 +1941,6 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (ret) return ret; - ti_sn65dsi86_debugfs_init(pdata); - /* * Break ourselves up into a collection of aux devices. The only real * motiviation here is to solve the chicken-and-egg problem of probe From d69362f55fba92eb4cac10fe8da618de52b49bfc Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 18 Mar 2025 16:52:56 +0100 Subject: [PATCH 0207/1627] drm/bridge: ti-sn65dsi86: Check bridge connection failure Read out and check the ID registers, so we can bail out if I2C communication does not work or if the device is unknown. Tested on a Renesas GrayHawk board (R-Car V4M) by using a wrong I2C address and by not enabling RuntimePM for the device. Signed-off-by: Wolfram Sang Reviewed-by: Douglas Anderson Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20250318155549.19625-2-wsa+renesas@sang-engineering.com --- drivers/gpu/drm/bridge/ti-sn65dsi86.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index c2bdc7e57ac7..f72675766e01 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -35,6 +35,7 @@ #include #include +#define SN_DEVICE_ID_REGS 0x00 /* up to 0x07 */ #define SN_DEVICE_REV_REG 0x08 #define SN_DPPLL_SRC_REG 0x0A #define DPPLL_CLK_SRC_DSICLK BIT(0) @@ -1898,6 +1899,7 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct ti_sn65dsi86 *pdata; + u8 id_buf[8]; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { @@ -1941,6 +1943,16 @@ static int ti_sn65dsi86_probe(struct i2c_client *client) if (ret) return ret; + pm_runtime_get_sync(dev); + ret = regmap_bulk_read(pdata->regmap, SN_DEVICE_ID_REGS, id_buf, ARRAY_SIZE(id_buf)); + pm_runtime_put_autosuspend(dev); + if (ret) + return dev_err_probe(dev, ret, "failed to read device id\n"); + + /* The ID string is stored backwards */ + if (strncmp(id_buf, "68ISD ", ARRAY_SIZE(id_buf))) + return dev_err_probe(dev, -EOPNOTSUPP, "unsupported device id\n"); + /* * Break ourselves up into a collection of aux devices. 
The only real * motiviation here is to solve the chicken-and-egg problem of probe From 837f9b917c47b4d35f0ee571a736de2895e2dd54 Mon Sep 17 00:00:00 2001 From: Tejas Vipin Date: Thu, 20 Mar 2025 00:01:06 +0530 Subject: [PATCH 0208/1627] drm/panel: samsung-s6d7aa0: transition to mipi_dsi wrapped functions Changes the samsung-s6d7aa0 panel to use multi style functions for improved error handling. Signed-off-by: Tejas Vipin Reviewed-by: Douglas Anderson [dianders: fixed whitespace errors] Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20250319183106.12613-1-tejasvipin76@gmail.com --- drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c | 238 ++++++------------ 1 file changed, 73 insertions(+), 165 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c index f23d8832a1ad..93f11e2e9398 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c @@ -34,8 +34,8 @@ struct s6d7aa0 { struct s6d7aa0_panel_desc { unsigned int panel_type; - int (*init_func)(struct s6d7aa0 *ctx); - int (*off_func)(struct s6d7aa0 *ctx); + void (*init_func)(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx); + void (*off_func)(struct mipi_dsi_multi_context *dsi_ctx); const struct drm_display_mode *drm_mode; unsigned long mode_flags; u32 bus_flags; @@ -62,93 +62,61 @@ static void s6d7aa0_reset(struct s6d7aa0 *ctx) msleep(50); } -static int s6d7aa0_lock(struct s6d7aa0 *ctx, bool lock) +static void s6d7aa0_lock(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx, bool lock) { - struct mipi_dsi_device *dsi = ctx->dsi; - if (lock) { - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0xa5, 0xa5); - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0xa5, 0xa5); if (ctx->desc->use_passwd3) - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0x5a, 0x5a); } else { - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD1, 0x5a, 0x5a); - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD2, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD1, 0x5a, 0x5a); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD2, 0x5a, 0x5a); if (ctx->desc->use_passwd3) - mipi_dsi_dcs_write_seq(dsi, MCS_PASSWD3, 0xa5, 0xa5); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_PASSWD3, 0xa5, 0xa5); } - - return 0; } static int s6d7aa0_on(struct s6d7aa0 *ctx) { struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = ctx->desc->init_func(ctx); - if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); - gpiod_set_value_cansleep(ctx->reset_gpio, 1); - return ret; - } + ctx->desc->init_func(ctx, &dsi_ctx); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - return 0; + return dsi_ctx.accum_err; } -static int s6d7aa0_off(struct s6d7aa0 *ctx) +static void s6d7aa0_off(struct s6d7aa0 *ctx) { struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = ctx->desc->off_func(ctx); - if (ret < 0) { - dev_err(dev, "Panel-specific off function failed: %d\n", ret); - return ret; - } + ctx->desc->off_func(&dsi_ctx); - ret = 
mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display off: %d\n", ret); - return ret; - } - msleep(64); + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 64); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode: %d\n", ret); - return ret; - } - msleep(120); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); - return 0; + mipi_dsi_msleep(&dsi_ctx, 120); } static int s6d7aa0_prepare(struct drm_panel *panel) { struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel); - struct device *dev = &ctx->dsi->dev; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); - if (ret < 0) { - dev_err(dev, "Failed to enable regulators: %d\n", ret); + if (ret < 0) return ret; - } s6d7aa0_reset(ctx); ret = s6d7aa0_on(ctx); if (ret < 0) { - dev_err(dev, "Failed to initialize panel: %d\n", ret); gpiod_set_value_cansleep(ctx->reset_gpio, 1); return ret; } @@ -159,12 +127,8 @@ static int s6d7aa0_prepare(struct drm_panel *panel) static int s6d7aa0_disable(struct drm_panel *panel) { struct s6d7aa0 *ctx = panel_to_s6d7aa0(panel); - struct device *dev = &ctx->dsi->dev; - int ret; - ret = s6d7aa0_off(ctx); - if (ret < 0) - dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + s6d7aa0_off(ctx); return 0; } @@ -185,13 +149,11 @@ static int s6d7aa0_bl_update_status(struct backlight_device *bl) { struct mipi_dsi_device *dsi = bl_get_data(bl); u16 brightness = backlight_get_brightness(bl); - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_brightness(dsi, brightness); - if (ret < 0) - return ret; + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, brightness); - return 0; + return dsi_ctx.accum_err; } static int s6d7aa0_bl_get_brightness(struct backlight_device *bl) @@ -228,65 +190,39 @@ s6d7aa0_create_backlight(struct mipi_dsi_device *dsi) /* Initialization code and structures for LSL080AL02 panel */ -static int s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al02_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + mipi_dsi_usleep_range(dsi_ctx, 20000, 25000); - usleep_range(20000, 25000); + s6d7aa0_lock(ctx, dsi_ctx, false); - ret = s6d7aa0_lock(ctx, false); - if (ret < 0) { - dev_err(dev, "Failed to unlock registers: %d\n", ret); - return ret; - } - - mipi_dsi_dcs_write_seq(dsi, MCS_OTP_RELOAD, 0x00, 0x10); - usleep_range(1000, 1500); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_OTP_RELOAD, 0x00, 0x10); + mipi_dsi_usleep_range(dsi_ctx, 1000, 1500); /* SEQ_B6_PARAM_8_R01 */ - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x10); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb6, 0x10); /* BL_CTL_ON */ - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x28); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x28); - usleep_range(5000, 6000); + mipi_dsi_usleep_range(dsi_ctx, 5000, 6000); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x04); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x04); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } + mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx); - msleep(120); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x00); + mipi_dsi_msleep(dsi_ctx, 120); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00); - ret = s6d7aa0_lock(ctx, true); - 
if (ret < 0) { - dev_err(dev, "Failed to lock registers: %d\n", ret); - return ret; - } + s6d7aa0_lock(ctx, dsi_ctx, true); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - - return 0; + mipi_dsi_dcs_set_display_on_multi(dsi_ctx); } -static int s6d7aa0_lsl080al02_off(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al02_off(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - /* BL_CTL_OFF */ - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x20); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x20); } static const struct drm_display_mode s6d7aa0_lsl080al02_mode = { @@ -317,79 +253,51 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = { /* Initialization code and structures for LSL080AL03 panel */ -static int s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al03_init(struct s6d7aa0 *ctx, struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - struct device *dev = &dsi->dev; - int ret; + mipi_dsi_usleep_range(dsi_ctx, 20000, 25000); - usleep_range(20000, 25000); - - ret = s6d7aa0_lock(ctx, false); - if (ret < 0) { - dev_err(dev, "Failed to unlock registers: %d\n", ret); - return ret; - } + s6d7aa0_lock(ctx, dsi_ctx, false); if (ctx->desc->panel_type == S6D7AA0_PANEL_LSL080AL03) { - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0xc7, 0x00, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23, - 0x09); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21, - 0x80, 0x78); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0xc7, 0x00, 0x29); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0xa0); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23, + 0x09); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21, + 0x80, 0x78); } else if (ctx->desc->panel_type == S6D7AA0_PANEL_LTL101AT01) { - mipi_dsi_dcs_write_seq(dsi, MCS_BL_CTL, 0x40, 0x00, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xbc, 0x01, 0x4e, 0x0b); - mipi_dsi_dcs_write_seq(dsi, 0xfd, 0x16, 0x10, 0x11, 0x23, - 0x09); - mipi_dsi_dcs_write_seq(dsi, 0xfe, 0x00, 0x02, 0x03, 0x21, - 0x80, 0x68); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MCS_BL_CTL, 0x40, 0x00, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xbc, 0x01, 0x4e, 0x0b); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfd, 0x16, 0x10, 0x11, 0x23, + 0x09); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xfe, 0x00, 0x02, 0x03, 0x21, + 0x80, 0x68); } - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x51); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xf2, 0x02, 0x08, 0x08); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xb3, 0x51); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x24); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xf2, 0x02, 0x08, 0x08); - usleep_range(10000, 11000); + mipi_dsi_usleep_range(dsi_ctx, 10000, 11000); - mipi_dsi_dcs_write_seq(dsi, 0xc0, 0x80, 0x80, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0xcd, - 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, - 0x2e, 0x2e, 0x2e, 0x2e, 0x2e); - mipi_dsi_dcs_write_seq(dsi, 0xce, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc1, 0x03); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc0, 0x80, 0x80, 0x30); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xcd, + 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, 0x2e, + 0x2e, 
0x2e, 0x2e, 0x2e, 0x2e); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xce, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0xc1, 0x03); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - - ret = s6d7aa0_lock(ctx, true); - if (ret < 0) { - dev_err(dev, "Failed to lock registers: %d\n", ret); - return ret; - } - - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - - return 0; + mipi_dsi_dcs_exit_sleep_mode_multi(dsi_ctx); + s6d7aa0_lock(ctx, dsi_ctx, true); + mipi_dsi_dcs_set_display_on_multi(dsi_ctx); } -static int s6d7aa0_lsl080al03_off(struct s6d7aa0 *ctx) +static void s6d7aa0_lsl080al03_off(struct mipi_dsi_multi_context *dsi_ctx) { - struct mipi_dsi_device *dsi = ctx->dsi; - - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00); - - return 0; + mipi_dsi_dcs_write_seq_multi(dsi_ctx, 0x22, 0x00); } static const struct drm_display_mode s6d7aa0_lsl080al03_mode = { From 15a226179c55ffef2e0a883b6bc15eaceff4a08d Mon Sep 17 00:00:00 2001 From: Tejas Vipin Date: Sat, 15 Mar 2025 23:55:22 +0530 Subject: [PATCH 0209/1627] drm/panel: novatek-nt36523: transition to mipi_dsi wrapped functions Changes the novatek-nt36523 panel to use multi style functions for improved error handling. Reviewed-by: Douglas Anderson Signed-off-by: Tejas Vipin Reviewed-by: Dmitry Baryshkov Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20250315182522.628187-1-tejasvipin76@gmail.com --- drivers/gpu/drm/panel/panel-novatek-nt36523.c | 1639 ++++++++--------- 1 file changed, 801 insertions(+), 838 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36523.c b/drivers/gpu/drm/panel/panel-novatek-nt36523.c index 04f1d2676c78..116d67bfa114 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36523.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36523.c @@ -23,10 +23,12 @@ #define DSI_NUM_MIN 1 -#define mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, cmd, seq...) \ - do { \ - mipi_dsi_dcs_write_seq(dsi0, cmd, seq); \ - mipi_dsi_dcs_write_seq(dsi1, cmd, seq); \ +#define mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, cmd, seq...) 
\ + do { \ + dsi_ctx.dsi = dsi0; \ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \ + dsi_ctx.dsi = dsi1; \ + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, cmd, seq); \ } while (0) struct panel_info { @@ -67,868 +69,829 @@ static int elish_boe_init_sequence(struct panel_info *pinfo) { struct mipi_dsi_device *dsi0 = pinfo->dsi[0]; struct mipi_dsi_device *dsi1 = pinfo->dsi[1]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; /* No datasheet, so write magic init sequence directly */ - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0x54, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x62, 0xcb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x47); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x47); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x47); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd2, 0x30); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02); - 
mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x6f, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x76, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x77, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x49); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x59); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x48); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd7, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdc, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdd, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe1, 0x43); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xe2, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf2, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf3, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xf4, 0x48); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x13, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x97, 0x3c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x98, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x99, 0x95); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9b, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9c, 0x0b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9d, 0x0a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9e, 0x90); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9f, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa3, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x14, 0x60); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0xc0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11); - msleep(70); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x68); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x47); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x19, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd2, 0x30); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x49); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x76, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x77, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x49); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x59); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x48); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcb, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd7, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdc, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdd, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe1, 0x43); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xe2, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf3, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xf4, 0x48); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x13, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x97, 0x3c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x98, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x99, 0x95); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9b, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9c, 0x0b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9d, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9e, 0x90); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9f, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa3, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x14, 0x60); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11); + mipi_dsi_msleep(&dsi_ctx, 70); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29); - return 0; + return dsi_ctx.accum_err; } static int elish_csot_init_sequence(struct panel_info *pinfo) { struct mipi_dsi_device *dsi0 = pinfo->dsi[0]; struct mipi_dsi_device *dsi1 = pinfo->dsi[1]; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = NULL }; /* No datasheet, so write magic init sequence directly */ - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x05); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x18, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xd0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x02, 0xaf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x30); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0xee); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1d, 0x09); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x02); - 
mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x23); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0x84); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x05, 0x2d); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x06, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x07, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x08, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x09, 0x45); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x12, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x15, 0x83); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x16, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29, 0x0a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x31, 0xfe); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x32, 0xfd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x33, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x34, 0xf8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x35, 0xf5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x36, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x37, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x38, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0xf2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3a, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0xec); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3d, 0xe9); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3f, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x40, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x41, 0xe5); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x45, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x46, 0xf4); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x47, 0xe7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x48, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x49, 0xcd); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4a, 0xc0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4b, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4c, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4d, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4e, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x4f, 0x99); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x50, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x68); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x52, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x54, 0x66); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0x0e); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x58, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x59, 0xfb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5a, 0xf7); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5b, 0xf3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5c, 0xef); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5d, 0xe3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5e, 0xda); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x5f, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x60, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x61, 0xd8); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0x62, 0xcb); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x63, 0xbf); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x64, 0xb3); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x65, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x66, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x67, 0xb2); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x0f, 0xff); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x53, 0x2c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x55, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x13); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x25, 0x46); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x30, 0x46); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x39, 0x46); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1a, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1c, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2a, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x2b, 0xe0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0xf0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x84, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x85, 0x0c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x51, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x91, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x92, 0x0f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x93, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x94, 0x18); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x95, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x96, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb0, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x19, 0x1f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x1b, 0x1b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x24); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x27); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x31); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd1, 0x20); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xde, 0x80); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xdf, 0x02); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x26); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x00, 0x81); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x01, 0xb0); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x22); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, 
dsi1, 0x6f, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x70, 0x11); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x73, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x74, 0x4d); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa0, 0x3f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xa9, 0x50); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xaa, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xab, 0x28); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xad, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb8, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xb9, 0x4b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xba, 0x96); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbb, 0x4b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbe, 0x07); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbf, 0x4b); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc0, 0x07); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc1, 0x5c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc2, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc5, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc6, 0x3f); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xc7, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xca, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcb, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xce, 0x00); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xcf, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd0, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd3, 0x08); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xd4, 0x40); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x25); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbc, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xbd, 0x1c); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x2a); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xfb, 0x01); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x9a, 0x03); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0xff, 0x10); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x11); - msleep(70); - mipi_dsi_dual_dcs_write_seq(dsi0, dsi1, 0x29); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x05); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x18, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xd0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x02, 0xaf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x30); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0xee); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1d, 0x09); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x23); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0x84); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x05, 0x2d); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x06, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x07, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x08, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x09, 0x45); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x12, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x15, 0x83); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x16, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29, 0x0a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x31, 0xfe); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x32, 0xfd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x33, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x34, 0xf8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x35, 0xf5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x36, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x37, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x38, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0xf2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3a, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0xec); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3d, 0xe9); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3f, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x40, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x41, 0xe5); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x45, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x46, 0xf4); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x47, 0xe7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x48, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x49, 0xcd); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4a, 0xc0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4b, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4c, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4d, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4e, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x4f, 0x99); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x50, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0x51, 0x68); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x52, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x54, 0x66); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0x0e); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x58, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x59, 0xfb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5a, 0xf7); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5b, 0xf3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5c, 0xef); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5d, 0xe3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5e, 0xda); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x5f, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x60, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x61, 0xd8); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x62, 0xcb); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x63, 0xbf); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x64, 0xb3); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x65, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x66, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x67, 0xb2); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x0f, 0xff); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x53, 0x2c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x55, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x13); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x3b, 0x03, 0xac, 0x1a, 0x04, 0x04); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x25, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x30, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x39, 0x46); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1a, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1c, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2a, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x2b, 0xe0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0xf0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x84, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x85, 0x0c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x51, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + 
mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x91, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x92, 0x0f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x93, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x94, 0x18); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x95, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x96, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb0, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x19, 0x1f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x1b, 0x1b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x24); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x27); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x31); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd1, 0x20); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xde, 0x80); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xdf, 0x02); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x26); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x00, 0x81); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x01, 0xb0); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x22); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x6f, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x70, 0x11); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x73, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x74, 0x4d); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa0, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xa9, 0x50); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xaa, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xab, 0x28); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xad, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb8, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xb9, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xba, 0x96); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbb, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbe, 0x07); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbf, 0x4b); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc0, 0x07); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc1, 0x5c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc2, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc5, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc6, 0x3f); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xc7, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xca, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 
0xcb, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xce, 0x00); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xcf, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd0, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd3, 0x08); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xd4, 0x40); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x25); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbc, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xbd, 0x1c); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x2a); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xfb, 0x01); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x9a, 0x03); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0xff, 0x10); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x11); + mipi_dsi_msleep(&dsi_ctx, 70); + mipi_dsi_dual_dcs_write_seq_multi(dsi_ctx, dsi0, dsi1, 0x29); - return 0; + return dsi_ctx.accum_err; } static int j606f_boe_init_sequence(struct panel_info *pinfo) { struct mipi_dsi_device *dsi = pinfo->dsi[0]; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x05, 0xd9); - mipi_dsi_dcs_write_seq(dsi, 0x07, 0x78); - mipi_dsi_dcs_write_seq(dsi, 0x08, 0x5a); - mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x63); - mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x91); - mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0x95, 0xeb); - mipi_dsi_dcs_write_seq(dsi, 0x96, 0xeb); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x6d, 0x66); - mipi_dsi_dcs_write_seq(dsi, 0x75, 0xa2); - mipi_dsi_dcs_write_seq(dsi, 0x77, 0xb3); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00, - 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01, - 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03, - 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03, - 0xfd, 0x03, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00, - 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); - mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01, - 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03, - 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03, - 0xfd, 0x03, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, 0x00, - 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, 0x01, - 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, 0x03, - 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, 0x03, - 0xfd, 0x03, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x21); - 
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00, - 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); - mipi_dsi_dcs_write_seq(dsi, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01, - 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03, - 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03, - 0xf5, 0x03, 0xf7); - mipi_dsi_dcs_write_seq(dsi, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00, - 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); - mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01, - 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03, - 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03, - 0xf5, 0x03, 0xf7); - mipi_dsi_dcs_write_seq(dsi, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, 0x00, - 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, 0x01, - 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); - mipi_dsi_dcs_write_seq(dsi, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, 0x03, - 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, 0x03, - 0xf5, 0x03, 0xf7); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x23); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x80); - mipi_dsi_dcs_write_seq(dsi, 0x07, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x11, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x12, 0x77); - mipi_dsi_dcs_write_seq(dsi, 0x15, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x01, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x02, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x03, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x04, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x05, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x06, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x07, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x08, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x09, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x0a, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x0b, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x0e, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x10, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x11, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x12, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x13, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x14, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x17, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x18, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x0f); - mipi_dsi_dcs_write_seq(dsi, 0x20, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x21, 0x0e); - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 
0x23, 0x0d); - mipi_dsi_dcs_write_seq(dsi, 0x24, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0x0c); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x28, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x0a); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x44); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0x32); - mipi_dsi_dcs_write_seq(dsi, 0x37, 0x44); - mipi_dsi_dcs_write_seq(dsi, 0x38, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0xd9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x78); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x5a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x63); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x91); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x95, 0xeb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x96, 0xeb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x66); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xa2); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0xb3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, + 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, + 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, + 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, + 0x03, 0xfd, 0x03, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, + 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, + 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, + 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, + 0x03, 0xfd, 0x03, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4d, 0x00, 0x6d, + 0x00, 0x89, 0x00, 0xa1, 0x00, 0xb6, 0x00, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xda, 0x01, 0x13, 0x01, 0x3c, 0x01, 0x7e, + 0x01, 0xab, 0x01, 0xf7, 0x02, 0x2f, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x67, 0x02, 0xa6, 0x02, 0xd1, 0x03, 0x08, + 0x03, 0x2e, 0x03, 0x5b, 0x03, 0x6b, 0x03, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x8e, 0x03, 0xa2, 0x03, 0xb7, 0x03, 0xe7, + 0x03, 0xfd, 0x03, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x21); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb0, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, + 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 
0x01, 0x76, + 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb2, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, + 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb3, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, + 0x03, 0xf5, 0x03, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb4, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, + 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, + 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, + 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb7, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, + 0x03, 0xf5, 0x03, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb8, 0x00, 0x00, 0x00, 0x1b, 0x00, 0x45, 0x00, 0x65, + 0x00, 0x81, 0x00, 0x99, 0x00, 0xae, 0x00, 0xc1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x00, 0xd2, 0x01, 0x0b, 0x01, 0x34, 0x01, 0x76, + 0x01, 0xa3, 0x01, 0xef, 0x02, 0x27, 0x02, 0x29); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xba, 0x02, 0x5f, 0x02, 0x9e, 0x02, 0xc9, 0x03, 0x00, + 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x03, 0x86, 0x03, 0x9a, 0x03, 0xaf, 0x03, 0xdf, + 0x03, 0xf5, 0x03, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x23); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x80); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x77); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 
0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x0f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x0e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x0d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x44); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x32); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x44); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00); - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x9a); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x9a); - mipi_dsi_dcs_write_seq(dsi, 0x3b, 0xa0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_3D_CONTROL, 0x42); - mipi_dsi_dcs_write_seq(dsi, 0x3f, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x43, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x47, 0x66); - mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x9a); - mipi_dsi_dcs_write_seq(dsi, 0x4b, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x4c, 0x91); - mipi_dsi_dcs_write_seq(dsi, 0x4d, 0x21); - mipi_dsi_dcs_write_seq(dsi, 0x4e, 0x43); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_3D_CONTROL, 0x42); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x66); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x9a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x91); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x21); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x43); - ret = mipi_dsi_dcs_set_display_brightness(dsi, 18); - if (ret < 0) { - dev_err(dev, "Failed to set display brightness: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 18); - mipi_dsi_dcs_write_seq(dsi, 0x52, 0x34); - mipi_dsi_dcs_write_seq(dsi, 0x55, 0x82, 0x02); - mipi_dsi_dcs_write_seq(dsi, 0x56, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x58, 0x21); - mipi_dsi_dcs_write_seq(dsi, 0x59, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0x5a, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0xa0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x65, 0x82); - mipi_dsi_dcs_write_seq(dsi, 0x7e, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0x7f, 0x3c); - mipi_dsi_dcs_write_seq(dsi, 0x82, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0x97, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xb6, - 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x05, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x92, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0x93, 
0x1a); - mipi_dsi_dcs_write_seq(dsi, 0x94, 0x5f); - mipi_dsi_dcs_write_seq(dsi, 0xd7, 0x55); - mipi_dsi_dcs_write_seq(dsi, 0xda, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0xde, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xdc, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22); - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe2, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe4, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe6, 0xc4); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x88); - mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xb5, 0x90); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x05, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x07); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x20, 0xa0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x3f, 0xe0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_VSYNC_TIMING, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x44, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_GET_SCANLINE, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0x48, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x49, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0); - mipi_dsi_dcs_write_seq(dsi, 0x61, 0xba); - mipi_dsi_dcs_write_seq(dsi, 0x62, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0xf1, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x64, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0x67, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0x6a, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0x70, 0x30); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_START, 0xf3); - mipi_dsi_dcs_write_seq(dsi, 0xa3, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xa4, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xa5, 0xff); - mipi_dsi_dcs_write_seq(dsi, 0xd6, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0xa1); - mipi_dsi_dcs_write_seq(dsi, 0x0a, 0xf2); - mipi_dsi_dcs_write_seq(dsi, 0x04, 0x28); - mipi_dsi_dcs_write_seq(dsi, 0x06, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0x0c, 0x13); - mipi_dsi_dcs_write_seq(dsi, 0x0d, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x0f, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x11, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x12, 0x50); - mipi_dsi_dcs_write_seq(dsi, 0x13, 0x51); - mipi_dsi_dcs_write_seq(dsi, 0x14, 0x65); - mipi_dsi_dcs_write_seq(dsi, 0x15, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0x17, 0xa0); - mipi_dsi_dcs_write_seq(dsi, 0x18, 0x86); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0x1c, 0xbb); - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x23, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7b); - mipi_dsi_dcs_write_seq(dsi, 0x1d, 0x00); - 
mipi_dsi_dcs_write_seq(dsi, 0x1e, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x2f, 0x05); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x32, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x39, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x34); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x82, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x21); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x00, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x82); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x3c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x97, 0xc0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb6, + 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, + 0x05, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x93, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x94, 0x5f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd7, 0x55); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xda, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xde, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0xc4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x88); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x88); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x90); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x07); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0xe0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_VSYNC_TIMING, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_GET_SCANLINE, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 
0x5c, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0xd0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0xba); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf1, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_START, 0xf3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa3, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa4, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xa5, 0xff); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd6, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0xa1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0xf2); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x28); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x13); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x50); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x51); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x65); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0xa0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x86); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0xbb); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x00); - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0xc3); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0xc3); - mipi_dsi_dcs_write_seq(dsi, 0x20, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0x11); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0x78); - mipi_dsi_dcs_write_seq(dsi, 0x35, 0x16); - mipi_dsi_dcs_write_seq(dsi, 0xc8, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xc9, 0x82); - mipi_dsi_dcs_write_seq(dsi, 0xca, 0x4e); - mipi_dsi_dcs_write_seq(dsi, 0xcb, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_READ_PPS_CONTINUE, 0x4c); - mipi_dsi_dcs_write_seq(dsi, 0xaa, 0x47); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x27); - 
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x56, 0x06); - mipi_dsi_dcs_write_seq(dsi, 0x58, 0x80); - mipi_dsi_dcs_write_seq(dsi, 0x59, 0x53); - mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x14); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x01); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0x5f, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0x60, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x61, 0x1d); - mipi_dsi_dcs_write_seq(dsi, 0x62, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x63, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x64, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0x65, 0x1c); - mipi_dsi_dcs_write_seq(dsi, 0x66, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x67, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x68, 0x25); - mipi_dsi_dcs_write_seq(dsi, 0x00, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x78, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xc3, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xd1, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xd2, 0x30); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x22, 0x2f); - mipi_dsi_dcs_write_seq(dsi, 0x23, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0x24, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0xc3); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0xf8); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x28, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0x29, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x00); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_LUT, 0x1a); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0xe0); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x14, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x16, 0xc0); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0xf0); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x11); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x78); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x16); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc8, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc9, 0x82); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x4e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcb, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_READ_PPS_CONTINUE, 0x4c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xaa, 0x47); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x27); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x06); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x80); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x53); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x14); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_CABC_MIN_BRIGHTNESS, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x1d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x1c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x01); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x25); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc3, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd1, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd2, 0x30); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x2f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0xc3); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0xf8); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_LUT, 0x1a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xe0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0xc0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xf0); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x08); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x08); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x24); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x24); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x5d); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x5d); - mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x4a, 0x5d); - mipi_dsi_dcs_write_seq(dsi, 0x4b, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x5a, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x91, 0x44); - mipi_dsi_dcs_write_seq(dsi, 0x92, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xdb, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xdc, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xdd, 0x22); - mipi_dsi_dcs_write_seq(dsi, 0xdf, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe0, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xe1, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe2, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xe3, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe4, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xe5, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0xe6, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0x5c, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x5d, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x8d, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x8e, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x25); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x20, 0x60); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_GAMMA_CURVE, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x27, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x33, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x34, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x48, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x49, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0x5b, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x61, 0x70); - mipi_dsi_dcs_write_seq(dsi, 0x62, 0x60); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x26); - 
mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x02, 0x31); - mipi_dsi_dcs_write_seq(dsi, 0x19, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x1a, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0x1b, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x1c, 0x0c); - mipi_dsi_dcs_write_seq(dsi, 0x2a, 0x0a); - mipi_dsi_dcs_write_seq(dsi, 0x2b, 0x7f); - mipi_dsi_dcs_write_seq(dsi, 0x1e, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0x1f, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_ROWS, 0x75); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05); - mipi_dsi_dcs_write_seq(dsi, 0x32, 0x8d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x5d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x91, 0x44); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x92, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdb, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdc, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdd, 0x22); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xdf, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe0, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe1, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe2, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe3, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe4, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe5, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xe6, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8d, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x8e, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x25); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_GAMMA_CURVE, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x60); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x26); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x31); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x7f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x7f); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_ROWS, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_SET_PARTIAL_COLUMNS, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x8d); - ret = mipi_dsi_dcs_set_pixel_format(dsi, 0x75); - if (ret < 0) { - dev_err(dev, "Failed to set pixel format: %d\n", ret); - return ret; - } + 
mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x2a); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x25, 0x75); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x20); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0x18, 0x40); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); - mipi_dsi_dcs_write_seq(dsi, 0xb9, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x75); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x40); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb9, 0x02); - ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - if (ret < 0) { - dev_err(dev, "Failed to set tear on: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); - mipi_dsi_dcs_write_seq(dsi, 0xbb, 0x13); - mipi_dsi_dcs_write_seq(dsi, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04); - mipi_dsi_dcs_write_seq(dsi, 0xff, 0x10); - usleep_range(10000, 11000); - mipi_dsi_dcs_write_seq(dsi, 0xfb, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xbb, 0x13); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x03, 0x5f, 0x1a, 0x04, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0x10); + mipi_dsi_usleep_range(&dsi_ctx, 10000, 11000); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xfb, 0x01); - ret = mipi_dsi_dcs_set_display_brightness(dsi, 0); - if (ret < 0) { - dev_err(dev, "Failed to set display brightness: %d\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c); - mipi_dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00); - mipi_dsi_dcs_write_seq(dsi, 0x68, 0x05, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x2c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_POWER_SAVE, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x05, 0x01); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode: %d\n", ret); - return ret; - } - msleep(100); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 100); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display on: %d\n", ret); - return ret; - } - msleep(30); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 30); - return 0; + return dsi_ctx.accum_err; } static const struct drm_display_mode elish_boe_modes[] = { @@ -1063,18 +1026,18 @@ static int nt36523_prepare(struct drm_panel *panel) static int nt36523_disable(struct drm_panel *panel) { struct panel_info *pinfo = to_panel_info(panel); - int i, ret; + int i; for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) { - ret = mipi_dsi_dcs_set_display_off(pinfo->dsi[i]); - if (ret < 0) - dev_err(&pinfo->dsi[i]->dev, "failed to set display off: %d\n", ret); + struct 
mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i]}; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); } for (i = 0; i < DSI_NUM_MIN + pinfo->desc->is_dual_dsi; i++) { - ret = mipi_dsi_dcs_enter_sleep_mode(pinfo->dsi[i]); - if (ret < 0) - dev_err(&pinfo->dsi[i]->dev, "failed to enter sleep mode: %d\n", ret); + struct mipi_dsi_multi_context dsi_ctx = { .dsi = pinfo->dsi[i]}; + + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); } msleep(70); From c8ba07caaecc622a9922cda49f24790821af8a71 Mon Sep 17 00:00:00 2001 From: Anusha Srivatsa Date: Tue, 18 Mar 2025 10:37:07 -0400 Subject: [PATCH 0210/1627] drm/panel/synaptics-r63353: Use _multi variants Move away from using deprecated API and use _multi variants if available. Use mipi_dsi_msleep() and mipi_dsi_usleep_range() instead of msleep() and usleep_range() respectively. Used Coccinelle to find the _multi variant APIs, replacing mpi_dsi_msleep() where necessary and for returning dsi_ctx.accum_err in these functions. Manually handled the reset step before returning from r63353_panel_activate() v2: Do not skip the reset in case of error during panel activate (Dmitry) - Convert all usleep_range() v3: mipi_dsi_usleep_range() is to be used only when in between _multi commands(Doug) - Check for error once in the end while using _multi variants (Doug) v4: Change return type of r63353_panel_deactivate() to void (Doug) @rule_1@ identifier dsi_var; identifier r; identifier func; type t; position p; expression dsi_device; expression list es; @@ t func(...) { ... struct mipi_dsi_device *dsi_var = dsi_device; +struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi_var }; <+... ( -r = mipi_dsi_dcs_nop(dsi_var)@p; +mipi_dsi_dcs_nop_multi(&dsi_ctx); | -r = mipi_dsi_dcs_exit_sleep_mode(dsi_var)@p; +mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); | -r = mipi_dsi_dcs_enter_sleep_mode(dsi_var)@p; +mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); | -r = mipi_dsi_dcs_write_buffer(dsi_var,es)@p; +mipi_dsi_dcs_write_buffer_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_display_off(dsi_var,es)@p; +mipi_dsi_dcs_set_display_off_multi(&dsi_ctx,es); | -r = mipi_dsi_compression_mode_ext(dsi_var,es)@p; +mipi_dsi_compression_mode_ext_multi(&dsi_ctx,es); | -r = mipi_dsi_compression_mode(dsi_var,es)@p; +mipi_dsi_compression_mode_multi(&dsi_ctx,es); | -r = mipi_dsi_picture_parameter_set(dsi_var,es)@p; +mipi_dsi_picture_parameter_set_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_display_on(dsi_var,es)@p; +mipi_dsi_dcs_set_display_on_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_tear_on(dsi_var)@p; +mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx); | -r = mipi_dsi_turn_on_peripheral(dsi_var)@p; +mipi_dsi_turn_on_peripheral_multi(&dsi_ctx); | -r = mipi_dsi_dcs_soft_reset(dsi_var)@p; +mipi_dsi_dcs_soft_reset_multi(&dsi_ctx); | -r = mipi_dsi_dcs_set_display_brightness(dsi_var,es)@p; +mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_pixel_format(dsi_var,es)@p; +mipi_dsi_dcs_set_pixel_format_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_column_address(dsi_var,es)@p; +mipi_dsi_dcs_set_column_address_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_page_address(dsi_var,es)@p; +mipi_dsi_dcs_set_page_address_multi(&dsi_ctx,es); | -r = mipi_dsi_dcs_set_tear_scanline(dsi_var,es)@p; +mipi_dsi_dcs_set_tear_scanline_multi(&dsi_ctx,es); ) -if(r < 0) { -... -} ...+> } @rule_2@ identifier dsi_var; identifier r; identifier func; type t; position p; expression dsi_device; expression list es; @@ t func(...) { ... struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi_var }; <+... 
( -r = msleep(es)@p; +r = mipi_dsi_msleep(&dsi_ctx,es); | -msleep(es)@p; +mipi_dsi_msleep(&dsi_ctx,es); | -r = usleep_range(es)@p; +r = mipi_dsi_usleep_range(&dsi_ctx,es); | -usleep_range(es)@p; +mipi_dsi_usleep_range(&dsi_ctx,es); ) ...+> } @rule_3@ identifier dsi_var; identifier func; type t; position p; expression list es; @@ t func(...) { ... struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi_var }; ... -return 0; +return dsi_ctx.accum_err; } Cc: Maxime Ripard Cc: Dmitry Baryshkov Cc: Tejas Vipin Cc: Douglas Anderson Cc: Neil Armstrong Reviewed-by: Douglas Anderson Signed-off-by: Anusha Srivatsa Signed-off-by: Douglas Anderson Link: https://patchwork.freedesktop.org/patch/msgid/20250318-synaptic-expt-v1-1-fa3831a7d883@redhat.com --- .../gpu/drm/panel/panel-synaptics-r63353.c | 68 ++++++------------- 1 file changed, 19 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-synaptics-r63353.c b/drivers/gpu/drm/panel/panel-synaptics-r63353.c index 17349825543f..b148e6cba9bd 100644 --- a/drivers/gpu/drm/panel/panel-synaptics-r63353.c +++ b/drivers/gpu/drm/panel/panel-synaptics-r63353.c @@ -106,53 +106,34 @@ static int r63353_panel_power_off(struct r63353_panel *rpanel) static int r63353_panel_activate(struct r63353_panel *rpanel) { struct mipi_dsi_device *dsi = rpanel->dsi; - struct device *dev = &dsi->dev; - int i, ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; + int i; - ret = mipi_dsi_dcs_soft_reset(dsi); - if (ret < 0) { - dev_err(dev, "Failed to do Software Reset (%d)\n", ret); - goto fail; - } + mipi_dsi_dcs_soft_reset_multi(&dsi_ctx); - usleep_range(15000, 17000); + mipi_dsi_usleep_range(&dsi_ctx, 15000, 17000); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to enter sleep mode (%d)\n", ret); - goto fail; - } + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); for (i = 0; i < rpanel->pdata->init_length; i++) { const struct r63353_instr *instr = &rpanel->pdata->init[i]; - ret = mipi_dsi_dcs_write_buffer(dsi, instr->data, instr->len); - if (ret < 0) - goto fail; + mipi_dsi_dcs_write_buffer_multi(&dsi_ctx, instr->data, + instr->len); } - msleep(120); + mipi_dsi_msleep(&dsi_ctx, 120); - ret = mipi_dsi_dcs_exit_sleep_mode(dsi); - if (ret < 0) { - dev_err(dev, "Failed to exit sleep mode (%d)\n", ret); - goto fail; - } + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); - usleep_range(5000, 10000); + mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000); - ret = mipi_dsi_dcs_set_display_on(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display ON (%d)\n", ret); - goto fail; - } + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); - return 0; + if (dsi_ctx.accum_err) + gpiod_set_value(rpanel->reset_gpio, 0); -fail: - gpiod_set_value(rpanel->reset_gpio, 0); - - return ret; + return dsi_ctx.accum_err; } static int r63353_panel_prepare(struct drm_panel *panel) @@ -178,27 +159,16 @@ static int r63353_panel_prepare(struct drm_panel *panel) return 0; } -static int r63353_panel_deactivate(struct r63353_panel *rpanel) +static void r63353_panel_deactivate(struct r63353_panel *rpanel) { struct mipi_dsi_device *dsi = rpanel->dsi; - struct device *dev = &dsi->dev; - int ret; + struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi }; - ret = mipi_dsi_dcs_set_display_off(dsi); - if (ret < 0) { - dev_err(dev, "Failed to set display OFF (%d)\n", ret); - return ret; - } + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); - usleep_range(5000, 10000); + mipi_dsi_usleep_range(&dsi_ctx, 5000, 10000); - ret = mipi_dsi_dcs_enter_sleep_mode(dsi); - if (ret < 0) { 
- dev_err(dev, "Failed to enter sleep mode (%d)\n", ret); - return ret; - } - - return 0; + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); } static int r63353_panel_unprepare(struct drm_panel *panel) From 327e30123cafcb45c0fc5843da0367b90332999d Mon Sep 17 00:00:00 2001 From: Vivek Kasireddy Date: Mon, 24 Mar 2025 10:22:33 -0700 Subject: [PATCH 0211/1627] drm/i915/xe2hpd: Identify the memory type for SKUs with GDDR + ECC Some SKUs of Xe2_HPD platforms (such as BMG) have GDDR memory type with ECC enabled. We need to identify this scenario and add a new case in xelpdp_get_dram_info() to handle it. In addition, the derating value needs to be adjusted accordingly to compensate for the limited bandwidth. Bspec: 64602 Cc: Matt Roper Fixes: 3adcf970dc7e ("drm/xe/bmg: Drop force_probe requirement") Cc: stable@vger.kernel.org Signed-off-by: Vivek Kasireddy Reviewed-by: Matt Roper Acked-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20250324-tip-v2-1-38397de319f8@intel.com Signed-off-by: Lucas De Marchi --- drivers/gpu/drm/i915/display/intel_bw.c | 12 ++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 1 + drivers/gpu/drm/i915/soc/intel_dram.c | 4 ++++ drivers/gpu/drm/xe/xe_device_types.h | 1 + 4 files changed, 18 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index dc7612658a9d..bb81efec08a0 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -250,6 +250,7 @@ static int icl_get_qgv_points(struct intel_display *display, qi->deinterleave = 4; break; case INTEL_DRAM_GDDR: + case INTEL_DRAM_GDDR_ECC: qi->channel_width = 32; break; default: @@ -404,6 +405,12 @@ static const struct intel_sa_info xe2_hpd_sa_info = { /* Other values not used by simplified algorithm */ }; +static const struct intel_sa_info xe2_hpd_ecc_sa_info = { + .derating = 45, + .deprogbwlimit = 53, + /* Other values not used by simplified algorithm */ +}; + static const struct intel_sa_info xe3lpd_sa_info = { .deburst = 32, .deprogbwlimit = 65, /* GB/s */ @@ -756,11 +763,16 @@ static unsigned int icl_qgv_bw(struct intel_display *display, void intel_bw_init_hw(struct intel_display *display) { + const struct dram_info *dram_info = &to_i915(display->drm)->dram_info; + if (!HAS_DISPLAY(display)) return; if (DISPLAY_VER(display) >= 30) tgl_get_bw_info(display, &xe3lpd_sa_info); + else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx && + dram_info->type == INTEL_DRAM_GDDR_ECC) + xe2_hpd_get_bw_info(display, &xe2_hpd_ecc_sa_info); else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx) xe2_hpd_get_bw_info(display, &xe2_hpd_sa_info); else if (DISPLAY_VER(display) >= 14) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ffc346379cc2..54538b6f85df 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -305,6 +305,7 @@ struct drm_i915_private { INTEL_DRAM_DDR5, INTEL_DRAM_LPDDR5, INTEL_DRAM_GDDR, + INTEL_DRAM_GDDR_ECC, } type; u8 num_qgv_points; u8 num_psf_gv_points; diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 9e310f4099f4..f60eedb0e92c 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -687,6 +687,10 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915) drm_WARN_ON(&i915->drm, !IS_DGFX(i915)); dram_info->type = INTEL_DRAM_GDDR; break; + case 9: + drm_WARN_ON(&i915->drm, !IS_DGFX(i915)); + dram_info->type = 
INTEL_DRAM_GDDR_ECC; + break; default: MISSING_CASE(val); return -EINVAL; diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 4656305dd45a..0921e957d784 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -575,6 +575,7 @@ struct xe_device { INTEL_DRAM_DDR5, INTEL_DRAM_LPDDR5, INTEL_DRAM_GDDR, + INTEL_DRAM_GDDR_ECC, } type; u8 num_qgv_points; u8 num_psf_gv_points; From 5003720e7d96c2f77c5ae3049aa8958e8074dc44 Mon Sep 17 00:00:00 2001 From: Animesh Manna Date: Mon, 24 Mar 2025 15:38:23 +0530 Subject: [PATCH 0212/1627] drm/i915/display: Read panel replay source status through PSR2 status register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PTL onwards get panel replay status from PSR2 status register instead of SRD status. Signed-off-by: Animesh Manna Reviewed-by: Jouni Högander Link: https://patchwork.freedesktop.org/patch/msgid/20250324100823.3111564-1-animesh.manna@intel.com --- drivers/gpu/drm/i915/display/intel_psr.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 50a22cd8d84a..708fbcfa9089 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -3635,8 +3635,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m) const char *status = "unknown"; u32 val, status_val; - if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled || - intel_dp->psr.panel_replay_enabled)) { + if ((intel_dp_is_edp(intel_dp) || DISPLAY_VER(display) >= 30) && + (intel_dp->psr.sel_update_enabled || intel_dp->psr.panel_replay_enabled)) { static const char * const live_status[] = { "IDLE", "CAPTURE", From 77ba0b8562251f44bdc4002813d976dfb3c0a387 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Mar 2025 12:52:45 +0200 Subject: [PATCH 0213/1627] drm/i915/dsi: convert vlv_dsi.[ch] to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert as much as possible of vlv_dsi.[ch] to struct intel_display. 
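The conversion is mechanical: where a function only needs display state, derive struct intel_display with to_intel_display() and test its platform flags instead of fetching drm_i915_private and calling the IS_*() macros. The sketch below is only an illustrative summary of that pattern, not an additional hunk of this patch; to_intel_display(), display->platform.*, and drm_dbg_kms() are taken from the hunks that follow, while the function name and debug string are made up for the example.

    /* Illustrative only -- summarises the pattern applied in the hunks below. */
    static void example_dsi_hook(struct intel_encoder *encoder)
    {
            struct intel_display *display = to_intel_display(encoder);

            /*
             * Old style (removed below):
             *   struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
             *   if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) ...
             *
             * New style: use the intel_display platform flags directly.
             */
            if (display->platform.geminilake || display->platform.broxton)
                    drm_dbg_kms(display->drm, "gen9lp DSI path\n");
    }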
Reviewed-by: Ville Syrjälä Link: https://lore.kernel.org/r/320449f3b58c6eca6fdbb16e4e819cd0e133887a.1742554320.git.jani.nikula@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/display/vlv_dsi.c | 157 +++++++++---------- drivers/gpu/drm/i915/display/vlv_dsi.h | 6 +- 3 files changed, 80 insertions(+), 87 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index b852ffe94a10..d7243848fb23 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7670,7 +7670,7 @@ void intel_setup_outputs(struct intel_display *display) intel_bios_for_each_encoder(display, intel_ddi_init); if (display->platform.geminilake || display->platform.broxton) - vlv_dsi_init(dev_priv); + vlv_dsi_init(display); } else if (HAS_PCH_SPLIT(dev_priv)) { int found; @@ -7754,7 +7754,7 @@ void intel_setup_outputs(struct intel_display *display) g4x_hdmi_init(display, CHV_HDMID, PORT_D); } - vlv_dsi_init(dev_priv); + vlv_dsi_init(display); } else if (display->platform.pineview) { intel_lvds_init(dev_priv); intel_crt_init(display); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index af717df83197..6ddf33de85d3 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -251,8 +251,10 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs, return 0; } -static void band_gap_reset(struct drm_i915_private *dev_priv) +static void band_gap_reset(struct intel_display *display) { + struct drm_i915_private *dev_priv = to_i915(display->drm); + vlv_flisdsi_get(dev_priv); vlv_flisdsi_write(dev_priv, 0x08, 0x0001); @@ -269,13 +271,13 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_connector *intel_connector = intel_dsi->attached_connector; struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; int ret; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -298,7 +300,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder, else pipe_config->pipe_bpp = 18; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { /* Enable Frame time stamp based scanline reporting */ pipe_config->mode_flags |= I915_MODE_FLAG_GET_SCANLINE_FROM_TIMESTAMP; @@ -468,7 +470,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) vlv_flisdsi_put(dev_priv); /* bandgap reset is needed after everytime we do power gate */ - band_gap_reset(dev_priv); + band_gap_reset(display); for_each_dsi_port(port, intel_dsi->ports) { @@ -495,11 +497,11 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder) static void intel_dsi_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) glk_dsi_device_ready(encoder); - else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + else if 
(display->platform.geminilake || display->platform.broxton) bxt_dsi_device_ready(encoder); else vlv_dsi_device_ready(encoder); @@ -559,23 +561,22 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder) glk_dsi_disable_mipi_io(encoder); } -static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port) +static i915_reg_t port_ctrl_reg(struct intel_display *display, enum port port) { - return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ? + return display->platform.geminilake || display->platform.broxton ? BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(port); } static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(display->drm, "\n"); for_each_dsi_port(port, intel_dsi->ports) { /* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */ - i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ? + i915_reg_t port_ctrl = display->platform.broxton ? BXT_MIPI_PORT_CTRL(port) : VLV_MIPI_PORT_CTRL(PORT_A); intel_de_write(display, MIPI_DEVICE_READY(display, port), @@ -594,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder) * On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI * Port A only. MIPI Port C has no similar bit for checking. */ - if ((IS_BROXTON(dev_priv) || port == PORT_A) && + if ((display->platform.broxton || port == PORT_A) && intel_de_wait_for_clear(display, port_ctrl, AFE_LATCHOUT, 30)) drm_err(display->drm, "DSI LP not going Low\n"); @@ -612,7 +613,6 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; @@ -620,7 +620,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { u32 temp = intel_dsi->pixel_overlap; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { for_each_dsi_port(port, intel_dsi->ports) intel_de_rmw(display, MIPI_CTRL(display, port), BXT_PIXEL_OVERLAP_CNT_MASK, @@ -633,7 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + i915_reg_t port_ctrl = port_ctrl_reg(display, port); u32 temp; temp = intel_de_read(display, port_ctrl); @@ -644,7 +644,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, if (intel_dsi->ports == (BIT(PORT_A) | BIT(PORT_C))) { temp |= (intel_dsi->dual_link - 1) << DUAL_LINK_MODE_SHIFT; - if (IS_BROXTON(dev_priv)) + if (display->platform.broxton) temp |= LANE_CONFIGURATION_DUAL_LINK_A; else temp |= crtc->pipe ? 
@@ -664,12 +664,11 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder, static void intel_dsi_port_disable(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + i915_reg_t port_ctrl = port_ctrl_reg(display, port); /* de-assert ip_tg_enable signal */ intel_de_rmw(display, port_ctrl, DPI_ENABLE, 0); @@ -730,7 +729,6 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum port port; bool glk_cold_boot = false; @@ -745,7 +743,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, * The BIOS may leave the PLL in a wonky state where it doesn't * lock. It needs to be fully powered down to fix it. */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { bxt_dsi_pll_disable(encoder); bxt_dsi_pll_enable(encoder, pipe_config); } else { @@ -753,7 +751,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, vlv_dsi_pll_enable(encoder, pipe_config); } - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { /* Add MIPI IO reset programming for modeset */ intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, 0, MIPIO_RST_CTRL); @@ -762,13 +760,13 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, 0); } - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (display->platform.valleyview || display->platform.cherryview) { /* Disable DPOunit clock gating, can stall pipe */ - intel_de_rmw(display, DSPCLK_GATE_D(dev_priv), + intel_de_rmw(display, DSPCLK_GATE_D(display), 0, DPOUNIT_CLOCK_GATE_DISABLE); } - if (!IS_GEMINILAKE(dev_priv)) + if (!display->platform.geminilake) intel_dsi_prepare(encoder, pipe_config); /* Give the panel time to power-on and then deassert its reset */ @@ -776,7 +774,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, msleep(intel_dsi->panel_on_delay); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); - if (IS_GEMINILAKE(dev_priv)) { + if (display->platform.geminilake) { glk_cold_boot = glk_dsi_enable_io(encoder); /* Prepare port in cold boot(s3/s4) scenario */ @@ -788,7 +786,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, intel_dsi_device_ready(encoder); /* Prepare port in normal boot scenario */ - if (IS_GEMINILAKE(dev_priv) && !glk_cold_boot) + if (display->platform.geminilake && !glk_cold_boot) intel_dsi_prepare(encoder, pipe_config); /* Send initialization commands in LP mode */ @@ -836,11 +834,11 @@ static void intel_dsi_disable(struct intel_atomic_state *state, const struct intel_crtc_state *old_crtc_state, const struct drm_connector_state *old_conn_state) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&i915->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); intel_dsi_vbt_exec_sequence(intel_dsi, 
MIPI_SEQ_BACKLIGHT_OFF); intel_backlight_disable(old_conn_state); @@ -860,9 +858,9 @@ static void intel_dsi_disable(struct intel_atomic_state *state, static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) glk_dsi_clear_device_ready(encoder); else vlv_dsi_clear_device_ready(encoder); @@ -874,13 +872,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, const struct drm_connector_state *old_conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; drm_dbg_kms(display->drm, "\n"); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { intel_crtc_vblank_off(old_crtc_state); skl_scaler_disable(old_crtc_state); @@ -907,7 +904,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, /* Transition to LP-00 */ intel_dsi_clear_device_ready(encoder); - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { /* Power down DSI regulator to save power */ intel_de_write(display, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT); intel_de_write(display, BXT_P_DSI_REGULATOR_TX_CTRL, @@ -917,12 +914,12 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, intel_de_rmw(display, BXT_P_CR_GT_DISP_PWRON, MIPIO_RST_CTRL, 0); } - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { bxt_dsi_pll_disable(encoder); } else { vlv_dsi_pll_disable(encoder); - intel_de_rmw(display, DSPCLK_GATE_D(dev_priv), + intel_de_rmw(display, DSPCLK_GATE_D(display), DPOUNIT_CLOCK_GATE_DISABLE, 0); } @@ -957,13 +954,13 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * configuration, otherwise accessing DSI registers will hang the * machine. See BSpec North Display Engine registers/MIPI[BXT]. */ - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && + if ((display->platform.geminilake || display->platform.broxton) && !bxt_dsi_pll_is_enabled(dev_priv)) goto out_put_power; /* XXX: this only works for one DSI output */ for_each_dsi_port(port, intel_dsi->ports) { - i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port); + i915_reg_t port_ctrl = port_ctrl_reg(display, port); bool enabled = intel_de_read(display, port_ctrl) & DPI_ENABLE; /* @@ -971,10 +968,10 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * bit in port C control register does not get set. As a * workaround, check pipe B conf instead. 
*/ - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if ((display->platform.valleyview || display->platform.cherryview) && port == PORT_C) enabled = intel_de_read(display, - TRANSCONF(dev_priv, PIPE_B)) & TRANSCONF_ENABLE; + TRANSCONF(display, PIPE_B)) & TRANSCONF_ENABLE; /* Try command mode if video mode not enabled */ if (!enabled) { @@ -989,7 +986,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, if (!(intel_de_read(display, MIPI_DEVICE_READY(display, port)) & DEVICE_READY)) continue; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { u32 tmp = intel_de_read(display, MIPI_CTRL(display, port)); tmp &= BXT_PIPE_SELECT_MASK; tmp >>= BXT_PIPE_SELECT_SHIFT; @@ -1177,15 +1174,15 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder, static void intel_dsi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u32 pclk; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { bxt_dsi_get_pipe_config(encoder, pipe_config); pclk = bxt_dsi_get_pclk(encoder, pipe_config); } else { @@ -1218,7 +1215,6 @@ static void set_dsi_timings(struct intel_encoder *encoder, const struct drm_display_mode *adjusted_mode) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); @@ -1253,7 +1249,7 @@ static void set_dsi_timings(struct intel_encoder *encoder, hbp = txbyteclkhs(hbp, bpp, lane_count, intel_dsi->burst_mode_ratio); for_each_dsi_port(port, intel_dsi->ports) { - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { /* * Program hdisplay and vdisplay on MIPI transcoder. * This is different from calculated hactive and @@ -1307,7 +1303,6 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; @@ -1327,7 +1322,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, } for_each_dsi_port(port, intel_dsi->ports) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + if (display->platform.valleyview || display->platform.cherryview) { /* * escape clock divider, 20MHz, shared for A and C. * device ready must be off when doing this! txclkesc? 
@@ -1342,7 +1337,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, tmp &= ~READ_REQUEST_PRIORITY_MASK; intel_de_write(display, MIPI_CTRL(display, port), tmp | READ_REQUEST_PRIORITY_HIGH); - } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + } else if (display->platform.geminilake || display->platform.broxton) { enum pipe pipe = crtc->pipe; intel_de_rmw(display, MIPI_CTRL(display, port), @@ -1377,7 +1372,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, if (intel_dsi->clock_stop) tmp |= CLOCKSTOP; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) { + if (display->platform.geminilake || display->platform.broxton) { tmp |= BXT_DPHY_DEFEATURE_EN; if (!is_cmd_mode(intel_dsi)) tmp |= BXT_DEFEATURE_DPI_FIFO_CTR; @@ -1424,7 +1419,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, intel_de_write(display, MIPI_INIT_COUNT(display, port), txclkesc(intel_dsi->escape_clk_div, 100)); - if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) && + if ((display->platform.geminilake || display->platform.broxton) && !intel_dsi->dual_link) { /* * BXT spec says write MIPI_INIT_COUNT for @@ -1461,7 +1456,7 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, intel_de_write(display, MIPI_LP_BYTECLK(display, port), intel_dsi->lp_byte_clk); - if (IS_GEMINILAKE(dev_priv)) { + if (display->platform.geminilake) { intel_de_write(display, MIPI_TLPX_TIME_COUNT(display, port), intel_dsi->lp_byte_clk); /* Shadow of DPHY reg */ @@ -1513,18 +1508,17 @@ static void intel_dsi_prepare(struct intel_encoder *encoder, static void intel_dsi_unprepare(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) return; for_each_dsi_port(port, intel_dsi->ports) { /* Panel commands can be sent when clock is in LP11 */ intel_de_write(display, MIPI_DEVICE_READY(display, port), 0x0); - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) bxt_dsi_reset_clocks(encoder, port); else vlv_dsi_reset_clocks(encoder, port); @@ -1596,8 +1590,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector) static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); struct intel_connector *connector = intel_dsi->attached_connector; + struct intel_display *display = to_intel_display(connector); struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns, extra_byte_count, tlpx_ui; u32 ui_num, ui_den; @@ -1645,7 +1639,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) * For GEMINILAKE dphy_param_reg will be programmed in terms of * HS byte clock count for other platform in HS ddr clock count */ - mul = IS_GEMINILAKE(dev_priv) ? 8 : 2; + mul = display->platform.geminilake ? 
8 : 2; ths_prepare_ns = max(mipi_config->ths_prepare, mipi_config->tclk_prepare); @@ -1653,7 +1647,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul); if (prepare_cnt > PREPARE_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n", + drm_dbg_kms(display->drm, "prepare count too high %u\n", prepare_cnt); prepare_cnt = PREPARE_CNT_MAX; } @@ -1674,7 +1668,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) exit_zero_cnt += 1; if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n", + drm_dbg_kms(display->drm, "exit zero count too high %u\n", exit_zero_cnt); exit_zero_cnt = EXIT_ZERO_CNT_MAX; } @@ -1685,7 +1679,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) * ui_den, ui_num * mul); if (clk_zero_cnt > CLK_ZERO_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n", + drm_dbg_kms(display->drm, "clock zero count too high %u\n", clk_zero_cnt); clk_zero_cnt = CLK_ZERO_CNT_MAX; } @@ -1695,7 +1689,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul); if (trail_cnt > TRAIL_CNT_MAX) { - drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n", + drm_dbg_kms(display->drm, "trail count too high %u\n", trail_cnt); trail_cnt = TRAIL_CNT_MAX; } @@ -1761,7 +1755,7 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); + struct intel_display *display = to_intel_display(crtc_state); if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) return 0; @@ -1770,7 +1764,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) * On Valleyview some DSI panels lose (v|h)sync when the clock is lower * than 320000KHz. */ - if (IS_VALLEYVIEW(dev_priv)) + if (display->platform.valleyview) return 320000; /* @@ -1778,7 +1772,7 @@ int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) * picture gets unstable, despite that values are * correct for DSI PLL and DE PLL. 
*/ - if (IS_GEMINILAKE(dev_priv)) + if (display->platform.geminilake) return 158400; return 0; @@ -1903,9 +1897,8 @@ static const struct dmi_system_id vlv_dsi_dmi_quirk_table[] = { { } }; -void vlv_dsi_init(struct drm_i915_private *dev_priv) +void vlv_dsi_init(struct intel_display *display) { - struct intel_display *display = &dev_priv->display; struct intel_dsi *intel_dsi; struct intel_encoder *encoder; struct intel_connector *connector; @@ -1914,16 +1907,16 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) enum port port; enum pipe pipe; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); /* There is no detection method for MIPI so rely on VBT */ if (!intel_bios_is_dsi_present(display, &port)) return; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) - dev_priv->display.dsi.mmio_base = BXT_MIPI_BASE; + if (display->platform.geminilake || display->platform.broxton) + display->dsi.mmio_base = BXT_MIPI_BASE; else - dev_priv->display.dsi.mmio_base = VLV_MIPI_BASE; + display->dsi.mmio_base = VLV_MIPI_BASE; intel_dsi = kzalloc(sizeof(*intel_dsi), GFP_KERNEL); if (!intel_dsi) @@ -1938,12 +1931,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) encoder = &intel_dsi->base; intel_dsi->attached_connector = connector; - drm_encoder_init(&dev_priv->drm, &encoder->base, &intel_dsi_funcs, + drm_encoder_init(display->drm, &encoder->base, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, "DSI %c", port_name(port)); encoder->compute_config = intel_dsi_compute_config; encoder->pre_enable = intel_dsi_pre_enable; - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) encoder->enable = bxt_dsi_enable; encoder->disable = intel_dsi_disable; encoder->post_disable = intel_dsi_post_disable; @@ -1963,7 +1956,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI * port C. BXT isn't limited like this. */ - if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) + if (display->platform.geminilake || display->platform.broxton) encoder->pipe_mask = ~0; else if (port == PORT_A) encoder->pipe_mask = BIT(PIPE_A); @@ -1979,10 +1972,10 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) else intel_dsi->ports = BIT(port); - if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.bl_ports & ~intel_dsi->ports)) connector->panel.vbt.dsi.bl_ports &= intel_dsi->ports; - if (drm_WARN_ON(&dev_priv->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) + if (drm_WARN_ON(display->drm, connector->panel.vbt.dsi.cabc_ports & ~intel_dsi->ports)) connector->panel.vbt.dsi.cabc_ports &= intel_dsi->ports; /* Create a DSI host (and a device) for each port. 
*/ @@ -1998,18 +1991,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) } if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) { - drm_dbg_kms(&dev_priv->drm, "no device found\n"); + drm_dbg_kms(display->drm, "no device found\n"); goto err; } /* Use clock read-back from current hw-state for fastboot */ current_mode = intel_encoder_current_mode(encoder); if (current_mode) { - drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n", + drm_dbg_kms(display->drm, "Calculated pclk %d GOP %d\n", intel_dsi->pclk, current_mode->clock); if (intel_fuzzy_clock_check(intel_dsi->pclk, current_mode->clock)) { - drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n"); + drm_dbg_kms(display->drm, "Using GOP pclk\n"); intel_dsi->pclk = current_mode->clock; } @@ -2021,7 +2014,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_dsi_vbt_gpio_init(intel_dsi, intel_dsi_get_hw_state(encoder, &pipe)); - drm_connector_init(&dev_priv->drm, &connector->base, &intel_dsi_connector_funcs, + drm_connector_init(display->drm, &connector->base, &intel_dsi_connector_funcs, DRM_MODE_CONNECTOR_DSI); drm_connector_helper_add(&connector->base, &intel_dsi_connector_helper_funcs); @@ -2030,12 +2023,12 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) intel_connector_attach_encoder(connector, encoder); - mutex_lock(&dev_priv->drm.mode_config.mutex); + mutex_lock(&display->drm->mode_config.mutex); intel_panel_add_vbt_lfp_fixed_mode(connector); - mutex_unlock(&dev_priv->drm.mode_config.mutex); + mutex_unlock(&display->drm->mode_config.mutex); if (!intel_panel_preferred_fixed_mode(connector)) { - drm_dbg_kms(&dev_priv->drm, "no fixed mode\n"); + drm_dbg_kms(display->drm, "no fixed mode\n"); goto err_cleanup_connector; } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.h b/drivers/gpu/drm/i915/display/vlv_dsi.h index 277bacfbc551..ff349b5876c2 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi.h @@ -7,14 +7,14 @@ #define __VLV_DSI_H__ enum port; -struct drm_i915_private; struct intel_crtc_state; +struct intel_display; struct intel_dsi; #ifdef I915 void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port); int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state); -void vlv_dsi_init(struct drm_i915_private *dev_priv); +void vlv_dsi_init(struct intel_display *display); #else static inline void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port) { @@ -23,7 +23,7 @@ static inline int vlv_dsi_min_cdclk(const struct intel_crtc_state *crtc_state) { return 0; } -static inline void vlv_dsi_init(struct drm_i915_private *dev_priv) +static inline void vlv_dsi_init(struct intel_display *display) { } #endif From 767efb276ea80af8a71a8be9cfd8e668d8720a98 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Mar 2025 12:52:46 +0200 Subject: [PATCH 0214/1627] drm/i915/dsi: convert vlv_dsi_pll.[ch] to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert as much as possible of vlv_dsi_pll.[ch] to struct intel_display. 
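The conversion is mechanical: look up the display from the encoder and use it for logging, the platform checks and the register accessors. A condensed sketch of the recurring pattern (illustrative only, the actual hunks follow below):

    -	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
    -	if (IS_CHERRYVIEW(dev_priv))
    -		drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
    +	struct intel_display *display = to_intel_display(encoder);
    +	if (display->platform.cherryview)
    +		drm_err(display->drm, "DSI PLL lock failed\n");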
Reviewed-by: Ville Syrjälä Link: https://lore.kernel.org/r/9d34d8b91c6bc8b2dd8e2081194ee496b251bbf3.1742554320.git.jani.nikula@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 3 +- drivers/gpu/drm/i915/display/vlv_dsi.c | 3 +- drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 118 +++++++++---------- drivers/gpu/drm/i915/display/vlv_dsi_pll.h | 5 +- 4 files changed, 63 insertions(+), 66 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d7243848fb23..222bc01d4682 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3834,7 +3834,6 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, struct intel_display_power_domain_set *power_domain_set) { struct intel_display *display = to_intel_display(crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder; enum port port; u32 tmp; @@ -3856,7 +3855,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, * registers/MIPI[BXT]. We can break out here early, since we * need the same DSI PLL to be enabled for both DSI ports. */ - if (!bxt_dsi_pll_is_enabled(dev_priv)) + if (!bxt_dsi_pll_is_enabled(display)) break; /* XXX: this works for video mode only */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 6ddf33de85d3..346737f15fa9 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -936,7 +936,6 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); intel_wakeref_t wakeref; enum port port; @@ -955,7 +954,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, * machine. See BSpec North Display Engine registers/MIPI[BXT]. 
*/ if ((display->platform.geminilake || display->platform.broxton) && - !bxt_dsi_pll_is_enabled(dev_priv)) + !bxt_dsi_pll_is_enabled(display)) goto out_put_power; /* XXX: this only works for one DSI output */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index 2ed47e7d1051..7ce924a5ef90 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -57,7 +57,7 @@ static u32 dsi_clk_from_pclk(u32 pclk, enum mipi_dsi_pixel_format fmt, return dsi_clk_khz; } -static int dsi_calc_mnp(struct drm_i915_private *dev_priv, +static int dsi_calc_mnp(struct intel_display *display, struct intel_crtc_state *config, int target_dsi_clk) { @@ -68,11 +68,11 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, /* target_dsi_clk is expected in kHz */ if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { - drm_err(&dev_priv->drm, "DSI CLK Out of Range\n"); + drm_err(display->drm, "DSI CLK Out of Range\n"); return -ECHRNG; } - if (IS_CHERRYVIEW(dev_priv)) { + if (display->platform.cherryview) { ref_clk = 100000; n = 4; m_min = 70; @@ -116,13 +116,13 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv, static int vlv_dsi_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); u32 dsi_clock; u32 pll_ctl, pll_div; u32 m = 0, p = 0, n; - int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000; + int refclk = display->platform.cherryview ? 100000 : 25000; int i; pll_ctl = config->dsi_pll.ctrl; @@ -147,7 +147,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, p--; if (!p) { - drm_err(&dev_priv->drm, "wrong P1 divisor\n"); + drm_err(display->drm, "wrong P1 divisor\n"); return 0; } @@ -157,7 +157,7 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, } if (i == ARRAY_SIZE(lfsr_converts)) { - drm_err(&dev_priv->drm, "wrong m_seed programmed\n"); + drm_err(display->drm, "wrong m_seed programmed\n"); return 0; } @@ -175,16 +175,16 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, int vlv_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); int pclk, dsi_clk, ret; dsi_clk = dsi_clk_from_pclk(intel_dsi->pclk, intel_dsi->pixel_format, intel_dsi->lane_count); - ret = dsi_calc_mnp(dev_priv, config, dsi_clk); + ret = dsi_calc_mnp(display, config, dsi_clk); if (ret) { - drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n"); + drm_dbg_kms(display->drm, "dsi_calc_mnp failed\n"); return ret; } @@ -196,7 +196,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, config->dsi_pll.ctrl |= DSI_PLL_VCO_EN; - drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n", + drm_dbg_kms(display->drm, "dsi pll div %08x, ctrl %08x\n", config->dsi_pll.div, config->dsi_pll.ctrl); pclk = vlv_dsi_pclk(encoder, config); @@ -213,9 +213,10 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder, void vlv_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - 
drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_cck_get(dev_priv); @@ -235,20 +236,21 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder, DSI_PLL_LOCK, 20)) { vlv_cck_put(dev_priv); - drm_err(&dev_priv->drm, "DSI PLL lock failed\n"); + drm_err(display->drm, "DSI PLL lock failed\n"); return; } vlv_cck_put(dev_priv); - drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); + drm_dbg_kms(display->drm, "DSI PLL locked\n"); } void vlv_dsi_pll_disable(struct intel_encoder *encoder) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_cck_get(dev_priv); @@ -260,14 +262,14 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder) vlv_cck_put(dev_priv); } -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) +bool bxt_dsi_pll_is_enabled(struct intel_display *display) { bool enabled; u32 val; u32 mask; mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED; - val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE); + val = intel_de_read(display, BXT_DSI_PLL_ENABLE); enabled = (val & mask) == mask; if (!enabled) @@ -281,17 +283,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) * times, and since accessing DSI registers with invalid dividers * causes a system hang. */ - val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); - if (IS_GEMINILAKE(dev_priv)) { + val = intel_de_read(display, BXT_DSI_PLL_CTL); + if (display->platform.geminilake) { if (!(val & BXT_DSIA_16X_MASK)) { - drm_dbg(&dev_priv->drm, - "Invalid PLL divider (%08x)\n", val); + drm_dbg_kms(display->drm, + "Invalid PLL divider (%08x)\n", val); enabled = false; } } else { if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) { - drm_dbg(&dev_priv->drm, - "Invalid PLL divider (%08x)\n", val); + drm_dbg_kms(display->drm, + "Invalid PLL divider (%08x)\n", val); enabled = false; } } @@ -301,29 +303,30 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) void bxt_dsi_pll_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); - intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); + intel_de_rmw(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_DO_ENABLE, 0); /* * PLL lock should deassert within 200us. * Wait up to 1ms before timing out. 
*/ - if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE, + if (intel_de_wait_for_clear(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timeout waiting for PLL lock deassertion\n"); } u32 vlv_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { + struct intel_display *display = to_intel_display(encoder); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 pll_ctl, pll_div; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); vlv_cck_get(dev_priv); pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL); @@ -352,14 +355,14 @@ static int bxt_dsi_pclk(struct intel_encoder *encoder, u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); u32 pclk; - config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL); + config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL); pclk = bxt_dsi_pclk(encoder, config); - drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk); + drm_dbg_kms(display->drm, "Calculated pclk=%u\n", pclk); return pclk; } @@ -375,10 +378,9 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT); } -static void glk_dsi_program_esc_clock(struct drm_device *dev, - const struct intel_crtc_state *config) +static void glk_dsi_program_esc_clock(struct intel_display *display, + const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 dsi_rate = 0; u32 pll_ratio = 0; u32 ddr_clk = 0; @@ -415,17 +417,16 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev, txesc2_div = min_t(u32, div2_value, 10); - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, + intel_de_write(display, MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK); - intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, + intel_de_write(display, MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK); } /* Program BXT Mipi clocks and dividers */ -static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, +static void bxt_dsi_program_clocks(struct intel_display *display, enum port port, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(dev); u32 tmp; u32 dsi_rate = 0; u32 pll_ratio = 0; @@ -436,7 +437,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, u32 mipi_8by3_divider; /* Clear old configurations */ - tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL); + tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port)); @@ -472,13 +473,13 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower); tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper); - intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp); + intel_de_write(display, BXT_MIPI_CLOCK_CTL, tmp); } int bxt_dsi_pll_compute(struct intel_encoder *encoder, struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); u8 dsi_ratio, dsi_ratio_min, dsi_ratio_max; u32 dsi_clk; @@ -494,7 
+495,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, */ dsi_ratio = DIV_ROUND_UP(dsi_clk * 2, BXT_REF_CLOCK_KHZ); - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { dsi_ratio_min = BXT_DSI_PLL_RATIO_MIN; dsi_ratio_max = BXT_DSI_PLL_RATIO_MAX; } else { @@ -503,11 +504,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, } if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Can't get a suitable ratio from DSI PLL ratios\n"); return -ECHRNG; } else - drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n"); + drm_dbg_kms(display->drm, "DSI PLL calculation is Done!!\n"); /* * Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x @@ -519,7 +520,7 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, /* As per recommendation from hardware team, * Prog PVD ratio =1 if dsi ratio <= 50 */ - if (IS_BROXTON(dev_priv) && dsi_ratio <= 50) + if (display->platform.broxton && dsi_ratio <= 50) config->dsi_pll.ctrl |= BXT_DSI_PLL_PVD_RATIO_1; pclk = bxt_dsi_pclk(encoder, config); @@ -536,46 +537,45 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, void bxt_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); /* Configure PLL vales */ - intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); - intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL); + intel_de_write(display, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl); + intel_de_posting_read(display, BXT_DSI_PLL_CTL); /* Program TX, RX, Dphy clocks */ - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { for_each_dsi_port(port, intel_dsi->ports) - bxt_dsi_program_clocks(encoder->base.dev, port, config); + bxt_dsi_program_clocks(display, port, config); } else { - glk_dsi_program_esc_clock(encoder->base.dev, config); + glk_dsi_program_esc_clock(display, config); } /* Enable DSI PLL */ - intel_de_rmw(dev_priv, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); + intel_de_rmw(display, BXT_DSI_PLL_ENABLE, 0, BXT_DSI_PLL_DO_ENABLE); /* Timeout and fail if PLL not locked */ - if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE, + if (intel_de_wait_for_set(display, BXT_DSI_PLL_ENABLE, BXT_DSI_PLL_LOCKED, 1)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Timed out waiting for DSI PLL to lock\n"); return; } - drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n"); + drm_dbg_kms(display->drm, "DSI PLL locked\n"); } void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); u32 tmp; /* Clear old configurations */ - if (IS_BROXTON(dev_priv)) { + if (display->platform.broxton) { tmp = intel_de_read(display, BXT_MIPI_CLOCK_CTL); tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port)); diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h index f975660fa609..f26e31a7dd69 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.h +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.h @@ -9,7 +9,6 @@ #include enum port; -struct drm_i915_private; struct intel_crtc_state; struct intel_display; struct intel_encoder; @@ -33,11 +32,11 
@@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port); #ifdef I915 -bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv); +bool bxt_dsi_pll_is_enabled(struct intel_display *display); void assert_dsi_pll_enabled(struct intel_display *display); void assert_dsi_pll_disabled(struct intel_display *display); #else -static inline bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv) +static inline bool bxt_dsi_pll_is_enabled(struct intel_display *display) { return false; } From 70c716349a5c29ba1ada1f6945a68e93c4ab6afc Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Mar 2025 12:52:47 +0200 Subject: [PATCH 0215/1627] drm/i915/dsi: convert parameter printing to drm_printer MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The DSI VBT initialization debug logs a lot of parameters. Convert this to use struct drm_printer with a prefix. Reviewed-by: Ville Syrjälä Link: https://lore.kernel.org/r/50ff85e66c058a12b2fe0d0cba6a542f7cfa71cf.1742554320.git.jani.nikula@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 78 +++++++++----------- 1 file changed, 34 insertions(+), 44 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 7b2ffd14ae6e..802f210bfafe 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -695,54 +695,44 @@ void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi, void intel_dsi_log_params(struct intel_dsi *intel_dsi) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct drm_printer p = drm_dbg_printer(display->drm, DRM_UT_KMS, + "DSI parameters:"); - drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk); - drm_dbg_kms(&i915->drm, "Pixel overlap %d\n", - intel_dsi->pixel_overlap); - drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count); - drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); - drm_dbg_kms(&i915->drm, "Video mode format %s\n", - intel_dsi->video_mode == NON_BURST_SYNC_PULSE ? - "non-burst with sync pulse" : - intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ? - "non-burst with sync events" : - intel_dsi->video_mode == BURST_MODE ? - "burst" : ""); - drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n", - intel_dsi->burst_mode_ratio); - drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val); - drm_dbg_kms(&i915->drm, "Eot %s\n", - str_enabled_disabled(intel_dsi->eotp_pkt)); - drm_dbg_kms(&i915->drm, "Clockstop %s\n", - str_enabled_disabled(!intel_dsi->clock_stop)); - drm_dbg_kms(&i915->drm, "Mode %s\n", - intel_dsi->operation_mode ? "command" : "video"); + drm_printf(&p, "Pclk %d\n", intel_dsi->pclk); + drm_printf(&p, "Pixel overlap %d\n", intel_dsi->pixel_overlap); + drm_printf(&p, "Lane count %d\n", intel_dsi->lane_count); + drm_printf(&p, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg); + drm_printf(&p, "Video mode format %s\n", + intel_dsi->video_mode == NON_BURST_SYNC_PULSE ? + "non-burst with sync pulse" : + intel_dsi->video_mode == NON_BURST_SYNC_EVENTS ? + "non-burst with sync events" : + intel_dsi->video_mode == BURST_MODE ? 
+ "burst" : ""); + drm_printf(&p, "Burst mode ratio %d\n", intel_dsi->burst_mode_ratio); + drm_printf(&p, "Reset timer %d\n", intel_dsi->rst_timer_val); + drm_printf(&p, "Eot %s\n", str_enabled_disabled(intel_dsi->eotp_pkt)); + drm_printf(&p, "Clockstop %s\n", str_enabled_disabled(!intel_dsi->clock_stop)); + drm_printf(&p, "Mode %s\n", intel_dsi->operation_mode ? "command" : "video"); if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) - drm_dbg_kms(&i915->drm, - "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); + drm_printf(&p, "Dual link: DSI_DUAL_LINK_FRONT_BACK\n"); else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT) - drm_dbg_kms(&i915->drm, - "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); + drm_printf(&p, "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n"); else - drm_dbg_kms(&i915->drm, "Dual link: NONE\n"); - drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format); - drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div); - drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n", - intel_dsi->lp_rx_timeout); - drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n", - intel_dsi->turn_arnd_val); - drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count); - drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n", - intel_dsi->hs_to_lp_count); - drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk); - drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); - drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n", - intel_dsi->clk_lp_to_hs_count); - drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n", - intel_dsi->clk_hs_to_lp_count); - drm_dbg_kms(&i915->drm, "BTA %s\n", - str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); + drm_printf(&p, "Dual link: NONE\n"); + drm_printf(&p, "Pixel Format %d\n", intel_dsi->pixel_format); + drm_printf(&p, "TLPX %d\n", intel_dsi->escape_clk_div); + drm_printf(&p, "LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout); + drm_printf(&p, "Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val); + drm_printf(&p, "Init Count 0x%x\n", intel_dsi->init_count); + drm_printf(&p, "HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count); + drm_printf(&p, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk); + drm_printf(&p, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer); + drm_printf(&p, "LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count); + drm_printf(&p, "HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count); + drm_printf(&p, "BTA %s\n", + str_enabled_disabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA))); } static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format) From bf5169db50c0edbdbc4528cb52c4741304219f3d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Mar 2025 12:52:48 +0200 Subject: [PATCH 0216/1627] drm/i915/dsi: convert intel_dsi_vbt.[ch] to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert as much as possible of intel_dsi_vbt.[ch] to struct intel_display. 
Reviewed-by: Ville Syrjälä Link: https://lore.kernel.org/r/d2a327c7121263cd67986a2d9199e18d7bf03acd.1742554320.git.jani.nikula@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 131 +++++++++---------- 1 file changed, 63 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 802f210bfafe..4e92504f5c14 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -102,13 +102,13 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi, static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct mipi_dsi_device *dsi_device; u8 type, flags, seq_port; u16 len; enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); flags = *data++; type = *data++; @@ -120,12 +120,12 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, port = intel_dsi_seq_port_to_port(intel_dsi, seq_port); - if (drm_WARN_ON(&dev_priv->drm, !intel_dsi->dsi_hosts[port])) + if (drm_WARN_ON(display->drm, !intel_dsi->dsi_hosts[port])) goto out; dsi_device = intel_dsi->dsi_hosts[port]->device; if (!dsi_device) { - drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n", + drm_dbg_kms(display->drm, "no dsi device for port %c\n", port_name(port)); goto out; } @@ -150,8 +150,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: - drm_dbg(&dev_priv->drm, - "Generic Read not yet implemented or used\n"); + drm_dbg_kms(display->drm, "Generic Read not yet implemented or used\n"); break; case MIPI_DSI_GENERIC_LONG_WRITE: mipi_dsi_generic_write(dsi_device, data, len); @@ -163,15 +162,14 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi, mipi_dsi_dcs_write_buffer(dsi_device, data, 2); break; case MIPI_DSI_DCS_READ: - drm_dbg(&dev_priv->drm, - "DCS Read not yet implemented or used\n"); + drm_dbg_kms(display->drm, "DCS Read not yet implemented or used\n"); break; case MIPI_DSI_DCS_LONG_WRITE: mipi_dsi_dcs_write_buffer(dsi_device, data, len); break; } - if (DISPLAY_VER(dev_priv) < 11) + if (DISPLAY_VER(display) < 11) vlv_dsi_wait_for_fifo_empty(intel_dsi, port); out: @@ -182,10 +180,10 @@ out: static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); u32 delay = *((const u32 *) data); - drm_dbg_kms(&i915->drm, "%d usecs\n", delay); + drm_dbg_kms(display->drm, "%d usecs\n", delay); usleep_range(delay, delay + 10); data += 4; @@ -196,7 +194,7 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data) static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index, const char *con_id, u8 idx, bool value) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); /* XXX: this table is a quick ugly hack. 
*/ static struct gpio_desc *soc_gpio_table[U8_MAX + 1]; struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index]; @@ -204,10 +202,10 @@ static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index, if (gpio_desc) { gpiod_set_value(gpio_desc, value); } else { - gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx, + gpio_desc = devm_gpiod_get_index(display->drm->dev, con_id, idx, value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW); if (IS_ERR(gpio_desc)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "GPIO index %u request failed (%pe)\n", gpio_index, gpio_desc); return; @@ -242,16 +240,16 @@ static void soc_opaque_gpio_set_value(struct intel_connector *connector, static void vlv_gpio_set_value(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */ if (connector->panel.vbt.dsi.seq_version < 3) { if (gpio_source == 1) { - drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n"); + drm_dbg_kms(display->drm, "SC gpio not supported\n"); return; } if (gpio_source > 1) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "unknown gpio source %u\n", gpio_source); return; } @@ -264,7 +262,7 @@ static void vlv_gpio_set_value(struct intel_connector *connector, static void chv_gpio_set_value(struct intel_connector *connector, u8 gpio_source, u8 gpio_index, bool value) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); if (connector->panel.vbt.dsi.seq_version >= 3) { if (gpio_index >= CHV_GPIO_IDX_START_SE) { @@ -284,13 +282,13 @@ static void chv_gpio_set_value(struct intel_connector *connector, } else { /* XXX: The spec is unclear about CHV GPIO on seq v2 */ if (gpio_source != 0) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "unknown gpio source %u\n", gpio_source); return; } if (gpio_index >= CHV_GPIO_IDX_START_E) { - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "invalid gpio index %u for GPIO N\n", gpio_index); return; @@ -320,13 +318,13 @@ enum { MIPI_VIO_EN_2, }; -static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, +static void icl_native_gpio_set_value(struct intel_display *display, int gpio, bool value) { - struct intel_display *display = &dev_priv->display; + struct drm_i915_private *dev_priv = to_i915(display->drm); int index; - if (drm_WARN_ON(&dev_priv->drm, DISPLAY_VER(dev_priv) == 11 && gpio >= MIPI_RESET_2)) + if (drm_WARN_ON(display->drm, DISPLAY_VER(display) == 11 && gpio >= MIPI_RESET_2)) return; switch (gpio) { @@ -344,7 +342,7 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, * modifications in irq setup and handling. */ spin_lock_irq(&dev_priv->irq_lock); - intel_de_rmw(dev_priv, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_DDI, SHOTPLUG_CTL_DDI_HPD_ENABLE(index) | SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index), value ? SHOTPLUG_CTL_DDI_HPD_OUTPUT_DATA(index) : 0); @@ -354,14 +352,14 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, case MIPI_AVDD_EN_2: index = gpio == MIPI_AVDD_EN_1 ? 0 : 1; - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), PANEL_POWER_ON, + intel_de_rmw(display, PP_CONTROL(display, index), PANEL_POWER_ON, value ? PANEL_POWER_ON : 0); break; case MIPI_BKLT_EN_1: case MIPI_BKLT_EN_2: index = gpio == MIPI_BKLT_EN_1 ? 
0 : 1; - intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, index), EDP_BLC_ENABLE, + intel_de_rmw(display, PP_CONTROL(display, index), EDP_BLC_ENABLE, value ? EDP_BLC_ENABLE : 0); break; case MIPI_AVEE_EN_1: @@ -389,13 +387,12 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv, static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *i915 = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; u8 gpio_source = 0, gpio_index = 0, gpio_number; bool value; int size; - bool native = DISPLAY_VER(i915) >= 11; + bool native = DISPLAY_VER(display) >= 11; if (connector->panel.vbt.dsi.seq_version >= 3) { size = 3; @@ -416,16 +413,16 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) gpio_source = (data[1] >> 1) & 3; } - drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", + drm_dbg_kms(display->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n", gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value)); if (native) - icl_native_gpio_set_value(i915, gpio_number, value); - else if (DISPLAY_VER(i915) >= 9) + icl_native_gpio_set_value(display, gpio_number, value); + else if (DISPLAY_VER(display) >= 9) bxt_gpio_set_value(connector, gpio_index, value); - else if (IS_VALLEYVIEW(i915)) + else if (display->platform.valleyview) vlv_gpio_set_value(connector, gpio_source, gpio_number, value); - else if (IS_CHERRYVIEW(i915)) + else if (display->platform.cherryview) chv_gpio_set_value(connector, gpio_source, gpio_number, value); return data + size; @@ -463,8 +460,8 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data) static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 target_addr) { - struct drm_device *drm_dev = intel_dsi->base.base.dev; - struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); + struct acpi_device *adev = ACPI_COMPANION(display->drm->dev); struct i2c_adapter_lookup lookup = { .target_addr = target_addr, .intel_dsi = intel_dsi, @@ -484,7 +481,7 @@ static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct i2c_adapter *adapter; struct i2c_msg msg; int ret; @@ -494,7 +491,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) u8 payload_size = *(data + 6); u8 *payload_data; - drm_dbg_kms(&i915->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n", + drm_dbg_kms(display->drm, "bus %d target-addr 0x%02x reg 0x%02x data %*ph\n", vbt_i2c_bus_num, target_addr, reg_offset, payload_size, data + 7); if (intel_dsi->i2c_bus_num < 0) { @@ -504,7 +501,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) adapter = i2c_get_adapter(intel_dsi->i2c_bus_num); if (!adapter) { - drm_err(&i915->drm, "Cannot find a valid i2c bus for xfer\n"); + drm_err(display->drm, "Cannot find a valid i2c bus for xfer\n"); goto err_bus; } @@ -522,7 +519,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data) ret = i2c_transfer(adapter, &msg, 1); if (ret < 0) - drm_err(&i915->drm, + drm_err(display->drm, 
"Failed to xfer payload of size (%u) to reg (%u)\n", payload_size, reg_offset); @@ -535,16 +532,16 @@ err_bus: static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); - drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n"); + drm_dbg_kms(display->drm, "Skipping SPI element execution\n"); return data + *(data + 5) + 6; } static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) { - struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); #ifdef CONFIG_PMIC_OPREGION u32 value, mask, reg_address; u16 i2c_address; @@ -560,9 +557,9 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data) reg_address, value, mask); if (ret) - drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret); + drm_err(display->drm, "%s failed, error: %d\n", __func__, ret); #else - drm_err(&i915->drm, + drm_err(display->drm, "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n"); #endif @@ -612,12 +609,12 @@ static const char *sequence_name(enum mipi_seq seq_id) static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, enum mipi_seq seq_id) { - struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; const u8 *data; fn_mipi_elem_exec mipi_elem_exec; - if (drm_WARN_ON(&dev_priv->drm, + if (drm_WARN_ON(display->drm, seq_id >= ARRAY_SIZE(connector->panel.vbt.dsi.sequence))) return; @@ -625,9 +622,9 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, if (!data) return; - drm_WARN_ON(&dev_priv->drm, *data != seq_id); + drm_WARN_ON(display->drm, *data != seq_id); - drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n", + drm_dbg_kms(display->drm, "Starting MIPI sequence %d - %s\n", seq_id, sequence_name(seq_id)); /* Skip Sequence Byte. */ @@ -657,19 +654,19 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi, /* Consistency check if we have size. */ if (operation_size && data != next) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Inconsistent operation size\n"); return; } } else if (operation_size) { /* We have size, skip. */ - drm_dbg_kms(&dev_priv->drm, + drm_dbg_kms(display->drm, "Unsupported MIPI operation byte %u\n", operation_byte); data += operation_size; } else { /* No size, can't skip without parsing. */ - drm_err(&dev_priv->drm, + drm_err(display->drm, "Unsupported MIPI operation byte %u\n", operation_byte); return; @@ -754,8 +751,7 @@ static enum mipi_dsi_pixel_format vbt_to_dsi_pixel_format(unsigned int format) bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; struct mipi_pps_data *pps = connector->panel.vbt.dsi.pps; @@ -763,7 +759,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) u16 burst_mode_ratio; enum port port; - drm_dbg_kms(&dev_priv->drm, "\n"); + drm_dbg_kms(display->drm, "\n"); intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 
0 : 1; intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0; @@ -809,7 +805,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) u32 bitrate; if (mipi_config->target_burst_mode_freq == 0) { - drm_err(&dev_priv->drm, "Burst mode target is not set\n"); + drm_err(display->drm, "Burst mode target is not set\n"); return false; } @@ -826,7 +822,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) mipi_config->target_burst_mode_freq = bitrate; if (mipi_config->target_burst_mode_freq < bitrate) { - drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n"); + drm_err(display->drm, "Burst mode freq is less than computed\n"); return false; } @@ -890,8 +886,7 @@ static const struct pinctrl_map soc_pwm_pinctrl_map[] = { void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) { - struct drm_device *dev = intel_dsi->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; @@ -901,13 +896,13 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) struct pinctrl *pinctrl; int ret; - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && + if ((display->platform.valleyview || display->platform.cherryview) && mipi_config->pwm_blc == PPS_BLC_PMIC) { gpiod_lookup_table = &pmic_panel_gpio_table; want_panel_gpio = true; } - if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) { + if (display->platform.valleyview && mipi_config->pwm_blc == PPS_BLC_SOC) { gpiod_lookup_table = &soc_panel_gpio_table; want_panel_gpio = true; want_backlight_gpio = true; @@ -916,12 +911,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) ret = pinctrl_register_mappings(soc_pwm_pinctrl_map, ARRAY_SIZE(soc_pwm_pinctrl_map)); if (ret) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to register pwm0 pinmux mapping\n"); - pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0"); + pinctrl = devm_pinctrl_get_select(display->drm->dev, "soc_pwm0"); if (IS_ERR(pinctrl)) - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to set pinmux to PWM\n"); } @@ -929,9 +924,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) gpiod_add_lookup_table(gpiod_lookup_table); if (want_panel_gpio) { - intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags); + intel_dsi->gpio_panel = devm_gpiod_get(display->drm->dev, "panel", flags); if (IS_ERR(intel_dsi->gpio_panel)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to own gpio for panel control\n"); intel_dsi->gpio_panel = NULL; } @@ -939,9 +934,9 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on) if (want_backlight_gpio) { intel_dsi->gpio_backlight = - devm_gpiod_get(dev->dev, "backlight", flags); + devm_gpiod_get(display->drm->dev, "backlight", flags); if (IS_ERR(intel_dsi->gpio_backlight)) { - drm_err(&dev_priv->drm, + drm_err(display->drm, "Failed to own gpio for backlight control\n"); intel_dsi->gpio_backlight = NULL; } From 1767a751509451910df53a4f4f51d5865ebec7eb Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Mar 2025 12:52:49 +0200 Subject: [PATCH 0217/1627] drm/i915/dsi: convert intel_dsi_dcs_backlight.c to struct intel_display MIME-Version: 1.0 Content-Type: text/plain; 
charset=UTF-8 Content-Transfer-Encoding: 8bit Going forward, struct intel_display is the main display device data pointer. Convert intel_dsi_dcs_backlight.c to struct intel_display. Reviewed-by: Ville Syrjälä Link: https://lore.kernel.org/r/19ed78f51ac153016fbe60c49037bef840a9cc1b.1742554320.git.jani.nikula@intel.com Signed-off-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index 049443245310..b3c453bf7d5c 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -24,9 +24,10 @@ */ #include +#include #include