From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
To: amd-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: kernel-dev@igalia.com, intel-xe@lists.freedesktop.org,
Danilo Krummrich <dakr@kernel.org>,
Philipp Stanner <phasta@kernel.org>,
Tvrtko Ursulin <tvrtko.ursulin@igalia.com>,
Christian König <christian.koenig@amd.com>,
Matthew Brost <matthew.brost@intel.com>
Subject: [PATCH v7 14/29] drm/sched: Embed run queue singleton into the scheduler
Date: Fri, 6 Mar 2026 16:34:30 +0000 [thread overview]
Message-ID: <20260306163445.97243-15-tvrtko.ursulin@igalia.com> (raw)
In-Reply-To: <20260306163445.97243-1-tvrtko.ursulin@igalia.com>
Now that the run queue to scheduler relationship is always 1:1, we can
embed the run queue directly in the scheduler struct and remove the
associated allocation and error handling code.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Danilo Krummrich <dakr@kernel.org>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: Philipp Stanner <phasta@kernel.org>
Acked-by: Danilo Krummrich <dakr@kernel.org>
---
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++--
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6 +++--
drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 5 +++-
drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 8 ++++--
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 8 +++---
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c | 8 +++---
drivers/gpu/drm/scheduler/sched_entity.c | 29 ++++++++-------------
drivers/gpu/drm/scheduler/sched_fence.c | 2 +-
drivers/gpu/drm/scheduler/sched_internal.h | 3 +--
drivers/gpu/drm/scheduler/sched_main.c | 27 +++----------------
drivers/gpu/drm/scheduler/sched_rq.c | 16 ++++++------
include/drm/gpu_scheduler.h | 5 +---
12 files changed, 51 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 24e4b4fc9156..8719aa0adad8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1108,7 +1108,8 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (p->gang_size > 1 && !adev->vm_manager.concurrent_flush) {
for (i = 0; i < p->gang_size; ++i) {
struct drm_sched_entity *entity = p->entities[i];
- struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct drm_gpu_scheduler *sched =
+ container_of(entity->rq, typeof(*sched), rq);
struct amdgpu_ring *ring = to_amdgpu_ring(sched);
if (amdgpu_vmid_uses_reserved(vm, ring->vm_hub))
@@ -1239,7 +1240,8 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
return r;
}
- sched = p->gang_leader->base.entity->rq->sched;
+ sched = container_of(p->gang_leader->base.entity->rq, typeof(*sched),
+ rq);
while ((fence = amdgpu_sync_get_fence(&p->sync))) {
struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 7d6ff0b6ca4e..d6ff3fbabd88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -390,7 +390,9 @@ static struct dma_fence *
amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
+ struct drm_gpu_scheduler *sched =
+ container_of(s_entity->rq, typeof(*sched), rq);
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct dma_fence *fence;
int r;
@@ -483,7 +485,7 @@ drm_sched_entity_queue_pop(struct drm_sched_entity *entity)
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
{
- struct drm_sched_rq *rq = sched->rq;
+ struct drm_sched_rq *rq = &sched->rq;
struct drm_sched_entity *s_entity;
struct drm_sched_job *s_job;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
index 56a88e14a044..e70a1117b812 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h
@@ -107,7 +107,10 @@ struct amdgpu_job {
static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
{
- return to_amdgpu_ring(job->base.entity->rq->sched);
+ struct drm_gpu_scheduler *sched =
+ container_of(job->base.entity->rq, typeof(*sched), rq);
+
+ return to_amdgpu_ring(sched);
}
int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index d13e64a69e25..85724ec6aaf8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -145,6 +145,7 @@ TRACE_EVENT(amdgpu_cs,
struct amdgpu_ib *ib),
TP_ARGS(p, job, ib),
TP_STRUCT__entry(
+ __field(struct drm_gpu_scheduler *, sched)
__field(struct amdgpu_bo_list *, bo_list)
__field(u32, ring)
__field(u32, dw)
@@ -152,11 +153,14 @@ TRACE_EVENT(amdgpu_cs,
),
TP_fast_assign(
+ __entry->sched = container_of(job->base.entity->rq,
+ typeof(*__entry->sched),
+ rq);
__entry->bo_list = p->bo_list;
- __entry->ring = to_amdgpu_ring(job->base.entity->rq->sched)->idx;
+ __entry->ring = to_amdgpu_ring(__entry->sched)->idx;
__entry->dw = ib->length_dw;
__entry->fences = amdgpu_fence_count_emitted(
- to_amdgpu_ring(job->base.entity->rq->sched));
+ to_amdgpu_ring(__entry->sched));
),
TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
__entry->bo_list, __entry->ring, __entry->dw,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 36805dcfa159..4ccd2e769799 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -106,13 +106,13 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
struct dma_fence **fence)
{
+ struct drm_gpu_scheduler *sched =
+ container_of(p->vm->delayed.rq, typeof(*sched), rq);
+ struct amdgpu_ring *ring =
+ container_of(sched, struct amdgpu_ring, sched);
struct amdgpu_ib *ib = p->job->ibs;
- struct amdgpu_ring *ring;
struct dma_fence *f;
- ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
- sched);
-
WARN_ON(ib->length_dw == 0);
amdgpu_ring_pad_ib(ring, ib);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
index cc5f4e01e38f..fee77af5045f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
@@ -465,15 +465,15 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
struct amdgpu_ctx_entity *entity)
{
- struct drm_gpu_scheduler *sched;
- struct amdgpu_ring *ring;
+ struct drm_gpu_scheduler *sched =
+ container_of(entity->entity.rq, typeof(*sched), rq);
if (!adev->xcp_mgr)
return;
- sched = entity->entity.rq->sched;
if (drm_sched_wqueue_ready(sched)) {
- ring = to_amdgpu_ring(entity->entity.rq->sched);
+ struct amdgpu_ring *ring = to_amdgpu_ring(sched);
+
atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
}
}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 713ae46f7bbd..61a4818cc87b 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -136,19 +136,9 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->last_user = current->group_leader;
entity->num_sched_list = num_sched_list;
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
+ entity->rq = &sched_list[0]->rq;
RCU_INIT_POINTER(entity->last_scheduled, NULL);
RB_CLEAR_NODE(&entity->rb_tree_node);
-
- if (!sched_list[0]->rq) {
- /* Since every entry covered by num_sched_list
- * should be non-NULL and therefore we warn drivers
- * not to do this and to fix their DRM calling order.
- */
- pr_warn("%s: called with uninitialized scheduler\n", __func__);
- } else {
- entity->rq = sched_list[0]->rq;
- }
-
init_completion(&entity->entity_idle);
/* We start in an idle state. */
@@ -338,7 +328,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
if (!entity->rq)
return 0;
- sched = entity->rq->sched;
+ sched = container_of(entity->rq, typeof(*sched), rq);
/*
* The client will not queue more jobs during this fini - consume
* existing queued ones, or discard them on SIGKILL.
@@ -419,10 +409,12 @@ static void drm_sched_entity_wakeup(struct dma_fence *f,
{
struct drm_sched_entity *entity =
container_of(cb, struct drm_sched_entity, cb);
+ struct drm_gpu_scheduler *sched =
+ container_of(entity->rq, typeof(*sched), rq);
entity->dependency = NULL;
dma_fence_put(f);
- drm_sched_wakeup(entity->rq->sched);
+ drm_sched_wakeup(sched);
}
/**
@@ -449,7 +441,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority);
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity,
struct drm_sched_job *sched_job)
{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct drm_gpu_scheduler *sched =
+ container_of(entity->rq, typeof(*sched), rq);
struct dma_fence *fence = entity->dependency;
struct drm_sched_fence *s_fence;
@@ -583,7 +576,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
spin_lock(&entity->lock);
sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
- rq = sched ? sched->rq : NULL;
+ rq = sched ? &sched->rq : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
@@ -607,6 +600,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
struct drm_sched_entity *entity = sched_job->entity;
+ struct drm_gpu_scheduler *sched =
+ container_of(entity->rq, typeof(*sched), rq);
bool first;
trace_drm_sched_job_queue(sched_job, entity);
@@ -618,7 +613,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
xa_for_each(&sched_job->dependencies, index, entry)
trace_drm_sched_job_add_dep(sched_job, entry);
}
- atomic_inc(entity->rq->sched->score);
+ atomic_inc(sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
/*
@@ -629,8 +624,6 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
/* first job wakes up scheduler */
if (first) {
- struct drm_gpu_scheduler *sched;
-
sched = drm_sched_rq_add_entity(entity);
if (sched)
drm_sched_wakeup(sched);
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 096fe28aa9c9..85ee3d694dc9 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -227,7 +227,7 @@ void drm_sched_fence_init(struct drm_sched_fence *fence,
{
unsigned seq;
- fence->sched = entity->rq->sched;
+ fence->sched = container_of(entity->rq, typeof(*fence->sched), rq);
seq = atomic_inc_return(&entity->fence_seq);
dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
&fence->lock, entity->fence_context, seq);
diff --git a/drivers/gpu/drm/scheduler/sched_internal.h b/drivers/gpu/drm/scheduler/sched_internal.h
index 520a421cbf8d..af43a350356e 100644
--- a/drivers/gpu/drm/scheduler/sched_internal.h
+++ b/drivers/gpu/drm/scheduler/sched_internal.h
@@ -35,8 +35,7 @@ bool drm_sched_can_queue(struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
-void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_rq *rq);
+void drm_sched_rq_init(struct drm_sched_rq *rq);
struct drm_gpu_scheduler *
drm_sched_rq_add_entity(struct drm_sched_entity *entity);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 79e44ccefdc9..818d3d4434b5 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -648,7 +648,7 @@ void drm_sched_job_arm(struct drm_sched_job *job)
BUG_ON(!entity);
drm_sched_entity_select_rq(entity);
- sched = entity->rq->sched;
+ sched = container_of(entity->rq, typeof(*sched), rq);
job->sched = sched;
job->s_priority = entity->priority;
@@ -1081,15 +1081,6 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->score = args->score ? args->score : &sched->_score;
sched->dev = args->dev;
- if (sched->rq) {
- /* Not an error, but warn anyway so drivers can
- * fine-tune their DRM calling order, and return all
- * is good.
- */
- dev_warn(sched->dev, "%s: scheduler already initialized!\n", __func__);
- return 0;
- }
-
if (args->submit_wq) {
sched->submit_wq = args->submit_wq;
sched->own_submit_wq = false;
@@ -1101,11 +1092,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->own_submit_wq = true;
}
- sched->rq = kzalloc_obj(*sched->rq);
- if (!sched->rq)
- goto Out_check_own;
-
- drm_sched_rq_init(sched, sched->rq);
+ drm_sched_rq_init(&sched->rq);
init_waitqueue_head(&sched->job_scheduled);
INIT_LIST_HEAD(&sched->pending_list);
@@ -1121,12 +1108,6 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_
sched->ready = true;
return 0;
-
-Out_check_own:
- if (sched->own_submit_wq)
- destroy_workqueue(sched->submit_wq);
- dev_err(sched->dev, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
- return -ENOMEM;
}
EXPORT_SYMBOL(drm_sched_init);
@@ -1172,8 +1153,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
if (sched->own_submit_wq)
destroy_workqueue(sched->submit_wq);
sched->ready = false;
- kfree(sched->rq);
- sched->rq = NULL;
if (!list_empty(&sched->pending_list))
dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n");
@@ -1193,7 +1172,7 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
{
struct drm_gpu_scheduler *sched = bad->sched;
struct drm_sched_entity *entity, *tmp;
- struct drm_sched_rq *rq = sched->rq;
+ struct drm_sched_rq *rq = &sched->rq;
/* don't change @bad's karma if it's from KERNEL RQ,
* because sometimes GPU hang would cause kernel jobs (like VM updating jobs)
diff --git a/drivers/gpu/drm/scheduler/sched_rq.c b/drivers/gpu/drm/scheduler/sched_rq.c
index 8df040c58b0a..f55fc2de0270 100644
--- a/drivers/gpu/drm/scheduler/sched_rq.c
+++ b/drivers/gpu/drm/scheduler/sched_rq.c
@@ -81,18 +81,15 @@ static void drm_sched_rq_update_tree_locked(struct drm_sched_entity *entity,
/**
* drm_sched_rq_init - initialize a given run queue struct
- * @sched: scheduler instance to associate with this run queue
* @rq: scheduler run queue
*
* Initializes a scheduler runqueue.
*/
-void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_rq *rq)
+void drm_sched_rq_init(struct drm_sched_rq *rq)
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->rb_tree_root = RB_ROOT_CACHED;
- rq->sched = sched;
rq->head_prio = DRM_SCHED_PRIORITY_INVALID;
}
@@ -161,7 +158,8 @@ drm_sched_entity_restore_vruntime(struct drm_sched_entity *entity,
enum drm_sched_priority rq_prio)
{
struct drm_sched_entity_stats *stats = entity->stats;
- struct drm_gpu_scheduler *sched = entity->rq->sched;
+ struct drm_gpu_scheduler *sched =
+ container_of(entity->rq, typeof(*sched), rq);
enum drm_sched_priority prio = entity->priority;
unsigned long avg_us, sched_avg_us;
ktime_t vruntime;
@@ -261,8 +259,8 @@ drm_sched_rq_add_entity(struct drm_sched_entity *entity)
}
rq = entity->rq;
+ sched = container_of(rq, typeof(*sched), rq);
spin_lock(&rq->lock);
- sched = rq->sched;
if (list_empty(&entity->list)) {
atomic_inc(sched->score);
@@ -289,6 +287,8 @@ drm_sched_rq_add_entity(struct drm_sched_entity *entity)
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity)
{
+ struct drm_gpu_scheduler *sched = container_of(rq, typeof(*sched), rq);
+
lockdep_assert_held(&entity->lock);
if (list_empty(&entity->list))
@@ -296,7 +296,7 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
spin_lock(&rq->lock);
- atomic_dec(rq->sched->score);
+ atomic_dec(sched->score);
list_del_init(&entity->list);
drm_sched_rq_remove_tree_locked(entity, rq);
@@ -352,7 +352,7 @@ void drm_sched_rq_pop_entity(struct drm_sched_entity *entity)
struct drm_sched_entity *
drm_sched_select_entity(struct drm_gpu_scheduler *sched)
{
- struct drm_sched_rq *rq = sched->rq;
+ struct drm_sched_rq *rq = &sched->rq;
struct rb_node *rb;
spin_lock(&rq->lock);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index ae54e959970c..cea2ee956422 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -249,7 +249,6 @@ struct drm_sched_entity {
/**
* struct drm_sched_rq - queue of entities to be scheduled.
*
- * @sched: the scheduler to which this rq belongs to.
* @lock: protects @entities, @rb_tree_root and @head_prio.
* @entities: list of the entities to be scheduled.
* @rb_tree_root: root of time based priority queue of entities for FIFO scheduling
@@ -260,8 +259,6 @@ struct drm_sched_entity {
* the next entity to emit commands from.
*/
struct drm_sched_rq {
- struct drm_gpu_scheduler *sched;
-
spinlock_t lock;
/* Following members are protected by the @lock: */
struct list_head entities;
@@ -584,7 +581,7 @@ struct drm_gpu_scheduler {
atomic_t credit_count;
long timeout;
const char *name;
- struct drm_sched_rq *rq;
+ struct drm_sched_rq rq;
wait_queue_head_t job_scheduled;
atomic64_t job_id_count;
struct workqueue_struct *submit_wq;
--
2.52.0
next prev parent reply other threads:[~2026-03-06 16:35 UTC|newest]
Thread overview: 32+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-06 16:34 [PATCH v7 00/29] Fair(er) DRM scheduler Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 01/29] drm/sched: Disallow initializing entities with no schedulers Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 02/29] drm/sched: Consolidate entity run queue management Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 03/29] drm/sched: Move run queue related code into a separate file Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 04/29] drm/sched: Add some scheduling quality unit tests Tvrtko Ursulin
2026-03-08 10:10 ` kernel test robot
2026-03-06 16:34 ` [PATCH v7 05/29] drm/sched: Add some more " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 06/29] drm/sched: Implement RR via FIFO Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 07/29] drm/sched: Free all finished jobs at once Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 08/29] drm/sched: Account entity GPU time Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 09/29] drm/sched: Remove idle entity from tree Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 10/29] drm/sched: Add fair scheduling policy Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 11/29] drm/sched: Favour interactive clients slightly Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 12/29] drm/sched: Switch default policy to fair Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 13/29] drm/sched: Remove FIFO and RR and simplify to a single run queue Tvrtko Ursulin
2026-03-06 16:34 ` Tvrtko Ursulin [this message]
2026-03-06 16:34 ` [PATCH v7 15/29] accel/amdxdna: Remove drm_sched_init_args->num_rqs usage Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 16/29] accel/rocket: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 17/29] accel/ethosu: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 18/29] drm/amdgpu: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 19/29] drm/etnaviv: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 20/29] drm/imagination: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 21/29] drm/lima: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 22/29] drm/msm: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 23/29] drm/nouveau: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 24/29] drm/panfrost: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 25/29] drm/panthor: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 26/29] drm/sched: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 27/29] drm/v3d: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 28/29] drm/xe: " Tvrtko Ursulin
2026-03-06 16:34 ` [PATCH v7 29/29] drm/sched: Remove drm_sched_init_args->num_rqs Tvrtko Ursulin
2026-03-08 22:37 ` Claude review: Fair(er) DRM scheduler Claude Code Review Bot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260306163445.97243-15-tvrtko.ursulin@igalia.com \
--to=tvrtko.ursulin@igalia.com \
--cc=amd-gfx@lists.freedesktop.org \
--cc=christian.koenig@amd.com \
--cc=dakr@kernel.org \
--cc=dri-devel@lists.freedesktop.org \
--cc=intel-xe@lists.freedesktop.org \
--cc=kernel-dev@igalia.com \
--cc=matthew.brost@intel.com \
--cc=phasta@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox