Commit dad5559

Revert "drm/sched: Convert the GPU scheduler to variable number ..."

Revert submission 1033322

Reason for revert: Breaks GPU reset

Reverted Changes:
- I518055d36: drm/amdgpu: drop setting buffer funcs in sdma442
- I3161b3ba3: drm/amdgpu: fix buffer funcs setting order on susp...
- If96a48811: drm/amdklc: fix a intree build error.
- I370135be6: drm/sched: Don't disturb the entity when in RR-mod...
- I051ca6e0b: drm/sched: Move free worker re-queuing out of the ...
- I25fb2785c: drm/sched: Rename drm_sched_get_cleanup_job to be ...
- I666125051: drm/sched: Rename drm_sched_run_job_queue_if_ready...
- I9a32472a9: drm/sched: Rename drm_sched_free_job_queue to be m...
- I4c2c9bed1: drm/sched: Re-queue run job worker when drm_sched_...
- Ie513a85c7: drm/sched: Drain all entities in DRM sched run job...
- I855c12a4e: drm/sched: Split free_job into own work item
- I7a116284b: drm/sched: Convert drm scheduler to use a work que...
- Iffc1aa4cd: drm/amdgpu: move buffer funcs setting up a level
- I3753e9dd4: drm/sched: Convert the GPU scheduler to variable n...

Change-Id: I8fa3facd7111e26a08ff77e6345b4437631e9ac0

1 parent cf8e343 · commit dad5559

File tree

10 files changed: +32 additions, -99 deletions

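For driver authors, the visible effect of this revert is that drm_sched_init() loses its num_rqs argument and always sets up DRM_SCHED_PRIORITY_COUNT run-queues. A minimal before/after sketch of a call site (the ring, ops, and limit names here are illustrative placeholders, not identifiers from this tree):

/* Prior to the revert: the caller passed the run-queue count. */
ret = drm_sched_init(&ring->sched, &my_sched_ops,
                     DRM_SCHED_PRIORITY_COUNT,   /* num_rqs */
                     my_hw_submission_limit, 0,  /* credit_limit, hang_limit */
                     timeout, NULL, NULL, "my_ring", dev);

/* After the revert: the count is fixed at compile time. */
ret = drm_sched_init(&ring->sched, &my_sched_ops,
                     my_hw_submission_limit, 0,
                     timeout, NULL, NULL, "my_ring", dev);

Every per-driver hunk below is this same one-line deletion.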

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

Lines changed: 0 additions & 1 deletion

@@ -2661,7 +2661,6 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
     }

     r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
-                       DRM_SCHED_PRIORITY_COUNT,
                        ring->num_hw_submission, 0,
                        timeout, adev->reset_domain->wq,
                        ring->sched_score, ring->name,

drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

Lines changed: 2 additions & 2 deletions

@@ -326,8 +326,8 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
     int i;

     /* Signal all jobs not yet scheduled */
-    for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-        struct drm_sched_rq *rq = sched->sched_rq[i];
+    for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_KERNEL; i--) {
+        struct drm_sched_rq *rq = &sched->sched_rq[i];
         spin_lock(&rq->lock);
         list_for_each_entry(s_entity, &rq->entities, list) {
             while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
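Note the loop shape restored here: iteration runs from the highest run-queue index down to DRM_SCHED_PRIORITY_KERNEL with a fixed DRM_SCHED_PRIORITY_COUNT bound, rather than upward and bounded by the runtime sched->num_rqs. A hedged sketch of the restored pattern (context variables as in the hunk above):

int i;

for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_KERNEL; i--) {
    /* The slot always exists: sched_rq is a fixed-size embedded array
     * again, so no NULL or num_rqs bounds check is needed. */
    struct drm_sched_rq *rq = &sched->sched_rq[i];

    spin_lock(&rq->lock);
    /* ... signal or drain the entities queued on rq ... */
    spin_unlock(&rq->lock);
}

Which priority each index corresponds to depends on this tree's priority enum ordering; the direction simply mirrors the pre-series code being restored.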

drivers/gpu/drm/etnaviv/etnaviv_sched.c

Lines changed: 0 additions & 1 deletion

@@ -135,7 +135,6 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
     int ret;

     ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
-                         DRM_SCHED_PRIORITY_COUNT,
                          etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
                          msecs_to_jiffies(500), NULL, NULL,
                          dev_name(gpu->dev), gpu->dev);

drivers/gpu/drm/lima/lima_sched.c

Lines changed: 0 additions & 1 deletion

@@ -489,7 +489,6 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
     INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

     return drm_sched_init(&pipe->base, &lima_sched_ops,
-                          DRM_SCHED_PRIORITY_COUNT,
                           1,
                           lima_job_hang_limit,
                           msecs_to_jiffies(timeout), NULL,

drivers/gpu/drm/msm/msm_ringbuffer.c

Lines changed: 0 additions & 1 deletion

@@ -95,7 +95,6 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
         sched_timeout = MAX_SCHEDULE_TIMEOUT;

     ret = drm_sched_init(&ring->sched, &msm_sched_ops,
-                         DRM_SCHED_PRIORITY_COUNT,
                          num_hw_submissions, 0, sched_timeout,
                          NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
     if (ret) {

drivers/gpu/drm/panfrost/panfrost_job.c

Lines changed: 0 additions & 1 deletion

@@ -871,7 +871,6 @@ int panfrost_job_init(struct panfrost_device *pfdev)

         ret = drm_sched_init(&js->queue[j].sched,
                              &panfrost_sched_ops,
-                             DRM_SCHED_PRIORITY_COUNT,
                              nentries, 0,
                              msecs_to_jiffies(JOB_TIMEOUT_MS),
                              pfdev->reset.wq,

drivers/gpu/drm/scheduler/sched_entity.c

Lines changed: 3 additions & 15 deletions

@@ -75,20 +75,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
     RCU_INIT_POINTER(entity->last_scheduled, NULL);
     RB_CLEAR_NODE(&entity->rb_tree_node);

-    if (!sched_list[0]->sched_rq) {
-        /* Warn drivers not to do this and to fix their DRM
-         * calling order.
-         */
-        pr_warn("%s: called with uninitialized scheduler\n", __func__);
-    } else if (num_sched_list) {
-        /* The "priority" of an entity cannot exceed the number
-         * of run-queues of a scheduler.
-         */
-        if (entity->priority >= sched_list[0]->num_rqs)
-            entity->priority = max_t(u32, sched_list[0]->num_rqs,
-                                     DRM_SCHED_PRIORITY_KERNEL);
-        entity->rq = sched_list[0]->sched_rq[entity->priority];
-    }
+    if (num_sched_list)
+        entity->rq = &sched_list[0]->sched_rq[entity->priority];

     init_completion(&entity->entity_idle);

@@ -646,7 +634,7 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)

     spin_lock(&entity->rq_lock);
     sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
-    rq = sched ? sched->sched_rq[entity->priority] : NULL;
+    rq = sched ? &sched->sched_rq[entity->priority] : NULL;
     if (rq != entity->rq) {
         drm_sched_rq_remove_entity(entity->rq, entity);
         entity->rq = rq;
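The entity-side change is purely one of addressing: with the embedded array restored, a run-queue is found by taking the address of an array slot, so the uninitialized-scheduler warning and the num_rqs clamp on entity->priority have no equivalent and are dropped. A minimal sketch of the two schemes (kernel-context pseudocode; the two lines belong to incompatible struct layouts and are shown side by side for contrast only):

/* Restored layout: the slot is storage inside the scheduler object,
 * so &sched->sched_rq[prio] is always a valid pointer. */
struct drm_sched_rq *rq = &sched_list[0]->sched_rq[prio];

/* Layout being reverted: sched_rq was a kmalloc_array() of pointers,
 * so the table could still be NULL (init not yet run) and prio had to
 * be checked against sched->num_rqs before the load. */
struct drm_sched_rq *rq = sched_list[0]->sched_rq[prio];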

drivers/gpu/drm/scheduler/sched_main.c

Lines changed: 25 additions & 68 deletions

@@ -732,14 +732,8 @@ int drm_sched_job_init(struct drm_sched_job *job,
                        struct drm_sched_entity *entity,
                        u32 credits, void *owner)
 {
-    if (!entity->rq) {
-        /* This will most likely be followed by missing frames
-         * or worse--a blank screen--leave a trail in the
-         * logs, so this can be debugged easier.
-         */
-        drm_err(job->sched, "%s: entity has no rq!\n", __func__);
+    if (!entity->rq)
         return -ENOENT;
-    }

     if (unlikely(!credits)) {
         pr_err("*ERROR* %s: credits cannot be 0!\n", __func__);

@@ -784,7 +778,7 @@ void drm_sched_job_arm(struct drm_sched_job *job)
     sched = entity->rq->sched;

     job->sched = sched;
-    job->s_priority = entity->priority;
+    job->s_priority = entity->rq - sched->sched_rq;
     job->id = atomic64_inc_return(&sched->job_id_count);

     drm_sched_fence_init(job->s_fence, job->entity);

@@ -999,11 +993,10 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
     int i;

     /* Kernel run queue has higher priority than normal run queue*/
-    for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-        entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
-            drm_sched_rq_select_entity_fifo(sched, sched->sched_rq[i]) :
-            drm_sched_rq_select_entity_rr(sched, sched->sched_rq[i]);
-
+    for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_KERNEL; i--) {
+        entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
+            drm_sched_rq_select_entity_fifo(sched, &sched->sched_rq[i]) :
+            drm_sched_rq_select_entity_rr(sched, &sched->sched_rq[i]);
         if (entity)
             break;
     }

@@ -1183,9 +1176,7 @@ static int drm_sched_main(void *param)
  *
  * @sched: scheduler instance
  * @ops: backend operations for this scheduler
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
  * @credit_limit: the number of credits this scheduler can hold from all jobs
- * @num_rqs: number of runqueues, one for each priority, up to DRM_SCHED_PRIORITY_COUNT
  * @hang_limit: number of times to allow a job to hang before dropping it
  * @timeout: timeout value in jiffies for the scheduler
  * @timeout_wq: workqueue to use for timeout work. If NULL, the system_wq is

@@ -1198,12 +1189,11 @@ static int drm_sched_main(void *param)
  */
 int drm_sched_init(struct drm_gpu_scheduler *sched,
                    const struct drm_sched_backend_ops *ops,
-                   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
+                   u32 credit_limit, unsigned int hang_limit,
                    long timeout, struct workqueue_struct *timeout_wq,
                    atomic_t *score, const char *name, struct device *dev)
 {
     int i, ret;
-
     sched->ops = ops;
     sched->credit_limit = credit_limit;
     sched->name = name;

@@ -1213,35 +1203,8 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
     sched->score = score ? score : &sched->_score;
     sched->dev = dev;

-    if (num_rqs > DRM_SCHED_PRIORITY_COUNT) {
-        /* This is a gross violation--tell drivers what the problem is.
-         */
-        drm_err(sched, "%s: num_rqs cannot be greater than DRM_SCHED_PRIORITY_COUNT\n",
-            __func__);
-        return -EINVAL;
-    } else if (sched->sched_rq) {
-        /* Not an error, but warn anyway so drivers can
-         * fine-tune their DRM calling order, and return all
-         * is good.
-         */
-        drm_warn(sched, "%s: scheduler already initialized!\n", __func__);
-        return 0;
-    }
-
-    sched->sched_rq = kmalloc_array(num_rqs, sizeof(*sched->sched_rq),
-                                    GFP_KERNEL | __GFP_ZERO);
-    if (!sched->sched_rq) {
-        drm_err(sched, "%s: out of memory for sched_rq\n", __func__);
-        return -ENOMEM;
-    }
-    sched->num_rqs = num_rqs;
-    ret = -ENOMEM;
-    for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-        sched->sched_rq[i] = kzalloc(sizeof(*sched->sched_rq[i]), GFP_KERNEL);
-        if (!sched->sched_rq[i])
-            goto Out_unroll;
-        drm_sched_rq_init(sched, sched->sched_rq[i]);
-    }
+    for (i = DRM_SCHED_PRIORITY_KERNEL; i < DRM_SCHED_PRIORITY_COUNT; i++)
+        drm_sched_rq_init(sched, &sched->sched_rq[i]);

     init_waitqueue_head(&sched->wake_up_worker);
     init_waitqueue_head(&sched->job_scheduled);

@@ -1252,24 +1215,18 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
     atomic_set(&sched->_score, 0);
     atomic64_set(&sched->job_id_count, 0);

-    /* Each scheduler will run on a seperate kernel thread */
-    sched->thread = kthread_run(drm_sched_main, sched, sched->name);
-    if (IS_ERR(sched->thread)) {
-        ret = PTR_ERR(sched->thread);
-        sched->thread = NULL;
-        DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
-        goto Out_unroll;
-    }
+
+    /* Each scheduler will run on a seperate kernel thread */
+    sched->thread = kthread_run(drm_sched_main, sched, sched->name);
+    if (IS_ERR(sched->thread)) {
+        ret = PTR_ERR(sched->thread);
+        sched->thread = NULL;
+        DRM_DEV_ERROR(sched->dev, "Failed to create scheduler for %s.\n", name);
+        return ret;
+    }

     sched->ready = true;
     return 0;
-Out_unroll:
-    for (--i ; i >= DRM_SCHED_PRIORITY_KERNEL; i--)
-        kfree(sched->sched_rq[i]);
-    kfree(sched->sched_rq);
-    sched->sched_rq = NULL;
-    drm_err(sched, "%s: Failed to setup GPU scheduler--out of memory\n", __func__);
-    return ret;
 }
 EXPORT_SYMBOL(drm_sched_init);

@@ -1288,8 +1245,8 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
     if (sched->thread)
         kthread_stop(sched->thread);

-    for (i = DRM_SCHED_PRIORITY_KERNEL; i < sched->num_rqs; i++) {
-        struct drm_sched_rq *rq = sched->sched_rq[i];
+    for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_KERNEL; i--) {
+        struct drm_sched_rq *rq = &sched->sched_rq[i];

         spin_lock(&rq->lock);
         list_for_each_entry(s_entity, &rq->entities, list)

@@ -1300,7 +1257,7 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
          */
         s_entity->stopped = true;
         spin_unlock(&rq->lock);
-        kfree(sched->sched_rq[i]);
+
     }

     /* Wakeup everyone stuck in drm_sched_entity_flush for this scheduler */

@@ -1310,8 +1267,6 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched)
     cancel_delayed_work_sync(&sched->work_tdr);

     sched->ready = false;
-    kfree(sched->sched_rq);
-    sched->sched_rq = NULL;
 }
 EXPORT_SYMBOL(drm_sched_fini);

@@ -1337,8 +1292,10 @@ void drm_sched_increase_karma(struct drm_sched_job *bad)
      */
     if (bad->s_priority != DRM_SCHED_PRIORITY_KERNEL) {
         atomic_inc(&bad->karma);
-        for (i = DRM_SCHED_PRIORITY_HIGH; i < sched->num_rqs; i++) {
-            struct drm_sched_rq *rq = sched->sched_rq[i];
+
+        for (i = DRM_SCHED_PRIORITY_KERNEL; i < DRM_SCHED_PRIORITY_COUNT;
+             i++) {
+            struct drm_sched_rq *rq = &sched->sched_rq[i];

             spin_lock(&rq->lock);
             list_for_each_entry_safe(entity, tmp, &rq->entities, list) {
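One subtle consequence restored in drm_sched_job_arm() above: because entity->rq now always points into the scheduler's embedded sched_rq[] array, the job's priority index can be recovered with plain pointer subtraction. A short sketch of why that line is valid (assuming only the restored struct layout):

/* With:
 *     struct drm_sched_rq sched_rq[DRM_SCHED_PRIORITY_COUNT];
 * embedded in the scheduler, and
 *     entity->rq = &sched->sched_rq[prio];
 * C pointer subtraction divides the byte distance by the element size,
 * so the original index comes back out:
 */
job->s_priority = entity->rq - sched->sched_rq;   /* == prio */

/* Under the reverted pointer-array layout this would be undefined
 * behaviour: each rq was a separate kzalloc() allocation, so the two
 * pointers did not address elements of the same array object. */

That is also why the reverted series stored entity->priority in the job directly instead.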

drivers/gpu/drm/v3d/v3d_sched.c

Lines changed: 0 additions & 5 deletions

@@ -733,7 +733,6 @@ v3d_sched_init(struct v3d_dev *v3d)

     ret = drm_sched_init(&v3d->queue[V3D_BIN].sched,
                          &v3d_bin_sched_ops,
-                         DRM_SCHED_PRIORITY_COUNT,
                          hw_jobs_limit, job_hang_limit,
                          msecs_to_jiffies(hang_limit_ms), NULL,
                          NULL, "v3d_bin", v3d->drm.dev);

@@ -742,7 +741,6 @@ v3d_sched_init(struct v3d_dev *v3d)

     ret = drm_sched_init(&v3d->queue[V3D_RENDER].sched,
                          &v3d_render_sched_ops,
-                         DRM_SCHED_PRIORITY_COUNT,
                          hw_jobs_limit, job_hang_limit,
                          msecs_to_jiffies(hang_limit_ms), NULL,
                          NULL, "v3d_render", v3d->drm.dev);

@@ -751,7 +749,6 @@ v3d_sched_init(struct v3d_dev *v3d)

     ret = drm_sched_init(&v3d->queue[V3D_TFU].sched,
                          &v3d_tfu_sched_ops,
-                         DRM_SCHED_PRIORITY_COUNT,
                          hw_jobs_limit, job_hang_limit,
                          msecs_to_jiffies(hang_limit_ms), NULL,
                          NULL, "v3d_tfu", v3d->drm.dev);

@@ -761,7 +758,6 @@ v3d_sched_init(struct v3d_dev *v3d)
     if (v3d_has_csd(v3d)) {
         ret = drm_sched_init(&v3d->queue[V3D_CSD].sched,
                              &v3d_csd_sched_ops,
-                             DRM_SCHED_PRIORITY_COUNT,
                              hw_jobs_limit, job_hang_limit,
                              msecs_to_jiffies(hang_limit_ms), NULL,
                              NULL, "v3d_csd", v3d->drm.dev);

@@ -770,7 +766,6 @@ v3d_sched_init(struct v3d_dev *v3d)

     ret = drm_sched_init(&v3d->queue[V3D_CACHE_CLEAN].sched,
                          &v3d_cache_clean_sched_ops,
-                         DRM_SCHED_PRIORITY_COUNT,
                          hw_jobs_limit, job_hang_limit,
                          msecs_to_jiffies(hang_limit_ms), NULL,
                          NULL, "v3d_cache_clean", v3d->drm.dev);

include/drm/gpu_scheduler.h

Lines changed: 2 additions & 4 deletions

@@ -490,7 +490,6 @@ struct drm_sched_backend_ops {
  * @credit_count: the current credit count of this scheduler
  * @timeout: the time after which a job is removed from the scheduler.
  * @name: name of the ring for which this scheduler is being used.
- * @num_rqs: Number of run-queues. This is at most DRM_SCHED_PRIORITY_COUNT,
  *           as there's usually one run-queue per priority, but could be less.
  * @sched_rq: An allocated array of run-queues of size @num_rqs;
  * @wake_up_worker: the wait queue on which the scheduler sleeps until a job

@@ -521,8 +520,7 @@ struct drm_gpu_scheduler {
     atomic_t                credit_count;
     long                    timeout;
     const char              *name;
-    u32                     num_rqs;
-    struct drm_sched_rq     **sched_rq;
+    struct drm_sched_rq     sched_rq[DRM_SCHED_PRIORITY_COUNT];
     wait_queue_head_t       wake_up_worker;
     wait_queue_head_t       job_scheduled;
     atomic64_t              job_id_count;

@@ -541,7 +539,7 @@ struct drm_gpu_scheduler {

 int drm_sched_init(struct drm_gpu_scheduler *sched,
                    const struct drm_sched_backend_ops *ops,
-                   u32 num_rqs, u32 credit_limit, unsigned int hang_limit,
+                   u32 credit_limit, unsigned int hang_limit,
                    long timeout, struct workqueue_struct *timeout_wq,
                    atomic_t *score, const char *name, struct device *dev);
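The header change condenses the whole revert: the scheduler goes back to owning a fixed array of run-queues instead of a runtime-sized pointer table. A simplified side-by-side sketch (all other fields omitted; the _v1/_v2 struct names are illustrative only):

/* Layout being reverted: sized at drm_sched_init() time, with two
 * levels of allocation to unwind on failure and in drm_sched_fini(). */
struct drm_gpu_scheduler_v2 {
    u32                   num_rqs;
    struct drm_sched_rq   **sched_rq;   /* kmalloc_array of kzalloc'ed rqs */
};

/* Restored layout: storage is part of the scheduler itself, so
 * run-queue setup cannot fail and fini has nothing to free. */
struct drm_gpu_scheduler_v1 {
    struct drm_sched_rq   sched_rq[DRM_SCHED_PRIORITY_COUNT];
};

The trade-off is flexibility for simplicity: the reverted series let a driver request fewer run-queues at the cost of extra allocation-failure paths; per the commit message, the series also broke GPU reset in this tree.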
