@@ -6612,7 +6612,7 @@ static bool try_to_block_task(struct rq *rq, struct task_struct *p,
 	return true;
 }
 
-static struct task_struct *cyclictest_task;
+static struct task_struct *cyclictest_task[16];
 
 /*
  * __schedule() is the main scheduler function.
@@ -6769,25 +6769,28 @@ static void __sched notrace __schedule(int sched_mode)
 		psi_sched_switch(prev, next, !task_on_rq_queued(prev) ||
 				 prev->se.sched_delayed);
 
-		if (unlikely(!cyclictest_task)) {
+		if (unlikely(!cyclictest_task[cpu])) {
 			if (next->rt_priority == 98 &&
-			    strncmp(next->comm, "cyclictest", 10) == 0) {
-				// Observe first cyclictest task with RT priority
-				cyclictest_task = next;
+			    strncmp(next->comm, "cyclictest", 10) == 0 &&
+			    next->nr_cpus_allowed == 1) {
+				// Store the cyclictest task_struct once its RT priority and CPU affinity are set
+				cyclictest_task[cpu] = next;
+				printk("cyclictest_task[%d]: pid %d\n",
+				       cpu, cyclictest_task[cpu]->pid);
 			}
 		}
 		trace_sched_switch(preempt, prev, next, prev_state);
 
 		/* Also unlocks the rq: */
 		rq = context_switch(rq, prev, next, &rf);
 	} else {
-		if (cyclictest_task &&
-		    next != cyclictest_task &&
-		    task_cpu(cyclictest_task) == cpu &&
-		    READ_ONCE(cyclictest_task->__state) == TASK_RUNNING &&
+		if (cyclictest_task[cpu] &&
+		    next != cyclictest_task[cpu] &&
+		    task_cpu(cyclictest_task[cpu]) == cpu &&
+		    READ_ONCE(cyclictest_task[cpu]->__state) == TASK_RUNNING &&
 		    strncmp(next->comm, "migration", 9) != 0) {
 			panic("cyclictest %d not scheduled cpu %d. next->pid %d",
-			      cyclictest_task->pid, cpu, next->pid);
+			      cyclictest_task[cpu]->pid, cpu, next->pid);
 		}
 
 		rq_unpin_lock(rq, &rf);
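
The `rt_priority == 98 && nr_cpus_allowed == 1` filter matches how cyclictest is presumably being run for this experiment: one measurement thread per core, each pinned to its own CPU at RT priority 98 (e.g. `cyclictest -t -a -p 98`), so each CPU's slot latches exactly its own pinned thread. Below is a minimal sketch, not part of this commit, of the same per-CPU tracking expressed with DEFINE_PER_CPU instead of the hardcoded 16-entry array, so the storage follows the machine's actual CPU count; the helper name track_cyclictest is hypothetical, and it assumes it is called under the rq lock the way the hunk above runs inside __schedule().

#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/string.h>

static DEFINE_PER_CPU(struct task_struct *, cyclictest_task_pcpu);

/* Hypothetical helper: record the first cyclictest thread that is
 * pinned to this CPU and running at RT priority 98, mirroring the
 * conditions checked in the patch above. */
static void track_cyclictest(struct task_struct *next, int cpu)
{
	if (!per_cpu(cyclictest_task_pcpu, cpu) &&
	    next->rt_priority == 98 &&
	    next->nr_cpus_allowed == 1 &&
	    strncmp(next->comm, "cyclictest", 10) == 0)
		per_cpu(cyclictest_task_pcpu, cpu) = next;
}

Two design points in the patch itself: moving from a single shared pointer to a per-CPU slot removes the race where whichever CPU observed cyclictest first claimed the only slot, and the `strncmp(next->comm, "migration", 9)` exclusion is needed because per-CPU migration threads run in the stop scheduling class and may legitimately preempt a runnable RT task without indicating a scheduling bug.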