Side-by-side comparison of kernel/sched/core.c, shown as a unified listing: '+' lines are the lazy-preemption (PREEMPT_AUTO) variant, '-' lines are the variant without TIF_NEED_RESCHED_LAZY; unchanged lines are omitted.

@@ set_nr_and_not_polling() (TIF_POLLING_NRFLAG variant) @@
- * Atomically set TIF_NEED_RESCHED and test for TIF_POLLING_NRFLAG,
+ * Atomically set TIF_NEED_RESCHED[_LAZY] and test for TIF_POLLING_NRFLAG,
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
-	return !(fetch_or(&ti->flags, _TIF_NEED_RESCHED) & _TIF_POLLING_NRFLAG);
+	return !(fetch_or(&ti->flags, 1 << tif_bit) & _TIF_POLLING_NRFLAG);

@@ set_nr_if_polling() @@
-		if (val & _TIF_NEED_RESCHED)
+		if (val & (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY))

@@ set_nr_and_not_polling() (!TIF_POLLING_NRFLAG variant) @@
-static inline bool set_nr_and_not_polling(struct task_struct *p)
+static inline bool set_nr_and_not_polling(struct task_struct *p, int tif_bit)
-	set_tsk_need_resched(p);
+	set_tsk_thread_flag(p, tif_bit);

@@ resched_curr() @@
-void resched_curr(struct rq *rq)
+static void __resched_curr(struct rq *rq, int lazy)
-	int cpu;
+	int cpu, tif_bit = TIF_NEED_RESCHED + lazy;
-	if (test_tsk_need_resched(curr))
+	if (unlikely(test_tsk_thread_flag(curr, tif_bit)))
-		set_tsk_need_resched(curr);
-		set_preempt_need_resched();
+		set_tsk_thread_flag(curr, tif_bit);
+		if (!lazy)
+			set_preempt_need_resched();
-	if (set_nr_and_not_polling(curr))
-		smp_send_reschedule(cpu);
-	else
+	if (set_nr_and_not_polling(curr, tif_bit)) {
+		if (!lazy)
+			smp_send_reschedule(cpu);
+	} else {
 		trace_sched_wake_idle_without_ipi(cpu);
+	}
+}
+
+void resched_curr(struct rq *rq)
+{
+	__resched_curr(rq, 0);
+}
+
+void resched_curr_lazy(struct rq *rq)
+{
+	int lazy = IS_ENABLED(CONFIG_PREEMPT_BUILD_AUTO) && !sched_feat(FORCE_NEED_RESCHED) ?
+			TIF_NEED_RESCHED_LAZY_OFFSET : 0;
+
+	if (lazy && unlikely(test_tsk_thread_flag(rq->curr, TIF_NEED_RESCHED)))
+		return;
+
+	__resched_curr(rq, lazy);
+}

@@ wake_up_idle_cpu() @@
-	if (set_nr_and_not_polling(rq->idle))
+	if (set_nr_and_not_polling(rq->idle, TIF_NEED_RESCHED))

@@ sched_submit_work() @@
-	if (task_is_running(tsk))
-		return;
+	static DEFINE_WAIT_OVERRIDE_MAP(sched_map, LD_WAIT_CONFIG);
+
+	/*
+	 * Establish LD_WAIT_CONFIG context to ensure none of the code called
+	 * will use a blocking primitive -- which would lead to a deadlock.
+	 */
+	lock_map_acquire_try(&sched_map);
+
+	lock_map_release(&sched_map);

@@ schedule() @@
-asmlinkage __visible void __sched schedule(void)
-	struct task_struct *tsk = current;
-
-	sched_submit_work(tsk);
-		__schedule(SM_NONE);
+static __always_inline void __schedule_loop(unsigned int sched_mode)
+		__schedule(sched_mode);
+}
+
+asmlinkage __visible void __sched schedule(void)
+{
+	struct task_struct *tsk = current;
+
+#ifdef CONFIG_RT_MUTEXES
+	lockdep_assert(!tsk->sched_rt_mutex);
+#endif
+
+	if (!task_is_running(tsk))
+		sched_submit_work(tsk);
+	__schedule_loop(SM_NONE);

@@ schedule_rtlock() @@
-	do {
-		preempt_disable();
-		__schedule(SM_RTLOCK_WAIT);
-		sched_preempt_enable_no_resched();
-	} while (need_resched());
+	__schedule_loop(SM_RTLOCK_WAIT);

@@ rt_mutex scheduler helpers (present only on the '+' side of this comparison) @@
+/*
+ * Would be more useful with typeof()/auto_type but they don't mix with
+ * bit-fields. Since it's a local thing, use int. Keep the generic sounding
+ * name such that if someone were to implement this function ...
+ * notes.
+ */
+#define fetch_and_set(x, v)	({ int _x = (x); (x) = (v); _x; })
+
+void rt_mutex_pre_schedule(void)
+{
+	lockdep_assert(!fetch_and_set(current->sched_rt_mutex, 1));
+	sched_submit_work(current);
+}
+
+void rt_mutex_schedule(void)
+{
+	lockdep_assert(current->sched_rt_mutex);
+	__schedule_loop(SM_NONE);
+}
+
+void rt_mutex_post_schedule(void)
+{
+	sched_update_worker(current);
+	lockdep_assert(fetch_and_set(current->sched_rt_mutex, 0));
+}
+
+/*
+ * task_is_pi_boosted - Check if task has been PI boosted.
+ * @p: Task to check.
+ *
+ * Return true if task is subject to priority inheritance.
+ */
+bool task_is_pi_boosted(const struct task_struct *p)
+{
+	int prio = p->prio;
+
+	if (!rt_prio(prio))
+		return false;
+	return prio != p->normal_prio;
+}

-#endif
+#endif /* ... */
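To make the flag handling in the set_nr_and_not_polling() hunks easier to follow, here is a minimal, self-contained userspace sketch of the same pattern: the flag word is updated with one atomic fetch-or, and the returned old value tells the caller whether the target was polling (and will therefore notice the new bit on its own) or still needs to be kicked with an IPI. The fake_thread_info type, the bit values and the C11 atomics below are illustrative stand-ins for the kernel's thread_info and fetch_or(), not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's thread_info flag bits. */
#define TIF_NEED_RESCHED	0
#define TIF_NEED_RESCHED_LAZY	1
#define TIF_POLLING_NRFLAG	2

#define _TIF_POLLING_NRFLAG	(1UL << TIF_POLLING_NRFLAG)

struct fake_thread_info {
	atomic_ulong flags;
};

/*
 * Mirrors the pattern of set_nr_and_not_polling() above: atomically set the
 * requested need-resched bit and report whether the target was *not* polling,
 * i.e. whether the caller would still have to interrupt it.
 */
static bool set_nr_and_not_polling(struct fake_thread_info *ti, int tif_bit)
{
	return !(atomic_fetch_or(&ti->flags, 1UL << tif_bit) & _TIF_POLLING_NRFLAG);
}

int main(void)
{
	struct fake_thread_info busy = { .flags = 0 };
	struct fake_thread_info idle = { .flags = _TIF_POLLING_NRFLAG };

	/* Busy, non-polling target: the bit is set, but an IPI is still needed. */
	printf("busy target needs IPI: %d\n", set_nr_and_not_polling(&busy, TIF_NEED_RESCHED));

	/* Polling idle target: it will notice the bit itself, no IPI required. */
	printf("idle target needs IPI: %d\n", set_nr_and_not_polling(&idle, TIF_NEED_RESCHED_LAZY));

	return 0;
}

As the __resched_curr() hunk shows, the lazy case builds on the same test but skips the kick: a non-lazy request sends the reschedule IPI, while a lazy one only records the bit and leaves it to be acted on at the next scheduling point.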