* TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
*
* However, with slightly different timing the wakeup TASK_RUNNING store can
- * also collide with the TASK_UNINTERRUPTIBLE store. Loosing that store is not
+ * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
* a problem either because that will result in one extra go around the loop
* and our @cond test will save the day.
*
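 * A hedged sketch of the canonical wait loop being described here
 * (the same pattern appears in scheduler comments elsewhere): if the
 * TASK_UNINTERRUPTIBLE store is lost to a concurrent TASK_RUNNING
 * store, schedule() returns immediately and the re-test of @cond
 * terminates the loop.
 *
 *   for (;;) {
 *      set_current_state(TASK_UNINTERRUPTIBLE);
 *      if (cond)
 *         break;
 *      schedule();
 *   }
 *   __set_current_state(TASK_RUNNING);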
/*
* Actual scheduling parameters. Initialized with the values above,
- * they are continously updated during task execution. Note that
+ * they are continuously updated during task execution. Note that
* the remaining runtime could be < 0 in case we are in overrun.
*/
s64 runtime; /* Remaining runtime for this instance */
};
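/*
 * A hedged illustration (simplified from update_curr_dl() in
 * kernel/sched/deadline.c) of how the remaining runtime goes negative:
 * it is charged as the task executes, and an overrun is simply a
 * negative remainder that throttles the task until the next
 * replenishment. 'throttled' stands in for the real throttling path.
 */
dl_se->runtime -= delta_exec;	/* may drop below 0: overrun */
if (dl_se->runtime <= 0)
	throttled = true;	/* wait for replenishment */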
#ifdef CONFIG_CPU_ISOLATION
-DECLARE_STATIC_KEY_FALSE(housekeeping_overriden);
+DECLARE_STATIC_KEY_FALSE(housekeeping_overridden);
extern int housekeeping_any_cpu(enum hk_flags flags);
extern const struct cpumask *housekeeping_cpumask(enum hk_flags flags);
extern void housekeeping_affine(struct task_struct *t, enum hk_flags flags);
static inline bool housekeeping_cpu(int cpu, enum hk_flags flags)
{
#ifdef CONFIG_CPU_ISOLATION
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
return housekeeping_test_cpu(cpu, flags);
#endif
return true;
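/*
 * A hedged usage sketch (caller side, not part of this header): pick
 * a CPU for deferred work while honoring isolation. HK_FLAG_MISC is
 * chosen purely for illustration.
 */
int cpu = housekeeping_any_cpu(HK_FLAG_MISC);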
{
/*
* NOIO implies both NOIO and NOFS and it is a weaker context
- * so always make sure it makes precendence
+ * so always make sure it takes precedence
*/
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
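/*
 * A hedged caller-side sketch: PF_MEMALLOC_NOIO is normally set via
 * the existing memalloc_noio_save()/memalloc_noio_restore() pair, so
 * every allocation in between behaves as if it were GFP_NOIO.
 */
unsigned int noio_flags = memalloc_noio_save();
/* ... allocations here cannot recurse into I/O ... */
memalloc_noio_restore(noio_flags);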
* Various counters maintained by the scheduler and fork(),
* exposed via /proc, sys.c or used by drivers via these APIs.
*
- * ( Note that all these values are aquired without locking,
+ * ( Note that all these values are acquired without locking,
* so they can only be relied on in narrow circumstances. )
*/
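/*
 * A hedged example of such an API: nr_running() (kernel/sched/core.c)
 * sums per-rq counters without locking, so the result is only ever a
 * point-in-time snapshot.
 */
unsigned long nr_running(void)
{
	unsigned long i, sum = 0;

	for_each_online_cpu(i)
		sum += cpu_rq(i)->nr_running;

	return sum;
}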
* preemption, thus the result might have a time-of-check-to-time-of-use
* race. The caller is responsible to use it correctly, for example:
*
- * - from a non-preemptable section (of course)
+ * - from a non-preemptible section (of course)
*
* - from a thread that is bound to a single CPU
*
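 * A hedged illustration of the first option; this_function() and
 * do_work() are hypothetical stand-ins for the interface documented
 * here and its caller:
 *
 *   preempt_disable();
 *   if (this_function())
 *      do_work();
 *   preempt_enable();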
/*
* Perform (stime * rtime) / total, but avoid multiplication overflow by
- * loosing precision when the numbers are big.
+ * losing precision when the numbers are big.
*/
static u64 scale_stime(u64 stime, u64 rtime, u64 total)
{
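	/*
	 * A hedged, condensed sketch of the approach (the in-tree
	 * version in kernel/sched/cputime.c is more careful about
	 * which operand loses bits): shift precision off the big
	 * operands until a 32x32->64 multiply and a 64/32 divide
	 * cannot overflow.
	 */
	for (;;) {
		if (stime > rtime)
			swap(rtime, stime);	/* keep rtime the larger */
		if (!(rtime >> 32) && !(total >> 32))
			break;			/* everything fits in 32 bits */
		rtime >>= 1;			/* drop a bit of precision, */
		total >>= 1;			/* preserving the ratio     */
	}
	return div_u64((u64)(u32)stime * (u32)rtime, (u32)total);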
* refill the runtime and set the deadline a period in the future,
* because keeping the current (absolute) deadline of the task would
* result in breaking guarantees promised to other tasks (refer to
- * Documentation/scheduler/sched-deadline.txt for more informations).
+ * Documentation/scheduler/sched-deadline.txt for more information).
*
* This function returns true if:
*
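 * A hedged sketch of that replenishment (simplified from
 * update_dl_entity() in kernel/sched/deadline.c):
 *
 *   dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
 *   dl_se->runtime  = dl_se->dl_runtime;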
memset(sa, 0, sizeof(*sa));
/*
- * Tasks are intialized with full load to be seen as heavy tasks until
+ * Tasks are initialized with full load to be seen as heavy tasks until
* they get a chance to stabilize to their real load level.
- * Group entities are intialized with zero load to reflect the fact that
+ * Group entities are initialized with zero load to reflect the fact that
* nothing has been attached to the task group yet.
*/
if (entity_is_task(se))
/*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
- * - Substract its load from the cfs_rq->runnable_avg.
- * - Substract its previous weight from cfs_rq->load.weight.
+ * - Subtract its load from the cfs_rq->runnable_avg.
+ * - Subtract its previous weight from cfs_rq->load.weight.
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
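/*
 * A hedged sketch of the corresponding calls in dequeue_entity()
 * (names from kernel/sched/fair.c; exact signatures vary across
 * kernel versions):
 */
update_load_avg(cfs_rq, se, UPDATE_TG);	/* sync entity and cfs_rq with now */
account_entity_dequeue(cfs_rq, se);	/* cfs_rq->load.weight -= se->load.weight */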
*/
#include "sched.h"
-DEFINE_STATIC_KEY_FALSE(housekeeping_overriden);
-EXPORT_SYMBOL_GPL(housekeeping_overriden);
+DEFINE_STATIC_KEY_FALSE(housekeeping_overridden);
+EXPORT_SYMBOL_GPL(housekeeping_overridden);
static cpumask_var_t housekeeping_mask;
static unsigned int housekeeping_flags;
int housekeeping_any_cpu(enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
return cpumask_any_and(housekeeping_mask, cpu_online_mask);
return smp_processor_id();
const struct cpumask *housekeeping_cpumask(enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
return housekeeping_mask;
return cpu_possible_mask;
void housekeeping_affine(struct task_struct *t, enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
set_cpus_allowed_ptr(t, housekeeping_mask);
}
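/*
 * A hedged usage sketch (caller side): a kernel thread confining
 * itself to housekeeping CPUs. HK_FLAG_MISC is chosen purely for
 * illustration; RCU's kthreads, for instance, pass HK_FLAG_RCU.
 */
housekeeping_affine(current, HK_FLAG_MISC);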
bool housekeeping_test_cpu(int cpu, enum hk_flags flags)
{
- if (static_branch_unlikely(&housekeeping_overriden))
+ if (static_branch_unlikely(&housekeeping_overridden))
if (housekeeping_flags & flags)
return cpumask_test_cpu(cpu, housekeeping_mask);
return true;
if (!housekeeping_flags)
return;
- static_branch_enable(&housekeeping_overriden);
+ static_branch_enable(&housekeeping_overridden);
if (housekeeping_flags & HK_FLAG_TICK)
sched_tick_offload_init();
/*
* Deadline values of the currently executing and the
* earliest ready task on this rq. Caching these facilitates
- * the decision wether or not a ready but not running task
+ * the decision whether or not a ready but not running task
* should migrate somewhere else.
*/
struct {
#ifdef CONFIG_SMP
/*
* After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
- * successfuly executed on another CPU. We must ensure that updates of
+ * successfully executed on another CPU. We must ensure that updates of
* per-task data have been completed by this moment.
*/
smp_wmb();
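/*
 * A hedged continuation sketch (from __set_task_cpu(); the exact
 * store varies with CONFIG_THREAD_INFO_IN_TASK): the ->cpu
 * publication that the barrier above orders against the earlier
 * per-task updates.
 */
WRITE_ONCE(p->cpu, cpu);
p->wake_cpu = cpu;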