c2n = per_cpu_ptr(&cyc2ns, cpu);
- raw_write_seqcount_latch(&c2n->seq);
+ write_seqcount_latch_begin(&c2n->seq);
c2n->data[0] = data;
- raw_write_seqcount_latch(&c2n->seq);
+ write_seqcount_latch(&c2n->seq);
c2n->data[1] = data;
+ write_seqcount_latch_end(&c2n->seq);
}
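
For reference, the read side that pairs with this two-copy write sequence is the usual latch read loop. The sketch below is purely illustrative (the struct and function names are invented, not part of the patch); it uses the non-raw reader helpers that the rest of the series converts callers to:

#include <linux/types.h>
#include <linux/seqlock.h>

/* Illustrative two-copy latch value; not from the patch. */
struct latch_value {
        seqcount_latch_t        seq;
        u64                     data[2];
};

static u64 latch_value_read(struct latch_value *lv)
{
        unsigned int seq;
        u64 val;

        do {
                /* Pick whichever copy is currently stable ... */
                seq = read_seqcount_latch(&lv->seq);
                val = lv->data[seq & 1];
                /* ... and retry if the writer flipped copies meanwhile. */
        } while (read_seqcount_latch_retry(&lv->seq, seq));

        return val;
}
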
static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
*
* If we need to allow unconditional lookups (say as required for NMI context
* usage) we need a more complex setup; this data structure provides this by
- * employing the latch technique -- see @raw_write_seqcount_latch -- to
+ * employing the latch technique -- see @write_seqcount_latch_begin -- to
* implement a latched RB-tree which does allow for unconditional lookups by
* virtue of always having (at least) one stable copy of the tree.
*
* @ops: operators defining the node order
*
* It inserts @node into @root in an ordered fashion such that we can always
- * observe one complete tree. See the comment for raw_write_seqcount_latch().
+ * observe one complete tree. See the comment for write_seqcount_latch_begin().
*
* The inserts use rcu_assign_pointer() to publish the element such that the
* tree structure is stored before we can observe the new @node.
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_insert(node, root, 0, ops->less);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_insert(node, root, 1, ops->less);
+ write_seqcount_latch_end(&root->seq);
}
/**
*
* Removes @node from the trees of @root in an ordered fashion such that we can
* always observe one complete tree. See the comment for
- * raw_write_seqcount_latch().
+ * write_seqcount_latch_begin().
*
* It is assumed that @node will observe one RCU quiescent state before being
* reused or freed.
struct latch_tree_root *root,
const struct latch_tree_ops *ops)
{
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch_begin(&root->seq);
__lt_erase(node, root, 0);
- raw_write_seqcount_latch(&root->seq);
+ write_seqcount_latch(&root->seq);
__lt_erase(node, root, 1);
+ write_seqcount_latch_end(&root->seq);
}
/**
unsigned int seq;
do {
- seq = raw_read_seqcount_latch(&root->seq);
+ seq = read_seqcount_latch(&root->seq);
node = __lt_find(key, root, seq & 1, ops->comp);
- } while (raw_read_seqcount_latch_retry(&root->seq, seq));
+ } while (read_seqcount_latch_retry(&root->seq, seq));
return node;
}
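
To make the lookup loop concrete: a user of the latched RB-tree embeds a struct latch_tree_node in its objects and supplies the two ordering callbacks through struct latch_tree_ops. The following is a hedged sketch under invented names (example_region, region_tree_ops and friends are illustrative, not from the patch):

#include <linux/rbtree_latch.h>
#include <linux/container_of.h>

struct example_region {
        unsigned long           start;
        unsigned long           size;
        struct latch_tree_node  ltn;    /* one rb_node per tree copy */
};

static __always_inline bool region_less(struct latch_tree_node *a,
                                        struct latch_tree_node *b)
{
        return container_of(a, struct example_region, ltn)->start <
               container_of(b, struct example_region, ltn)->start;
}

static __always_inline int region_comp(void *key, struct latch_tree_node *n)
{
        unsigned long addr = (unsigned long)key;
        struct example_region *r = container_of(n, struct example_region, ltn);

        if (addr < r->start)
                return -1;
        if (addr >= r->start + r->size)
                return 1;
        return 0;
}

static const struct latch_tree_ops region_tree_ops = {
        .less = region_less,
        .comp = region_comp,
};

/* The lookup is unconditional, i.e. usable from NMI context: */
static struct example_region *region_find(struct latch_tree_root *root,
                                          unsigned long addr)
{
        struct latch_tree_node *n;

        n = latch_tree_find((void *)addr, root, &region_tree_ops);
        return n ? container_of(n, struct example_region, ltn) : NULL;
}

Insert and erase, by contrast, still need external serialization among writers, and an erased node must pass through an RCU grace period before it is freed or reused, as the comments above note.
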
/* Must be called under syslog_lock. */
static void latched_seq_write(struct latched_seq *ls, u64 val)
{
- raw_write_seqcount_latch(&ls->latch);
+ write_seqcount_latch_begin(&ls->latch);
ls->val[0] = val;
- raw_write_seqcount_latch(&ls->latch);
+ write_seqcount_latch(&ls->latch);
ls->val[1] = val;
+ write_seqcount_latch_end(&ls->latch);
}
/* Can be called from any context. */
u64 val;
do {
- seq = raw_read_seqcount_latch(&ls->latch);
+ seq = read_seqcount_latch(&ls->latch);
idx = seq & 0x1;
val = ls->val[idx];
- } while (raw_read_seqcount_latch_retry(&ls->latch, seq));
+ } while (read_seqcount_latch_retry(&ls->latch, seq));
return val;
}
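
For completeness, the object backing this read/write pair is just a seqcount_latch_t plus two copies of the value. Roughly (a sketch, possibly not verbatim from the tree), printk declares and statically initializes it as:

struct latched_seq {
        seqcount_latch_t        latch;
        u64                     val[2];
};

static struct latched_seq clear_seq = {
        .latch          = SEQCNT_LATCH_ZERO(clear_seq.latch),
        .val[0]         = 0,
        .val[1]         = 0,
};
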
notrace struct clock_read_data *sched_clock_read_begin(unsigned int *seq)
{
- *seq = raw_read_seqcount_latch(&cd.seq);
+ *seq = read_seqcount_latch(&cd.seq);
return cd.read_data + (*seq & 1);
}
notrace int sched_clock_read_retry(unsigned int seq)
{
- return raw_read_seqcount_latch_retry(&cd.seq, seq);
+ return read_seqcount_latch_retry(&cd.seq, seq);
}
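
These two helpers are consumed by the lockless clock read loop; __sched_clock(), whose signature appears as context just below, is the in-tree consumer. A hedged paraphrase of such a consumer (with the cycle-to-nanosecond conversion written out inline rather than through the file's helper) looks like:

static u64 sched_clock_sample_sketch(void)
{
        struct clock_read_data *rd;
        unsigned int seq;
        u64 cyc, ns;

        do {
                rd = sched_clock_read_begin(&seq);

                /* Cycles elapsed since the snapshotted epoch ... */
                cyc = (rd->read_sched_clock() - rd->epoch_cyc) &
                      rd->sched_clock_mask;
                /* ... scaled and added to the epoch's nanosecond value. */
                ns = rd->epoch_ns + ((cyc * rd->mult) >> rd->shift);
        } while (sched_clock_read_retry(seq));

        return ns;
}
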
static __always_inline unsigned long long __sched_clock(void)
static void update_clock_read_data(struct clock_read_data *rd)
{
/* steer readers towards the odd copy */
- raw_write_seqcount_latch(&cd.seq);
+ write_seqcount_latch_begin(&cd.seq);
/* now it's safe for us to update the normal (even) copy */
cd.read_data[0] = *rd;
/* switch readers back to the even copy */
- raw_write_seqcount_latch(&cd.seq);
+ write_seqcount_latch(&cd.seq);
/* update the backup (odd) copy with the new data */
cd.read_data[1] = *rd;
+
+ write_seqcount_latch_end(&cd.seq);
}
/*
*/
static u64 notrace suspended_sched_clock_read(void)
{
- unsigned int seq = raw_read_seqcount_latch(&cd.seq);
+ unsigned int seq = read_seqcount_latch(&cd.seq);
return cd.read_data[seq & 1].epoch_cyc;
}
* We want to use this from any context including NMI and tracing /
* instrumenting the timekeeping code itself.
*
- * Employ the latch technique; see @raw_write_seqcount_latch.
+ * Employ the latch technique; see @write_seqcount_latch.
*
* So if an NMI hits the update of base[0] then it will use base[1]
* which is still consistent. In the worst case this can result in a
struct tk_read_base *base = tkf->base;
/* Force readers off to base[1] */
- raw_write_seqcount_latch(&tkf->seq);
+ write_seqcount_latch_begin(&tkf->seq);
/* Update base[0] */
memcpy(base, tkr, sizeof(*base));
/* Force readers back to base[0] */
- raw_write_seqcount_latch(&tkf->seq);
+ write_seqcount_latch(&tkf->seq);
/* Update base[1] */
memcpy(base + 1, base, sizeof(*base));
+
+ write_seqcount_latch_end(&tkf->seq);
}
static __always_inline u64 __ktime_get_fast_ns(struct tk_fast *tkf)
u64 now;
do {
- seq = raw_read_seqcount_latch(&tkf->seq);
+ seq = read_seqcount_latch(&tkf->seq);
tkr = tkf->base + (seq & 0x01);
now = ktime_to_ns(tkr->base);
now += __timekeeping_get_ns(tkr);
- } while (raw_read_seqcount_latch_retry(&tkf->seq, seq));
+ } while (read_seqcount_latch_retry(&tkf->seq, seq));
return now;
}
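
The fast reader above backs NMI-safe accessors such as ktime_get_mono_fast_ns(). A small usage sketch (the event structure and caller are invented for illustration):

/* Illustrative only: timestamping an event from NMI/tracing context.
 * The reader never waits on the writer; at worst it sees the previous,
 * still-consistent copy and the timestamp is a few nanoseconds off. */
struct my_trace_event {
        u64     ts;
};

static void my_nmi_record(struct my_trace_event *ev)
{
        ev->ts = ktime_get_mono_fast_ns();
}
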