* @t: the given target.
* @ranges: array of new monitoring target ranges.
* @nr_ranges: length of @ranges.
- * @min_sz_region: minimum region size.
+ * @min_region_sz: minimum region size.
*
* This function adds new regions to, or modifies existing regions of, a
* monitoring target to fit in the specified ranges.
* Return: 0 on success, or a negative error code otherwise.
*/
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
- unsigned int nr_ranges, unsigned long min_sz_region)
+ unsigned int nr_ranges, unsigned long min_region_sz)
{
struct damon_region *r, *next;
unsigned int i;
/* no region intersects with this range */
newr = damon_new_region(
ALIGN_DOWN(range->start,
- min_sz_region),
- ALIGN(range->end, min_sz_region));
+ min_region_sz),
+ ALIGN(range->end, min_region_sz));
if (!newr)
return -ENOMEM;
damon_insert_region(newr, damon_prev_region(r), r, t);
} else {
/* resize intersecting regions to fit in this range */
first->ar.start = ALIGN_DOWN(range->start,
- min_sz_region);
- last->ar.end = ALIGN(range->end, min_sz_region);
+ min_region_sz);
+ last->ar.end = ALIGN(range->end, min_region_sz);
/* fill possible holes in the range */
err = damon_fill_regions_holes(first, last, t);
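/*
 * Illustrative aside (not part of the patch): the ALIGN_DOWN()/ALIGN()
 * pair above widens a requested range outward to @min_region_sz
 * boundaries.  Assuming min_region_sz == 4096, a hypothetical caller
 * asking for [5000, 9000) would end up with the region [4096, 12288):
 *
 *	struct damon_addr_range range = { .start = 5000, .end = 9000, };
 *
 *	err = damon_set_regions(t, &range, 1, 4096);
 */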
ctx->attrs.max_nr_regions = 1000;
ctx->addr_unit = 1;
- ctx->min_sz_region = DAMON_MIN_REGION_SZ;
+ ctx->min_region_sz = DAMON_MIN_REGION_SZ;
INIT_LIST_HEAD(&ctx->adaptive_targets);
INIT_LIST_HEAD(&ctx->schemes);
* If @src has no region, @dst keeps its current regions.
*/
static int damon_commit_target_regions(struct damon_target *dst,
- struct damon_target *src, unsigned long src_min_sz_region)
+ struct damon_target *src, unsigned long src_min_region_sz)
{
struct damon_region *src_region;
struct damon_addr_range *ranges;
i = 0;
damon_for_each_region(src_region, src)
ranges[i++] = src_region->ar;
- err = damon_set_regions(dst, ranges, i, src_min_sz_region);
+ err = damon_set_regions(dst, ranges, i, src_min_region_sz);
kfree(ranges);
return err;
}
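/*
 * Illustrative aside (not part of the patch): if @src held two regions,
 * say [0, 8192) and [16384, 32768), the loop above would flatten them
 * into a two-entry ranges[] array, and damon_set_regions() would replay
 * them onto @dst using the *source* context's minimum region size, so the
 * committed layout keeps the alignment it had in @src.
 */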
static int damon_commit_target(
struct damon_target *dst, bool dst_has_pid,
struct damon_target *src, bool src_has_pid,
- unsigned long src_min_sz_region)
+ unsigned long src_min_region_sz)
{
int err;
- err = damon_commit_target_regions(dst, src, src_min_sz_region);
+ err = damon_commit_target_regions(dst, src, src_min_region_sz);
if (err)
return err;
if (dst_has_pid)
err = damon_commit_target(
dst_target, damon_target_has_pid(dst),
src_target, damon_target_has_pid(src),
- src->min_sz_region);
+ src->min_region_sz);
if (err)
return err;
} else {
return -ENOMEM;
err = damon_commit_target(new_target, false,
src_target, damon_target_has_pid(src),
- src->min_sz_region);
+ src->min_region_sz);
if (err) {
damon_destroy_target(new_target, NULL);
return err;
}
dst->ops = src->ops;
dst->addr_unit = src->addr_unit;
- dst->min_sz_region = src->min_sz_region;
+ dst->min_region_sz = src->min_region_sz;
return 0;
}
if (ctx->attrs.min_nr_regions)
sz /= ctx->attrs.min_nr_regions;
- if (sz < ctx->min_sz_region)
- sz = ctx->min_sz_region;
+ if (sz < ctx->min_region_sz)
+ sz = ctx->min_region_sz;
return sz;
}
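/*
 * Worked example (illustrative only): for a 1 GiB target with
 * attrs.min_nr_regions == 10, sz becomes ~102 MiB and is returned
 * unchanged.  For a 32 KiB target with the same setting, 32 KiB / 10
 * ~= 3 KiB falls below min_region_sz and is clamped up to it, so no
 * region is ever sized below the configured minimum.
 */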
* @t: The target of the region.
* @rp: The pointer to the region.
* @s: The scheme to be applied.
- * @min_sz_region: minimum region size.
+ * @min_region_sz: minimum region size.
*
* If a quota of a scheme has been exceeded in a quota charge window, the
* scheme's action would be applied to only a part of the target access
* pattern fulfilling
* Return: true if the region should be entirely skipped, false otherwise.
*/
static bool damos_skip_charged_region(struct damon_target *t,
- struct damon_region **rp, struct damos *s, unsigned long min_sz_region)
+ struct damon_region **rp, struct damos *s,
+ unsigned long min_region_sz)
{
struct damon_region *r = *rp;
struct damos_quota *quota = &s->quota;
if (quota->charge_addr_from && r->ar.start <
quota->charge_addr_from) {
sz_to_skip = ALIGN_DOWN(quota->charge_addr_from -
- r->ar.start, min_sz_region);
+ r->ar.start, min_region_sz);
if (!sz_to_skip) {
- if (damon_sz_region(r) <= min_sz_region)
+ if (damon_sz_region(r) <= min_region_sz)
return true;
- sz_to_skip = min_sz_region;
+ sz_to_skip = min_region_sz;
}
damon_split_region_at(t, r, sz_to_skip);
r = damon_next_region(r);
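/*
 * Worked example (illustrative only): if the previous charge window
 * stopped at charge_addr_from == r->ar.start + 10000 and min_region_sz
 * is 4096, sz_to_skip becomes ALIGN_DOWN(10000, 4096) == 8192; the
 * region is split at that offset and the already-charged head is
 * skipped.  If the alignment yields 0 but the region is still bigger
 * than min_region_sz, one minimum-sized chunk is skipped instead.
 */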
static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t,
struct damon_region *r, struct damos_filter *filter,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
bool matched = false;
struct damon_target *ti;
matched = target_idx == filter->target_idx;
break;
case DAMOS_FILTER_TYPE_ADDR:
- start = ALIGN_DOWN(filter->addr_range.start, min_sz_region);
- end = ALIGN_DOWN(filter->addr_range.end, min_sz_region);
+ start = ALIGN_DOWN(filter->addr_range.start, min_region_sz);
+ end = ALIGN_DOWN(filter->addr_range.end, min_region_sz);
/* inside the range */
if (start <= r->ar.start && r->ar.end <= end) {
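/*
 * Illustrative aside (not part of the patch): note that both bounds are
 * rounded *down* here, unlike in damon_set_regions().  With
 * min_region_sz == 4096, a filter for [5000, 9000) effectively covers
 * [4096, 8192), so a region [4096, 8192) counts as fully inside the
 * range.
 */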
s->core_filters_allowed = false;
damos_for_each_core_filter(filter, s) {
- if (damos_filter_match(ctx, t, r, filter, ctx->min_sz_region)) {
+ if (damos_filter_match(ctx, t, r, filter, ctx->min_region_sz)) {
if (filter->allow)
s->core_filters_allowed = true;
return !filter->allow;
if (c->ops.apply_scheme) {
if (quota->esz && quota->charged_sz + sz > quota->esz) {
sz = ALIGN_DOWN(quota->esz - quota->charged_sz,
- c->min_sz_region);
+ c->min_region_sz);
if (!sz)
goto update_stat;
damon_split_region_at(t, r, sz);
if (quota->esz && quota->charged_sz >= quota->esz)
continue;
- if (damos_skip_charged_region(t, &r, s, c->min_sz_region))
+ if (damos_skip_charged_region(t, &r, s, c->min_region_sz))
continue;
if (s->max_nr_snapshots &&
/* Split every region in the given target into 'nr_subs' regions */
static void damon_split_regions_of(struct damon_target *t, int nr_subs,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
struct damon_region *r, *next;
unsigned long sz_region, sz_sub = 0;
sz_region = damon_sz_region(r);
for (i = 0; i < nr_subs - 1 &&
- sz_region > 2 * min_sz_region; i++) {
+ sz_region > 2 * min_region_sz; i++) {
/*
* Randomly select the size of the left sub-region to be at
* least 10% and at most 90% of the original region.
*/
sz_sub = ALIGN_DOWN(damon_rand(1, 10) *
- sz_region / 10, min_sz_region);
+ sz_region / 10, min_region_sz);
/* Do not allow blank region */
if (sz_sub == 0 || sz_sub >= sz_region)
continue;
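/*
 * Worked example (illustrative only): for sz_region == 400 KiB and
 * min_region_sz == 4096, a damon_rand(1, 10) draw of 3 gives sz_sub ==
 * ALIGN_DOWN(120 KiB, 4096) == 120 KiB, splitting the region into a
 * 120 KiB left part and a 280 KiB remainder.  A draw of 10 would make
 * sz_sub == sz_region and is rejected by the check above.
 */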
nr_subregions = 3;
damon_for_each_target(t, ctx)
- damon_split_regions_of(t, nr_subregions, ctx->min_sz_region);
+ damon_split_regions_of(t, nr_subregions, ctx->min_region_sz);
last_nr_regions = nr_regions;
}
* @t: The monitoring target to set the region.
* @start: The pointer to the start address of the region.
* @end: The pointer to the end address of the region.
- * @min_sz_region: Minimum region size.
+ * @min_region_sz: Minimum region size.
*
* This function sets the region of @t as requested by @start and @end. If the
* values of @start and @end are zero, however, this function finds the biggest
*/
int damon_set_region_biggest_system_ram_default(struct damon_target *t,
unsigned long *start, unsigned long *end,
- unsigned long min_sz_region)
+ unsigned long min_region_sz)
{
struct damon_addr_range addr_range;
addr_range.start = *start;
addr_range.end = *end;
- return damon_set_regions(t, &addr_range, 1, min_sz_region);
+ return damon_set_regions(t, &addr_range, 1, min_region_sz);
}
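/*
 * Hypothetical usage (illustrative): passing zeroed *start and *end asks
 * the function to discover the biggest 'System RAM' resource itself:
 *
 *	unsigned long start = 0, end = 0;
 *
 *	err = damon_set_region_biggest_system_ram_default(t, &start, &end,
 *			DAMON_MIN_REGION_SZ);
 */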
/*