*/
static __must_check bool zram_slot_trylock(struct zram *zram, u32 index)
{
- unsigned long *lock = &zram->table[index].flags;
+ unsigned long *lock = &zram->table[index].__lock;
if (!test_and_set_bit_lock(ZRAM_ENTRY_LOCK, lock)) {
mutex_acquire(slot_dep_map(zram, index), 0, 1, _RET_IP_);
static void zram_slot_lock(struct zram *zram, u32 index)
{
- unsigned long *lock = &zram->table[index].flags;
+ unsigned long *lock = &zram->table[index].__lock;
mutex_acquire(slot_dep_map(zram, index), 0, 0, _RET_IP_);
wait_on_bit_lock(lock, ZRAM_ENTRY_LOCK, TASK_UNINTERRUPTIBLE);
static void zram_slot_unlock(struct zram *zram, u32 index)
{
- unsigned long *lock = &zram->table[index].flags;
+ unsigned long *lock = &zram->table[index].__lock;
mutex_release(slot_dep_map(zram, index), _RET_IP_);
clear_and_wake_up_bit(ZRAM_ENTRY_LOCK, lock);
}
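/*
 * For reference, a minimal sketch of the zram_table_entry layout these
 * hunks appear to assume: the slot lock bit gets its own __lock word,
 * presumably so non-atomic updates of attr.flags (done under the slot
 * lock) can never clobber the atomically-set lock bit, and the remaining
 * per-slot metadata is grouped under attr. slot_dep_map() is presumably a
 * per-slot (or shared) lockdep map not shown in this hunk. The real
 * definition lives in zram_drv.h; everything here other than __lock,
 * attr.flags and attr.ac_time is a guess.
 */
struct zram_table_entry {
	unsigned long __lock;		/* only ZRAM_ENTRY_LOCK is set/cleared here */
	unsigned long handle;		/* zsmalloc handle (assumed unchanged) */
	struct {
		unsigned long flags;	/* size in the low ZRAM_FLAG_SHIFT bits, ZRAM_* flags above */
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
		u32 ac_time;		/* last access time, seconds since boot */
#endif
	} attr;
};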
static bool zram_test_flag(struct zram *zram, u32 index,
- enum zram_pageflags flag)
+ enum zram_pageflags flag)
{
- return zram->table[index].flags & BIT(flag);
+ return zram->table[index].attr.flags & BIT(flag);
}
static void zram_set_flag(struct zram *zram, u32 index,
- enum zram_pageflags flag)
+ enum zram_pageflags flag)
{
- zram->table[index].flags |= BIT(flag);
+ zram->table[index].attr.flags |= BIT(flag);
}
static void zram_clear_flag(struct zram *zram, u32 index,
- enum zram_pageflags flag)
+ enum zram_pageflags flag)
{
- zram->table[index].flags &= ~BIT(flag);
+ zram->table[index].attr.flags &= ~BIT(flag);
}
static size_t zram_get_obj_size(struct zram *zram, u32 index)
{
- return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
+ return zram->table[index].attr.flags & (BIT(ZRAM_FLAG_SHIFT) - 1);
}
-static void zram_set_obj_size(struct zram *zram,
- u32 index, size_t size)
+static void zram_set_obj_size(struct zram *zram, u32 index, size_t size)
{
- unsigned long flags = zram->table[index].flags >> ZRAM_FLAG_SHIFT;
+ unsigned long flags = zram->table[index].attr.flags >> ZRAM_FLAG_SHIFT;
- zram->table[index].flags = (flags << ZRAM_FLAG_SHIFT) | size;
+ zram->table[index].attr.flags = (flags << ZRAM_FLAG_SHIFT) | size;
}
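/*
 * Layout of attr.flags as used by the size/flag helpers above (bit
 * positions come from zram_drv.h, shown here only for orientation):
 *
 *   attr.flags:  [ ZRAM_* page flags ....... | compressed object size ]
 *                  bits >= ZRAM_FLAG_SHIFT     bits 0..ZRAM_FLAG_SHIFT-1
 *
 * zram_get_obj_size() masks with BIT(ZRAM_FLAG_SHIFT) - 1 to read the
 * size, and zram_set_obj_size() assumes size < BIT(ZRAM_FLAG_SHIFT).
 */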
static inline bool zram_allocated(struct zram *zram, u32 index)
* Clear the previous priority value first, in case we recompress
* an already recompressed page
*/
- zram->table[index].flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
- ZRAM_COMP_PRIORITY_BIT1);
- zram->table[index].flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
+ zram->table[index].attr.flags &= ~(ZRAM_COMP_PRIORITY_MASK <<
+ ZRAM_COMP_PRIORITY_BIT1);
+ zram->table[index].attr.flags |= (prio << ZRAM_COMP_PRIORITY_BIT1);
}
static inline u32 zram_get_priority(struct zram *zram, u32 index)
{
- u32 prio = zram->table[index].flags >> ZRAM_COMP_PRIORITY_BIT1;
+ u32 prio = zram->table[index].attr.flags >> ZRAM_COMP_PRIORITY_BIT1;
return prio & ZRAM_COMP_PRIORITY_MASK;
}
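/*
 * Illustrative round trip for the priority helpers above, assuming (purely
 * for the example) ZRAM_COMP_PRIORITY_BIT1 == 16 and
 * ZRAM_COMP_PRIORITY_MASK == 0x3; the real values come from zram_drv.h:
 *
 *   set prio 2:  attr.flags = (attr.flags & ~(0x3 << 16)) | (2 << 16);
 *   get prio:    ((attr.flags >> 16) & 0x3) == 2
 *
 * i.e. the compression priority is a small field inside attr.flags, above
 * the object-size bits.
 */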
zram_clear_flag(zram, index, ZRAM_IDLE);
zram_clear_flag(zram, index, ZRAM_PP_SLOT);
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- zram->table[index].ac_time = ktime_get_boottime();
+ zram->table[index].attr.ac_time = (u32)ktime_get_boottime_seconds();
#endif
}
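/*
 * Note on the ac_time conversion above (rationale inferred, not stated in
 * this hunk): ktime_get_boottime_seconds() returns whole seconds since
 * boot, which comfortably fit in a u32 and give enough resolution for
 * idle tracking, so attr.ac_time can be a 32-bit field instead of a
 * nanosecond-resolution ktime_t.
 */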
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
is_idle = !cutoff ||
- ktime_after(cutoff, zram->table[index].ac_time);
+ cutoff > zram->table[index].attr.ac_time;
#endif
if (is_idle)
zram_set_flag(zram, index, ZRAM_IDLE);
const char *buf, size_t len)
{
struct zram *zram = dev_to_zram(dev);
- ktime_t cutoff_time = 0;
+ ktime_t cutoff = 0;
if (!sysfs_streq(buf, "all")) {
/*
* If it did not parse as 'all', try to treat it as an integer
* when we have memory tracking enabled.
*/
- u64 age_sec;
+ u32 age_sec;
- if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) && !kstrtoull(buf, 0, &age_sec))
- cutoff_time = ktime_sub(ktime_get_boottime(),
- ns_to_ktime(age_sec * NSEC_PER_SEC));
+ if (IS_ENABLED(CONFIG_ZRAM_TRACK_ENTRY_ACTIME) &&
+ !kstrtouint(buf, 0, &age_sec))
+ cutoff = ktime_get_boottime_seconds() - age_sec;
else
return -EINVAL;
}
return -EINVAL;
/*
- * A cutoff_time of 0 marks everything as idle, this is the
+ * A cutoff of 0 marks everything as idle; this is the
* "all" behavior.
*/
- mark_idle(zram, cutoff_time);
+ mark_idle(zram, cutoff);
return len;
}
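/*
 * Example usage of the idle knob handled above (device name is just an
 * example; marking by age requires CONFIG_ZRAM_TRACK_ENTRY_ACTIME):
 *
 *   echo all > /sys/block/zram0/idle    # mark every allocated slot idle
 *   echo 300 > /sys/block/zram0/idle    # mark slots not accessed in the last 300s
 */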
if (!zram_allocated(zram, index))
goto next;
- ts = ktime_to_timespec64(zram->table[index].ac_time);
+ ts.tv_sec = zram->table[index].attr.ac_time;
+ ts.tv_nsec = 0;
copied = snprintf(kbuf + written, count,
"%12zd %12lld.%06lu %c%c%c%c%c%c\n",
index, (s64)ts.tv_sec,
unsigned long handle;
#ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME
- zram->table[index].ac_time = 0;
+ zram->table[index].attr.ac_time = 0;
#endif
zram_clear_flag(zram, index, ZRAM_IDLE);
struct zram_table_entry zram_te;
int ret;
- BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.flags) * 8);
+ BUILD_BUG_ON(__NR_ZRAM_PAGEFLAGS > sizeof(zram_te.attr.flags) * 8);
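/*
 * The compile-time check above guarantees that every zram_pageflags bit
 * still fits into attr.flags; it fires if __NR_ZRAM_PAGEFLAGS ever
 * outgrows the width of that field.
 */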
ret = cpuhp_setup_state_multi(CPUHP_ZCOMP_PREPARE, "block/zram:prepare",
zcomp_cpu_up_prepare, zcomp_cpu_dead);