return ret;
down_write(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
zram->wb_limit_enable = val;
- spin_unlock(&zram->wb_limit_lock);
up_write(&zram->init_lock);
ret = len;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->wb_limit_enable;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return sysfs_emit(buf, "%d\n", val);
return ret;
down_write(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
zram->bd_wb_limit = val;
- spin_unlock(&zram->wb_limit_lock);
up_write(&zram->init_lock);
ret = len;
}
/*
 * writeback_limit_show() - sysfs read handler that reports the remaining
 * writeback budget (bd_wb_limit) as a decimal u64 via sysfs_emit().
 *
 * This hunk drops the wb_limit_lock spinlock around the read; the value
 * is now read under init_lock held in read mode only.
 *
 * NOTE(review): a read hold of init_lock admits concurrent readers, so
 * this is only safe if every writer of bd_wb_limit holds init_lock for
 * write (as writeback_limit_store does) or writeback accounting is
 * otherwise serialized - confirm against the writeback path.
 */
static ssize_t writeback_limit_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u64 val;
struct zram *zram = dev_to_zram(dev);
down_read(&zram->init_lock);
- spin_lock(&zram->wb_limit_lock);
val = zram->bd_wb_limit;
- spin_unlock(&zram->wb_limit_lock);
up_read(&zram->init_lock);
return sysfs_emit(buf, "%llu\n", val);
/*
 * zram_account_writeback_rollback() - credit back one page worth of
 * writeback-limit budget after a failed/aborted writeback.  The limit is
 * accounted in 4K units, hence the 1UL << (PAGE_SHIFT - 12) increment.
 *
 * The hunk replaces the wb_limit_lock critical section with a lockdep
 * assertion that init_lock is held for read.
 *
 * NOTE(review): init_lock held in *read* mode permits concurrent holders,
 * yet bd_wb_limit is modified (read-modify-write) here.  This is only
 * race-free if at most one writeback context can run at a time - confirm
 * that the writeback path is serialized before relying on this.
 */
static void zram_account_writeback_rollback(struct zram *zram)
{
- spin_lock(&zram->wb_limit_lock);
+ lockdep_assert_held_read(&zram->init_lock);
+
if (zram->wb_limit_enable)
zram->bd_wb_limit += 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
}
/*
 * zram_account_writeback_submit() - charge one page worth of
 * writeback-limit budget when a writeback request is submitted.  The
 * "&& zram->bd_wb_limit > 0" guard prevents the u64 counter from
 * wrapping below zero.
 *
 * As with the rollback helper, the spinlock is replaced by a lockdep
 * assertion that init_lock is held for read.
 *
 * NOTE(review): same concern as zram_account_writeback_rollback() - a
 * read hold of init_lock does not exclude other read holders, so this
 * read-modify-write of bd_wb_limit assumes writeback submission is
 * otherwise single-threaded; verify.
 */
static void zram_account_writeback_submit(struct zram *zram)
{
- spin_lock(&zram->wb_limit_lock);
+ lockdep_assert_held_read(&zram->init_lock);
+
if (zram->wb_limit_enable && zram->bd_wb_limit > 0)
zram->bd_wb_limit -= 1UL << (PAGE_SHIFT - 12);
- spin_unlock(&zram->wb_limit_lock);
}
static int zram_writeback_complete(struct zram *zram, struct zram_wb_req *req)
u32 index = 0;
while ((pps = select_pp_slot(ctl))) {
- spin_lock(&zram->wb_limit_lock);
if (zram->wb_limit_enable && !zram->bd_wb_limit) {
- spin_unlock(&zram->wb_limit_lock);
ret = -EIO;
break;
}
- spin_unlock(&zram->wb_limit_lock);
while (!req) {
req = zram_select_idle_req(wb_ctl);
init_rwsem(&zram->init_lock);
#ifdef CONFIG_ZRAM_WRITEBACK
zram->wb_batch_size = 32;
- spin_lock_init(&zram->wb_limit_lock);
#endif
/* gendisk structure */