struct gfs2_bufdata *bd;
lock_buffer(bh);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
clear_buffer_dirty(bh);
bd = bh->b_private;
if (bd) {
	if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
		list_del_init(&bd->bd_list);
	else {
		spin_lock(&sdp->sd_ail_lock);
		gfs2_remove_from_journal(bh, REMOVE_JDATA);
		spin_unlock(&sdp->sd_ail_lock);
	}
}
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
}
* again.
*/
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
bh = head;
do {
if (atomic_read(&bh->b_count))
	goto cannot_release;
bh = bh->b_this_page;
} while (bh != head);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
return try_to_free_buffers(folio);
cannot_release:
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
return false;
}
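The scan above walks the folio's circular buffer ring under sd_log_lock and bails out to cannot_release if any buffer is still referenced. A minimal userspace sketch of the same pattern, with a pthread mutex standing in for the spinlock and all names hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct buf {
	atomic_int b_count;		/* reference count */
	struct buf *b_this_page;	/* circular ring, like buffer_head */
};

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static bool can_release(struct buf *head)
{
	struct buf *bh = head;

	pthread_mutex_lock(&log_lock);
	do {
		if (atomic_load(&bh->b_count)) {
			pthread_mutex_unlock(&log_lock);	/* cannot_release */
			return false;
		}
		bh = bh->b_this_page;
	} while (bh != head);
	pthread_mutex_unlock(&log_lock);
	return true;
}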
struct buffer_head *bh;
const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
if (nr_revokes == 0)
	break;
}
GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
spin_unlock(&sdp->sd_ail_lock);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
}
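Note the lock ordering in this hunk: sd_log_lock is taken before sd_ail_lock and released after it, and every other hunk in this patch that holds both locks follows the same order, which is what keeps the nesting deadlock-free. A sketch of the invariant, with illustrative names:

#include <pthread.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;	/* outer */
static pthread_mutex_t ail_lock = PTHREAD_MUTEX_INITIALIZER;	/* inner */

static void flush_ail(void)
{
	pthread_mutex_lock(&log_lock);	/* always taken first */
	pthread_mutex_lock(&ail_lock);	/* always taken second */
	/* ... walk the AIL and queue revokes ... */
	pthread_mutex_unlock(&ail_lock);
	pthread_mutex_unlock(&log_lock);
}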
* If none of these conditions are true, our revokes are all
* flushed and we can return.
*/
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
have_revokes = !list_empty(&sdp->sd_log_revokes);
log_in_flight = atomic_read(&sdp->sd_log_in_flight);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
if (have_revokes)
goto flush;
if (log_in_flight)
	log_flush_wait(sdp);
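This hunk reads two pieces of log state under a single hold of the lock, so the decision that follows is based on one consistent snapshot rather than two reads that could be torn by a concurrent update. A simplified userspace illustration, with hypothetical names:

#include <pthread.h>
#include <stdbool.h>

struct log_state {
	pthread_mutex_t lock;
	int nr_revokes;		/* stands in for the sd_log_revokes list */
	int in_flight;		/* stands in for sd_log_in_flight */
};

static void log_snapshot(struct log_state *st, bool *have_revokes,
			 bool *log_in_flight)
{
	/* One lock hold, two reads: the caller decides on a single
	 * consistent view of both values. */
	pthread_mutex_lock(&st->lock);
	*have_revokes = st->nr_revokes != 0;
	*log_in_flight = st->in_flight != 0;
	pthread_mutex_unlock(&st->lock);
}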
/* number of revokes we still have room for */
unsigned int max_revokes = atomic_read(&sdp->sd_log_revokes_available);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
gfs2_ail1_empty(sdp, max_revokes);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
}
/**
goto out_withdraw;
lops_after_commit(sdp, tr);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
sdp->sd_log_blks_reserved = 0;
spin_lock(&sdp->sd_ail_lock);
if (tr && !list_empty(&tr->tr_ail1_list)) {
	list_add(&tr->tr_list, &sdp->sd_ail1_list);
	tr = NULL;
}
spin_unlock(&sdp->sd_ail_lock);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
if (!(flags & GFS2_LOG_HEAD_FLUSH_NORMAL)) {
if (!sdp->sd_log_idle) {
unsigned int unused;
unsigned int maxres;
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
if (sdp->sd_log_tr) {
gfs2_merge_trans(sdp, tr);
}
gfs2_log_release(sdp, unused);
sdp->sd_log_blks_reserved = reserved;
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
}
static inline int gfs2_jrnl_flush_reqd(struct gfs2_sbd *sdp)
*/
#define GFS2_LOG_FLUSH_MIN_BLOCKS 4
-/**
- * gfs2_log_lock - acquire the right to mess with the log manager
- * @sdp: the filesystem
- *
- */
-
-static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
-__acquires(&sdp->sd_log_lock)
-{
- spin_lock(&sdp->sd_log_lock);
-}
-
-/**
- * gfs2_log_unlock - release the right to mess with the log manager
- * @sdp: the filesystem
- *
- */
-
-static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
-__releases(&sdp->sd_log_lock)
-{
- spin_unlock(&sdp->sd_log_lock);
-}
-
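The deleted helpers were pure pass-throughs; their only side benefit was documenting lock acquisition for sparse via the __acquires/__releases annotations. The sketch below (userspace, hypothetical names) shows the shape of what was removed and why inlining the call is behavior-neutral:

#include <pthread.h>

#define __acquires(x)	/* sparse-only annotation; no-op for the compiler */
#define __releases(x)

struct sbd { pthread_mutex_t sd_log_lock; };

static inline void log_lock(struct sbd *sdp) __acquires(&sdp->sd_log_lock)
{
	pthread_mutex_lock(&sdp->sd_log_lock);
}

static inline void log_unlock(struct sbd *sdp) __releases(&sdp->sd_log_lock)
{
	pthread_mutex_unlock(&sdp->sd_log_lock);
}

/* After this patch, callers write the lock call directly, e.g.
 * pthread_mutex_lock(&sdp->sd_log_lock) in this analogue; with
 * inlining, both spellings compile to identical code. */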
static inline void gfs2_ordered_add_inode(struct gfs2_inode *ip)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
unsigned n;
__be64 *ptr;
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
list_sort(NULL, blist, blocknr_cmp);
bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
while(total) {
num = total;
if (total > limit)
num = limit;
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
page = gfs2_get_log_desc(sdp,
is_databuf ? GFS2_LOG_DESC_JDATA :
GFS2_LOG_DESC_METADATA, num + 1, num);
ld = page_address(page);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
ptr = (__be64 *)(ld + 1);
n = 0;
list_for_each_entry_continue(bd1, blist, bd_list) {
	*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
	if (++n >= num)
		break;
}
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
gfs2_log_write_page(sdp, page);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
n = 0;
list_for_each_entry_continue(bd2, blist, bd_list) {
get_bh(bd2->bd_bh);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
lock_buffer(bd2->bd_bh);
if (buffer_escaped(bd2->bd_bh)) {
	/* ... copy the block into a scratch page, clear the escape
	 * flag, and log the copy instead ... */
} else {
gfs2_log_write_bh(sdp, bd2->bd_bh);
}
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
if (++n >= num)
break;
}
BUG_ON(total < num);
total -= num;
}
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
}
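Throughout this function the lock is dropped before anything that can block (getting a descriptor page, writing a buffer) and re-taken before the protected lists are touched again. A condensed userspace sketch of that drop/re-take discipline, with illustrative names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for gfs2_get_log_desc(): may block, so it must run unlocked. */
static void *get_desc_page(void)
{
	return malloc(4096);
}

static void write_batches(int *remaining)
{
	pthread_mutex_lock(&log_lock);
	while (*remaining > 0) {
		pthread_mutex_unlock(&log_lock);	/* drop across blocking work */
		void *page = get_desc_page();
		pthread_mutex_lock(&log_lock);		/* re-take before list access */
		/* ... refill the page from the protected list; any state read
		 * before the unlock must be treated as stale ... */
		(*remaining)--;
		free(page);
	}
	pthread_mutex_unlock(&log_lock);
}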
static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
struct buffer_head *bh;
u64 end = bstart + blen;
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
spin_lock(&sdp->sd_ail_lock);
list_for_each_entry_safe(tr, s, &sdp->sd_ail1_list, tr_list) {
list_for_each_entry_safe(bd, bs, &tr->tr_ail1_list,
}
}
spin_unlock(&sdp->sd_ail_lock);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
}
static struct buffer_head *gfs2_getjdatabuf(struct gfs2_inode *ip, u64 blkno)
}
if (bh) {
lock_buffer(bh);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
spin_lock(&sdp->sd_ail_lock);
gfs2_remove_from_journal(bh, ty);
spin_unlock(&sdp->sd_ail_lock);
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
brelse(bh);
}
set_bit(TR_TOUCHED, &tr->tr_flags);
goto out;
}
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
bd = bh->b_private;
if (bd == NULL) {
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
if (bh->b_private == NULL)
bd = gfs2_alloc_bufdata(gl, bh);
else
bd = bh->b_private;
lock_buffer(bh);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
}
gfs2_assert(sdp, bd->bd_gl == gl);
set_bit(TR_TOUCHED, &tr->tr_flags);
tr->tr_num_databuf_new++;
list_add_tail(&bd->bd_list, &tr->tr_databuf);
}
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
out:
unlock_buffer(bh);
}
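The bd == NULL path above is the classic unlock-allocate-relock dance: gfs2_alloc_bufdata can sleep, so the spinlock is dropped first, and the b_private check is repeated because another thread may have attached a bufdata in the window. A simplified userspace rendering of the pattern (hypothetical types; the kernel code serializes the re-check with the buffer or page lock instead):

#include <pthread.h>
#include <stdlib.h>

struct buf {
	void *b_private;	/* attached bufdata, or NULL */
};

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

static void attach_bufdata(struct buf *bh)
{
	pthread_mutex_lock(&log_lock);
	if (bh->b_private == NULL) {
		/* Allocation may sleep: drop the lock, allocate, re-take,
		 * then re-check, because another thread may have won the
		 * race while we were unlocked. */
		pthread_mutex_unlock(&log_lock);
		void *bd = malloc(64);
		pthread_mutex_lock(&log_lock);
		if (bh->b_private == NULL)
			bh->b_private = bd;
		else
			free(bd);	/* lost the race */
	}
	pthread_mutex_unlock(&log_lock);
}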
set_bit(TR_TOUCHED, &tr->tr_flags);
goto out;
}
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
bd = bh->b_private;
if (bd == NULL) {
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
unlock_buffer(bh);
folio_lock(bh->b_folio);
if (bh->b_private == NULL)
	bd = gfs2_alloc_bufdata(gl, bh);
else
	bd = bh->b_private;
folio_unlock(bh->b_folio);
lock_buffer(bh);
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
}
gfs2_assert(sdp, bd->bd_gl == gl);
set_bit(TR_TOUCHED, &tr->tr_flags);
list_add(&bd->bd_list, &tr->tr_buf);
tr->tr_num_buf_new++;
out_unlock:
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
out:
unlock_buffer(bh);
}
struct gfs2_bufdata *bd, *tmp;
unsigned int n = len;
- gfs2_log_lock(sdp);
+ spin_lock(&sdp->sd_log_lock);
list_for_each_entry_safe(bd, tmp, &sdp->sd_log_revokes, bd_list) {
if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
list_del_init(&bd->bd_list);
if (--n == 0)
	break;
}
}
- gfs2_log_unlock(sdp);
+ spin_unlock(&sdp->sd_log_lock);
}
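gfs2_trans_remove_revoke iterates with list_for_each_entry_safe precisely because it deletes entries mid-walk. The same unlink-while-iterating idea in plain C (userspace sketch, illustrative names):

#include <stdint.h>
#include <stdlib.h>
#include <pthread.h>

struct revoke {
	uint64_t blkno;
	struct revoke *next;
};

static pthread_mutex_t log_lock = PTHREAD_MUTEX_INITIALIZER;

/* Drop every queued revoke whose block falls in [blkno, blkno + len). */
static void remove_revokes(struct revoke **head, uint64_t blkno, unsigned int len)
{
	pthread_mutex_lock(&log_lock);
	for (struct revoke **pp = head; *pp; ) {
		struct revoke *r = *pp;

		if (r->blkno >= blkno && r->blkno < blkno + len) {
			*pp = r->next;	/* unlink before freeing */
			free(r);
		} else {
			pp = &r->next;
		}
	}
	pthread_mutex_unlock(&log_lock);
}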
void gfs2_trans_free(struct gfs2_sbd *sdp, struct gfs2_trans *tr)