}
void gfs2_glock_free(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
__gfs2_glock_free(gl);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
}
void gfs2_glock_free_later(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
spin_lock(&lru_lock);
list_add(&gl->gl_lru, &sdp->sd_dead_glocks);
* work queue.
*/
static void gfs2_glock_queue_work(struct gfs2_glock *gl, unsigned long delay)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
if (!queue_delayed_work(sdp->sd_glock_wq, &gl->gl_work, delay)) {
/*
static void __gfs2_glock_put(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct address_space *mapping = gfs2_glock2aspace(gl);
lockref_mark_dead(&gl->gl_lockref);
smp_mb__after_atomic();
wake_up_bit(&gh->gh_iflags, HIF_WAIT);
if (gh->gh_flags & GL_ASYNC) {
- struct gfs2_sbd *sdp = gh->gh_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gh->gh_gl);
wake_up(&sdp->sd_async_glock_wait);
}
static void do_promote(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_holder *gh, *current_gh;
if (gfs2_withdrawn(sdp)) {
static void gfs2_set_demote(int nr, struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
set_bit(nr, &gl->gl_flags);
smp_mb();
do_xmote(gl, gh, LM_ST_UNLOCKED, false);
break;
default: /* Everything else */
- fs_err(gl->gl_name.ln_sbd,
+ fs_err(glock_sbd(gl),
"glock %u:%llu requested=%u ret=%u\n",
- gl->gl_name.ln_type, gl->gl_name.ln_number,
+ glock_type(gl), glock_number(gl),
gl->gl_req, ret);
GLOCK_BUG_ON(gl, 1);
}
__acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int ret;
prev_object = gl->gl_object;
gl->gl_object = object;
spin_unlock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == NULL))
+ if (gfs2_assert_warn(glock_sbd(gl), prev_object == NULL))
gfs2_dump_glock(NULL, gl, true);
}
prev_object = gl->gl_object;
gl->gl_object = NULL;
spin_unlock(&gl->gl_lockref.lock);
- if (gfs2_assert_warn(gl->gl_name.ln_sbd, prev_object == object))
+ if (gfs2_assert_warn(glock_sbd(gl), prev_object == object))
gfs2_dump_glock(NULL, gl, true);
}
bool gfs2_queue_try_to_evict(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
if (test_and_set_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
return false;
bool gfs2_queue_verify_delete(struct gfs2_glock *gl, bool later)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
unsigned long delay;
if (test_and_set_bit(GLF_VERIFY_DELETE, &gl->gl_flags))
{
struct delayed_work *dwork = to_delayed_work(work);
struct gfs2_glock *gl = container_of(dwork, struct gfs2_glock, gl_delete);
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);
/*
gfs2_try_to_evict(gl);
if (verify_delete) {
- u64 no_addr = gl->gl_name.ln_number;
+ u64 no_addr = glock_number(gl);
struct inode *inode;
inode = gfs2_lookup_by_inum(sdp, no_addr, gl->gl_no_formal_ino,
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
gl->gl_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != LM_ST_EXCLUSIVE) {
- if (gl->gl_name.ln_type == LM_TYPE_INODE) {
+ if (glock_type(gl) == LM_TYPE_INODE) {
unsigned long holdtime, now = jiffies;
holdtime = gl->gl_tchange + gl->gl_hold_time;
gl->gl_object = NULL;
gl->gl_hold_time = GL_GLOCK_DFT_HOLD;
INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
- if (gl->gl_name.ln_type == LM_TYPE_IOPEN)
+ if (glock_type(gl) == LM_TYPE_IOPEN)
INIT_DELAYED_WORK(&gl->gl_delete, delete_work_func);
mapping = gfs2_glock2aspace(gl);
int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs,
unsigned int retries)
{
- struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(ghs[0].gh_gl);
unsigned long start_time = jiffies;
int i, ret = 0;
long timeout;
static inline void add_to_queue(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_holder *gh2;
GLOCK_BUG_ON(gl, gh->gh_owner_pid == NULL);
fs_err(sdp, "original: %pSR\n", (void *)gh2->gh_ip);
fs_err(sdp, "pid: %d\n", pid_nr(gh2->gh_owner_pid));
fs_err(sdp, "lock type: %d req lock state : %d\n",
- gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
+ glock_type(gh2->gh_gl), gh2->gh_state);
fs_err(sdp, "new: %pSR\n", (void *)gh->gh_ip);
fs_err(sdp, "pid: %d\n", pid_nr(gh->gh_owner_pid));
fs_err(sdp, "lock type: %d req lock state : %d\n",
- gh->gh_gl->gl_name.ln_type, gh->gh_state);
+ glock_type(gh->gh_gl), gh->gh_state);
gfs2_dump_glock(NULL, gl, true);
BUG();
}
int gfs2_glock_nq(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
int error;
if (gfs2_withdrawn(sdp))
gl->gl_lockref.count++;
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE, &gl->gl_flags) &&
- gl->gl_name.ln_type == LM_TYPE_INODE)
+ glock_type(gl) == LM_TYPE_INODE)
delay = gl->gl_hold_time;
gfs2_glock_queue_work(gl, delay);
}
set_bit(GLF_CANCELING, &gl->gl_flags);
spin_unlock(&gl->gl_lockref.lock);
- gl->gl_name.ln_sbd->sd_lockstruct.ls_ops->lm_cancel(gl);
+ glock_sbd(gl)->sd_lockstruct.ls_ops->lm_cancel(gl);
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_CANCELING, &gl->gl_flags);
gfs2_glock_hold(gl);
spin_lock(&gl->gl_lockref.lock);
if (!list_empty(&gl->gl_holders) &&
- gl->gl_name.ln_type == LM_TYPE_INODE) {
+ glock_type(gl) == LM_TYPE_INODE) {
unsigned long now = jiffies;
unsigned long holdtime;
void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
- struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+ struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_MAY_CANCEL, &gl->gl_flags);
gla = list_entry(a, struct gfs2_glock, gl_lru);
glb = list_entry(b, struct gfs2_glock, gl_lru);
- if (gla->gl_name.ln_number > glb->gl_name.ln_number)
+ if (glock_number(gla) > glock_number(glb))
return 1;
- if (gla->gl_name.ln_number < glb->gl_name.ln_number)
+ if (glock_number(gla) < glock_number(glb))
return -1;
return 0;
static bool can_free_glock(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
return !test_bit(GLF_LOCK, &gl->gl_flags) &&
!gl->gl_lockref.count &&
rhashtable_walk_start(&iter);
while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
- if (gl->gl_name.ln_sbd == sdp)
+ if (glock_sbd(gl) == sdp)
examiner(gl);
}
static void flush_delete_work(struct gfs2_glock *gl)
{
- if (gl->gl_name.ln_type == LM_TYPE_IOPEN) {
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ if (glock_type(gl) == LM_TYPE_IOPEN) {
+ struct gfs2_sbd *sdp = glock_sbd(gl);
if (cancel_delayed_work(&gl->gl_delete)) {
queue_delayed_work(sdp->sd_delete_wq,
unsigned long long dtime;
const struct gfs2_holder *gh;
char gflags_buf[32];
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
char fs_id_buf[sizeof(sdp->sd_fsname) + 7];
unsigned long nrpages = 0;
gfs2_print_dbg(seq, "%sG: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d "
"v:%d r:%d m:%ld p:%lu\n",
fs_id_buf, state2str(gl->gl_state),
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number,
+ glock_type(gl),
+ (unsigned long long)glock_number(gl),
gflags2str(gflags_buf, gl),
state2str(gl->gl_target),
state2str(gl->gl_demote_state), dtime,
struct gfs2_glock *gl = iter_ptr;
seq_printf(seq, "G: n:%u/%llx rtt:%llu/%llu rttb:%llu/%llu irt:%llu/%llu dcnt: %llu qcnt: %llu\n",
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number,
+ glock_type(gl),
+ (unsigned long long)glock_number(gl),
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTT],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTVAR],
(unsigned long long)gl->gl_stats.stats[GFS2_LKS_SRTTB],
gl = NULL;
break;
}
- if (gl->gl_name.ln_sbd != gi->sdp)
+ if (glock_sbd(gl) != gi->sdp)
continue;
if (n <= 1) {
if (!lockref_get_not_dead(&gl->gl_lockref))
gl = GFS2_I(inode)->i_iopen_gh.gh_gl;
if (gl) {
seq_printf(seq, "%d %u %u/%llx\n",
- i->tgid, i->fd, gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number);
+ (unsigned long long) glock_number(gl));
}
gfs2_glockfd_seq_show_flock(seq, i);
inode_unlock_shared(inode);
BUG(); } } while(0)
#define gfs2_glock_assert_warn(gl, x) do { if (unlikely(!(x))) { \
gfs2_dump_glock(NULL, gl, true); \
- gfs2_assert_warn((gl)->gl_name.ln_sbd, (x)); } } \
+ gfs2_assert_warn(glock_sbd(gl), (x)); } } \
while (0)
#define gfs2_glock_assert_withdraw(gl, x) do { if (unlikely(!(x))) { \
gfs2_dump_glock(NULL, gl, true); \
- gfs2_assert_withdraw((gl)->gl_name.ln_sbd, (x)); } } \
+ gfs2_assert_withdraw(glock_sbd(gl), (x)); } } \
while (0)
__printf(2, 3)
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
fs_err(sdp,
"AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
bh, (unsigned long long)bh->b_blocknr, bh->b_state,
bh->b_folio->mapping, bh->b_folio->flags.f);
fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
- gl->gl_name.ln_type, gl->gl_name.ln_number,
+ glock_type(gl), glock_number(gl),
gfs2_glock2aspace(gl));
gfs2_lm(sdp, "AIL error\n");
gfs2_withdraw(sdp);
static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
unsigned int nr_revokes)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct list_head *head = &gl->gl_ail_list;
struct gfs2_bufdata *bd, *tmp;
struct buffer_head *bh;
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_trans tr;
unsigned int revokes;
int ret = 0;
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
unsigned int revokes = atomic_read(&gl->gl_ail_count);
int ret;
static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct address_space *metamapping = gfs2_aspace(sdp);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
const unsigned bsize = sdp->sd_sb.sb_bsize;
static int rgrp_go_sync(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
int error;
static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct address_space *mapping = gfs2_aspace(sdp);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
const unsigned bsize = sdp->sd_sb.sb_bsize;
filemap_fdatawrite(metamapping);
error = filemap_fdatawait(metamapping);
if (error)
- gfs2_io_error(gl->gl_name.ln_sbd);
+ gfs2_io_error(glock_sbd(gl));
return error;
}
GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);
- gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
+ gfs2_log_flush(glock_sbd(gl), gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_INODE_GO_SYNC);
filemap_fdatawrite(metamapping);
if (isreg) {
{
struct gfs2_inode *ip = gfs2_glock2inode(gl);
- gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));
+ gfs2_assert_withdraw(glock_sbd(gl), !atomic_read(&gl->gl_ail_count));
if (flags & DIO_METADATA) {
struct address_space *mapping = gfs2_glock2aspace(gl);
}
}
- if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
- gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
+ if (ip == GFS2_I(glock_sbd(gl)->sd_rindex)) {
+ gfs2_log_flush(glock_sbd(gl), NULL,
GFS2_LOG_HEAD_FLUSH_NORMAL |
GFS2_LFC_INODE_GO_INVAL);
- gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
+ glock_sbd(gl)->sd_rindex_uptodate = 0;
}
if (ip && S_ISREG(ip->i_inode.i_mode))
truncate_inode_pages(ip->i_inode.i_mapping, 0);
static void freeze_go_callback(struct gfs2_glock *gl, bool remote)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct super_block *sb = sdp->sd_vfs;
if (!remote ||
*/
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
struct gfs2_glock *j_gl = ip->i_gl;
struct gfs2_log_header_host head;
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
struct gfs2_inode *ip = gl->gl_object;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
if (!remote || test_bit(SDF_KILL, &sdp->sd_flags))
return;
struct rhash_head gl_node;
};
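+/* Accessors for the fields of a glock's lock name (gl_name). */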
+static inline unsigned int glock_type(const struct gfs2_glock *gl)
+{
+ return gl->gl_name.ln_type;
+}
+
+static inline u64 glock_number(const struct gfs2_glock *gl)
+{
+ return gl->gl_name.ln_number;
+}
+
enum {
GIF_QD_LOCKED = 1,
GIF_SW_PAGED = 3,
struct dentry *debugfs_dir; /* debugfs directory */
};
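+/* Superblock (gfs2_sbd) that a given glock belongs to. */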
+static inline struct gfs2_sbd *glock_sbd(const struct gfs2_glock *gl)
+{
+ return gl->gl_name.ln_sbd;
+}
+
#define GFS2_BAD_INO 1
static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp)
static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
{
- const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ const struct gfs2_sbd *sdp = glock_sbd(gl);
preempt_disable();
- this_cpu_ptr(sdp->sd_lkstats)->lkstats[gl->gl_name.ln_type].stats[which]++;
+ this_cpu_ptr(sdp->sd_lkstats)->lkstats[glock_type(gl)].stats[which]++;
preempt_enable();
}
bool blocking)
{
struct gfs2_pcpu_lkstats *lks;
- const unsigned gltype = gl->gl_name.ln_type;
+ const unsigned gltype = glock_type(gl);
unsigned index = blocking ? GFS2_LKS_SRTTB : GFS2_LKS_SRTT;
s64 rtt;
preempt_disable();
rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));
- lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
+ lks = this_cpu_ptr(glock_sbd(gl)->sd_lkstats);
gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */
gfs2_update_stats(&lks->lkstats[gltype], index, rtt); /* Global */
preempt_enable();
static inline void gfs2_update_request_times(struct gfs2_glock *gl)
{
struct gfs2_pcpu_lkstats *lks;
- const unsigned gltype = gl->gl_name.ln_type;
+ const unsigned gltype = glock_type(gl);
ktime_t dstamp;
s64 irt;
dstamp = gl->gl_dstamp;
gl->gl_dstamp = ktime_get_real();
irt = ktime_to_ns(ktime_sub(gl->gl_dstamp, dstamp));
- lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);
+ lks = this_cpu_ptr(glock_sbd(gl)->sd_lkstats);
gfs2_update_stats(&gl->gl_stats, GFS2_LKS_SIRT, irt); /* Local */
gfs2_update_stats(&lks->lkstats[gltype], GFS2_LKS_SIRT, irt); /* Global */
preempt_enable();
gfs2_glock_cb(gl, LM_ST_SHARED);
break;
default:
- fs_err(gl->gl_name.ln_sbd, "unknown bast mode %d\n", mode);
+ fs_err(glock_sbd(gl), "unknown bast mode %d\n", mode);
BUG();
}
}
static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
unsigned int flags)
{
- struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+ struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;
bool blocking;
int cur, req;
u32 lkf;
int error;
gl->gl_req = req_state;
- cur = make_mode(gl->gl_name.ln_sbd, gl->gl_state);
- req = make_mode(gl->gl_name.ln_sbd, req_state);
+ cur = make_mode(glock_sbd(gl), gl->gl_state);
+ req = make_mode(glock_sbd(gl), req_state);
blocking = !down_conversion(cur, req) &&
!(flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB));
lkf = make_flags(gl, flags, req, blocking);
if (test_bit(GLF_INITIAL, &gl->gl_flags)) {
memset(strname, ' ', GDLM_STRNAME_BYTES - 1);
strname[GDLM_STRNAME_BYTES - 1] = '\0';
- gfs2_reverse_hex(strname + 7, gl->gl_name.ln_type);
- gfs2_reverse_hex(strname + 23, gl->gl_name.ln_number);
+ gfs2_reverse_hex(strname + 7, glock_type(gl));
+ gfs2_reverse_hex(strname + 23, glock_number(gl));
gl->gl_dstamp = ktime_get_real();
} else {
gfs2_update_request_times(gl);
static void gdlm_put_lock(struct gfs2_glock *gl)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct lm_lockstruct *ls = &sdp->sd_lockstruct;
uint32_t flags = 0;
int error;
if (error) {
fs_err(sdp, "gdlm_unlock %x,%llx err=%d\n",
- gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number, error);
+ glock_type(gl),
+ (unsigned long long)glock_number(gl), error);
}
}
static void gdlm_cancel(struct gfs2_glock *gl)
{
- struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;
+ struct lm_lockstruct *ls = &glock_sbd(gl)->sd_lockstruct;
down_read(&ls->ls_sem);
if (likely(ls->ls_dlm != NULL)) {
static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
- return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
+ return glock_type(bd->bd_gl) == LM_TYPE_RGRP;
}
static void maybe_release_space(struct gfs2_bufdata *bd)
{
struct gfs2_glock *gl = bd->bd_gl;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
- unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
+ unsigned int index = bd->bd_bh->b_blocknr - glock_number(gl);
struct gfs2_bitmap *bi = rgd->rd_bits + index;
rgrp_lock_local(rgd);
struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
struct address_space *mapping = gfs2_glock2aspace(gl);
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct folio *folio;
struct buffer_head *bh;
unsigned int shift;
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
int rahead, struct buffer_head **bhp)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct buffer_head *bh, *bhs[2];
int num = 0;
struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct buffer_head *first_bh, *bh;
u32 max_ra = gfs2_tune_get(sdp, gt_max_readahead) >>
sdp->sd_sb.sb_bsize_shift;
if (mapping->a_ops == &gfs2_meta_aops) {
struct gfs2_glock_aspace *gla =
container_of(mapping, struct gfs2_glock_aspace, mapping);
- return gla->glock.gl_name.ln_sbd;
+ return glock_sbd(&gla->glock);
} else
return inode->i_sb->s_fs_info;
}
gfs2_glock_dq_uninit(&ghs[qx]);
inode_unlock(&ip->i_inode);
kfree(ghs);
- gfs2_log_flush(ip->i_gl->gl_name.ln_sbd, ip->i_gl,
+ gfs2_log_flush(glock_sbd(ip->i_gl), ip->i_gl,
GFS2_LOG_HEAD_FLUSH_NORMAL | GFS2_LFC_DO_SYNC);
if (!error) {
for (x = 0; x < num_qd; x++) {
struct gfs2_holder i_gh;
int error;
- gfs2_assert_warn(sdp, sdp == qd->qd_gl->gl_name.ln_sbd);
+ gfs2_assert_warn(sdp, sdp == glock_sbd(qd->qd_gl));
restart:
error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
if (error)
static bool gfs2_rgrp_congested(const struct gfs2_rgrpd *rgd, int loops)
{
const struct gfs2_glock *gl = rgd->rd_gl;
- const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ const struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_lkstats *st;
u64 r_dcount, l_dcount;
u64 l_srttb, a_srttb = 0;
),
TP_fast_assign(
- __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
- __entry->glnum = gl->gl_name.ln_number;
- __entry->gltype = gl->gl_name.ln_type;
+ __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+ __entry->glnum = glock_number(gl);
+ __entry->gltype = glock_type(gl);
__entry->cur_state = glock_trace_state(gl->gl_state);
__entry->new_state = glock_trace_state(new_state);
__entry->tgt_state = glock_trace_state(gl->gl_target);
),
TP_fast_assign(
- __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
- __entry->gltype = gl->gl_name.ln_type;
- __entry->glnum = gl->gl_name.ln_number;
+ __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+ __entry->gltype = glock_type(gl);
+ __entry->glnum = glock_number(gl);
__entry->cur_state = glock_trace_state(gl->gl_state);
__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
),
),
TP_fast_assign(
- __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
- __entry->gltype = gl->gl_name.ln_type;
- __entry->glnum = gl->gl_name.ln_number;
+ __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+ __entry->gltype = glock_type(gl);
+ __entry->glnum = glock_number(gl);
__entry->cur_state = glock_trace_state(gl->gl_state);
__entry->dmt_state = glock_trace_state(gl->gl_demote_state);
__entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
),
TP_fast_assign(
- __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
- __entry->glnum = gh->gh_gl->gl_name.ln_number;
- __entry->gltype = gh->gh_gl->gl_name.ln_type;
+ __entry->dev = glock_sbd(gh->gh_gl)->sd_vfs->s_dev;
+ __entry->glnum = glock_number(gh->gh_gl);
+ __entry->gltype = glock_type(gh->gh_gl);
__entry->state = glock_trace_state(gh->gh_state);
),
),
TP_fast_assign(
- __entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
- __entry->glnum = gh->gh_gl->gl_name.ln_number;
- __entry->gltype = gh->gh_gl->gl_name.ln_type;
+ __entry->dev = glock_sbd(gh->gh_gl)->sd_vfs->s_dev;
+ __entry->glnum = glock_number(gh->gh_gl);
+ __entry->gltype = glock_type(gh->gh_gl);
__entry->queue = queue;
__entry->state = glock_trace_state(gh->gh_state);
),
),
TP_fast_assign(
- __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
- __entry->glnum = gl->gl_name.ln_number;
- __entry->gltype = gl->gl_name.ln_type;
+ __entry->dev = glock_sbd(gl)->sd_vfs->s_dev;
+ __entry->glnum = glock_number(gl);
+ __entry->gltype = glock_type(gl);
__entry->status = gl->gl_lksb.sb_status;
__entry->flags = gl->gl_lksb.sb_flags;
__entry->tdiff = tdiff;
),
TP_fast_assign(
- __entry->dev = bd->bd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->dev = glock_sbd(bd->bd_gl)->sd_vfs->s_dev;
__entry->pin = pin;
__entry->len = bd->bd_bh->b_size;
__entry->block = bd->bd_bh->b_blocknr;
- __entry->ino = bd->bd_gl->gl_name.ln_number;
+ __entry->ino = glock_number(bd->bd_gl);
),
TP_printk("%u,%u log %s %llu/%lu inode %llu",
),
TP_fast_assign(
- __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev;
__entry->lblock = lblock;
__entry->pblock = buffer_mapped(bh) ? bh->b_blocknr : 0;
__entry->inum = ip->i_no_addr;
),
TP_fast_assign(
- __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev;
__entry->inum = ip->i_no_addr;
__entry->pos = pos;
__entry->length = length;
),
TP_fast_assign(
- __entry->dev = ip->i_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->dev = glock_sbd(ip->i_gl)->sd_vfs->s_dev;
__entry->inum = ip->i_no_addr;
__entry->offset = iomap->offset;
__entry->length = iomap->length;
),
TP_fast_assign(
- __entry->dev = rgd->rd_gl->gl_name.ln_sbd->sd_vfs->s_dev;
+ __entry->dev = glock_sbd(rgd->rd_gl)->sd_vfs->s_dev;
__entry->start = block;
__entry->inum = ip->i_no_addr;
__entry->len = len;
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
{
struct gfs2_trans *tr = current->journal_info;
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct gfs2_bufdata *bd;
lock_buffer(bh);
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_sbd *sdp = glock_sbd(gl);
struct super_block *sb = sdp->sd_vfs;
struct gfs2_bufdata *bd;
struct gfs2_meta_header *mh;