// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*/
/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */
staticint gfs2_ail_empty_gl(struct gfs2_glock *gl)
{ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_trans tr; unsignedint revokes; int ret = 0;
revokes = atomic_read(&gl->gl_ail_count);
if (!revokes) { bool have_revokes; bool log_in_flight;
/* * We have nothing on the ail, but there could be revokes on * the sdp revoke queue, in which case, we still want to flush * the log and wait for it to finish. * * If the sdp revoke list is empty too, we might still have an * io outstanding for writing revokes, so we should wait for * it before returning. * * If none of these conditions are true, our revokes are all * flushed and we can return.
*/
gfs2_log_lock(sdp);
have_revokes = !list_empty(&sdp->sd_log_revokes);
log_in_flight = atomic_read(&sdp->sd_log_in_flight);
gfs2_log_unlock(sdp); if (have_revokes) goto flush; if (log_in_flight)
log_flush_wait(sdp); return 0;
}
/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */
/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never used LM_ST_DEFERRED with resource groups, so that we
 * should always see the metadata flag set here.
 */
/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Kicks off writeback of the glock's metadata address space, waits for
 * that I/O to finish, and raises a filesystem I/O error if the wait
 * reports a failure.
 *
 * Returns: the writeback-wait error code (0 on success).
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	int ret;

	filemap_fdatawrite(mapping);
	ret = filemap_fdatawait(mapping);
	if (ret)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return ret;
}
/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 * Flushes the log, the inode's metadata mapping and (for regular files)
 * the data mapping, then empties the AIL for this glock. The first error
 * encountered is preserved and returned.
 *
 * Returns: 0 on success, or the first writeback/AIL error.
 */
static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		/* Force future faults through ->fault() so pages get
		 * re-validated under the glock. */
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	/* The log must hit the disk before the in-place buffers do. */
	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	ret = gfs2_ail_empty_gl(gl);
	if (!error)
		error = ret;
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}
/**
 * inode_go_inval - prepare a inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 */
/* * Try to get an active super block reference to prevent racing with * unmount (see super_trylock_shared()). But note that unmount isn't * the only place where a write lock on s_umount is taken, and we can * fail here because of things like remount as well.
*/ if (down_read_trylock(&sb->s_umount)) {
atomic_inc(&sb->s_active);
up_read(&sb->s_umount); if (!queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work))
deactivate_super(sb);
}
}
/** * freeze_go_xmote_bh - After promoting/demoting the freeze glock * @gl: the glock
*/ staticint freeze_go_xmote_bh(struct gfs2_glock *gl)
{ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode); struct gfs2_glock *j_gl = ip->i_gl; struct gfs2_log_header_host head; int error;
if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Only act on remote callbacks on a writable, live filesystem. */
	if (!remote || sb_rdonly(sdp->sd_vfs) ||
	    test_bit(SDF_KILL, &sdp->sd_flags))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		/* Hold a reference for the queued work; drop it again if
		 * the eviction attempt could not be queued. */
		gl->gl_lockref.count++;
		if (!gfs2_queue_try_to_evict(gl))
			gl->gl_lockref.count--;
	}
}
/**
 * inode_go_unlocked - wake up anyone waiting for dlm's unlock ast
 * @gl: glock being unlocked
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be unlocked so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_unlocked(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */

	if (!test_bit(GLF_UNLOCKED, &gl->gl_flags))
		return;
	/* Clear with release semantics before waking any waiters. */
	clear_bit_unlock(GLF_UNLOCKED, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_UNLOCKED);
}
/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/* Ignore the callback unless it's from another node, and it's the
	 * live lock. */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}
/*
 * NOTE(review): the following German disclaimer is extraneous text (web
 * extraction residue), not part of the kernel source. Preserved here as a
 * comment so the file remains compilable; English translation:
 * "The information on this web page has been carefully compiled to the best
 * of our knowledge. However, neither completeness, nor correctness, nor
 * quality of the information provided is guaranteed.
 * Note: the colored syntax highlighting and the measurement are still
 * experimental."
 */