// SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** ******************************************************************************* ** ** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. ** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved. ** ** *******************************************************************************
******************************************************************************/
/* Thread for sending/receiving messages for all lockspace's */
/* NOTE(review): fragment — the enclosing function (presumably the DLM
 * lockspace module init path) is not visible in this excerpt, so only
 * comments are added here.  On failure the error is logged but the
 * fragment does not show how/if it propagates — confirm in full source. */
error = dlm_midcomms_start(); if (error)
log_print("cannot start dlm midcomms %d", error);
/* NOTE(review): tail of a lockspace teardown routine (the delayed
 * free path referenced elsewhere as free_lockspace()); its opening is
 * not visible in this excerpt.  Comments only — code bytes untouched. */
/* * Free all lkb's in xa
*/
/* Release every LKB still tracked in the lockspace's lkb xarray. */
xa_for_each(&ls->ls_lkbxa, id, lkb) {
lkb_idr_free(lkb);
}
xa_destroy(&ls->ls_lkbxa);
/* * Free all rsb's on rsbtbl
*/
/* rhash_free_rsb is invoked for each entry before the table is torn down. */
rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
kfree(ls);
}
/* NOTE(review): this excerpt is whitespace-mangled by extraction
 * ("staticint" == "static int", "constchar" == "const char",
 * "conststruct" == "const struct") and TRUNCATED: the allocation of
 * 'ls', the xa/rhashtable setup, the recoverd start, and the error
 * labels jumped to below (out, out_lkbxa, out_recoverd, out_members)
 * are all missing from view, and the function never closes before the
 * next definition begins.  Code bytes left untouched; comments only.
 *
 * Purpose (from visible code): validate arguments, create and start a
 * new lockspace, then wait for first recovery to complete. */
staticint new_lockspace(constchar *name, constchar *cluster,
uint32_t flags, int lvblen, conststruct dlm_lockspace_ops *ops, void *ops_arg, int *ops_result, dlm_lockspace_t **lockspace)
{ struct dlm_ls *ls; int namelen = strlen(name); int error;
/* Reject empty or over-long names and non-8-byte-multiple LVB lengths. */
if (namelen > DLM_LOCKSPACE_LEN || namelen == 0) return -EINVAL;
if (lvblen % 8) return -EINVAL;
if (!try_module_get(THIS_MODULE)) return -EINVAL;
/* dlm_controld (userspace daemon) must be running to manage membership. */
if (!dlm_user_daemon_available()) {
log_print("dlm user daemon not available");
error = -EUNATCH; goto out;
}
/* Report recovery-callback support to the caller via *ops_result. */
if (ops && ops_result) { if (!dlm_config.ci_recover_callbacks)
*ops_result = -EOPNOTSUPP; else
*ops_result = 0;
}
if (!cluster)
log_print("dlm cluster name '%s' is being used without an application provided cluster name",
dlm_config.ci_cluster_name);
/* A mismatched application cluster name is a hard error when
 * recovery callbacks are enabled. */
if (dlm_config.ci_recover_callbacks && cluster &&
strncmp(cluster, dlm_config.ci_cluster_name, DLM_LOCKSPACE_LEN)) {
log_print("dlm cluster name '%s' does not match " "the application cluster name '%s'",
dlm_config.ci_cluster_name, cluster);
error = -EBADR; goto out;
}
/* NOTE(review): 'ls' is used below but its allocation is not visible
 * in this excerpt — presumably removed by the extraction. */
if (flags & DLM_LSFL_SOFTIRQ)
set_bit(LSFL_SOFTIRQ, &ls->ls_flags);
/* ls_exflags are forced to match among nodes, and we don't * need to require all nodes to have some flags set
*/
ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
DLM_LSFL_SOFTIRQ));
/* Due backwards compatibility with 3.1 we need to use maximum * possible dlm message size to be sure the message will fit and * not having out of bounds issues. However on sending side 3.2 * might send less.
*/
ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS); if (!ls->ls_recover_buf) {
error = -ENOMEM; goto out_lkbxa;
}
/* * Once started, dlm_recoverd first looks for ls in lslist, then * initializes ls_in_recovery as locked in "down" mode. We need * to wait for the wakeup from dlm_recoverd because in_recovery * has to start out in down mode.
*/
/* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) Once it's done that, it tells us who the current lockspace members are (via configfs) and then tells the
lockspace to start running (via sysfs) in dlm_ls_start(). */
error = do_uevent(ls, 1); if (error < 0) goto out_recoverd;
/* wait until recovery is successful or failed */
wait_for_completion(&ls->ls_recovery_done);
error = ls->ls_recovery_result; if (error) goto out_members;
/* NOTE: We check the lkbxa here rather than the resource table. This is because there may be LKBs queued as ASTs that have been unlinked
from their RSBs and are pending deletion once the AST has been delivered */
/* NOTE(review): this excerpt appears to SPLICE two functions together:
 * the signature below belongs to lockspace_busy() (whitespace-mangled
 * "staticint" == "static int", "unsignedlong" == "unsigned long"), but
 * the body that follows logs "release_lockspace no remove" and performs
 * lockspace teardown — i.e. it reads like the tail of release_lockspace().
 * lockspace_busy()'s real body and release_lockspace()'s header seem to
 * have been dropped by the extraction.  Code bytes left untouched. */
staticint lockspace_busy(struct dlm_ls *ls, int force)
{ struct dlm_lkb *lkb; unsignedlong id; int rv = 0;
/* NOTE(review): rv is never set between its init and this check in the
 * visible code — the busy-scan that computes it is missing from view. */
if (rv) {
log_debug(ls, "release_lockspace no remove %d", rv); return rv;
}
/* Last lockspace going away: wait for midcomms version negotiation. */
if (ls_count == 1)
dlm_midcomms_version_wait();
dlm_device_deregister(ls);
/* Skip the daemon uevent on forced shutdown (force == 3). */
if (force != 3 && dlm_user_daemon_available())
do_uevent(ls, 0);
dlm_recoverd_stop(ls);
/* clear the LSFL_RUNNING flag to fast up * time_shutdown_sync(), we don't care anymore
*/
clear_bit(LSFL_RUNNING, &ls->ls_flags);
timer_shutdown_sync(&ls->ls_scan_timer);
if (ls_count == 1) {
dlm_clear_members(ls);
dlm_midcomms_shutdown();
}
/* delayed free of data structures see free_lockspace() */
queue_work(dlm_wq, &ls->ls_free_work);
module_put(THIS_MODULE); return 0;
}
/*
 * Tear down a lockspace once its local users are done with it.  The
 * remaining cluster nodes recover as if this node had died; the
 * lockspace keeps participating in recoveries until this returns.
 *
 * force selects how aggressively to destroy:
 *   0 - refuse if the lockspace still holds any LKBs
 *   1 - destroy despite remote LKBs, refuse if local LKBs remain
 *   2 - destroy regardless of LKBs
 *   3 - destroy as part of a forced shutdown
 */
int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls = dlm_find_lockspace_local(lockspace);
	int rv;

	if (!ls)
		return -EINVAL;
	/* drop the reference taken by the lookup above */
	dlm_put_lockspace(ls);

	mutex_lock(&ls_lock);
	rv = release_lockspace(ls, force);
	if (!rv)
		ls_count--;
	/* last lockspace gone: stop the midcomms layer as well */
	if (!ls_count)
		dlm_midcomms_stop();
	mutex_unlock(&ls_lock);

	return rv;
}
/*
 * Stop every lockspace that is still running (used when no userland
 * control daemon is available to manage them).  dlm_ls_stop() cannot
 * be called under lslist_lock, so the lock is dropped before each stop
 * and the scan restarts from the top; lockspaces that are already
 * stopped are merely counted.
 */
void dlm_stop_lockspaces(void)
{
	struct dlm_ls *ls;
	int remaining;

restart:
	remaining = 0;
	spin_lock_bh(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (test_bit(LSFL_RUNNING, &ls->ls_flags)) {
			/* must not call dlm_ls_stop() under the spinlock */
			spin_unlock_bh(&lslist_lock);
			log_error(ls, "no userland control daemon, stopping lockspace");
			dlm_ls_stop(ls);
			goto restart;
		}
		remaining++;
	}
	spin_unlock_bh(&lslist_lock);

	if (remaining)
		log_print("dlm user daemon left %d lockspaces", remaining);
}
/*
 * NOTE(review): the lines below are non-code residue left behind by a
 * web-extraction tool (German boilerplate reporting processing time and
 * a content disclaimer).  They are preserved verbatim inside this
 * comment so the file remains parseable; they should be removed once
 * confirmed irrelevant.  English gist: "Measurement V0.5 — processing
 * took 0.2 seconds (preprocessed).  The information on this website was
 * compiled carefully to the best of our knowledge, but neither
 * completeness, correctness, nor quality is guaranteed.  Note: the
 * syntax highlighting and the measurement are still experimental."
 *
 * Messung V0.5
 * - Dauer der Verarbeitung: 0.2 Sekunden
 * (vorverarbeitet)
 * -
 * Die Informationen auf dieser Webseite wurden
 * nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung:
 * Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.
 */