/* already have ref on dlm to avoid having
* it disappear. just double-check. */
BUG_ON(item->dlm != dlm);
/* this is allowed to sleep and
* call network stuff */
workfunc(item, item->data);
dlm_put(dlm);
kfree(item);
}
}
/*
 * RECOVERY THREAD
 */
void dlm_kick_recovery_thread(struct dlm_ctxt *dlm)
{
	/* wake the recovery thread
	 * this will wake the reco thread in one of three places
	 * 1) sleeping with no recovery happening
	 * 2) sleeping with recovery mastered elsewhere
	 * 3) recovery mastered here, waiting on reco data */
	wake_up(&dlm->dlm_reco_thread_wq);
}
/* Launch the recovery thread */
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm)
{
	mlog(0, "starting dlm recovery thread...\n");

	dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm,
			"dlm_reco-%s", dlm->name);
	if (IS_ERR(dlm->dlm_reco_thread_task)) {
		mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task));
		dlm->dlm_reco_thread_task = NULL;
		return -EINVAL;
	}

	return 0;
}
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
{
	if (dlm->dlm_reco_thread_task) {
		mlog(0, "waiting for dlm recovery thread to exit\n");
		kthread_stop(dlm->dlm_reco_thread_task);
		dlm->dlm_reco_thread_task = NULL;
	}
}
/*
 * this is lame, but here's how recovery works...
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
 *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
 * 5) the new master collects up all of secondary lock queue info
 *    one lock at a time, forcing each node to communicate back
 *    before continuing
 * 6) each secondary lock queue responds with the full known lock info
 * 7) once the new master has run all its locks, it sends an ALLDONE!
 *    message to everyone
 * 8) upon receiving this message, the secondary queue node unlocks
 *    and responds to the ALLDONE
 * 9) once the new master gets responses from everyone, he unlocks
 *    everything and recovery for this dead node is done
 * 10) go back to 2) while there are still dead nodes
 */
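/* roughly, in code terms: step 2 is dlm_pick_recovery_master(), steps
 * 5-6 are dlm_request_all_locks() and the mig_lockres messages, and
 * steps 7-9 are dlm_send_finalize_reco_message() and its handlers. */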
mlog(0, "dlm thread running for %s...\n", dlm->name);
while (!kthread_should_stop()) { if (dlm_domain_fully_joined(dlm)) {
status = dlm_do_recovery(dlm); if (status == -EAGAIN) { /* do not sleep, recheck immediately. */ continue;
} if (status < 0)
mlog_errno(status);
}
/* returns true when the recovery master has contacted us */
static int dlm_reco_master_ready(struct dlm_ctxt *dlm)
{
	int ready;
	spin_lock(&dlm->spinlock);
	ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM);
	spin_unlock(&dlm->spinlock);
	return ready;
}
/* returns true if node is no longer in the domain
 * could be dead or just not joined */
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node)
{
	int dead;
	spin_lock(&dlm->spinlock);
	dead = !test_bit(node, dlm->domain_map);
	spin_unlock(&dlm->spinlock);
	return dead;
}
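/* note: dlm->domain_map tracks nodes currently joined to the domain,
 * while dlm->recovery_map tracks dead nodes whose recovery has not yet
 * completed; a node's bit is cleared from the recovery map once its
 * locks have been remastered. */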
/* returns true if node is no longer in the recovery map,
 * i.e. recovery for that node has already completed */
static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node)
{
	int recovered;
	spin_lock(&dlm->spinlock);
	recovered = !test_bit(node, dlm->recovery_map);
	spin_unlock(&dlm->spinlock);
	return recovered;
}
void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout)
{
	if (dlm_is_node_dead(dlm, node))
		return;

	printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in "
	       "domain %s\n", node, dlm->name);

	if (timeout)
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_is_node_dead(dlm, node),
				   msecs_to_jiffies(timeout));
	else
		wait_event(dlm->dlm_reco_thread_wq,
			   dlm_is_node_dead(dlm, node));
}
/* callers of the top-level api calls (dlmlock/dlmunlock) should
 * block on the dlm->reco.event when recovery is in progress.
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}
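/* DLM_RECO_STATE_ACTIVE is set (via dlm_begin_recovery()) when the
 * recovery thread starts on a dead node and cleared by
 * dlm_end_recovery(); see dlm_do_recovery() below. */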
static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	if (dlm->migrate_done) {
		mlog(0, "%s: no need to do recovery after migrating all "
		     "lock resources\n", dlm->name);
		spin_unlock(&dlm->spinlock);
		return 0;
	}
	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}
	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_first_bit(dlm->recovery_map, O2NM_MAX_NODES);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover!  sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
dlm->reco.dead_node);
	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
dlm_begin_recovery(dlm);
spin_unlock(&dlm->spinlock);
	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;
master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success!  see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}

	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);
	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     str_yes_no(dlm_is_node_dead(dlm,
								ndata->node_num)));
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);
		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}
mlog(0, "%s: Done requesting all lock info\n", dlm->name);
	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     str_yes_no(all_nodes_done));
		if (all_nodes_done) {
			int ret;

			/* Set this flag on the recovery master so that a new
			 * recovery for another dead node cannot start before
			 * the current one is done.  Starting one early could
			 * hang recovery. */
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));
	}

	if (destroy)
		dlm_destroy_recovery_area(dlm);

	return status;
}
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num = 0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	bitmap_copy(dlm->reco.node_map, dlm->domain_map, O2NM_MAX_NODES);
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit(dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;

		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);

		num++;
	}

	return 0;
}
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	/* send message */
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	/* return from here, then
	 * sleep until all received or error */
	return ret;
}
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}
/* queue up work for dlm_request_all_locks_worker */
dlm_grab(dlm); /* get an extra ref for the work item */
dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
item->u.ral.reco_master = lr->node_idx;
item->u.ral.dead_node = lr->dead_node;
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
queue_work(dlm->dlm_worker, &dlm->dispatched_work);
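	/* note that the handler itself only queues work: the actual lock
	 * dump runs later in dlm_request_all_locks_worker() on the
	 * dlm_worker workqueue, where it is allowed to sleep and send
	 * network traffic. */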
	dlm_put(dlm);
	return 0;
}

static void dlm_request_all_locks_worker(struct dlm_work_item *item,
					 void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}
	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}
	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}
mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, " "node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node), "Got DATA DONE: dead_node=%u, reco.dead_node=%u, " "node_idx=%u, this node=%u\n", done->dead_node,
dlm->reco.dead_node, done->node_idx, dlm->node_num);
	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);
				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to, struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;
	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;
	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 struct_size(mres, ml, mres->num_locks),
				 send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}
	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}
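/* a single migratable lockres message carries at most
 * DLM_MAX_MIGRATABLE_LOCKS entries (the struct_size() above sizes the
 * payload by mres->num_locks); a lockres with more locks is sent as a
 * series of messages sharing one mig_cookie, with DLM_MRES_ALL_DONE
 * set on the last one. */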
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh.  lockres has %d locks.  this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);
	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;
error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}
/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?  returning an error only delays the problem really
 */
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
			    void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_migratable_lockres *mres =
		(struct dlm_migratable_lockres *)msg->buf;
	int ret = 0;
	u8 real_master;
	u8 extra_refs = 0;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;

	if (!dlm_grab(dlm))
		return -EINVAL;

	BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	real_master = mres->master;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* cannot migrate a lockres with no master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
	}
mlog(0, "%s message received from node %u\n",
(mres->flags & DLM_MRES_RECOVERY) ? "recovery" : "migration", mres->master); if (mres->flags & DLM_MRES_ALL_DONE)
mlog(0, "all done flag. all lockres data received!\n");
ret = -ENOMEM;
buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS);
item = kzalloc(sizeof(*item), GFP_NOFS); if (!buf || !item) goto leave;
	/* lookup the lock to see if we have a secondary queue for this
	 * already...  just add the locks in and this will have its owner
	 * and RECOVERY flag changed when it completes. */
	hash = dlm_lockid_hash(mres->lockname, mres->lockname_len);
	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres_full(dlm, mres->lockname, mres->lockname_len,
					hash);
	if (res) {
		/* this will get a ref on res */
		/* mark it as recovering/migrating and hash it */
		spin_lock(&res->spinlock);
		if (res->state & DLM_LOCK_RES_DROPPING_REF) {
			mlog(0, "%s: node is attempting to migrate "
			     "lockres %.*s, but marked as dropping "
			     " ref!\n", dlm->name,
			     mres->lockname_len, mres->lockname);
			ret = -EINVAL;
			spin_unlock(&res->spinlock);
			spin_unlock(&dlm->spinlock);
			dlm_lockres_put(res);
			goto leave;
		}
		if (mres->flags & DLM_MRES_RECOVERY) {
			res->state |= DLM_LOCK_RES_RECOVERING;
		} else {
			if (res->state & DLM_LOCK_RES_MIGRATING) {
				/* this is at least the second
				 * lockres message */
				mlog(0, "lock %.*s is already migrating\n",
				     mres->lockname_len,
				     mres->lockname);
			} else if (res->state & DLM_LOCK_RES_RECOVERING) {
				/* caller should BUG */
				mlog(ML_ERROR, "node is attempting to migrate "
				     "lock %.*s, but marked as recovering!\n",
				     mres->lockname_len, mres->lockname);
				ret = -EFAULT;
				spin_unlock(&res->spinlock);
				spin_unlock(&dlm->spinlock);
				dlm_lockres_put(res);
				goto leave;
			}
			res->state |= DLM_LOCK_RES_MIGRATING;
		}
		spin_unlock(&res->spinlock);
		spin_unlock(&dlm->spinlock);
	} else {
		spin_unlock(&dlm->spinlock);
		/* need to allocate, just like if it was
		 * mastered here normally  */
		res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len);
		if (!res)
			goto leave;

		/* to match the ref that we would have gotten if
		 * dlm_lookup_lockres had succeeded */
		dlm_lockres_get(res);

		/* mark it as recovering/migrating and hash it */
		if (mres->flags & DLM_MRES_RECOVERY)
			res->state |= DLM_LOCK_RES_RECOVERING;
		else
			res->state |= DLM_LOCK_RES_MIGRATING;

		spin_lock(&dlm->spinlock);
		__dlm_insert_lockres(dlm, res);
		spin_unlock(&dlm->spinlock);

		/* Add an extra ref for this lock-less lockres lest the
		 * dlm_thread purges it before we get the chance to add
		 * locks to it */
		dlm_lockres_get(res);

		/* There are three refs that need to be put.
		 * 1. Taken above.
		 * 2. kref_init in dlm_new_lockres()->dlm_init_lockres().
		 * 3. dlm_lookup_lockres()
		 * The first one is handled at the end of this function. The
		 * other two are handled in the worker thread after locks have
		 * been attached. Yes, we don't wait for purge time to match
		 * kref_init. The lockres will still have at least one ref
		 * added because it is in the hash __dlm_insert_lockres() */
		extra_refs++;

		/* now that the new lockres is inserted,
		 * make it usable by other processes */
		spin_lock(&res->spinlock);
		res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
		spin_unlock(&res->spinlock);
		wake_up(&res->wq);
	}
	/* at this point we have allocated everything we need,
	 * and we have a hashed lockres with an extra ref and
	 * the proper res->state flags. */
	ret = 0;
	spin_lock(&res->spinlock);
	/* drop this either when master requery finds a different master
	 * or when a lock is added by the recovery worker */
	dlm_lockres_grab_inflight_ref(dlm, res);
	if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* migration cannot have an unknown master */
		BUG_ON(!(mres->flags & DLM_MRES_RECOVERY));
		mlog(0, "recovery has passed me a lockres with an "
		     "unknown owner.. will need to requery: "
		     "%.*s\n", mres->lockname_len, mres->lockname);
	} else {
		/* take a reference now to pin the lockres, drop it
		 * when locks are added in the worker */
		dlm_change_lockres_owner(dlm, res, dlm->node_num);
	}
	spin_unlock(&res->spinlock);
	/* queue up work for dlm_mig_lockres_worker */
	dlm_grab(dlm);  /* get an extra ref for the work item */
	memcpy(buf, msg->buf, be16_to_cpu(msg->data_len));  /* copy the whole message */
	dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf);
	item->u.ml.lockres = res; /* already have a ref */
	item->u.ml.real_master = real_master;
	item->u.ml.extra_ref = extra_refs;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

leave:
	/* One extra ref taken needs to be put here */
	if (extra_refs)
		dlm_lockres_put(res);
	dlm_put(dlm);
	if (ret < 0) {
		kfree(buf);
		kfree(item);
		mlog_errno(ret);
	}

	return ret;
}
static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_ctxt *dlm;
	struct dlm_migratable_lockres *mres;
	int ret = 0;
	struct dlm_lock_resource *res;
	u8 real_master;
	u8 extra_ref;

	dlm = item->dlm;
	mres = (struct dlm_migratable_lockres *)data;

	res = item->u.ml.lockres;
	real_master = item->u.ml.real_master;
	extra_ref = item->u.ml.extra_ref;
	if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
		/* this case is super-rare.  only occurs if
		 * node death happens during migration. */
again:
		ret = dlm_lockres_master_requery(dlm, res, &real_master);
		if (ret < 0) {
			mlog(0, "dlm_lockres_master_requery ret=%d\n",
			     ret);
			goto again;
		}
		if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lockres %.*s not claimed.  "
			     "this node will take it.\n",
			     res->lockname.len, res->lockname.name);
		} else {
			spin_lock(&res->spinlock);
			dlm_lockres_drop_inflight_ref(dlm, res);
			spin_unlock(&res->spinlock);
			mlog(0, "master needs to respond to sender "
			     "that node %u still owns %.*s\n",
			     real_master, res->lockname.len,
			     res->lockname.name);
			/* cannot touch this lockres */
			goto leave;
		}
	}
	ret = dlm_process_recovery_data(dlm, res, mres);
	if (ret < 0)
		mlog(0, "dlm_process_recovery_data returned %d\n", ret);
	else
		mlog(0, "dlm_process_recovery_data succeeded\n");

	if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) ==
	                   (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) {
		ret = dlm_finish_migration(dlm, res, mres->master);
		if (ret < 0)
			mlog_errno(ret);
	}

leave:
	/* See comment in dlm_mig_lockres_handler() */
	if (res) {
		if (extra_ref)
			dlm_lockres_put(res);
		dlm_lockres_put(res);
	}
	kfree(data);
}
static int dlm_lockres_master_requery(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      u8 *real_master)
{
	struct dlm_node_iter iter;
	int nodenum;
	int ret = 0;

	*real_master = DLM_LOCK_RES_OWNER_UNKNOWN;

	/* we only reach here if one of the two nodes in a
	 * migration died while the migration was in progress.
	 * at this point we need to requery the master.  we
	 * know that the new_master got as far as creating
	 * an mle on at least one node, but we do not know
	 * if any nodes had actually cleared the mle and set
	 * the master to the new_master.  the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering.  if so, then the new_master died and
	 * we need to remaster this lock.  if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
spin_lock(&dlm->spinlock);
dlm_node_iter_init(dlm->domain_map, &iter);
spin_unlock(&dlm->spinlock);
	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN.  continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

resend:
	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	if (ret < 0)
		mlog(ML_ERROR, "Error %d when sending message %u (key "
		     "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG,
		     dlm->key, nodenum);
	else if (status == -ENOMEM) {
		mlog_errno(status);
		msleep(50);
		goto resend;
	} else {
		BUG_ON(status < 0);
		BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN);
		*real_master = (u8) (status & 0xff);
		mlog(0, "node %u responded to master requery with %u\n",
		     nodenum, *real_master);
		ret = 0;
	}
	return ret;
}

/* this function cannot error, so unless the sending
 * or receiving of the message failed, the owner can
 * be trusted */
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf;
	struct dlm_lock_resource *res = NULL;
	unsigned int hash;
	int master = DLM_LOCK_RES_OWNER_UNKNOWN;
	u32 flags = DLM_ASSERT_MASTER_REQUERY;
	int dispatched = 0;

	if (!dlm_grab(dlm)) {
		/* since the domain has gone away on this
		 * node, the proper response is UNKNOWN */
		return master;
	}
	hash = dlm_lockid_hash(req->name, req->namelen);

	spin_lock(&dlm->spinlock);
	res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash);
	if (res) {
		spin_lock(&res->spinlock);
		master = res->owner;
		if (master == dlm->node_num) {
			int ret = dlm_dispatch_assert_master(dlm, res,
							     0, 0, flags);
			if (ret < 0) {
				mlog_errno(ret);
				spin_unlock(&res->spinlock);
				dlm_lockres_put(res);
				spin_unlock(&dlm->spinlock);
				dlm_put(dlm);
				/* sender will take care of this and retry */
				return ret;
			} else {
				dispatched = 1;
				__dlm_lockres_grab_inflight_worker(dlm, res);
				spin_unlock(&res->spinlock);
			}
		} else {
			/* put.. in case we are not the master */
			spin_unlock(&res->spinlock);
			dlm_lockres_put(res);
		}
	}
	spin_unlock(&dlm->spinlock);

	if (!dispatched)
		dlm_put(dlm);
	return master;
}
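/* the granted, converting, and blocked queues are consecutive
 * struct list_head members of struct dlm_lock_resource, so the helper
 * below can select one with pointer arithmetic on &res->granted. */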
static inline struct list_head *
dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num)
{
	struct list_head *ret;
	BUG_ON(list_num < 0);
	BUG_ON(list_num > 2);
	ret = &(res->granted);
	ret += list_num;
	return ret;
}
/* TODO: do ast flush business
 * TODO: do MIGRATING and RECOVERING spinning
 */
/*
* NOTE about in-flight requests during migration:
*
* Before attempting the migrate, the master has marked the lockres as
* MIGRATING and then flushed all of its pending ASTS.  So any in-flight
* requests either got queued before the MIGRATING flag got set, in which
* case the lock data will reflect the change and a return message is on
* the way, or the request failed to get in before MIGRATING got set.  In
* this case, the caller will be told to spin and wait for the MIGRATING
* flag to be dropped, then recheck the master.
* This holds true for the convert, cancel and unlock cases, and since lvb
* updates are tied to these same messages, it applies to lvb updates as
* well.  For the lock case, there is no way a lock can be on the master
* queue and not be on the secondary queue since the lock is always added
* locally first.  This means that the new target node will never be sent
* a lock that he doesn't already have on the list.
* In total, this means that the local lock is correct and should not be
* updated to match the one sent by the master.  Any messages sent back
* from the master before the MIGRATING flag will bring the lock properly
* up-to-date, and the change will be ordered properly for the waiter.
* We will *not* attempt to modify the lock underneath the waiter.
*/
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue, *iter;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock;
	u8 from = O2NM_MAX_NODES;
	__be64 c;
mlog(0, "running %d locks for this lockres\n", mres->num_locks); for (i=0; i<mres->num_locks; i++) {
ml = &(mres->ml[i]);
if (dlm_is_dummy_lock(dlm, ml, &from)) { /* placeholder, just need to set the refmap bit */
BUG_ON(mres->num_locks != 1);
mlog(0, "%s:%.*s: dummy lock for %u\n",
dlm->name, mres->lockname_len, mres->lockname,
from);
spin_lock(&res->spinlock);
dlm_lockres_set_refmap_bit(dlm, res, from);
spin_unlock(&res->spinlock); break;
}
BUG_ON(ml->highest_blocked != LKM_IVMODE);
newlock = NULL;
lksb = NULL;
		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_num_to_pointer(res, j);
				list_for_each(iter, tmpq) {
					lock = list_entry(iter,
						  struct dlm_lock, list);
					if (lock->ml.cookie == ml->cookie)
						break;
					lock = NULL;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.  it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
				     "with cookie %u:%llu, node %u, "
				     "list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}
			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount.  switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			mlog(0, "just reordered a local lock!\n");
			continue;
		}
		/* lock is for another node. */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		/*
		 * If the lock is in the blocked list it can't have a valid lvb,
		 * so skip it
		 */
		if (ml->list == DLM_BLOCKED_LIST)
			goto skip_lvb;
		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died.  recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:
		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *    1. order of locks on granted queue is
		 *       meaningless.
		 *    2. order of locks on converting queue is
		 *       LOST with the node death.  sorry charlie.
		 *    3. order of locks on the blocked queue is
		 *       also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
		 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));
				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			if (mres->flags & DLM_MRES_RECOVERY &&
			    ml->list == DLM_CONVERTING_LIST &&
			    newlock->ml.type >
			    newlock->ml.convert_type) {
				/* newlock is doing downconvert, add it to the
				 * head of converting list */
				list_add(&newlock->list, queue);
			} else
				list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");
leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0)
		mlog_errno(ret);

	return ret;
}
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering, &dlm->reco.resources);
	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns.  ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master.  note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast.  that will happen after
				 * the network call times out.  for now,
				 * just move lists to prepare the new
				 * recovery master.  */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}
/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;
assert_spin_locked(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			dlm_lockres_put(res);
		}
	}
	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			if (res->state & DLM_LOCK_RES_RECOVERY_WAITING) {
				spin_lock(&res->spinlock);
				res->state &= ~DLM_LOCK_RES_RECOVERY_WAITING;
				spin_unlock(&res->spinlock);
				wake_up(&res->wq);
			}

			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}