// SPDX-License-Identifier: GPL-2.0
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
/* Enqueue the node for this CPU in the spinlock wait queue */
old = READ_ONCE(lp->lock); while (1) { if ((old & _Q_LOCK_CPU_MASK) == 0 &&
(old & _Q_LOCK_STEAL_MASK) != _Q_LOCK_STEAL_MASK) { /* * The lock is free but there may be waiters. * With no waiters simply take the lock, if there * are waiters try to steal the lock. The lock may * be stolen three times before the next queued * waiter will get the lock.
*/ new = (old ? (old + _Q_LOCK_STEAL_ADD) : 0) | lockval; if (arch_try_cmpxchg(&lp->lock, &old, new)) /* Got the lock */ goto out; /* lock passing in progress */ continue;
} /* Make the node of this CPU the new tail. */ new = node_id | (old & _Q_LOCK_MASK); if (arch_try_cmpxchg(&lp->lock, &old, new)) break;
} /* Set the 'next' pointer of the tail node in the queue */
tail_id = old & _Q_TAIL_MASK; if (tail_id != 0) {
node->prev = arch_spin_decode_tail(tail_id);
WRITE_ONCE(node->prev->next, node);
}
/* Pass the virtual CPU to the lock holder if it is not running */
owner = arch_spin_yield_target(old, node); if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
/* Spin on the CPU local node->prev pointer */ if (tail_id != 0) {
count = spin_retry; while (READ_ONCE(node->prev) != NULL) { if (count-- >= 0) continue;
count = spin_retry; /* Query running state of lock holder again. */
owner = arch_spin_yield_target(old, node); if (owner && arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
}
/* Spin on the lock value in the spinlock_t */
count = spin_retry; while (1) {
old = READ_ONCE(lp->lock);
owner = old & _Q_LOCK_CPU_MASK; if (!owner) {
tail_id = old & _Q_TAIL_MASK; new = ((tail_id != node_id) ? tail_id : 0) | lockval; if (arch_try_cmpxchg(&lp->lock, &old, new)) /* Got the lock */ break; continue;
} if (count-- >= 0) continue;
count = spin_retry; if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1))
smp_yield_cpu(owner - 1);
}
/* Pass lock_spin job to next CPU in the queue */ if (node_id && tail_id != node_id) { /* Wait until the next CPU has set up the 'next' pointer */ while ((next = READ_ONCE(node->next)) == NULL)
;
next->prev = NULL;
}
out:
get_lowcore()->spinlock_index--;
}
/*
 * Classic (non-queued) spin lock slow path.
 *
 * Spins on the lock word, attempting a compare-and-swap whenever the
 * owner-CPU field is clear.  While the lock is held by another CPU the
 * loop burns spin_retry iterations, then yields the virtual CPU to the
 * lock holder if that holder is not currently running (or we are not
 * on LPAR, where directed yield is always worthwhile).
 *
 * NOTE(review): fixed `staticinlinevoid` -> `static inline void`; the
 * original line was missing the spaces and would not compile.
 */
static inline void arch_spin_lock_classic(arch_spinlock_t *lp)
{
	int lockval, old, new, owner, count;

	lockval = spinlock_lockval();	/* cpu + 1 */

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_spin_yield_target(READ_ONCE(lp->lock), NULL);
	if (owner && arch_vcpu_is_preempted(owner - 1))
		smp_yield_cpu(owner - 1);

	count = spin_retry;
	while (1) {
		old = arch_load_niai4(&lp->lock);
		owner = old & _Q_LOCK_CPU_MASK;
		/* Try to get the lock if it is free. */
		if (!owner) {
			/* Preserve any queued-waiter tail bits while taking the lock. */
			new = (old & _Q_TAIL_MASK) | lockval;
			if (arch_try_cmpxchg_niai8(&lp->lock, old, new)) {
				/* Got the lock */
				return;
			}
			/* cmpxchg lost a race; retry immediately without yielding. */
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		if (!machine_is_lpar() || arch_vcpu_is_preempted(owner - 1))
			smp_yield_cpu(owner - 1);
	}
}
/*
 * Bounded trylock: make up to spin_retry attempts to acquire the lock,
 * without ever blocking or yielding.
 *
 * Returns 1 if the lock was acquired, 0 if all retries were exhausted.
 */
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int lockval = spinlock_lockval();	/* cpu + 1 */
	int lockword;
	int retries = spin_retry;

	while (retries-- > 0) {
		lockword = READ_ONCE(lp->lock);
		/* Only attempt the cmpxchg when the lock word looks free. */
		if (lockword == 0 &&
		    arch_try_cmpxchg(&lp->lock, &lockword, lockval))
			return 1;	/* got the lock */
	}
	return 0;	/* retries exhausted */
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
/*
 * Slow path for taking a read lock while a writer holds or is waiting
 * for the lock.  Statement order below is part of the lock protocol
 * and is preserved exactly.
 */
void arch_read_lock_wait(arch_rwlock_t *rw)
{
	if (unlikely(in_interrupt())) {
		/* Interrupt context: just wait for the writer bit to clear. */
		while (READ_ONCE(rw->cnts) & 0x10000)
			barrier();
		return;
	}

	/* Back out our reader count so recursive read locking stays possible. */
	__atomic_add_const(-1, &rw->cnts);
	/* Queue up behind any other waiters on the wait spinlock. */
	arch_spin_lock(&rw->wait);
	/* Re-register this CPU as a reader. */
	__atomic_add_const(1, &rw->cnts);
	/* Spin until the active writer (bit 0x10000) releases the lock. */
	while (READ_ONCE(rw->cnts) & 0x10000)
		barrier();
	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_read_lock_wait);
/*
 * Slow path for taking the write lock.
 *
 * The writer first announces itself in the high counter bits (0x20000
 * per waiting writer), then queues on the wait spinlock, and finally
 * spins until no readers and no active writer remain (low 17 bits
 * clear) so it can set the writer-active bit 0x10000.
 *
 * NOTE(review): the original file was corrupted here — the function's
 * closing statements had been replaced by unrelated German website
 * boilerplate, leaving an unterminated function.  The tail below
 * (release of rw->wait, closing brace, EXPORT_SYMBOL) is reconstructed
 * to mirror the sibling arch_read_lock_wait(), which takes the same
 * rw->wait queue lock and releases it on exit — confirm against the
 * upstream source.
 */
void arch_write_lock_wait(arch_rwlock_t *rw)
{
	int old;

	/* Add this CPU to the write waiters */
	__atomic_add(0x20000, &rw->cnts);

	/* Put the writer into the wait queue */
	arch_spin_lock(&rw->wait);

	while (1) {
		old = READ_ONCE(rw->cnts);
		if ((old & 0x1ffff) == 0 &&
		    arch_try_cmpxchg(&rw->cnts, &old, old | 0x10000))
			/* Got the lock */
			break;
		barrier();
	}

	arch_spin_unlock(&rw->wait);
}
EXPORT_SYMBOL(arch_write_lock_wait);