/**************************************************************************** ** ** This file is part of GAP, a system for computational discrete algebra. ** ** Copyright of GAP belongs to its developers, whose names are too numerous ** to list here. Please refer to the COPYRIGHT file for details. ** ** SPDX-License-Identifier: GPL-2.0-or-later ** ** This file contains the GAP interface for thread primitives.
*/
// Create a new atomic list of <len> entries, each bound to <init>
// (entries stay unbound when <init> is 0), with the read-write policy.
static Obj NewAtomicListInit(UInt tnum, UInt len, Obj init)
{
    Obj alist = NewAtomicList(tnum, len);
    AtomicObj * slots = ADDR_ATOM(alist);
    slots[0].atom = CHANGE_ALIST_LEN(ALIST_RW, len);
    // Elements live at slot offsets 2 .. len+1.
    for (UInt pos = 1; pos <= len; pos++) {
        slots[pos + 1].obj = init;
    }
    CHANGED_BAG(alist);
    MEMBAR_WRITE(); // Should not be necessary, but better be safe.
    return alist;
}
// Create a new atomic list whose entries are copied from the plain
// GAP list <list>.
static Obj NewAtomicListFrom(UInt tnum, Obj list)
{
    UInt len = LEN_LIST(list);
    Obj copy = NewAtomicList(tnum, len);
    AtomicObj * slots = ADDR_ATOM(copy);
    slots[0].atom = CHANGE_ALIST_LEN(ALIST_RW, len);
    // ELM0_LIST yields 0 for unbound positions, which leaves the
    // corresponding atomic-list entry unbound as well.
    for (UInt pos = 1; pos <= len; pos++) {
        slots[pos + 1].obj = ELM0_LIST(list, pos);
    }
    CHANGED_BAG(copy);
    MEMBAR_WRITE(); // Should not be necessary, but better be safe.
    return copy;
}
// AtomicList([...]) — GAP kernel function.
//
// With no arguments: return an empty atomic list.
// With one argument: either convert a plain list into an atomic list,
// or, for a non-negative small integer n, create an atomic list of
// length n with all entries unbound.
// With two arguments <len>, <init>: create an atomic list of length
// <len> with every entry bound to <init>.
static Obj FuncAtomicList(Obj self, Obj args)
{
    Obj init;
    Int len;
    switch (LEN_PLIST(args)) {
    case 0:
        return NewAtomicList(T_ALIST, 0);
    case 1:
        init = ELM_PLIST(args, 1);
        if (IS_LIST(init)) {
            return NewAtomicListFrom(T_ALIST, init);
        }
        // BUGFIX: 'elseif' is not a C keyword; it must be 'else if'.
        else if (IS_INTOBJ(init) && INT_INTOBJ(init) >= 0) {
            len = INT_INTOBJ(init);
            return NewAtomicListInit(T_ALIST, len, 0);
        }
        else {
            ArgumentError(
                "AtomicList: Argument must be list or a non-negative integer");
        }
    case 2:
        init = ELM_PLIST(args, 1);
        len = IS_INTOBJ(init) ? INT_INTOBJ(init) : -1;
        if (len < 0)
            ArgumentError(
                "AtomicList: First argument must be a non-negative integer");
        init = ELM_PLIST(args, 2);
        return NewAtomicListInit(T_ALIST, len, init);
    default:
        ArgumentError("AtomicList: Too many arguments");
    }
    return (Obj)0; // flow control hint
}
// FixedAtomicList([...]) — GAP kernel function.
//
// Same calling conventions as AtomicList (see FuncAtomicList), but the
// resulting list has TNUM T_FIXALIST, i.e. it cannot be resized.
static Obj FuncFixedAtomicList(Obj self, Obj args)
{
    Obj init;
    Int len;
    switch (LEN_PLIST(args)) {
    case 0:
        return NewAtomicList(T_FIXALIST, 0);
    case 1:
        init = ELM_PLIST(args, 1);
        if (IS_LIST(init)) {
            return NewAtomicListFrom(T_FIXALIST, init);
        }
        // BUGFIX: 'elseif' is not a C keyword; it must be 'else if'.
        else if (IS_INTOBJ(init) && INT_INTOBJ(init) >= 0) {
            len = INT_INTOBJ(init);
            return NewAtomicListInit(T_FIXALIST, len, 0);
        }
        else {
            ArgumentError("FixedAtomicList: Argument must be list or a "
                          "non-negative integer");
        }
    case 2:
        init = ELM_PLIST(args, 1);
        len = IS_INTOBJ(init) ? INT_INTOBJ(init) : -1;
        if (len < 0)
            ArgumentError("FixedAtomicList: First argument must be a "
                          "non-negative integer");
        init = ELM_PLIST(args, 2);
        return NewAtomicListInit(T_FIXALIST, len, init);
    default:
        ArgumentError("FixedAtomicList: Too many arguments");
    }
    return (Obj)0; // flow control hint
}
// MakeFixedAtomicList(list) — GAP kernel function.
//
// Convert an atomic list into a fixed-size atomic list (T_FIXALIST)
// in place and return it. The TNUM is checked twice: once without the
// lock, then again under the HashLock, because another thread may
// retype the object between the first check and acquiring the lock.
static Obj FuncMakeFixedAtomicList(Obj self, Obj list)
{
    switch (TNUM_OBJ(list)) {
    case T_ALIST:
    case T_FIXALIST:
        HashLock(list);
        // Re-check under the lock: the TNUM may have changed meanwhile.
        switch (TNUM_OBJ(list)) {
        case T_ALIST:
        case T_FIXALIST:
            RetypeBag(list, T_FIXALIST);
            HashUnlock(list);
            return list;
        default:
            // Another thread retyped the object to something that is
            // no longer an atomic list; release the lock and error.
            HashUnlock(list);
            RequireArgument(SELF_NAME, list, "must be an atomic list");
            return (Obj) 0; // flow control hint
        }
        HashUnlock(list);   // not reachable; kept for symmetry
        break;
    default:
        RequireArgument(SELF_NAME, list, "must be an atomic list");
    }
    return (Obj) 0; // flow control hint
}
// If list[index] is bound then return it, else return 'value'. // The reason this function exists is that it is not thread-safe to // check if an index in a list is bound before reading it, as it // could be unbound before the actual reading is performed. static Obj ElmDefAList(Obj list, Int n, Obj value)
{
UInt len; const AtomicObj * addr;
Obj val;
static Obj AtomicCompareSwapAList(Obj list, Int index, Obj old, Obj new);
// Given atomic list 'list', assign list[index] the value 'new', if list[index] // is currently assigned 'old'. This operation is performed atomically. static Obj FuncCOMPARE_AND_SWAP(Obj self, Obj list, Obj index, Obj old, Obj new)
{ Int len;
AtomicObj aold, anew;
AtomicObj * addr;
Obj result;
UInt n = GetPositiveSmallInt(SELF_NAME, index);
switch (TNUM_OBJ(list)) { case T_FIXALIST: case T_APOSOBJ: break; case T_ALIST: return AtomicCompareSwapAList(list, n, old, new); default:
RequireArgument(SELF_NAME, list, "must be an atomic list");
}
addr = ADDR_ATOM(list);
len = ALIST_LEN((UInt)addr[0].atom);
// Similar to COMPARE_AND_SWAP, but assigns list[index] the value 'new'
// if list[index] is currently unbound. An unbound entry is stored as 0,
// which is why 0 is passed as the 'old' value here.
static Obj FuncATOMIC_BIND(Obj self, Obj list, Obj index, Obj new)
{
    return FuncCOMPARE_AND_SWAP(self, list, index, 0, new);
}
// Similar to COMPARE_AND_SWAP, but unbinds list[index] if list[index]
// is currently assigned 'old'. Storing 0 as the new value marks the
// entry as unbound.
static Obj FuncATOMIC_UNBIND(Obj self, Obj list, Obj index, Obj old)
{
    return FuncCOMPARE_AND_SWAP(self, list, index, old, 0);
}
// ATOMIC_ADDITION(list, index, inc) — GAP kernel function.
//
// Atomically add the small integer <inc> to list[index] and return the
// new value, using a compare-and-swap retry loop. Only permitted on
// fixed-size atomic lists (and atomic posobjs), whose storage cannot be
// reallocated underneath the CAS.
static Obj FuncATOMIC_ADDITION(Obj self, Obj list, Obj index, Obj inc)
{
    UInt n;
    UInt len;
    AtomicObj aold, anew, *addr;
    switch (TNUM_OBJ(list)) {
    case T_FIXALIST:
    case T_APOSOBJ:
        break;
    default:
        RequireArgument(SELF_NAME, list, "must be a fixed atomic list");
    }
    addr = ADDR_ATOM(list);
    len = ALIST_LEN((UInt) addr[0].atom);
    n = GetBoundedInt(SELF_NAME, index, 1, len);
    // BUGFIX: validate <inc>, not <index>. GetBoundedInt above already
    // guarantees <index> is a valid small integer, while <inc> was
    // previously passed to INT_INTOBJ below without any check.
    RequireSmallInt(SELF_NAME, inc);
    do {
        aold = addr[n + 1];
        if (!IS_INTOBJ(aold.obj))
            ArgumentError("ATOMIC_ADDITION: list element is not an integer");
        anew.obj = INTOBJ_INT(INT_INTOBJ(aold.obj) + INT_INTOBJ(inc));
    } while (!COMPARE_AND_SWAP(&(addr[n + 1].atom), aold.atom, anew.atom));
    return anew.obj;
}
// AddAtomicList(list, obj) — GAP kernel function.
// Append <obj> to the non-fixed atomic list <list> and return the
// resulting length as a GAP small integer.
static Obj FuncAddAtomicList(Obj self, Obj list, Obj obj)
{
    if (TNUM_OBJ(list) != T_ALIST)
        RequireArgument(SELF_NAME, list, "must be a non-fixed atomic list");
    Int newlen = AddAList(list, obj);
    return INTOBJ_INT(newlen);
}
// Convert an atomic list into an ordinary plain list (T_PLIST),
// copying all entries.
Obj FromAtomicList(Obj list)
{
    const AtomicObj * slots = CONST_ADDR_ATOM(list);
    UInt len = ALIST_LEN((UInt) slots[0].atom);
    Obj plain = NEW_PLIST(T_PLIST, len);
    SET_LEN_PLIST(plain, len);
    MEMBAR_READ();
    // Entries of the atomic list live at slot offsets 2 .. len+1.
    for (UInt i = 1; i <= len; i++)
        SET_ELM_PLIST(plain, i, slots[i + 1].obj);
    CHANGED_BAG(plain);
    return plain;
}
// FromAtomicList(list) — GAP kernel function wrapper around
// FromAtomicList; accepts both fixed and growable atomic lists.
static Obj FuncFromAtomicList(Obj self, Obj list)
{
    UInt tnum = TNUM_OBJ(list);
    if (tnum != T_FIXALIST && tnum != T_ALIST)
        RequireArgument(SELF_NAME, list, "must be an atomic list");
    return FromAtomicList(list);
}
// Print a thread-local record: first the fields of the current
// thread's private copy (if it has one), then every default field that
// the private copy does not override.
// BUGFIX: the original read 'staticvoid' (fused tokens), which does
// not compile; it must be 'static void'.
static void PrintTLRecord(Obj obj)
{
    Obj contents = GetTLInner(obj);
    const Obj * table = CONST_ADDR_OBJ(contents);
    Obj record = 0;
    Obj defrec = table[TLR_DEFAULTS];
    int comma = 0;       // set once something has been printed
    AtomicObj * deftable;
    int i;
    // Fetch this thread's private record, if the per-thread table is
    // large enough to contain an entry for it.
    if (TLS(threadID) < (UInt)table[TLR_SIZE]) {
        record = table[TLR_DATA + TLS(threadID)];
    }
    Pr("%2>rec( %2>", 0, 0);
    if (record) {
        for (i = 1; i <= LEN_PREC(record); i++) {
            Obj val = GET_ELM_PREC(record, i);
            Pr("%I", (Int)NAME_RNAM(labs(GET_RNAM_PREC(record, i))), 0);
            Pr("%< := %>", 0, 0);
            if (val)
                PrintObj(val);
            else
                Pr("", 0, 0);
            if (i < LEN_PREC(record))
                Pr("%2<, %2>", 0, 0);
            else
                comma = 1;
        }
    }
    HashLockShared(defrec);
    deftable = ARecordTable(defrec);
    for (i = 0; i < deftable[AR_CAP].atom; i++) {
        UInt key = deftable[AR_DATA + 2 * i].atom;
        Obj value = deftable[AR_DATA + 2 * i + 1].obj;
        // Print a default only if the private copy does not shadow it.
        if (key && (!record || !PositionPRec(record, key, 0))) {
            if (comma)
                Pr("%2<, %2>", 0, 0);
            Pr("%I", (Int)(NAME_RNAM(key)), 0);
            Pr("%< := %>", 0, 0);
            PrintObj(CopyTraversed(value));
            comma = 1;
        }
    }
    HashUnlockShared(defrec);
    Pr(" %4<)", 0, 0);
}
// Look up <field> (a record-name id) in the atomic record <record>
// without taking a lock. Open addressing with linear probing, starting
// at the Fibonacci hash of <field>. Returns the value, or 0 when the
// field is absent.
Obj GetARecordField(Obj record, UInt field)
{
    AtomicObj *table = ARecordTable(record);
    AtomicObj *data = table + AR_DATA;
    UInt cap, bits, hash, n;
    /* We need a memory barrier to ensure that we see fields that
     * were updated before the table pointer was updated; there is
     * a matching write barrier in the set operation. */
    MEMBAR_READ();
    cap = table[AR_CAP].atom;
    bits = table[AR_BITS].atom;
    hash = FibHash(field, bits);
    n = cap;
    // Probe at most 'cap' slots; each slot is a (key, value) pair.
    while (n-- > 0)
    {
        UInt key = data[hash*2].atom;
        if (key == field)
        {
            Obj result;
            MEMBAR_READ(); // memory barrier
            result = data[hash*2+1].obj;
            // A value of Undefined makes us keep probing — presumably
            // it marks a removed binding; verify against the setter.
            if (result != Undefined)
                return result;
        }
        // An empty slot means the field is not in the table.
        if (!key)
            return (Obj) 0;
        hash++;
        if (hash == cap)
            hash = 0;   // wrap around
    }
    return (Obj) 0;
}
// Return list[pos] from atomic list <list>; raise a GAP error if the
// index is out of range or the entry is unbound.
Obj ElmAList(Obj list, Int pos)
{
    const AtomicObj * addr = CONST_ADDR_ATOM(list);
    UInt len;
    MEMBAR_READ();
    len = ALIST_LEN((UInt)addr[0].atom);
    Obj result;
    if (pos < 1 || pos > len) {
        // BUGFIX: the message had lost its "<pos>"/"<list>" placeholders
        // (apparently eaten as markup); restored to the standard wording.
        ErrorMayQuit(
            "Atomic List Element: <pos>=%d is an invalid index for <list>",
            (Int)pos, 0);
    }
    result = addr[1 + pos].obj;
    if (!result)
        ErrorMayQuit(
            "Atomic List Element: <list>[%d] must have an assigned value",
            (Int)pos, 0);
    MEMBAR_READ();
    return result;
}
staticBOOL IsbAList(Obj list, Int pos)
{ const AtomicObj *addr = CONST_ADDR_ATOM(list);
UInt len;
MEMBAR_READ();
len = ALIST_LEN((UInt) addr[0].atom); return pos >= 1 && pos <= len && addr[1+pos].obj;
}
// Assign <obj> to list[pos] in the fixed-size atomic list <list>,
// honoring the list's write policy.
// BUGFIX: the original read 'staticvoid' (fused tokens), which does
// not compile; it must be 'static void'. The error messages are also
// restored to include their "<pos>"/"<list>" placeholders.
static void AssFixAList(Obj list, Int pos, Obj obj)
{
    UInt pol = (UInt)CONST_ADDR_ATOM(list)[0].atom;
    UInt len = ALIST_LEN(pol);
    if (pos < 1 || pos > len) {
        ErrorMayQuit(
            "Atomic List Element: <pos>=%d is an invalid index for <list>",
            (Int)pos, 0);
    }
    switch (ALIST_POL(pol)) {
    case ALIST_RW:
        // Read-write policy: a plain store suffices.
        ADDR_ATOM(list)[1 + pos].obj = obj;
        break;
    case ALIST_W1:
        // Write-once policy: only an assignment to an unbound (0) entry
        // takes effect; later assignments are silently ignored.
        COMPARE_AND_SWAP(&ADDR_ATOM(list)[1 + pos].atom,
                         (AtomicUInt) 0, (AtomicUInt) obj);
        break;
    case ALIST_WX:
        // Write-once-or-error policy: assigning to an already-bound
        // entry is an error.
        if (!COMPARE_AND_SWAP(&ADDR_ATOM(list)[1 + pos].atom,
                              (AtomicUInt) 0, (AtomicUInt) obj)) {
            ErrorQuit(
                "Atomic List Assignment: <list>[%d] already has an assigned value",
                pos, (Int) 0);
        }
        break;
    }
    CHANGED_BAG(list);
    MEMBAR_WRITE();
}
// Ensure the capacity of atomic list 'list' is at least 'pos'. // Errors if 'pos' is 'list' is fixed length and 'pos' is greater // than the existing length. // If this function returns, then the code has a (possibly shared) // HashLock on the list, which must be released by the caller. staticvoid EnlargeAList(Obj list, Int pos)
{
HashLockShared(list);
AtomicObj * addr = ADDR_ATOM(list);
UInt pol = (UInt)addr[0].atom;
UInt len = ALIST_LEN(pol); if (pos > len) {
HashUnlockShared(list);
HashLock(list);
addr = ADDR_ATOM(list);
pol = (UInt)addr[0].atom;
len = ALIST_LEN(pol);
} if (pos > len) { if (TNUM_OBJ(list) != T_ALIST) {
HashUnlock(list);
ErrorQuit( "Atomic List Assignment: extending fixed size atomic list",
0, 0); return; // flow control hint
}
addr = ADDR_ATOM(list); if (pos > SIZE_BAG(list) / sizeof(AtomicObj) - 2) {
Obj newlist;
UInt newlen = len; do {
newlen = newlen * 3 / 2 + 1;
} while (pos > newlen);
newlist = NewBag(T_ALIST, sizeof(AtomicObj) * (2 + newlen));
memcpy(PTR_BAG(newlist), PTR_BAG(list), sizeof(AtomicObj) * (2 + len));
addr = ADDR_ATOM(newlist);
addr[0].atom = CHANGE_ALIST_LEN(pol, pos);
MEMBAR_WRITE(); // TODO: Won't work with GASMAN
SET_PTR_BAG(list, PTR_BAG(newlist));
MEMBAR_WRITE();
} else {
addr[0].atom = CHANGE_ALIST_LEN(pol, pos);
MEMBAR_WRITE();
}
}
}
// Assign <obj> to list[pos] in the growable atomic list <list>,
// enlarging the list first if necessary and honoring its write policy.
// BUGFIX: error messages restored to include their lost "<pos>"/"<list>"
// placeholders.
void AssAList(Obj list, Int pos, Obj obj)
{
    if (pos < 1) {
        ErrorQuit(
            "Atomic List Element: <pos>=%d is an invalid index for <list>",
            (Int) pos, 0);
    }
    // After this call we hold a HashLock on the list (see EnlargeAList).
    EnlargeAList(list, pos);
    AtomicObj * addr = ADDR_ATOM(list);
    UInt pol = (UInt)addr[0].atom;
    switch (ALIST_POL(pol)) {
    case ALIST_RW:
        // Read-write policy: plain store.
        ADDR_ATOM(list)[1 + pos].obj = obj;
        break;
    case ALIST_W1:
        // Write-once policy: only the first assignment takes effect.
        COMPARE_AND_SWAP(&ADDR_ATOM(list)[1 + pos].atom,
                         (AtomicUInt) 0, (AtomicUInt) obj);
        break;
    case ALIST_WX:
        // Write-once-or-error policy.
        if (!COMPARE_AND_SWAP(&ADDR_ATOM(list)[1 + pos].atom,
                              (AtomicUInt) 0, (AtomicUInt) obj)) {
            HashUnlock(list);
            ErrorQuit(
                "Atomic List Assignment: <list>[%d] already has an assigned value",
                pos, (Int) 0);
        }
        break;
    }
    CHANGED_BAG(list);
    MEMBAR_WRITE();
    HashUnlock(list);
}
static Obj AtomicCompareSwapAList(Obj list, Int pos, Obj old, Obj new)
{ if (pos < 1) {
ErrorQuit( "Atomic List Element: =%d is an invalid index for ",
(Int)pos, 0);
}
staticInt DestroyAObjectsState(void)
{
Obj records;
UInt i, len;
records = TLS(tlRecords); if (records) {
len = LEN_PLIST(records); for (i = 1; i <= len; i++)
UpdateThreadRecord(ELM_PLIST(records, i), (Obj)0);
} return 0;
}
// Forbid comparison and copying of atomic objects, because they // cannot be done in a thread-safe manner staticInt AtomicRecordErrorNoCompare(Obj arg1, Obj arg2)
{
ErrorQuit("atomic records cannot be compared with other records", 0, 0); // Make compiler happy return 0;
}
staticInt AtomicListErrorNoCompare(Obj arg1, Obj arg2)
{
ErrorQuit("atomic lists cannot be compared with other lists", 0, 0); // Make compiler happy return 0;
}
// ShallowCopyObj handler installed for atomic TNUMs: always an error,
// since atomic objects cannot be copied thread-safely.
static Obj AtomicErrorNoShallowCopy(Obj arg1)
{
    ErrorQuit("atomic objects cannot be copied", 0, 0);
    return 0; // Make compiler happy
}
#if !defined(USE_THREADSAFE_COPYING) static Obj AtomicErrorNoCopy(Obj arg1, Int arg2)
{
ErrorQuit("atomic objects cannot be copied", 0, 0); // Make compiler happy return 0;
} #endif
// NOTE(review): the lines below are the interior of an initialization
// function whose header lies outside this chunk; code tokens are
// unchanged, only formatting and comments were added.

// Forbid various operations on atomic lists and records we can't
// perform thread-safely.

// Ensure that atomic objects cannot be copied
for (UInt type = FIRST_ATOMIC_TNUM; type <= LAST_ATOMIC_TNUM; type++) {
    ShallowCopyObjFuncs[type] = AtomicErrorNoShallowCopy;
#if !defined(USE_THREADSAFE_COPYING)
    CopyObjFuncs[type] = AtomicErrorNoCopy;
    // Do not error on CleanObj, just leave it as a no-op
#endif // !defined(USE_THREADSAFE_COPYING)
}
// Ensure atomic lists can't be compared with other lists
for (UInt type = FIRST_ATOMIC_LIST_TNUM; type <= LAST_ATOMIC_LIST_TNUM;
     type++) {
    // ... neither with ordinary lists ...
    for (UInt t2 = FIRST_LIST_TNUM; t2 <= LAST_LIST_TNUM; ++t2) {
        EqFuncs[type][t2] = AtomicListErrorNoCompare;
        EqFuncs[t2][type] = AtomicListErrorNoCompare;
        LtFuncs[type][t2] = AtomicListErrorNoCompare;
        LtFuncs[t2][type] = AtomicListErrorNoCompare;
    }
    // ... nor with other atomic lists.
    for (UInt t2 = FIRST_ATOMIC_LIST_TNUM; t2 <= LAST_ATOMIC_LIST_TNUM;
         ++t2) {
        EqFuncs[type][t2] = AtomicListErrorNoCompare;
        EqFuncs[t2][type] = AtomicListErrorNoCompare;
        LtFuncs[type][t2] = AtomicListErrorNoCompare;
        LtFuncs[t2][type] = AtomicListErrorNoCompare;
    }
}
// Ensure atomic records can't be compared with other records
for (UInt type = FIRST_ATOMIC_RECORD_TNUM; type <= LAST_ATOMIC_RECORD_TNUM;
     type++) {
    // ... neither with ordinary records ...
    for (UInt t2 = FIRST_RECORD_TNUM; t2 <= LAST_RECORD_TNUM; ++t2) {
        EqFuncs[type][t2] = AtomicRecordErrorNoCompare;
        EqFuncs[t2][type] = AtomicRecordErrorNoCompare;
        LtFuncs[type][t2] = AtomicRecordErrorNoCompare;
        LtFuncs[t2][type] = AtomicRecordErrorNoCompare;
    }
    // ... nor with other atomic records.
    for (UInt t2 = FIRST_ATOMIC_RECORD_TNUM; t2 <= LAST_ATOMIC_RECORD_TNUM;
         ++t2) {
        EqFuncs[type][t2] = AtomicRecordErrorNoCompare;
        EqFuncs[t2][type] = AtomicRecordErrorNoCompare;
        LtFuncs[type][t2] = AtomicRecordErrorNoCompare;
        LtFuncs[t2][type] = AtomicRecordErrorNoCompare;
    }
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.