/**************************************************************************** ** ** This file is part of GAP, a system for computational discrete algebra. ** ** Copyright of GAP belongs to its developers, whose names are too numerous ** to list here. Please refer to the COPYRIGHT file for details. ** ** SPDX-License-Identifier: GPL-2.0-or-later ** ** This file contains the functions of the filters, operations, attributes, ** and properties package.
*/
sub = LoadSubObj(); SET_TRUES_FLAGS( flags, sub );
sub = LoadSubObj(); SET_HASH_FLAGS( flags, sub );
sub = LoadSubObj(); SET_AND_CACHE_FLAGS( flags, sub );
len = NRB_FLAGS(flags);
ptr = BLOCKS_FLAGS(flags); for ( i = 1; i <= len; i++ )
*ptr++ = LoadUInt();
} #endif
/**************************************************************************** ** *F FuncHASH_FLAGS( <self>, <flags> ) . . . . . . hash value of a flags list ** ** The hash value is independent of the size of a machine word (32 or 64). ** ** The rather peculiar cast in the definition of HASH_FLAGS_SIZE is needed ** to get the calculation to work right on the alpha. ** *T The 64 bit version depends on the byte order -- it assumes that ** the lower addressed half-word is the less significant **
*/ #define HASH_FLAGS_SIZE (Int4)67108879L
static Obj FuncHASH_FLAGS(Obj self, Obj flags)
{
Int4 hash;
Int4 x; Int len;
UInt4 * ptr; Int i;
// do some trivial checks
RequireFlags(SELF_NAME, flags); if ( HASH_FLAGS(flags) != 0 ) { return HASH_FLAGS(flags);
}
// do the real work */ #if !defined(SYS_IS_64_BIT) || !defined(WORDS_BIGENDIAN)
// 32 bit case -- this is the "defining" case, others are adjusted to // comply with this. For 64 bit systems in little endian mode, this // amounts to the same code, only the value of NRB_FLAGS has to be // adjusted
len = NRB_FLAGS(flags) * (sizeof(UInt) / sizeof(UInt4));
ptr = (UInt4 *)BLOCKS_FLAGS(flags);
hash = 0;
x = 1; for ( i = len; i >= 1; i-- ) {
hash = (hash + (*ptr % HASH_FLAGS_SIZE) * x) % HASH_FLAGS_SIZE;
x = (31 * x) % HASH_FLAGS_SIZE;
ptr++;
}
#else
// This is the hardest case: 64 bit big endian
len = NRB_FLAGS(flags);
ptr = (UInt4 *)BLOCKS_FLAGS(flags);
hash = 0;
x = 1; for ( i = len; i >= 1; i-- ) {
// least significant 32 bits first
hash = (hash + (ptr[1] % HASH_FLAGS_SIZE) * x) % HASH_FLAGS_SIZE;
x = (31 * x) % HASH_FLAGS_SIZE; // now the more significant
hash = (hash + (*ptr % HASH_FLAGS_SIZE) * x) % HASH_FLAGS_SIZE;
x = (31 * x) % HASH_FLAGS_SIZE;
/**************************************************************************** ** *F FuncTRUES_FLAGS( <self>, <flags> ) . . . true positions of a flags list ** ** see 'FuncPositionsTruesBlist' in "blister.c" for information.
*/
static Obj FuncTRUES_FLAGS(Obj self, Obj flags)
{
    Obj                 sub;            // handle of the result
    Int                 len;            // logical length of the list
    UInt *              ptr;            // pointer to flags
    UInt                nrb;            // number of blocks in flags
    UInt                n;              // number of bits in flags
    UInt                nn;
    UInt                i;              // loop variable

    // do some trivial checks, consistent with the other flags handlers
    RequireFlags(SELF_NAME, flags);

    // if the result was computed before, return the cached value; without
    // this check the cache written via SET_TRUES_FLAGS below is never read
    // here (same read-before-compute pattern as in 'FuncHASH_FLAGS')
    if ( TRUES_FLAGS(flags) != 0 ) {
        return TRUES_FLAGS(flags);
    }

    // compute the number of 'true'-s just as in 'FuncSizeBlist'
    nrb = NRB_FLAGS(flags);
    ptr = (UInt*)BLOCKS_FLAGS(flags);
    n = COUNT_TRUES_BLOCKS(ptr, nrb);

    // make the sublist (we now know its size exactly)
    sub = NEW_PLIST_IMM( T_PLIST, n );
    SET_LEN_PLIST( sub, n );

    // loop over the boolean list and stuff elements into <sub>
    len = LEN_FLAGS( flags );
    nn = 1;
    for ( i = 1; nn <= n && i <= len; i++ ) {
        if ( C_ELM_FLAGS( flags, i ) ) {
            SET_ELM_PLIST( sub, nn, INTOBJ_INT(i) );
            nn++;
        }
    }
    CHANGED_BAG(sub);

    // cache and return the sublist
    SET_TRUES_FLAGS( flags, sub );
    CHANGED_BAG(flags);
    return sub;
}
/**************************************************************************** ** *F FuncSIZE_FLAGS( <self>, <flags> ) . . . . number of trues of a flags list ** ** see 'FuncSIZE_FLAGS'
*/ static Obj FuncSIZE_FLAGS(Obj self, Obj flags)
{
UInt * ptr; // pointer to flags
UInt nrb; // number of blocks in flags
UInt n; // number of bits in flags
static Obj FuncAND_FLAGS(Obj self, Obj flags1, Obj flags2)
{
Obj flags; Int len1; Int len2; Int size1; Int size2;
UInt * ptr;
UInt * ptr1;
UInt * ptr2; Int i;
#ifdef AND_FLAGS_HASH_SIZE
Obj cache;
Obj entry; #ifdef HPCGAP
Obj locked = 0; #endif
UInt hash;
UInt hash2; static UInt next = 0; // FIXME HPC-GAP: is usage of this static thread-safe? #endif
// do some trivial checks
RequireFlags(SELF_NAME, flags1);
RequireFlags(SELF_NAME, flags2);
if (flags1 == flags2) return flags1; if (LEN_FLAGS(flags2) == 0) return flags1; if (LEN_FLAGS(flags1) == 0) return flags2;
// check the cache # ifdef AND_FLAGS_HASH_SIZE // We want to ensure if we calculate 'flags1 and flags2', then // later do 'flags2 and flags1', we will get the value from the cache. // Therefore we just compare the location of the Bag masterpointers // for both flags (which doesn't change), and use the cache of the // smaller. To this end, ensure flags1 is the smaller one. if ( flags1 > flags2 ) {
SWAP(Obj, flags1, flags2);
}
/**************************************************************************** ** *F HandleMethodNotFound( <oper>, <nargs>, <args>, <verbose>, <constructor>, ** <precedence> ) ** ** This enables the special error handling for Method Not Found Errors. ** It assembles all the necessary information into a form where it can be ** conveniently accessed from GAP. **
*/
r = NEW_PREC(5); if (RNamOperation == 0)
{ // we can't do this in initialization because opers // is initialized BEFORE records
RNamIsConstructor = RNamName("isConstructor");
RNamIsVerbose = RNamName("isVerbose");
RNamOperation = RNamName("Operation");
RNamArguments = RNamName("Arguments");
RNamPrecedence = RNamName("Precedence");
}
AssPRec(r,RNamOperation,oper);
AssPRec(r,RNamArguments,arglist);
AssPRec(r,RNamIsVerbose,verbose ? True : False);
AssPRec(r,RNamIsConstructor,constructor ? True : False);
AssPRec(r,RNamPrecedence,INTOBJ_INT(precedence));
SortPRecRNam(r);
CALL_1ARGS(HANDLE_METHOD_NOT_FOUND, r); #ifdef HPCGAP
TLS(currentRegion) = savedRegion; #endif
ErrorQuit("panic, HANDLE_METHOD_NOT_FOUND should not return", 0, 0);
}
/**************************************************************************** ** *F FuncCOMPACT_TYPE_IDS( <self> ) . . . garbage collect the type IDs **
*/
#ifdef USE_GASMAN
static Obj FLUSH_ALL_METHOD_CACHES;

static Int NextTypeID;      // next id to assign during FuncCOMPACT_TYPE_IDS
static Obj IsType;          // the filter 'IsType'

// Callback for 'CallbackForAllBags': if bag <b> is a type object (a
// T_POSOBJ lying in the filter 'IsType'), give it the next consecutive
// type id. Used by 'FuncCOMPACT_TYPE_IDS' to renumber all type ids.
// (fixed: 'staticInt' / 'staticvoid' were fused keywords)
static void FixTypeIDs( Bag b )
{
    if ( (TNUM_OBJ( b ) == T_POSOBJ) &&
         (DoFilter(IsType, b ) == True ))
    {
        SET_ID_TYPE(b, INTOBJ_INT(NextTypeID));
        NextTypeID++;
    }
}
#endif
static Obj FuncCOMPACT_TYPE_IDS(Obj self)
{
#ifdef USE_GASMAN
    // walk over all bags, renumbering type ids consecutively from the
    // smallest immediate integer upwards, then flush all method caches,
    // since they may still refer to the old (now invalid) ids
    NextTypeID = INT_INTOBJ_MIN;
    CallbackForAllBags( FixTypeIDs );
    CALL_0ARGS(FLUSH_ALL_METHOD_CACHES);
    return INTOBJ_INT(NextTypeID);
#else
    // in general garbage collectors, we cannot iterate over
    // all bags ever allocated, so we can't implement this function;
    // however, with 64 bit versions of GAP, we also should never
    // run out of type ids, so this is of little concern
    ErrorQuit("panic, COMPACT_TYPE_IDS is not available", 0, 0);
#endif
}
/**************************************************************************** ** *F DoOperation<N>Args( <oper>, ... ) . . . . . . . . . . Operation Handlers ** ** This section of the file provides handlers for operations. The main ones ** are DoOperation0Args ... DoOperation6Args and the DoVerboseOperation ** tracing variants. Then there are variants for constructors. In the ** following section are handlers for attributes, properties and the ** operations related to them. ** ** This code has been refactored to reduce repetition. Its efficiency now ** depends on the C++ compiler inlining template functions and ** doing constant folding to effectively produce a specialised version of ** the main function.
*/
// Helper function to quickly get the type of an object, avoiding // indirection in the case of external objects with a stored type I.e., // the compiler can inline TYPE_COMOBJ etc., while it cannot inline // TYPE_OBJ staticinline Obj TYPE_OBJ_FEO(Obj obj)
{ #ifdef HPCGAP // TODO: We need to be able to automatically derive this.
ImpliedWriteGuard(obj); #endif switch ( TNUM_OBJ( obj ) ) { case T_COMOBJ: return TYPE_COMOBJ(obj); case T_POSOBJ: return TYPE_POSOBJ(obj); case T_DATOBJ: return TYPE_DATOBJ(obj); default: return TYPE_OBJ(obj);
}
}
// Method Cache -- we remember recently selected methods in a cache. // The effectiveness of this cache is vital for GAP's performance
// The next few functions deal with finding and allocating if necessary the // cache for a given operation and number of arguments, and some locking in // HPC-GAP
if (cache == 0) { /* This is a safe form of double-checked locking, because
* the cache value is not a reference. */
LockCache();
cache = CACHE_OPER(oper, i); if (cache == 0) {
CacheSize++;
cacheIndex = CacheSize;
SET_CACHE_OPER(oper, i, INTOBJ_INT(cacheIndex));
} else
cacheIndex = INT_INTOBJ(cache);
UnlockCache();
} else {
cacheIndex = INT_INTOBJ(cache);
}
if (cacheIndex > STATE(MethodCacheSize)) {
len = STATE(MethodCacheSize); while (cacheIndex > len)
len *= 2;
GROW_PLIST(STATE(MethodCache), len);
SET_LEN_PLIST(STATE(MethodCache), len);
STATE(MethodCacheItems) = ADDR_OBJ(STATE(MethodCache));
STATE(MethodCacheSize) = len;
}
#ifndef WARD_ENABLED // This function actually searches the cache. Normally it should be // called with n a compile-time constant to allow the optimiser to tidy // things up. template <Int n> static Obj GetMethodCached(Obj cacheBag, Int prec, Obj ids[])
{
UInt typematch;
Obj * cache;
Obj method = 0;
UInt i; const UInt cacheEntrySize = n + 2;
cache = BASE_PTR_PLIST(cacheBag); #ifdef HPCGAP
cache++; // skip over the pointer to the methods list #endif
// Up to CACHE_SIZE methods might be in the cache if (prec < CACHE_SIZE) { // first place to look and also the place we'll put the result:
UInt target = cacheEntrySize * prec; for (i = target; i < cacheEntrySize * CACHE_SIZE;
i += cacheEntrySize) { if (cache[i + 1] == INTOBJ_INT(prec)) {
typematch = 1; // This loop runs over the arguments, should be compiled away for (int j = 0; j < n; j++) { if (cache[i + j + 2] != ids[j]) {
typematch = 0; break;
}
} if (typematch) {
method = cache[i]; #ifdef COUNT_OPERS
CacheHitStatistics[prec][i / cacheEntrySize][n]++; #endif if (i > target) {
// We found the method, but it was further down the // cache than we would like it to be, so move it up
Obj buf[cacheEntrySize];
memcpy(buf, cache + i, sizeof(Obj) * cacheEntrySize);
SyMemmove(cache + target + cacheEntrySize,
cache + target, sizeof(Obj) * (i - target));
memcpy(cache + target, buf, sizeof(Obj) * cacheEntrySize);
} break;
}
}
}
} return method;
}
// Add a method to the cache -- called when a method is selected that is not // in the cache staticinlinevoid
CacheMethod(Obj cacheBag, UInt n, Int prec, Obj * ids, Obj method)
{ if (prec >= CACHE_SIZE) return; // We insert this method at position <prec> and move // the older methods down
UInt cacheEntrySize = n + 2;
Obj * cache = BASE_PTR_PLIST(cacheBag) + prec * cacheEntrySize; #ifdef HPCGAP
cache++; // skip over the pointer to the methods list #endif
SyMemmove(cache + cacheEntrySize, cache, sizeof(Obj) * (CACHE_SIZE - prec - 1) * cacheEntrySize);
cache[0] = method;
cache[1] = INTOBJ_INT(prec); for (UInt i = 0; i < n; i++)
cache[2 + i] = ids[i];
CHANGED_BAG(cacheBag);
} #endif// WARD_ENABLED
// This function searches through the methods of operation <oper> with // arity <n>, looking for those matching the given <types>. Among these, // the <prec>-th is selected (<prec> starts at 0). // // If <verbose> is non-zero, the matching method are printed by calling // 'VMETHOD_PRINT_INFO' resp. 'NEXT_VMETHOD_PRINT_INFO'. // // If <constructor> is non-zero, then <oper> is a constructor, leading // to <types[0]> being treated differently. // enum {
BASE_SIZE_METHODS_OPER_ENTRY = 6,
}; template <UInt n> static Obj GetMethodUncached(
UInt verbose, UInt constructor, Obj methods, Int prec, Obj types[])
{ if (methods == 0) return Fail;
const UInt len = LEN_PLIST(methods);
UInt matchCount = 0; for (UInt pos = 0; pos < len; pos += n + BASE_SIZE_METHODS_OPER_ENTRY) { // each method comprises n + BASE_SIZE_METHODS_OPER_ENTRY // entries in the 'methods' list: // entry 1 is the family predicate; // entries 2 till n+1 are the n argument filters // entry n+2 is the actual method // entry n+3 is the rank // entry n+4 is the info text // entry n+5 is, if set, the location where the method was installed // entry n+6 is, if set, the relative rank that was supplied when // the method was installed, either as a small integer // or a function of no arguments
// check argument filters against the given types
Obj filter; int k = 1; if (constructor) {
filter = ELM_PLIST(methods, pos + k + 1);
GAP_ASSERT(TNUM_OBJ(filter) == T_FLAGS); if (!IS_SUBSET_FLAGS(filter, types[0])) continue;
k++;
} for (; k <= n; ++k) {
filter = ELM_PLIST(methods, pos + k + 1);
GAP_ASSERT(TNUM_OBJ(filter) == T_FLAGS); if (!IS_SUBSET_FLAGS(FLAGS_TYPE(types[k - 1]), filter)) break;
}
// if some filter did not match, go to next method if (k <= n) continue;
// check family predicate, with a hot path for the very // common trivial predicate 'ReturnTrue'
Obj fampred = ELM_PLIST(methods, pos + 1); if (fampred != ReturnTrue) {
Obj res = 0; switch (n) { case 0:
res = CALL_0ARGS(fampred); break; case 1:
res = CALL_1ARGS(fampred, FAMILY_TYPE(types[0])); break; case 2:
res = CALL_2ARGS(fampred, FAMILY_TYPE(types[0]),
FAMILY_TYPE(types[1])); break; case 3:
res =
CALL_3ARGS(fampred, FAMILY_TYPE(types[0]),
FAMILY_TYPE(types[1]), FAMILY_TYPE(types[2])); break; case 4:
res = CALL_4ARGS(fampred, FAMILY_TYPE(types[0]),
FAMILY_TYPE(types[1]), FAMILY_TYPE(types[2]),
FAMILY_TYPE(types[3])); break; case 5:
res =
CALL_5ARGS(fampred, FAMILY_TYPE(types[0]),
FAMILY_TYPE(types[1]), FAMILY_TYPE(types[2]),
FAMILY_TYPE(types[3]), FAMILY_TYPE(types[4])); break; case 6:
res = CALL_6ARGS(fampred, FAMILY_TYPE(types[0]),
FAMILY_TYPE(types[1]), FAMILY_TYPE(types[2]),
FAMILY_TYPE(types[3]), FAMILY_TYPE(types[4]),
FAMILY_TYPE(types[5])); break; default:
ErrorMayQuit("not supported yet", 0, 0);
}
if (res != True) continue;
}
// we have a match; is it the right one? if (prec == matchCount) { if (verbose) {
CALL_3ARGS(prec == 0 ? VMETHOD_PRINT_INFO : NEXT_VMETHOD_PRINT_INFO, methods,
INTOBJ_INT(pos / (n + BASE_SIZE_METHODS_OPER_ENTRY) + 1),
INTOBJ_INT(n));
template <Int n, BOOL verbose, BOOL constructor> static Obj
DoOperationNArgs(Obj oper, Obj a1, Obj a2, Obj a3, Obj a4, Obj a5, Obj a6)
{ // the following two lines look this way to avoid "allocating" a // zero-length array, which would result in undefined behavior (even // though we don't access the two arrays when n is zero). In addition, we // carefully avoid warnings in GCC due to -Wduplicated-branches.
Obj types[n > 0 ? n : +1];
Obj ids[n > 0 ? n : +1]; Int prec;
Obj method;
Obj res;
Obj earlyMethod = CONST_OPER(oper)->earlyMethod[n]; if (earlyMethod) {
res = CallNArgs<n>(earlyMethod, a1, a2, a3, a4, a5, a6); if (res != TRY_NEXT_METHOD) return res;
}
switch (n) { case 6:
types[5] = TYPE_OBJ_FEO(a6);
FALLTHROUGH; case 5:
types[4] = TYPE_OBJ_FEO(a5);
FALLTHROUGH; case 4:
types[3] = TYPE_OBJ_FEO(a4);
FALLTHROUGH; case 3:
types[2] = TYPE_OBJ_FEO(a3);
FALLTHROUGH; case 2:
types[1] = TYPE_OBJ_FEO(a2);
FALLTHROUGH; case 1: if (constructor) {
RequireFilter("Constructor", a1, "the first argument");
types[0] = FLAGS_FILT(a1);
} else
types[0] = TYPE_OBJ_FEO(a1); case 0: break; default:
GAP_ASSERT(0);
}
if (n > 0) { if (constructor)
ids[0] = types[0]; else
ids[0] = ID_TYPE(types[0]);
}
for (int i = 1; i < n; i++)
ids[i] = ID_TYPE(types[i]);
#ifdef HPCGAP // reset the method cache if necessary if (ELM_PLIST(cacheBag, 1) != methods) {
Obj * cache = BASE_PTR_PLIST(cacheBag);
cache[0] = methods;
memset(cache + 1, 0, SIZE_OBJ(cacheBag)-2*sizeof(Obj));
} #endif
// outer loop deals with TryNextMethod
prec = -1; do {
prec++; // Is there a method in the cache
method = verbose ? 0 : GetMethodCached<n>(cacheBag, prec, ids);
// otherwise try to find one in the list of methods if (!method) {
method = GetMethodUncached<n>(verbose, constructor, methods, prec,
types); // update the cache if (!verbose && method)
CacheMethod(cacheBag, n, prec, ids, method);
}
// If there was no method found, then pass the information needed // for the error reporting. This function rarely returns if (method == Fail) {
Obj arglist; switch (n) { case 0:
arglist = NewEmptyPlist(); break; case 1:
arglist = NewPlistFromArgs(a1); break; case 2:
arglist = NewPlistFromArgs(a1, a2); break; case 3:
arglist = NewPlistFromArgs(a1, a2, a3); break; case 4:
arglist = NewPlistFromArgs(a1, a2, a3, a4); break; case 5:
arglist = NewPlistFromArgs(a1, a2, a3, a4, a5); break; case 6:
arglist = NewPlistFromArgs(a1, a2, a3, a4, a5, a6); break; default:
GAP_ASSERT(0);
}
HandleMethodNotFound(oper, arglist, verbose, constructor, prec);
}
if (!method) {
ErrorQuit("no method returned", 0, 0);
}
// call this method
res = CallNArgs<n>(method, a1, a2, a3, a4, a5, a6);
} while (res == TRY_NEXT_METHOD);
// reenter the given handler if (narg != -1)
SET_HDLR_FUNC(oper, narg, hdlr);
/*N 1996/06/06 mschoene this should not be done here */
SET_FLAG1_FILT(oper, INTOBJ_INT(0));
SET_FLAG2_FILT(oper, INTOBJ_INT(0));
SET_FLAGS_FILT(oper, False);
SET_SETTR_FILT(oper, False);
SET_TESTR_FILT(oper, False);
// This isn't an attribute (yet)
SET_ENABLED_ATTR(oper, 0);
/*N 1996/06/06 mschoene this should not be done here */
SET_FLAG1_FILT(oper, INTOBJ_INT(0));
SET_FLAG2_FILT(oper, INTOBJ_INT(0));
SET_FLAGS_FILT(oper, False);
SET_SETTR_FILT(oper, False);
SET_TESTR_FILT(oper, False);
// get the flag for the tester
flag2 = INT_INTOBJ( FLAG2_FILT( self ) );
// get type of the object and its flags
type = TYPE_OBJ_FEO( obj );
flags = FLAGS_TYPE( type );
// if the value of the attribute is already known, simply return it if ( SAFE_C_ELM_FLAGS( flags, flag2 ) ) { return DoOperation1Args( self, obj );
}
// call the operation to compute the value
val = DoOperation1Args( self, obj ); if (val == 0) {
ErrorMayQuit("Method for an attribute must return a value", 0, 0);
}
val = CopyObj( val, 0 );
// set the value (but not for internal objects) if ( ENABLED_ATTR( self ) == 1 && !IS_MUTABLE_OBJ( obj ) ) { switch ( TNUM_OBJ( obj ) ) { case T_COMOBJ: case T_POSOBJ: case T_DATOBJ: #ifdef HPCGAP case T_ACOMOBJ: case T_APOSOBJ: #endif
DoSetAttribute( SETTR_FILT(self), obj, val );
}
}
// return the value return val;
}
--> --------------------
--> maximum size reached
--> --------------------
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.55Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.