staticstruct hlist_head *pubkey_bucket(struct pubkey_hashtable *table, const u8 pubkey[NOISE_PUBLIC_KEY_LEN])
{ /* siphash gives us a secure 64bit number based on a random key. Since * the bits are uniformly distributed, we can then mask off to get the * bits we need.
*/ const u64 hash = siphash(pubkey, NOISE_PUBLIC_KEY_LEN, &table->key);
staticstruct hlist_head *index_bucket(struct index_hashtable *table, const __le32 index)
{ /* Since the indices are random and thus all bits are uniformly * distributed, we can find its bucket simply by masking.
*/ return &table->hashtable[(__force u32)index &
(HASH_SIZE(table->hashtable) - 1)];
}
/* At the moment, we limit ourselves to 2^20 total peers, which generally might
 * amount to 2^20*3 items in this hashtable. The algorithm below works by
 * picking a random number and testing it. We can see that these limits mean we
 * usually succeed pretty quickly:
 *
 * >>> def calculation(tries, size):
 * ...     return (size / 2**32)**(tries - 1) * (1 - (size / 2**32))
 * ...
 * >>> calculation(1, 2**20 * 3)
 * 0.999267578125
 * >>> calculation(2, 2**20 * 3)
 * 0.0007318854331970215
 * >>> calculation(3, 2**20 * 3)
 * 5.360489012673497e-07
 * >>> calculation(4, 2**20 * 3)
 * 3.9261394135792216e-10
 *
 * At the moment, we don't do any masking, so this algorithm isn't exactly
 * constant time in either the random guessing or in the hash list lookup. We
 * could require a minimum of 3 tries, which would successfully mask the
 * guessing. This would not, however, help with the growing hash lengths, which
 * is another thing to consider moving forward.
 */
/* NOTE(review): the lines below are interior fragments — the function
 * signatures that open these bodies are not visible in this chunk, so the
 * code is left byte-identical and only comments are touched. The first
 * part looks like a randomized search for an unused index slot (lock-free
 * probe, then a locked double-check before insertion); the tail appears
 * to belong to a separate replace routine that re-initializes a displaced
 * entry's hash node — confirm against the full file before editing.
 */
search_unused_slot: /* First we try to find an unused slot, randomly, while unlocked. */
entry->index = (__force __le32)get_random_u32();
hlist_for_each_entry_rcu_bh(existing_entry,
index_bucket(table, entry->index),
index_hash) { if (existing_entry->index == entry->index) /* If it's already in use, we continue searching. */ goto search_unused_slot;
}
/* Once we've found an unused slot, we lock it, and then double-check * that nobody else stole it from us.
*/
spin_lock_bh(&table->lock);
hlist_for_each_entry_rcu_bh(existing_entry,
index_bucket(table, entry->index),
index_hash) { if (existing_entry->index == entry->index) {
spin_unlock_bh(&table->lock); /* If it was stolen, we start over. */ goto search_unused_slot;
}
} /* Otherwise, we know we have it exclusively (since we're locked), * so we insert.
*/
hlist_add_head_rcu(&entry->index_hash,
index_bucket(table, entry->index));
spin_unlock_bh(&table->lock);
/* Calling init here NULLs out index_hash, and in fact after this * function returns, it's theoretically possible for this to get * reinserted elsewhere. That means the RCU lookup below might either * terminate early or jump between buckets, in which case the packet * simply gets dropped, which isn't terrible.
*/
INIT_HLIST_NODE(&old->index_hash);
out:
spin_unlock_bh(&table->lock); return ret;
}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.