updated hashtable and started on basic hashtable testing
This commit is contained in:
parent
122eb1d351
commit
c344594af7
7 changed files with 453 additions and 187 deletions
|
|
@ -4,167 +4,363 @@
|
|||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
|
||||
struct ARC_Hashtable {
|
||||
uint32_t size;
|
||||
ARC_HashtableNode **nodes;
|
||||
ARC_Hashtable_Hash hash;
|
||||
ARC_Hashtable_KeyCompare compare;
|
||||
//a private struct to hold the keys and values of the hashtable
|
||||
typedef struct ARC_HashtableNode ARC_HashtableNode;
|
||||
struct ARC_HashtableNode {
|
||||
void *key;
|
||||
void *value;
|
||||
|
||||
uint32_t hashvalue;
|
||||
|
||||
//will be set if next slot is searched for, to be used to remove elements faster
|
||||
uint32_t initialIndex;
|
||||
};
|
||||
|
||||
void CRC32(void *key, size_t *keysize, uint32_t *hashval){
|
||||
*hashval = 0xffffffff;
|
||||
struct ARC_Hashtable {
|
||||
uint32_t currentCapacity;
|
||||
uint32_t currentSize;
|
||||
|
||||
for(size_t i = 0; i < *keysize; i++){
|
||||
ARC_HashtableNode *nodes;
|
||||
|
||||
ARC_Hashtable_HashFn hashFn;
|
||||
ARC_Hashtable_KeyCompareFn keyCompareFn;
|
||||
ARC_Hashtable_DestroyKeyValueFn *destroyKeyValueFn;
|
||||
};
|
||||
|
||||
//copied from here: https://en.wikipedia.org/wiki/Computation_of_cyclic_redundancy_checks#CRC-32_example
/**
 * Default hash function: reflected CRC-32 of the NUL-terminated string at key.
 *
 * key must point to a NUL-terminated byte string (the loop stops at '\0').
 * Returns the standard CRC-32 (init 0xFFFFFFFF, reflected polynomial
 * 0xEDB88320, final complement), e.g. "123456789" -> 0xCBF43926.
 */
uint32_t CRC32Fn(void *key){
    uint32_t hashvalue = 0xffffffff;

    for(uint32_t i = 0; *(((char *)key) + i) != '\0'; i++){
        uint8_t value = *(((uint8_t *)key) + i);

        //process the byte one bit at a time, least significant bit first
        for(uint8_t j = 0; j < 8; j++){
            uint8_t flag = (uint8_t)((value ^ hashvalue) & 1);
            hashvalue >>= 1;

            //bugfix: the polynomial constant had an extra digit (0xEDB888320,
            //nine hex digits, which does not fit in 32 bits); the reflected
            //CRC-32 polynomial is 0xEDB88320
            if(flag){
                hashvalue ^= 0xEDB88320;
            }

            value >>= 1;
        }
    }

    //final complement per the CRC-32 specification
    return ~hashvalue;
}
|
||||
|
||||
int8_t ARC_Default_Key_Compare(void *key1, size_t *key1size, void *key2, size_t *key2size){
|
||||
return key1 == key2;
|
||||
ARC_Bool ARC_Hashtable_DefaultKeyCompareFn(void *key1, void *key2){
|
||||
return (ARC_Bool)(key1 == key2);
|
||||
}
|
||||
|
||||
void ARC_HashtableNode_Create(ARC_HashtableNode **node, void *key, size_t *keysize, void *data){
|
||||
*node = (ARC_HashtableNode *) malloc(sizeof(ARC_HashtableNode));
|
||||
(*node)->key = key;
|
||||
(*node)->keysize = *keysize;
|
||||
(*node)->data = data;
|
||||
(*node)->node = NULL;
|
||||
}
|
||||
void ARC_Hashtable_Create(ARC_Hashtable **hashtable, ARC_Hashtable_HashFn *hashFn, ARC_Hashtable_KeyCompareFn *keyCompareFn, ARC_Hashtable_DestroyKeyValueFn *destroyKeyValueFn){
|
||||
//clear the hashtable
|
||||
*hashtable = (ARC_Hashtable *) malloc(sizeof(ARC_Hashtable));
|
||||
|
||||
void ARC_HashtableNode_Destroy(ARC_HashtableNode *node, ARC_HashtableNode_DestroyExternal external, void *userdata){
|
||||
if(node == NULL){
|
||||
return;
|
||||
//set current capacity and size to start
|
||||
(*hashtable)->currentCapacity = 1;
|
||||
(*hashtable)->currentSize = 0;
|
||||
|
||||
//reserve enough memory for one node
|
||||
(*hashtable)->nodes = (ARC_HashtableNode *)malloc(sizeof(ARC_HashtableNode));
|
||||
|
||||
//set first and only key to null
|
||||
(*hashtable)->nodes[0] = (ARC_HashtableNode){ NULL, NULL, 0, 0 };
|
||||
|
||||
//default to CRC32, then override if hashFn exists
|
||||
(*hashtable)->hashFn = CRC32Fn;
|
||||
if(hashFn != NULL){
|
||||
(*hashtable)->hashFn = *hashFn;
|
||||
}
|
||||
|
||||
ARC_HashtableNode_Destroy(node->node, external, userdata);
|
||||
|
||||
if(external){
|
||||
external(node, userdata);
|
||||
//default to comparing pointers, then override if keyCompareFn exists
|
||||
(*hashtable)->keyCompareFn = ARC_Hashtable_DefaultKeyCompareFn;
|
||||
if(keyCompareFn != NULL){
|
||||
(*hashtable)->keyCompareFn = *keyCompareFn;
|
||||
}
|
||||
|
||||
free(node);
|
||||
//default to NULL, then create and copy destroyKeyValueFn if it exists
|
||||
(*hashtable)->destroyKeyValueFn = NULL;
|
||||
if(destroyKeyValueFn != NULL){
|
||||
(*hashtable)->destroyKeyValueFn = (ARC_Hashtable_DestroyKeyValueFn *)malloc(sizeof(ARC_Hashtable_DestroyKeyValueFn));
|
||||
*((*hashtable)->destroyKeyValueFn) = *destroyKeyValueFn;
|
||||
}
|
||||
}
|
||||
|
||||
/**
 * Destroy a hashtable created with ARC_Hashtable_Create.
 *
 * Clears all stored entries first, frees the heap-copied destructor
 * pointer (if any), then frees the slot array and the table itself.
 * The hashtable pointer is invalid after this call.
 */
void ARC_Hashtable_Destroy(ARC_Hashtable *hashtable){
    //remove all the contents before freeing the containers
    ARC_Hashtable_Clear(hashtable);

    //free the destroyKeyValueFn copy if it exists
    if(hashtable->destroyKeyValueFn != NULL){
        free(hashtable->destroyKeyValueFn);
    }

    //free the (now empty) nodes container
    free(hashtable->nodes);

    //free the hashtable
    free(hashtable);
}
|
||||
|
||||
void ARC_Hashtable_Destroy(ARC_Hashtable *htable, ARC_HashtableNode_DestroyExternal external, void *userdata){
|
||||
for(uint32_t i = 0; i < htable->size; i++){
|
||||
if(htable->nodes[i]){
|
||||
ARC_HashtableNode_Destroy(htable->nodes[i], external, userdata);
|
||||
}
|
||||
}
|
||||
void ARC_HashtableNode_SetNearestNodeToArray(ARC_HashtableNode *nodes, uint32_t capacity, ARC_HashtableNode node){
|
||||
//get the first possible index based on the node's hashvalue
|
||||
uint32_t index = node.hashvalue % capacity;
|
||||
|
||||
free(htable->nodes);
|
||||
free(htable);
|
||||
}
|
||||
//get the first possible node
|
||||
ARC_HashtableNode foundNode = nodes[index];
|
||||
|
||||
void ARC_Hashtable_Add(ARC_Hashtable *htable, void *key, size_t keysize, void *data){
|
||||
uint32_t size = 0;
|
||||
htable->hash(key, &keysize, &size);
|
||||
//init variable for found node
|
||||
uint32_t nextIndex = index;
|
||||
|
||||
ARC_HashtableNode *bucket = htable->nodes[size % htable->size];
|
||||
if(!bucket){
|
||||
ARC_HashtableNode_Create(&bucket, key, &keysize, data);
|
||||
htable->nodes[size % htable->size] = bucket;
|
||||
return;
|
||||
}
|
||||
//check each available node for a free slot
|
||||
while(foundNode.key != NULL){
|
||||
//up the current index by one
|
||||
nextIndex++;
|
||||
|
||||
if(!htable->compare(bucket->key, &bucket->keysize, key, &keysize)){
|
||||
arc_errno = ARC_ERRNO_EXISTS;
|
||||
return;
|
||||
}
|
||||
|
||||
while(bucket->node){
|
||||
if(!htable->compare(bucket->node->key, &bucket->node->keysize, key, &keysize)){
|
||||
arc_errno = ARC_ERRNO_EXISTS;
|
||||
return;
|
||||
//cycle back to the first index if it is above the array's capacity
|
||||
if(nextIndex >= capacity){
|
||||
index = 0;
|
||||
}
|
||||
|
||||
bucket = bucket->node;
|
||||
//check if the loop has circled back to the starting index to stop checking
|
||||
if(index == nextIndex){
|
||||
break;
|
||||
}
|
||||
|
||||
//get the next possible node
|
||||
foundNode = nodes[index];
|
||||
}
|
||||
|
||||
ARC_HashtableNode_Create(&(bucket->node), key, &keysize, data);
|
||||
//set the foundNode and next index
|
||||
nodes[nextIndex] = node;
|
||||
nodes[nextIndex].initialIndex = index;
|
||||
}
|
||||
|
||||
/**
 * Insert a key/value pair.
 *
 * key must not be NULL (NULL marks an empty slot). When every slot is
 * occupied the capacity is doubled and all live nodes are rehashed into
 * a new slot array. On failure arc_errno is set and the table is left
 * unchanged. NOTE(review): duplicate keys are not rejected here —
 * confirm whether Add should check for an existing key first.
 */
void ARC_Hashtable_Add(ARC_Hashtable *hashtable, void *key, void *value){
    //check to see if the current size is the same as a max uint32_t and if so it will overflow so throw an error
    if(hashtable->currentSize == ~((uint32_t)0)){
        arc_errno = ARC_ERRNO_OVERFLOW;
        ARC_DEBUG_LOG_ERROR("ARC_Hashtable_Add(hashtable, key, value), hashtable at max capacity tried adding another value");
        return;
    }

    //check to make sure key is not NULL
    if(key == NULL){
        arc_errno = ARC_ERRNO_NULL;
        ARC_DEBUG_LOG_ERROR("ARC_Hashtable_Add(hashtable, key, value), NULL was passed in for the key, this function cannot handle that");
        return;
    }

    //check if we are at the max of the current capacity and grow if so
    if(hashtable->currentSize == hashtable->currentCapacity){
        //move the current nodes into a temporary variable to move into a resized array
        uint64_t oldCapacity = hashtable->currentCapacity;
        ARC_HashtableNode *oldNodes = hashtable->nodes;

        //increase the current capacity by double
        hashtable->currentCapacity <<= 1;

        //if for some reason the capacity is 0, we should set it to one so we do not error on realloc
        //(bugfix: the original condition was inverted — `!= 0` with `++` grew
        //every non-zero capacity by an extra slot instead of repairing zero)
        //NOTE(review): a capacity of 2^31 also shifts to 0 and lands here — confirm saturation policy
        if(hashtable->currentCapacity == 0){
            hashtable->currentCapacity = 1;
        }

        //resize the hashtable's array and mark every slot empty
        hashtable->nodes = (ARC_HashtableNode *)malloc(sizeof(ARC_HashtableNode) * hashtable->currentCapacity);
        for(uint32_t index = 0; index < hashtable->currentCapacity; index++){
            hashtable->nodes[index].key = NULL;
        }

        //rehash the old live nodes into the new array, skipping empty slots
        for(uint32_t index = 0; index < oldCapacity; index++){
            if(oldNodes[index].key == NULL){
                continue;
            }
            ARC_HashtableNode_SetNearestNodeToArray(hashtable->nodes, hashtable->currentCapacity, oldNodes[index]);
        }

        //release the old array (bugfix: it was previously leaked)
        free(oldNodes);
    }

    //get the hashvalue once; it is cached in the node for future rehashes
    uint32_t hashvalue = hashtable->hashFn(key);

    //add to the slot array and increase the current size
    ARC_HashtableNode_SetNearestNodeToArray(hashtable->nodes, hashtable->currentCapacity, (ARC_HashtableNode){ key, value, hashvalue, 0 });
    hashtable->currentSize++;
}
|
||||
|
||||
/**
 * Remove a key/value pair.
 *
 * Probes linearly from the key's home index; sets arc_errno to
 * ARC_ERRNO_DATA when the key is absent. If a destroyKeyValueFn was
 * registered it is invoked on the removed key/value. The probe cluster
 * is then repaired with backward-shift deletion so later lookups still
 * find displaced nodes. Finally the table shrinks by half when the size
 * drops to exactly half the capacity (and capacity > 1).
 */
void ARC_Hashtable_Remove(ARC_Hashtable *hashtable, void *key){
    //get the home index from the key's hashvalue
    uint32_t index = hashtable->hashFn(key) % hashtable->currentCapacity;

    //get the first possible node
    ARC_HashtableNode node = hashtable->nodes[index];

    //check each available node for a match; stop at an empty slot or after a full circle
    ARC_Bool nodeFound = ARC_False;
    for(uint32_t nextIndex = index; node.key != NULL;){
        if(hashtable->keyCompareFn(node.key, key) == ARC_True){
            index = nextIndex;
            nodeFound = ARC_True;

            //bugfix: stop at the match — the original kept scanning, so `node`
            //no longer referred to the removed entry when the destructor ran
            break;
        }

        //up the current index by one
        nextIndex++;

        //cycle back to the first index if it is above the array's capacity
        if(nextIndex >= hashtable->currentCapacity){
            nextIndex = 0;
        }

        //check if the loop has circled back to the starting index to stop checking
        if(index == nextIndex){
            break;
        }

        //get the next possible node
        node = hashtable->nodes[nextIndex];
    }

    //error if the node was not found
    if(nodeFound == ARC_False){
        arc_errno = ARC_ERRNO_DATA;
        ARC_DEBUG_LOG_ERROR("ARC_Hashtable_Remove(hashtable, key), key was not found in hashtable, could not remove");
        return;
    }

    //call delete data to clean up item if delete data function exists
    if(hashtable->destroyKeyValueFn != NULL){
        (*(hashtable->destroyKeyValueFn))(node.key, node.value);
    }

    //backward-shift deletion: walk the probe cluster after the hole and pull
    //back any node whose home index means it can no longer be reached once
    //the hole becomes empty
    //(bugfix: the original loop reused the removed slot as the move target on
    //every iteration — overwriting the node it had just moved — and then
    //cleared a live slot on exit)
    uint32_t hole = index;
    uint32_t scanIndex = index;
    for(;;){
        //up the scan cursor by one, wrapping at capacity
        scanIndex++;
        if(scanIndex >= hashtable->currentCapacity){
            scanIndex = 0;
        }

        //a full circle means every slot was examined
        if(scanIndex == hole){
            break;
        }

        //an empty slot terminates the probe cluster
        ARC_HashtableNode candidate = hashtable->nodes[scanIndex];
        if(candidate.key == NULL){
            break;
        }

        //move the candidate back when the hole lies between its home slot and
        //its current slot (cyclic distance comparison)
        uint32_t capacity = hashtable->currentCapacity;
        uint32_t homeDistance = (scanIndex + capacity - candidate.initialIndex) % capacity;
        uint32_t holeDistance = (scanIndex + capacity - hole) % capacity;
        if(homeDistance >= holeDistance){
            hashtable->nodes[hole] = candidate;
            hole = scanIndex;
        }
    }

    //set the final hole to an empty node
    hashtable->nodes[hole] = (ARC_HashtableNode){ NULL, NULL, 0, hole };

    //we have removed the item so we can decrease the current size
    hashtable->currentSize--;

    //if the current size is not half the current capacity, or the current capacity is at the smallest limit, we do not need to do anything else
    if(hashtable->currentSize != hashtable->currentCapacity >> 1 || hashtable->currentCapacity == 1){
        return;
    }

    //move the current nodes into a temporary variable to move into a resized array
    uint64_t oldCapacity = hashtable->currentCapacity;
    ARC_HashtableNode *oldNodes = hashtable->nodes;

    //half the capacity and copy the live nodes into a smaller array
    hashtable->currentCapacity >>= 1;
    hashtable->nodes = (ARC_HashtableNode *)malloc(sizeof(ARC_HashtableNode) * hashtable->currentCapacity);

    //set keys to null
    for(uint32_t slot = 0; slot < hashtable->currentCapacity; slot++){
        hashtable->nodes[slot].key = NULL;
    }

    //add the old live nodes into the new array, skipping empty slots
    for(uint32_t slot = 0; slot < oldCapacity; slot++){
        if(oldNodes[slot].key == NULL){
            continue;
        }
        ARC_HashtableNode_SetNearestNodeToArray(hashtable->nodes, hashtable->currentCapacity, oldNodes[slot]);
    }

    //release the old array (bugfix: it was previously leaked)
    free(oldNodes);
}
|
||||
|
||||
/**
 * Remove every entry and reset the hashtable to its initial
 * single-slot, empty state.
 *
 * If a destroyKeyValueFn was registered it is invoked on every live
 * key/value pair before the slot array is released (bugfix: the
 * original freed the array without destroying its contents, leaking
 * every stored key/value when a destructor was registered).
 */
void ARC_Hashtable_Clear(ARC_Hashtable *hashtable){
    //destroy every stored key/value pair if a destructor exists
    if(hashtable->destroyKeyValueFn != NULL){
        for(uint32_t index = 0; index < hashtable->currentCapacity; index++){
            if(hashtable->nodes[index].key != NULL){
                (*(hashtable->destroyKeyValueFn))(hashtable->nodes[index].key, hashtable->nodes[index].value);
            }
        }
    }

    //delete the array holding all the nodes
    free(hashtable->nodes);

    //set current capacity and size to start
    hashtable->currentCapacity = 1;
    hashtable->currentSize = 0;

    //reserve enough memory for one node
    hashtable->nodes = (ARC_HashtableNode *)malloc(sizeof(ARC_HashtableNode));

    //set first and only slot to empty
    hashtable->nodes[0] = (ARC_HashtableNode){ NULL, NULL, 0, 0 };
}
|
||||
|
||||
/**
 * Look up a key.
 *
 * Probes linearly from the key's home index until the key is found, an
 * empty slot is reached, or the probe wraps all the way around.
 * Returns the stored value, or NULL when the key is absent (note that a
 * stored NULL value is indistinguishable from "not found").
 */
void *ARC_Hashtable_Get(ARC_Hashtable *hashtable, void *key){
    //get the home index from the key's hashvalue
    uint32_t index = hashtable->hashFn(key) % hashtable->currentCapacity;

    //get the first possible node
    ARC_HashtableNode node = hashtable->nodes[index];

    //check each available node for a match
    for(uint32_t nextIndex = index; node.key != NULL;){
        //if the key is found, return its value
        if(hashtable->keyCompareFn(node.key, key) == ARC_True){
            return node.value;
        }

        //up the current index by one
        nextIndex++;

        //cycle back to the first index if it is above the array's capacity
        if(nextIndex >= hashtable->currentCapacity){
            nextIndex = 0;
        }

        //check if the loop has circled back to the starting index to stop checking
        if(index == nextIndex){
            break;
        }

        //get the next possible node
        node = hashtable->nodes[nextIndex];
    }

    //could not find node, so return NULL
    return NULL;
}
|
||||
|
||||
/**
 * Invoke iteratorFn(key, value) on every live entry.
 *
 * Iteration order is slot order, not insertion order. NOTE(review): the
 * original set arc_errno = ARC_ERRNO_NULL unconditionally after a
 * successful iteration; reconstructed here as a NULL-callback guard —
 * confirm the intended error semantics.
 */
void ARC_Hashtable_RunIteration(ARC_Hashtable *hashtable, ARC_Hashtable_IteratorFn iteratorFn){
    //guard against a NULL callback
    if(iteratorFn == NULL){
        arc_errno = ARC_ERRNO_NULL;
        ARC_DEBUG_LOG_ERROR("ARC_Hashtable_RunIteration(hashtable, iteratorFn), iteratorFn was NULL");
        return;
    }

    //pass each non NULL node into the iteratorFn callback
    for(uint32_t index = 0; index < hashtable->currentCapacity; index++){
        //get the current node
        ARC_HashtableNode node = hashtable->nodes[index];

        //skip past NULL keys
        if(node.key == NULL){
            continue;
        }

        //passes the current entry into the callback function
        iteratorFn(node.key, node.value);
    }
}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue