diff options
| author | nasr <nsrddyn@gmail.com> | 2026-03-19 20:51:17 +0000 |
|---|---|---|
| committer | nasr <nsrddyn@gmail.com> | 2026-03-19 20:51:17 +0000 |
| commit | 55783d5c90896ceefbccba7e90210c73055fe74f (patch) | |
| tree | a42c959bca5884053c2c21cbc6cc9facbb3992df /source | |
| parent | 2fa08975fb2998588a51d4165bc573755f716a06 (diff) | |
feature(btree): calculating insertion position properly now
- fixed a bug where true and false were swapped when checking whether something was a nil btree node
(kind of dumb, because we don't even need that function — it was meant to help us
out in the future — but okay)
Diffstat (limited to 'source')
| -rw-r--r-- | source/btree_impl.h | 44 |
1 files changed, 19 insertions, 25 deletions
diff --git a/source/btree_impl.h b/source/btree_impl.h index 7dda6c9..b7015bd 100644 --- a/source/btree_impl.h +++ b/source/btree_impl.h | |||
| @@ -74,7 +74,7 @@ internal b32 | |||
| 74 | is_nil_btree_node(btree_node *node) | 74 | is_nil_btree_node(btree_node *node) |
| 75 | { | 75 | { |
| 76 | // NOTE(nasr): looks like a useless function but could be usefull when we implement a nil_btree_node | 76 | // NOTE(nasr): looks like a useless function but could be usefull when we implement a nil_btree_node |
| 77 | return (node != NULL); | 77 | return (node == NULL); |
| 78 | } | 78 | } |
| 79 | 79 | ||
| 80 | // TODO(nasr): 1. splitting the tree when getting too big? (horizontally) 2. joining trees? | 80 | // TODO(nasr): 1. splitting the tree when getting too big? (horizontally) 2. joining trees? |
| @@ -82,8 +82,8 @@ is_nil_btree_node(btree_node *node) | |||
| 82 | internal b8 | 82 | internal b8 |
| 83 | key_compare(key destination_key, key source_key) | 83 | key_compare(key destination_key, key source_key) |
| 84 | { | 84 | { |
| 85 | s32 source_index = source_key.header_index + source_key.row_index; | 85 | s32 source_index = source_key.header_index + source_key.row_index; |
| 86 | s32 destination_index = destination_key.header_index + destination_key.row_index; | 86 | s32 destination_index = destination_key.header_index + destination_key.row_index; |
| 87 | 87 | ||
| 88 | if(destination_index > source_index) return 1; | 88 | if(destination_index > source_index) return 1; |
| 89 | else if(destination_index < source_index) return -1; | 89 | else if(destination_index < source_index) return -1; |
| @@ -96,20 +96,18 @@ internal s32 | |||
| 96 | btree_find_ipos(key k, btree_node *start_node) | 96 | btree_find_ipos(key k, btree_node *start_node) |
| 97 | { | 97 | { |
| 98 | s32 index = 0; | 98 | s32 index = 0; |
| 99 | for (btree_node *current_node = start_node; | 99 | // scan right while the node's key at [index] is strictly less than k |
| 100 | index < current_node->key_count && key_compare(k, current_node->keys[0]) < 0; | 100 | while (index < start_node->key_count && key_compare(start_node->keys[index], k) < 0) |
| 101 | ++index); | 101 | { |
| 102 | ++index; | ||
| 103 | } | ||
| 102 | return index; | 104 | return index; |
| 103 | |||
| 104 | return 0; | ||
| 105 | |||
| 106 | } | 105 | } |
| 107 | 106 | ||
| 108 | // NOTE(nasr): nodes that get passed as parameters should've already been loaded into memory | 107 | // NOTE(nasr): nodes that get passed as parameters should've already been loaded into memory |
| 109 | internal void * | 108 | internal void * |
| 110 | btree_search(btree_node *node, key key) | 109 | btree_search(btree_node *node, key key) |
| 111 | { | 110 | { |
| 112 | |||
| 113 | s32 index = btree_find_ipos(key, node); | 111 | s32 index = btree_find_ipos(key, node); |
| 114 | 112 | ||
| 115 | if (index < node->key_count && key_compare(node->keys[index], key) == 0) | 113 | if (index < node->key_count && key_compare(node->keys[index], key) == 0) |
| @@ -121,8 +119,6 @@ btree_search(btree_node *node, key key) | |||
| 121 | return NULL; | 119 | return NULL; |
| 122 | } | 120 | } |
| 123 | return btree_search(node->children[index], key); | 121 | return btree_search(node->children[index], key); |
| 124 | |||
| 125 | return NULL; | ||
| 126 | } | 122 | } |
| 127 | 123 | ||
| 128 | // TODO(nasr): split node when key_count == btree_ORDER - 1 (node is full) | 124 | // TODO(nasr): split node when key_count == btree_ORDER - 1 (node is full) |
| @@ -131,18 +127,15 @@ btree_search(btree_node *node, key key) | |||
| 131 | internal void | 127 | internal void |
| 132 | btree_insert(mem_arena *arena, btree *tree, key key, void *payload) | 128 | btree_insert(mem_arena *arena, btree *tree, key key, void *payload) |
| 133 | { | 129 | { |
| 134 | |||
| 135 | if(is_nil_btree_node(tree->root)) | 130 | if(is_nil_btree_node(tree->root)) |
| 136 | { | 131 | { |
| 137 | tree->root = PushStructZero(arena, btree_node); | 132 | tree->root = PushStructZero(arena, btree_node); |
| 138 | tree->root->leaf = 1; | 133 | tree->root->leaf = 1; |
| 139 | tree->root->key_count = 0; | 134 | tree->root->key_count = 0; |
| 140 | |||
| 141 | } | 135 | } |
| 142 | 136 | ||
| 143 | btree_node *current_node = tree->root; | 137 | btree_node *current_node = tree->root; |
| 144 | 138 | ||
| 145 | |||
| 146 | if (current_node->leaf) | 139 | if (current_node->leaf) |
| 147 | { | 140 | { |
| 148 | // find the insertion position and shift keys right to make room | 141 | // find the insertion position and shift keys right to make room |
| @@ -162,20 +155,23 @@ btree_insert(mem_arena *arena, btree *tree, key key, void *payload) | |||
| 162 | 155 | ||
| 163 | if(current_node->key_count < (BTREE_ORDER - 1)) | 156 | if(current_node->key_count < (BTREE_ORDER - 1)) |
| 164 | { | 157 | { |
| 165 | current_node->key_count += 1; | 158 | current_node->key_count += 1; |
| 166 | } | 159 | } |
| 167 | else { | 160 | else { |
| 168 | // TODO(nasr): creating a new branch / tree? | 161 | // TODO(nasr): creating a new branch / tree? |
| 169 | // make a seperate function for this | 162 | // make a seperate function for this |
| 170 | s32 split_pos = current_node->parent->key_count / 2 - 1; | 163 | // NOTE(nasr): only split through the parent when one actually exists; |
| 171 | s32 updated_keycount_p = 0; | 164 | // root splits need to be handled separately (promote mid key, create two children) |
| 172 | for(s32 index = 0; index <= split_pos; ++index) | 165 | if(current_node->parent != NULL) |
| 173 | { | 166 | { |
| 174 | ++updated_keycount_p; | 167 | s32 split_pos = current_node->parent->key_count / 2 - 1; |
| 168 | s32 updated_keycount_p = 0; | ||
| 169 | for(s32 index = 0; index <= split_pos; ++index) | ||
| 170 | { | ||
| 171 | ++updated_keycount_p; | ||
| 172 | } | ||
| 173 | current_node->parent->key_count = updated_keycount_p; | ||
| 175 | } | 174 | } |
| 176 | |||
| 177 | current_node->parent->key_count = updated_keycount_p; | ||
| 178 | |||
| 179 | } | 175 | } |
| 180 | } | 176 | } |
| 181 | // TODO(nasr): internal node case walk down then split on the way back up | 177 | // TODO(nasr): internal node case walk down then split on the way back up |
| @@ -185,7 +181,6 @@ btree_insert(mem_arena *arena, btree *tree, key key, void *payload) | |||
| 185 | internal void | 181 | internal void |
| 186 | btree_write(btree *bt) | 182 | btree_write(btree *bt) |
| 187 | { | 183 | { |
| 188 | |||
| 189 | unused(bt); | 184 | unused(bt); |
| 190 | #if 0 | 185 | #if 0 |
| 191 | temp_arena *temp_arena = temp_arena_begin(arena); | 186 | temp_arena *temp_arena = temp_arena_begin(arena); |
| @@ -198,7 +193,6 @@ btree_write(btree *bt) | |||
| 198 | 193 | ||
| 199 | temp_arena_end(temp); | 194 | temp_arena_end(temp); |
| 200 | #endif | 195 | #endif |
| 201 | |||
| 202 | } | 196 | } |
| 203 | 197 | ||
| 204 | // - load the entire db into memory. | 198 | // - load the entire db into memory. |
