#include <lunaix/bcache.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/spike.h>

static struct lru_zone* bcache_global_lru;

#define lock(bc) spinlock_acquire(&((bc)->lock))
#define unlock(bc) spinlock_release(&((bc)->lock))
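
/* Sync the cached block back through the holder's ops, then let the holder
   release the block's backing memory. The cache lock is expected to be held
   by the caller (hence the _locked suffix). */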
static void
__evict_internal_locked(struct bcache_node* node)
{
    struct bcache* cache = node->holder;

    cache->ops.sync_cached(cache, node->tag, node->data);
    cache->ops.release_on_evict(cache, node->data);
}
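
/* Eviction callback installed on the LRU zone: invoked when the zone decides
   to reclaim the least-recently-used block node. */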
static int
__try_evict_bcache(struct lru_node* node)
{
    struct bcache_node* bnode;
    struct bcache* cache;

    bnode = container_of(node, struct bcache_node, lru_node);
    cache = bnode->holder;

    /* assumed: a node that is still referenced cannot be reclaimed (mirrors
       the refs check in bcache_evict below) */
    if (bnode->refs) {
        return 0;
    }

    __evict_internal_locked(bnode);
    btrie_remove(&cache->root, bnode->tag);
    llist_delete(&bnode->objs);
    vfree(bnode);

    return 1;
}
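
/* Create a named LRU zone that reclaims bcache blocks via __try_evict_bcache. */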
bcache_zone_t
bcache_create_zone(char* name)
{
    return lru_new_zone(name, __try_evict_bcache);
}
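
/* Initialise a block cache: attach it to an LRU zone, record its ops, and set
   up the tag-lookup trie, the object list and the lock. */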
void
bcache_init_zone(struct bcache* cache, bcache_zone_t lru, unsigned int log_ways,
                 int cap, unsigned int blk_size, struct bcache_ops* ops)
{
    *cache = (struct bcache) {
        .lru = lru,
        .ops = *ops,
        .cap = cap,             /* these two field names are assumed */
        .blk_size = blk_size,
    };

    btrie_init(&cache->root, log_ways);
    llist_init_head(&cache->objs);
    spinlock_init(&cache->lock);
}
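
/* Cache `block` under `tag`, replacing any stale entry for the same tag, and
   return a referenced handle to the new cache object. */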
bcobj_t
bcache_put_and_ref(struct bcache* cache, unsigned long tag, void* block)
{
    struct bcache_node* node;

    lock(cache);

    node = (struct bcache_node*)btrie_get(&cache->root, tag);
    if (node) {
        /* the tag is already cached: sync and release the stale block */
        __evict_internal_locked(node);
        // Now the node is ready to be reused.
    }
    else {
        node = vzalloc(sizeof(*node));
        btrie_set(&cache->root, tag, node);
    }

    *node = (struct bcache_node) {
        .holder = cache,
        .tag = tag,
        .data = block,
        .refs = 1,      /* the caller holds one reference */
    };

    lru_use_one(cache->lru, &node->lru_node);
    llist_append(&cache->objs, &node->objs);

    unlock(cache);

    return (bcobj_t)node;
}
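
/* Look up `tag`; on a hit, hand the cached object back through `result`.
   Returns whether the tag was found. */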
int
bcache_tryget(struct bcache* cache, unsigned long tag, bcobj_t* result)
{
    struct bcache_node* node;

    lock(cache);

    node = (struct bcache_node*)btrie_get(&cache->root, tag);
    if (!node) {
        unlock(cache);
        return 0;
    }

    /* assumed: pin the node so it survives until bcache_return(), and mark
       it as recently used */
    node->refs++;
    lru_use_one(cache->lru, &node->lru_node);

    *result = (bcobj_t)node;

    unlock(cache);
    return 1;
}
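
/* Give back a handle obtained from bcache_tryget() or bcache_put_and_ref(). */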
void
bcache_return(bcobj_t obj)
{
    struct bcache_node* node = (struct bcache_node*) obj;

    /* assumed: drop the reference taken when the handle was obtained */
    node->refs--;

    // No need to lock the cache here: the LRU has its own lock.
    lru_use_one(node->holder->lru, &node->lru_node);
}
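
/* Touch a cached object so the LRU considers it recently used. */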
void
bcache_promote(bcobj_t obj)
{
    struct bcache_node* node;

    node = (struct bcache_node*) obj;

    lru_use_one(node->holder->lru, &node->lru_node);
}
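
/* Evict the block tagged `tag` right away, unless it is absent or still
   referenced. */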
void
bcache_evict(struct bcache* cache, unsigned long tag)
{
    struct bcache_node* node;

    lock(cache);

    node = (struct bcache_node*)btrie_get(&cache->root, tag);
    if (!node || node->refs) {
        unlock(cache);
        return;
    }

    __evict_internal_locked(node);

    btrie_remove(&cache->root, tag);
    lru_remove(cache->lru, &node->lru_node);
    llist_delete(&node->objs);
    vfree(node);

    unlock(cache);
}
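
/* Write back and drop every cached block. The caller must hold the cache
   lock; used by bcache_flush() and bcache_free() below. */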
static void
bcache_flush_locked(struct bcache* cache)
{
    struct bcache_node *pos, *n;
    llist_for_each(pos, n, &cache->objs, objs) {
        __evict_internal_locked(pos);
        btrie_remove(&cache->root, pos->tag);
        lru_remove(cache->lru, &pos->lru_node);
        llist_delete(&pos->objs);
        vfree(pos);
    }
}
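
/* Flush the whole cache under its own lock. */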
void
bcache_flush(struct bcache* cache)
{
    lock(cache);
    bcache_flush_locked(cache);
    unlock(cache);
}
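
/* Tear down a cache: flush all cached blocks and release the tag trie. */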
void
bcache_free(struct bcache* cache)
{
    lock(cache);
    bcache_flush_locked(cache);
    btrie_release(&cache->root);
    unlock(cache);
}
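
/* Dispose of a cache zone previously created with bcache_create_zone(). */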
void
bcache_zone_free(bcache_zone_t zone)