1 #include <lunaix/ds/lru.h>
2 #include <lunaix/mm/valloc.h>
3 #include <lunaix/spike.h>
4 #include <lunaix/fs/twimap.h>
5 #include <lunaix/fs/twifs.h>
6 #include <lunaix/kthread.h>
7 #include <lunaix/owloysius.h>
9 #include <klibc/string.h>
// Global registry of all LRU zones, kept as a circular intrusive list.
// Statically self-linked so it is a valid empty list before any init runs.
11 static struct llist_header zone_lead = { .next = &zone_lead, .prev = &zone_lead };
// Generate lock/unlock helpers operating on a zone's embedded `lock` field.
13 DEFINE_SPINLOCK_OPS(struct lru_zone*, lock);
// Protects `zone_lead` (the registry list) — NOT the per-zone node lists.
14 static DEFINE_SPINLOCK(zone_list_lock);
// Try to evict the single node referenced by `elem` from `zone`, via the
// zone's try_evict callback. If the callback refuses, the node is demoted
// to the tail region instead of being removed, so it will be retried on a
// later pass. Caller is expected to hold the zone lock ("_nolock" suffix);
// no locking is performed here.
17 __do_evict_nolock(struct lru_zone* zone, struct llist_header* elem)
20 if (!zone->try_evict(container_of(elem, struct lru_node, lru_nodes))) {
21 // if the node is unable to evict, raise its rank by one, so
22 // others can have chance in the next round
23 struct llist_header* new_tail = zone->lead_node.prev;
24 llist_prepend(new_tail, elem);
// Count this as one single-node eviction attempt in the zone's stats.
29 zone->evict_stats.n_single++;
// Walk the zone's node list from the tail (coldest end) toward the head,
// attempting to evict every node via __do_evict_nolock. Caller must hold
// the zone lock. Nodes whose try_evict refuses are re-ranked by
// __do_evict_nolock rather than removed.
33 __lru_evict_all_nolock(struct lru_zone* zone)
35 struct llist_header* tail, *curr;
37 tail = zone->lead_node.prev;
38 while (tail != &zone->lead_node)
42 __do_evict_nolock(zone, curr);
// Allocate and register a new LRU zone.
//   name          — human-readable zone name, copied (truncated) into the zone.
//   try_evict_cb  — callback invoked per node on eviction; returns falsy to
//                   veto eviction of that node.
// The zone is linked into the global registry under zone_list_lock.
// Ownership: caller releases via lru_free_zone.
47 struct lru_zone* zone = vzalloc(sizeof(struct lru_zone));
54 zone->try_evict = try_evict_cb;
// strncpy with size-1 leaves the last byte untouched; the vzalloc above
// zero-filled the struct, so the name is always NUL-terminated.
56 strncpy(zone->name, name, sizeof(zone->name) - 1);
57 llist_init_head(&zone->lead_node);
58 spinlock_init(&zone->lock);
// Publish the zone in the global list (registry lock, not the zone lock).
60 spinlock_acquire(&zone_list_lock);
61 llist_append(&zone_lead, &zone->zones);
62 spinlock_release(&zone_list_lock);
// Tear down a zone: evict every cached node, then — if the node list
// drained completely — unlink the zone from the global registry.
// If any node refused eviction, the zone cannot be freed now; it is
// flagged `delayed_free` for asynchronous reclamation by the pool daemon.
68 lru_free_zone(struct lru_zone* zone)
72 __lru_evict_all_nolock(zone);
74 if (llist_empty(&zone->lead_node))
76 llist_delete(&zone->zones);
83 We are unable to free it at this moment,
84 (probably due to tricky things happened
85 to some cached object). Thus mark it and
86 let the daemon try to free it asynchronously
88 zone->delayed_free = true;
// Mark `node` as most-recently-used: unlink it from its current position
// (if it is already on a list) and re-insert it at the head of the zone.
// Must not be called on a zone that is pending delayed free.
95 lru_use_one(struct lru_zone* zone, struct lru_node* node)
99 assert(!zone->delayed_free);
// Both link pointers non-NULL ⇒ the node is currently linked; detach it
// before promotion. A zeroed (never-inserted) node skips the delete.
101 if (node->lru_nodes.next && node->lru_nodes.prev) {
102 llist_delete(&node->lru_nodes);
// Insert right after the head sentinel — the hot (MRU) end of the list.
108 llist_prepend(&zone->lead_node, &node->lru_nodes);
// Evict (attempt) the single coldest node — the list tail. Does nothing
// if the zone is empty (tail is the sentinel itself).
115 lru_evict_one(struct lru_zone* zone)
119 struct llist_header* tail = zone->lead_node.prev;
120 if (tail == &zone->lead_node) {
124 __do_evict_nolock(zone, tail);
// Attempt to evict half of the zone's objects, working from the cold
// (tail) end. `target` (declared on an elided line) counts down the
// number of eviction attempts; the loop stops at the sentinel or when
// the quota is exhausted.
131 lru_evict_half(struct lru_zone* zone)
134 struct llist_header *tail, *curr;
138 target = (int)(zone->objects / 2);
139 tail = zone->lead_node.prev;
141 while (tail != &zone->lead_node && target > 0) {
145 __do_evict_nolock(zone, curr);
// Record one half-eviction sweep in the zone statistics.
149 zone->evict_stats.n_half++;
// Public full-sweep eviction: delegates to the nolock worker (locking,
// if any, happens on elided lines) and bumps the full-sweep counter.
155 lru_evict_all(struct lru_zone* zone)
159 __lru_evict_all_nolock(zone);
161 zone->evict_stats.n_full++;
// Detach `node` from the zone's LRU list without invoking the eviction
// callback. A node that is not linked (empty list header) is left alone.
167 lru_remove(struct lru_zone* zone, struct lru_node* node)
169 if (llist_empty(&node->lru_nodes))
174 llist_delete(&node->lru_nodes);
// NOTE(review): the enclosing function's signature is on elided lines —
// from the spawn comment below this appears to be the body of
// __lru_pool_daemon; confirm against the full source.
// Iterates every registered zone under the registry lock.
struct lru_zone *pos, *n;
188 spinlock_acquire(&zone_list_lock);
190 // TODO add a watermark check before doing eviction
191 llist_for_each(pos, n, &zone_lead, zones) {
195 spinlock_release(&zone_list_lock);
// Boot-time hook: the daemon spawn is intentionally disabled until the
// subsystem is proven thread-safe (see TODO). Registered to run at the
// post-boot init stage via owloysius.
204 // TODO make sure other are thread-safe first
206 // kthread_spawn((ptr_t)__lru_pool_daemon);
208 owloysius_fetch_init(__lru_pool_init, on_postboot)
// twimap "read" callback: emit one CSV row for the zone at the map's
// current iteration index. Columns match the header printed by the reset
// callback; the status column reports either pending delayed-free (with
// attempt count) or "active".
212 __twimap_read_lru_pool(struct twimap* map)
214 struct lru_zone* zone;
216 zone = twimap_index(map, struct lru_zone*);
217 twimap_printf(map, "%s, %d, %d, %d, %d, %d, ",
221 zone->evict_stats.n_single,
222 zone->evict_stats.n_half,
223 zone->evict_stats.n_full);
225 if (zone->delayed_free) {
226 twimap_printf(map, "freeing %d attempts\n", zone->attempts);
229 twimap_printf(map, "active\n");
// twimap "reset" callback: rewind iteration and print the CSV header.
// NOTE(review): the index is seeded with the list sentinel cast to a
// zone — presumably the framework advances via gonext before the first
// read; confirm against the twimap iteration contract.
234 __twimap_reset_lru_pool(struct twimap* map)
236 map->index = container_of(&zone_lead, struct lru_zone, zones);
237 twimap_printf(map, "name, n_objs, hot, n_evt, n_half, n_full, status\n");
// twimap "gonext" callback: advance the iteration index to the next
// registered zone; iteration terminates when the next link is the
// global sentinel (handling on an elided line).
241 __twimap_gonext_lru_pool(struct twimap* map)
243 struct lru_zone* zone;
244 struct llist_header* next;
246 zone = twimap_index(map, struct lru_zone*);
247 next = zone->zones.next;
248 if (next == &zone_lead) {
252 map->index = container_of(next, struct lru_zone, zones);
// Register the lru_pool listing with twifs (read-only for all, per
// FSACL_aR) and hook it in as a twifs plugin at module load.
257 lru_pool_twimappable()
259 twimap_export_list(NULL, lru_pool, FSACL_aR, NULL);
261 EXPORT_TWIFS_PLUGIN(__lru_twimap, lru_pool_twimappable);