#include <lunaix/blkbuf.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/owloysius.h>
#include <lunaix/syslog.h>
#include <asm/muldiv64.h>
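/*
 * Block buffer (blkbuf) layer: caches fixed-size device blocks on top of
 * the block I/O layer. Cached blocks live in a bcache zone and are backed
 * by struct blk_buf objects allocated from a cake pile.
 */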
#define bb_cache_obj(bcache) \
        container_of(bcache, struct blkbuf_cache, cached)

#define to_blkbuf(bbuf) ((struct blk_buf*)(bbuf))
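/* Shared buffer cache zone and blk_buf allocator, created in __init_blkbuf(). */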
static bcache_zone_t bb_zone;
static struct cake_pile* bb_pile;
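/*
 * Translate a cache block index into the LBA on the underlying device,
 * i.e. scale it by the ratio of the cache block size to the device
 * block size.
 */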
__tolba(struct blkbuf_cache* cache, unsigned int blk_id)
    return udiv64(((u64_t)cache->blksize * (u64_t)blk_id),
                  cache->blkdev->blk_size);
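/* bcache sync_cached hook; wired up in cache_ops below. */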
__blkbuf_do_sync(struct bcache* bc, unsigned long tag, void* data)
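/* Completion callback for block sync requests; logs any I/O error. */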
__blkbuf_sync_callback(struct blkio_req* req)
    buf = (struct blk_buf*)req->evt_args;

        ERROR("sync failed: io error, 0x%x", req->errcode);
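/*
 * Completion callback used while evicting a buffer: log any I/O error,
 * then hand the blk_buf object back to the pile.
 */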
__blkbuf_evict_callback(struct blkio_req* req)
    buf = (struct blk_buf*)req->evt_args;

        ERROR("sync on evict failed (io error, 0x%x)", req->errcode);

    cake_release(bb_pile, buf);
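/*
 * bcache release_on_evict hook. A clean buffer is released right away via
 * __blkbuf_evict_callback; a dirty one is unlinked from the dirty list and
 * gets __blkbuf_evict_callback attached to its write-back request instead.
 */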
__blkbuf_do_try_release(struct bcache* bc, void* data)
    struct blkio_req* req;

    buf = (struct blk_buf*)data;

    if (llist_empty(&buf->dirty)) {
        __blkbuf_evict_callback(req);

    // since we are evicting, we don't care whether the sync fails
    llist_delete(&buf->dirty);

    blkio_when_completed(req, __blkbuf_evict_callback);
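/* Hook table plugging this layer into the generic bcache machinery. */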
static struct bcache_ops cache_ops = {
    .release_on_evict = __blkbuf_do_try_release,
    .sync_cached = __blkbuf_do_sync
};
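/*
 * Slow path of blkbuf_take(): allocate backing memory and a blk_buf,
 * synchronously read the block from the device, then insert it into the
 * cache. Expects bc->lock to be held by the caller.
 */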
__blkbuf_take_slow_lockness(struct blkbuf_cache* bc, unsigned int block_id)
    struct blkio_req* req;

    data = valloc(bc->blksize);

    vbuf_alloc(&vbuf, data, bc->blksize);

    lba = __tolba(bc, block_id);
    buf = (struct blk_buf*)cake_grab(bb_pile);
    req = blkio_vreq(vbuf, lba, __blkbuf_sync_callback, buf, 0);

    // give the dirty list node a known state
    llist_init_head(&buf->dirty);

    blkio_bindctx(req, bc->blkdev->blkio);
    blkio_commit(req, BLKIO_WAIT);

        ERROR("block io error (0x%x)", req->errcode);
        cake_release(bb_pile, buf);
        return (bbuf_t)INVL_BUFFER;

    buf->cobj = bcache_put_and_ref(&bc->cached, block_id, buf);
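/*
 * Create a per-device block buffer cache with the given block size,
 * which must be a power of two.
 */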
blkbuf_create(struct block_dev* blkdev, unsigned int blk_size)
    struct blkbuf_cache* bb_cache;

    assert(is_pot(blk_size));

    bb_cache = valloc(sizeof(*bb_cache));
    bb_cache->blkdev = blkdev;

    bcache_init_zone(&bb_cache->cached, bb_zone, 3, -1, blk_size, &cache_ops);
    llist_init_head(&bb_cache->dirty);
    mutex_init(&bb_cache->lock);
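/*
 * Get a buffer for the given block, either straight from the cache or by
 * loading it from the device on a miss.
 */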
blkbuf_take(struct blkbuf_cache* bc, unsigned int block_id)
    mutex_lock(&bc->lock);
    if (bcache_tryget(&bc->cached, block_id, &cobj)) {
        mutex_unlock(&bc->lock);
        return (bbuf_t)bcached_data(cobj);
    }

    bbuf_t buf = __blkbuf_take_slow_lockness(bc, block_id);

    mutex_unlock(&bc->lock);
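/* Release a buffer previously obtained from blkbuf_take(). */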
blkbuf_put(bbuf_t buf)
    if (unlikely(!buf || blkbuf_errbuf(buf))) {

    struct blk_buf* bbuf;
    bbuf = to_blkbuf(buf);

    bcache_return(bbuf->cobj);
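/*
 * Mark a buffer as dirty by putting it on its cache's dirty list; buffers
 * already on the list are left untouched.
 */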
blkbuf_dirty(bbuf_t buf)
    assert(buf && !blkbuf_errbuf(buf));

    struct blk_buf* bbuf;
    struct blkbuf_cache* bc;

    bbuf = ((struct blk_buf*)buf);
    bc = bcache_holder_embed(bbuf->cobj, struct blkbuf_cache, cached);

    mutex_lock(&bc->lock);

    if (llist_empty(&bbuf->dirty)) {
        llist_append(&bc->dirty, &bbuf->dirty);
    }

    mutex_unlock(&bc->lock);
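/*
 * Commit a write-back for the buffer's block I/O request, waiting for
 * completion if requested, then drop the buffer from the dirty list.
 */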
__schedule_sync_event(struct blk_buf* bbuf, bool wait)
    struct blkio_req* blkio;

    blkio_setwrite(blkio);
    blkio_commit(blkio, wait ? BLKIO_WAIT : BLKIO_NOWAIT);

    llist_delete(&bbuf->dirty);
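/* Queue an asynchronous write-back of a single dirty buffer. */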
blkbuf_schedule_sync(bbuf_t buf)
    struct blk_buf* bbuf;
    bbuf = to_blkbuf(buf);

    __schedule_sync_event(bbuf, false);
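/*
 * Write back every buffer on the cache's dirty list (waiting for each one
 * unless async is set) and report whether the list drained.
 */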
blkbuf_syncall(struct blkbuf_cache* bc, bool async)
    struct blk_buf *pos, *n;

    mutex_lock(&bc->lock);

    llist_for_each(pos, n, &bc->dirty, dirty) {
        __schedule_sync_event(pos, !async);
    }

    mutex_unlock(&bc->lock);

    return llist_empty(&bc->dirty);
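/* Tear down a block buffer cache created by blkbuf_create(). */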
blkbuf_release(struct blkbuf_cache* bc)
    bcache_free(&bc->cached);
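/*
 * __init_blkbuf: one-time creation of the shared bcache zone and the
 * blk_buf cake pile, registered below to run at the on_sysconf stage.
 */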
    bb_zone = bcache_create_zone("blk_buf");
    bb_pile = cake_new_pile("blk_buf", sizeof(struct blk_buf), 1, 0);

owloysius_fetch_init(__init_blkbuf, on_sysconf)