#include <lunaix/blkbuf.h>
#include <lunaix/mm/cake.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/owloysius.h>
#include <lunaix/syslog.h>
#define bb_cache_obj(bcache) \
            container_of(bcache, struct blkbuf_cache, cached)

#define to_blkbuf(bbuf) ((struct blk_buf*)(bbuf))
static bcache_zone_t bb_zone;
static struct cake_pile* bb_pile;
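/*
 * Translate a cache block index into an LBA on the backing device.
 * Cache blocks need not match the device sector size, so the byte
 * offset (blksize * blk_id) is rescaled by blkdev->blk_size.
 */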
static u64_t
__tolba(struct blkbuf_cache* cache, unsigned int blk_id)
{
    return ((u64_t)cache->blksize * (u64_t)blk_id) / cache->blkdev->blk_size;
}
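/*
 * Hooks wired into the generic bcache layer and into blkio:
 * __blkbuf_do_sync is installed as cache_ops.sync_cached, while
 * __blkbuf_sync_callback and __blkbuf_evict_callback run when the
 * corresponding block IO request completes.
 */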
__blkbuf_do_sync(struct bcache* bc, unsigned long tag, void* data)
static void
__blkbuf_sync_callback(struct blkio_req* req)
{
    struct blk_buf* buf = (struct blk_buf*)req->evt_args;

    if (req->errcode) {
        ERROR("sync failed: io error, 0x%x", req->errcode);
    }
}
static void
__blkbuf_evict_callback(struct blkio_req* req)
{
    struct blk_buf* buf = (struct blk_buf*)req->evt_args;

    if (req->errcode) {
        ERROR("sync on evict failed (io error, 0x%x)", req->errcode);
    }

    cake_release(bb_pile, buf);
}
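/*
 * bcache eviction hook (cache_ops.release_on_evict). A clean buffer is
 * released right away; a dirty one has its write-back routed through
 * __blkbuf_evict_callback so the object is only freed once the IO is done.
 */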
static void
__blkbuf_do_try_release(struct bcache* bc, void* data)
{
    struct blkio_req* req;
    struct blk_buf* buf;

    buf = (struct blk_buf*)data;
    req = buf->breq;    /* the buffer's own blkio request (field name assumed) */

    if (llist_empty(&buf->dirty)) {
        __blkbuf_evict_callback(req);
        return;
    }

    // since we are evicting, we don't care if the sync fails
    llist_delete(&buf->dirty);
    blkio_when_completed(req, __blkbuf_evict_callback);
}
static struct bcache_ops cache_ops = {
    .release_on_evict = __blkbuf_do_try_release,
    .sync_cached = __blkbuf_do_sync
};
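/*
 * Slow path of blkbuf_take(): the block is not cached, so grab a fresh
 * buffer object, read the block from the device synchronously, and
 * publish it in the cache. The caller (blkbuf_take) already holds
 * bc->lock, so no locking is done here.
 */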
static bbuf_t
__blkbuf_take_slow_lockness(struct blkbuf_cache* bc, unsigned int block_id)
{
    struct blk_buf* buf;
    struct blkio_req* req;
    struct vecbuf* vbuf = NULL;   /* IO vector for the read (type name assumed) */
    void* data;
    u64_t lba;

    data = valloc(bc->blksize);
    vbuf_alloc(&vbuf, data, bc->blksize);

    lba = __tolba(bc, block_id);
    buf = (struct blk_buf*)cake_grab(bb_pile);
    req = blkio_vreq(vbuf, lba, __blkbuf_sync_callback, buf, 0);

    // give dirty a known state
    llist_init_head(&buf->dirty);

    blkio_bindctx(req, bc->blkdev->blkio);
    blkio_commit(req, BLKIO_WAIT);

    if (req->errcode) {
        ERROR("block io error (0x%x)", req->errcode);
        cake_release(bb_pile, buf);
        return (bbuf_t)INVL_BUFFER;
    }

    buf->cobj = bcache_put_and_ref(&bc->cached, block_id, buf);

    return (bbuf_t)buf;
}
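/*
 * Create a per-device block buffer cache. blk_size is the cache block
 * size in bytes and must be a power of two.
 */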
struct blkbuf_cache*
blkbuf_create(struct block_dev* blkdev, unsigned int blk_size)
{
    struct blkbuf_cache* bb_cache;

    assert(is_pot(blk_size));

    bb_cache = valloc(sizeof(*bb_cache));
    bb_cache->blkdev = blkdev;
    bb_cache->blksize = blk_size;

    bcache_init_zone(&bb_cache->cached, bb_zone, 3, -1, blk_size, &cache_ops);
    llist_init_head(&bb_cache->dirty);
    mutex_init(&bb_cache->lock);

    return bb_cache;
}
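/*
 * Look up (or read in) the buffer for block_id. A cache hit returns the
 * existing buffer; a miss falls through to the synchronous slow path.
 * Each successful take holds a reference that blkbuf_put() releases.
 */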
bbuf_t
blkbuf_take(struct blkbuf_cache* bc, unsigned int block_id)
{
    bcobj_t cobj;

    mutex_lock(&bc->lock);
    if (bcache_tryget(&bc->cached, block_id, &cobj)) {
        mutex_unlock(&bc->lock);
        return (bbuf_t)bcached_data(cobj);
    }

    bbuf_t buf = __blkbuf_take_slow_lockness(bc, block_id);

    mutex_unlock(&bc->lock);
    return buf;
}
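/* Drop the reference taken by blkbuf_take(); NULL and error buffers are ignored. */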
void
blkbuf_put(bbuf_t buf)
{
    if (unlikely(!buf || blkbuf_errbuf(buf))) {
        return;
    }

    struct blk_buf* bbuf;
    bbuf = to_blkbuf(buf);

    bcache_return(bbuf->cobj);
}
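/*
 * Mark a buffer dirty: chain it onto the owning cache's dirty list
 * (at most once) so a later blkbuf_syncall() writes it back.
 */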
void
blkbuf_dirty(bbuf_t buf)
{
    assert(buf && !blkbuf_errbuf(buf));

    struct blk_buf* bbuf;
    struct blkbuf_cache* bc;

    bbuf = ((struct blk_buf*)buf);
    bc = bcache_holder_embed(bbuf->cobj, struct blkbuf_cache, cached);

    mutex_lock(&bc->lock);

    if (llist_empty(&bbuf->dirty)) {
        llist_append(&bc->dirty, &bbuf->dirty);
    }

    mutex_unlock(&bc->lock);
}
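/*
 * Issue a write-back for one dirty buffer, optionally waiting for the IO
 * to finish, then detach it from the dirty list.
 */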
static void
__schedule_sync_event(struct blk_buf* bbuf, bool wait)
{
    struct blkio_req* blkio;
    blkio = bbuf->breq;    /* the buffer's own blkio request (field name assumed) */

    blkio_setwrite(blkio);
    blkio_commit(blkio, wait ? BLKIO_WAIT : BLKIO_NOWAIT);

    llist_delete(&bbuf->dirty);
}
void
blkbuf_schedule_sync(bbuf_t buf)
{
    struct blk_buf* bbuf;
    bbuf = to_blkbuf(buf);

    __schedule_sync_event(bbuf, false);
}
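/*
 * Write back every buffer currently on the cache's dirty list. With
 * async == false each write is awaited; the return value reports whether
 * the dirty list is empty afterwards.
 */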
bool
blkbuf_syncall(struct blkbuf_cache* bc, bool async)
{
    struct blk_buf *pos, *n;

    mutex_lock(&bc->lock);

    llist_for_each(pos, n, &bc->dirty, dirty) {
        __schedule_sync_event(pos, !async);
    }

    mutex_unlock(&bc->lock);

    return llist_empty(&bc->dirty);
}
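/* Tear down a buffer cache created by blkbuf_create(). */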
void
blkbuf_release(struct blkbuf_cache* bc)
{
    bcache_free(&bc->cached);
    vfree(bc);
}
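/*
 * Early-boot initialisation: set up the shared bcache zone and the cake
 * pile backing struct blk_buf allocations, registered through owloysius.
 */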
static void
__init_blkbuf()
{
    bb_zone = bcache_create_zone("blk_buf");
    bb_pile = cake_new_pile("blk_buf", sizeof(struct blk_buf), 1, 0);
}
owloysius_fetch_init(__init_blkbuf, on_earlyboot)
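/*
 * Usage sketch (illustrative only, not part of this unit): how a caller,
 * e.g. a filesystem driver, might drive this API. The device handle `bdev`
 * and the block index are hypothetical.
 *
 *     struct blkbuf_cache* cache;
 *     bbuf_t buf;
 *
 *     cache = blkbuf_create(bdev, 1024);       // 1 KiB cache blocks (power of two)
 *     buf = blkbuf_take(cache, 0);             // cache hit, or synchronous read
 *     if (buf && !blkbuf_errbuf(buf)) {
 *         // ... modify the buffered block ...
 *         blkbuf_dirty(buf);                   // queue on the dirty list
 *         blkbuf_put(buf);                     // drop the reference
 *     }
 *
 *     blkbuf_syncall(cache, false);            // write back, waiting for IO
 *     blkbuf_release(cache);
 */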