#include <lunaix/syslog.h>
#include <asm/muldiv64.h>
-LOG_MODULE("blkbuf")
+LOG_MODULE("blkbuf")
+
+/* Number of dirty marks after which a buffer is eagerly written back. */
+#define MAX_DIRTY_COUNT 16
+/* Recover the owning blkbuf_cache from its embedded bcache member. */
#define bb_cache_obj(bcache) \
    container_of(bcache, struct blkbuf_cache, cached)
cache->blkdev->blk_size);
}
+/* Acquire the per-buffer mutex guarding a blk_buf's dirty state. */
+static inline void
+__blkbuf_lock(struct blk_buf* buf)
+{
+    mutex_lock(&buf->lock);
+}
+
+/* Release the per-buffer mutex taken by __blkbuf_lock(). */
+static inline void
+__blkbuf_unlock(struct blk_buf* buf)
+{
+    mutex_unlock(&buf->lock);
+}
+
+/* NOTE(review): this hunk shows only fragments of __blkbuf_do_sync and of
+ * the buffer-initialisation path; `buf`, `req` and `bbuf` are declared in
+ * diff context elided here — verify against the full file. The new
+ * mutex_init() pairs with the per-buffer lock introduced above. */
static void
__blkbuf_do_sync(struct bcache* bc, unsigned long tag, void* data)
{
    // give dirty a known state
    llist_init_head(&buf->dirty);
+    mutex_init(&buf->lock);
    blkio_setread(req);
    blkio_bindctx(req, bc->blkdev->blkio);
    bcache_return(bbuf->cobj);
}
+
+/*
+ * Commit the buffer's pending block-io request as a write and clear the
+ * buffer's dirty state (unlink from the cache dirty list, reset counter).
+ * "_nolock" contract: the caller must already hold the buffer lock; no
+ * lock is taken here.
+ *
+ * NOTE(review): llist_delete() unlinks this buffer from the cache-wide
+ * dirty list, but on the blkbuf_dirty() threshold path only the buffer
+ * lock is held — insertions into the same list elsewhere are serialised
+ * by bc->lock. Confirm llist_delete is safe against a concurrent
+ * llist_append on that list, or take bc->lock around the unlink.
+ */
+static void
+__schedule_sync_nolock(struct blk_buf* bbuf, bool wait)
+{
+    struct blkio_req* blkio;
+
+    blkio = bbuf->breq;
+
+    blkio_setwrite(blkio);
+    blkio_commit(blkio, wait ? BLKIO_WAIT : BLKIO_NOWAIT);
+
+    llist_delete(&bbuf->dirty);
+    bbuf->dirty_count = 0;
+}
+
+/*
+ * Mark a buffer dirty: enrol it on the cache's dirty list the first time
+ * it is dirtied, and once it has accumulated MAX_DIRTY_COUNT dirty marks
+ * eagerly schedule an asynchronous write-back.
+ *
+ * NOTE(review): lock order here is buffer lock -> bc->lock, while the
+ * flush-all path takes bc->lock -> buffer lock. That is a classic ABBA
+ * inversion between a blkbuf_dirty() caller and a concurrent flush —
+ * either prove the two cannot race or make the ordering consistent.
+ */
void
blkbuf_dirty(bbuf_t buf)
{
    bbuf = ((struct blk_buf*)buf);
    bc = bcache_holder_embed(bbuf->cobj, struct blkbuf_cache, cached);
-    mutex_lock(&bc->lock);
+    __blkbuf_lock(bbuf);
-    if (llist_empty(&bbuf->dirty)) {
+    if (llist_empty(&bbuf->dirty))
+    {
+        mutex_lock(&bc->lock);
        llist_append(&bc->dirty, &bbuf->dirty);
+        mutex_unlock(&bc->lock);
    }
-    mutex_unlock(&bc->lock);
-}
-
-static inline void
-__schedule_sync_event(struct blk_buf* bbuf, bool wait)
-{
-    struct blkio_req* blkio;
-
-    blkio = bbuf->breq;
-
-    blkio_setwrite(blkio);
-    blkio_commit(blkio, wait ? BLKIO_WAIT : BLKIO_NOWAIT);
+    bbuf->dirty_count++;
+    if (bbuf->dirty_count >= MAX_DIRTY_COUNT) {
+        __schedule_sync_nolock(bbuf, false);
+    }
-    llist_delete(&bbuf->dirty);
+    __blkbuf_unlock(bbuf);
}
+/* NOTE(review): the function name/signature is elided by the diff here.
+ * The change wraps the (now lock-free) sync helper in the new per-buffer
+ * lock, replacing the removed __schedule_sync_event(). */
void
    struct blk_buf* bbuf;
    bbuf = to_blkbuf(buf);
-    __schedule_sync_event(bbuf, false);
+    __blkbuf_lock(bbuf);
+    __schedule_sync_nolock(bbuf, false);
+    __blkbuf_unlock(bbuf);
}
+/* NOTE(review): signature elided by the diff. Walks the cache-wide dirty
+ * list under bc->lock and flushes each buffer; the safe-iteration cursor
+ * pair (pos, n) is what allows __schedule_sync_nolock() to llist_delete
+ * the current entry mid-walk.
+ *
+ * NOTE(review): lock order here is bc->lock -> buffer lock, the reverse
+ * of blkbuf_dirty() (buffer lock -> bc->lock) — ABBA deadlock hazard;
+ * see note there. */
bool
    mutex_lock(&bc->lock);
-    llist_for_each(pos, n, &bc->dirty, dirty) {
-        __schedule_sync_event(pos, !async);
+    llist_for_each(pos, n, &bc->dirty, dirty)
+    {
+        __blkbuf_lock(pos);
+        __schedule_sync_nolock(pos, !async);
+        __blkbuf_unlock(pos);
    }
    mutex_unlock(&bc->lock);