1 #include <lunaix/blkio.h>
2 #include <lunaix/syslog.h>
3 #include <lunaix/mm/cake.h>
4 #include <lunaix/mm/valloc.h>
// Slab ("cake") allocator pile backing all struct blkio_req allocations;
// grabbed in __blkio_req_create and released in blkio_free_req.
8 static struct cake_pile* blkio_reqpile;
// One-time setup: create the request pile (object size = sizeof(struct
// blkio_req), 1 page per cake, no flags). NOTE(review): the enclosing
// init function's signature is outside this view.
15 blkio_reqpile = cake_new_pile("blkio_req", sizeof(struct blkio_req), 1, 0);
// Allocate a blkio request from the pile and initialise it for the given
// LBA, completion callback, and options. Caller owns the returned request
// (freed via blkio_free_req, or automatically if flagged for that).
// NOTE(review): part of the parameter list and initialiser is elided from
// this view; only the visible fields are documented here.
18 static inline struct blkio_req*
19 __blkio_req_create(struct vecbuf* buffer,
// Clear the low 4 option bits — presumably reserved for internal state
// flags (pending/busy/etc.) managed by this subsystem. TODO confirm.
25 options = options & ~0xf;
26 struct blkio_req* breq = (struct blkio_req*)cake_grab(blkio_reqpile);
27 *breq = (struct blkio_req){ .blk_addr = start_lba,
28 .completed = completed,
30 .evt_args = evt_args };
// Per-request wait queue, used by synchronous (BLKIO_WAIT) submissions
// in blkio_commit and woken in blkio_complete.
32 waitq_init(&breq->wait);
// Build a read request (BLKIO_WRITE not set) over the given vector buffer
// starting at start_lba. Thin wrapper around __blkio_req_create.
37 blkio_vrd(struct vecbuf* buffer,
43 return __blkio_req_create(buffer, start_lba, completed, evt_args, options);
// Build a write request over the given vector buffer starting at
// start_lba: same construction as blkio_vrd, plus the BLKIO_WRITE flag.
47 blkio_vwr(struct vecbuf* buffer,
53 struct blkio_req* breq =
54 __blkio_req_create(buffer, start_lba, completed, evt_args, options);
55 breq->flags |= BLKIO_WRITE;
// Return a request object to the pile. NOTE(review): no visible guard
// against freeing a request that is still pending/busy — confirm callers
// only free completed or never-submitted requests.
60 blkio_free_req(struct blkio_req* req)
62 cake_release(blkio_reqpile, (void*)req);
// Create a zero-initialised blkio context with the given per-request
// handler, an empty FIFO request queue, and an unlocked mutex.
66 blkio_newctx(req_handler handler)
68 struct blkio_context* ctx =
69 (struct blkio_context*)vzalloc(sizeof(struct blkio_context));
// NOTE(review): vzalloc result is used without a NULL check — confirm
// whether vzalloc can fail in this kernel, or add a check.
70 ctx->handle_one = handler;
72 llist_init_head(&ctx->queue);
73 mutex_init(&ctx->lock);
// Submit a request to its context's queue. With BLKIO_WAIT, the caller
// is parked on the request's wait queue and blocks until completion;
// otherwise submission is asynchronous.
79 blkio_commit(struct blkio_req* req, int options)
81 struct blkio_context* ctx;
// Refuse to enqueue a request that is already in flight.
83 if (blkio_is_pending(req)) {
84 // prevent double submission
90 req->flags |= BLKIO_PENDING;
92 if ((options & BLKIO_WAIT)) {
93 req->flags |= BLKIO_SHOULD_WAIT;
// Register on the wait queue *before* the request becomes visible to
// the scheduler, narrowing the completion-before-wait race noted below.
94 prepare_to_wait(&req->wait);
97 req->flags &= ~BLKIO_SHOULD_WAIT;
104 llist_append(&ctx->queue, &req->reqs);
107 // if the pipeline is not running (e.g., stalling), then we should schedule
108 // one immediately and kick-start it.
109 // NOTE: Possible race condition between blkio_commit and pwait.
110 // Consider: what if the scheduler completes the request before pwait even
112 // Two possible workarounds:
113 // #1. we disable the interrupt before scheduling the request.
114 // #2. we do scheduling within interrupt context (e.g., attach a timer)
115 // As we don't want to overwhelm the interrupt context and also want to keep
116 // the request RTT as small as possible, #1 is preferred.
120 Potential racing here:
121 happens when blkio is committed at high volume, while the
122 block device has very little latency.
123 This is particularly serious for non-async blkio: it could
124 complete before we do pwait, causing the thread to hang indefinitely
127 if (blkio_stalled(ctx)) {
128 if ((options & BLKIO_WAIT)) {
130 try_wait_check_stall();
134 } else if ((options & BLKIO_WAIT)) {
135 try_wait_check_stall();
// Pop one queued request from the context and hand it to the handler.
// Designed to be callable from IRQ context: it never blocks on the ctx
// lock, it simply bails out (stalls) if the lock is held.
140 blkio_schedule(struct blkio_context* ctx)
142 // stall the pipeline if ctx is locked by others.
143 // we must not try to hold the lock in this case, as
144 // blkio_schedule will be in irq context most of the
145 // time, we can't afford the waiting there.
146 if (mutex_on_hold(&ctx->lock)) {
150 // will always succeed when in irq context
// Nothing queued: leave the pipeline idle.
153 if (llist_empty(&ctx->queue)) {
// Queue head is the oldest request (FIFO); detach it before dispatch.
158 struct blkio_req* head = (struct blkio_req*)ctx->queue.next;
159 llist_delete(&head->reqs);
161 head->flags |= BLKIO_BUSY;
166 ctx->handle_one(head);
// Completion path, invoked when a request finishes: clears in-flight
// state, wakes any synchronous waiter, reports errors, and fires the
// caller's completion callback.
170 blkio_complete(struct blkio_req* req)
172 struct blkio_context* ctx;
175 req->flags &= ~(BLKIO_BUSY | BLKIO_PENDING);
177 // Wake all blocked processes on completion,
178 // albeit there should be no more than one waiter in every case (by design)
179 if ((req->flags & BLKIO_SHOULD_WAIT)) {
// SHOULD_WAIT implies blkio_commit parked a waiter before submission.
180 assert(!waitq_empty(&req->wait));
181 pwake_all(&req->wait);
185 WARN("request completed with error. (errno=0x%x, ctx=%p)",
186 req->errcode, (ptr_t)ctx);
189 if (req->completed) {
// BLKIO_FOC: presumably "free on complete" — request is released
// automatically after the callback. TODO confirm flag semantics;
// the definition is not visible here.
193 if ((req->flags & BLKIO_FOC)) {