test: add regression test for the async io feature.
refactor: centralize process state changes.
#define HBA_FIS_SIZE 256
#define HBA_CLB_SIZE 1024
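+// Interrupt sources this driver reacts to: D2H register FIS received
+// (DHR) and task-file error (TFEE); they are masked and unmasked as a group.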
+#define HBA_MY_IE (HBA_PxINTR_DHR | HBA_PxINTR_TFEE)
+
// #define DO_HBA_FULL_RESET
LOG_MODULE("AHCI")
int
hba_bind_sbuf(struct hba_cmdh* cmdh, struct hba_cmdt* cmdt, struct membuf mbuf)
{
- assert_msg(mbuf.buffer <= 0x400000, "HBA: Buffer too big");
+ assert_msg(mbuf.size <= 0x400000, "HBA: Buffer too big");
cmdh->prdt_len = 1;
cmdt->entries[0] = (struct hba_prdte){ .data_base = vmm_v2p(mbuf.buffer),
.byte_count = mbuf.size - 1 };
hba_bind_vbuf(struct hba_cmdh* cmdh, struct hba_cmdt* cmdt, struct vecbuf* vbuf)
{
size_t i = 0;
- struct vecbuf *pos, *n;
+ struct vecbuf* pos = vbuf;
- llist_for_each(pos, n, &vbuf->components, components)
- {
+ do {
assert_msg(i < HBA_MAX_PRDTE, "HBA: Too many PRDTEs");
- assert_msg(pos->buf.buffer <= 0x400000, "HBA: Buffer too big");
+ assert_msg(pos->buf.size <= 0x400000, "HBA: Buffer too big");
cmdt->entries[i++] =
(struct hba_prdte){ .data_base = vmm_v2p(pos->buf.buffer),
.byte_count = pos->buf.size - 1 };
- }
+ pos = list_entry(pos->components.next, struct vecbuf, components);
+ } while (pos != vbuf);
- cmdh->prdt_len = i + 1;
+ // the do-while above binds every component, so i is already the entry count
+ cmdh->prdt_len = i;
}
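/*
 * Usage sketch (illustrative only; the command slot setup is assumed
 * context from the surrounding driver code):
 *
 *   struct vecbuf* v = vbuf_alloc(NULL, buf0, 512);  // head component
 *   vbuf_alloc(v, buf1, 512);                        // second component
 *   hba_bind_vbuf(cmd_header, cmd_table, v);         // binds two PRDT entries
 */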
struct hba_cmdh* cmd_header;
- // mask DHR interrupt
+ // mask the interrupts we handle (see HBA_MY_IE)
- port->regs[HBA_RPxIE] &= ~HBA_PxINTR_DHR;
+ port->regs[HBA_RPxIE] &= ~HBA_MY_IE;
// prepare a DMA receive buffer to hold the data returned by the HBA
uint16_t* data_in = (uint16_t*)valloc_dma(512);
done:
- // reset interrupt status and unmask D2HR interrupt
+ // reset interrupt status and unmask our interrupts (see HBA_MY_IE)
- port->regs[HBA_RPxIE] |= HBA_PxINTR_DHR;
+ port->regs[HBA_RPxIE] |= HBA_MY_IE;
achi_register_ops(port);
vfree_dma(data_in);
return 1;
fail:
- port->regs[HBA_RPxIE] |= HBA_PxINTR_DHR;
+ port->regs[HBA_RPxIE] |= HBA_MY_IE;
vfree_dma(data_in);
vfree_dma(cmd_table);
} else {
scsi_create_packet12((struct scsi_cdb12*)cdb,
write ? SCSI_WRITE_BLOCKS_12 : SCSI_READ_BLOCKS_12,
- io_req->blk_addr,
+ // READ(12)/WRITE(12) carry a 32-bit LBA; make the truncation explicit
+ (u32_t)io_req->blk_addr,
count);
}
#include <lunaix/buffer.h>
#include <lunaix/ds/llist.h>
+#include <lunaix/ds/waitq.h>
#include <lunaix/types.h>
#define BLKIO_WRITE 0x1
#define BLKIO_ERROR 0x2
-// Free on complete
#define BLKIO_BUSY 0x4
#define BLKIO_PENDING 0x8
+
+// Free on complete
#define BLKIO_FOC 0x10
#define BLKIO_SCHED_IDEL 0x1
{
struct llist_header reqs;
struct blkio_context* io_ctx;
- u32_t flags;
struct vecbuf* vbuf;
+ u32_t flags;
+ waitq_t wait;
u64_t blk_addr;
void* evt_args;
blkio_cb completed;
static inline size_t
vbuf_size(struct vecbuf* vbuf)
{
+ if (!vbuf) {
+ return 0;
+ }
+
struct vecbuf* last =
list_entry(vbuf->components.prev, struct vecbuf, components);
return last->acc_sz;
extern volatile struct proc_info* __current;
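+/*
+ * Centralized transition into PS_BLOCKED (see the "centralize process
+ * state changes" refactor); callers follow this with sched_yieldk() or
+ * schedule() to actually yield the CPU.
+ */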
+static inline void
+block_current()
+{
+ __current->state = PS_BLOCKED;
+}
+
/**
 * @brief Allocate and initialize a process control block
*
.flags = options,
.evt_args = evt_args };
breq->vbuf = buffer;
+ waitq_init(&breq->wait);
return breq;
}
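/*
 * With the new wait queue, a caller can block synchronously on a request,
 * a sketch of the pattern used by the block device read/write paths below:
 *
 *   blkio_commit(bdev->blkio, req);
 *   pwait(&req->wait);          // sleep until blkio_complete() wakes us
 *   int err = req->errcode;
 */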
llist_append(&ctx->queue, &req->reqs);
- // if the pipeline is not running (e.g., stalling). Then we should schedule
- // one immediately and kick it start.
+ // If the pipeline is not running (e.g., stalled), schedule one
+ // request immediately to kick it off.
if (!ctx->busy) {
blkio_schedule(ctx);
}
blkio_complete(struct blkio_req* req)
{
req->flags &= ~(BLKIO_BUSY | BLKIO_PENDING);
+
if (req->completed) {
req->completed(req);
}
+
+ // Wake all processes blocked on this request; by design there
+ // should be no more than one waiter per request.
+ pwake_all(&req->wait);
+
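+ // Note: the wakeup above precedes the potential free below, so a
+ // request that is waited on must not also set BLKIO_FOC, or the
+ // waiter could resume to a freed request.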
if ((req->flags & BLKIO_FOC)) {
blkio_free_req(req);
}
void
block_init()
{
+ blkio_init();
lbd_pile = cake_new_pile("block_dev", sizeof(struct block_dev), 1, 0);
dev_registry = vcalloc(sizeof(struct block_dev*), MAX_DEV);
free_slot = 0;
size_t bsize = bdev->blk_size, rd_block = offset / bsize,
r = offset % bsize, rd_size = 0;
+ if (!(len = MIN(len, ((size_t)bdev->end_lba - rd_block + 1) * bsize))) {
+ return 0;
+ }
+
struct vecbuf* vbuf = vbuf_alloc(NULL, buf, len);
struct blkio_req* req;
void* tmp_buf = NULL;
req = blkio_vrd(vbuf, rd_block, NULL, NULL, 0);
blkio_commit(bdev->blkio, req);
- wait_if(req->flags & BLKIO_PENDING);
+
+ pwait(&req->wait);
if (!(errno = req->errcode)) {
memcpy(buf, tmp_buf + r, rd_size);
+ errno = len;
+ } else {
+ errno = -errno;
}
if (tmp_buf) {
size_t bsize = bdev->blk_size, rd_block = offset / bsize,
r = offset % bsize;
+ if (!(len = MIN(len, ((size_t)bdev->end_lba - rd_block + 1) * bsize))) {
+ return 0;
+ }
+
struct vecbuf* vbuf = vbuf_alloc(NULL, buf, len);
struct blkio_req* req;
void* tmp_buf = NULL;
req = blkio_vwr(vbuf, rd_block, NULL, NULL, 0);
blkio_commit(bdev->blkio, req);
- wait_if(req->flags & BLKIO_PENDING);
+
+ pwait(&req->wait);
int errno = req->errcode;
+ if (!errno) {
+ errno = len;
+ } else {
+ errno = -errno;
+ }
if (tmp_buf) {
vfree(tmp_buf);
block_alloc_dev(const char* blk_id, void* driver, req_handler ioreq_handler)
{
struct block_dev* bdev = cake_grab(lbd_pile);
- *bdev = (struct block_dev){ .driver = driver };
+ memset(bdev, 0, sizeof(struct block_dev));
strncpy(bdev->name, blk_id, PARTITION_NAME_SIZE);
bdev->blkio = blkio_newctx(ioreq_handler);
+ bdev->driver = driver;
+ bdev->blkio->driver = driver;
return bdev;
}
{
struct vecbuf* vbuf = valloc(sizeof(struct vecbuf));
- *vbuf =
- (struct vecbuf){ .buf = { .buffer = buf, .size = size }, .acc_sz = 0 };
+ *vbuf = (struct vecbuf){ .buf = { .buffer = buf, .size = size },
+ .acc_sz = vbuf_size(vec) + size };
if (vec) {
- vbuf->acc_sz = vbuf_size(vec) + size;
llist_append(&vec->components, &vbuf->components);
} else {
- llist_init_head(&vec->components);
+ llist_init_head(&vbuf->components);
}
return vbuf;
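/*
 * Example (illustrative): chaining two regions into one vector buffer.
 * acc_sz accumulates, so vbuf_size() on the head yields the total:
 *
 *   struct vecbuf* v = vbuf_alloc(NULL, hdr, 64);   // acc_sz = 64
 *   vbuf_alloc(v, payload, 448);                    // acc_sz = 64 + 448
 *   size_t total = vbuf_size(v);                    // 512
 */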
llist_append(&queue->waiters, &current_wq->waiters);
- // FIXME centralize the state change.
- __current->state = PS_BLOCKED;
+ block_current();
sched_yieldk();
}
llist_append(&root_proc->sleep.sleepers, &__current->sleep.sleepers);
__current->intr_ctx.registers.eax = seconds;
- __current->state = PS_BLOCKED;
+
+ block_current();
schedule();
}