LOG_MODULE("AHCI")
-static struct ahci_hba hba;
+struct ahci_hba hba;
-void
+static char sata_ifs[][20] = { "Not detected",
+ "SATA I (1.5Gbps)",
+ "SATA II (3.0Gbps)",
+ "SATA III (6.0Gbps)" };
+
+extern void
+ahci_fsexport(struct block_dev* bdev, void* fs_node);
+
+extern void
__ahci_hba_isr(const isr_param* param);
+extern void
+__ahci_blkio_handler(struct blkio_req* req);
+
int
ahci_init_device(struct hba_port* port);
void
achi_register_ops(struct hba_port* port);
+void
+ahci_register_device(struct hba_device* hbadev);
+
unsigned int
ahci_get_port_usage()
{
continue;
}
+ struct hba_device* hbadev = port->device;
kprintf(KINFO "sata%d: %s, sector_size=%dB, sector=%d\n",
i,
- port->device->model,
- port->device->block_size,
- (uint32_t)port->device->max_lba);
+ hbadev->model,
+ hbadev->block_size,
+ (uint32_t)hbadev->max_lba);
- block_mount_disk(port->device);
+ ahci_register_device(hbadev);
}
}
-char sata_ifs[][20] = { "Not detected",
- "SATA I (1.5Gbps)",
- "SATA II (3.0Gbps)",
- "SATA III (6.0Gbps)" };
-
void
-__ahci_hba_isr(const isr_param* param)
+ahci_register_device(struct hba_device* hbadev)
{
- // TODO: clear the interrupt status
- // TODO: I/O-operation scheduler should be here
- // kprintf(KDEBUG "HBA INTR\n");
+ struct block_dev* bdev =
+ block_alloc_dev(hbadev->model, hbadev, __ahci_blkio_handler);
+
+ bdev->end_lba = hbadev->max_lba;
+ bdev->blk_size = hbadev->block_size;
+
+ block_mount(bdev, ahci_fsexport);
}
void
cmd_fis->count = sector_count;
}
+int
+hba_bind_sbuf(struct hba_cmdh* cmdh, struct hba_cmdt* cmdt, struct membuf mbuf)
+{
+    assert_msg(mbuf.size <= 0x400000, "HBA: Buffer too big");
+ cmdh->prdt_len = 1;
+ cmdt->entries[0] = (struct hba_prdte){ .data_base = vmm_v2p(mbuf.buffer),
+ .byte_count = mbuf.size - 1 };
+}
+
+int
+hba_bind_vbuf(struct hba_cmdh* cmdh, struct hba_cmdt* cmdt, struct vecbuf* vbuf)
+{
+ size_t i = 0;
+ struct vecbuf *pos, *n;
+
+ llist_for_each(pos, n, &vbuf->components, components)
+ {
+ assert_msg(i < HBA_MAX_PRDTE, "HBA: Too many PRDTEs");
+        assert_msg(pos->buf.size <= 0x400000, "HBA: Buffer too big");
+
+ cmdt->entries[i++] =
+ (struct hba_prdte){ .data_base = vmm_v2p(pos->buf.buffer),
+ .byte_count = pos->buf.size - 1 };
+ }
+
+ cmdh->prdt_len = i + 1;
+}
+
int
hba_prepare_cmd(struct hba_port* port,
struct hba_cmdt** cmdt,
- struct hba_cmdh** cmdh,
- void* buffer,
- unsigned int size)
+ struct hba_cmdh** cmdh)
{
int slot = __get_free_slot(port);
assert_msg(slot >= 0, "HBA: No free slot");
- assert_msg(size <= 0x400000, "HBA: buffer too big");
    // Build the command header and command table
struct hba_cmdh* cmd_header = &port->cmdlst[slot];
cmd_header->options =
HBA_CMDH_FIS_LEN(sizeof(struct sata_reg_fis)) | HBA_CMDH_CLR_BUSY;
- if (buffer) {
- cmd_header->prdt_len = 1;
- cmd_table->entries[0] =
- (struct hba_prdte){ .data_base = vmm_v2p(buffer),
- .byte_count = size - 1 };
- }
-
*cmdh = cmd_header;
*cmdt = cmd_table;
    // Prepare a DMA receive buffer to hold the data returned by the HBA
uint16_t* data_in = (uint16_t*)valloc_dma(512);
- int slot = hba_prepare_cmd(port, &cmd_table, &cmd_header, data_in, 512);
+ int slot = hba_prepare_cmd(port, &cmd_table, &cmd_header);
+ hba_bind_sbuf(
+ cmd_header, cmd_table, (struct membuf){ .buffer = data_in, .size = 512 });
port->device = vzalloc(sizeof(struct hba_device));
port->device->port = port;
{
port->device->ops.identify = ahci_identify_device;
if (!(port->device->flags & HBA_DEV_FATAPI)) {
- port->device->ops.read_buffer = sata_read_buffer;
- port->device->ops.write_buffer = sata_write_buffer;
+ port->device->ops.submit = sata_submit;
} else {
- port->device->ops.read_buffer = scsi_read_buffer;
- port->device->ops.write_buffer = scsi_write_buffer;
+ port->device->ops.submit = scsi_submit;
}
}
\ No newline at end of file
port->device->last_result.status = tfd & 0x00ff;
}
-int
-__sata_buffer_io(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size,
- int write)
+void
+sata_submit(struct hba_device* dev, struct blkio_req* io_req)
{
- assert_msg(((uintptr_t)buffer & 0x3) == 0, "HBA: Bad buffer alignment");
-
struct hba_port* port = dev->port;
struct hba_cmdh* header;
struct hba_cmdt* table;
- int slot = hba_prepare_cmd(port, &table, &header, buffer, size);
- header->options |= HBA_CMDH_WRITE * (write == 1);
+ int write = !!(io_req->flags & BLKIO_WRITE);
+ int slot = hba_prepare_cmd(port, &table, &header);
+ hba_bind_vbuf(header, table, io_req->vbuf);
- uint16_t count = ICEIL(size, port->device->block_size);
+ header->options |= HBA_CMDH_WRITE * write;
+
+ uint16_t count = ICEIL(vbuf_size(io_req->vbuf), port->device->block_size);
struct sata_reg_fis* fis = (struct sata_reg_fis*)table->command_fis;
if ((port->device->flags & HBA_DEV_FEXTLBA)) {
        // The device supports 48-bit LBA addressing
- sata_create_fis(
- fis, write ? ATA_WRITE_DMA_EXT : ATA_READ_DMA_EXT, lba, count);
+ sata_create_fis(fis,
+ write ? ATA_WRITE_DMA_EXT : ATA_READ_DMA_EXT,
+ io_req->blk_addr,
+ count);
} else {
- sata_create_fis(fis, write ? ATA_WRITE_DMA : ATA_READ_DMA, lba, count);
+ sata_create_fis(
+ fis, write ? ATA_WRITE_DMA : ATA_READ_DMA, io_req->blk_addr, count);
}
/*
        Make sure we are using LBA addressing mode
*/
fis->dev = (1 << 6);
- int is_ok = ahci_try_send(port, slot);
-
- vfree_dma(table);
- return is_ok;
-}
-
-int
-sata_read_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size)
-{
- return __sata_buffer_io(dev, lba, buffer, size, 0);
-}
-
-int
-sata_write_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size)
-{
- return __sata_buffer_io(dev, lba, buffer, size, 1);
-}
+ // The async way...
+ struct hba_cmd_state* cmds = valloc(sizeof(struct hba_cmd_state));
+ *cmds = (struct hba_cmd_state){ .cmd_table = table, .state_ctx = io_req };
+ ahci_post(port, cmds, slot);
+}
\ No newline at end of file
}
}
-int
-__scsi_buffer_io(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size,
- int write)
+void
+scsi_submit(struct hba_device* dev, struct blkio_req* io_req)
{
- assert_msg(((uintptr_t)buffer & 0x3) == 0, "HBA: Bad buffer alignment");
-
struct hba_port* port = dev->port;
struct hba_cmdh* header;
struct hba_cmdt* table;
- int slot = hba_prepare_cmd(port, &table, &header, buffer, size);
+
+ int write = !!(io_req->flags & BLKIO_WRITE);
+ int slot = hba_prepare_cmd(port, &table, &header);
+ hba_bind_vbuf(header, table, io_req->vbuf);
header->options |= (HBA_CMDH_WRITE * (write == 1)) | HBA_CMDH_ATAPI;
+ size_t size = vbuf_size(io_req->vbuf);
uint32_t count = ICEIL(size, port->device->block_size);
struct sata_reg_fis* fis = (struct sata_reg_fis*)table->command_fis;
if (port->device->cbd_size == SCSI_CDB16) {
scsi_create_packet16((struct scsi_cdb16*)cdb,
write ? SCSI_WRITE_BLOCKS_16 : SCSI_READ_BLOCKS_16,
- lba,
+ io_req->blk_addr,
count);
} else {
scsi_create_packet12((struct scsi_cdb12*)cdb,
write ? SCSI_WRITE_BLOCKS_12 : SCSI_READ_BLOCKS_12,
- lba,
+ io_req->blk_addr,
count);
}
// field: cdb->misc1
    *((uint8_t*)cdb + 1) = 3 << 5; // RPROTECT=011b, disable protection checks
- int is_ok = ahci_try_send(port, slot);
- vfree_dma(table);
-
- return is_ok;
-}
-
-int
-scsi_read_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size)
-{
- return __scsi_buffer_io(dev, lba, buffer, size, 0);
-}
-
-int
-scsi_write_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size)
-{
- return __scsi_buffer_io(dev, lba, buffer, size, 1);
+ // The async way...
+ struct hba_cmd_state* cmds = valloc(sizeof(struct hba_cmd_state));
+ *cmds = (struct hba_cmd_state){ .cmd_table = table, .state_ctx = io_req };
+ ahci_post(port, cmds, slot);
}
\ No newline at end of file
--- /dev/null
+#include <hal/ahci/ahci.h>
+#include <lunaix/block.h>
+#include <lunaix/fs/twifs.h>
+
+void
+__blk_rd_serial(struct twimap* map)
+{
+ struct hba_device* hbadev = twimap_data(map, struct hba_device*);
+ twimap_printf(map, "%s", hbadev->serial_num);
+}
+
+void
+__blk_rd_last_status(struct twimap* map)
+{
+ struct hba_device* hbadev = twimap_data(map, struct hba_device*);
+ twimap_printf(map,
+ "%p\t%p\t%p",
+ hbadev->last_result.status,
+ hbadev->last_result.error,
+ hbadev->last_result.sense_key);
+}
+
+void
+__blk_rd_capabilities(struct twimap* map)
+{
+ struct hba_device* hbadev = twimap_data(map, struct hba_device*);
+ twimap_printf(map, "%p", hbadev->capabilities);
+}
+
+void
+__blk_rd_aoffset(struct twimap* map)
+{
+ struct hba_device* hbadev = twimap_data(map, struct hba_device*);
+ twimap_printf(map, "%d", hbadev->alignment_offset);
+}
+
+void
+__blk_rd_wwid(struct twimap* map)
+{
+ struct hba_device* hbadev = twimap_data(map, struct hba_device*);
+ uint32_t h = hbadev->wwn >> 32;
+ uint32_t l = (uint32_t)hbadev->wwn;
+ if ((h | l)) {
+ twimap_printf(map, "wwn:%x%x", h, l);
+ } else {
+ twimap_printf(map, "0");
+ }
+}
+
+void
+ahci_fsexport(struct block_dev* bdev, void* fs_node)
+{
+ struct twifs_node* dev_root = (struct twifs_node*)fs_node;
+ struct twimap* map;
+
+ map = twifs_mapping(dev_root, bdev->driver, "serial");
+ map->read = __blk_rd_serial;
+
+ map = twifs_mapping(dev_root, bdev->driver, "last_status");
+ map->read = __blk_rd_last_status;
+
+ map = twifs_mapping(dev_root, bdev->driver, "wwid");
+ map->read = __blk_rd_wwid;
+
+ map = twifs_mapping(dev_root, bdev->driver, "capabilities");
+ map->read = __blk_rd_capabilities;
+
+ map = twifs_mapping(dev_root, bdev->driver, "alignment_offset");
+ map->read = __blk_rd_aoffset;
+}
\ No newline at end of file
--- /dev/null
+#include <hal/ahci/hba.h>
+#include <hal/ahci/sata.h>
+#include <lunaix/isrm.h>
+#include <lunaix/mm/valloc.h>
+
+extern struct ahci_hba hba;
+
+void
+__ahci_hba_isr(const isr_param* param)
+{
+ // ignore spurious interrupt
+    if (!hba.base[HBA_RIS])
+ return;
+
+ u32_t port_num = 31 - __builtin_clz(hba.base[HBA_RIS]);
+ struct hba_port* port = hba.ports[port_num];
+ struct hba_cmd_context* cmdctx = &port->cmdctx;
+ u32_t ci_filtered = port->regs[HBA_RPxCI] ^ cmdctx->tracked_ci;
+
+ if (!ci_filtered) {
+ goto done;
+ }
+
+ u32_t slot = 31 - __builtin_clz(ci_filtered);
+ struct hba_cmd_state* cmdstate = cmdctx->issued[slot];
+
+ if (!cmdstate) {
+ goto done;
+ }
+
+ struct blkio_req* ioreq = (struct blkio_req*)cmdstate->state_ctx;
+ sata_read_error(port);
+ if ((port->device->last_result.status & HBA_PxTFD_ERR)) {
+ ioreq->errcode = port->regs[HBA_RPxTFD] & 0xffff;
+ ioreq->flags |= BLKIO_ERROR;
+ }
+
+ blkio_complete(ioreq);
+ vfree(cmdstate->cmd_table);
+
+done:
+ hba_clear_reg(port->regs[HBA_RPxIS]);
+}
+
+void
+__ahci_blkio_handler(struct blkio_req* req)
+{
+ struct hba_device* hbadev = (struct hba_device*)req->io_ctx->driver;
+
+ hbadev->ops.submit(hbadev, req);
+}
\ No newline at end of file
hba_clear_reg(port->regs[HBA_RPxIS]);
return retries < MAX_RETRY;
+}
+
+void
+ahci_post(struct hba_port* port, struct hba_cmd_state* state, int slot)
+{
+ int bitmask = 1 << slot;
+
+    // Make sure the port is idle
+ wait_until(!(port->regs[HBA_RPxTFD] & (HBA_PxTFD_BSY | HBA_PxTFD_DRQ)));
+
+ hba_clear_reg(port->regs[HBA_RPxIS]);
+
+ port->cmdctx.issued[slot] = state;
+ port->cmdctx.tracked_ci |= bitmask;
+ port->regs[HBA_RPxCI] |= bitmask;
}
\ No newline at end of file
void
ahci_parsestr(char* str, uint16_t* reg_start, int size_word);
+/**
+ * @brief Issue an HBA command (synchronous)
+ *
+ * @param port
+ * @param slot
+ * @return int
+ */
int
ahci_try_send(struct hba_port* port, int slot);
+/**
+ * @brief Issue an HBA command (asynchronous)
+ *
+ * @param port
+ * @param state
+ * @param slot
+ */
+void
+ahci_post(struct hba_port* port, struct hba_cmd_state* state, int slot);
+
#endif /* __LUNAIX_AHCI_H */
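As an aside, the two declarations above correspond to the two ways of driving a command slot. Below is a minimal sketch, not part of the patch, assuming the port and the request's buffers are already set up; demo_issue_paths is a hypothetical name and only functions shown in this chapter are used.

#include <hal/ahci/ahci.h>
#include <hal/ahci/hba.h>
#include <lunaix/mm/valloc.h>

// Illustration only: synchronous vs. asynchronous command issue.
static void
demo_issue_paths(struct hba_port* port, struct blkio_req* req)
{
    struct hba_cmdt* table;
    struct hba_cmdh* header;
    int slot = hba_prepare_cmd(port, &table, &header);

    hba_bind_vbuf(header, table, req->vbuf);

    // 1) Synchronous: ahci_try_send() polls until the slot clears, so the
    //    command table can be released right after it returns.
    //        int ok = ahci_try_send(port, slot);
    //        vfree_dma(table);

    // 2) Asynchronous: ahci_post() only fires the slot; the command table is
    //    tracked in a hba_cmd_state and reclaimed by __ahci_hba_isr() when
    //    the completion interrupt arrives.
    struct hba_cmd_state* cmds = valloc(sizeof(struct hba_cmd_state));
    *cmds = (struct hba_cmd_state){ .cmd_table = table, .state_ctx = req };
    ahci_post(port, cmds, slot);
}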
#ifndef __LUNAIX_HBA_H
#define __LUNAIX_HBA_H
+#include <lunaix/blkio.h>
+#include <lunaix/buffer.h>
#include <lunaix/types.h>
#define HBA_RCAP 0
#define HBA_CMDH_CLR_BUSY (1 << 10)
#define HBA_CMDH_PRDT_LEN(entries) (((entries)&0xffff) << 16)
+#define HBA_MAX_PRDTE 4
+
struct hba_cmdh
{
uint16_t options;
uint8_t command_fis[64];
uint8_t atapi_cmd[16];
uint8_t reserved[0x30];
- struct hba_prdte entries[3];
+ struct hba_prdte entries[HBA_MAX_PRDTE];
} __HBA_PACKED__;
#define HBA_DEV_FEXTLBA 1
struct
{
int (*identify)(struct hba_device* dev);
- int (*read_buffer)(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size);
- int (*write_buffer)(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size);
+ void (*submit)(struct hba_device* dev, struct blkio_req* io_req);
} ops;
};
+struct hba_cmd_state
+{
+ struct hba_cmdt* cmd_table;
+ void* state_ctx;
+};
+
+struct hba_cmd_context
+{
+ struct hba_cmd_state* issued[32];
+ u32_t tracked_ci;
+};
+
struct hba_port
{
volatile hba_reg_t* regs;
unsigned int ssts;
struct hba_cmdh* cmdlst;
+ struct hba_cmd_context cmdctx;
void* fis;
struct hba_device* device;
};
int
hba_prepare_cmd(struct hba_port* port,
struct hba_cmdt** cmdt,
- struct hba_cmdh** cmdh,
- void* buffer,
- unsigned int size);
+ struct hba_cmdh** cmdh);
+
+int
+hba_bind_vbuf(struct hba_cmdh* cmdh,
+ struct hba_cmdt* cmdt,
+ struct vecbuf* vbuf);
+
+int
+hba_bind_sbuf(struct hba_cmdh* cmdh, struct hba_cmdt* cmdt, struct membuf mbuf);
#endif /* __LUNAIX_HBA_H */
uint64_t lba,
uint16_t sector_count);
-int
-sata_read_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size);
-
-int
-sata_write_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size);
+void
+sata_submit(struct hba_device* dev, struct blkio_req* io_req);
void
sata_read_error(struct hba_port* port);
uint64_t lba,
uint32_t alloc_size);
-int
-scsi_read_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size);
-
-int
-scsi_write_buffer(struct hba_device* dev,
- uint64_t lba,
- void* buffer,
- uint32_t size);
+void
+scsi_submit(struct hba_device* dev, struct blkio_req* io_req);
void
scsi_parse_capacity(struct hba_device* device, uint32_t* parameter);
--- /dev/null
+#ifndef __LUNAIX_BLKIO_H
+#define __LUNAIX_BLKIO_H
+
+#include <lunaix/buffer.h>
+#include <lunaix/ds/llist.h>
+#include <lunaix/types.h>
+
+#define BLKIO_WRITE 0x1
+#define BLKIO_ERROR 0x2
+
+#define BLKIO_BUSY 0x4
+#define BLKIO_PENDING 0x8
+
+// Free on complete
+#define BLKIO_FOC 0x10
+
+#define BLKIO_SCHED_IDEL 0x1
+
+struct blkio_req;
+
+typedef void (*blkio_cb)(struct blkio_req*);
+typedef void (*req_handler)(struct blkio_req*);
+
+struct blkio_req
+{
+ struct llist_header reqs;
+ struct blkio_context* io_ctx;
+ u32_t flags;
+ struct vecbuf* vbuf;
+ u64_t blk_addr;
+ void* evt_args;
+ blkio_cb completed;
+ int errcode;
+};
+
+struct blkio_context
+{
+ struct llist_header queue;
+ struct
+ {
+ u32_t seektime;
+ u32_t rotdelay;
+ } metrics;
+ req_handler handle_one;
+ u32_t state;
+ u32_t busy;
+ void* driver;
+};
+
+void
+blkio_init();
+
+/**
+ * @brief Vectorized read request
+ *
+ * @param vbuf
+ * @param start_lba
+ * @param completed
+ * @param evt_args
+ * @param options
+ * @return struct blkio_req*
+ */
+struct blkio_req*
+blkio_vrd(struct vecbuf* vbuf,
+ u64_t start_lba,
+ blkio_cb completed,
+ void* evt_args,
+ u32_t options);
+
+/**
+ * @brief Vectorized write request
+ *
+ * @param vbuf
+ * @param start_lba
+ * @param completed
+ * @param evt_args
+ * @param options
+ * @return struct blkio_req*
+ */
+struct blkio_req*
+blkio_vwr(struct vecbuf* vbuf,
+ u64_t start_lba,
+ blkio_cb completed,
+ void* evt_args,
+ u32_t options);
+
+void
+blkio_free_req(struct blkio_req* req);
+
+/**
+ * @brief Commit an IO request to the scheduler.
+ *
+ * @param ctx
+ * @param req
+ */
+void
+blkio_commit(struct blkio_context* ctx, struct blkio_req* req);
+
+/**
+ * @brief Schedule an IO request to be handled.
+ *
+ * @param ctx
+ */
+void
+blkio_schedule(struct blkio_context* ctx);
+
+/**
+ * @brief Notify the scheduler that a request has completed, whether it
+ * succeeded or failed.
+ *
+ * @param req
+ */
+void
+blkio_complete(struct blkio_req* req);
+
+/**
+ * @brief Create a new block IO scheduling context
+ *
+ * @param handler Handler that services a single request
+ * @return struct blkio_context*
+ */
+struct blkio_context*
+blkio_newctx(req_handler handler);
+
+#endif /* __LUNAIX_BLKIO_H */
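To illustrate the request lifecycle declared above (illustrative only, not part of the patch): a caller wraps its memory into a vecbuf, commits the request, then either waits or relies on the completion callback. demo_blkio_read is a hypothetical helper; the context would normally be a block device's bdev->blkio.

#include <lunaix/blkio.h>
#include <lunaix/buffer.h>
#include <lunaix/ds/waitq.h> // wait_if() (assumed to live in the waitq header)

// Illustrative helper: read `len` bytes starting at block `lba` into `buf`.
static int
demo_blkio_read(struct blkio_context* ctx, void* buf, size_t len, u64_t lba)
{
    // wrap the destination memory as a single-component vectorized buffer
    struct vecbuf* vbuf = vbuf_alloc(NULL, buf, len);

    // build a read request: no completion callback, no extra options
    struct blkio_req* req = blkio_vrd(vbuf, lba, NULL, NULL, 0);

    // hand it to the scheduler; it is dispatched right away if the pipeline
    // is idle, otherwise it waits in ctx->queue
    blkio_commit(ctx, req);

    // poll-and-yield until blkio_complete() clears BLKIO_PENDING
    wait_if(req->flags & BLKIO_PENDING);

    int err = req->errcode;
    blkio_free_req(req);
    vbuf_free(vbuf);
    return err;
}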
#define __LUNAIX_BLOCK_H
#include <hal/ahci/hba.h>
+#include <lunaix/blkio.h>
#include <lunaix/device.h>
#define LPT_SIG 0x414e554c
#define PARTITION_NAME_SIZE 48
#define DEV_ID_SIZE 32
-typedef uint64_t partition_t;
-typedef uint32_t bdev_t;
-
struct block_dev
{
char bdev_id[DEV_ID_SIZE];
char name[PARTITION_NAME_SIZE];
- struct hba_device* hd_dev;
+ struct blkio_context* blkio;
struct device* dev;
- uint64_t base_lba;
- uint64_t end_lba;
+ void* driver;
+ u64_t end_lba;
+ u32_t blk_size;
};
struct lpt_entry
{
char part_name[PARTITION_NAME_SIZE];
- uint64_t base_lba;
- uint64_t end_lba;
+ u64_t base_lba;
+ u64_t end_lba;
} __attribute__((packed));
// Lunaix Partition Table
struct lpt_header
{
- uint32_t signature;
- uint32_t crc;
- uint32_t pt_start_lba;
- uint32_t pt_end_lba;
- uint32_t table_len;
+ u32_t signature;
+ u32_t crc;
+ u32_t pt_start_lba;
+ u32_t pt_end_lba;
+ u32_t table_len;
} __attribute__((packed));
+typedef u64_t partition_t;
+typedef uint32_t bdev_t;
+typedef void (*devfs_exporter)(struct block_dev* bdev, void* fsnode);
+
void
block_init();
+struct block_dev*
+block_alloc_dev(const char* blk_id, void* driver, req_handler ioreq_handler);
+
int
-block_mount_disk(struct hba_device* hd_dev);
+block_mount(struct block_dev* bdev, devfs_exporter export);
void
blk_mapping_init();
void
-blk_set_blkmapping(struct block_dev* bdev);
+blk_set_blkmapping(struct block_dev* bdev, void* fsnode);
#endif /* __LUNAIX_BLOCK_H */
--- /dev/null
+#ifndef __LUNAIX_BUFFER_H
+#define __LUNAIX_BUFFER_H
+
+#include <lunaix/ds/llist.h>
+#include <lunaix/types.h>
+
+struct membuf
+{
+ void* buffer;
+ size_t size;
+};
+
+struct vecbuf
+{
+ struct llist_header components;
+ struct membuf buf;
+ size_t acc_sz;
+};
+
+/**
+ * @brief Free a vectorized buffer
+ *
+ * @param vbuf
+ */
+void
+vbuf_free(struct vecbuf* vbuf);
+
+/**
+ * @brief Allocate a buffer component, or append it to `vec` (if not NULL)
+ * to form a vectorized buffer.
+ *
+ * @param vec the existing vectorized buffer to extend, or NULL to start one.
+ * @param buf a memory region that holds the data, or part of it if vectorized
+ * @param len the maximum number of bytes to be received into this component.
+ * @return struct vecbuf*
+ */
+struct vecbuf*
+vbuf_alloc(struct vecbuf* vec, void* buf, size_t len);
+
+static inline size_t
+vbuf_size(struct vecbuf* vbuf)
+{
+ struct vecbuf* last =
+ list_entry(vbuf->components.prev, struct vecbuf, components);
+ return last->acc_sz;
+}
+
+#endif /* __LUNAIX_BUFFER_H */
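A short sketch of how components are chained (illustrative, not part of the patch); this is the same pattern __block_read uses to split an unaligned read into a scratch block plus the caller's buffer. The buffers here are plain stack arrays for brevity; a real DMA target would come from the DMA allocator.

#include <lunaix/buffer.h>

static void
demo_vecbuf(void)
{
    char head_part[512];
    char tail_part[1024];

    // first call: `vec` is NULL, so this vecbuf becomes the list head
    struct vecbuf* vbuf = vbuf_alloc(NULL, head_part, sizeof(head_part));

    // second call: appended as another component of the same buffer
    vbuf_alloc(vbuf, tail_part, sizeof(tail_part));

    // total number of bytes covered by all components
    size_t total = vbuf_size(vbuf);
    (void)total;

    // the whole chain (head included) is released in one go
    vbuf_free(vbuf);
}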
#define __LUNAIX_CODVAR_H
#include <lunaix/ds/llist.h>
+#include <lunaix/sched.h>
typedef struct waitq
{
void
pwake_all(waitq_t* queue);
+#define wait_if(cond) \
+ while ((cond)) { \
+ sched_yieldk(); \
+ }
+
#endif /* __LUNAIX_CODVAR_H */
typedef unsigned int u32_t;
typedef unsigned long long u64_t;
typedef unsigned long ptr_t;
+typedef signed long ssize_t;
typedef int32_t pid_t;
typedef int64_t lba_t;
__blk_rd_size(struct twimap* map)
{
struct block_dev* bdev = twimap_data(map, struct block_dev*);
- size_t secsize = bdev->hd_dev->block_size;
- twimap_printf(map, "%u", (bdev->end_lba - bdev->base_lba) * secsize);
+ size_t secsize = bdev->blk_size;
+ twimap_printf(map, "%u", bdev->end_lba * secsize);
}
void
-__blk_rd_secsize(struct twimap* map)
+__blk_rd_lblksz(struct twimap* map)
{
struct block_dev* bdev = twimap_data(map, struct block_dev*);
- size_t secsize = bdev->hd_dev->block_size;
- twimap_printf(map, "%u", secsize);
+ size_t lblksz = bdev->blk_size;
+ twimap_printf(map, "%u", lblksz);
}
void
-__blk_rd_range(struct twimap* map)
+__blk_rd_name(struct twimap* map)
{
struct block_dev* bdev = twimap_data(map, struct block_dev*);
- twimap_printf(
- map, "%u,%u", (uint32_t)bdev->base_lba, (uint32_t)bdev->end_lba);
+ twimap_printf(map, "%s", bdev->name);
}
void
-__blk_rd_model(struct twimap* map)
+blk_set_blkmapping(struct block_dev* bdev, void* fsnode)
{
- struct block_dev* bdev = twimap_data(map, struct block_dev*);
- twimap_printf(map, "%s", bdev->hd_dev->model);
-}
-
-void
-__blk_rd_serial(struct twimap* map)
-{
- struct block_dev* bdev = twimap_data(map, struct block_dev*);
- twimap_printf(map, "%s", bdev->hd_dev->serial_num);
-}
-
-void
-__blk_rd_status(struct twimap* map)
-{
- struct block_dev* bdev = twimap_data(map, struct block_dev*);
- twimap_printf(map, "%p", bdev->hd_dev->last_result.status);
-}
-
-void
-__blk_rd_error(struct twimap* map)
-{
- struct block_dev* bdev = twimap_data(map, struct block_dev*);
- twimap_printf(map, "%p", bdev->hd_dev->last_result.error);
-}
-
-void
-__blk_rd_sense_key(struct twimap* map)
-{
- struct block_dev* bdev = twimap_data(map, struct block_dev*);
- twimap_printf(map, "%p", bdev->hd_dev->last_result.sense_key);
-}
-
-void
-__blk_rd_wwid(struct twimap* map)
-{
- struct block_dev* bdev = twimap_data(map, struct block_dev*);
- uint32_t h = bdev->hd_dev->wwn >> 32;
- uint32_t l = (uint32_t)bdev->hd_dev->wwn;
- twimap_printf(map, "%x%x", h, l);
-}
-
-void
-blk_set_blkmapping(struct block_dev* bdev)
-{
- struct twifs_node* dev_root = twifs_dir_node(blk_root, bdev->bdev_id);
+ struct twifs_node* dev_root = (struct twifs_node*)fsnode;
struct twimap* map = twifs_mapping(dev_root, bdev, "size");
map->read = __blk_rd_size;
- map = twifs_mapping(dev_root, bdev, "secsize");
- map->read = __blk_rd_secsize;
-
- map = twifs_mapping(dev_root, bdev, "range");
- map->read = __blk_rd_range;
-
- map = twifs_mapping(dev_root, bdev, "model");
- map->read = __blk_rd_model;
-
- map = twifs_mapping(dev_root, bdev, "serial");
- map->read = __blk_rd_serial;
-
- map = twifs_mapping(dev_root, bdev, "status");
- map->read = __blk_rd_status;
-
- map = twifs_mapping(dev_root, bdev, "error");
- map->read = __blk_rd_error;
-
- map = twifs_mapping(dev_root, bdev, "sense-key");
- map->read = __blk_rd_sense_key;
+ map = twifs_mapping(dev_root, bdev, "lblk_size");
+ map->read = __blk_rd_lblksz;
- map = twifs_mapping(dev_root, bdev, "wwid");
- map->read = __blk_rd_wwid;
+ map = twifs_mapping(dev_root, bdev, "name");
+ map->read = __blk_rd_name;
}
\ No newline at end of file
--- /dev/null
+#include <lunaix/blkio.h>
+#include <lunaix/mm/cake.h>
+#include <lunaix/mm/valloc.h>
+
+static struct cake_pile* blkio_reqpile;
+
+void
+blkio_init()
+{
+ blkio_reqpile = cake_new_pile("blkio_req", sizeof(struct blkio_req), 1, 0);
+}
+
+static inline struct blkio_req*
+__blkio_req_create(struct vecbuf* buffer,
+ u64_t start_lba,
+ blkio_cb completed,
+ void* evt_args,
+ u32_t options)
+{
+ options = options & ~0xf;
+ struct blkio_req* breq = (struct blkio_req*)cake_grab(blkio_reqpile);
+ *breq = (struct blkio_req){ .blk_addr = start_lba,
+ .completed = completed,
+ .flags = options,
+ .evt_args = evt_args };
+ breq->vbuf = buffer;
+ return breq;
+}
+
+struct blkio_req*
+blkio_vrd(struct vecbuf* buffer,
+ u64_t start_lba,
+ blkio_cb completed,
+ void* evt_args,
+ u32_t options)
+{
+ return __blkio_req_create(buffer, start_lba, completed, evt_args, options);
+}
+
+struct blkio_req*
+blkio_vwr(struct vecbuf* buffer,
+ u64_t start_lba,
+ blkio_cb completed,
+ void* evt_args,
+ u32_t options)
+{
+ struct blkio_req* breq =
+ __blkio_req_create(buffer, start_lba, completed, evt_args, options);
+ breq->flags |= BLKIO_WRITE;
+ return breq;
+}
+
+void
+blkio_free_req(struct blkio_req* req)
+{
+ cake_release(blkio_reqpile, (void*)req);
+}
+
+struct blkio_context*
+blkio_newctx(req_handler handler)
+{
+ struct blkio_context* ctx =
+ (struct blkio_context*)vzalloc(sizeof(struct blkio_context));
+ ctx->handle_one = handler;
+
+ llist_init_head(&ctx->queue);
+
+ return ctx;
+}
+
+void
+blkio_commit(struct blkio_context* ctx, struct blkio_req* req)
+{
+ req->flags |= BLKIO_PENDING;
+ req->io_ctx = ctx;
+ llist_append(&ctx->queue, &req->reqs);
+
+    // If the pipeline is not running (e.g., stalled), schedule one request
+    // immediately to kick-start it.
+ if (!ctx->busy) {
+ blkio_schedule(ctx);
+ }
+}
+
+void
+blkio_schedule(struct blkio_context* ctx)
+{
+ if (llist_empty(&ctx->queue)) {
+ return;
+ }
+
+ struct blkio_req* head = (struct blkio_req*)ctx->queue.next;
+ llist_delete(&head->reqs);
+
+ head->flags |= BLKIO_BUSY;
+ head->io_ctx->busy++;
+
+ ctx->handle_one(head);
+}
+
+void
+blkio_complete(struct blkio_req* req)
+{
+    struct blkio_context* ctx = req->io_ctx;
+
+    req->flags &= ~(BLKIO_BUSY | BLKIO_PENDING);
+    if (req->completed) {
+        req->completed(req);
+    }
+
+    // take the context out first: with BLKIO_FOC set, the request object is
+    // freed below and must not be touched afterwards
+    if ((req->flags & BLKIO_FOC)) {
+        blkio_free_req(req);
+    }
+
+    ctx->busy--;
+
+    blkio_schedule(ctx);
+}
\ No newline at end of file
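For completeness, a hypothetical fire-and-forget submission built on BLKIO_FOC (not part of the patch): blkio_complete() releases the request right after the callback runs, so only the buffers remain to be reclaimed by the callback. The demo_* names are illustrative.

#include <lunaix/blkio.h>
#include <lunaix/buffer.h>

// Illustrative completion callback: invoked from blkio_complete().
static void
demo_on_done(struct blkio_req* req)
{
    // BLKIO_ERROR is set by the driver path (see __ahci_hba_isr) on failure
    if ((req->flags & BLKIO_ERROR)) {
        // handle or log req->errcode here
    }
    vbuf_free(req->vbuf);
}

// Submit a write without waiting for it to finish.
static void
demo_async_write(struct blkio_context* ctx, struct vecbuf* vbuf, u64_t lba)
{
    struct blkio_req* req = blkio_vwr(vbuf, lba, demo_on_done, NULL, BLKIO_FOC);
    blkio_commit(ctx, req);
    // no wait_if() here: the request is freed on completion (BLKIO_FOC)
}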
#define MAX_DEV 32
-struct cake_pile* lbd_pile;
-struct block_dev** dev_registry;
+static struct cake_pile* lbd_pile;
+static struct block_dev** dev_registry;
+static struct twifs_node* blk_sysroot;
int free_slot = 0;
lbd_pile = cake_new_pile("block_dev", sizeof(struct block_dev), 1, 0);
dev_registry = vcalloc(sizeof(struct block_dev*), MAX_DEV);
free_slot = 0;
-
- blk_mapping_init();
+ blk_sysroot = twifs_dir_node(NULL, "block");
}
int
{
int errno;
struct block_dev* bdev = (struct block_dev*)dev->underlay;
- size_t acc_size = 0, rd_size = 0, bsize = bdev->hd_dev->block_size,
- rd_block = offset / bsize, r = offset % bsize,
- max_blk = (size_t)bdev->hd_dev->max_lba;
- void* block = vzalloc(bsize);
-
- while (acc_size < len && rd_block < max_blk) {
- if (!bdev->hd_dev->ops.read_buffer(
- bdev->hd_dev, rd_block, block, bsize)) {
- errno = EIO;
- goto error;
- }
- rd_size = MIN(len - acc_size, bsize - r);
- memcpy(buf + acc_size, block + r, rd_size);
- acc_size += rd_size;
- r = 0;
- rd_block++;
+ size_t bsize = bdev->blk_size, rd_block = offset / bsize,
+ r = offset % bsize, rd_size = 0;
+
+ struct vecbuf* vbuf = vbuf_alloc(NULL, buf, len);
+ struct blkio_req* req;
+ void* tmp_buf = NULL;
+
+ if (r) {
+ tmp_buf = vzalloc(bsize);
+ rd_size = MIN(len, bsize - r);
+        // receive the whole first block; the wanted bytes start at offset r
+        vbuf->buf.size = bsize;
+ vbuf->buf.buffer = tmp_buf;
+
+ vbuf_alloc(vbuf, buf + rd_size, len - rd_size);
}
- vfree(block);
- return acc_size;
+ req = blkio_vrd(vbuf, rd_block, NULL, NULL, 0);
+ blkio_commit(bdev->blkio, req);
+ wait_if(req->flags & BLKIO_PENDING);
-error:
- vfree(block);
+ if (!(errno = req->errcode)) {
+ memcpy(buf, tmp_buf + r, rd_size);
+ }
+
+ if (tmp_buf) {
+ vfree(tmp_buf);
+ }
+
+ blkio_free_req(req);
+ vbuf_free(vbuf);
return errno;
}
int
__block_write(struct device* dev, void* buf, size_t offset, size_t len)
{
- int errno;
struct block_dev* bdev = (struct block_dev*)dev->underlay;
- size_t acc_size = 0, wr_size = 0, bsize = bdev->hd_dev->block_size,
- wr_block = offset / bsize, r = offset % bsize;
- void* block = vzalloc(bsize);
-
- while (acc_size < len) {
- wr_size = MIN(len - acc_size, bsize - r);
- memcpy(block + r, buf + acc_size, wr_size);
- if (!bdev->hd_dev->ops.write_buffer(
- bdev->hd_dev, wr_block, block, bsize)) {
- errno = EIO;
- break;
- }
- acc_size += wr_size;
- r = 0;
- wr_block++;
+ size_t bsize = bdev->blk_size, rd_block = offset / bsize,
+ r = offset % bsize;
+
+ struct vecbuf* vbuf = vbuf_alloc(NULL, buf, len);
+ struct blkio_req* req;
+ void* tmp_buf = NULL;
+
+ if (r) {
+ size_t rd_size = MIN(len, bsize - r);
+ tmp_buf = vzalloc(bsize);
+ vbuf->buf.size = bsize;
+ vbuf->buf.buffer = tmp_buf;
+
+ memcpy(tmp_buf + r, buf, rd_size);
+ vbuf_alloc(vbuf, buf + rd_size, len - rd_size);
}
- vfree(block);
- return wr_block;
+ req = blkio_vwr(vbuf, rd_block, NULL, NULL, 0);
+ blkio_commit(bdev->blkio, req);
+ wait_if(req->flags & BLKIO_PENDING);
-error:
- vfree(block);
+ int errno = req->errcode;
+
+ if (tmp_buf) {
+ vfree(tmp_buf);
+ }
+
+ blkio_free_req(req);
+ vbuf_free(vbuf);
return errno;
}
+struct block_dev*
+block_alloc_dev(const char* blk_id, void* driver, req_handler ioreq_handler)
+{
+ struct block_dev* bdev = cake_grab(lbd_pile);
+ *bdev = (struct block_dev){ .driver = driver };
+
+ strncpy(bdev->name, blk_id, PARTITION_NAME_SIZE);
+
+    bdev->blkio = blkio_newctx(ioreq_handler);
+    // also expose the driver through the IO context, so the request handler
+    // (e.g. __ahci_blkio_handler) can recover the underlying device
+    bdev->blkio->driver = driver;
+
+ return bdev;
+}
+
int
-block_mount_disk(struct hba_device* hd_dev)
+block_mount(struct block_dev* bdev, devfs_exporter fs_export)
{
int errno = 0;
- struct block_dev* bdev = cake_grab(lbd_pile);
- strncpy(bdev->name, hd_dev->model, PARTITION_NAME_SIZE);
- bdev->hd_dev = hd_dev;
- bdev->base_lba = 0;
- bdev->end_lba = hd_dev->max_lba;
+
if (!__block_register(bdev)) {
errno = BLOCK_EFULL;
goto error;
}
- blk_set_blkmapping(bdev);
+ struct twifs_node* dev_root = twifs_dir_node(blk_sysroot, bdev->bdev_id);
+ blk_set_blkmapping(bdev, dev_root);
+ fs_export(bdev, dev_root);
+
return errno;
error:
- kprintf(KERROR "Fail to mount hd: %s[%s] (%x)\n",
- hd_dev->model,
- hd_dev->serial_num,
- -errno);
+ kprintf(KERROR "Fail to mount block device: %s (%x)\n", bdev->name, -errno);
return errno;
}
--- /dev/null
+#include <lunaix/buffer.h>
+#include <lunaix/mm/valloc.h>
+
+struct vecbuf*
+vbuf_alloc(struct vecbuf* vec, void* buf, size_t size)
+{
+ struct vecbuf* vbuf = valloc(sizeof(struct vecbuf));
+
+ *vbuf =
+ (struct vecbuf){ .buf = { .buffer = buf, .size = size }, .acc_sz = 0 };
+
+ if (vec) {
+ vbuf->acc_sz = vbuf_size(vec) + size;
+ llist_append(&vec->components, &vbuf->components);
+ } else {
+        // first component: it owns the list head and accounts for its own size
+        vbuf->acc_sz = size;
+        llist_init_head(&vbuf->components);
+ }
+
+ return vbuf;
+}
+
+void
+vbuf_free(struct vecbuf* vbuf)
+{
+ struct vecbuf *pos, *n;
+ llist_for_each(pos, n, &vbuf->components, components)
+ {
+ vfree(pos);
+ }
+ vfree(pos);
+}
\ No newline at end of file
#include <lunaix/ds/waitq.h>
#include <lunaix/process.h>
-#include <lunaix/sched.h>
#include <lunaix/spike.h>
void
void*
memcpy(void* dest, const void* src, size_t num)
{
+ if (!num)
+ return dest;
asm volatile("movl %1, %%edi\n"
"rep movsb\n" ::"S"(src),
"r"(dest),