1 #include <klibc/strfmt.h>
2 #include <klibc/string.h>
4 #include <hal/ahci/hba.h>
8 #include <lunaix/blkpart_gpt.h>
9 #include <lunaix/block.h>
10 #include <lunaix/fs/twifs.h>
11 #include <lunaix/mm/cake.h>
12 #include <lunaix/mm/page.h>
13 #include <lunaix/mm/valloc.h>
14 #include <lunaix/spike.h>
15 #include <lunaix/syslog.h>
/* Slab (cake) pile from which all struct block_dev objects are allocated. */
static struct cake_pile* lbd_pile;
/* Registry of registered block devices, filled in allocation order (see
 * __block_register); capacity is MAX_DEV. */
static struct block_dev** dev_registry;
/* twifs "block" directory node under which per-device nodes are exported. */
static struct twifs_node* blk_sysroot;
/* Parent device category ("block") for all registered block devices. */
static struct device* blk_parent_dev;

/* Probe the given HBA device and mount any partitions found on it. */
__block_mount_partitions(struct hba_device* hd_dev);

/* Register `dev` into dev_registry and devfs; see definition below. */
__block_register(struct block_dev* dev);
    /* One dedicated pile for struct block_dev allocations. */
    lbd_pile = cake_new_pile("block_dev", sizeof(struct block_dev), 1, 0);
    /* Zeroed, fixed-capacity registry of device slots. */
    dev_registry = vcalloc(sizeof(struct block_dev*), MAX_DEV);

    /* Export the "block" directory in twifs and create the "block"
     * device category that all block devices hang under. */
    blk_sysroot = twifs_dir_node(NULL, "block");
    blk_parent_dev = device_addcat(NULL, "block");
/*
 * Submit a block I/O request on context `blkio` with the given flags and
 * translate the request's completion status into an errno-style result.
 */
__block_commit(struct blkio_context* blkio, struct blkio_req* req, int flags)

    blkio_commit(blkio, req, flags);

    /* req->errcode is filled by the underlying driver; nonzero = failure. */
    if ((errno = req->errcode)) {
/*
 * device read op: read up to `len` bytes starting at byte `offset` of the
 * block device into `buf`. A non-block-aligned head (or a read shorter than
 * one block) is staged through a bounce buffer; the remainder of the
 * transfer must be whole blocks (asserted below).
 */
__block_read(struct device* dev, void* buf, size_t offset, size_t len)

    struct block_dev* bdev = (struct block_dev*)dev->underlay;
    size_t bsize = bdev->blk_size, rd_block = offset / bsize + bdev->start_lba,
           r = offset % bsize, rd_size = 0;

    /* Clamp len so the transfer never runs past end_lba (inclusive). */
    if (!(len = MIN(len, ((size_t)bdev->end_lba - rd_block + 1) * bsize))) {

    struct vecbuf* vbuf = NULL;
    struct blkio_req* req;
    void *head_buf = NULL, *tail_buf = NULL;

    /* Unaligned head or sub-block read: stage the first block separately. */
    if (r || len < bsize) {
        head_buf = valloc(bsize);
        rd_size = MIN(len, bsize - r);
        vbuf_alloc(&vbuf, head_buf, bsize);

    /* Tail: must be block-aligned, read straight into the caller's buffer. */
    if ((len - rd_size)) {
        size_t llen = len - rd_size;
        assert_msg(!(llen % bsize), "misalign block read");
        vbuf_alloc(&vbuf, buf + rd_size, llen);

    req = blkio_vrd(vbuf, rd_block, NULL, NULL, 0);

    if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
        /* Copy the staged head into `buf`, skipping the leading r bytes. */
        memcpy(buf, head_buf + r, rd_size);
110 __block_write(struct device* dev, void* buf, size_t offset, size_t len)
112 struct block_dev* bdev = (struct block_dev*)dev->underlay;
113 size_t bsize = bdev->blk_size, wr_block = offset / bsize + bdev->start_lba,
114 r = offset % bsize, wr_size = 0;
116 if (!(len = MIN(len, ((size_t)bdev->end_lba - wr_block + 1) * bsize))) {
120 struct vecbuf* vbuf = NULL;
121 struct blkio_req* req;
122 void* tmp_buf = NULL;
125 size_t wr_size = MIN(len, bsize - r);
126 tmp_buf = vzalloc(bsize);
127 vbuf_alloc(&vbuf, tmp_buf, bsize);
129 memcpy(tmp_buf + r, buf, wr_size);
132 if ((len - wr_size)) {
133 size_t llen = len - wr_size;
134 assert_msg(!(llen % bsize), "misalign block write");
135 vbuf_alloc(&vbuf, buf + wr_size, llen);
138 req = blkio_vwr(vbuf, wr_block, NULL, NULL, 0);
141 if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
/*
 * device read_page op: read one page (PG_SIZE) worth of blocks starting at
 * byte `offset` into `buf`; on success reports the byte count via errno.
 */
__block_read_page(struct device* dev, void* buf, size_t offset)

    struct vecbuf* vbuf = NULL;
    struct block_dev* bdev = (struct block_dev*)dev->underlay;

    u32_t lba = offset / bdev->blk_size + bdev->start_lba;
    /* End of the page-sized window, clamped to the partition end. */
    u32_t rd_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);

    /* NOTE(review): rd_lba is an absolute LBA yet is used below as a
     * multiplier for buffer size and byte count; (rd_lba - lba) looks
     * intended — TODO confirm against the full source / callers. */
    vbuf_alloc(&vbuf, buf, rd_lba * bdev->blk_size);

    struct blkio_req* req = blkio_vrd(vbuf, lba, NULL, NULL, 0);

    if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
        /* Success: errno carries the number of bytes transferred. */
        errno = rd_lba * bdev->blk_size;
/*
 * device write_page op: write one page (PG_SIZE) worth of blocks starting
 * at byte `offset` from `buf`; on success reports the byte count via errno.
 */
__block_write_page(struct device* dev, void* buf, size_t offset)

    struct vecbuf* vbuf = NULL;
    struct block_dev* bdev = (struct block_dev*)dev->underlay;

    u32_t lba = offset / bdev->blk_size + bdev->start_lba;
    /* End of the page-sized window, clamped to the partition end. */
    u32_t wr_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);

    /* NOTE(review): same concern as __block_read_page — wr_lba is an
     * absolute LBA used as a size multiplier; (wr_lba - lba) looks
     * intended — TODO confirm against the full source / callers. */
    vbuf_alloc(&vbuf, buf, wr_lba * bdev->blk_size);

    struct blkio_req* req = blkio_vwr(vbuf, lba, NULL, NULL, 0);

    if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
        /* Success: errno carries the number of bytes transferred. */
        errno = wr_lba * bdev->blk_size;
/*
 * Raw block read: read `count` blocks starting at absolute LBA `start`
 * into `buf` (caller must provide blk_size * count bytes of space).
 */
__block_rd_lb(struct block_dev* bdev, void* buf, u64_t start, size_t count)

    struct vecbuf* vbuf = NULL;
    vbuf_alloc(&vbuf, buf, bdev->blk_size * count);

    struct blkio_req* req = blkio_vrd(vbuf, start, NULL, NULL, 0);

    if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
/*
 * Raw block write: write `count` blocks from `buf` starting at absolute
 * LBA `start` (buf must hold blk_size * count bytes).
 */
__block_wr_lb(struct block_dev* bdev, void* buf, u64_t start, size_t count)

    struct vecbuf* vbuf = NULL;
    vbuf_alloc(&vbuf, buf, bdev->blk_size * count);

    struct blkio_req* req = blkio_vwr(vbuf, start, NULL, NULL, 0);

    if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
/* Allocate a scratch buffer of exactly one block for `bdev`. Ownership
 * transfers to the caller; release via block_free_buf(). */
block_alloc_buf(struct block_dev* bdev)

    return valloc(bdev->blk_size);
/* Release a scratch buffer previously obtained from block_alloc_buf(). */
block_free_buf(struct block_dev* bdev, void* buf)
/*
 * Allocate and minimally initialize a block_dev named `blk_id`, bound to
 * `driver`, with I/O dispatched through a new blkio context that uses
 * `ioreq_handler`.
 */
block_alloc_dev(const char* blk_id, void* driver, req_handler ioreq_handler)

    struct block_dev* bdev = cake_grab(lbd_pile);
    memset(bdev, 0, sizeof(struct block_dev));
    llist_init_head(&bdev->parts);
    /* NOTE(review): strncpy does not NUL-terminate when blk_id is
     * >= PARTITION_NAME_SIZE chars — relies on callers passing short ids. */
    strncpy(bdev->name, blk_id, PARTITION_NAME_SIZE);

    bdev->blkio = blkio_newctx(ioreq_handler);
    bdev->driver = driver;
    bdev->blkio->driver = driver;
    /* Default per-block ops: raw LBA read/write helpers above. */
    bdev->ops = (struct block_dev_ops){ .block_read = __block_rd_lb,
                                        .block_write = __block_wr_lb };
/*
 * Register `bdev` with the device layer, probe its partition table (GPT
 * first), then export its twifs nodes through `fs_export`.
 */
block_mount(struct block_dev* bdev, devfs_exporter fs_export)

    if (!__block_register(bdev)) {

    errno = blkpart_probegpt(bdev->dev);
        /* GPT parsing failed — log, then fall through to other parsers. */
        ERROR("Fail to parse partition table (%d)", errno);

    // TODO try other PT parser...

    /* Per-device directory under "/block", mapped and handed to the fs. */
    struct twifs_node* dev_root = twifs_dir_node(blk_sysroot, bdev->bdev_id);
    blk_set_blkmapping(bdev, dev_root);
    fs_export(bdev, dev_root);

    ERROR("Fail to mount block device: %s (%x)", bdev->name, -errno);
/*
 * Give `bdev` a registry slot and a devfs identity ("sd<letter>"), and
 * wire the generic byte/page I/O ops onto its device node.
 */
__block_register(struct block_dev* bdev)

    /* All MAX_DEV registry slots taken — refuse to register. */
    if (free_slot >= MAX_DEV) {

    struct device* dev = device_allocvol(blk_parent_dev, bdev);
    dev->ops.write = __block_write;
    dev->ops.write_page = __block_write_page;
    dev->ops.read = __block_read;
    dev->ops.read_page = __block_read_page;

    /* devfs name is sda, sdb, ... in registration order. */
    device_register(dev, bdev->class, "sd%c", 'a' + free_slot);
    dev_registry[free_slot++] = bdev;

    /* Remember the assigned devfs name as this block device's id. */
    strcpy(bdev->bdev_id, dev->name_val);
/*
 * Create a logical block device covering the partition [start_lba, end_lba]
 * of parent `bdev`, register it as "<parent id>p<index+1>", and link it
 * onto the parent's partition list.
 */
blk_mount_part(struct block_dev* bdev,

    /* The partition device starts life as a full copy of its parent... */
    struct block_dev* pbdev = cake_grab(lbd_pile);
    memcpy(pbdev, bdev, sizeof(*bdev));

    struct device* dev = device_allocvol(NULL, pbdev);
    dev->ops.write = __block_write;
    dev->ops.write_page = __block_write_page;
    dev->ops.read = __block_read;
    dev->ops.read_page = __block_read_page;

    /* ...then is narrowed down to the partition's LBA window. */
    pbdev->start_lba = start_lba;
    pbdev->end_lba = end_lba;

    strcpy(pbdev->bdev_id, dev->name_val);

    /* NOTE(review): strncpy may leave pbdev->name unterminated for long
     * partition names — same caveat as block_alloc_dev. */
    strncpy(pbdev->name, name, PARTITION_NAME_SIZE);

    /* Track this partition on the parent's list of partitions. */
    llist_append(&bdev->parts, &pbdev->parts);

    /* devfs name: "<parent id>p<1-based partition number>". */
    device_register(dev, pbdev->class, "%sp%d", bdev->bdev_id, index + 1);