1 #include <hal/ahci/hba.h>
2 #include <klibc/stdio.h>
3 #include <klibc/string.h>
5 #include <lunaix/block.h>
6 #include <lunaix/fs/twifs.h>
7 #include <lunaix/mm/cake.h>
8 #include <lunaix/mm/valloc.h>
9 #include <lunaix/syslog.h>
11 #include <lunaix/blkpart_gpt.h>
13 #include <lunaix/spike.h>
/* NOTE(review): this file is a sampled listing — each line carries its
 * original line number and many lines are elided. Code kept byte-identical;
 * comments only. */
/* Slab ("cake") pile backing all struct block_dev allocations. */
24 static struct cake_pile* lbd_pile;
/* Registry of registered block devices, indexed by allocation slot
 * (filled by __block_register below). */
25 static struct block_dev** dev_registry;
/* twifs "/block" directory node under which per-device nodes are exported. */
26 static struct twifs_node* blk_sysroot;
/* Forward declarations — the return-type lines are elided in this view. */
31 __block_mount_partitions(struct hba_device* hd_dev);
34 __block_register(struct block_dev* dev);
/* Initialization body fragment — the enclosing function's signature is
 * elided in this view (presumably the module's block_init). Sets up the
 * slab pile, the device registry, and the twifs export root. */
40 lbd_pile = cake_new_pile("block_dev", sizeof(struct block_dev), 1, 0);
/* NOTE(review): argument order here reads as (size, count); the byte total
 * is the same either way, but confirm against the vcalloc prototype. */
41 dev_registry = vcalloc(sizeof(struct block_dev*), MAX_DEV);
/* Create the "/block" directory node at the twifs root (NULL parent). */
43 blk_sysroot = twifs_dir_node(NULL, "block");
/* Byte-granular read handler installed on struct device (dev->read).
 * Translates a byte (offset, len) into LBA-based vectored block I/O:
 * an optional bounce buffer covers the unaligned head, and the aligned
 * remainder is read directly into the caller's buffer. Several lines of
 * the body (braces, early-return, cleanup/free paths) are elided here. */
47 __block_read(struct device* dev, void* buf, size_t offset, size_t len)
50     struct block_dev* bdev = (struct block_dev*)dev->underlay;
/* bsize: device block size; rd_block: absolute starting LBA (partition
 * start plus the block index of `offset`); r: misalignment within the
 * first block; rd_size: bytes served via the head bounce buffer. */
51     size_t bsize = bdev->blk_size, rd_block = offset / bsize + bdev->start_lba,
52            r = offset % bsize, rd_size = 0;
/* Clamp len so the read never runs past end_lba; zero-length reads bail
 * out (body of this branch elided). */
54     if (!(len = MIN(len, ((size_t)bdev->end_lba - rd_block + 1) * bsize))) {
58     struct vecbuf* vbuf = NULL;
59     struct blkio_req* req;
/* NOTE(review): tail_buf is declared but never used in the visible lines —
 * either dead or used in an elided tail-handling branch; confirm. */
60     void *head_buf = NULL, *tail_buf = NULL;
/* Unaligned head (or sub-block read): route the first block through a
 * bounce buffer so the device still sees a whole-block transfer. */
63     if (r || len < bsize) {
64         head_buf = valloc(bsize);
65         rd_size = MIN(len, bsize - r);
66         vbuf_alloc(&vbuf, head_buf, bsize);
/* Remaining bytes past the head must be block-aligned and go straight
 * into the caller's buffer. */
70     if ((len - rd_size)) {
71         size_t llen = len - rd_size;
72         assert_msg(!(llen % bsize), "misalign block read");
/* NOTE(review): `buf + rd_size` is arithmetic on void* (GNU extension). */
73         vbuf_alloc(&vbuf, buf + rd_size, llen);
/* Issue the scatter read synchronously. */
76     req = blkio_vrd(vbuf, rd_block, NULL, NULL, 0);
77     blkio_commit(bdev->blkio, req, BLKIO_WAIT);
/* On success, copy the useful head bytes out of the bounce buffer.
 * NOTE(review): `errno` here is presumably a local declared on an elided
 * line (the sibling functions below declare `int errno`); confirm it is
 * not the global errno being clobbered. head_buf/vbuf/req release paths
 * are elided from this view. */
79     if (!(errno = req->errcode)) {
80         memcpy(buf, head_buf + r, rd_size);
/* Byte-granular write handler installed on struct device (dev->write).
 * Mirror of __block_read: an unaligned head is staged through a bounce
 * buffer (read-modify-write semantics for the partial block are handled
 * on elided lines, if at all — confirm), aligned remainder is written
 * directly from the caller's buffer. Body partially elided. */
96 __block_write(struct device* dev, void* buf, size_t offset, size_t len)
98     struct block_dev* bdev = (struct block_dev*)dev->underlay;
/* bsize: block size; wr_block: absolute starting LBA; r: head
 * misalignment; wr_size: bytes staged via the bounce buffer. */
99     size_t bsize = bdev->blk_size, wr_block = offset / bsize + bdev->start_lba,
100           r = offset % bsize, wr_size = 0;
/* Clamp len to the partition's end; zero-length writes bail out. */
102     if (!(len = MIN(len, ((size_t)bdev->end_lba - wr_block + 1) * bsize))) {
106     struct vecbuf* vbuf = NULL;
107     struct blkio_req* req;
108     void* tmp_buf = NULL;
/* NOTE(review): this `wr_size` SHADOWS the outer wr_size declared above
 * (which stays 0). If this line sits inside the unaligned-head branch
 * (its `if` is elided), then `len - wr_size` below uses the OUTER zero,
 * making the tail vector cover the head bytes again — likely a bug;
 * compare with __block_read, which assigns rd_size without redeclaring. */
111     size_t wr_size = MIN(len, bsize - r);
/* Zeroed bounce block for the partial head; caller bytes are copied in
 * at offset r. */
112     tmp_buf = vzalloc(bsize);
113     vbuf_alloc(&vbuf, tmp_buf, bsize);
115     memcpy(tmp_buf + r, buf, wr_size);
/* Block-aligned tail written directly from the caller's buffer. */
118     if ((len - wr_size)) {
119         size_t llen = len - wr_size;
120         assert_msg(!(llen % bsize), "misalign block write");
121         vbuf_alloc(&vbuf, buf + wr_size, llen);
/* Issue the gather write synchronously. */
124     req = blkio_vwr(vbuf, wr_block, NULL, NULL, 0);
125     blkio_commit(bdev->blkio, req, BLKIO_WAIT);
/* NOTE(review): a local named `errno` shadows the conventional errno —
 * legal but confusing; cleanup/free and return paths are elided. */
127     int errno = req->errcode;
/* Page-granular read handler (dev->read_page): reads up to PG_SIZE bytes
 * starting at `offset` into `buf`. Body partially elided. */
144 __block_read_page(struct device* dev, void* buf, size_t offset)
146     struct vecbuf* vbuf = NULL;
147     struct block_dev* bdev = (struct block_dev*)dev->underlay;
/* lba: absolute starting LBA for this page offset. */
149     u32_t lba = offset / bdev->blk_size + bdev->start_lba;
/* rd_lba: absolute LBA one page past `lba`, clamped to the partition end. */
150     u32_t rd_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);
/* NOTE(review): rd_lba is an ABSOLUTE LBA, yet it is multiplied by
 * blk_size here as if it were a block COUNT — for any lba > 0 this sizes
 * the buffer far beyond one page. The intended count looks like
 * (rd_lba - lba); compare __block_rd_lb which takes an explicit count.
 * The same expression is used for the returned byte count below. Confirm
 * against callers before changing. */
158     vbuf_alloc(&vbuf, buf, rd_lba * bdev->blk_size);
160     struct blkio_req* req = blkio_vrd(vbuf, lba, NULL, NULL, 0);
/* Synchronous commit; wait for completion. */
162     blkio_commit(bdev->blkio, req, BLKIO_WAIT);
/* NOTE(review): local `errno` shadows the conventional errno; on success
 * it is overwritten with the byte count (dual-purpose return, presumably —
 * the surrounding if/return lines are elided). */
164     int errno = req->errcode;
166         errno = rd_lba * bdev->blk_size;
/* Page-granular write handler (dev->write_page): writes up to PG_SIZE
 * bytes from `buf` starting at `offset`. Mirror of __block_read_page;
 * body partially elided. */
177 __block_write_page(struct device* dev, void* buf, size_t offset)
179     struct vecbuf* vbuf = NULL;
180     struct block_dev* bdev = (struct block_dev*)dev->underlay;
/* Absolute starting LBA for this page offset. */
182     u32_t lba = offset / bdev->blk_size + bdev->start_lba;
/* NOTE(review): variable is named rd_lba in a write path — copy/paste
 * from __block_read_page; cosmetic, but see the sizing concern below. */
183     u32_t rd_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);
/* NOTE(review): same suspected bug as __block_read_page — rd_lba is an
 * absolute LBA used as a block count; (rd_lba - lba) looks intended. */
191     vbuf_alloc(&vbuf, buf, rd_lba * bdev->blk_size);
193     struct blkio_req* req = blkio_vwr(vbuf, lba, NULL, NULL, 0);
/* Synchronous commit; wait for completion. */
195     blkio_commit(bdev->blkio, req, BLKIO_WAIT);
/* Local `errno` shadows conventional errno; overwritten with the byte
 * count on the (elided) success path. */
197     int errno = req->errcode;
199         errno = rd_lba * bdev->blk_size;
/* Whole-block read: reads `count` blocks starting at absolute LBA `start`
 * into `buf`. Installed as bdev->ops.block_read. Cleanup/return lines are
 * elided in this view. */
210 __block_rd_lb(struct block_dev* bdev, void* buf, u64_t start, size_t count)
212     struct vecbuf* vbuf = NULL;
/* Single contiguous vector over the caller's buffer. */
213     vbuf_alloc(&vbuf, buf, bdev->blk_size * count);
215     struct blkio_req* req = blkio_vrd(vbuf, start, NULL, NULL, 0);
/* Synchronous commit; wait for completion. */
216     blkio_commit(bdev->blkio, req, BLKIO_WAIT);
/* Local `errno` shadows conventional errno (see sibling handlers). */
218     int errno = req->errcode;
/* Whole-block write: writes `count` blocks from `buf` starting at
 * absolute LBA `start`. Installed as bdev->ops.block_write. Mirror of
 * __block_rd_lb; cleanup/return lines elided. */
232 __block_wr_lb(struct block_dev* bdev, void* buf, u64_t start, size_t count)
234     struct vecbuf* vbuf = NULL;
235     vbuf_alloc(&vbuf, buf, bdev->blk_size * count);
237     struct blkio_req* req = blkio_vwr(vbuf, start, NULL, NULL, 0);
/* Synchronous commit; wait for completion. */
238     blkio_commit(bdev->blkio, req, BLKIO_WAIT);
/* Local `errno` shadows conventional errno (see sibling handlers). */
240     int errno = req->errcode;
/* Allocate a single-block scratch buffer sized to the device's block
 * size. Caller owns the buffer; release with block_free_buf. */
254 block_alloc_buf(struct block_dev* bdev)
256     return valloc(bdev->blk_size);
/* Release a buffer obtained from block_alloc_buf. Body elided in this
 * view — presumably a vfree(buf); bdev is likely unused. */
260 block_free_buf(struct block_dev* bdev, void* buf)
/* Allocate and initialize a block_dev: zeroed slab object, empty
 * partition list, a fresh blkio context bound to `ioreq_handler`, and
 * default whole-block read/write ops. Caller completes registration via
 * block_mount. */
266 block_alloc_dev(const char* blk_id, void* driver, req_handler ioreq_handler)
268     struct block_dev* bdev = cake_grab(lbd_pile);
269     memset(bdev, 0, sizeof(struct block_dev));
270     llist_init_head(&bdev->parts);
/* NOTE(review): strncpy does not NUL-terminate when strlen(blk_id) >=
 * PARTITION_NAME_SIZE. The preceding memset zeroes the field, so this is
 * only unterminated for max-length ids — confirm name is either a
 * fixed-width field or always shorter than the limit. */
271     strncpy(bdev->name, blk_id, PARTITION_NAME_SIZE);
/* Driver cookie is stored both on the device and its blkio context. */
273     bdev->blkio = blkio_newctx(ioreq_handler);
274     bdev->driver = driver;
275     bdev->blkio->driver = driver;
/* Default ops: raw whole-block transfers defined above. */
276     bdev->ops = (struct block_dev_ops){ .block_read = __block_rd_lb,
277                                         .block_write = __block_wr_lb };
/* Register a block device, probe its partition table, and export it into
 * twifs under /block/<bdev_id>, letting the filesystem exporter add its
 * own nodes. Error/return plumbing is partially elided in this view. */
283 block_mount(struct block_dev* bdev, devfs_exporter fs_export)
/* Registration failure path (body elided). */
287     if (!__block_register(bdev)) {
/* Probe GPT first; a failure is logged but presumably non-fatal so other
 * table formats can be tried (see TODO). `errno` is likely a local
 * declared on an elided line. */
292     errno = blkpart_probegpt(bdev->dev);
294         kprintf(KERROR "Fail to parse partition table (%d)\n", errno);
296     // TODO try other PT parser...
/* Export under /block/<bdev_id> and hand the node to the fs exporter. */
299     struct twifs_node* dev_root = twifs_dir_node(blk_sysroot, bdev->bdev_id);
300     blk_set_blkmapping(bdev, dev_root);
301     fs_export(bdev, dev_root);
/* Elided error label — note -errno printed with %x (hex), unlike the %d
 * above; harmless but inconsistent. */
306     kprintf(KERROR "Fail to mount block device: %s (%x)\n", bdev->name, -errno);
/* Register `bdev` in the device registry: create its /dev volume node
 * ("sda", "sdb", ...), install the byte/page I/O handlers, and record it
 * in dev_registry. `free_slot` is presumably a file-scope counter whose
 * declaration is elided from this view. */
311 __block_register(struct block_dev* bdev)
/* Registry full — fail (body elided). */
313     if (free_slot >= MAX_DEV) {
/* NOTE(review): "sd%c" with 'a' + free_slot only names 26 devices
 * correctly; if MAX_DEV > 26 the name runs past 'z'. Confirm MAX_DEV. */
317     struct device* dev = device_addvol(NULL, bdev, "sd%c", 'a' + free_slot);
318     dev->write = __block_write;
319     dev->write_page = __block_write_page;
320     dev->read = __block_read;
321     dev->read_page = __block_read_page;
/* NOTE(review): unbounded strcpy — safe only if bdev_id is sized for the
 * generated name; confirm field width. */
324     strcpy(bdev->bdev_id, dev->name_val);
325     dev_registry[free_slot++] = bdev;
/* Create and register a logical partition of `bdev`. Remaining parameters
 * (name, index, start_lba, end_lba — inferred from use below) are on
 * elided signature lines. The partition is a shallow copy of the parent
 * restricted to [start_lba, end_lba], exported as "<parent>p<index+1>". */
330 blk_mount_part(struct block_dev* bdev,
336     struct block_dev* pbdev = cake_grab(lbd_pile);
/* Shallow-copy the parent: the partition shares the parent's blkio
 * context and driver pointers. Appears intentional (I/O funnels through
 * the parent's queue) — confirm no per-device state is wrongly shared. */
337     memcpy(pbdev, bdev, sizeof(*bdev));
/* Partition volume node, e.g. "sdap1". NOTE(review): `dev` used below is
 * presumably assigned from this call on an elided declaration line. */
340         device_addvol(NULL, pbdev, "%sp%d", bdev->bdev_id, index + 1);
341     dev->write = __block_write;
342     dev->write_page = __block_write_page;
343     dev->read = __block_read;
344     dev->read_page = __block_read_page;
/* Restrict the copy to the partition's LBA window. */
346     pbdev->start_lba = start_lba;
347     pbdev->end_lba = end_lba;
/* NOTE(review): unbounded strcpy / non-terminating strncpy — same field
 * sizing caveats as in __block_register and block_alloc_dev. */
350     strcpy(pbdev->bdev_id, dev->name_val);
352     strncpy(pbdev->name, name, PARTITION_NAME_SIZE);
/* Link the partition into the parent's partition list. */
355     llist_append(&bdev->parts, &pbdev->parts);