+ return errno;
+}
+
+int
+__block_read_page(struct device* dev, void* buf, size_t offset)
+{
+    /*
+     * Read up to one page (PG_SIZE bytes) from the block device backing
+     * `dev` into `buf`, starting at byte `offset` within the partition.
+     *
+     * Returns the number of bytes read on success, 0 when the requested
+     * range starts at or beyond the partition end, or the non-zero error
+     * code propagated from __block_commit() on failure.
+     *
+     * NOTE(review): vbuf and req appear to be released by the blkio layer
+     * after the committed request completes — confirm ownership against
+     * __block_commit()/blkio_vrd(); otherwise this leaks on every call.
+     */
+    struct block_dev* bdev = (struct block_dev*)dev->underlay;
+
+    /* First LBA of the request, and the exclusive end clamped to the
+       partition boundary so a tail page never reads past end_lba. */
+    u32_t start = bdev->start_lba + offset / bdev->blk_size;
+    u32_t end = MIN(start + PG_SIZE / bdev->blk_size, bdev->end_lba);
+
+    if (end <= start) {
+        /* Nothing readable at or beyond the partition end. */
+        return 0;
+    }
+
+    u32_t nblks = end - start;
+
+    struct vecbuf* vbuf = NULL;
+    vbuf_alloc(&vbuf, buf, nblks * bdev->blk_size);
+
+    /* Build a vectored-read request; no completion callback (synchronous). */
+    struct blkio_req* req = blkio_vrd(vbuf, start, NULL, NULL, 0);
+
+    int err = __block_commit(bdev->blkio, req, BLKIO_WAIT);
+    return err ? err : (int)(nblks * bdev->blk_size);
+}
+
+int
+__block_write_page(struct device* dev, void* buf, size_t offset)
+{
+ struct vecbuf* vbuf = NULL;
+ struct block_dev* bdev = (struct block_dev*)dev->underlay;
+
+ u32_t lba = offset / bdev->blk_size + bdev->start_lba;
+ u32_t wr_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);
+
+ if (wr_lba <= lba) {
+ return 0;
+ }
+
+ wr_lba -= lba;
+
+ vbuf_alloc(&vbuf, buf, wr_lba * bdev->blk_size);
+
+ struct blkio_req* req = blkio_vwr(vbuf, lba, NULL, NULL, 0);
+
+ int errno;
+ if (!(errno = __block_commit(bdev->blkio, req, BLKIO_WAIT))) {
+ errno = wr_lba * bdev->blk_size;
+ }
+