}
req = blkio_vrd(vbuf, rd_block, NULL, NULL, 0);
- blkio_commit(bdev->blkio, req);
-
- pwait(&req->wait);
+ blkio_commit(bdev->blkio, req, BLKIO_WAIT);
if (!(errno = req->errcode)) {
memcpy(buf, head_buf + r, rd_size);
vbuf_alloc(&vbuf, buf + wr_size, llen);
}
- // FIXME race condition between blkio_commit and pwait.
- // Consider: what if scheduler complete the request before process enter
- // wait state?
req = blkio_vwr(vbuf, wr_block, NULL, NULL, 0);
- blkio_commit(bdev->blkio, req);
-
- pwait(&req->wait);
+ blkio_commit(bdev->blkio, req, BLKIO_WAIT);
int errno = req->errcode;
if (!errno) {
return errno;
}
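+/*
+ * Read up to one page from the block device into buf, starting at the
+ * given byte offset within the device. Returns the number of bytes read,
+ * 0 if the offset lies beyond the last block, or a negative error code.
+ */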
+int
+__block_read_page(struct device* dev, void* buf, size_t offset)
+{
+ struct vecbuf* vbuf = NULL;
+ struct block_dev* bdev = (struct block_dev*)dev->underlay;
+
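+ /* translate the byte offset into an absolute LBA; clamp the transfer at end_lba */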
+ u32_t lba = offset / bdev->blk_size + bdev->start_lba;
+ u32_t rd_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);
+
+ if (rd_lba <= lba) {
+ return 0;
+ }
+
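+ /* rd_lba now becomes the number of blocks to transfer */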
+ rd_lba -= lba;
+
+ vbuf_alloc(&vbuf, buf, rd_lba * bdev->blk_size);
+
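+ /* issue a vectored read and block until the request completes */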
+ struct blkio_req* req = blkio_vrd(vbuf, lba, NULL, NULL, 0);
+
+ blkio_commit(bdev->blkio, req, BLKIO_WAIT);
+
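+ /* on success return the byte count; otherwise negate the error code */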
+ int errno = req->errcode;
+ if (!errno) {
+ errno = rd_lba * bdev->blk_size;
+ } else {
+ errno = -errno;
+ }
+
+ blkio_free_req(req);
+ vbuf_free(vbuf);
+ return errno;
+}
+
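+/*
+ * Write counterpart of __block_read_page: write up to one page from buf
+ * to the block device at the given byte offset, clamped at end_lba.
+ * Returns the number of bytes written, 0 past the end, or a negative
+ * error code.
+ */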
+int
+__block_write_page(struct device* dev, void* buf, size_t offset)
+{
+ struct vecbuf* vbuf = NULL;
+ struct block_dev* bdev = (struct block_dev*)dev->underlay;
+
+ u32_t lba = offset / bdev->blk_size + bdev->start_lba;
+ u32_t wr_lba = MIN(lba + PG_SIZE / bdev->blk_size, bdev->end_lba);
+
+ if (wr_lba <= lba) {
+ return 0;
+ }
+
+ wr_lba -= lba;
+
+ vbuf_alloc(&vbuf, buf, wr_lba * bdev->blk_size);
+
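+ /* issue a vectored write and block until the request completes */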
+ struct blkio_req* req = blkio_vwr(vbuf, lba, NULL, NULL, 0);
+
+ blkio_commit(bdev->blkio, req, BLKIO_WAIT);
+
+ int errno = req->errcode;
+ if (!errno) {
+ errno = wr_lba * bdev->blk_size;
+ } else {
+ errno = -errno;
+ }
+
+ blkio_free_req(req);
+ vbuf_free(vbuf);
+ return errno;
+}
+
int
__block_rd_lb(struct block_dev* bdev, void* buf, u64_t start, size_t count)
{
vbuf_alloc(&vbuf, buf, bdev->blk_size * count);
struct blkio_req* req = blkio_vrd(vbuf, start, NULL, NULL, 0);
- blkio_commit(bdev->blkio, req);
- pwait(&req->wait);
+ blkio_commit(bdev->blkio, req, BLKIO_WAIT);
int errno = req->errcode;
if (!errno) {
vbuf_alloc(&vbuf, buf, bdev->blk_size * count);
struct blkio_req* req = blkio_vwr(vbuf, start, NULL, NULL, 0);
- blkio_commit(bdev->blkio, req);
- pwait(&req->wait);
+ blkio_commit(bdev->blkio, req, BLKIO_WAIT);
int errno = req->errcode;
if (!errno) {
struct device* dev = device_addvol(NULL, bdev, "sd%c", 'a' + free_slot);
dev->write = __block_write;
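+ /* page-sized transfer hooks, installed alongside the generic read/write */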
+ dev->write_page = __block_write_page;
dev->read = __block_read;
+ dev->read_page = __block_read_page;
bdev->dev = dev;
strcpy(bdev->bdev_id, dev->name_val);
struct device* dev =
device_addvol(NULL, pbdev, "%sp%d", bdev->bdev_id, index + 1);
dev->write = __block_write;
+ dev->write_page = __block_write_page;
dev->read = __block_read;
+ dev->read_page = __block_read_page;
pbdev->start_lba = start_lba;
pbdev->end_lba = end_lba;