1 #include <lunaix/fs/api.h>
2 #include <lunaix/mm/valloc.h>
4 #include <klibc/string.h>
/*
 * ext2 implementation of the VFS inode operation table.
 * Registered on every ext2 inode via __ext2ino_fill_common().
 */
8 static struct v_inode_ops ext2_inode_ops = {
9 .dir_lookup = ext2dr_lookup,
10 .open = ext2_open_inode,
13 .read_symlink = ext2_get_symlink,
14 .set_symlink = ext2_set_symlink,
15 .rename = ext2_rename,
17 .unlink = ext2_unlink,
18 .create = ext2_create,
19 .sync = ext2_sync_inode
/*
 * ext2 implementation of the VFS open-file operation table.
 * Registered alongside ext2_inode_ops in __ext2ino_fill_common().
 */
22 static struct v_file_ops ext2_file_ops = {
23 .close = ext2_close_inode,
25 .read = ext2_inode_read,
26 .read_page = ext2_inode_read_page,
28 .write = ext2_inode_write,
29 .write_page = ext2_inode_write_page,
31 .readdir = ext2dr_read,
32 .seek = ext2_seek_inode,
33 .sync = ext2_file_sync
/*
 * Build a bTLB tag for a data-block position: drop the low
 * inds_lgents index bits (the within-table offset) and set bit
 * `msbiti` as the entry-valid marker, which valid_tag() tests.
 */
36 #define to_tag(e_ino, val) \
37 (((val) >> (e_ino)->inds_lgents) | (1 << msbiti))
38 #define valid_tag(tag) ((tag) & (1 << msbiti))
/*
 * Cache the mapping (blkid -> buf) in the inode's block-translation
 * lookaside buffer (bTLB).  Takes its own reference on `buf`
 * (fsblock_take); when an occupied way must be evicted, the victim's
 * reference is dropped first (fsblock_put).
 * blkid 0 is never cached (guard below).
 */
41 __btlb_insert(struct ext2_inode* e_inode, unsigned int blkid, bbuf_t buf)
43 struct ext2_btlb* btlb;
44 struct ext2_btlb_entry* btlbe = NULL;
47 if (unlikely(!blkid)) {
53 for (int i = 0; i < BTLB_SETS; i++)
55 if (valid_tag(btlb->buffer[i].tag)) {
59 btlbe = &btlb->buffer[i];
64 we have triggered a capacity miss.
65 since most file operations are heavily linear with strong
66 locality, we place our bet on that and skip the whole
67 overhead of LRU eviction machinery. A trivial
68 random eviction will do a fine job here.
70 cap_sel = hash_32(blkid, ilog2(BTLB_SETS));
71 btlbe = &btlb->buffer[cap_sel];
73 fsblock_put(btlbe->block);
76 btlbe->tag = to_tag(e_inode, blkid);
77 btlbe->block = fsblock_take(buf);
/*
 * Look up `blkid` in the inode's bTLB.  On a tag match, returns a new
 * reference (fsblock_take) to the cached block buffer; otherwise the
 * lookup misses and the caller must walk the indirect chain.
 * NOTE(review): an entry's block is reset to bbuf_null based on its
 * refcount before the take below — the intervening lines are not
 * visible here; confirm the invalidation path cannot feed bbuf_null
 * into fsblock_take().
 */
81 __btlb_hit(struct ext2_inode* e_inode, unsigned int blkid)
83 struct ext2_btlb* btlb;
84 struct ext2_btlb_entry* btlbe = NULL;
85 unsigned int in_tag, ref_cnts;
88 in_tag = to_tag(e_inode, blkid);
90 for (int i = 0; i < BTLB_SETS; i++)
92 btlbe = &btlb->buffer[i];
94 if (btlbe->tag != in_tag) {
98 ref_cnts = blkbuf_refcounts(btlbe->block);
101 btlbe->block = bbuf_null;
105 return fsblock_take(btlbe->block);
/*
 * Drop every valid bTLB entry of the inode, releasing the reference
 * each entry holds on its cached block buffer.  Entries whose tag is
 * not valid are skipped.  Used on inode teardown.
 */
112 __btlb_flushall(struct ext2_inode* e_inode)
114 struct ext2_btlb* btlb;
115 struct ext2_btlb_entry* btlbe = NULL;
117 btlb = e_inode->btlb;
119 for (int i = 0; i < BTLB_SETS; i++)
121 btlbe = &btlb->buffer[i];
122 if (!valid_tag(btlbe->tag)) {
127 fsblock_put(btlbe->block);
132 * Obtain the number of indirect blocks that contains
133 * pointers to next level blocks.
135 * Let N be the number of ids that a data block can hold,
136 * then the total number of data blocks assigned (reserved)
139 * i_blocks = 12 + (N + 1) + (N^2 + N + 1) + (N^3 + N^2 + N + 1)
142 __get_nr_indblks(struct ext2_sbinfo* sb, size_t fsblks)
146 int nr_inds, n, acc_nr;
148 blks = (ssize_t)fsblks;
149 nr_ents = sb->block_size / sizeof(int);
157 if (blks > 0) // order-1 indirection
159 n = MIN(ICEIL(blks, nr_ents), acc_nr);
166 if (blks > 0) // order-2 indirection
168 n = MIN(ICEIL(blks, nr_ents), acc_nr);
175 if (blks > 0) // order-3 indirection
// NOTE(review): MAX here breaks the pattern of MIN used by the
// order-1/order-2 branches above — looks like a typo; confirm
// against the i_blocks accounting formula in the header comment.
177 n = MAX(ICEIL(blks, nr_ents), acc_nr);
180 nr_inds += n + ICEIL(n, nr_ents) + 1;
// All reserved blocks must be accounted for by the three orders.
183 assert_fs(blks <= 0);
/*
 * Initialise a data-block iterator over `inode`.
 * mode DBIT_MODE_ISIZE bounds the walk by the file size (in blocks);
 * otherwise the bound is the count of allocated data blocks
 * (total fs blocks minus the indirect-table blocks).
 */
189 ext2db_itbegin(struct ext2_iterator* iter, struct v_inode* inode, int mode)
191 struct ext2_inode* e_ino;
193 e_ino = EXT2_INO(inode);
194 *iter = (struct ext2_iterator){
197 .blksz = inode->sb->blksize,
200 if (mode == DBIT_MODE_ISIZE)
201 iter->end_pos = ICEIL(e_ino->isize, inode->sb->blksize);
203 iter->end_pos = e_ino->nr_fsblks - e_ino->nr_indblks;
/*
 * Rewind the iterator: release the currently selected block buffer
 * (if any) so the next ext2db_itnext() starts clean.
 */
207 ext2db_itreset(struct ext2_iterator* iter)
209 if (likely(iter->sel_buf)) {
210 fsblock_put(iter->sel_buf);
211 iter->sel_buf = NULL;
/* Fast-forward the iterator by `count` positions (body not visible here). */
218 ext2db_itffw(struct ext2_iterator* iter, int count)
/*
 * Finish iteration: drop the reference on the selected block buffer,
 * if one is still held.  Mirrors ext2db_itreset()'s cleanup.
 */
225 ext2db_itend(struct ext2_iterator* iter)
227 if (likely(iter->sel_buf)) {
228 fsblock_put(iter->sel_buf);
229 iter->sel_buf = NULL;
/*
 * Advance to the next data block: stop on a previous error or when
 * pos passes end_pos; release the old buffer, fetch the block at the
 * current position via ext2db_get(), validate it, then expose its
 * data pointer through iter->data.
 */
234 ext2db_itnext(struct ext2_iterator* iter)
238 if (unlikely(iter->has_error)) {
242 if (unlikely(iter->pos > iter->end_pos)) {
246 if (likely(iter->sel_buf)) {
247 fsblock_put(iter->sel_buf);
250 buf = ext2db_get(iter->inode, iter->pos++);
253 if (!buf || !ext2_itcheckbuf(iter)) {
257 iter->data = blkbuf_data(buf);
/* No-op inode initialiser required by the vsb interface. */
263 ext2ino_init(struct v_superblock* vsb, struct v_inode* inode)
265 // Placeholder, to make vsb happy
/*
 * Release everything an ext2_inode owns: bTLB-cached buffers, the
 * order-1 indirect block, the inode-table buffer, the group
 * descriptor reference, the symlink string (if any) and the bTLB
 * allocation itself.  Caller frees the ext2_inode struct.
 */
269 __destruct_ext2_inode(struct ext2_inode* e_inode)
271 __btlb_flushall(e_inode);
273 fsblock_put(e_inode->ind_ord1);
274 fsblock_put(e_inode->buf);
276 ext2gd_put(e_inode->blk_grp);
278 vfree_safe(e_inode->symlink);
279 vfree(e_inode->btlb);
/*
 * VFS destructor hook (installed by __ext2ino_fill_common): tear down
 * the fs-private part of the inode.
 */
284 ext2_destruct_inode(struct v_inode* inode)
286 struct ext2_inode* e_inode;
288 e_inode = EXT2_INO(inode);
291 __destruct_ext2_inode(e_inode);
/*
 * Wire up the parts shared by ext2ino_fill() and ext2ino_make():
 * inode id, the ext2 file/inode operation tables, and the destructor.
 */
295 __ext2ino_fill_common(struct v_inode* inode, ino_t ino_id)
297 fsapi_inode_setid(inode, ino_id, ino_id);
298 fsapi_inode_setfops(inode, &ext2_file_ops);
299 fsapi_inode_setops(inode, &ext2_inode_ops);
300 fsapi_inode_setdector(inode, ext2_destruct_inode);
/*
 * Map a VFS inode type to the on-disk ext2 i_mode type bits.
 * Defaults to a regular file; directories additionally get the
 * owner-execute bit (needed for traversal); the symlink flag is
 * OR-ed on top since it can combine with the base type here.
 */
305 __translate_vfs_itype(unsigned int v_itype)
307 unsigned int e_itype = IMODE_IFREG;
309 if (v_itype == VFS_IFFILE) {
310 e_itype = IMODE_IFREG;
312 else if (check_itype(v_itype, VFS_IFDIR)) {
313 e_itype = IMODE_IFDIR;
314 e_itype |= IMODE_UEX;
316 else if (check_itype(v_itype, VFS_IFSEQDEV)) {
317 e_itype = IMODE_IFCHR;
319 else if (check_itype(v_itype, VFS_IFVOLDEV)) {
320 e_itype = IMODE_IFBLK;
323 if (check_itype(v_itype, VFS_IFSYMLINK)) {
324 e_itype |= IMODE_IFLNK;
327 // FIXME we keep this until we have our own user manager
328 e_itype |= (IMODE_URD | IMODE_GRD | IMODE_ORD);
/*
 * Populate a VFS inode from the on-disk ext2 inode `ino_id`:
 * loads the ext2_inode via ext2ino_get(), then copies size, times,
 * access mode and ownership into the VFS inode, installs the ext2
 * op tables, translates the on-disk i_mode into a VFS type and
 * completes the inode with the fs-private payload.
 */
333 ext2ino_fill(struct v_inode* inode, ino_t ino_id)
335 struct ext2_sbinfo* sb;
336 struct ext2_inode* e_ino;
337 struct v_superblock* vsb;
338 struct ext2b_inode* b_ino;
339 unsigned int type = VFS_IFFILE;
345 if ((errno = ext2ino_get(vsb, ino_id, &e_ino))) {
350 ino_id = e_ino->ino_id;
352 fsapi_inode_setsize(inode, e_ino->isize);
354 fsapi_inode_settime(inode, b_ino->i_ctime,
358 fsapi_inode_setaccess(inode, b_ino->i_mode & IMODE_ACL_MASK);
359 fsapi_inode_setowner(inode, b_ino->i_uid,
362 __ext2ino_fill_common(inode, ino_id);
// Translate on-disk type bits back to the VFS inode type.
364 if (check_itype(b_ino->i_mode, IMODE_IFLNK)) {
365 type = VFS_IFSYMLINK;
367 else if (check_itype(b_ino->i_mode, IMODE_IFDIR)) {
370 else if (check_itype(b_ino->i_mode, IMODE_IFCHR)) {
373 else if (check_itype(b_ino->i_mode, IMODE_IFBLK)) {
377 fsapi_inode_settype(inode, type);
379 fsapi_inode_complete(inode, e_ino);
/*
 * Resolve the block-group descriptor that owns inode number `ino`.
 * The group index is (ino - 1) / inodes-per-group (to_fsblock_id
 * converts the 1-based inode number to a 0-based index).
 * Returns via ext2gd_take_at(), which hands out a referenced gd.
 */
385 __get_group_desc(struct v_superblock* vsb, int ino,
386 struct ext2_gdesc** gd_out)
388 unsigned int blkgrp_id;
389 struct ext2_sbinfo* sb;
393 blkgrp_id = to_fsblock_id(ino) / sb->raw->s_ino_per_grp;
394 return ext2gd_take_at(vsb, blkgrp_id, gd_out);
/*
 * Locate the raw on-disk inode record at `ino_index` within group
 * `gd`'s inode table.  The table spans several fs blocks; the index
 * is split into a block selector and an in-block offset using the
 * number of inode records per block (block_size / s_ino_size).
 * On success *buf_out holds the referenced table block buffer —
 * ownership passes to the caller.
 */
397 static struct ext2b_inode*
398 __get_raw_inode(struct v_superblock* vsb, struct ext2_gdesc* gd,
399 bbuf_t* buf_out, int ino_index)
402 struct ext2_sbinfo* sb;
403 struct ext2b_inode* b_inode;
404 unsigned int ino_tab_sel, ino_tab_off, tab_partlen;
409 tab_partlen = sb->block_size / sb->raw->s_ino_size;
410 ino_tab_sel = ino_index / tab_partlen;
411 ino_tab_off = ino_index % tab_partlen;
413 ino_tab = fsblock_get(vsb, gd->info->bg_ino_tab + ino_tab_sel);
414 if (blkbuf_errbuf(ino_tab)) {
418 b_inode = (struct ext2b_inode*)blkbuf_data(ino_tab);
419 b_inode = &b_inode[ino_tab_off];
/*
 * Build the in-memory ext2_inode for the raw inode at `ino_index` of
 * group `gd`.  Grabs the inode-table buffer, takes a gd reference,
 * derives the 64-bit size (if FEAT_LARGE_FILE), converts i_blocks
 * (counted in 512-byte sectors) into fs blocks, and precomputes the
 * indirect-block count and log2(entries-per-indirect-block).
 */
426 static struct ext2_inode*
427 __create_inode(struct v_superblock* vsb, struct ext2_gdesc* gd, int ino_index)
430 struct ext2_sbinfo* sb;
431 struct ext2b_inode* b_inode;
432 struct ext2_inode* inode;
433 unsigned int ind_ents;
437 b_inode = __get_raw_inode(vsb, gd, &ino_tab, ino_index);
442 inode = vzalloc(sizeof(*inode));
443 inode->btlb = vzalloc(sizeof(struct ext2_btlb));
444 inode->buf = ino_tab;
445 inode->ino = b_inode;
446 inode->blk_grp = ext2gd_take(gd);
447 inode->isize = b_inode->i_size;
449 if (ext2_feature(vsb, FEAT_LARGE_FILE)) {
450 inode->isize |= (size_t)((u64_t)(b_inode->i_size_h32) << 32);
// i_blocks counts 512-byte sectors; convert to fs-block units.
453 if (b_inode->i_blocks) {
454 nr_linked = (size_t)b_inode->i_blocks;
455 nr_linked /= (sb->block_size / 512);
457 inode->nr_fsblks = nr_linked;
458 inode->nr_indblks = __get_nr_indblks(sb, nr_linked);
461 ind_ents = sb->block_size / sizeof(int);
462 assert(is_pot(ind_ents));
464 inode->inds_lgents = ilog2(ind_ents);
465 inode->ino_id = gd->ino_base + to_ext2ino_id(ino_index);
467 ext2_debug("ino(%d): isize=%lu, nr_blk=%lu, nr_inds=%lu",
468 inode->ino_id, inode->isize, inode->nr_fsblks, inode->nr_indblks);
/*
 * Lightweight inode lookup: resolve the group descriptor and raw
 * on-disk record for `ino` without constructing a full ext2_inode
 * (no bTLB, no indirect-block preload).  The caller receives the
 * raw record pointer and the table buffer it lives in.
 */
473 ext2ino_get_fast(struct v_superblock* vsb,
474 unsigned int ino, struct ext2_fast_inode* fast_ino)
478 struct ext2_gdesc* gd;
479 struct ext2_sbinfo* sb;
480 struct ext2b_inode* b_inode;
481 unsigned int ino_rel_id;
484 errno = __get_group_desc(vsb, ino, &gd);
489 ino_rel_id = to_fsblock_id(ino) % sb->raw->s_ino_per_grp;
490 b_inode = __get_raw_inode(vsb, gd, &ino_tab, ino_rel_id);
492 fast_ino->buf = ino_tab;
493 fast_ino->ino = b_inode;
/*
 * Full inode lookup: resolve group descriptor, build the in-memory
 * ext2_inode (__create_inode), then preload the order-1 indirect
 * block (i_block.ind1) into inode->ind_ord1 for faster walks.
 */
499 ext2ino_get(struct v_superblock* vsb,
500 unsigned int ino, struct ext2_inode** out)
502 struct ext2_sbinfo* sb;
503 struct ext2_inode* inode;
504 struct ext2_gdesc* gd;
505 struct ext2b_inode* b_inode;
506 unsigned int ino_rel_id;
507 unsigned int tab_partlen;
508 unsigned int ind_ents, prima_ind;
513 if ((errno = __get_group_desc(vsb, ino, &gd))) {
517 ino_rel_id = to_fsblock_id(ino) % sb->raw->s_ino_per_grp;
518 inode = __create_inode(vsb, gd, ino_rel_id);
523 b_inode = inode->ino;
524 prima_ind = b_inode->i_block.ind1;
// Keep the first-order indirect block resident for the inode's lifetime.
531 inode->ind_ord1 = fsblock_get(vsb, prima_ind);
532 if (blkbuf_errbuf(inode->ind_ord1)) {
/*
 * Allocate a fresh inode.  First tries the group of `hint` (locality),
 * then falls back to a filesystem-wide scan (ext2ino_alloc_slot).
 * On success the new in-memory inode is built, its on-disk record is
 * zeroed and the backing table buffer marked dirty; on construction
 * failure the bitmap slot is rolled back (ext2gd_free_inode).
 */
543 ext2ino_alloc(struct v_superblock* vsb,
544 struct ext2_inode* hint, struct ext2_inode** out)
547 struct ext2_gdesc* gd;
548 struct ext2_inode* inode;
550 free_ino_idx = ALLOC_FAIL;
553 free_ino_idx = ext2gd_alloc_inode(gd);
556 // locality hinted alloc failed, try entire fs
557 if (!valid_bmp_slot(free_ino_idx)) {
558 free_ino_idx = ext2ino_alloc_slot(vsb, &gd);
561 if (!valid_bmp_slot(free_ino_idx)) {
565 inode = __create_inode(vsb, gd, free_ino_idx);
568 ext2gd_free_inode(gd, free_ino_idx);
572 memset(inode->ino, 0, sizeof(*inode->ino));
573 fsblock_dirty(inode->buf);
/*
 * Return the data block at absolute position `block_pos` to its
 * owning group's block bitmap.  The position is first normalised by
 * ext2_datablock(), then mapped to a group index and an offset
 * relative to that group's base.
 */
580 __free_block_at(struct v_superblock *vsb, unsigned int block_pos)
583 struct ext2_gdesc* gd;
584 struct ext2_sbinfo * sb;
590 block_pos = ext2_datablock(vsb, block_pos);
593 gd_index = block_pos / sb->raw->s_blk_per_grp;
595 if ((errno = ext2gd_take_at(vsb, gd_index, &gd))) {
599 assert(block_pos >= gd->base);
600 ext2gd_free_block(gd, block_pos - gd->base);
/*
 * Recursively free data blocks reachable from stack->tables[depth],
 * starting at stack->indices[depth].  Depth 0 is the inode's i_block
 * array itself (15 slots); deeper levels are full indirect tables
 * (ind_entries slots each).  Leaf block IDs (or whole subtrees at
 * depth < MAX_INDS_DEPTH) are released via __free_block_at().
 * NOTE(review): function name has a typo ("recurisve"); renaming
 * would touch callers, so left as-is here.
 */
607 __free_recurisve_from(struct v_superblock *vsb, struct ext2_inode* inode,
608 struct walk_stack* stack, int depth)
614 int ind_entries = 1 << inode->inds_lgents;
615 int max_len[] = { 15, ind_entries, ind_entries, ind_entries };
617 u32_t* tables = stack->tables;
618 u32_t* indices = stack->indices;
620 if (depth > MAX_INDS_DEPTH || !tables[depth]) {
624 idx = indices[depth];
625 len = max_len[depth];
626 tab = fsblock_get(vsb, ext2_datablock(vsb, tables[depth]));
628 if (blkbuf_errbuf(tab)) {
632 db_tab = blkbuf_data(tab);
// Depth 0 points at the inode record; skip to the embedded i_block array.
634 int offset = offsetof(struct ext2b_inode, i_block_arr);
635 db_tab = offset(db_tab, offset);
641 for (; idx < len; idx++)
643 u32_t db_id = db_tab[idx];
649 if (depth >= MAX_INDS_DEPTH) {
// Recurse into the child indirect table before freeing it.
653 tables[depth] = db_id;
654 errno = __free_recurisve_from(vsb, inode, stack, depth + 1);
660 __free_block_at(vsb, db_id);
/*
 * Drop one link from the inode; when the link count reaches zero,
 * truncate the file to zero length (releasing its data blocks),
 * return the inode number to the group bitmap and destroy the
 * in-memory state.  The inode-table buffer is dirtied so the
 * updated link count reaches disk either way.
 */
670 ext2ino_free(struct v_inode* inode)
673 unsigned int ino_slot;
674 struct ext2_inode* e_ino;
675 struct ext2_gdesc* e_gd;
676 struct ext2b_inode* b_ino;
677 struct ext2_sbinfo* sb;
679 sb = EXT2_SB(inode->sb);
680 e_ino = EXT2_INO(inode);
682 e_gd = e_ino->blk_grp;
684 assert_fs(b_ino->i_lnk_cnt > 0);
685 fsblock_dirty(e_ino->buf);
// Still referenced by other directory entries: nothing more to do.
688 if (b_ino->i_lnk_cnt >= 1) {
692 ext2ino_resizing(inode, 0);
694 ino_slot = e_ino->ino_id;
695 ino_slot = to_fsblock_id(ino_slot - e_gd->base);
696 ext2gd_free_inode(e_ino->blk_grp, ino_slot);
698 __destruct_ext2_inode(e_ino);
/*
 * Copy the VFS inode's change/access/modify timestamps into the
 * on-disk record.  Caller is responsible for dirtying the buffer.
 */
706 __update_inode_access_metadata(struct ext2b_inode* b_ino,
707 struct v_inode* inode)
709 b_ino->i_ctime = inode->ctime;
710 b_ino->i_atime = inode->atime;
711 b_ino->i_mtime = inode->mtime;
/*
 * Write `size` into the on-disk inode: split across i_size_l32 /
 * i_size_h32 when the FEAT_LARGE_FILE feature is active, otherwise
 * the plain 32-bit i_size.  Also refreshes i_blocks (in 512-byte
 * sector units) from the in-memory fs-block count and dirties the
 * inode-table buffer.
 */
715 __update_inode_size(struct v_inode* inode, size_t size)
717 struct ext2b_inode* b_ino;
718 struct ext2_inode* e_ino;
719 struct ext2_sbinfo* sb;
721 sb = EXT2_SB(inode->sb);
722 e_ino = EXT2_INO(inode);
727 if (ext2_feature(inode->sb, FEAT_LARGE_FILE)) {
728 b_ino->i_size_l32 = (unsigned int)size;
729 b_ino->i_size_h32 = (unsigned int)((u64_t)size >> 32);
732 b_ino->i_size = size;
735 b_ino->i_blocks = e_ino->nr_fsblks * (sb->block_size / 512);
736 fsblock_dirty(e_ino->buf);
/*
 * Create a brand-new inode of VFS type `itype`: allocate the on-disk
 * slot (near `hint` when possible), wrap it in a VFS inode, stamp
 * the access metadata and translated i_mode, and hand the completed
 * inode back through *out.
 */
740 ext2ino_make(struct v_superblock* vsb, unsigned int itype,
741 struct ext2_inode* hint, struct v_inode** out)
744 struct ext2_inode* e_ino;
745 struct ext2b_inode* b_ino;
746 struct v_inode* inode;
748 errno = ext2ino_alloc(vsb, hint, &e_ino);
754 inode = vfs_i_alloc(vsb);
756 __ext2ino_fill_common(inode, e_ino->ino_id);
758 __update_inode_access_metadata(b_ino, inode);
759 b_ino->i_mode = __translate_vfs_itype(itype);
761 fsapi_inode_settype(inode, itype);
762 fsapi_inode_complete(inode, e_ino);
/*
 * VFS create hook: make a new inode of `itype` (using `this` as the
 * locality hint) and link it under `dnode`.
 */
769 ext2_create(struct v_inode* this, struct v_dnode* dnode, unsigned int itype)
772 struct v_inode* created;
774 errno = ext2ino_make(this->sb, itype, EXT2_INO(this), &created);
779 return ext2_link(created, dnode);
/*
 * Add a directory entry `new_name` referring to `this`: build the
 * dirent, bump the inode's link bookkeeping (ext2ino_linkto), insert
 * the entry into the parent directory, bind the dnode to the inode,
 * and finally sync the parent (the insert may have allocated a new
 * directory data block).
 */
783 ext2_link(struct v_inode* this, struct v_dnode* new_name)
786 struct v_inode* parent;
787 struct ext2_inode* e_ino;
788 struct ext2_dnode* e_dno;
789 struct ext2b_dirent dirent;
791 e_ino = EXT2_INO(this);
792 parent = fsapi_dnode_parent(new_name);
794 ext2dr_setup_dirent(&dirent, this, &new_name->name);
795 ext2ino_linkto(e_ino, &dirent);
797 errno = ext2dr_insert(parent, &dirent, &e_dno);
802 new_name->data = e_dno;
803 vfs_assign_inode(new_name, this);
805 // linking a dnode to parent could result in a new data block allocation
806 ext2_sync_inode(parent);
/*
 * Remove directory entry `name` pointing at `this`, then drop the
 * inode link (ext2ino_free decides whether to reclaim the inode).
 * The dirent must actually reference this inode (asserted).
 */
813 ext2_unlink(struct v_inode* this, struct v_dnode* name)
816 struct ext2_inode* e_ino;
817 struct ext2_dnode* e_dno;
819 e_ino = EXT2_INO(this);
820 e_dno = EXT2_DNO(name);
823 assert_fs(e_dno->self.dirent->inode == e_ino->ino_id);
825 errno = ext2dr_remove(e_dno);
830 // unlink a dnode from parent will not free the allocated data blocks
831 // rather, it leads to fragmentation
832 return ext2ino_free(this);
/*
 * Push the VFS inode's timestamps into the on-disk record and mark
 * the inode-table buffer dirty for writeback.
 */
836 ext2ino_update(struct v_inode* inode)
838 struct ext2_inode* e_ino;
840 e_ino = EXT2_INO(inode);
841 __update_inode_access_metadata(e_ino->ino, inode);
843 fsblock_dirty(e_ino->buf);
846 /* ******************* Data Blocks ******************* */
/*
 * Record one level of the indirection walk: the table's block id and
 * the slot index consulted at that depth.  Consumed later by
 * __free_recurisve_from() during truncation.
 */
849 __walkstate_set_stack(struct walk_state* state, int depth,
850 bbuf_t tab, unsigned int index)
852 state->stack.tables[depth] = fsblock_id(tab);
853 state->stack.indices[depth] = index;
/* Walk modes (bit flags) — see the doc comment below. */
856 #define WALKMODE_ALLOC 0b01
857 #define WALKMODE_NOBTLB 0b10
860 * @brief Walk the indirection chain given the position of a data block
861 * relative to the inode. Upon completion, walk_state will be
862 * populated with the result. On error, walk_state is untouched.
864 * Note, the result will always be one above the stopping level.
865 * That means, if your pos points directly to a file-content block
866 * (i.e., a leaf block), then the state is the indirect block that
867 * contains the ID of that leaf block.
869 * Two modes can be specified to alter the walk process:
872 * resolve any absence encountered
873 * during the walk by allocating and chaining indirect blocks
876 * Ignore the cached result, always perform a complete walk.
877 * This does not bypass the cache entirely; lower level caches
878 * like the block buffer (blkio request cache) will be used transparently
880 * @param inode inode to walk
881 * @param pos flattened data block position to be located
882 * @param state contain the walk result
883 * @param mode walk mode
887 __walk_indirects(struct v_inode* inode, unsigned int pos,
888 struct walk_state* state, int mode)
891 int inds, stride, shifts, level;
892 unsigned int *slotref, index, next, mask;
893 struct ext2_inode* e_inode;
894 struct ext2b_inode* b_inode;
895 struct v_superblock* vsb;
896 bbuf_t table, next_table;
899 e_inode = EXT2_INO(inode);
900 b_inode = e_inode->ino;
// Allocation is only honoured on writable mounts.
903 alloc = (mode & WALKMODE_ALLOC) && !EXT2_SB(vsb)->read_only;
// Direct blocks: slot lives in the inode record itself.
907 slotref = &b_inode->i_block_arr[pos];
908 table = fsblock_take(e_inode->buf);
// Determine the indirection order from how many stride-bit
// "digits" pos occupies (stride = log2 of ids per indirect block).
914 stride = e_inode->inds_lgents;
915 if (!(pos >> stride)) {
918 else if (!(pos >> (stride * 2))) {
921 else if (!(pos >> (stride * 3))) {
925 fail("unrealistic block pos");
928 // bTLB cache the last level indirect block
929 if (!(mode & WALKMODE_NOBTLB) && (table = __btlb_hit(e_inode, pos))) {
931 index = pos & ((1 << stride) - 1);
932 slotref = &block_buffer(table, u32_t)[index];
// Full walk: start from the top-level slot inside the inode record.
936 shifts = stride * (inds - 1);
937 mask = ((1 << stride) - 1) << shifts;
939 index = 12 + inds - 1;
940 slotref = &b_inode->i_block.inds[inds - 1];
941 table = fsblock_take(e_inode->buf);
943 for (; level < inds; level++)
945 __walkstate_set_stack(state, level, table, index);
// Missing link: allocate and chain a fresh indirect block
// (only when alloc mode is in effect).
953 if ((errno = ext2db_alloc(inode, &next_table))) {
958 *slotref = fsblock_id(next_table);
959 fsblock_dirty(table);
962 next_table = fsblock_get(vsb, next);
968 if (blkbuf_errbuf(table)) {
// Descend: pick the slot for this level from pos's bit-field.
974 index = (pos & mask) >> shifts;
975 slotref = &block_buffer(table, u32_t)[index];
978 mask = mask >> stride;
// Cache the innermost indirect table for subsequent linear access.
981 __btlb_insert(e_inode, pos, table);
984 assert(blkbuf_refcounts(table) >= 1);
988 state->slot_ref = slotref;
989 state->table = table;
990 state->level = level;
991 state->indirections = inds;
993 __walkstate_set_stack(state, level, table, index);
/*
 * Fetch the block buffer backing data position `data_pos` of the
 * inode (read-only: no allocation).  Walks the indirect chain, reads
 * the block id from the resolved slot, releases the walk state, and
 * returns a referenced buffer — or INVL_BUFFER if the walk failed.
 */
999 ext2db_get(struct v_inode* inode, unsigned int data_pos)
1003 struct walk_state state;
1005 ext2walk_init_state(&state);
1007 errno = __walk_indirects(inode, data_pos, &state, 0);
1009 return (bbuf_t)INVL_BUFFER;
1012 blkid = *state.slot_ref;
1014 ext2walk_free_state(&state);
1020 return fsblock_get(inode->sb, blkid);
/*
 * Like ext2db_get(), but allocating: walk with WALKMODE_ALLOC, and if
 * the resolved slot is still empty, allocate a fresh data block, store
 * its id through the slot and dirty the containing table.  On success
 * *out receives the (referenced) buffer.
 */
1024 ext2db_acquire(struct v_inode* inode, unsigned int data_pos, bbuf_t* out)
1028 unsigned int block_id;
1029 struct walk_state state;
1031 ext2walk_init_state(&state);
1033 errno = __walk_indirects(inode, data_pos, &state, WALKMODE_ALLOC);
1038 block_id = *state.slot_ref;
1040 buf = fsblock_get(inode->sb, block_id);
// Slot was unassigned: back it with a newly allocated block.
1044 errno = ext2db_alloc(inode, &buf);
1046 ext2walk_free_state(&state);
1050 *state.slot_ref = fsblock_id(buf);
1051 fsblock_dirty(state.table);
1054 ext2walk_free_state(&state);
1056 if (blkbuf_errbuf(buf)) {
/*
 * Allocate one data block for the inode.  Tries the inode's own block
 * group first (locality), then scans the whole fs.  The bitmap slot is
 * converted to an absolute block id (group base + data-area offset),
 * the buffer is fetched, and the inode's fs-block count is bumped.
 */
1065 ext2db_alloc(struct v_inode* inode, bbuf_t* out)
1068 struct ext2_gdesc* gd;
1069 struct ext2_inode* e_inode;
1070 struct v_superblock* vsb;
1072 next_free = ALLOC_FAIL;
1073 e_inode = EXT2_INO(inode);
1076 gd = e_inode->blk_grp;
1077 next_free = ext2gd_alloc_block(gd);
1079 // locality alloc failed, try entire fs
1080 if (!valid_bmp_slot(next_free)) {
1081 next_free = ext2db_alloc_slot(vsb, &gd);
1084 if (!valid_bmp_slot(next_free)) {
1088 next_free += gd->base;
1089 next_free = ext2_datablock(vsb, next_free);
1091 bbuf_t buf = fsblock_get(vsb, next_free);
1092 if (blkbuf_errbuf(buf)) {
1096 e_inode->nr_fsblks++;
/*
 * Release the data block at absolute position `block_pos` back to the
 * inode's own group bitmap.  NOTE(review): unlike __free_block_at(),
 * this assumes the block belongs to the inode's current group
 * (asserted via gd->base) — confirm callers guarantee that.
 */
1102 ext2db_free_pos(struct v_inode* inode, unsigned int block_pos)
1104 struct ext2_inode* e_inode;
1105 struct ext2_gdesc* gd;
1107 e_inode = EXT2_INO(inode);
1108 gd = e_inode->blk_grp;
1110 assert(block_pos >= gd->base);
1112 block_pos -= gd->base;
1114 ext2gd_free_block(gd, block_pos);
/*
 * Free the data block behind `buf`.  The buffer must be exclusively
 * held (no other references) — asserted before the bitmap release.
 */
1118 ext2db_free(struct v_inode* inode, bbuf_t buf)
1120 assert(blkbuf_not_shared(buf));
1122 ext2db_free_pos(inode, blkbuf_id(buf));
1129 ext2ino_resizing(struct v_inode* inode, size_t new_size)
1134 struct walk_state state;
1135 struct ext2_inode* e_ino;
1136 struct ext2b_inode* b_ino;
1138 e_ino = EXT2_INO(inode);
1140 oldsize = e_ino->isize;
1142 if (oldsize == new_size) {
1146 __update_inode_size(inode, new_size);
1148 if (check_symlink_node(inode)) {
1152 if (oldsize < new_size) {
1156 ext2walk_init_state(&state);
1158 pos = new_size / fsapi_block_size(inode->sb);
1159 errno = __walk_indirects(inode, pos, &state, WALKMODE_NOBTLB);
1164 errno = __free_recurisve_from(inode->sb, e_ino, &state.stack, 0);
1166 ext2walk_free_state(&state);