+pci_probe_bar_info(struct pci_device* device)
+{
+ u32_t bar;
+ struct pci_base_addr* ba;
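+ // Probe and record all six BARs of a type-0 configuration header.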
+ for (size_t i = 0; i < 6; i++) {
+ ba = &device->bar[i];
+ ba->size = pci_bar_sizing(device, &bar, i + 1);
+ if (PCI_BAR_MMIO(bar)) {
+ ba->start = PCI_BAR_ADDR_MM(bar);
+ ba->type |= PCI_BAR_CACHEABLE(bar) ? BAR_TYPE_CACHABLE : 0;
+ ba->type |= BAR_TYPE_MMIO;
+ } else {
+ ba->start = PCI_BAR_ADDR_IO(bar);
+ }
+ }
+}
+
+void
+pci_probe_msi_info(struct pci_device* device)
+{
+ // Note that VirtualBox must be configured with the ICH9 chipset for MSI
+ // support. QEMU is fine with the default PIIX3; Bochs is still untested.
+ // See https://www.virtualbox.org/manual/ch03.html (section 3.5.1)
+ pci_reg_t status =
+ pci_read_cspace(device->cspace_base, PCI_REG_STATUS_CMD) >> 16;
+
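+ // Status register bit 4 indicates the device implements a capabilities list.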
+ if (!(status & 0x10)) {
+ device->msi_loc = 0;
+ return;
+ }
+
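+ // Config offset 0x34 holds the capabilities pointer; only the low byte is valid.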
+ pci_reg_t cap_ptr = pci_read_cspace(device->cspace_base, 0x34) & 0xff;
+ u32_t cap_hdr;
+
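+ // Walk the capability linked list: byte 0 of each entry is the capability
+ // ID (0x05 for MSI), byte 1 is the offset of the next entry (0 terminates).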
+ while (cap_ptr) {
+ cap_hdr = pci_read_cspace(device->cspace_base, cap_ptr);
+ if ((cap_hdr & 0xff) == 0x5) {
+ // MSI
+ device->msi_loc = cap_ptr;
+ return;
+ }
+ cap_ptr = (cap_hdr >> 8) & 0xff;
+ }
+}
+
+static void
+__pci_read_cspace(struct twimap* map)
+{
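+ // Dump the full 256-byte configuration space into the map buffer, one
+ // dword register at a time.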
+ struct pci_device* pcidev = (struct pci_device*)(map->data);
+
+ for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
+ *(pci_reg_t*)(map->buffer + i) =
+ pci_read_cspace(pcidev->cspace_base, i);
+ }
+
+ map->size_acc = 256;
+}
+
+static void
+__pci_read_revid(struct twimap* map)
+{
+ int class = twimap_data(map, struct pci_device*)->class_info;
+ twimap_printf(map, "0x%x", PCI_DEV_REV(class));
+}
+
+static void
+__pci_read_class(struct twimap* map)
+{
+ int class = twimap_data(map, struct pci_device*)->class_info;
+ twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
+}
+
+static void
+__pci_bar_read(struct twimap* map)
+{
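+ // The map cursor selects which of the six BARs to format.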
+ struct pci_device* pcidev = twimap_data(map, struct pci_device*);
+ int bar_index = twimap_index(map, int);
+
+ struct pci_base_addr* bar = &pcidev->bar[bar_index];
+
+ if (!bar->start && !bar->size) {
+ twimap_printf(map, "[%d] not present \n", bar_index);
+ return;
+ }
+
+ twimap_printf(
+ map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);
+
+ if ((bar->type & BAR_TYPE_MMIO)) {
+ twimap_printf(map, "mmio");
+ if ((bar->type & BAR_TYPE_CACHABLE)) {
+ twimap_printf(map, ", prefetchable");
+ }
+ } else {
+ twimap_printf(map, "io");
+ }
+
+ twimap_printf(map, "\n");
+}
+
+static int
+__pci_bar_gonext(struct twimap* map)
+{
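+ // Advance over BAR indices 0..5; returning 0 stops the iteration.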
+ if (twimap_index(map, int) >= 5) {
+ return 0;
+ }
+ map->index += 1;
+ return 1;
+}
+
+void
+pci_build_fsmapping()
+{
+ struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
+ struct pci_device *pos, *n;
+ struct twimap* map;
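+ // Create one directory node per device, named bus:slot:fn.vendor:device.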
+ llist_for_each(pos, n, &pci_devices, dev_chain)
+ {
+ pci_dev = twifs_dir_node(pci_class,
+ "%.2d:%.2d:%.2d.%.4x:%.4x",
+ PCI_BUS_NUM(pos->cspace_base),
+ PCI_SLOT_NUM(pos->cspace_base),
+ PCI_FUNCT_NUM(pos->cspace_base),
+ PCI_DEV_VENDOR(pos->device_info),
+ PCI_DEV_DEVID(pos->device_info));
+
+ map = twifs_mapping(pci_dev, pos, "config");
+ map->read = __pci_read_cspace;
+
+ map = twifs_mapping(pci_dev, pos, "revision");
+ map->read = __pci_read_revid;
+
+ map = twifs_mapping(pci_dev, pos, "class");
+ map->read = __pci_read_class;
+
+ map = twifs_mapping(pci_dev, pos, "io_bases");
+ map->read = __pci_bar_read;
+ map->go_next = __pci_bar_gonext;
+ }
+}
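+// Register the mapping builder to run when twifs loads its plugins.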
+EXPORT_TWIFS_PLUGIN(pci_devs, pci_build_fsmapping);
+
+size_t
+pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
+{
+ pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));
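+ // An all-zero BAR is unimplemented; report a zero size.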
+ if (!bar) {
+ *bar_out = 0;
+ return 0;
+ }
+
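+ // Sizing trick: write all 1s, read back the mask of writable address bits,
+ // then restore the original value. The region size is the two's complement
+ // of the mask, e.g. a readback of 0xfffff000 gives ~0xfffff000 + 1 = 0x1000
+ // bytes (4 KiB).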
+ pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), 0xffffffff);
+ pci_reg_t sized = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));
+ if (PCI_BAR_MMIO(bar)) {
+ sized = PCI_BAR_ADDR_MM(sized);
+ } else {
+ // IO BARs decode bits 31:2; mask both flag bits, not just bit 0.
+ sized = PCI_BAR_ADDR_IO(sized);
+ }
+ *bar_out = bar;
+ pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), bar);
+ return ~sized + 1;
+}
+
+struct pci_device*
+pci_get_device_by_id(u16_t vendorId, u16_t deviceId)