+ struct pci_device* pcidev = (struct pci_device*)(map->data);
+
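+ /*
+  * Snapshot the 256-byte standard configuration space into the
+  * mapping buffer, one pci_reg_t (dword) at a time.
+  */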
+ for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
+ *(pci_reg_t*)(map->buffer + i) =
+ pci_read_cspace(pcidev->cspace_base, i);
+ }
+
+ map->size_acc = 256;
+}
+
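+ /*
+  * Revision ID and class code are both packed into the cached
+  * class_info dword; the two readers below each extract their field.
+  */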
+void
+__pci_read_revid(struct twimap* map)
+{
+ int class = twimap_data(map, struct pci_device*)->class_info;
+ twimap_printf(map, "0x%x", PCI_DEV_REV(class));
+}
+
+void
+__pci_read_class(struct twimap* map)
+{
+ int class = twimap_data(map, struct pci_device*)->class_info;
+ twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
+}
+
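+ /*
+  * Emit one line per BAR: base address, decoded size, and whether it
+  * is an MMIO window (optionally prefetchable) or a port I/O range.
+  */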
+void
+__pci_bar_read(struct twimap* map)
+{
+ struct pci_device* pcidev = twimap_data(map, struct pci_device*);
+ int bar_index = twimap_index(map, int);
+
+ struct pci_base_addr* bar = &pcidev->bar[bar_index];
+
+ if (!bar->start && !bar->size) {
+ twimap_printf(map, "[%d] not present \n", bar_index);
+ return;
+ }
+
+ twimap_printf(
+ map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);
+
+ if ((bar->type & BAR_TYPE_MMIO)) {
+ twimap_printf(map, "mmio");
+ if ((bar->type & BAR_TYPE_CACHABLE)) {
+ twimap_printf(map, ", prefetchable");
+ }
+ } else {
+ twimap_printf(map, "io");
+ }
+
+ twimap_printf(map, "\n");
+}
+
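+ /*
+  * Advance the BAR iterator over indices 0 through 5 (a type-0
+  * header has six BARs).
+  */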
+int
+__pci_bar_gonext(struct twimap* map)
+{
+ if (twimap_index(map, int) >= 5) {
+ return 0;
+ }
+ map->index += 1;
+ return 1;
+}
+
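+ /*
+  * Build the twifs view of the PCI bus: a "pci" directory containing
+  * one node per device, named <bus>:<slot>:<func>.<vendor>:<devid>,
+  * exposing its config space, revision, class and BAR layout.
+  */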
+void
+pci_build_fsmapping()
+{
+ struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
+ struct pci_device *pos, *n;
+ struct twimap* map;
+ llist_for_each(pos, n, &pci_devices, dev_chain)
+ {
+ pci_dev = twifs_dir_node(pci_class,
+ "%.2d:%.2d:%.2d.%.4x:%.4x",
+ PCI_BUS_NUM(pos->cspace_base),
+ PCI_SLOT_NUM(pos->cspace_base),
+ PCI_FUNCT_NUM(pos->cspace_base),
+ PCI_DEV_VENDOR(pos->device_info),
+ PCI_DEV_DEVID(pos->device_info));
+
+ map = twifs_mapping(pci_dev, pos, "config");
+ map->read = __pci_read_cspace;
+
+ map = twifs_mapping(pci_dev, pos, "revision");
+ map->read = __pci_read_revid;
+
+ map = twifs_mapping(pci_dev, pos, "class");
+ map->read = __pci_read_class;
+
+ map = twifs_mapping(pci_dev, pos, "io_bases");
+ map->read = __pci_bar_read;
+ map->go_next = __pci_bar_gonext;
+ }
+}
+
+size_t
+pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
+{
+ pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));
+ if (!bar) {
+ *bar_out = 0;
+ return 0;
+ }
+
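+ /*
+  * Standard BAR sizing: write all ones, read back the mask of
+  * writable address bits, then restore the original value. The size
+  * of the decoded window is the two's complement of that mask.
+  */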
+ pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), 0xffffffff);
+ pci_reg_t sized =
+ pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num)) & ~0x1;
+ if (PCI_BAR_MMIO(bar)) {
+ sized = PCI_BAR_ADDR_MM(sized);
+ }
+ *bar_out = bar;
+ pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), bar);
+ return ~sized + 1;
+}
+
+void
+pci_setup_msi(struct pci_device* device, int vector)
+{
+ // Dest: APIC#0, Physical Destination, No redirection
+ u32_t msi_addr = (__APIC_BASE_PADDR);
+
+ // Edge trigger, Fixed delivery
+ u32_t msi_data = vector;
+
+ pci_write_cspace(
+ device->cspace_base, PCI_MSI_ADDR(device->msi_loc), msi_addr);
+
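+ // Message control occupies the upper 16 bits of the dword at the
+ // capability's base offset.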
+ pci_reg_t reg1 = pci_read_cspace(device->cspace_base, device->msi_loc);
+ pci_reg_t msg_ctl = reg1 >> 16;
+
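+ // A 64-bit capable function carries an extra upper-address dword,
+ // which pushes the message data register back by 4 bytes.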
+ int offset = !!(msg_ctl & MSI_CAP_64BIT) * 4;
+ pci_write_cspace(device->cspace_base,
+ PCI_MSI_DATA(device->msi_loc, offset),
+ msi_data & 0xffff);
+
+ if ((msg_ctl & MSI_CAP_MASK)) {
+ pci_write_cspace(
+ device->cspace_base, PCI_MSI_MASK(device->msi_loc, offset), 0);
+ }
+
+ // Set MSI Enable and clear Multiple Message Enable in the message
+ // control field so the device signals interrupts via MSI.
+ reg1 = (reg1 & 0xff8fffff) | 0x10000;
+ pci_write_cspace(device->cspace_base, device->msi_loc, reg1);
+}
+
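+ /*
+  * Look up a probed device by its vendor/device ID pair, packed in the
+  * same layout as the config-space identification dword (vendor in the
+  * low half, device ID in the high half).
+  */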
+struct pci_device*
+pci_get_device_by_id(uint16_t vendorId, uint16_t deviceId)
+{
+ u32_t dev_info = vendorId | (deviceId << 16);