* @copyright Copyright (c) 2022
*
*/
-#include <hal/acpi/acpi.h>
-#include <hal/apic.h>
#include <hal/pci.h>
-#include <lunaix/mm/kalloc.h>
+#include <sys/pci_hba.h>
+
+#include <klibc/string.h>
+#include <lunaix/fs/twifs.h>
+#include <lunaix/mm/valloc.h>
#include <lunaix/spike.h>
#include <lunaix/syslog.h>
LOG_MODULE("PCI")
-static struct llist_header pci_devices;
+static DEFINE_LLIST(pci_devices);
+static DECLARE_HASHTABLE(pci_devcache, 8);
+
+static struct device* pcidev_cat;
+static struct device_def pci_def;
void
pci_probe_msi_info(struct pci_device* device);
+/*
+ * Log a one-line summary of a probed PCI device: its bus:dev:fn location
+ * and, when a driver definition has been bound, the binding's class tuple
+ * (fn_grp:device.variant) and driver name.
+ */
+static inline void
+pci_log_device(struct pci_device* pcidev)
+{
+    pciaddr_t loc = pcidev->loc;
+    struct device_def* binddef = pcidev->binding.def;
+
+    // No driver definition matched this device during creation.
+    if (!binddef) {
+        kprintf("pci.%d:%d:%d, no binding\n",
+                PCILOC_BUS(loc),
+                PCILOC_DEV(loc),
+                PCILOC_FN(loc));
+        return;
+    }
+
+    kprintf("pci.%d:%d:%d, dev.%xh:%xh.%d, %s\n",
+            PCILOC_BUS(loc),
+            PCILOC_DEV(loc),
+            PCILOC_FN(loc),
+            binddef->class.fn_grp,
+            binddef->class.device,
+            binddef->class.variant,
+            binddef->name);
+}
+
+/*
+ * Create and register a pci_device for the function at `loc`.
+ *
+ * pci_base - configuration-space base address for this function.
+ * devinfo  - the vendor/device dword the caller already read at cfg offset 0.
+ *
+ * Reads the class code (cfg 0x8) and interrupt line/pin (cfg 0x3c), probes
+ * MSI and BAR layout, links the device into pci_devices and the device
+ * framework, then tries to bind a matching driver definition.  The device
+ * is returned even when no driver matched or binding failed; in those
+ * cases binding.def stays NULL (vzalloc zero-fills the struct).
+ */
+static struct pci_device*
+pci_create_device(pciaddr_t loc, ptr_t pci_base, int devinfo)
+{
+    pci_reg_t class = pci_read_cspace(pci_base, 0x8);
+    struct hbucket* bucket = device_definitions_byif(DEVIF_PCI);
+
+    // NOTE(review): devid/vendor are computed but unused below - the masked
+    // ident match works on the raw devinfo dword instead.  Candidates for
+    // removal in a follow-up.
+    u32_t devid = PCI_DEV_DEVID(devinfo);
+    u32_t vendor = PCI_DEV_VENDOR(devinfo);
+    pci_reg_t intr = pci_read_cspace(pci_base, 0x3c);
+
+    struct pci_device* device = vzalloc(sizeof(struct pci_device));
+    device->class_info = class;
+    device->device_info = devinfo;
+    device->cspace_base = pci_base;
+    device->intr_info = intr;
+
+    device_create(&device->dev, pcidev_cat, DEV_IFSYS, NULL);
+
+    pci_probe_msi_info(device);
+    pci_probe_bar_info(device);
+
+    llist_append(&pci_devices, &device->dev_chain);
+    device_register(&device->dev, &pci_def.class, "%x", loc);
+    pci_def.class.variant++;
+
+    // find a suitable binding
+
+    // Walk every driver definition registered for the PCI interface;
+    // match first on class code, then on the masked vendor/device ident.
+    struct pci_device_def *pos, *n;
+    hashtable_bucket_foreach(bucket, pos, n, devdef.hlist_if)
+    {
+        if (pos->dev_class != PCI_DEV_CLASS(class)) {
+            continue;
+        }
+
+        u32_t idm = pos->ident_mask;
+        int result = (pos->dev_ident & idm) == (devinfo & idm);
+
+        if (result) {
+            goto found;
+        }
+    }
+
+    goto done;
+
+found:
+    if (!pos->devdef.bind) {
+        kprintf(KERROR "pci_loc:%x, (%xh:%xh.%d) unbindable\n",
+                loc,
+                pos->devdef.class.fn_grp,
+                pos->devdef.class.device,
+                pos->devdef.class.variant);
+        goto done;
+    }
+
+    int errno = pos->devdef.bind(&pos->devdef, &device->dev);
+    if (errno) {
+        kprintf(KERROR "pci_loc:%x, (%xh:%xh.%d) failed, e=%d\n",
+                loc,
+                pos->devdef.class.fn_grp,
+                pos->devdef.class.device,
+                pos->devdef.class.variant,
+                errno);
+        goto done;
+    }
+
+    device->binding.def = &pos->devdef;
+
+done:
+    return device;
+}
+
 void
-pci_probe_device(int bus, int dev, int funct)
+pci_probe_device(pciaddr_t pci_loc)
 {
-    uint32_t base = PCI_ADDRESS(bus, dev, funct);
+    // Probe a single PCI function at pci_loc.  Recurses into sibling
+    // functions of a multi-function device, and dedups already-seen
+    // locations through the pci_devcache hashtable.
+    u32_t base = PCI_CFGADDR(pci_loc);
     pci_reg_t reg1 = pci_read_cspace(base, 0);
     // Vendor=0xffff则表示设备不存在
     // 防止堆栈溢出
     // QEMU的ICH9/Q35实现似乎有点问题,对于多功能设备的每一个功能的header type
     // 都将第七位置位。而virtualbox 就没有这个毛病。
+    // (Translation of the notes above: vendor==0xffff means the device is
+    //  absent; the function-0 guard prevents stack overflow, because QEMU's
+    //  ICH9/Q35 sets bit 7 of the header type on *every* function of a
+    //  multi-function device, unlike VirtualBox.)
-    if ((hdr_type & 0x80) && funct == 0) {
+    if ((hdr_type & 0x80) && PCILOC_FN(pci_loc) == 0) {
         hdr_type = hdr_type & ~0x80;
         // 探测多用途设备(multi-function device)
+        // Probe the remaining functions of this multi-function device.
+        // NOTE(review): this scans functions 1..6 only; the PCI spec allows
+        // up to 8 functions (0-7) per device - confirm the intended bound.
         for (int i = 1; i < 7; i++) {
-            pci_probe_device(bus, dev, i);
+            pci_probe_device(pci_loc + i);
         }
     }
-    if (hdr_type != PCI_TDEV) {
-        // XXX: 目前忽略所有桥接设备,比如PCI-PCI桥接器,或者是CardBus桥接器
-        return;
+    // Already discovered on a previous scan? Just re-log it and return.
+    struct pci_device *pos, *n;
+    hashtable_hash_foreach(pci_devcache, pci_loc, pos, n, dev_cache)
+    {
+        if (pos->loc == pci_loc) {
+            pci_log_device(pos);
+            return;
+        }
     }
-    pci_reg_t intr = pci_read_cspace(base, 0x3c);
-    pci_reg_t class = pci_read_cspace(base, 0x8);
-
-    struct pci_device* device = lxmalloc(sizeof(struct pci_device));
-    *device = (struct pci_device){ .cspace_base = base,
-                                   .class_info = class,
-                                   .device_info = reg1,
-                                   .intr_info = intr };
-
-    pci_probe_msi_info(device);
+    // First sighting: create the device, cache it by location, log it.
+    struct pci_device* pcidev = pci_create_device(pci_loc, base, reg1);
+    if (pcidev) {
+        pcidev->loc = pci_loc;
+        hashtable_hash_in(pci_devcache, &pcidev->dev_cache, pci_loc);
+        pci_log_device(pcidev);
+    }
+}
- llist_append(&pci_devices, &device->dev_chain);
+/*
+ * Brute-force scan of the entire PCI location space.  Steps by 8 so each
+ * iteration lands on function 0 of a device slot; pci_probe_device then
+ * recurses into the remaining functions of multi-function devices.
+ * Re-scans are cheap: known locations short-circuit via pci_devcache.
+ */
+void
+pci_scan()
+{
+    for (u32_t loc = 0; loc < (pciaddr_t)-1; loc += 8) {
+        pci_probe_device((pciaddr_t)loc);
+    }
+}
void
-pci_probe()
+pci_probe_bar_info(struct pci_device* device)
{
- // 暴力扫描所有PCI设备
- // XXX: 尽管最多会有256条PCI总线,但就目前而言,只考虑bus #0就足够了
- for (int bus = 0; bus < 1; bus++) {
- for (int dev = 0; dev < 32; dev++) {
- pci_probe_device(bus, dev, 0);
+ u32_t bar;
+ struct pci_base_addr* ba;
+ for (size_t i = 0; i < 6; i++) {
+ ba = &device->bar[i];
+ ba->size = pci_bar_sizing(device, &bar, i + 1);
+ if (PCI_BAR_MMIO(bar)) {
+ ba->start = PCI_BAR_ADDR_MM(bar);
+ ba->type |= PCI_BAR_CACHEABLE(bar) ? BAR_TYPE_CACHABLE : 0;
+ ba->type |= BAR_TYPE_MMIO;
+ } else {
+ ba->start = PCI_BAR_ADDR_IO(bar);
}
}
}
}
pci_reg_t cap_ptr = pci_read_cspace(device->cspace_base, 0x34) & 0xff;
- uint32_t cap_hdr;
+ u32_t cap_hdr;
while (cap_ptr) {
cap_hdr = pci_read_cspace(device->cspace_base, cap_ptr);
}
}
-#define PCI_PRINT_BAR_LISTING
-
-void
-pci_print_device()
-{
- struct pci_device *pos, *n;
- llist_for_each(pos, n, &pci_devices, dev_chain)
- {
- kprintf(KINFO "(B%xh:D%xh:F%xh) Dev %x:%x, Class 0x%x\n",
- PCI_BUS_NUM(pos->cspace_base),
- PCI_SLOT_NUM(pos->cspace_base),
- PCI_FUNCT_NUM(pos->cspace_base),
- PCI_DEV_VENDOR(pos->device_info),
- PCI_DEV_DEVID(pos->device_info),
- PCI_DEV_CLASS(pos->class_info));
-
- kprintf(KINFO "\t IRQ: %d, INT#x: %d\n",
- PCI_INTR_IRQ(pos->intr_info),
- PCI_INTR_PIN(pos->intr_info));
-#ifdef PCI_PRINT_BAR_LISTING
- pci_reg_t bar;
- for (size_t i = 1; i <= 6; i++) {
- size_t size = pci_bar_sizing(pos, &bar, i);
- if (!bar)
- continue;
- if (PCI_BAR_MMIO(bar)) {
- kprintf(KINFO "\t BAR#%d (MMIO) %p [%d]\n",
- i,
- PCI_BAR_ADDR_MM(bar),
- size);
- } else {
- kprintf(KINFO "\t BAR#%d (I/O) %p [%d]\n",
- i,
- PCI_BAR_ADDR_IO(bar),
- size);
- }
- }
-#endif
- if (pos->msi_loc) {
- kprintf(KINFO "\t MSI supported (@%xh)\n", pos->msi_loc);
- }
- }
-}
-
size_t
-pci_bar_sizing(struct pci_device* dev, uint32_t* bar_out, uint32_t bar_num)
+pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
{
pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));
if (!bar) {
return ~sized + 1;
}
-void
-pci_setup_msi(struct pci_device* device, int vector)
-{
- // Dest: APIC#0, Physical Destination, No redirection
- uint32_t msi_addr = (__APIC_BASE_PADDR);
-
- // Edge trigger, Fixed delivery
- uint32_t msi_data = vector;
-
- pci_write_cspace(
- device->cspace_base, PCI_MSI_ADDR(device->msi_loc), msi_addr);
-
- pci_reg_t reg1 = pci_read_cspace(device->cspace_base, device->msi_loc);
- pci_reg_t msg_ctl = reg1 >> 16;
-
- int offset = !!(msg_ctl & MSI_CAP_64BIT) * 4;
- pci_write_cspace(device->cspace_base,
- PCI_MSI_DATA(device->msi_loc, offset),
- msi_data & 0xffff);
-
- if ((msg_ctl & MSI_CAP_MASK)) {
- pci_write_cspace(
- device->cspace_base, PCI_MSI_MASK(device->msi_loc, offset), 0);
- }
-
- // manipulate the MSI_CTRL to allow device using MSI to request service.
- reg1 = ((((reg1 >> 16) & ~0x70) | MSI_CAP_ENABLE) << 16) | (reg1 & 0xffff);
- pci_write_cspace(device->cspace_base, device->msi_loc, reg1);
-}
-
struct pci_device*
-pci_get_device_by_id(uint16_t vendorId, uint16_t deviceId)
+pci_get_device_by_id(u16_t vendorId, u16_t deviceId)
{
- uint32_t dev_info = vendorId | (deviceId << 16);
+ u32_t dev_info = vendorId | (deviceId << 16);
struct pci_device *pos, *n;
llist_for_each(pos, n, &pci_devices, dev_chain)
{
}
struct pci_device*
-pci_get_device_by_class(uint32_t class)
+pci_get_device_by_class(u32_t class)
{
struct pci_device *pos, *n;
llist_for_each(pos, n, &pci_devices, dev_chain)
return NULL;
}
+/*
+ * TwiFS read hook: dump the device's 256-byte standard PCI configuration
+ * space into the mapping buffer, one pci_reg_t at a time.
+ * NOTE(review): assumes map->buffer holds at least 256 bytes - confirm
+ * against the twimap buffer allocation contract.
+ */
+static void
+__pci_read_cspace(struct twimap* map)
+{
+    struct pci_device* pcidev = (struct pci_device*)(map->data);
+
+    for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
+        *(pci_reg_t*)(map->buffer + i) =
+          pci_read_cspace(pcidev->cspace_base, i);
+    }
+
+    map->size_acc = 256;
+}
+
+/*---------- TwiFS interface definition ----------*/
+
+/* TwiFS read hook: print the device revision extracted from class_info. */
+static void
+__pci_read_revid(struct twimap* map)
+{
+    int class = twimap_data(map, struct pci_device*)->class_info;
+    twimap_printf(map, "0x%x", PCI_DEV_REV(class));
+}
+
+/* TwiFS read hook: print the PCI class code extracted from class_info. */
+static void
+__pci_read_class(struct twimap* map)
+{
+    int class = twimap_data(map, struct pci_device*)->class_info;
+    twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
+}
+
+/* TwiFS read hook: print "vendor:device" from the cached devinfo dword. */
+static void
+__pci_read_devinfo(struct twimap* map)
+{
+    int devinfo = twimap_data(map, struct pci_device*)->device_info;
+    twimap_printf(
+      map, "%x:%x", PCI_DEV_VENDOR(devinfo), PCI_DEV_DEVID(devinfo));
+}
+
+/*
+ * TwiFS read hook: print a one-line description of BAR[map->index]
+ * (base, size, mmio/io, prefetchability).  The index is advanced by the
+ * companion __pci_bar_gonext iterator.
+ */
+static void
+__pci_bar_read(struct twimap* map)
+{
+    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
+    int bar_index = twimap_index(map, int);
+
+    struct pci_base_addr* bar = &pcidev->bar[bar_index];
+
+    // A BAR with neither base nor size was never populated by sizing.
+    if (!bar->start && !bar->size) {
+        twimap_printf(map, "[%d] not present \n", bar_index);
+        return;
+    }
+
+    twimap_printf(
+      map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);
+
+    if ((bar->type & BAR_TYPE_MMIO)) {
+        twimap_printf(map, "mmio");
+        if ((bar->type & BAR_TYPE_CACHABLE)) {
+            twimap_printf(map, ", prefetchable");
+        }
+    } else {
+        twimap_printf(map, "io");
+    }
+
+    twimap_printf(map, "\n");
+}
+
+/*
+ * TwiFS iterator hook for the io_bases mapping: advance through BAR
+ * indices 0..5; returns 0 once the last BAR has been visited.
+ */
+static int
+__pci_bar_gonext(struct twimap* map)
+{
+    if (twimap_index(map, int) >= 5) {
+        return 0;
+    }
+    map->index += 1;
+    return 1;
+}
+
+/*
+ * TwiFS read hook: print the bound driver's class tuple
+ * (fn_grp:device.variant).  Produces no output when the device has no
+ * driver bound.
+ */
+static void
+__pci_read_binding(struct twimap* map)
+{
+    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
+    struct device_def* devdef = pcidev->binding.def;
+    if (!devdef) {
+        return;
+    }
+
+    twimap_printf(map,
+                  "%xh:%xh.%d",
+                  devdef->class.fn_grp,
+                  devdef->class.device,
+                  devdef->class.variant);
+}
+
+/*
+ * TwiFS read hook with a deliberate side effect: reading the "rescan"
+ * node triggers a full PCI bus rescan.  Emits no file content.
+ */
+static void
+__pci_trigger_bus_rescan(struct twimap* map)
+{
+    pci_scan();
+}
+
void
-pci_init()
-{
- llist_init_head(&pci_devices);
- acpi_context* acpi = acpi_get_context();
- assert_msg(acpi, "ACPI not initialized.");
- if (acpi->mcfg.alloc_num) {
- // PCIe Enhanced Configuration Mechanism is supported.
- // TODO: support PCIe addressing mechanism
- }
- // Otherwise, fallback to use legacy PCI 3.0 method.
- pci_probe();
-}
\ No newline at end of file
+pci_build_fsmapping()
+{
+    /*
+     * Build the TwiFS tree for PCI: a top-level "pci" directory with a
+     * side-effecting "rescan" node, plus one sub-directory per discovered
+     * device (named by location) exposing config/revision/class/binding/
+     * io_bases read-only mappings.  Registered below as a TwiFS plugin.
+     */
+    struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
+    struct pci_device *pos, *n;
+    struct twimap* map;
+
+    map = twifs_mapping(pci_class, NULL, "rescan");
+    map->read = __pci_trigger_bus_rescan;
+
+    llist_for_each(pos, n, &pci_devices, dev_chain)
+    {
+        pci_dev = twifs_dir_node(pci_class, "%x", pos->loc);
+
+        map = twifs_mapping(pci_dev, pos, "config");
+        map->read = __pci_read_cspace;
+
+        map = twifs_mapping(pci_dev, pos, "revision");
+        map->read = __pci_read_revid;
+
+        map = twifs_mapping(pci_dev, pos, "class");
+        map->read = __pci_read_class;
+
+        map = twifs_mapping(pci_dev, pos, "binding");
+        map->read = __pci_read_binding;
+
+        map = twifs_mapping(pci_dev, pos, "io_bases");
+        map->read = __pci_bar_read;
+        map->go_next = __pci_bar_gonext;
+    }
+}
+EXPORT_TWIFS_PLUGIN(pci_devs, pci_build_fsmapping);
+
+/*---------- PCI 3.0 HBA device definition ----------*/
+
+/*
+ * device_def init hook for the PCI HBA: create the "pci" device category
+ * and run the initial bus scan.  Always returns 0 (success).
+ */
+static int
+pci_load_devices(struct device_def* def)
+{
+    pcidev_cat = device_addcat(NULL, "pci");
+
+    pci_scan();
+
+    return 0;
+}
+
+/*
+ * Attach a driver-created device object to this PCI device: record it
+ * both as the underlay of the generic device node and as the binding
+ * instance.  devobj's concrete type is driver-defined (opaque here).
+ */
+void
+pci_bind_instance(struct pci_device* pcidev, void* devobj)
+{
+    pcidev->dev.underlay = devobj;
+    pcidev->binding.dev = devobj;
+}
+
+/*
+ * Device definition for the PCI 3.0 host bus adapter itself; its init
+ * hook (pci_load_devices) performs the initial enumeration.  Exported
+ * to run in the post-stage of device loading.
+ */
+static struct device_def pci_def = {
+    .name = "pci3.0-hba",
+    .class = DEVCLASS(DEVIF_SOC, DEVFN_BUSIF, DEV_PCI),
+    .init = pci_load_devices
+};
+EXPORT_DEVICE(pci3hba, &pci_def, load_poststage);