* @copyright Copyright (c) 2022
*
*/
-#include <hal/acpi/acpi.h>
-#include <hal/apic.h>
#include <hal/pci.h>
+#include <sys/pci_hba.h>
+
+#include <klibc/string.h>
#include <lunaix/fs/twifs.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/spike.h>
LOG_MODULE("PCI")
-static struct llist_header pci_devices;
+static DEFINE_LLIST(pci_devices);
void
pci_probe_msi_info(struct pci_device* device);
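+// Match a probed function against the registered PCI device definitions:
+// the class code must match exactly, while vendor and device ids are
+// compared under the definition's bitmask (all-ones acts as a wildcard).
+// On a match, allocate a pci_device and hand it to the definition's
+// init_for() hook.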
+static struct pci_device*
+pci_create_device(ptr_t pci_base, int devinfo)
+{
+ pci_reg_t class = pci_read_cspace(pci_base, 0x8);
+ struct hbucket* bucket = device_definitions_byif(DEVIF_PCI);
+
+ u32_t devid = PCI_DEV_DEVID(devinfo);
+ u32_t vendor = PCI_DEV_VENDOR(devinfo);
+
+ kappendf(".%x:%x, ", vendor, devid);
+
+ struct pci_device_def *pos, *n;
+ hashtable_bucket_foreach(bucket, pos, n, devdef.hlist_if)
+ {
+ if (pos->dev_class != PCI_DEV_CLASS(class)) {
+ continue;
+ }
+
+ int result = (pos->dev_vendor & vendor) == vendor &&
+ (pos->dev_id & devid) == devid;
+
+ if (result) {
+ goto found;
+ }
+ }
+
+ kappendf(KWARN "unknown device\n");
+
+ return NULL;
+
+found:
+ pci_reg_t intr = pci_read_cspace(pci_base, 0x3c);
+
+ struct pci_device* device = vzalloc(sizeof(struct pci_device));
+ device->class_info = class;
+ device->device_info = devinfo;
+ device->cspace_base = pci_base;
+ device->intr_info = intr;
+
+ device_prepare(&device->dev, &pos->devdef.class);
+
+ pci_probe_msi_info(device);
+ pci_probe_bar_info(device);
+
+ kappendf("%s (dev.%x:%x:%x) \n",
+ pos->devdef.name,
+ pos->devdef.class.meta,
+ pos->devdef.class.device,
+ pos->devdef.class.variant);
+
+ if (!pos->devdef.init_for) {
+ kappendf(KERROR "bad def\n");
+ goto fail;
+ }
+
+ int errno = pos->devdef.init_for(&pos->devdef, &device->dev);
+ if (errno) {
+ kappendf(KERROR "failed (e=%d)\n", errno);
+ goto fail;
+ }
+
+ llist_append(&pci_devices, &device->dev_chain);
+
+ return device;
+
+fail:
+ vfree(device);
+ return NULL;
+}
+
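+// Probe a single bus:device:function; absent functions read back an
+// all-ones vendor id and are skipped.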
void
pci_probe_device(int bus, int dev, int funct)
{
- uint32_t base = PCI_ADDRESS(bus, dev, funct);
+ u32_t base = PCI_ADDRESS(bus, dev, funct);
pci_reg_t reg1 = pci_read_cspace(base, 0);
    // vendor == 0xffff means the device is not present
    if (PCI_DEV_VENDOR(reg1) == 0xffff) {
        return;
    }
- pci_reg_t intr = pci_read_cspace(base, 0x3c);
- pci_reg_t class = pci_read_cspace(base, 0x8);
-
- struct pci_device* device = valloc(sizeof(struct pci_device));
- *device = (struct pci_device){ .cspace_base = base,
- .class_info = class,
- .device_info = reg1,
- .intr_info = intr };
-
- pci_probe_msi_info(device);
+ kprintf("pci.%d:%d:%d", bus, dev, funct);
- llist_append(&pci_devices, &device->dev_chain);
+ pci_create_device(base, reg1);
}
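+// Brute-force scan: enumerate all 256 buses and 32 device slots,
+// probing function 0 of each slot.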
void
-pci_probe()
+pci_scan()
{
-    // Brute-force scan of every PCI device
-    // XXX: although there can be up to 256 PCI buses, scanning only bus #0 is enough for now
- for (int bus = 0; bus < 1; bus++) {
+ for (int bus = 0; bus < 256; bus++) {
for (int dev = 0; dev < 32; dev++) {
pci_probe_device(bus, dev, 0);
}
}
}
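+// Decode the six base address registers: record each region's start,
+// size, and whether it is MMIO (optionally prefetchable) or port I/O.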
+void
+pci_probe_bar_info(struct pci_device* device)
+{
+ u32_t bar;
+ struct pci_base_addr* ba;
+ for (size_t i = 0; i < 6; i++) {
+ ba = &device->bar[i];
+ ba->size = pci_bar_sizing(device, &bar, i + 1);
+ if (PCI_BAR_MMIO(bar)) {
+ ba->start = PCI_BAR_ADDR_MM(bar);
+ ba->type |= PCI_BAR_CACHEABLE(bar) ? BAR_TYPE_CACHABLE : 0;
+ ba->type |= BAR_TYPE_MMIO;
+ } else {
+ ba->start = PCI_BAR_ADDR_IO(bar);
+ }
+ }
+}
+
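+// Locate the MSI capability by walking the device's capability list.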
void
pci_probe_msi_info(struct pci_device* device)
{
    // bit 4 of the status register indicates a capability list is present
    pci_reg_t status = pci_read_cspace(device->cspace_base, 0x4) >> 16;
    if (!(status & 0x10)) {
        device->msi_loc = 0;
        return;
    }
pci_reg_t cap_ptr = pci_read_cspace(device->cspace_base, 0x34) & 0xff;
- uint32_t cap_hdr;
+ u32_t cap_hdr;
    while (cap_ptr) {
        cap_hdr = pci_read_cspace(device->cspace_base, cap_ptr);
        // capability ID 0x05: MSI
        if ((cap_hdr & 0xff) == 0x5) {
            device->msi_loc = cap_ptr;
            return;
        }
        // follow the next-pointer field to the next capability
        cap_ptr = (cap_hdr >> 8) & 0xff;
    }
}
-#define PCI_PRINT_BAR_LISTING
-
-int
-__pci_read_cspace(struct v_inode* inode, void* buffer, size_t len, size_t fpos)
-{
- if (len < 256) {
- return ERANGE;
- }
-
- struct twifs_node* node = (struct twifs_node*)(inode->data);
- struct pci_device* pcidev = (struct pci_device*)(node->data);
-
- for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
- *(pci_reg_t*)(buffer + i) = pci_read_cspace(pcidev->cspace_base, i);
- }
-
- return 256;
-}
-
-void
-pci_build_fsmapping()
-{
- struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
- struct pci_device *pos, *n;
- llist_for_each(pos, n, &pci_devices, dev_chain)
- {
- pci_dev = twifs_dir_node(pci_class,
- "B%d:D%d:F%d.%x:%x",
- PCI_BUS_NUM(pos->cspace_base),
- PCI_SLOT_NUM(pos->cspace_base),
- PCI_FUNCT_NUM(pos->cspace_base),
- PCI_DEV_VENDOR(pos->device_info),
- PCI_DEV_DEVID(pos->device_info));
- struct twifs_node* fnode = twifs_file_node(pci_dev, "cspace");
- fnode->data = pos;
- fnode->ops.read = __pci_read_cspace;
- }
-}
-
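+// Standard BAR sizing probe: write all-ones to the BAR, read back the
+// address mask, restore the original value; the region size is the
+// two's complement of that mask.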
size_t
-pci_bar_sizing(struct pci_device* dev, uint32_t* bar_out, uint32_t bar_num)
+pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
{
pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));
    if (!bar) {
        *bar_out = 0;
        return 0;
    }

    // write all-ones, read back the encoded size mask, then restore the BAR
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), 0xffffffff);
    pci_reg_t sized =
      pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num)) & ~0x1;
    if (PCI_BAR_MMIO(bar)) {
        sized = PCI_BAR_ADDR_MM(sized);
    }
    *bar_out = bar;
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), bar);
    return ~sized + 1;
}
-void
-pci_setup_msi(struct pci_device* device, int vector)
-{
- // Dest: APIC#0, Physical Destination, No redirection
- uint32_t msi_addr = (__APIC_BASE_PADDR);
-
- // Edge trigger, Fixed delivery
- uint32_t msi_data = vector;
-
- pci_write_cspace(
- device->cspace_base, PCI_MSI_ADDR(device->msi_loc), msi_addr);
-
- pci_reg_t reg1 = pci_read_cspace(device->cspace_base, device->msi_loc);
- pci_reg_t msg_ctl = reg1 >> 16;
-
- int offset = !!(msg_ctl & MSI_CAP_64BIT) * 4;
- pci_write_cspace(device->cspace_base,
- PCI_MSI_DATA(device->msi_loc, offset),
- msi_data & 0xffff);
-
- if ((msg_ctl & MSI_CAP_MASK)) {
- pci_write_cspace(
- device->cspace_base, PCI_MSI_MASK(device->msi_loc, offset), 0);
- }
-
- // manipulate the MSI_CTRL to allow device using MSI to request service.
- reg1 = (reg1 & 0xff8fffff) | 0x10000;
- pci_write_cspace(device->cspace_base, device->msi_loc, reg1);
-}
-
struct pci_device*
-pci_get_device_by_id(uint16_t vendorId, uint16_t deviceId)
+pci_get_device_by_id(u16_t vendorId, u16_t deviceId)
{
- uint32_t dev_info = vendorId | (deviceId << 16);
+ u32_t dev_info = vendorId | (deviceId << 16);
struct pci_device *pos, *n;
llist_for_each(pos, n, &pci_devices, dev_chain)
    {
        if (pos->device_info == dev_info) {
            return pos;
        }
    }

    return NULL;
}
struct pci_device*
-pci_get_device_by_class(uint32_t class)
+pci_get_device_by_class(u32_t class)
{
struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
    {
        if (PCI_DEV_CLASS(pos->class_info) == class) {
            return pos;
        }
    }

    return NULL;
}
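+// TwiFS "config" mapping: dump the 256-byte configuration space of the
+// device, one dword at a time.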
+static void
+__pci_read_cspace(struct twimap* map)
+{
+ struct pci_device* pcidev = (struct pci_device*)(map->data);
+
+ for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
+ *(pci_reg_t*)(map->buffer + i) =
+ pci_read_cspace(pcidev->cspace_base, i);
+ }
+
+ map->size_acc = 256;
+}
+
+/*---------- TwiFS interface definition ----------*/
+
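+// Attribute readers exposing the revision id and class code.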
+static void
+__pci_read_revid(struct twimap* map)
+{
+ int class = twimap_data(map, struct pci_device*)->class_info;
+ twimap_printf(map, "0x%x", PCI_DEV_REV(class));
+}
+
+static void
+__pci_read_class(struct twimap* map)
+{
+ int class = twimap_data(map, struct pci_device*)->class_info;
+ twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
+}
+
+static void
+__pci_bar_read(struct twimap* map)
+{
+ struct pci_device* pcidev = twimap_data(map, struct pci_device*);
+ int bar_index = twimap_index(map, int);
+
+ struct pci_base_addr* bar = &pcidev->bar[bar_index];
+
+ if (!bar->start && !bar->size) {
+ twimap_printf(map, "[%d] not present \n", bar_index);
+ return;
+ }
+
+ twimap_printf(
+ map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);
+
+ if ((bar->type & BAR_TYPE_MMIO)) {
+ twimap_printf(map, "mmio");
+ if ((bar->type & BAR_TYPE_CACHABLE)) {
+ twimap_printf(map, ", prefetchable");
+ }
+ } else {
+ twimap_printf(map, "io");
+ }
+
+ twimap_printf(map, "\n");
+}
+
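+// Iterator for the io_bases mapping: step through BAR indices 0..5.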
+static int
+__pci_bar_gonext(struct twimap* map)
+{
+ if (twimap_index(map, int) >= 5) {
+ return 0;
+ }
+ map->index += 1;
+ return 1;
+}
+
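+// Publish each probed device as a TwiFS directory (bus:dev:fn.vendor:devid)
+// containing its config dump, revision, class code and BAR listing.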
void
-pci_init()
+pci_build_fsmapping()
{
- llist_init_head(&pci_devices);
- acpi_context* acpi = acpi_get_context();
- assert_msg(acpi, "ACPI not initialized.");
- if (acpi->mcfg.alloc_num) {
- // PCIe Enhanced Configuration Mechanism is supported.
- // TODO: support PCIe addressing mechanism
- }
- // Otherwise, fallback to use legacy PCI 3.0 method.
- pci_probe();
-
- pci_build_fsmapping();
-}
\ No newline at end of file
+ struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
+ struct pci_device *pos, *n;
+ struct twimap* map;
+ llist_for_each(pos, n, &pci_devices, dev_chain)
+ {
+ pci_dev = twifs_dir_node(pci_class,
+ "%.2d:%.2d:%.2d.%.4x:%.4x",
+ PCI_BUS_NUM(pos->cspace_base),
+ PCI_SLOT_NUM(pos->cspace_base),
+ PCI_FUNCT_NUM(pos->cspace_base),
+ PCI_DEV_VENDOR(pos->device_info),
+ PCI_DEV_DEVID(pos->device_info));
+
+ map = twifs_mapping(pci_dev, pos, "config");
+ map->read = __pci_read_cspace;
+
+ map = twifs_mapping(pci_dev, pos, "revision");
+ map->read = __pci_read_revid;
+
+ map = twifs_mapping(pci_dev, pos, "class");
+ map->read = __pci_read_class;
+
+ map = twifs_mapping(pci_dev, pos, "io_bases");
+ map->read = __pci_bar_read;
+ map->go_next = __pci_bar_gonext;
+ }
+}
+EXPORT_TWIFS_PLUGIN(pci_devs, pci_build_fsmapping);
+
+/*---------- PCI 3.0 HBA device definition ----------*/
+
+static int
+pci_load_devices(struct device_def* def)
+{
+ pci_scan();
+
+ return 0;
+}
+
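+// The PCI bus itself is registered as a host bus adaptor device; the
+// bus scan runs when the driver framework initialises it at load_poststage.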
+static struct device_def pci_def = {
+ .name = "pci3.0-hba",
+ .class = DEVCLASS(DEVIF_SOC, DEVFN_BUSIF, DEV_BUS, 0),
+ .init = pci_load_devices
+};
+EXPORT_DEVICE(pci3hba, &pci_def, load_poststage);