3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief A software implementation of PCI Local Bus Specification Revision 3.0
8 * @copyright Copyright (c) 2022
11 #include <hal/acpi/acpi.h>
14 #include <klibc/string.h>
15 #include <lunaix/fs/twifs.h>
16 #include <lunaix/mm/valloc.h>
17 #include <lunaix/spike.h>
18 #include <lunaix/syslog.h>
22 static DEFINE_LLIST(pci_devices);
23 static DEFINE_LLIST(pci_drivers);
26 pci_probe_msi_info(struct pci_device* device);
/**
 * @brief Probe a single PCI function at (bus, dev, funct) via config-space
 * reads. On a valid function: allocate a pci_device record, probe its MSI
 * capability and BARs, append it to the global pci_devices list, and try to
 * bind a registered driver. Recurses over sibling functions when function 0
 * reports a multi-function header type.
 */
pci_probe_device(int bus, int dev, int funct)
    u32_t base = PCI_ADDRESS(bus, dev, funct);
    pci_reg_t reg1 = pci_read_cspace(base, 0);

    // A vendor ID of 0xffff means this device/function does not exist.
    if (PCI_DEV_VENDOR(reg1) == PCI_VENDOR_INVLD) {

    // Header type is byte 2 of config register 0xc.
    pci_reg_t hdr_type = pci_read_cspace(base, 0xc);
    hdr_type = (hdr_type >> 16) & 0xff;

    // QEMU's ICH9/Q35 implementation seems a bit off: it sets bit 7
    // (multi-function) of the header type on *every* function of a
    // multi-function device, whereas VirtualBox does not have this quirk.
    if ((hdr_type & 0x80) && funct == 0) {
        hdr_type = hdr_type & ~0x80;
        // Probe the remaining functions of this multi-function device.
        // NOTE(review): PCI defines function numbers 0-7; `i < 7` never
        // probes function #7 — confirm whether `i < 8` was intended.
        for (int i = 1; i < 7; i++) {
            pci_probe_device(bus, dev, i);

    if (hdr_type != PCI_TDEV) {
        // XXX: all bridge devices (e.g. PCI-PCI bridges, CardBus bridges)
        // are ignored for now.

    // Interrupt line/pin (0x3c) and class/revision (0x8) registers.
    pci_reg_t intr = pci_read_cspace(base, 0x3c);
    pci_reg_t class = pci_read_cspace(base, 0x8);

    struct pci_device* device = vzalloc(sizeof(struct pci_device));
    *device = (struct pci_device){ .cspace_base = base,

    pci_probe_msi_info(device);
    pci_probe_bar_info(device);

    llist_append(&pci_devices, &device->dev_chain);

    if (!pci_bind_driver(device)) {
        kprintf(KWARN "dev.%d:%d:%d %x:%x unknown device\n",

    kprintf("dev.%d:%d:%d %x:%x %s\n",
            device->driver.type->name);
    // XXX: although there can be up to 256 PCI buses, considering only
    // bus #0 is enough for now.
    // NOTE(review): the loop below nonetheless walks all 256 buses —
    // comment and code disagree; confirm which is intended.
    for (int bus = 0; bus < 256; bus++) {
        for (int dev = 0; dev < 32; dev++) {
            pci_probe_device(bus, dev, 0);
/**
 * @brief Size and decode all six BARs of `device`, filling device->bar[i]
 * with the base address, size, and type flags (MMIO vs. port I/O,
 * plus cacheable/prefetchable for MMIO BARs).
 */
pci_probe_bar_info(struct pci_device* device)
    struct pci_base_addr* ba;
    for (size_t i = 0; i < 6; i++) {
        ba = &device->bar[i];
        // pci_bar_sizing returns the BAR's size and passes the raw BAR
        // value back through `bar` (declared in an elided line);
        // BAR numbers are 1-based in pci_bar_sizing.
        ba->size = pci_bar_sizing(device, &bar, i + 1);
        if (PCI_BAR_MMIO(bar)) {
            ba->start = PCI_BAR_ADDR_MM(bar);
            ba->type |= PCI_BAR_CACHEABLE(bar) ? BAR_TYPE_CACHABLE : 0;
            ba->type |= BAR_TYPE_MMIO;
        // (else branch, elided brace: port-mapped I/O BAR)
            ba->start = PCI_BAR_ADDR_IO(bar);
/**
 * @brief Walk the device's capability list looking for the MSI capability
 * (capability ID 0x5) and record its config-space offset in
 * device->msi_loc. Returns early when the status register reports no
 * capability list.
 */
pci_probe_msi_info(struct pci_device* device)
    // Note that Virtualbox have to use ICH9 chipset for MSI support.
    // Qemu seems ok with default PIIX3, Bochs is pending to test...
    // See https://www.virtualbox.org/manual/ch03.html (section 3.5.1)
        pci_read_cspace(device->cspace_base, PCI_REG_STATUS_CMD) >> 16;

    // Status bit 4: capability list present.
    if (!(status & 0x10)) {

    // Capabilities pointer register (offset 0x34); only the low byte is
    // the offset.
    pci_reg_t cap_ptr = pci_read_cspace(device->cspace_base, 0x34) & 0xff;

        cap_hdr = pci_read_cspace(device->cspace_base, cap_ptr);
        // Capability ID 0x5 == MSI.
        if ((cap_hdr & 0xff) == 0x5) {
            device->msi_loc = cap_ptr;
        // Next-capability pointer lives in bits 15:8 of the header.
        cap_ptr = (cap_hdr >> 8) & 0xff;
/**
 * @brief twifs read callback: dump the first 256 bytes of the device's PCI
 * configuration space into the map buffer, one pci_reg_t at a time.
 */
__pci_read_cspace(struct twimap* map)
    struct pci_device* pcidev = (struct pci_device*)(map->data);

    for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
        *(pci_reg_t*)(map->buffer + i) =
          pci_read_cspace(pcidev->cspace_base, i);
/* twifs read callback: print the device's revision ID (from the class
 * register) as hex. */
__pci_read_revid(struct twimap* map)
    int class = twimap_data(map, struct pci_device*)->class_info;
    twimap_printf(map, "0x%x", PCI_DEV_REV(class));
/* twifs read callback: print the device's class code as hex. */
__pci_read_class(struct twimap* map)
    int class = twimap_data(map, struct pci_device*)->class_info;
    twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
/**
 * @brief twifs read callback: describe one BAR, selected by the map's
 * iteration index — base, size, and whether it is MMIO (optionally
 * prefetchable) or port I/O.
 */
__pci_bar_read(struct twimap* map)
    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
    int bar_index = twimap_index(map, int);

    struct pci_base_addr* bar = &pcidev->bar[bar_index];

    // A BAR with zero base and zero size is unimplemented.
    if (!bar->start && !bar->size) {
        twimap_printf(map, "[%d] not present \n", bar_index);

      map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);

    if ((bar->type & BAR_TYPE_MMIO)) {
        twimap_printf(map, "mmio");
        if ((bar->type & BAR_TYPE_CACHABLE)) {
            twimap_printf(map, ", prefetchable");
    // (else branch, elided brace: port-mapped I/O BAR)
        twimap_printf(map, "io");

    twimap_printf(map, "\n");
/* twifs iterator callback: advance to the next BAR index; stop after
 * BAR #5 (the last of the six BARs). */
__pci_bar_gonext(struct twimap* map)
    if (twimap_index(map, int) >= 5) {
/**
 * @brief Expose every probed PCI device under the twifs "pci" directory:
 * one sub-directory per device, named "bus:slot:fn.vendor:devid",
 * containing "config", "revision", "class" and "io_bases" mapping files.
 */
pci_build_fsmapping()
    struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
    struct pci_device *pos, *n;

    llist_for_each(pos, n, &pci_devices, dev_chain)
        pci_dev = twifs_dir_node(pci_class,
                                 "%.2d:%.2d:%.2d.%.4x:%.4x",
                                 PCI_BUS_NUM(pos->cspace_base),
                                 PCI_SLOT_NUM(pos->cspace_base),
                                 PCI_FUNCT_NUM(pos->cspace_base),
                                 PCI_DEV_VENDOR(pos->device_info),
                                 PCI_DEV_DEVID(pos->device_info));

        // Raw 256-byte configuration-space dump.
        map = twifs_mapping(pci_dev, pos, "config");
        map->read = __pci_read_cspace;

        map = twifs_mapping(pci_dev, pos, "revision");
        map->read = __pci_read_revid;

        map = twifs_mapping(pci_dev, pos, "class");
        map->read = __pci_read_class;

        // Iterable listing of the six BARs.
        map = twifs_mapping(pci_dev, pos, "io_bases");
        map->read = __pci_bar_read;
        map->go_next = __pci_bar_gonext;
/**
 * @brief Size BAR #bar_num using the standard write-all-ones probe:
 * save the original BAR value, write 0xffffffff, read back the size mask,
 * then restore the original value. The raw BAR value is also handed back
 * through bar_out (in an elided line) and the decoded size is returned.
 */
pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
    pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));

    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), 0xffffffff);
        // Bit 0 is the I/O-space indicator, not part of the size mask.
        pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num)) & ~0x1;
    if (PCI_BAR_MMIO(bar)) {
        // Strip the MMIO type/prefetch bits before deriving the size.
        sized = PCI_BAR_ADDR_MM(sized);
    // Restore the BAR that the sizing probe clobbered.
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), bar);
/**
 * @brief Program the device's MSI capability to deliver `vector`:
 * write the message address and data registers, clear the per-vector mask
 * when the capability supports masking, and finally set the MSI enable
 * bit in the message-control field.
 */
pci_setup_msi(struct pci_device* device, int vector)
    // Dest: APIC#0, Physical Destination, No redirection
    u32_t msi_addr = (__APIC_BASE_PADDR);

    // Edge trigger, Fixed delivery
    u32_t msi_data = vector;

        device->cspace_base, PCI_MSI_ADDR(device->msi_loc), msi_addr);

    pci_reg_t reg1 = pci_read_cspace(device->cspace_base, device->msi_loc);
    pci_reg_t msg_ctl = reg1 >> 16;

    // A 64-bit-capable MSI has an upper-address dword, shifting the data
    // register up by 4 bytes.
    int offset = !!(msg_ctl & MSI_CAP_64BIT) * 4;
    pci_write_cspace(device->cspace_base,
                     PCI_MSI_DATA(device->msi_loc, offset),

    // Per-vector masking supported: unmask everything.
    if ((msg_ctl & MSI_CAP_MASK)) {
            device->cspace_base, PCI_MSI_MASK(device->msi_loc, offset), 0);

    // manipulate the MSI_CTRL to allow device using MSI to request service.
    reg1 = (reg1 & 0xff8fffff) | 0x10000;
    pci_write_cspace(device->cspace_base, device->msi_loc, reg1);
/**
 * @brief Find a probed device by exact vendor/device ID pair.
 * The packing matches config register 0 as stored in device_info:
 * vendor ID in the low 16 bits, device ID in the high 16 bits.
 */
pci_get_device_by_id(u16_t vendorId, u16_t deviceId)
    u32_t dev_info = vendorId | (deviceId << 16);
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
        if (pos->device_info == dev_info) {
/* Find the first probed device whose class code equals `class`. */
pci_get_device_by_class(u32_t class)
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
        if (PCI_DEV_CLASS(pos->class_info) == class) {
/**
 * @brief Register a PCI driver, matched either by vendor/device pair or by
 * class code (see pci_bind_driver).
 *
 * NOTE(review): dev_info is packed here as (vendor << 16) | devid, but the
 * probed device_info and pci_get_device_by_id both pack the vendor into
 * the LOW 16 bits (vendorId | (deviceId << 16)), and pci_bind_driver
 * compares the two values directly — one of the packings looks inverted;
 * confirm against the struct layout.
 */
pci_add_driver(const char* name,
    struct pci_driver* pci_drv = valloc(sizeof(*pci_drv));
    *pci_drv = (struct pci_driver){ .create_driver = init,
                                    .dev_info = (vendor << 16) | devid,
                                    .dev_class = class };

    // NOTE(review): strncpy does not NUL-terminate when
    // strlen(name) >= PCI_DRV_NAME_LEN — verify callers keep names short,
    // or terminate explicitly.
    strncpy(pci_drv->name, name, PCI_DRV_NAME_LEN);

    llist_append(&pci_drivers, &pci_drv->drivers);
/**
 * @brief Try to bind `pci_dev` to a registered driver: an exact
 * vendor/device match is tried first, then a class-code match; the first
 * hit instantiates the driver via create_driver().
 */
pci_bind_driver(struct pci_device* pci_dev)
    struct pci_driver *pos, *n;
    llist_for_each(pos, n, &pci_drivers, drivers)
        // Exact vendor/device ID match takes priority.
        if (pos->dev_info == pci_dev->device_info) {

        // Otherwise fall back to matching by class code, when the driver
        // declares one.
        if (pos->dev_class) {
            if (pos->dev_class == PCI_DEV_CLASS(pci_dev->class_info)) {
                pci_dev->driver.type = pos;
                pci_dev->driver.instance = pos->create_driver(pci_dev);
    // (enclosing function header elided) PCI subsystem bring-up: choose
    // the configuration access mechanism, then publish devices to twifs.
    acpi_context* acpi = acpi_get_context();
    assert_msg(acpi, "ACPI not initialized.");
    // An MCFG allocation entry means ECAM (PCIe) config access exists.
    if (acpi->mcfg.alloc_num) {
        // PCIe Enhanced Configuration Mechanism is supported.
        // TODO: support PCIe addressing mechanism
    // Otherwise, fallback to use legacy PCI 3.0 method.

    pci_build_fsmapping();