/**
 * @author Lunaixsky (zelong56@gmail.com)
 * @brief A software implementation of the PCI Local Bus Specification,
 *        Revision 3.0
 *
 * @copyright Copyright (c) 2022
 */

#include <klibc/string.h>
#include <lunaix/fs/twifs.h>
#include <lunaix/mm/valloc.h>
#include <lunaix/spike.h>
#include <lunaix/syslog.h>

static DEFINE_LLIST(pci_devices);
static DECLARE_HASHTABLE(pci_devcache, 8);

static struct device_cat* pcidev_cat;
static struct device_def pci_def;

static void
pci_probe_msi_info(struct pci_device* device);

static void
pci_probe_bar_info(struct pci_device* device);

static void
pci_log_device(struct pci_device* pcidev)
{
    pciaddr_t loc = pcidev->loc;
    struct device_def* binddef = pcidev->binding.def;

    kprintf("pci.%03d:%02d:%02d, class=%p, vendor:dev=%04x:%04x",
            PCILOC_BUS(loc),
            PCILOC_DEV(loc),
            PCILOC_FN(loc),
            pcidev->class_info,
            PCI_DEV_VENDOR(pcidev->device_info),
            PCI_DEV_DEVID(pcidev->device_info));
}

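/*
 * Configuration space header fields consumed below (PCI LB Spec. Rev 3.0,
 * Section 6.1); offsets are into the 256-byte predefined header:
 *   0x00  [31:16] device ID       [15:0] vendor ID
 *   0x08  [31:8]  class code      [7:0]  revision ID
 *   0x0c  [23:16] header type
 *   0x3c  [15:8]  interrupt pin   [7:0]  interrupt line
 */
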
static struct pci_device*
pci_create_device(pciaddr_t loc, ptr_t pci_base, int devinfo)
{
    pci_reg_t class = pci_read_cspace(pci_base, 0x8);

    u32_t devid = PCI_DEV_DEVID(devinfo);
    u32_t vendor = PCI_DEV_VENDOR(devinfo);
    pci_reg_t intr = pci_read_cspace(pci_base, 0x3c);

    struct pci_device* device = vzalloc(sizeof(struct pci_device));
    device->class_info = class;
    device->device_info = devinfo;
    device->cspace_base = pci_base;
    device->intr_info = intr;

    device_create(&device->dev, dev_meta(pcidev_cat), DEV_IFSYS, NULL);

    pci_probe_msi_info(device);
    pci_probe_bar_info(device);

    llist_append(&pci_devices, &device->dev_chain);
    register_device(&device->dev, &pci_def.class, "%x", loc);
    pci_def.class.variant++;

    return device;
}

static int
pci_bind_definition(struct pci_device_def* pcidef, bool* more)
{
    if (!pcidef->devdef.bind) {
        ERROR("pcidev %xh:%xh.%d is unbindable",
              pcidef->devdef.class.fn_grp,
              pcidef->devdef.class.device,
              pcidef->devdef.class.variant);
        return EINVAL;
    }

    int errno = 0;
    bool matched = false;
    bool bind_attempted = false;

    struct device_def* devdef;
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
    {
        if (binded_pcidev(pos)) {
            continue;
        }

        assert(pcidef->test_compatibility);
        matched = pcidef->test_compatibility(pcidef, pos);
        if (!matched) {
            continue;
        }

        // another compatible device after a bind attempt: report that
        // there is more work, so the caller can run another pass.
        if (bind_attempted) {
            *more = true;
            break;
        }

        bind_attempted = true;
        devdef = &pcidef->devdef;
        errno = devdef->bind(devdef, &pos->dev);
        if (errno) {
            ERROR("pci_loc:%x, bind (%xh:%xh.%d) failed, e=%d",
                  pos->loc,
                  devdef->class.fn_grp,
                  devdef->class.device,
                  devdef->class.variant,
                  errno);
            continue;
        }

        pos->binding.def = &pcidef->devdef;
    }

    return errno;
}

int
pci_bind_definition_all(struct pci_device_def* pcidef)
{
    int e = 0;
    bool more = true;

    while (more) {
        more = false;
        if ((e = pci_bind_definition(pcidef, &more))) {
            break;
        }
    }

    return e;
}

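/*
 * Sketch of how a client driver hooks into the binder above; every name
 * prefixed `mydev_` is illustrative, not part of Lunaix:
 *
 *     static bool
 *     mydev_compat(struct pci_device_def* def, struct pci_device* pcidev)
 *     {
 *         return PCI_DEV_VENDOR(pcidev->device_info) == MYDEV_VENDOR
 *             && PCI_DEV_DEVID(pcidev->device_info) == MYDEV_DEVID;
 *     }
 *
 *     static struct pci_device_def mydev_def = {
 *         .devdef = { .bind = mydev_bind, ... },
 *         .test_compatibility = mydev_compat,
 *     };
 *
 *     // in the driver's init path:
 *     pci_bind_definition_all(&mydev_def);
 */
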
static void
pci_probe_device(pciaddr_t pci_loc)
{
    u32_t base = PCI_CFGADDR(pci_loc);
    pci_reg_t reg1 = pci_read_cspace(base, 0);

    // a vendor ID of 0xffff means the device does not exist
    if (PCI_DEV_VENDOR(reg1) == PCI_VENDOR_INVLD) {
        return;
    }

    pci_reg_t hdr_type = pci_read_cspace(base, 0xc);
    hdr_type = (hdr_type >> 16) & 0xff;

    // QEMU's ICH9/Q35 implementation seems to be a bit off: it sets bit 7
    // of the header type on *every* function of a multi-function device,
    // whereas VirtualBox does not have this quirk.
    if ((hdr_type & 0x80) && PCILOC_FN(pci_loc) == 0) {
        hdr_type = hdr_type & ~0x80;
        // probe the remaining functions of this multi-function device
        // (function numbers are 3 bits wide, i.e. 1..7)
        for (int i = 1; i < 8; i++) {
            pci_probe_device(pci_loc + i);
        }
    }

    struct pci_device *pos, *n;
    hashtable_hash_foreach(pci_devcache, pci_loc, pos, n, dev_cache)
    {
        if (pos->loc == pci_loc) {
            // already probed
            return;
        }
    }

    struct pci_device* pcidev = pci_create_device(pci_loc, base, reg1);

    pcidev->loc = pci_loc;
    hashtable_hash_in(pci_devcache, &pcidev->dev_cache, pci_loc);
    pci_log_device(pcidev);
}

/* Exhaustively scan every possible device location. (`pci_scan` is an
 * assumed name for this helper; the callers below use it consistently.) */
void
pci_scan()
{
    // visit function 0 of every bus:device; pci_probe_device() follows
    // up on the remaining functions of multi-function devices.
    for (u32_t loc = 0; loc < (pciaddr_t)-1; loc += 8) {
        pci_probe_device((pciaddr_t)loc);
    }
}

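/*
 * Address packing assumed by the scan above: pciaddr_t holds
 * [15:8] bus | [7:3] device | [2:0] function, so stepping by 8 walks
 * device-granular locations; e.g. loc 0x0820 is bus 8, device 4,
 * function 0.
 */
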
static void
pci_probe_bar_info(struct pci_device* device)
{
    u32_t bar;
    struct pci_base_addr* ba;
    for (size_t i = 0; i < PCI_BAR_COUNT; i++) {
        ba = &device->bar[i];
        ba->size = pci_bar_sizing(device, &bar, i + 1);
        if (PCI_BAR_MMIO(bar)) {
            ba->start = PCI_BAR_ADDR_MM(bar);
            ba->type |= PCI_BAR_CACHEABLE(bar) ? BAR_TYPE_CACHABLE : 0;
            ba->type |= BAR_TYPE_MMIO;
        } else {
            ba->start = PCI_BAR_ADDR_IO(bar);
        }
    }
}

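/*
 * BAR flag bits consulted above (PCI LB Spec. Rev 3.0, Section 6.2.5.1):
 *   bit 0     0 = memory space (MMIO), 1 = I/O space
 *   bits 2:1  memory type (00 = 32-bit, 10 = 64-bit)
 *   bit 3     prefetchable; surfaced here as BAR_TYPE_CACHABLE
 */
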
void
pci_setup_msi(struct pci_device* device, int vector)
{
    // PCI LB Spec. (Rev 3) Section 6.8 & 6.8.1

    ptr_t msi_addr = pci_get_msi_base();
    u32_t msi_data = pci_config_msi_data(vector);

    pci_reg_t reg1 = pci_read_cspace(device->cspace_base, device->msi_loc);
    pci_reg_t msg_ctl = reg1 >> 16;
    int offset_cap64 = !!(msg_ctl & MSI_CAP_64BIT) * 4;

    pci_write_cspace(device->cspace_base,
                     PCI_MSI_ADDR_LO(device->msi_loc),
                     (u32_t)msi_addr);

    if (offset_cap64) {
        // 64-bit capable function: program the upper message address too
        pci_write_cspace(device->cspace_base,
                         PCI_MSI_ADDR_HI(device->msi_loc),
                         (u64_t)msi_addr >> 32);
    }

    pci_write_cspace(device->cspace_base,
                     PCI_MSI_DATA(device->msi_loc, offset_cap64),
                     msi_data);

    if ((msg_ctl & MSI_CAP_MASK)) {
        // per-vector masking supported: unmask every vector
        pci_write_cspace(
          device->cspace_base, PCI_MSI_MASK(device->msi_loc, offset_cap64), 0);
    }

    // clear the multiple-message-enable field (request a single vector)
    // and set the MSI enable bit, allowing the device to request service
    // through MSI.
    reg1 = (reg1 & 0xff8fffff) | 0x10000;
    pci_write_cspace(device->cspace_base, device->msi_loc, reg1);
}

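/*
 * MSI capability layout programmed above (PCI LB Spec. Rev 3.0,
 * Section 6.8.1), relative to device->msi_loc:
 *   +0x00  [31:16] message control  [15:8] next ptr  [7:0] cap ID (0x05)
 *   +0x04  message address, lower 32 bits
 *   +0x08  message address, upper 32 bits (only when message control
 *          bit 7 "64-bit capable" is set; otherwise message data)
 *   +0x0c  message data (64-bit layout)
 *   +0x10  mask bits (only with per-vector masking, message control bit 8)
 */
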
static void
pci_probe_msi_info(struct pci_device* device)
{
    // Note that VirtualBox has to use the ICH9 chipset for MSI support.
    // QEMU seems fine with the default PIIX3; Bochs is yet to be tested.
    // See https://www.virtualbox.org/manual/ch03.html (section 3.5.1)

    pci_reg_t status =
      pci_read_cspace(device->cspace_base, PCI_REG_STATUS_CMD) >> 16;

    // status bit 4: a capability list is present
    if (!(status & 0x10)) {
        device->msi_loc = 0;
        return;
    }

    pci_reg_t cap_ptr = pci_read_cspace(device->cspace_base, 0x34) & 0xff;
    pci_reg_t cap_hdr;

    // walk the capability list until the MSI capability (ID 0x05) is found
    while (cap_ptr) {
        cap_hdr = pci_read_cspace(device->cspace_base, cap_ptr);
        if ((cap_hdr & 0xff) == 0x5) {
            device->msi_loc = cap_ptr;
            return;
        }
        cap_ptr = (cap_hdr >> 8) & 0xff;
    }
}

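/*
 * Typical use from a device driver (a sketch; vector allocation is out of
 * scope for this file):
 *
 *     if (pcidev->msi_loc) {
 *         // device is MSI capable
 *         pci_setup_msi(pcidev, vector);
 *     }
 */
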
static size_t
pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
{
    pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));
    if (!bar) {
        *bar_out = 0;
        return 0;
    }

    // write all ones, read back the size mask, then restore the BAR
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), 0xffffffff);
    pci_reg_t sized =
      pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num)) & ~0x1;
    if (PCI_BAR_MMIO(bar)) {
        sized = PCI_BAR_ADDR_MM(sized);
    }
    *bar_out = bar;
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), bar);

    return ~sized + 1;
}

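/*
 * Worked example of the sizing handshake above: a device exposing a 64KiB
 * MMIO region answers the all-ones write with 0xffff0000 (plus flag bits);
 * masking the flags and computing ~0xffff0000 + 1 yields 0x00010000,
 * i.e. 64KiB.
 */
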
struct pci_device*
pci_get_device_by_id(u16_t vendorId, u16_t deviceId)
{
    u32_t dev_info = vendorId | (deviceId << 16);
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
    {
        if (pos->device_info == dev_info) {
            return pos;
        }
    }

    return NULL;
}

struct pci_device*
pci_get_device_by_class(u32_t class)
{
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
    {
        if (PCI_DEV_CLASS(pos->class_info) == class) {
            return pos;
        }
    }

    return NULL;
}

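/*
 * Lookup examples (IDs are illustrative; the class value assumes
 * PCI_DEV_CLASS yields the 24-bit class:subclass:prog-if tuple):
 *
 *     // Intel 82540EM gigabit NIC
 *     struct pci_device* nic = pci_get_device_by_id(0x8086, 0x100e);
 *
 *     // AHCI SATA controller (class 0x01, subclass 0x06, prog-if 0x01)
 *     struct pci_device* hba = pci_get_device_by_class(0x010601);
 */
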
void
pci_apply_command(struct pci_device* pcidev, pci_reg_t cmd)
{
    ptr_t base = pcidev->cspace_base;
    pci_reg_t rcmd = pci_read_cspace(base, PCI_REG_STATUS_CMD);

    // replace the command word (low 16 bits), keep the status word intact
    rcmd = (rcmd & 0xffff0000) | cmd;

    pci_write_cspace(base, PCI_REG_STATUS_CMD, rcmd);
}

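/*
 * Example: enable memory space decoding and bus mastering, i.e. command
 * register bits 1 and 2 (PCI LB Spec. Rev 3.0, Section 6.2.2):
 *
 *     pci_apply_command(pcidev, 0x6);
 */
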
static void
__pci_read_cspace(struct twimap* map)
{
    struct pci_device* pcidev = (struct pci_device*)(map->data);

    // dump the entire 256-byte configuration space into the map buffer
    for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
        *(pci_reg_t*)(map->buffer + i) =
          pci_read_cspace(pcidev->cspace_base, i);
    }
}

/*---------- TwiFS interface definition ----------*/

static void
__pci_read_revid(struct twimap* map)
{
    int class = twimap_data(map, struct pci_device*)->class_info;
    twimap_printf(map, "0x%x", PCI_DEV_REV(class));
}

static void
__pci_read_class(struct twimap* map)
{
    int class = twimap_data(map, struct pci_device*)->class_info;
    twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
}

static void
__pci_read_devinfo(struct twimap* map)
{
    int devinfo = twimap_data(map, struct pci_device*)->device_info;
    twimap_printf(
      map, "%x:%x", PCI_DEV_VENDOR(devinfo), PCI_DEV_DEVID(devinfo));
}

static void
__pci_bar_read(struct twimap* map)
{
    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
    int bar_index = twimap_index(map, int);

    struct pci_base_addr* bar = &pcidev->bar[bar_index];

    if (!bar->start && !bar->size) {
        twimap_printf(map, "[%d] not present\n", bar_index);
        return;
    }

    twimap_printf(
      map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);

    if ((bar->type & BAR_TYPE_MMIO)) {
        twimap_printf(map, "mmio");
        if ((bar->type & BAR_TYPE_CACHABLE)) {
            twimap_printf(map, ", prefetchable");
        }
    } else {
        twimap_printf(map, "io");
    }

    twimap_printf(map, "\n");
}

static int
__pci_bar_gonext(struct twimap* map)
{
    // six BARs per header (indices 0..5); stop after the last one
    if (twimap_index(map, int) >= 5) {
        return 0;
    }
    map->index = (void*)(twimap_index(map, int) + 1);
    return 1;
}

static void
__pci_read_binding(struct twimap* map)
{
    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
    struct device_def* devdef = pcidev->binding.def;

    if (!devdef) {
        // not bound to any driver definition
        return;
    }

    twimap_printf(map,
                  "%xh:%xh.%d",
                  devdef->class.fn_grp,
                  devdef->class.device,
                  devdef->class.variant);
}

static void
__pci_trigger_bus_rescan(struct twimap* map)
{
    pci_scan();
}

static void
pci_build_fsmapping()
{
    struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
    struct pci_device *pos, *n;
    struct twimap* map;

    map = twifs_mapping(pci_class, NULL, "rescan");
    map->read = __pci_trigger_bus_rescan;

    llist_for_each(pos, n, &pci_devices, dev_chain)
    {
        pci_dev = twifs_dir_node(pci_class, "%x", pos->loc);

        map = twifs_mapping(pci_dev, pos, "config");
        map->read = __pci_read_cspace;

        map = twifs_mapping(pci_dev, pos, "revision");
        map->read = __pci_read_revid;

        map = twifs_mapping(pci_dev, pos, "class");
        map->read = __pci_read_class;

        map = twifs_mapping(pci_dev, pos, "binding");
        map->read = __pci_read_binding;

        map = twifs_mapping(pci_dev, pos, "io_bases");
        map->read = __pci_bar_read;
        map->go_next = __pci_bar_gonext;
    }
}

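/*
 * Resulting TwiFS layout (relative to the twifs mount point), one
 * directory per discovered function:
 *   pci/rescan          trigger a bus re-scan on read
 *   pci/<loc>/config    raw 256-byte configuration space dump
 *   pci/<loc>/revision  revision ID
 *   pci/<loc>/class     class code
 *   pci/<loc>/binding   bound driver definition, if any
 *   pci/<loc>/io_bases  decoded base addresses (BARs)
 */
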
EXPORT_TWIFS_PLUGIN(pci_devs, pci_build_fsmapping);

/*---------- PCI 3.0 HBA device definition ----------*/

static int
pci_load_devices(struct device_def* def)
{
    pcidev_cat = device_addcat(NULL, "pci");
    pci_scan();

    return 0;
}

void
pci_bind_instance(struct pci_device* pcidev, void* devobj)
{
    pcidev->dev.underlay = devobj;
    pcidev->binding.dev = devobj;
}

static struct device_def pci_def = {
    .name = "Generic PCI",
    .class = DEVCLASS(DEVIF_SOC, DEVFN_BUSIF, DEV_PCI),
    .init = pci_load_devices
};
EXPORT_DEVICE(pci3hba, &pci_def, load_sysconf);