3 * @author Lunaixsky (zelong56@gmail.com)
4 * @brief A software implementation of PCI Local Bus Specification Revision 3.0
8 * @copyright Copyright (c) 2022
12 #include <sys/pci_hba.h>
14 #include <klibc/string.h>
15 #include <lunaix/fs/twifs.h>
16 #include <lunaix/mm/valloc.h>
17 #include <lunaix/spike.h>
18 #include <lunaix/syslog.h>
/* All enumerated PCI functions, chained through pci_device::dev_chain. */
static DEFINE_LLIST(pci_devices);
/* Location -> pci_device lookup cache, keyed by pciaddr_t.
 * NOTE(review): `8` is presumably the bucket order (2^8 buckets) — verify
 * against the DECLARE_HASHTABLE macro. */
static DECLARE_HASHTABLE(pci_devcache, 8);

/* Device-tree category all PCI devices are registered under ("pci"). */
static struct device_cat* pcidev_cat;
/* Forward declaration; defined at the bottom of this file. */
static struct device_def pci_def;

/* Forward declaration: locate the MSI capability of `device`.
 * (Return type on an elided line — presumably void.) */
pci_probe_msi_info(struct pci_device* device);
/*
 * Log a one-line summary of a discovered PCI function: location, class
 * code and vendor:device pair.
 * NOTE(review): part of the kprintf argument list (the location/class
 * arguments) is on lines elided from this view.
 */
pci_log_device(struct pci_device* pcidev)
    pciaddr_t loc = pcidev->loc;
    struct device_def* binddef = pcidev->binding.def;

    kprintf("pci.%03d:%02d:%02d, class=%p, vendor:dev=%04x:%04x",
            PCI_DEV_VENDOR(pcidev->device_info),
            PCI_DEV_DEVID(pcidev->device_info));
/*
 * Allocate and initialize a pci_device for the function at `loc`,
 * snapshotting its class, identity and interrupt registers, probing its
 * MSI capability and BARs, then registering it with the device framework.
 * NOTE(review): the trailing `return device;` is on a line elided from
 * this view.
 */
static struct pci_device*
pci_create_device(pciaddr_t loc, ptr_t pci_base, int devinfo)
    // Config-space offset 0x8: revision + class code dword.
    pci_reg_t class = pci_read_cspace(pci_base, 0x8);

    u32_t devid = PCI_DEV_DEVID(devinfo);
    u32_t vendor = PCI_DEV_VENDOR(devinfo);
    // Offset 0x3c: interrupt line/pin information.
    pci_reg_t intr = pci_read_cspace(pci_base, 0x3c);

    struct pci_device* device = vzalloc(sizeof(struct pci_device));
    device->class_info = class;
    device->device_info = devinfo;
    device->cspace_base = pci_base;
    device->intr_info = intr;

    device_create(&device->dev, dev_meta(pcidev_cat), DEV_IFSYS, NULL);

    pci_probe_msi_info(device);
    pci_probe_bar_info(device);

    llist_append(&pci_devices, &device->dev_chain);
    // Device node is named by its hex PCI location.
    register_device(&device->dev, &pci_def.class, "%x", loc);
    // variant doubles as an instance counter for this class.
    pci_def.class.variant++;
/*
 * Attempt to bind the driver definition `pcidev_def` to a matching,
 * not-yet-bound PCI device (matched on class, then on masked
 * vendor/device identity).
 * NOTE(review): significant control flow (continues, returns, the use of
 * `more` and `bind_attempted`) is on lines elided from this view — the
 * comments below describe only what is visible.
 */
pci_bind_definition(struct pci_device_def* pcidev_def, int* more)
    u32_t class = pcidev_def->dev_class;
    u32_t devid_mask = pcidev_def->ident_mask;
    u32_t devid = pcidev_def->dev_ident & devid_mask;

    // A definition without a bind callback can never be attached.
    if (!pcidev_def->devdef.bind) {
        ERROR("pcidev %xh:%xh.%d is unbindable",
              pcidev_def->devdef.class.fn_grp,
              pcidev_def->devdef.class.device,
              pcidev_def->devdef.class.variant);

    int bind_attempted = 0;

    struct device_def* devdef;
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
        // Skip devices that already have a driver bound.
        if (binded_pcidev(pos)) {

        // Class must match before identity is even considered.
        if (class != PCI_DEV_CLASS(pos->class_info)) {

        int matched = (pos->device_info & devid_mask) == devid;

        if (bind_attempted) {

        devdef = &pcidev_def->devdef;
        errno = devdef->bind(devdef, &pos->dev);

        ERROR("pci_loc:%x, bind (%xh:%xh.%d) failed, e=%d",
              devdef->class.fn_grp,
              devdef->class.device,
              devdef->class.variant,

        // Record the winning definition on the device.
        pos->binding.def = &pcidev_def->devdef;
/*
 * Repeatedly invoke pci_bind_definition() until it reports no further
 * matches (via `more`) or an error.
 * NOTE(review): the loop construct, `e`/`more` declarations and return
 * are on lines elided from this view.
 */
pci_bind_definition_all(struct pci_device_def* pcidef)
    if (!(e = pci_bind_definition(pcidef, &more))) {
/*
 * Probe a single PCI location. If a function is present there and has not
 * been enumerated before, create, cache and log a pci_device for it. For
 * a multi-function device (header-type bit 7 set on function 0), the
 * sibling functions are probed recursively.
 */
pci_probe_device(pciaddr_t pci_loc)
    u32_t base = PCI_CFGADDR(pci_loc);
    pci_reg_t reg1 = pci_read_cspace(base, 0);

    // Vendor == 0xffff means no device exists at this location.
    if (PCI_DEV_VENDOR(reg1) == PCI_VENDOR_INVLD) {

    // Header type is byte 2 of the dword at config offset 0xc.
    pci_reg_t hdr_type = pci_read_cspace(base, 0xc);
    hdr_type = (hdr_type >> 16) & 0xff;

    // QEMU's ICH9/Q35 implementation appears slightly off: it sets bit 7
    // of the header type on *every* function of a multi-function device,
    // not just function 0. VirtualBox does not have this quirk.
    if ((hdr_type & 0x80) && PCILOC_FN(pci_loc) == 0) {
        hdr_type = hdr_type & ~0x80;
        // Probe the remaining functions of this multi-function device.
        // NOTE(review): PCI permits functions 0-7, so `i < 7` only probes
        // fn 1..6 and silently skips fn 7 — likely an off-by-one (i < 8);
        // confirm against the PCI 3.0 spec before changing.
        for (int i = 1; i < 7; i++) {
            pci_probe_device(pci_loc + i);

    // Bail out if this location has already been enumerated.
    struct pci_device *pos, *n;
    hashtable_hash_foreach(pci_devcache, pci_loc, pos, n, dev_cache)
        if (pos->loc == pci_loc) {

    struct pci_device* pcidev = pci_create_device(pci_loc, base, reg1);

    pcidev->loc = pci_loc;
    hashtable_hash_in(pci_devcache, &pcidev->dev_cache, pci_loc);
    pci_log_device(pcidev);
    // Walk the whole PCI address space. Stepping by 8 lands `loc` on
    // function 0 of each (bus, device) pair; pci_probe_device() fans out
    // to the other functions of multi-function devices itself.
    // NOTE(review): the enclosing function's header is on a line elided
    // from this view.
    for (u32_t loc = 0; loc < (pciaddr_t)-1; loc += 8) {
        pci_probe_device((pciaddr_t)loc);
/*
 * Size and decode all six BARs of `device`, filling device->bar[0..5]
 * with base address, size and type flags (MMIO vs port I/O,
 * prefetchable).
 * NOTE(review): the declaration of `bar` and the `else` branch header
 * are on lines elided from this view.
 */
pci_probe_bar_info(struct pci_device* device)
    struct pci_base_addr* ba;
    for (size_t i = 0; i < 6; i++) {
        ba = &device->bar[i];
        // pci_bar_sizing() returns the BAR's size and hands the raw BAR
        // value back through `bar`; BAR registers are 1-indexed here.
        ba->size = pci_bar_sizing(device, &bar, i + 1);
        if (PCI_BAR_MMIO(bar)) {
            ba->start = PCI_BAR_ADDR_MM(bar);
            ba->type |= PCI_BAR_CACHEABLE(bar) ? BAR_TYPE_CACHABLE : 0;
            ba->type |= BAR_TYPE_MMIO;
            // (else branch — elided line) port I/O BAR:
            ba->start = PCI_BAR_ADDR_IO(bar);
/*
 * Program the device's MSI capability so it delivers interrupts on
 * `vector`, then enable MSI delivery.
 * NOTE(review): several argument lines of the pci_write_cspace() calls
 * are elided from this view.
 */
pci_setup_msi(struct pci_device* device, int vector)
    // PCI LB Spec. (Rev 3) Section 6.8 & 6.8.1

    // Platform MSI doorbell address and the data payload for this vector.
    ptr_t msi_addr = pci_get_msi_base();
    u32_t msi_data = pci_config_msi_data(vector);

    pci_reg_t reg1 = pci_read_cspace(device->cspace_base, device->msi_loc);
    // Message Control is the upper 16 bits of the capability header.
    pci_reg_t msg_ctl = reg1 >> 16;
    // A 64-bit capable MSI cap carries an extra upper-address dword,
    // shifting the data/mask registers down by 4 bytes.
    int offset_cap64 = !!(msg_ctl & MSI_CAP_64BIT) * 4;

    pci_write_cspace(device->cspace_base,
                     PCI_MSI_ADDR_LO(device->msi_loc),

    pci_write_cspace(device->cspace_base,
                     PCI_MSI_ADDR_HI(device->msi_loc),
                     (u64_t)msi_addr >> 32);

    pci_write_cspace(device->cspace_base,
                     PCI_MSI_DATA(device->msi_loc, offset_cap64),

    // Per-vector masking supported: clear the mask register so the
    // vector is not masked off.
    if ((msg_ctl & MSI_CAP_MASK)) {
        device->cspace_base, PCI_MSI_MASK(device->msi_loc, offset_cap64), 0);

    // manipulate the MSI_CTRL to allow device using MSI to request service.
    // Mask 0xff8fffff clears the Multiple Message Enable field (bits
    // 20-22 => single message); 0x10000 sets the MSI Enable bit.
    reg1 = (reg1 & 0xff8fffff) | 0x10000;
    pci_write_cspace(device->cspace_base, device->msi_loc, reg1);
/*
 * Walk the device's capability list looking for the MSI capability
 * (cap id 0x05) and record its config-space offset in device->msi_loc.
 * NOTE(review): the `status` declaration and the traversal-loop header
 * are on lines elided from this view.
 */
pci_probe_msi_info(struct pci_device* device)
    // Note that Virtualbox have to use ICH9 chipset for MSI support.
    // Qemu seems ok with default PIIX3, Bochs is pending to test...
    // See https://www.virtualbox.org/manual/ch03.html (section 3.5.1)

    // Status register is the upper 16 bits of the status/command dword.
        pci_read_cspace(device->cspace_base, PCI_REG_STATUS_CMD) >> 16;

    // Status bit 4: capability list present. Without it, no MSI.
    if (!(status & 0x10)) {

    // Offset 0x34: capabilities pointer (low byte).
    pci_reg_t cap_ptr = pci_read_cspace(device->cspace_base, 0x34) & 0xff;

        cap_hdr = pci_read_cspace(device->cspace_base, cap_ptr);
        // Capability id 0x05 == MSI.
        if ((cap_hdr & 0xff) == 0x5) {
            device->msi_loc = cap_ptr;

        // Follow the next-capability pointer (byte 1 of the header).
        cap_ptr = (cap_hdr >> 8) & 0xff;
/*
 * Standard PCI BAR sizing sequence: save the BAR, write all-ones, read
 * back the size mask, then restore the original value. The raw BAR value
 * is returned via `bar_out`.
 * NOTE(review): the assignment to *bar_out, the `sized` declaration and
 * the final size computation/return are on lines elided from this view.
 */
pci_bar_sizing(struct pci_device* dev, u32_t* bar_out, u32_t bar_num)
    pci_reg_t bar = pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num));

    // Write all-ones; the device leaves writable bits set according to
    // its decoded size.
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), 0xffffffff);
        // ~0x1 drops the io/mmio indicator bit before decoding.
        pci_read_cspace(dev->cspace_base, PCI_REG_BAR(bar_num)) & ~0x1;
    if (PCI_BAR_MMIO(bar)) {
        sized = PCI_BAR_ADDR_MM(sized);
    // Restore the original BAR value.
    pci_write_cspace(dev->cspace_base, PCI_REG_BAR(bar_num), bar);
/*
 * Find an enumerated PCI device by exact vendor/device id pair.
 * NOTE(review): the return statements are on lines elided from this
 * view (presumably `pos` on match, NULL otherwise).
 */
pci_get_device_by_id(u16_t vendorId, u16_t deviceId)
    // device_info layout matches config dword 0: vendor low, device high.
    u32_t dev_info = vendorId | (deviceId << 16);
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
        if (pos->device_info == dev_info) {
/*
 * Find the first enumerated PCI device whose class code matches `class`.
 * NOTE(review): the return statements are on lines elided from this
 * view (presumably `pos` on match, NULL otherwise).
 */
pci_get_device_by_class(u32_t class)
    struct pci_device *pos, *n;
    llist_for_each(pos, n, &pci_devices, dev_chain)
        if (PCI_DEV_CLASS(pos->class_info) == class) {
/*
 * TwiFS read hook: dump the device's full 256-byte configuration space
 * into the map buffer, one pci_reg_t at a time.
 * NOTE(review): assumes map->buffer holds at least 256 bytes — confirm
 * against the twimap contract.
 */
__pci_read_cspace(struct twimap* map)
    struct pci_device* pcidev = (struct pci_device*)(map->data);

    for (size_t i = 0; i < 256; i += sizeof(pci_reg_t)) {
        *(pci_reg_t*)(map->buffer + i) =
          pci_read_cspace(pcidev->cspace_base, i);
343 /*---------- TwiFS interface definition ----------*/
/* TwiFS read hook: print the device's revision id in hex. */
__pci_read_revid(struct twimap* map)
    int class = twimap_data(map, struct pci_device*)->class_info;
    twimap_printf(map, "0x%x", PCI_DEV_REV(class));
/* TwiFS read hook: print the device's class code in hex. */
__pci_read_class(struct twimap* map)
    int class = twimap_data(map, struct pci_device*)->class_info;
    twimap_printf(map, "0x%x", PCI_DEV_CLASS(class));
/* TwiFS read hook: print "vendor:device" in hex.
 * NOTE(review): the `twimap_printf(` call head is on an elided line. */
__pci_read_devinfo(struct twimap* map)
    int devinfo = twimap_data(map, struct pci_device*)->device_info;
        map, "%x:%x", PCI_DEV_VENDOR(devinfo), PCI_DEV_DEVID(devinfo));
/*
 * TwiFS read hook: print one BAR entry, selected by the map's iteration
 * index, as "[n] base=..., size=..., mmio|io[, prefetchable]".
 * NOTE(review): the early return after "not present", the printf call
 * head and the else header are on lines elided from this view.
 */
__pci_bar_read(struct twimap* map)
    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
    int bar_index = twimap_index(map, int);

    struct pci_base_addr* bar = &pcidev->bar[bar_index];

    // Base and size both zero: the BAR is not implemented.
    if (!bar->start && !bar->size) {
        twimap_printf(map, "[%d] not present \n", bar_index);

        map, "[%d] base=%.8p, size=%.8p, ", bar_index, bar->start, bar->size);

    if ((bar->type & BAR_TYPE_MMIO)) {
        twimap_printf(map, "mmio");
        if ((bar->type & BAR_TYPE_CACHABLE)) {
            twimap_printf(map, ", prefetchable");
        // (else branch — elided line) port I/O BAR:
        twimap_printf(map, "io");

    twimap_printf(map, "\n");
/*
 * TwiFS iterator hook: advance to the next BAR index; stop after index 5
 * (six BARs total).
 * NOTE(review): the stop/advance return statements are on lines elided
 * from this view.
 */
__pci_bar_gonext(struct twimap* map)
    if (twimap_index(map, int) >= 5) {
/*
 * TwiFS read hook: print the device definition currently bound to this
 * PCI device.
 * NOTE(review): the unbound-case handling and the twimap_printf call
 * head are on lines elided from this view.
 */
__pci_read_binding(struct twimap* map)
    struct pci_device* pcidev = twimap_data(map, struct pci_device*);
    struct device_def* devdef = pcidev->binding.def;
        devdef->class.fn_grp,
        devdef->class.device,
        devdef->class.variant);
/* TwiFS read hook on /pci/rescan: re-enumerate the PCI bus on read.
 * NOTE(review): the function body is entirely on lines elided from this
 * view. */
__pci_trigger_bus_rescan(struct twimap* map)
/*
 * Build the TwiFS tree under /pci: a "rescan" trigger node, plus one
 * directory per enumerated device (named by hex location) exposing its
 * config space, revision, class, driver binding and BARs.
 * NOTE(review): the declaration of `map` is on a line elided from this
 * view.
 */
pci_build_fsmapping()
    struct twifs_node *pci_class = twifs_dir_node(NULL, "pci"), *pci_dev;
    struct pci_device *pos, *n;

    map = twifs_mapping(pci_class, NULL, "rescan");
    map->read = __pci_trigger_bus_rescan;

    llist_for_each(pos, n, &pci_devices, dev_chain)
        pci_dev = twifs_dir_node(pci_class, "%x", pos->loc);

        map = twifs_mapping(pci_dev, pos, "config");
        map->read = __pci_read_cspace;

        map = twifs_mapping(pci_dev, pos, "revision");
        map->read = __pci_read_revid;

        map = twifs_mapping(pci_dev, pos, "class");
        map->read = __pci_read_class;

        map = twifs_mapping(pci_dev, pos, "binding");
        map->read = __pci_read_binding;

        // io_bases is iterable: one entry per BAR.
        map = twifs_mapping(pci_dev, pos, "io_bases");
        map->read = __pci_bar_read;
        map->go_next = __pci_bar_gonext;
/* Register pci_build_fsmapping() to run when TwiFS plugins are loaded. */
EXPORT_TWIFS_PLUGIN(pci_devs, pci_build_fsmapping);
460 /*---------- PCI 3.0 HBA device definition ----------*/
/*
 * Device-framework init hook for the PCI HBA: create the "pci" device
 * category (and, on elided lines, presumably kick off enumeration).
 * NOTE(review): most of this function is on lines elided from this view.
 */
pci_load_devices(struct device_def* def)
    pcidev_cat = device_addcat(NULL, "pci");
/*
 * Attach a driver's device object to this PCI device: the driver object
 * becomes both the underlay of the generic device node and the recorded
 * binding.
 */
pci_bind_instance(struct pci_device* pcidev, void* devobj)
    pcidev->dev.underlay = devobj;
    pcidev->binding.dev = devobj;
/* Device definition for the PCI host bus adapter itself; init runs
 * pci_load_devices at the load_sysconf stage. */
static struct device_def pci_def = {
    .name = "Generic PCI",
    .class = DEVCLASS(DEVIF_SOC, DEVFN_BUSIF, DEV_PCI),
    .init = pci_load_devices
EXPORT_DEVICE(pci3hba, &pci_def, load_sysconf);