author | Ingo Molnar <mingo@elte.hu> | 2008-10-28 16:26:12 +0100
committer | Ingo Molnar <mingo@elte.hu> | 2008-10-28 16:26:12 +0100
commit | 7a9787e1eba95a166265e6a260cf30af04ef0a99 (patch)
tree | e730a4565e0318140d2fbd2f0415d18a339d7336 /drivers/pci
parent | 41b9eb264c8407655db57b60b4457fe1b2ec9977 (diff)
parent | 0173a3265b228da319ceb9c1ec6a5682fd1b2d92 (diff)
Merge commit 'v2.6.28-rc2' into x86/pci-ioapic-boot-irq-quirks
Diffstat (limited to 'drivers/pci')
63 files changed, 3234 insertions, 2051 deletions
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 7d63f8ced24..af3bfe22847 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -3,7 +3,8 @@ # obj-y += access.o bus.o probe.o remove.o pci.o quirks.o slot.o \ - pci-driver.o search.o pci-sysfs.o rom.o setup-res.o + pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \ + irq.o obj-$(CONFIG_PROC_FS) += proc.o # Build PCI Express stuff if needed @@ -26,6 +27,8 @@ obj-$(CONFIG_HT_IRQ) += htirq.o # Build Intel IOMMU support obj-$(CONFIG_DMAR) += dmar.o iova.o intel-iommu.o +obj-$(CONFIG_INTR_REMAP) += dmar.o intr_remapping.o + # # Some architectures use the generic PCI setup functions # diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 529d9d7727b..999cc4088b5 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -151,6 +151,13 @@ void pci_bus_add_devices(struct pci_bus *bus) if (retval) dev_err(&dev->dev, "Error creating cpuaffinity" " file, continuing...\n"); + + retval = device_create_file(&child_bus->dev, + &dev_attr_cpulistaffinity); + if (retval) + dev_err(&dev->dev, + "Error creating cpulistaffinity" + " file, continuing...\n"); } } } diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c index f941f609dbf..691b3adeb87 100644 --- a/drivers/pci/dmar.c +++ b/drivers/pci/dmar.c @@ -19,15 +19,18 @@ * Author: Shaohua Li <shaohua.li@intel.com> * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> * - * This file implements early detection/parsing of DMA Remapping Devices + * This file implements early detection/parsing of Remapping Devices * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI * tables. + * + * These routines are used by both DMA-remapping and Interrupt-remapping */ #include <linux/pci.h> #include <linux/dmar.h> -#include "iova.h" -#include "intel-iommu.h" +#include <linux/iova.h> +#include <linux/intel-iommu.h> +#include <linux/timer.h> #undef PREFIX #define PREFIX "DMAR:" @@ -37,7 +40,6 @@ * these units are not supported by the architecture. 
*/ LIST_HEAD(dmar_drhd_units); -LIST_HEAD(dmar_rmrr_units); static struct acpi_table_header * __initdata dmar_tbl; @@ -53,11 +55,6 @@ static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) list_add(&drhd->list, &dmar_drhd_units); } -static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) -{ - list_add(&rmrr->list, &dmar_rmrr_units); -} - static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope, struct pci_dev **dev, u16 segment) { @@ -172,19 +169,36 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) struct acpi_dmar_hardware_unit *drhd; struct dmar_drhd_unit *dmaru; int ret = 0; - static int include_all; dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); if (!dmaru) return -ENOMEM; + dmaru->hdr = header; drhd = (struct acpi_dmar_hardware_unit *)header; dmaru->reg_base_addr = drhd->address; dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ + ret = alloc_iommu(dmaru); + if (ret) { + kfree(dmaru); + return ret; + } + dmar_register_drhd_unit(dmaru); + return 0; +} + +static int __init dmar_parse_dev(struct dmar_drhd_unit *dmaru) +{ + struct acpi_dmar_hardware_unit *drhd; + static int include_all; + int ret = 0; + + drhd = (struct acpi_dmar_hardware_unit *) dmaru->hdr; + if (!dmaru->include_all) ret = dmar_parse_dev_scope((void *)(drhd + 1), - ((void *)drhd) + header->length, + ((void *)drhd) + drhd->header.length, &dmaru->devices_cnt, &dmaru->devices, drhd->segment); else { @@ -197,37 +211,59 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header) include_all = 1; } - if (ret || (dmaru->devices_cnt == 0 && !dmaru->include_all)) + if (ret) { + list_del(&dmaru->list); kfree(dmaru); - else - dmar_register_drhd_unit(dmaru); + } return ret; } +#ifdef CONFIG_DMAR +LIST_HEAD(dmar_rmrr_units); + +static void __init dmar_register_rmrr_unit(struct dmar_rmrr_unit *rmrr) +{ + list_add(&rmrr->list, &dmar_rmrr_units); +} + + static int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) { struct acpi_dmar_reserved_memory *rmrr; struct dmar_rmrr_unit *rmrru; - int ret = 0; rmrru = kzalloc(sizeof(*rmrru), GFP_KERNEL); if (!rmrru) return -ENOMEM; + rmrru->hdr = header; rmrr = (struct acpi_dmar_reserved_memory *)header; rmrru->base_address = rmrr->base_address; rmrru->end_address = rmrr->end_address; + + dmar_register_rmrr_unit(rmrru); + return 0; +} + +static int __init +rmrr_parse_dev(struct dmar_rmrr_unit *rmrru) +{ + struct acpi_dmar_reserved_memory *rmrr; + int ret; + + rmrr = (struct acpi_dmar_reserved_memory *) rmrru->hdr; ret = dmar_parse_dev_scope((void *)(rmrr + 1), - ((void *)rmrr) + header->length, + ((void *)rmrr) + rmrr->header.length, &rmrru->devices_cnt, &rmrru->devices, rmrr->segment); - if (ret || (rmrru->devices_cnt == 0)) + if (ret || (rmrru->devices_cnt == 0)) { + list_del(&rmrru->list); kfree(rmrru); - else - dmar_register_rmrr_unit(rmrru); + } return ret; } +#endif static void __init dmar_table_print_dmar_entry(struct acpi_dmar_header *header) @@ -240,19 +276,39 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header) drhd = (struct acpi_dmar_hardware_unit *)header; printk (KERN_INFO PREFIX "DRHD (flags: 0x%08x)base: 0x%016Lx\n", - drhd->flags, drhd->address); + drhd->flags, (unsigned long long)drhd->address); break; case ACPI_DMAR_TYPE_RESERVED_MEMORY: rmrr = (struct acpi_dmar_reserved_memory *)header; printk (KERN_INFO PREFIX "RMRR base: 0x%016Lx end: 0x%016Lx\n", - rmrr->base_address, rmrr->end_address); + (unsigned long long)rmrr->base_address, + (unsigned long long)rmrr->end_address); break; } } /** + 
* dmar_table_detect - checks to see if the platform supports DMAR devices + */ +static int __init dmar_table_detect(void) +{ + acpi_status status = AE_OK; + + /* if we could find DMAR table, then there are DMAR devices */ + status = acpi_get_table(ACPI_SIG_DMAR, 0, + (struct acpi_table_header **)&dmar_tbl); + + if (ACPI_SUCCESS(status) && !dmar_tbl) { + printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); + status = AE_NOT_FOUND; + } + + return (ACPI_SUCCESS(status) ? 1 : 0); +} + +/** * parse_dmar_table - parses the DMA reporting table */ static int __init @@ -262,11 +318,17 @@ parse_dmar_table(void) struct acpi_dmar_header *entry_header; int ret = 0; + /* + * Do it again, earlier dmar_tbl mapping could be mapped with + * fixed map. + */ + dmar_table_detect(); + dmar = (struct acpi_table_dmar *)dmar_tbl; if (!dmar) return -ENODEV; - if (dmar->width < PAGE_SHIFT_4K - 1) { + if (dmar->width < PAGE_SHIFT - 1) { printk(KERN_WARNING PREFIX "Invalid DMAR haw\n"); return -EINVAL; } @@ -284,7 +346,9 @@ parse_dmar_table(void) ret = dmar_parse_one_drhd(entry_header); break; case ACPI_DMAR_TYPE_RESERVED_MEMORY: +#ifdef CONFIG_DMAR ret = dmar_parse_one_rmrr(entry_header); +#endif break; default: printk(KERN_WARNING PREFIX @@ -300,15 +364,77 @@ parse_dmar_table(void) return ret; } +int dmar_pci_device_match(struct pci_dev *devices[], int cnt, + struct pci_dev *dev) +{ + int index; -int __init dmar_table_init(void) + while (dev) { + for (index = 0; index < cnt; index++) + if (dev == devices[index]) + return 1; + + /* Check our parent */ + dev = dev->bus->self; + } + + return 0; +} + +struct dmar_drhd_unit * +dmar_find_matched_drhd_unit(struct pci_dev *dev) { + struct dmar_drhd_unit *drhd = NULL; + + list_for_each_entry(drhd, &dmar_drhd_units, list) { + if (drhd->include_all || dmar_pci_device_match(drhd->devices, + drhd->devices_cnt, dev)) + return drhd; + } + + return NULL; +} + +int __init dmar_dev_scope_init(void) +{ + struct dmar_drhd_unit *drhd, *drhd_n; + int ret = -ENODEV; + + list_for_each_entry_safe(drhd, drhd_n, &dmar_drhd_units, list) { + ret = dmar_parse_dev(drhd); + if (ret) + return ret; + } + +#ifdef CONFIG_DMAR + { + struct dmar_rmrr_unit *rmrr, *rmrr_n; + list_for_each_entry_safe(rmrr, rmrr_n, &dmar_rmrr_units, list) { + ret = rmrr_parse_dev(rmrr); + if (ret) + return ret; + } + } +#endif + + return ret; +} + +int __init dmar_table_init(void) +{ + static int dmar_table_initialized; int ret; + if (dmar_table_initialized) + return 0; + + dmar_table_initialized = 1; + ret = parse_dmar_table(); if (ret) { - printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); + if (ret != -ENODEV) + printk(KERN_INFO PREFIX "parse DMAR table failure.\n"); return ret; } @@ -317,29 +443,320 @@ int __init dmar_table_init(void) return -ENODEV; } - if (list_empty(&dmar_rmrr_units)) { +#ifdef CONFIG_DMAR + if (list_empty(&dmar_rmrr_units)) printk(KERN_INFO PREFIX "No RMRR found\n"); - return -ENODEV; +#endif + +#ifdef CONFIG_INTR_REMAP + parse_ioapics_under_ir(); +#endif + return 0; +} + +void __init detect_intel_iommu(void) +{ + int ret; + + ret = dmar_table_detect(); + + { +#ifdef CONFIG_INTR_REMAP + struct acpi_table_dmar *dmar; + /* + * for now we will disable dma-remapping when interrupt + * remapping is enabled. + * When support for queued invalidation for IOTLB invalidation + * is added, we will not need this any more. 
+ */ + dmar = (struct acpi_table_dmar *) dmar_tbl; + if (ret && cpu_has_x2apic && dmar->flags & 0x1) + printk(KERN_INFO + "Queued invalidation will be enabled to support " + "x2apic and Intr-remapping.\n"); +#endif +#ifdef CONFIG_DMAR + if (ret && !no_iommu && !iommu_detected && !swiotlb && + !dmar_disabled) + iommu_detected = 1; +#endif + } + dmar_tbl = NULL; +} + + +int alloc_iommu(struct dmar_drhd_unit *drhd) +{ + struct intel_iommu *iommu; + int map_size; + u32 ver; + static int iommu_allocated = 0; + + iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); + if (!iommu) + return -ENOMEM; + + iommu->seq_id = iommu_allocated++; + + iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; } + iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); + iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); + + /* the registers might be more than one page */ + map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), + cap_max_fault_reg_offset(iommu->cap)); + map_size = VTD_PAGE_ALIGN(map_size); + if (map_size > VTD_PAGE_SIZE) { + iounmap(iommu->reg); + iommu->reg = ioremap(drhd->reg_base_addr, map_size); + if (!iommu->reg) { + printk(KERN_ERR "IOMMU: can't map the region\n"); + goto error; + } + } + + ver = readl(iommu->reg + DMAR_VER_REG); + pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", + (unsigned long long)drhd->reg_base_addr, + DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), + (unsigned long long)iommu->cap, + (unsigned long long)iommu->ecap); + + spin_lock_init(&iommu->register_lock); + drhd->iommu = iommu; return 0; +error: + kfree(iommu); + return -1; } -/** - * early_dmar_detect - checks to see if the platform supports DMAR devices +void free_iommu(struct intel_iommu *iommu) +{ + if (!iommu) + return; + +#ifdef CONFIG_DMAR + free_dmar_iommu(iommu); +#endif + + if (iommu->reg) + iounmap(iommu->reg); + kfree(iommu); +} + +/* + * Reclaim all the submitted descriptors which have completed its work. */ -int __init early_dmar_detect(void) +static inline void reclaim_free_desc(struct q_inval *qi) { - acpi_status status = AE_OK; + while (qi->desc_status[qi->free_tail] == QI_DONE) { + qi->desc_status[qi->free_tail] = QI_FREE; + qi->free_tail = (qi->free_tail + 1) % QI_LENGTH; + qi->free_cnt++; + } +} - /* if we could find DMAR table, then there are DMAR devices */ - status = acpi_get_table(ACPI_SIG_DMAR, 0, - (struct acpi_table_header **)&dmar_tbl); +/* + * Submit the queued invalidation descriptor to the remapping + * hardware unit and wait for its completion. + */ +void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu) +{ + struct q_inval *qi = iommu->qi; + struct qi_desc *hw, wait_desc; + int wait_index, index; + unsigned long flags; - if (ACPI_SUCCESS(status) && !dmar_tbl) { - printk (KERN_WARNING PREFIX "Unable to map DMAR\n"); - status = AE_NOT_FOUND; + if (!qi) + return; + + hw = qi->desc; + + spin_lock_irqsave(&qi->q_lock, flags); + while (qi->free_cnt < 3) { + spin_unlock_irqrestore(&qi->q_lock, flags); + cpu_relax(); + spin_lock_irqsave(&qi->q_lock, flags); } - return (ACPI_SUCCESS(status) ? 
1 : 0); + index = qi->free_head; + wait_index = (index + 1) % QI_LENGTH; + + qi->desc_status[index] = qi->desc_status[wait_index] = QI_IN_USE; + + hw[index] = *desc; + + wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE; + wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]); + + hw[wait_index] = wait_desc; + + __iommu_flush_cache(iommu, &hw[index], sizeof(struct qi_desc)); + __iommu_flush_cache(iommu, &hw[wait_index], sizeof(struct qi_desc)); + + qi->free_head = (qi->free_head + 2) % QI_LENGTH; + qi->free_cnt -= 2; + + spin_lock(&iommu->register_lock); + /* + * update the HW tail register indicating the presence of + * new descriptors. + */ + writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG); + spin_unlock(&iommu->register_lock); + + while (qi->desc_status[wait_index] != QI_DONE) { + /* + * We will leave the interrupts disabled, to prevent interrupt + * context to queue another cmd while a cmd is already submitted + * and waiting for completion on this cpu. This is to avoid + * a deadlock where the interrupt context can wait indefinitely + * for free slots in the queue. + */ + spin_unlock(&qi->q_lock); + cpu_relax(); + spin_lock(&qi->q_lock); + } + + qi->desc_status[index] = QI_DONE; + + reclaim_free_desc(qi); + spin_unlock_irqrestore(&qi->q_lock, flags); +} + +/* + * Flush the global interrupt entry cache. + */ +void qi_global_iec(struct intel_iommu *iommu) +{ + struct qi_desc desc; + + desc.low = QI_IEC_TYPE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); +} + +int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, + u64 type, int non_present_entry_flush) +{ + + struct qi_desc desc; + + if (non_present_entry_flush) { + if (!cap_caching_mode(iommu->cap)) + return 1; + else + did = 0; + } + + desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) + | QI_CC_GRAN(type) | QI_CC_TYPE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); + + return 0; + +} + +int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, + unsigned int size_order, u64 type, + int non_present_entry_flush) +{ + u8 dw = 0, dr = 0; + + struct qi_desc desc; + int ih = 0; + + if (non_present_entry_flush) { + if (!cap_caching_mode(iommu->cap)) + return 1; + else + did = 0; + } + + if (cap_write_drain(iommu->cap)) + dw = 1; + + if (cap_read_drain(iommu->cap)) + dr = 1; + + desc.low = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw) + | QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE; + desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih) + | QI_IOTLB_AM(size_order); + + qi_submit_sync(&desc, iommu); + + return 0; + +} + +/* + * Enable Queued Invalidation interface. This is a must to support + * interrupt-remapping. Also used by DMA-remapping, which replaces + * register based IOTLB invalidation. + */ +int dmar_enable_qi(struct intel_iommu *iommu) +{ + u32 cmd, sts; + unsigned long flags; + struct q_inval *qi; + + if (!ecap_qis(iommu->ecap)) + return -ENOENT; + + /* + * queued invalidation is already setup and enabled. 
+ */ + if (iommu->qi) + return 0; + + iommu->qi = kmalloc(sizeof(*qi), GFP_KERNEL); + if (!iommu->qi) + return -ENOMEM; + + qi = iommu->qi; + + qi->desc = (void *)(get_zeroed_page(GFP_KERNEL)); + if (!qi->desc) { + kfree(qi); + iommu->qi = 0; + return -ENOMEM; + } + + qi->desc_status = kmalloc(QI_LENGTH * sizeof(int), GFP_KERNEL); + if (!qi->desc_status) { + free_page((unsigned long) qi->desc); + kfree(qi); + iommu->qi = 0; + return -ENOMEM; + } + + qi->free_head = qi->free_tail = 0; + qi->free_cnt = QI_LENGTH; + + spin_lock_init(&qi->q_lock); + + spin_lock_irqsave(&iommu->register_lock, flags); + /* write zero to the tail reg */ + writel(0, iommu->reg + DMAR_IQT_REG); + + dmar_writeq(iommu->reg + DMAR_IQA_REG, virt_to_phys(qi->desc)); + + cmd = iommu->gcmd | DMA_GCMD_QIE; + iommu->gcmd |= DMA_GCMD_QIE; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + /* Make sure hardware complete it */ + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); + spin_unlock_irqrestore(&iommu->register_lock, flags); + + return 0; } diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c index 93e37f0666a..e17ef54f0ef 100644 --- a/drivers/pci/hotplug/acpi_pcihp.c +++ b/drivers/pci/hotplug/acpi_pcihp.c @@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(acpi_get_hp_params_from_firmware); int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) { acpi_status status; - acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev)); + acpi_handle chandle, handle; struct pci_dev *pdev = dev; struct pci_bus *parent; struct acpi_buffer string = { ACPI_ALLOCATE_BUFFER, NULL }; @@ -399,10 +399,25 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) * Per PCI firmware specification, we should run the ACPI _OSC * method to get control of hotplug hardware before using it. If * an _OSC is missing, we look for an OSHP to do the same thing. - * To handle different BIOS behavior, we look for _OSC and OSHP - * within the scope of the hotplug controller and its parents, + * To handle different BIOS behavior, we look for _OSC on a root + * bridge preferentially (according to PCI fw spec). Later for + * OSHP within the scope of the hotplug controller and its parents, * upto the host bridge under which this controller exists. 
*/ + handle = acpi_find_root_bridge_handle(pdev); + if (handle) { + acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); + dbg("Trying to get hotplug control for %s\n", + (char *)string.pointer); + status = pci_osc_control_set(handle, flags); + if (ACPI_SUCCESS(status)) + goto got_one; + kfree(string.pointer); + string = (struct acpi_buffer){ ACPI_ALLOCATE_BUFFER, NULL }; + } + + pdev = dev; + handle = DEVICE_ACPI_HANDLE(&dev->dev); while (!handle) { /* * This hotplug controller was not listed in the ACPI name @@ -427,15 +442,9 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); dbg("Trying to get hotplug control for %s \n", (char *)string.pointer); - status = pci_osc_control_set(handle, flags); - if (status == AE_NOT_FOUND) - status = acpi_run_oshp(handle); - if (ACPI_SUCCESS(status)) { - dbg("Gained control for hotplug HW for pci %s (%s)\n", - pci_name(dev), (char *)string.pointer); - kfree(string.pointer); - return 0; - } + status = acpi_run_oshp(handle); + if (ACPI_SUCCESS(status)) + goto got_one; if (acpi_root_bridge(handle)) break; chandle = handle; @@ -449,6 +458,11 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *dev, u32 flags) kfree(string.pointer); return -ENODEV; +got_one: + dbg("Gained control for hotplug HW for pci %s (%s)\n", pci_name(dev), + (char *)string.pointer); + kfree(string.pointer); + return 0; } EXPORT_SYMBOL(acpi_get_hp_hw_control_from_firmware); diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h index eecf7cbf413..f9e244da30a 100644 --- a/drivers/pci/hotplug/acpiphp.h +++ b/drivers/pci/hotplug/acpiphp.h @@ -36,7 +36,7 @@ #define _ACPIPHP_H #include <linux/acpi.h> -#include <linux/kobject.h> /* for KOBJ_NAME_LEN */ +#include <linux/kobject.h> #include <linux/mutex.h> #include <linux/pci_hotplug.h> @@ -50,9 +50,6 @@ #define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg) #define warn(format, arg...) 
printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) -/* name size which is used for entries in pcihpfs */ -#define SLOT_NAME_SIZE KOBJ_NAME_LEN /* {_SUN} */ - struct acpiphp_bridge; struct acpiphp_slot; @@ -63,9 +60,13 @@ struct slot { struct hotplug_slot *hotplug_slot; struct acpiphp_slot *acpi_slot; struct hotplug_slot_info info; - char name[SLOT_NAME_SIZE]; }; +static inline const char *slot_name(struct slot *slot) +{ + return hotplug_slot_name(slot->hotplug_slot); +} + /* * struct acpiphp_bridge - PCI bridge information * diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c index 0e496e866a8..95b536a23d2 100644 --- a/drivers/pci/hotplug/acpiphp_core.c +++ b/drivers/pci/hotplug/acpiphp_core.c @@ -44,6 +44,9 @@ #define MY_NAME "acpiphp" +/* name size which is used for entries in pcihpfs */ +#define SLOT_NAME_SIZE 21 /* {_SUN} */ + static int debug; int acpiphp_debug; @@ -84,7 +87,6 @@ static struct hotplug_slot_ops acpi_hotplug_slot_ops = { .get_adapter_status = get_adapter_status, }; - /** * acpiphp_register_attention - set attention LED callback * @info: must be completely filled with LED callbacks @@ -136,7 +138,7 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); /* enable the specified slot */ return acpiphp_enable_slot(slot->acpi_slot); @@ -154,7 +156,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); /* disable the specified slot */ retval = acpiphp_disable_slot(slot->acpi_slot); @@ -177,7 +179,7 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) { int retval = -ENODEV; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); if (attention_info && try_module_get(attention_info->owner)) { retval = attention_info->set_attn(hotplug_slot, status); @@ -200,7 +202,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = acpiphp_get_power_status(slot->acpi_slot); @@ -222,7 +224,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) { int retval = -EINVAL; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(hotplug_slot)); if (attention_info && try_module_get(attention_info->owner)) { retval = attention_info->get_attn(hotplug_slot, value); @@ -245,7 +247,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = acpiphp_get_latch_status(slot->acpi_slot); @@ -265,7 +267,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = acpiphp_get_adapter_status(slot->acpi_slot); @@ -299,7 
+301,7 @@ static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); kfree(slot->hotplug_slot); kfree(slot); @@ -310,6 +312,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) { struct slot *slot; int retval = -ENOMEM; + char name[SLOT_NAME_SIZE]; slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) @@ -321,8 +324,6 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) slot->hotplug_slot->info = &slot->info; - slot->hotplug_slot->name = slot->name; - slot->hotplug_slot->private = slot; slot->hotplug_slot->release = &release_slot; slot->hotplug_slot->ops = &acpi_hotplug_slot_ops; @@ -336,11 +337,12 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) slot->hotplug_slot->info->cur_bus_speed = PCI_SPEED_UNKNOWN; acpiphp_slot->slot = slot; - snprintf(slot->name, sizeof(slot->name), "%u", slot->acpi_slot->sun); + snprintf(name, SLOT_NAME_SIZE, "%u", slot->acpi_slot->sun); retval = pci_hp_register(slot->hotplug_slot, acpiphp_slot->bridge->pci_bus, - acpiphp_slot->device); + acpiphp_slot->device, + name); if (retval == -EBUSY) goto error_hpslot; if (retval) { @@ -348,7 +350,7 @@ int acpiphp_register_hotplug_slot(struct acpiphp_slot *acpiphp_slot) goto error_hpslot; } - info("Slot [%s] registered\n", slot->hotplug_slot->name); + info("Slot [%s] registered\n", slot_name(slot)); return 0; error_hpslot: @@ -365,7 +367,7 @@ void acpiphp_unregister_hotplug_slot(struct acpiphp_slot *acpiphp_slot) struct slot *slot = acpiphp_slot->slot; int retval = 0; - info ("Slot [%s] unregistered\n", slot->hotplug_slot->name); + info("Slot [%s] unregistered\n", slot_name(slot)); retval = pci_hp_deregister(slot->hotplug_slot); if (retval) diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c index a3e4705dd8f..955aae4071f 100644 --- a/drivers/pci/hotplug/acpiphp_glue.c +++ b/drivers/pci/hotplug/acpiphp_glue.c @@ -169,7 +169,9 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val, } - +static struct acpi_dock_ops acpiphp_dock_ops = { + .handler = handle_hotplug_event_func, +}; /* callback routine to register each ACPI PCI slot object */ static acpi_status @@ -180,7 +182,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) struct acpiphp_func *newfunc; acpi_handle tmp; acpi_status status = AE_OK; - unsigned long adr, sun; + unsigned long long adr, sun; int device, function, retval; status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr); @@ -285,7 +287,7 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv) */ newfunc->flags &= ~FUNC_HAS_EJ0; if (register_hotplug_dock_device(handle, - handle_hotplug_event_func, newfunc)) + &acpiphp_dock_ops, newfunc)) dbg("failed to register dock device\n"); /* we need to be notified when dock events happen @@ -528,7 +530,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; acpi_handle dummy_handle; - unsigned long tmp; + unsigned long long tmp; int device, function; struct pci_dev *dev; struct pci_bus *pci_bus = context; @@ -573,7 +575,7 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv) static int add_bridge(acpi_handle handle) { acpi_status status; - unsigned long tmp; + unsigned long long tmp; int seg, bus; acpi_handle dummy_handle; struct pci_bus *pci_bus; @@ -767,7 +769,7 @@ static int 
get_gsi_base(acpi_handle handle, u32 *gsi_base) { acpi_status status; int result = -1; - unsigned long gsb; + unsigned long long gsb; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; void *table; @@ -808,7 +810,7 @@ static acpi_status ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; - unsigned long sta; + unsigned long long sta; acpi_handle tmp; struct pci_dev *pdev; u32 gsi_base; @@ -872,7 +874,7 @@ static acpi_status ioapic_remove(acpi_handle handle, u32 lvl, void *context, void **rv) { acpi_status status; - unsigned long sta; + unsigned long long sta; acpi_handle tmp; u32 gsi_base; struct acpiphp_ioapic *pos, *n, *ioapic = NULL; @@ -1264,7 +1266,7 @@ static int disable_device(struct acpiphp_slot *slot) static unsigned int get_slot_status(struct acpiphp_slot *slot) { acpi_status status; - unsigned long sta = 0; + unsigned long long sta = 0; u32 dvid; struct list_head *l; struct acpiphp_func *func; diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c index 2b7c45e3937..881fdd2b731 100644 --- a/drivers/pci/hotplug/acpiphp_ibm.c +++ b/drivers/pci/hotplug/acpiphp_ibm.c @@ -183,7 +183,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) union acpi_object args[2]; struct acpi_object_list params = { .pointer = args, .count = 2 }; acpi_status stat; - unsigned long rc; + unsigned long long rc; union apci_descriptor *ibm_slot; ibm_slot = ibm_slot_from_id(hpslot_to_sun(slot)); @@ -204,7 +204,7 @@ static int ibm_set_attention_status(struct hotplug_slot *slot, u8 status) err("APLS evaluation failed: 0x%08x\n", stat); return -ENODEV; } else if (!rc) { - err("APLS method failed: 0x%08lx\n", rc); + err("APLS method failed: 0x%08llx\n", rc); return -ERANGE; } return 0; diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h index d9769b30be9..9fff878cf02 100644 --- a/drivers/pci/hotplug/cpci_hotplug.h +++ b/drivers/pci/hotplug/cpci_hotplug.h @@ -30,6 +30,7 @@ #include <linux/types.h> #include <linux/pci.h> +#include <linux/pci_hotplug.h> /* PICMG 2.1 R2.0 HS CSR bits: */ #define HS_CSR_INS 0x0080 @@ -69,6 +70,11 @@ struct cpci_hp_controller { struct cpci_hp_controller_ops *ops; }; +static inline const char *slot_name(struct slot *slot) +{ + return hotplug_slot_name(slot->hotplug_slot); +} + extern int cpci_hp_register_controller(struct cpci_hp_controller *controller); extern int cpci_hp_unregister_controller(struct cpci_hp_controller *controller); extern int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last); diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c index 935947991dc..de94f4feef8 100644 --- a/drivers/pci/hotplug/cpci_hotplug_core.c +++ b/drivers/pci/hotplug/cpci_hotplug_core.c @@ -108,7 +108,7 @@ enable_slot(struct hotplug_slot *hotplug_slot) struct slot *slot = hotplug_slot->private; int retval = 0; - dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s", __func__, slot_name(slot)); if (controller->ops->set_power) retval = controller->ops->set_power(slot, 1); @@ -121,25 +121,23 @@ disable_slot(struct hotplug_slot *hotplug_slot) struct slot *slot = hotplug_slot->private; int retval = 0; - dbg("%s - physical_slot = %s", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s", __func__, slot_name(slot)); down_write(&list_rwsem); /* Unconfigure device */ - dbg("%s - unconfiguring slot %s", - __func__, slot->hotplug_slot->name); + dbg("%s - 
unconfiguring slot %s", __func__, slot_name(slot)); if ((retval = cpci_unconfigure_slot(slot))) { err("%s - could not unconfigure slot %s", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); goto disable_error; } - dbg("%s - finished unconfiguring slot %s", - __func__, slot->hotplug_slot->name); + dbg("%s - finished unconfiguring slot %s", __func__, slot_name(slot)); /* Clear EXT (by setting it) */ if (cpci_clear_ext(slot)) { err("%s - could not clear EXT for slot %s", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); retval = -ENODEV; goto disable_error; } @@ -214,7 +212,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot) struct slot *slot = hotplug_slot->private; kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot->name); kfree(slot->hotplug_slot); if (slot->dev) pci_dev_put(slot->dev); @@ -222,12 +219,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot) } #define SLOT_NAME_SIZE 6 -static void -make_slot_name(struct slot *slot) -{ - snprintf(slot->hotplug_slot->name, - SLOT_NAME_SIZE, "%02x:%02x", slot->bus->number, slot->number); -} int cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) @@ -235,7 +226,7 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; - char *name; + char name[SLOT_NAME_SIZE]; int status = -ENOMEM; int i; @@ -262,34 +253,31 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) goto error_hpslot; hotplug_slot->info = info; - name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); - if (!name) - goto error_info; - hotplug_slot->name = name; - slot->bus = bus; slot->number = i; slot->devfn = PCI_DEVFN(i, 0); + snprintf(name, SLOT_NAME_SIZE, "%02x:%02x", bus->number, i); + hotplug_slot->private = slot; hotplug_slot->release = &release_slot; - make_slot_name(slot); hotplug_slot->ops = &cpci_hotplug_slot_ops; /* * Initialize the slot info structure with some known * good values. 
*/ - dbg("initializing slot %s", slot->hotplug_slot->name); + dbg("initializing slot %s", name); info->power_status = cpci_get_power_status(slot); info->attention_status = cpci_get_attention_status(slot); - dbg("registering slot %s", slot->hotplug_slot->name); - status = pci_hp_register(slot->hotplug_slot, bus, i); + dbg("registering slot %s", name); + status = pci_hp_register(slot->hotplug_slot, bus, i, name); if (status) { err("pci_hp_register failed with error %d", status); - goto error_name; + goto error_info; } + dbg("slot registered with name: %s", slot_name(slot)); /* Add slot to our internal list */ down_write(&list_rwsem); @@ -298,8 +286,6 @@ cpci_hp_register_bus(struct pci_bus *bus, u8 first, u8 last) up_write(&list_rwsem); } return 0; -error_name: - kfree(name); error_info: kfree(info); error_hpslot: @@ -327,7 +313,7 @@ cpci_hp_unregister_bus(struct pci_bus *bus) list_del(&slot->slot_list); slots--; - dbg("deregistering slot %s", slot->hotplug_slot->name); + dbg("deregistering slot %s", slot_name(slot)); status = pci_hp_deregister(slot->hotplug_slot); if (status) { err("pci_hp_deregister failed with error %d", @@ -379,11 +365,10 @@ init_slots(int clear_ins) return -1; } list_for_each_entry(slot, &slot_list, slot_list) { - dbg("%s - looking at slot %s", - __func__, slot->hotplug_slot->name); + dbg("%s - looking at slot %s", __func__, slot_name(slot)); if (clear_ins && cpci_check_and_clear_ins(slot)) dbg("%s - cleared INS for slot %s", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); dev = pci_get_slot(slot->bus, PCI_DEVFN(slot->number, 0)); if (dev) { if (update_adapter_status(slot->hotplug_slot, 1)) @@ -414,8 +399,7 @@ check_slots(void) } extracted = inserted = 0; list_for_each_entry(slot, &slot_list, slot_list) { - dbg("%s - looking at slot %s", - __func__, slot->hotplug_slot->name); + dbg("%s - looking at slot %s", __func__, slot_name(slot)); if (cpci_check_and_clear_ins(slot)) { /* * Some broken hardware (e.g. 
PLX 9054AB) asserts @@ -423,35 +407,34 @@ check_slots(void) */ if (slot->dev) { warn("slot %s already inserted", - slot->hotplug_slot->name); + slot_name(slot)); inserted++; continue; } /* Process insertion */ - dbg("%s - slot %s inserted", - __func__, slot->hotplug_slot->name); + dbg("%s - slot %s inserted", __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (1) = %04x", - __func__, slot->hotplug_slot->name, hs_csr); + __func__, slot_name(slot), hs_csr); /* Configure device */ dbg("%s - configuring slot %s", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); if (cpci_configure_slot(slot)) { err("%s - could not configure slot %s", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); continue; } dbg("%s - finished configuring slot %s", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (2) = %04x", - __func__, slot->hotplug_slot->name, hs_csr); + __func__, slot_name(slot), hs_csr); if (update_latch_status(slot->hotplug_slot, 1)) warn("failure to update latch file"); @@ -464,18 +447,18 @@ check_slots(void) /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR (3) = %04x", - __func__, slot->hotplug_slot->name, hs_csr); + __func__, slot_name(slot), hs_csr); inserted++; } else if (cpci_check_ext(slot)) { /* Process extraction request */ dbg("%s - slot %s extracted", - __func__, slot->hotplug_slot->name); + __func__, slot_name(slot)); /* GSM, debug */ hs_csr = cpci_get_hs_csr(slot); dbg("%s - slot %s HS_CSR = %04x", - __func__, slot->hotplug_slot->name, hs_csr); + __func__, slot_name(slot), hs_csr); if (!slot->extracting) { if (update_latch_status(slot->hotplug_slot, 0)) { @@ -493,7 +476,7 @@ check_slots(void) * bother trying to tell the driver or not? 
*/ err("card in slot %s was improperly removed", - slot->hotplug_slot->name); + slot_name(slot)); if (update_adapter_status(slot->hotplug_slot, 0)) warn("failure to update adapter file"); slot->extracting = 0; diff --git a/drivers/pci/hotplug/cpci_hotplug_pci.c b/drivers/pci/hotplug/cpci_hotplug_pci.c index df82b95e287..829c327cfb5 100644 --- a/drivers/pci/hotplug/cpci_hotplug_pci.c +++ b/drivers/pci/hotplug/cpci_hotplug_pci.c @@ -209,7 +209,7 @@ int cpci_led_on(struct slot* slot) hs_cap + 2, hs_csr)) { err("Could not set LOO for slot %s", - slot->hotplug_slot->name); + hotplug_slot_name(slot->hotplug_slot)); return -ENODEV; } } @@ -238,7 +238,7 @@ int cpci_led_off(struct slot* slot) hs_cap + 2, hs_csr)) { err("Could not clear LOO for slot %s", - slot->hotplug_slot->name); + hotplug_slot_name(slot->hotplug_slot)); return -ENODEV; } } diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h index b1decfa88b7..afaf8f69f73 100644 --- a/drivers/pci/hotplug/cpqphp.h +++ b/drivers/pci/hotplug/cpqphp.h @@ -449,6 +449,11 @@ extern u8 cpqhp_disk_irq; /* inline functions */ +static inline char *slot_name(struct slot *slot) +{ + return hotplug_slot_name(slot->hotplug_slot); +} + /* * return_resource * @@ -696,14 +701,6 @@ static inline int get_presence_status(struct controller *ctrl, struct slot *slot return presence_save; } -#define SLOT_NAME_SIZE 10 - -static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot) -{ - snprintf(buffer, buffer_size, "%d", slot->number); -} - - static inline int wait_for_ctrl_irq(struct controller *ctrl) { DECLARE_WAITQUEUE(wait, current); diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c index 54defec51d0..8514c3a1746 100644 --- a/drivers/pci/hotplug/cpqphp_core.c +++ b/drivers/pci/hotplug/cpqphp_core.c @@ -315,14 +315,15 @@ static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot->name); kfree(slot->hotplug_slot); kfree(slot); } +#define SLOT_NAME_SIZE 10 + static int ctrl_slot_setup(struct controller *ctrl, void __iomem *smbios_start, void __iomem *smbios_table) @@ -335,6 +336,7 @@ static int ctrl_slot_setup(struct controller *ctrl, u8 slot_number; u8 ctrl_slot; u32 tempdword; + char name[SLOT_NAME_SIZE]; void __iomem *slot_entry= NULL; int result = -ENOMEM; @@ -363,16 +365,12 @@ static int ctrl_slot_setup(struct controller *ctrl, if (!hotplug_slot->info) goto error_hpslot; hotplug_slot_info = hotplug_slot->info; - hotplug_slot->name = kmalloc(SLOT_NAME_SIZE, GFP_KERNEL); - - if (!hotplug_slot->name) - goto error_info; slot->ctrl = ctrl; slot->bus = ctrl->bus; slot->device = slot_device; slot->number = slot_number; - dbg("slot->number = %d\n", slot->number); + dbg("slot->number = %u\n", slot->number); slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9, slot_entry); @@ -418,9 +416,9 @@ static int ctrl_slot_setup(struct controller *ctrl, /* register this slot with the hotplug pci core */ hotplug_slot->release = &release_slot; hotplug_slot->private = slot; - make_slot_name(hotplug_slot->name, SLOT_NAME_SIZE, slot); + snprintf(name, SLOT_NAME_SIZE, "%u", slot->number); hotplug_slot->ops = &cpqphp_hotplug_slot_ops; - + hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot); hotplug_slot_info->attention_status = cpq_get_attention_status(ctrl, slot); @@ 
-435,11 +433,12 @@ static int ctrl_slot_setup(struct controller *ctrl, slot->number, ctrl->slot_device_offset, slot_number); result = pci_hp_register(hotplug_slot, - ctrl->pci_dev->subordinate, - slot->device); + ctrl->pci_dev->bus, + slot->device, + name); if (result) { err("pci_hp_register failed with error %d\n", result); - goto error_name; + goto error_info; } slot->next = ctrl->slot; @@ -451,8 +450,6 @@ static int ctrl_slot_setup(struct controller *ctrl, } return 0; -error_name: - kfree(hotplug_slot->name); error_info: kfree(hotplug_slot_info); error_hpslot: @@ -638,7 +635,7 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status) u8 device; u8 function; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) return -ENODEV; @@ -665,7 +662,7 @@ static int process_SI(struct hotplug_slot *hotplug_slot) u8 device; u8 function; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) return -ENODEV; @@ -697,7 +694,7 @@ static int process_SS(struct hotplug_slot *hotplug_slot) u8 device; u8 function; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1) return -ENODEV; @@ -720,7 +717,7 @@ static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value) struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); return cpqhp_hardware_test(ctrl, value); } @@ -731,7 +728,7 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = get_slot_enabled(ctrl, slot); return 0; @@ -742,7 +739,7 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = cpq_get_attention_status(ctrl, slot); return 0; @@ -753,7 +750,7 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = cpq_get_latch_status(ctrl, slot); @@ -765,7 +762,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = get_presence_status(ctrl, slot); @@ -777,7 +774,7 @@ static int get_max_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = 
%s\n", __func__, slot_name(slot)); *value = ctrl->speed_capability; @@ -789,7 +786,7 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp struct slot *slot = hotplug_slot->private; struct controller *ctrl = slot->ctrl; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + dbg("%s - physical_slot = %s\n", __func__, slot_name(slot)); *value = ctrl->speed; diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c index ef041ca91c2..a60a2529099 100644 --- a/drivers/pci/hotplug/cpqphp_ctrl.c +++ b/drivers/pci/hotplug/cpqphp_ctrl.c @@ -1139,7 +1139,7 @@ static u8 set_controller_speed(struct controller *ctrl, u8 adapter_speed, u8 hp_ for(slot = ctrl->slot; slot; slot = slot->next) { if (slot->device == (hp_slot + ctrl->slot_device_offset)) continue; - if (!slot->hotplug_slot && !slot->hotplug_slot->info) + if (!slot->hotplug_slot || !slot->hotplug_slot->info) continue; if (slot->hotplug_slot->info->adapter_status == 0) continue; diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c index 40337a06c18..3a2637a0093 100644 --- a/drivers/pci/hotplug/fakephp.c +++ b/drivers/pci/hotplug/fakephp.c @@ -66,10 +66,10 @@ struct dummy_slot { struct pci_dev *dev; struct work_struct remove_work; unsigned long removed; - char name[8]; }; static int debug; +static int dup_slots; static LIST_HEAD(slot_list); static struct workqueue_struct *dummyphp_wq; @@ -96,10 +96,13 @@ static void dummy_release(struct hotplug_slot *slot) kfree(dslot); } +#define SLOT_NAME_SIZE 8 + static int add_slot(struct pci_dev *dev) { struct dummy_slot *dslot; struct hotplug_slot *slot; + char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; static int count = 1; @@ -119,19 +122,22 @@ static int add_slot(struct pci_dev *dev) if (!dslot) goto error_info; - slot->name = dslot->name; - snprintf(slot->name, sizeof(dslot->name), "fake%d", count++); - dbg("slot->name = %s\n", slot->name); + if (dup_slots) + snprintf(name, SLOT_NAME_SIZE, "fake"); + else + snprintf(name, SLOT_NAME_SIZE, "fake%d", count++); + dbg("slot->name = %s\n", name); slot->ops = &dummy_hotplug_slot_ops; slot->release = &dummy_release; slot->private = dslot; - retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn)); + retval = pci_hp_register(slot, dev->bus, PCI_SLOT(dev->devfn), name); if (retval) { err("pci_hp_register failed with error %d\n", retval); goto error_dslot; } + dbg("slot->name = %s\n", hotplug_slot_name(slot)); dslot->slot = slot; dslot->dev = pci_dev_get(dev); list_add (&dslot->node, &slot_list); @@ -167,10 +173,11 @@ static void remove_slot(struct dummy_slot *dslot) { int retval; - dbg("removing slot %s\n", dslot->slot->name); + dbg("removing slot %s\n", hotplug_slot_name(dslot->slot)); retval = pci_hp_deregister(dslot->slot); if (retval) - err("Problem unregistering a slot %s\n", dslot->slot->name); + err("Problem unregistering a slot %s\n", + hotplug_slot_name(dslot->slot)); } /* called from the single-threaded workqueue handler to remove a slot */ @@ -308,7 +315,7 @@ static int disable_slot(struct hotplug_slot *slot) return -ENODEV; dslot = slot->private; - dbg("%s - physical_slot = %s\n", __func__, slot->name); + dbg("%s - physical_slot = %s\n", __func__, hotplug_slot_name(slot)); for (func = 7; func >= 0; func--) { dev = pci_get_slot(dslot->dev->bus, dslot->dev->devfn + func); @@ -320,15 +327,15 @@ static int disable_slot(struct hotplug_slot *slot) return -ENODEV; } + /* remove the device from the pci core */ + pci_remove_bus_device(dev); + /* queue work item to 
blow away this sysfs entry and other * parts. */ INIT_WORK(&dslot->remove_work, remove_slot_worker); queue_work(dummyphp_wq, &dslot->remove_work); - /* blow away this sysfs entry and other parts. */ - remove_slot(dslot); - pci_dev_put(dev); } return 0; @@ -373,4 +380,5 @@ MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debugging mode enabled or not"); - +module_param(dup_slots, bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(dup_slots, "Force duplicate slot names for debugging"); diff --git a/drivers/pci/hotplug/ibmphp.h b/drivers/pci/hotplug/ibmphp.h index 612d9630150..a8d391a4957 100644 --- a/drivers/pci/hotplug/ibmphp.h +++ b/drivers/pci/hotplug/ibmphp.h @@ -707,17 +707,16 @@ struct slot { u8 device; u8 number; u8 real_physical_slot_num; - char name[100]; u32 capabilities; u8 supported_speed; u8 supported_bus_mode; + u8 flag; /* this is for disable slot and polling */ + u8 ctlr_index; struct hotplug_slot *hotplug_slot; struct controller *ctrl; struct pci_func *func; u8 irq[4]; - u8 flag; /* this is for disable slot and polling */ int bit_mode; /* 0 = 32, 1 = 64 */ - u8 ctlr_index; struct bus_info *bus_on; struct list_head ibm_slot_list; u8 status; diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c index 8467d028732..c1abac8ab5c 100644 --- a/drivers/pci/hotplug/ibmphp_ebda.c +++ b/drivers/pci/hotplug/ibmphp_ebda.c @@ -123,10 +123,8 @@ static struct ebda_pci_rsrc *alloc_ebda_pci_rsrc (void) static void __init print_bus_info (void) { struct bus_info *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &bus_info_head) { - ptr = list_entry (ptr1, struct bus_info, bus_info_list); + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { debug ("%s - slot_min = %x\n", __func__, ptr->slot_min); debug ("%s - slot_max = %x\n", __func__, ptr->slot_max); debug ("%s - slot_count = %x\n", __func__, ptr->slot_count); @@ -146,10 +144,8 @@ static void __init print_bus_info (void) static void print_lo_info (void) { struct rio_detail *ptr; - struct list_head *ptr1; debug ("print_lo_info ----\n"); - list_for_each (ptr1, &rio_lo_head) { - ptr = list_entry (ptr1, struct rio_detail, rio_detail_list); + list_for_each_entry(ptr, &rio_lo_head, rio_detail_list) { debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); @@ -163,10 +159,8 @@ static void print_lo_info (void) static void print_vg_info (void) { struct rio_detail *ptr; - struct list_head *ptr1; debug ("%s ---\n", __func__); - list_for_each (ptr1, &rio_vg_head) { - ptr = list_entry (ptr1, struct rio_detail, rio_detail_list); + list_for_each_entry(ptr, &rio_vg_head, rio_detail_list) { debug ("%s - rio_node_id = %x\n", __func__, ptr->rio_node_id); debug ("%s - rio_type = %x\n", __func__, ptr->rio_type); debug ("%s - owner_id = %x\n", __func__, ptr->owner_id); @@ -180,10 +174,8 @@ static void print_vg_info (void) static void __init print_ebda_pci_rsrc (void) { struct ebda_pci_rsrc *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &ibmphp_ebda_pci_rsrc_head) { - ptr = list_entry (ptr1, struct ebda_pci_rsrc, ebda_pci_rsrc_list); + list_for_each_entry(ptr, &ibmphp_ebda_pci_rsrc_head, ebda_pci_rsrc_list) { debug ("%s - rsrc type: %x bus#: %x dev_func: %x start addr: %x end addr: %x\n", __func__, ptr->rsrc_type ,ptr->bus_num, ptr->dev_fun,ptr->start_addr, ptr->end_addr); } @@ -192,10 +184,8 @@ static void __init 
print_ebda_pci_rsrc (void) static void __init print_ibm_slot (void) { struct slot *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &ibmphp_slot_head) { - ptr = list_entry (ptr1, struct slot, ibm_slot_list); + list_for_each_entry(ptr, &ibmphp_slot_head, ibm_slot_list) { debug ("%s - slot_number: %x\n", __func__, ptr->number); } } @@ -203,10 +193,8 @@ static void __init print_ibm_slot (void) static void __init print_opt_vg (void) { struct opt_rio *ptr; - struct list_head *ptr1; debug ("%s ---\n", __func__); - list_for_each (ptr1, &opt_vg_head) { - ptr = list_entry (ptr1, struct opt_rio, opt_rio_list); + list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { debug ("%s - rio_type %x\n", __func__, ptr->rio_type); debug ("%s - chassis_num: %x\n", __func__, ptr->chassis_num); debug ("%s - first_slot_num: %x\n", __func__, ptr->first_slot_num); @@ -217,13 +205,9 @@ static void __init print_opt_vg (void) static void __init print_ebda_hpc (void) { struct controller *hpc_ptr; - struct list_head *ptr1; u16 index; - list_for_each (ptr1, &ebda_hpc_head) { - - hpc_ptr = list_entry (ptr1, struct controller, ebda_hpc_list); - + list_for_each_entry(hpc_ptr, &ebda_hpc_head, ebda_hpc_list) { for (index = 0; index < hpc_ptr->slot_count; index++) { debug ("%s - physical slot#: %x\n", __func__, hpc_ptr->slots[index].slot_num); debug ("%s - pci bus# of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_bus_num); @@ -276,7 +260,7 @@ int __init ibmphp_access_ebda (void) iounmap (io_mem); debug ("returned ebda segment: %x\n", ebda_seg); - io_mem = ioremap (ebda_seg<<4, 65000); + io_mem = ioremap(ebda_seg<<4, 1024); if (!io_mem ) return -ENOMEM; next_offset = 0x180; @@ -460,9 +444,7 @@ static int __init ebda_rio_table (void) static struct opt_rio *search_opt_vg (u8 chassis_num) { struct opt_rio *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &opt_vg_head) { - ptr = list_entry (ptr1, struct opt_rio, opt_rio_list); + list_for_each_entry(ptr, &opt_vg_head, opt_rio_list) { if (ptr->chassis_num == chassis_num) return ptr; } @@ -473,10 +455,8 @@ static int __init combine_wpg_for_chassis (void) { struct opt_rio *opt_rio_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - struct list_head *list_head_ptr = NULL; - list_for_each (list_head_ptr, &rio_vg_head) { - rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); + list_for_each_entry(rio_detail_ptr, &rio_vg_head, rio_detail_list) { opt_rio_ptr = search_opt_vg (rio_detail_ptr->chassis_num); if (!opt_rio_ptr) { opt_rio_ptr = kzalloc(sizeof(struct opt_rio), GFP_KERNEL); @@ -497,14 +477,12 @@ static int __init combine_wpg_for_chassis (void) } /* - * reorgnizing linked list of expansion box + * reorganizing linked list of expansion box */ static struct opt_rio_lo *search_opt_lo (u8 chassis_num) { struct opt_rio_lo *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &opt_lo_head) { - ptr = list_entry (ptr1, struct opt_rio_lo, opt_rio_lo_list); + list_for_each_entry(ptr, &opt_lo_head, opt_rio_lo_list) { if (ptr->chassis_num == chassis_num) return ptr; } @@ -515,10 +493,8 @@ static int combine_wpg_for_expansion (void) { struct opt_rio_lo *opt_rio_lo_ptr = NULL; struct rio_detail *rio_detail_ptr = NULL; - struct list_head *list_head_ptr = NULL; - list_for_each (list_head_ptr, &rio_lo_head) { - rio_detail_ptr = list_entry (list_head_ptr, struct rio_detail, rio_detail_list); + list_for_each_entry(rio_detail_ptr, &rio_lo_head, rio_detail_list) { opt_rio_lo_ptr = search_opt_lo (rio_detail_ptr->chassis_num); if (!opt_rio_lo_ptr) { 
opt_rio_lo_ptr = kzalloc(sizeof(struct opt_rio_lo), GFP_KERNEL); @@ -550,20 +526,17 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) { struct opt_rio *opt_vg_ptr = NULL; struct opt_rio_lo *opt_lo_ptr = NULL; - struct list_head *ptr = NULL; int rc = 0; if (!var) { - list_for_each (ptr, &opt_vg_head) { - opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list); + list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { if ((first_slot < opt_vg_ptr->first_slot_num) && (slot_num >= opt_vg_ptr->first_slot_num)) { rc = -ENODEV; break; } } } else { - list_for_each (ptr, &opt_lo_head) { - opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list); + list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { if ((first_slot < opt_lo_ptr->first_slot_num) && (slot_num >= opt_lo_ptr->first_slot_num)) { rc = -ENODEV; break; @@ -576,10 +549,8 @@ static int first_slot_num (u8 slot_num, u8 first_slot, u8 var) static struct opt_rio_lo * find_rxe_num (u8 slot_num) { struct opt_rio_lo *opt_lo_ptr; - struct list_head *ptr; - list_for_each (ptr, &opt_lo_head) { - opt_lo_ptr = list_entry (ptr, struct opt_rio_lo, opt_rio_lo_list); + list_for_each_entry(opt_lo_ptr, &opt_lo_head, opt_rio_lo_list) { //check to see if this slot_num belongs to expansion box if ((slot_num >= opt_lo_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_lo_ptr->first_slot_num, 1))) return opt_lo_ptr; @@ -590,10 +561,8 @@ static struct opt_rio_lo * find_rxe_num (u8 slot_num) static struct opt_rio * find_chassis_num (u8 slot_num) { struct opt_rio *opt_vg_ptr; - struct list_head *ptr; - list_for_each (ptr, &opt_vg_head) { - opt_vg_ptr = list_entry (ptr, struct opt_rio, opt_rio_list); + list_for_each_entry(opt_vg_ptr, &opt_vg_head, opt_rio_list) { //check to see if this slot_num belongs to chassis if ((slot_num >= opt_vg_ptr->first_slot_num) && (!first_slot_num (slot_num, opt_vg_ptr->first_slot_num, 0))) return opt_vg_ptr; @@ -607,11 +576,9 @@ static struct opt_rio * find_chassis_num (u8 slot_num) static u8 calculate_first_slot (u8 slot_num) { u8 first_slot = 1; - struct list_head * list; struct slot * slot_cur; - list_for_each (list, &ibmphp_slot_head) { - slot_cur = list_entry (list, struct slot, ibm_slot_list); + list_for_each_entry(slot_cur, &ibmphp_slot_head, ibm_slot_list) { if (slot_cur->ctrl) { if ((slot_cur->ctrl->ctlr_type != 4) && (slot_cur->ctrl->ending_slot_num > first_slot) && (slot_num > slot_cur->ctrl->ending_slot_num)) first_slot = slot_cur->ctrl->ending_slot_num; @@ -620,11 +587,14 @@ static u8 calculate_first_slot (u8 slot_num) return first_slot + 1; } + +#define SLOT_NAME_SIZE 30 + static char *create_file_name (struct slot * slot_cur) { struct opt_rio *opt_vg_ptr = NULL; struct opt_rio_lo *opt_lo_ptr = NULL; - static char str[30]; + static char str[SLOT_NAME_SIZE]; int which = 0; /* rxe = 1, chassis = 0 */ u8 number = 1; /* either chassis or rxe # */ u8 first_slot = 1; @@ -736,7 +706,6 @@ static void release_slot(struct hotplug_slot *hotplug_slot) slot = hotplug_slot->private; kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot->name); kfree(slot->hotplug_slot); slot->ctrl = NULL; slot->bus_on = NULL; @@ -767,7 +736,7 @@ static int __init ebda_rsrc_controller (void) struct bus_info *bus_info_ptr1, *bus_info_ptr2; int rc; struct slot *tmp_slot; - struct list_head *list; + char name[SLOT_NAME_SIZE]; addr = hpc_list_ptr->phys_addr; for (ctlr = 0; ctlr < hpc_list_ptr->num_ctlrs; ctlr++) { @@ -931,12 +900,6 @@ static int __init ebda_rsrc_controller (void) goto error_no_hp_info; 
} - hp_slot_ptr->name = kmalloc(30, GFP_KERNEL); - if (!hp_slot_ptr->name) { - rc = -ENOMEM; - goto error_no_hp_name; - } - tmp_slot = kzalloc(sizeof(*tmp_slot), GFP_KERNEL); if (!tmp_slot) { rc = -ENOMEM; @@ -997,12 +960,10 @@ static int __init ebda_rsrc_controller (void) } /* each hpc */ - list_for_each (list, &ibmphp_slot_head) { - tmp_slot = list_entry (list, struct slot, ibm_slot_list); - - snprintf (tmp_slot->hotplug_slot->name, 30, "%s", create_file_name (tmp_slot)); + list_for_each_entry(tmp_slot, &ibmphp_slot_head, ibm_slot_list) { + snprintf(name, SLOT_NAME_SIZE, "%s", create_file_name(tmp_slot)); pci_hp_register(tmp_slot->hotplug_slot, - pci_find_bus(0, tmp_slot->bus), tmp_slot->device); + pci_find_bus(0, tmp_slot->bus), tmp_slot->device, name); } print_ebda_hpc (); @@ -1012,8 +973,6 @@ static int __init ebda_rsrc_controller (void) error: kfree (hp_slot_ptr->private); error_no_slot: - kfree (hp_slot_ptr->name); -error_no_hp_name: kfree (hp_slot_ptr->info); error_no_hp_info: kfree (hp_slot_ptr); @@ -1101,10 +1060,8 @@ u16 ibmphp_get_total_controllers (void) struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) { struct slot *slot; - struct list_head *list; - list_for_each (list, &ibmphp_slot_head) { - slot = list_entry (list, struct slot, ibm_slot_list); + list_for_each_entry(slot, &ibmphp_slot_head, ibm_slot_list) { if (slot->number == physical_num) return slot; } @@ -1120,10 +1077,8 @@ struct slot *ibmphp_get_slot_from_physical_num (u8 physical_num) struct bus_info *ibmphp_find_same_bus_num (u32 num) { struct bus_info *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &bus_info_head) { - ptr = list_entry (ptr1, struct bus_info, bus_info_list); + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { if (ptr->busno == num) return ptr; } @@ -1136,10 +1091,8 @@ struct bus_info *ibmphp_find_same_bus_num (u32 num) int ibmphp_get_bus_index (u8 num) { struct bus_info *ptr; - struct list_head *ptr1; - list_for_each (ptr1, &bus_info_head) { - ptr = list_entry (ptr1, struct bus_info, bus_info_list); + list_for_each_entry(ptr, &bus_info_head, bus_info_list) { if (ptr->busno == num) return ptr->index; } @@ -1212,11 +1165,9 @@ static struct pci_driver ibmphp_driver = { int ibmphp_register_pci (void) { struct controller *ctrl; - struct list_head *tmp; int rc = 0; - list_for_each (tmp, &ebda_hpc_head) { - ctrl = list_entry (tmp, struct controller, ebda_hpc_list); + list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { rc = pci_register_driver(&ibmphp_driver); break; @@ -1227,12 +1178,10 @@ int ibmphp_register_pci (void) static int ibmphp_probe (struct pci_dev * dev, const struct pci_device_id *ids) { struct controller *ctrl; - struct list_head *tmp; debug ("inside ibmphp_probe\n"); - list_for_each (tmp, &ebda_hpc_head) { - ctrl = list_entry (tmp, struct controller, ebda_hpc_list); + list_for_each_entry(ctrl, &ebda_hpc_head, ebda_hpc_list) { if (ctrl->ctlr_type == 1) { if ((dev->devfn == ctrl->u.pci_ctlr.dev_fun) && (dev->bus->number == ctrl->u.pci_ctlr.bus)) { ctrl->ctrl_dev = dev; diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c index 5f85b1b120e..535fce0f07f 100644 --- a/drivers/pci/hotplug/pci_hotplug_core.c +++ b/drivers/pci/hotplug/pci_hotplug_core.c @@ -37,6 +37,7 @@ #include <linux/init.h> #include <linux/mount.h> #include <linux/namei.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <asm/uaccess.h> @@ -61,7 +62,7 @@ static int debug; 
////////////////////////////////////////////////////////////////// static LIST_HEAD(pci_hotplug_slot_list); -static DEFINE_SPINLOCK(pci_hotplug_slot_list_lock); +static DEFINE_MUTEX(pci_hp_mutex); /* these strings match up with the values in pci_bus_speed */ static char *pci_bus_speed_strings[] = { @@ -102,13 +103,13 @@ static int get_##name (struct hotplug_slot *slot, type *value) \ { \ struct hotplug_slot_ops *ops = slot->ops; \ int retval = 0; \ - if (try_module_get(ops->owner)) { \ - if (ops->get_##name) \ - retval = ops->get_##name(slot, value); \ - else \ - *value = slot->info->name; \ - module_put(ops->owner); \ - } \ + if (!try_module_get(ops->owner)) \ + return -ENODEV; \ + if (ops->get_##name) \ + retval = ops->get_##name(slot, value); \ + else \ + *value = slot->info->name; \ + module_put(ops->owner); \ return retval; \ } @@ -530,16 +531,12 @@ static struct hotplug_slot *get_slot_from_name (const char *name) struct hotplug_slot *slot; struct list_head *tmp; - spin_lock(&pci_hotplug_slot_list_lock); list_for_each (tmp, &pci_hotplug_slot_list) { slot = list_entry (tmp, struct hotplug_slot, slot_list); - if (strcmp(slot->name, name) == 0) - goto out; + if (strcmp(hotplug_slot_name(slot), name) == 0) + return slot; } - slot = NULL; -out: - spin_unlock(&pci_hotplug_slot_list_lock); - return slot; + return NULL; } /** @@ -547,13 +544,15 @@ out: * @bus: bus this slot is on * @slot: pointer to the &struct hotplug_slot to register * @slot_nr: slot number + * @name: name registered with kobject core * * Registers a hotplug slot with the pci hotplug subsystem, which will allow * userspace interaction to the slot. * * Returns 0 if successful, anything else for an error. */ -int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) +int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr, + const char *name) { int result; struct pci_slot *pci_slot; @@ -568,48 +567,29 @@ int pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus, int slot_nr) return -EINVAL; } - /* Check if we have already registered a slot with the same name. */ - if (get_slot_from_name(slot->name)) - return -EEXIST; + mutex_lock(&pci_hp_mutex); /* * No problems if we call this interface from both ACPI_PCI_SLOT * driver and call it here again. If we've already created the * pci_slot, the interface will simply bump the refcount. */ - pci_slot = pci_create_slot(bus, slot_nr, slot->name); - if (IS_ERR(pci_slot)) - return PTR_ERR(pci_slot); - - if (pci_slot->hotplug) { - dbg("%s: already claimed\n", __func__); - pci_destroy_slot(pci_slot); - return -EBUSY; + pci_slot = pci_create_slot(bus, slot_nr, name, slot); + if (IS_ERR(pci_slot)) { + result = PTR_ERR(pci_slot); + goto out; } slot->pci_slot = pci_slot; pci_slot->hotplug = slot; - /* - * Allow pcihp drivers to override the ACPI_PCI_SLOT name. 
- */ - if (strcmp(kobject_name(&pci_slot->kobj), slot->name)) { - result = kobject_rename(&pci_slot->kobj, slot->name); - if (result) { - pci_destroy_slot(pci_slot); - return result; - } - } - - spin_lock(&pci_hotplug_slot_list_lock); list_add(&slot->slot_list, &pci_hotplug_slot_list); - spin_unlock(&pci_hotplug_slot_list_lock); result = fs_add_slot(pci_slot); kobject_uevent(&pci_slot->kobj, KOBJ_ADD); - dbg("Added slot %s to the list\n", slot->name); - - + dbg("Added slot %s to the list\n", name); +out: + mutex_unlock(&pci_hp_mutex); return result; } @@ -630,21 +610,23 @@ int pci_hp_deregister(struct hotplug_slot *hotplug) if (!hotplug) return -ENODEV; - temp = get_slot_from_name(hotplug->name); - if (temp != hotplug) + mutex_lock(&pci_hp_mutex); + temp = get_slot_from_name(hotplug_slot_name(hotplug)); + if (temp != hotplug) { + mutex_unlock(&pci_hp_mutex); return -ENODEV; + } - spin_lock(&pci_hotplug_slot_list_lock); list_del(&hotplug->slot_list); - spin_unlock(&pci_hotplug_slot_list_lock); slot = hotplug->pci_slot; fs_remove_slot(slot); - dbg("Removed slot %s from the list\n", hotplug->name); + dbg("Removed slot %s from the list\n", hotplug_slot_name(hotplug)); hotplug->release(hotplug); slot->hotplug = NULL; pci_destroy_slot(slot); + mutex_unlock(&pci_hp_mutex); return 0; } diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h index e3a1e7e7dba..b2801a7ee37 100644 --- a/drivers/pci/hotplug/pciehp.h +++ b/drivers/pci/hotplug/pciehp.h @@ -43,7 +43,6 @@ extern int pciehp_poll_mode; extern int pciehp_poll_time; extern int pciehp_debug; extern int pciehp_force; -extern int pciehp_slot_with_bus; extern struct workqueue_struct *pciehp_wq; #define dbg(format, arg...) \ @@ -58,19 +57,30 @@ extern struct workqueue_struct *pciehp_wq; #define warn(format, arg...) \ printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) +#define ctrl_dbg(ctrl, format, arg...) \ + do { \ + if (pciehp_debug) \ + dev_printk(, &ctrl->pcie->device, \ + format, ## arg); \ + } while (0) +#define ctrl_err(ctrl, format, arg...) \ + dev_err(&ctrl->pcie->device, format, ## arg) +#define ctrl_info(ctrl, format, arg...) \ + dev_info(&ctrl->pcie->device, format, ## arg) +#define ctrl_warn(ctrl, format, arg...) 
\ + dev_warn(&ctrl->pcie->device, format, ## arg) + #define SLOT_NAME_SIZE 10 struct slot { u8 bus; u8 device; - u32 number; u8 state; - struct timer_list task_event; u8 hp_slot; + u32 number; struct controller *ctrl; struct hpc_ops *hpc_ops; struct hotplug_slot *hotplug_slot; struct list_head slot_list; - char name[SLOT_NAME_SIZE]; unsigned long last_emi_toggle; struct delayed_work work; /* work for button event */ struct mutex lock; @@ -88,6 +98,7 @@ struct controller { int num_slots; /* Number of slots on ctlr */ int slot_num_inc; /* 1 or -1 */ struct pci_dev *pci_dev; + struct pcie_device *pcie; /* PCI Express port service */ struct list_head slot_list; struct hpc_ops *hpc_ops; wait_queue_head_t queue; /* sleep & wake process */ @@ -99,6 +110,7 @@ struct controller { struct timer_list poll_timer; int cmd_busy; unsigned int no_cmd_complete:1; + unsigned int link_active_reporting:1; }; #define INT_BUTTON_IGNORE 0 @@ -162,6 +174,11 @@ int pciehp_enable_slot(struct slot *p_slot); int pciehp_disable_slot(struct slot *p_slot); int pcie_enable_notification(struct controller *ctrl); +static inline const char *slot_name(struct slot *slot) +{ + return hotplug_slot_name(slot->hotplug_slot); +} + static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) { struct slot *slot; @@ -171,7 +188,7 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device) return slot; } - err("%s: slot (device=0x%x) not found\n", __func__, device); + ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device); return NULL; } diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c index 3677495c4f9..4b23bc39b11 100644 --- a/drivers/pci/hotplug/pciehp_core.c +++ b/drivers/pci/hotplug/pciehp_core.c @@ -41,7 +41,6 @@ int pciehp_debug; int pciehp_poll_mode; int pciehp_poll_time; int pciehp_force; -int pciehp_slot_with_bus; struct workqueue_struct *pciehp_wq; #define DRIVER_VERSION "0.4" @@ -56,12 +55,10 @@ module_param(pciehp_debug, bool, 0644); module_param(pciehp_poll_mode, bool, 0644); module_param(pciehp_poll_time, int, 0644); module_param(pciehp_force, bool, 0644); -module_param(pciehp_slot_with_bus, bool, 0644); MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds"); MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing"); -MODULE_PARM_DESC(pciehp_slot_with_bus, "Use bus number in the slot name"); #define PCIE_MODULE_NAME "pciehp" @@ -147,9 +144,10 @@ set_lock_exit: * sysfs interface which allows the user to toggle the Electro Mechanical * Interlock. Valid values are either 0 or 1. 
0 == unlock, 1 == lock */ -static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, - size_t count) +static ssize_t lock_write_file(struct hotplug_slot *hotplug_slot, + const char *buf, size_t count) { + struct slot *slot = hotplug_slot->private; unsigned long llock; u8 lock; int retval = 0; @@ -160,10 +158,11 @@ static ssize_t lock_write_file(struct hotplug_slot *slot, const char *buf, switch (lock) { case 0: case 1: - retval = set_lock_status(slot, lock); + retval = set_lock_status(hotplug_slot, lock); break; default: - err ("%d is an invalid lock value\n", lock); + ctrl_err(slot->ctrl, "%d is an invalid lock value\n", + lock); retval = -EINVAL; } if (retval) @@ -183,7 +182,10 @@ static struct hotplug_slot_attribute hotplug_slot_attr_lock = { */ static void release_slot(struct hotplug_slot *hotplug_slot) { - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + struct slot *slot = hotplug_slot->private; + + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, hotplug_slot_name(hotplug_slot)); kfree(hotplug_slot->info); kfree(hotplug_slot); @@ -194,6 +196,7 @@ static int init_slots(struct controller *ctrl) struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; + char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; list_for_each_entry(slot, &ctrl->slot_list, slot_list) { @@ -207,37 +210,38 @@ static int init_slots(struct controller *ctrl) /* register this slot with the hotplug pci core */ hotplug_slot->info = info; - hotplug_slot->name = slot->name; hotplug_slot->private = slot; hotplug_slot->release = &release_slot; hotplug_slot->ops = &pciehp_hotplug_slot_ops; - get_power_status(hotplug_slot, &info->power_status); - get_attention_status(hotplug_slot, &info->attention_status); - get_latch_status(hotplug_slot, &info->latch_status); - get_adapter_status(hotplug_slot, &info->adapter_status); slot->hotplug_slot = hotplug_slot; + snprintf(name, SLOT_NAME_SIZE, "%u", slot->number); - dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " - "slot_device_offset=%x\n", slot->bus, slot->device, - slot->hp_slot, slot->number, ctrl->slot_device_offset); + ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " + "hp_slot=%x sun=%x slot_device_offset=%x\n", + pci_domain_nr(ctrl->pci_dev->subordinate), + slot->bus, slot->device, slot->hp_slot, slot->number, + ctrl->slot_device_offset); retval = pci_hp_register(hotplug_slot, ctrl->pci_dev->subordinate, - slot->device); + slot->device, + name); if (retval) { - err("pci_hp_register failed with error %d\n", retval); - if (retval == -EEXIST) - err("Failed to register slot because of name " - "collision. 
Try \'pciehp_slot_with_bus\' " - "module option.\n"); + ctrl_err(ctrl, "pci_hp_register failed with error %d\n", + retval); goto error_info; } + get_power_status(hotplug_slot, &info->power_status); + get_attention_status(hotplug_slot, &info->attention_status); + get_latch_status(hotplug_slot, &info->latch_status); + get_adapter_status(hotplug_slot, &info->adapter_status); /* create additional sysfs entries */ if (EMI(ctrl)) { retval = sysfs_create_file(&hotplug_slot->pci_slot->kobj, &hotplug_slot_attr_lock.attr); if (retval) { pci_hp_deregister(hotplug_slot); - err("cannot create additional sysfs entries\n"); + ctrl_err(ctrl, "Cannot create additional sysfs " + "entries\n"); goto error_info; } } @@ -271,7 +275,8 @@ static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); hotplug_slot->info->attention_status = status; @@ -286,7 +291,8 @@ static int enable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); return pciehp_sysfs_enable_slot(slot); } @@ -296,7 +302,8 @@ static int disable_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); return pciehp_sysfs_disable_slot(slot); } @@ -306,7 +313,8 @@ static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_power_status(slot, value); if (retval < 0) @@ -320,7 +328,8 @@ static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_attention_status(slot, value); if (retval < 0) @@ -334,7 +343,8 @@ static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_latch_status(slot, value); if (retval < 0) @@ -348,7 +358,8 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_adapter_status(slot, value); if (retval < 0) @@ -363,7 +374,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_max_bus_speed(slot, value); if (retval < 0) @@ -377,7 +389,8 @@ static int 
get_cur_bus_speed(struct hotplug_slot *hotplug_slot, enum pci_bus_spe struct slot *slot = hotplug_slot->private; int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_cur_bus_speed(slot, value); if (retval < 0) @@ -395,14 +408,15 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ struct pci_dev *pdev = dev->port; if (pciehp_force) - dbg("Bypassing BIOS check for pciehp use on %s\n", - pci_name(pdev)); + dev_info(&dev->device, + "Bypassing BIOS check for pciehp use on %s\n", + pci_name(pdev)); else if (pciehp_get_hp_hw_control_from_firmware(pdev)) goto err_out_none; ctrl = pcie_init(dev); if (!ctrl) { - dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME); + dev_err(&dev->device, "Controller initialization failed\n"); goto err_out_none; } set_service_data(dev, ctrl); @@ -411,11 +425,10 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_ rc = init_slots(ctrl); if (rc) { if (rc == -EBUSY) - warn("%s: slot already registered by another " - "hotplug driver\n", PCIE_MODULE_NAME); + ctrl_warn(ctrl, "Slot already registered by another " + "hotplug driver\n"); else - err("%s: slot initialization failed\n", - PCIE_MODULE_NAME); + ctrl_err(ctrl, "Slot initialization failed\n"); goto err_out_release_ctlr; } @@ -454,13 +467,13 @@ static void pciehp_remove (struct pcie_device *dev) #ifdef CONFIG_PM static int pciehp_suspend (struct pcie_device *dev, pm_message_t state) { - printk("%s ENTRY\n", __func__); + dev_info(&dev->device, "%s ENTRY\n", __func__); return 0; } static int pciehp_resume (struct pcie_device *dev) { - printk("%s ENTRY\n", __func__); + dev_info(&dev->device, "%s ENTRY\n", __func__); if (pciehp_force) { struct controller *ctrl = get_service_data(dev); struct slot *t_slot; @@ -490,10 +503,9 @@ static struct pcie_port_service_id port_pci_ids[] = { { .driver_data = 0, }, { /* end: all zeroes */ } }; -static const char device_name[] = "hpdriver"; static struct pcie_port_service_driver hpdriver_portdrv = { - .name = (char *)device_name, + .name = PCIE_MODULE_NAME, .id_table = &port_pci_ids[0], .probe = pciehp_probe, @@ -513,7 +525,7 @@ static int __init pcied_init(void) dbg("pcie_port_service_register = %d\n", retval); info(DRIVER_DESC " version: " DRIVER_VERSION "\n"); if (retval) - dbg("%s: Failure to register service\n", __func__); + dbg("Failure to register service\n"); return retval; } diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c index 96a5d55a498..fead63c6b49 100644 --- a/drivers/pci/hotplug/pciehp_ctrl.c +++ b/drivers/pci/hotplug/pciehp_ctrl.c @@ -58,14 +58,15 @@ static int queue_interrupt_event(struct slot *p_slot, u32 event_type) u8 pciehp_handle_attention_button(struct slot *p_slot) { u32 event_type; + struct controller *ctrl = p_slot->ctrl; /* Attention Button Change */ - dbg("pciehp: Attention button interrupt received.\n"); + ctrl_dbg(ctrl, "Attention button interrupt received\n"); /* * Button pressed - See if need to TAKE ACTION!!! 
*/ - info("Button pressed on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot)); event_type = INT_BUTTON_PRESS; queue_interrupt_event(p_slot, event_type); @@ -77,22 +78,23 @@ u8 pciehp_handle_switch_change(struct slot *p_slot) { u8 getstatus; u32 event_type; + struct controller *ctrl = p_slot->ctrl; /* Switch Change */ - dbg("pciehp: Switch interrupt received.\n"); + ctrl_dbg(ctrl, "Switch interrupt received\n"); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (getstatus) { /* * Switch opened */ - info("Latch open on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_OPEN; } else { /* * Switch closed */ - info("Latch close on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_CLOSE; } @@ -105,9 +107,10 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) { u32 event_type; u8 presence_save; + struct controller *ctrl = p_slot->ctrl; /* Presence Change */ - dbg("pciehp: Presence/Notify input change.\n"); + ctrl_dbg(ctrl, "Presence/Notify input change\n"); /* Switch is open, assume a presence change * Save the presence state @@ -117,13 +120,14 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) /* * Card Present */ - info("Card present on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Card present on Slot(%s)\n", slot_name(p_slot)); event_type = INT_PRESENCE_ON; } else { /* * Not Present */ - info("Card not present on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Card not present on Slot(%s)\n", + slot_name(p_slot)); event_type = INT_PRESENCE_OFF; } @@ -135,23 +139,25 @@ u8 pciehp_handle_presence_change(struct slot *p_slot) u8 pciehp_handle_power_fault(struct slot *p_slot) { u32 event_type; + struct controller *ctrl = p_slot->ctrl; /* power fault */ - dbg("pciehp: Power fault interrupt received.\n"); + ctrl_dbg(ctrl, "Power fault interrupt received\n"); if ( !(p_slot->hpc_ops->query_power_fault(p_slot))) { /* * power fault Cleared */ - info("Power fault cleared on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", + slot_name(p_slot)); event_type = INT_POWER_FAULT_CLEAR; } else { /* * power fault */ - info("Power fault on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot)); event_type = INT_POWER_FAULT; - info("power fault bit %x set\n", 0); + ctrl_info(ctrl, "Power fault bit %x set\n", 0); } queue_interrupt_event(p_slot, event_type); @@ -168,8 +174,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) /* turn off slot, turn on Amber LED, turn off Green LED if supported*/ if (POWER_CTRL(ctrl)) { if (pslot->hpc_ops->power_off_slot(pslot)) { - err("%s: Issue of Slot Power Off command failed\n", - __func__); + ctrl_err(ctrl, + "Issue of Slot Power Off command failed\n"); return; } } @@ -186,8 +192,8 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot) if (ATTN_LED(ctrl)) { if (pslot->hpc_ops->set_attention_status(pslot, 1)) { - err("%s: Issue of Set Attention Led command failed\n", - __func__); + ctrl_err(ctrl, + "Issue of Set Attention Led command failed\n"); return; } } @@ -204,10 +210,11 @@ static int board_added(struct slot *p_slot) { int retval = 0; struct controller *ctrl = p_slot->ctrl; + struct pci_bus *parent = ctrl->pci_dev->subordinate; - dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n", - __func__, p_slot->device, - ctrl->slot_device_offset, p_slot->hp_slot); + 
ctrl_dbg(ctrl, "%s: slot device, slot offset, hp slot = %d, %d, %d\n", + __func__, p_slot->device, ctrl->slot_device_offset, + p_slot->hp_slot); if (POWER_CTRL(ctrl)) { /* Power on slot */ @@ -219,28 +226,25 @@ static int board_added(struct slot *p_slot) if (PWR_LED(ctrl)) p_slot->hpc_ops->green_led_blink(p_slot); - /* Wait for ~1 second */ - msleep(1000); - /* Check link training status */ retval = p_slot->hpc_ops->check_lnk_status(ctrl); if (retval) { - err("%s: Failed to check link status\n", __func__); + ctrl_err(ctrl, "Failed to check link status\n"); set_slot_off(ctrl, p_slot); return retval; } /* Check for a power fault */ if (p_slot->hpc_ops->query_power_fault(p_slot)) { - dbg("%s: power fault detected\n", __func__); + ctrl_dbg(ctrl, "Power fault detected\n"); retval = POWER_FAILURE; goto err_exit; } retval = pciehp_configure_device(p_slot); if (retval) { - err("Cannot add device 0x%x:%x\n", p_slot->bus, - p_slot->device); + ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", + pci_domain_nr(parent), p_slot->bus, p_slot->device); goto err_exit; } @@ -272,14 +276,14 @@ static int remove_board(struct slot *p_slot) if (retval) return retval; - dbg("In %s, hp_slot = %d\n", __func__, p_slot->hp_slot); + ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, p_slot->hp_slot); if (POWER_CTRL(ctrl)) { /* power off slot */ retval = p_slot->hpc_ops->power_off_slot(p_slot); if (retval) { - err("%s: Issue of Slot Disable command failed\n", - __func__); + ctrl_err(ctrl, + "Issue of Slot Disable command failed\n"); return retval; } } @@ -320,8 +324,10 @@ static void pciehp_power_thread(struct work_struct *work) switch (p_slot->state) { case POWEROFF_STATE: mutex_unlock(&p_slot->lock); - dbg("%s: disabling bus:device(%x:%x)\n", - __func__, p_slot->bus, p_slot->device); + ctrl_dbg(p_slot->ctrl, + "Disabling domain:bus:device=%04x:%02x:%02x\n", + pci_domain_nr(p_slot->ctrl->pci_dev->subordinate), + p_slot->bus, p_slot->device); pciehp_disable_slot(p_slot); mutex_lock(&p_slot->lock); p_slot->state = STATIC_STATE; @@ -349,7 +355,8 @@ void pciehp_queue_pushbutton_work(struct work_struct *work) info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { - err("%s: Cannot allocate memory\n", __func__); + ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", + __func__); return; } info->p_slot = p_slot; @@ -403,12 +410,14 @@ static void handle_button_press_event(struct slot *p_slot) p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; - info("PCI slot #%s - powering off due to button " - "press.\n", p_slot->name); + ctrl_info(ctrl, + "PCI slot #%s - powering off due to button " + "press.\n", slot_name(p_slot)); } else { p_slot->state = BLINKINGON_STATE; - info("PCI slot #%s - powering on due to button " - "press.\n", p_slot->name); + ctrl_info(ctrl, + "PCI slot #%s - powering on due to button " + "press.\n", slot_name(p_slot)); } /* blink green LED and turn off amber */ if (PWR_LED(ctrl)) @@ -425,8 +434,7 @@ static void handle_button_press_event(struct slot *p_slot) * press the attention again before the 5 sec. 
limit * expires to cancel hot-add or hot-remove */ - info("Button cancel on Slot(%s)\n", p_slot->name); - dbg("%s: button cancel\n", __func__); + ctrl_info(ctrl, "Button cancel on Slot(%s)\n", slot_name(p_slot)); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) { if (PWR_LED(ctrl)) @@ -437,8 +445,8 @@ static void handle_button_press_event(struct slot *p_slot) } if (ATTN_LED(ctrl)) p_slot->hpc_ops->set_attention_status(p_slot, 0); - info("PCI slot #%s - action canceled due to button press\n", - p_slot->name); + ctrl_info(ctrl, "PCI slot #%s - action canceled " + "due to button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: @@ -448,11 +456,11 @@ static void handle_button_press_event(struct slot *p_slot) * this means that the previous attention button action * to hot-add or hot-remove is undergoing */ - info("Button ignore on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Button ignore on Slot(%s)\n", slot_name(p_slot)); update_slot_info(p_slot); break; default: - warn("Not a valid state\n"); + ctrl_warn(ctrl, "Not a valid state\n"); break; } } @@ -467,7 +475,8 @@ static void handle_surprise_event(struct slot *p_slot) info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { - err("%s: Cannot allocate memory\n", __func__); + ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", + __func__); return; } info->p_slot = p_slot; @@ -505,7 +514,7 @@ static void interrupt_event_handler(struct work_struct *work) case INT_PRESENCE_OFF: if (!HP_SUPR_RM(ctrl)) break; - dbg("Surprise Removal\n"); + ctrl_dbg(ctrl, "Surprise Removal\n"); update_slot_info(p_slot); handle_surprise_event(p_slot); break; @@ -522,22 +531,22 @@ int pciehp_enable_slot(struct slot *p_slot) { u8 getstatus = 0; int rc; + struct controller *ctrl = p_slot->ctrl; /* Check to see if (latch closed, card present, power off) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { - info("%s: no adapter on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } if (MRL_SENS(p_slot->ctrl)) { rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { - info("%s: latch open on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "Latch open on slot(%s)\n", + slot_name(p_slot)); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } @@ -546,8 +555,8 @@ int pciehp_enable_slot(struct slot *p_slot) if (POWER_CTRL(p_slot->ctrl)) { rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { - info("%s: already enabled on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "Already enabled on slot(%s)\n", + slot_name(p_slot)); mutex_unlock(&p_slot->ctrl->crit_sect); return -EINVAL; } @@ -571,6 +580,7 @@ int pciehp_disable_slot(struct slot *p_slot) { u8 getstatus = 0; int ret = 0; + struct controller *ctrl = p_slot->ctrl; if (!p_slot->ctrl) return 1; @@ -581,8 +591,8 @@ int pciehp_disable_slot(struct slot *p_slot) if (!HP_SUPR_RM(p_slot->ctrl)) { ret = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (ret || !getstatus) { - info("%s: no adapter on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "No adapter on slot(%s)\n", + slot_name(p_slot)); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } @@ -591,8 +601,8 @@ int pciehp_disable_slot(struct slot *p_slot) if (MRL_SENS(p_slot->ctrl)) { ret = 
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (ret || getstatus) { - info("%s: latch open on slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "Latch open on slot(%s)\n", + slot_name(p_slot)); mutex_unlock(&p_slot->ctrl->crit_sect); return -ENODEV; } @@ -601,8 +611,8 @@ int pciehp_disable_slot(struct slot *p_slot) if (POWER_CTRL(p_slot->ctrl)) { ret = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (ret || !getstatus) { - info("%s: already disabled slot(%s)\n", __func__, - p_slot->name); + ctrl_info(ctrl, "Already disabled on slot(%s)\n", + slot_name(p_slot)); mutex_unlock(&p_slot->ctrl->crit_sect); return -EINVAL; } @@ -618,6 +628,7 @@ int pciehp_disable_slot(struct slot *p_slot) int pciehp_sysfs_enable_slot(struct slot *p_slot) { int retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -631,15 +642,17 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) p_slot->state = STATIC_STATE; break; case POWERON_STATE: - info("Slot %s is already in powering on state\n", - p_slot->name); + ctrl_info(ctrl, "Slot %s is already in powering on state\n", + slot_name(p_slot)); break; case BLINKINGOFF_STATE: case POWEROFF_STATE: - info("Already enabled on slot %s\n", p_slot->name); + ctrl_info(ctrl, "Already enabled on slot %s\n", + slot_name(p_slot)); break; default: - err("Not a valid state on slot %s\n", p_slot->name); + ctrl_err(ctrl, "Not a valid state on slot %s\n", + slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); @@ -650,6 +663,7 @@ int pciehp_sysfs_enable_slot(struct slot *p_slot) int pciehp_sysfs_disable_slot(struct slot *p_slot) { int retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -663,15 +677,17 @@ int pciehp_sysfs_disable_slot(struct slot *p_slot) p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: - info("Slot %s is already in powering off state\n", - p_slot->name); + ctrl_info(ctrl, "Slot %s is already in powering off state\n", + slot_name(p_slot)); break; case BLINKINGON_STATE: case POWERON_STATE: - info("Already disabled on slot %s\n", p_slot->name); + ctrl_info(ctrl, "Already disabled on slot %s\n", + slot_name(p_slot)); break; default: - err("Not a valid state on slot %s\n", p_slot->name); + ctrl_err(ctrl, "Not a valid state on slot %s\n", + slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c index 1323a43285d..b643ca13e4f 100644 --- a/drivers/pci/hotplug/pciehp_hpc.c +++ b/drivers/pci/hotplug/pciehp_hpc.c @@ -125,6 +125,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) /* Field definitions in Link Capabilities Register */ #define MAX_LNK_SPEED 0x000F #define MAX_LNK_WIDTH 0x03F0 +#define LINK_ACTIVE_REPORTING 0x00100000 /* Link Width Encoding */ #define LNK_X1 0x01 @@ -141,6 +142,7 @@ static inline int pciehp_writel(struct controller *ctrl, int reg, u32 value) #define LNK_TRN_ERR 0x0400 #define LNK_TRN 0x0800 #define SLOT_CLK_CONF 0x1000 +#define LINK_ACTIVE 0x2000 /* Field definitions in Slot Capabilities Register */ #define ATTN_BUTTN_PRSN 0x00000001 @@ -223,7 +225,7 @@ static void start_int_poll_timer(struct controller *ctrl, int sec) static inline int pciehp_request_irq(struct controller *ctrl) { - int retval, irq = ctrl->pci_dev->irq; + int retval, irq = ctrl->pcie->irq; /* Install interrupt polling timer. 
Start with 10 sec delay */ if (pciehp_poll_mode) { @@ -235,7 +237,8 @@ static inline int pciehp_request_irq(struct controller *ctrl) /* Installs the interrupt handler */ retval = request_irq(irq, pcie_isr, IRQF_SHARED, MY_NAME, ctrl); if (retval) - err("Cannot get irq %d for the hotplug controller\n", irq); + ctrl_err(ctrl, "Cannot get irq %d for the hotplug controller\n", + irq); return retval; } @@ -244,7 +247,7 @@ static inline void pciehp_free_irq(struct controller *ctrl) if (pciehp_poll_mode) del_timer_sync(&ctrl->poll_timer); else - free_irq(ctrl->pci_dev->irq, ctrl); + free_irq(ctrl->pcie->irq, ctrl); } static int pcie_poll_cmd(struct controller *ctrl) @@ -258,7 +261,7 @@ static int pcie_poll_cmd(struct controller *ctrl) return 1; } } - while (timeout > 1000) { + while (timeout > 0) { msleep(10); timeout -= 10; if (!pciehp_readw(ctrl, SLOTSTATUS, &slot_status)) { @@ -282,7 +285,7 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll) else rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout); if (!rc) - dbg("Command not completed in 1000 msec\n"); + ctrl_dbg(ctrl, "Command not completed in 1000 msec\n"); } /** @@ -301,7 +304,8 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); goto out; } @@ -312,26 +316,25 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) * proceed forward to issue the next command according * to spec. Just print out the error message. */ - dbg("%s: CMD_COMPLETED not clear after 1 sec.\n", - __func__); + ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n"); } else if (!NO_CMD_CMPL(ctrl)) { /* * This controller semms to notify of command completed * event even though it supports none of power * controller, attention led, power led and EMI. */ - dbg("%s: Unexpected CMD_COMPLETED. Need to wait for " - "command completed event.\n", __func__); + ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to " + "wait for command completed event.\n"); ctrl->no_cmd_complete = 0; } else { - dbg("%s: Unexpected CMD_COMPLETED. Maybe the " - "controller is broken.\n", __func__); + ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe " + "the controller is broken.\n"); } } retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); if (retval) { - err("%s: Cannot read SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); goto out; } @@ -341,7 +344,7 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) smp_mb(); retval = pciehp_writew(ctrl, SLOTCTRL, slot_ctrl); if (retval) - err("%s: Cannot write to SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "Cannot write to SLOTCTRL register\n"); /* * Wait for command completion. 
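Illustrative aside, not part of the commit: the pcie_poll_cmd()/pcie_write_cmd() hunks above keep the controller's command protocol as is. The driver writes the new command bits to Slot Control, then waits for the Command Completed bit in Slot Status, polling in 10 ms steps for at most 1000 ms (the @@ -258 hunk fixes the loop bound from "timeout > 1000", which never iterated, to "timeout > 0"). A stand-alone user-space sketch of that poll-until-timeout pattern, with stub register accessors standing in for pciehp_readw()/pciehp_writew(), might look like this:

/*
 * Minimal user-space model of the "issue command, poll for completion"
 * pattern used by pcie_poll_cmd() above. The register accessors are
 * stubs; the real driver reads and writes the PCIe Slot Status register.
 */
#include <stdio.h>
#include <stdint.h>
#include <unistd.h>

#define CMD_COMPLETED 0x0010            /* stand-in for the Slot Status bit */

static uint16_t slot_status;            /* fake Slot Status register */

static uint16_t read_slot_status(void)   { return slot_status; }
static void write_slot_status(uint16_t v) { slot_status &= ~v; } /* RW1C: writing 1 clears */

/* Poll for Command Completed, 10 ms at a time, for up to 1000 ms. */
static int poll_cmd_completed(void)
{
	int timeout = 1000;

	while (timeout > 0) {
		usleep(10 * 1000);
		timeout -= 10;
		if (read_slot_status() & CMD_COMPLETED) {
			write_slot_status(CMD_COMPLETED); /* acknowledge */
			return 1;
		}
	}
	return 0;       /* timed out, mirrors the in-kernel dbg path */
}

int main(void)
{
	slot_status = CMD_COMPLETED;    /* pretend the hardware completed at once */
	printf("completed: %d\n", poll_cmd_completed());
	return 0;
}

The kernel code additionally wakes waiters on ctrl->queue from the interrupt handler when not in pciehp_poll_mode; this sketch only models the polling path.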
@@ -363,21 +366,62 @@ static int pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask) return retval; } +static inline int check_link_active(struct controller *ctrl) +{ + u16 link_status; + + if (pciehp_readw(ctrl, LNKSTATUS, &link_status)) + return 0; + return !!(link_status & LINK_ACTIVE); +} + +static void pcie_wait_link_active(struct controller *ctrl) +{ + int timeout = 1000; + + if (check_link_active(ctrl)) + return; + while (timeout > 0) { + msleep(10); + timeout -= 10; + if (check_link_active(ctrl)) + return; + } + ctrl_dbg(ctrl, "Data Link Layer Link Active not set in 1000 msec\n"); +} + static int hpc_check_lnk_status(struct controller *ctrl) { u16 lnk_status; int retval = 0; + /* + * Data Link Layer Link Active Reporting must be capable for + * hot-plug capable downstream port. But old controller might + * not implement it. In this case, we wait for 1000 ms. + */ + if (ctrl->link_active_reporting){ + /* Wait for Data Link Layer Link Active bit to be set */ + pcie_wait_link_active(ctrl); + /* + * We must wait for 100 ms after the Data Link Layer + * Link Active bit reads 1b before initiating a + * configuration access to the hot added device. + */ + msleep(100); + } else + msleep(1000); + retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); if (retval) { - err("%s: Cannot read LNKSTATUS register\n", __func__); + ctrl_err(ctrl, "Cannot read LNKSTATUS register\n"); return retval; } - dbg("%s: lnk_status = %x\n", __func__, lnk_status); + ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status); if ( (lnk_status & LNK_TRN) || (lnk_status & LNK_TRN_ERR) || !(lnk_status & NEG_LINK_WD)) { - err("%s : Link Training Error occurs \n", __func__); + ctrl_err(ctrl, "Link Training Error occurs \n"); retval = -1; return retval; } @@ -394,12 +438,12 @@ static int hpc_get_attention_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); if (retval) { - err("%s: Cannot read SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); return retval; } - dbg("%s: SLOTCTRL %x, value read %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x, value read %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); atten_led_state = (slot_ctrl & ATTN_LED_CTRL) >> 6; @@ -433,11 +477,11 @@ static int hpc_get_power_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTCTRL, &slot_ctrl); if (retval) { - err("%s: Cannot read SLOTCTRL register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTCTRL register\n", __func__); return retval; } - dbg("%s: SLOTCTRL %x value read %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x value read %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_ctrl); pwr_state = (slot_ctrl & PWR_CTRL) >> 10; @@ -464,7 +508,8 @@ static int hpc_get_latch_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); return retval; } @@ -482,7 +527,8 @@ static int hpc_get_adapter_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); return retval; } card_state = (u8)((slot_status & PRSN_STATE) >> 6); @@ -500,7 +546,7 @@ static int hpc_query_power_fault(struct slot 
*slot) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot check for power fault\n", __func__); + ctrl_err(ctrl, "Cannot check for power fault\n"); return retval; } pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1); @@ -516,7 +562,7 @@ static int hpc_get_emi_status(struct slot *slot, u8 *status) retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s : Cannot check EMI status\n", __func__); + ctrl_err(ctrl, "Cannot check EMI status\n"); return retval; } *status = (slot_status & EMI_STATE) >> EMI_STATUS_BIT; @@ -560,8 +606,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value) return -1; } rc = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); return rc; } @@ -575,8 +621,8 @@ static void hpc_set_green_led_on(struct slot *slot) slot_cmd = 0x0100; cmd_mask = PWR_LED_CTRL; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } static void hpc_set_green_led_off(struct slot *slot) @@ -588,8 +634,8 @@ static void hpc_set_green_led_off(struct slot *slot) slot_cmd = 0x0300; cmd_mask = PWR_LED_CTRL; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } static void hpc_set_green_led_blink(struct slot *slot) @@ -601,8 +647,8 @@ static void hpc_set_green_led_blink(struct slot *slot) slot_cmd = 0x0200; cmd_mask = PWR_LED_CTRL; pcie_write_cmd(ctrl, slot_cmd, cmd_mask); - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); } static int hpc_power_on_slot(struct slot * slot) @@ -613,20 +659,22 @@ static int hpc_power_on_slot(struct slot * slot) u16 slot_status; int retval = 0; - dbg("%s: slot->hp_slot %x\n", __func__, slot->hp_slot); + ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); /* Clear sticky power-fault bit from previous power failures */ retval = pciehp_readw(ctrl, SLOTSTATUS, &slot_status); if (retval) { - err("%s: Cannot read SLOTSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS register\n", + __func__); return retval; } slot_status &= PWR_FAULT_DETECTED; if (slot_status) { retval = pciehp_writew(ctrl, SLOTSTATUS, slot_status); if (retval) { - err("%s: Cannot write to SLOTSTATUS register\n", - __func__); + ctrl_err(ctrl, + "%s: Cannot write to SLOTSTATUS register\n", + __func__); return retval; } } @@ -644,11 +692,11 @@ static int hpc_power_on_slot(struct slot * slot) retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); if (retval) { - err("%s: Write %x command failed!\n", __func__, slot_cmd); + ctrl_err(ctrl, "Write %x command failed!\n", slot_cmd); return -1; } - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); return retval; } @@ -694,7 +742,7 @@ static int hpc_power_off_slot(struct slot * slot) int retval = 0; int changed; - dbg("%s: slot->hp_slot %x\n", __func__, 
slot->hp_slot); + ctrl_dbg(ctrl, "%s: slot->hp_slot %x\n", __func__, slot->hp_slot); /* * Set Bad DLLP Mask bit in Correctable Error Mask @@ -722,12 +770,12 @@ static int hpc_power_off_slot(struct slot * slot) retval = pcie_write_cmd(ctrl, slot_cmd, cmd_mask); if (retval) { - err("%s: Write command failed!\n", __func__); + ctrl_err(ctrl, "Write command failed!\n"); retval = -1; goto out; } - dbg("%s: SLOTCTRL %x write cmd %x\n", - __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); + ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", + __func__, ctrl->cap_base + SLOTCTRL, slot_cmd); out: if (changed) pcie_unmask_bad_dllp(ctrl); @@ -749,7 +797,8 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) intr_loc = 0; do { if (pciehp_readw(ctrl, SLOTSTATUS, &detected)) { - err("%s: Cannot read SLOTSTATUS\n", __func__); + ctrl_err(ctrl, "%s: Cannot read SLOTSTATUS\n", + __func__); return IRQ_NONE; } @@ -760,12 +809,13 @@ static irqreturn_t pcie_isr(int irq, void *dev_id) if (!intr_loc) return IRQ_NONE; if (detected && pciehp_writew(ctrl, SLOTSTATUS, detected)) { - err("%s: Cannot write to SLOTSTATUS\n", __func__); + ctrl_err(ctrl, "%s: Cannot write to SLOTSTATUS\n", + __func__); return IRQ_NONE; } } while (detected); - dbg("%s: intr_loc %x\n", __FUNCTION__, intr_loc); + ctrl_dbg(ctrl, "%s: intr_loc %x\n", __func__, intr_loc); /* Check Command Complete Interrupt Pending */ if (intr_loc & CMD_COMPLETED) { @@ -807,7 +857,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); if (retval) { - err("%s: Cannot read LNKCAP register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); return retval; } @@ -821,7 +871,7 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value) } *value = lnk_speed; - dbg("Max link speed = %d\n", lnk_speed); + ctrl_dbg(ctrl, "Max link speed = %d\n", lnk_speed); return retval; } @@ -836,7 +886,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, retval = pciehp_readl(ctrl, LNKCAP, &lnk_cap); if (retval) { - err("%s: Cannot read LNKCAP register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); return retval; } @@ -871,7 +921,7 @@ static int hpc_get_max_lnk_width(struct slot *slot, } *value = lnk_wdth; - dbg("Max link width = %d\n", lnk_wdth); + ctrl_dbg(ctrl, "Max link width = %d\n", lnk_wdth); return retval; } @@ -885,7 +935,8 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); if (retval) { - err("%s: Cannot read LNKSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", + __func__); return retval; } @@ -899,7 +950,7 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value) } *value = lnk_speed; - dbg("Current link speed = %d\n", lnk_speed); + ctrl_dbg(ctrl, "Current link speed = %d\n", lnk_speed); return retval; } @@ -914,7 +965,8 @@ static int hpc_get_cur_lnk_width(struct slot *slot, retval = pciehp_readw(ctrl, LNKSTATUS, &lnk_status); if (retval) { - err("%s: Cannot read LNKSTATUS register\n", __func__); + ctrl_err(ctrl, "%s: Cannot read LNKSTATUS register\n", + __func__); return retval; } @@ -949,7 +1001,7 @@ static int hpc_get_cur_lnk_width(struct slot *slot, } *value = lnk_wdth; - dbg("Current link width = %d\n", lnk_wdth); + ctrl_dbg(ctrl, "Current link width = %d\n", lnk_wdth); return retval; } @@ -998,7 +1050,7 @@ int pcie_enable_notification(struct controller *ctrl) 
PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; if (pcie_write_cmd(ctrl, cmd, mask)) { - err("%s: Cannot enable software notification\n", __func__); + ctrl_err(ctrl, "Cannot enable software notification\n"); return -1; } return 0; @@ -1010,7 +1062,7 @@ static void pcie_disable_notification(struct controller *ctrl) mask = PRSN_DETECT_ENABLE | ATTN_BUTTN_ENABLE | MRL_DETECT_ENABLE | PWR_FAULT_DETECT_ENABLE | HP_INTR_ENABLE | CMD_CMPL_INTR_ENABLE; if (pcie_write_cmd(ctrl, 0, mask)) - warn("%s: Cannot disable software notification\n", __func__); + ctrl_warn(ctrl, "Cannot disable software notification\n"); } static int pcie_init_notification(struct controller *ctrl) @@ -1030,15 +1082,6 @@ static void pcie_shutdown_notification(struct controller *ctrl) pciehp_free_irq(ctrl); } -static void make_slot_name(struct slot *slot) -{ - if (pciehp_slot_with_bus) - snprintf(slot->name, SLOT_NAME_SIZE, "%04d_%04d", - slot->bus, slot->number); - else - snprintf(slot->name, SLOT_NAME_SIZE, "%d", slot->number); -} - static int pcie_init_slot(struct controller *ctrl) { struct slot *slot; @@ -1053,7 +1096,6 @@ static int pcie_init_slot(struct controller *ctrl) slot->device = ctrl->slot_device_offset + slot->hp_slot; slot->hpc_ops = ctrl->hpc_ops; slot->number = ctrl->first_slot; - make_slot_name(slot); mutex_init(&slot->lock); INIT_DELAYED_WORK(&slot->work, pciehp_queue_pushbutton_work); list_add(&slot->slot_list, &ctrl->slot_list); @@ -1080,58 +1122,70 @@ static inline void dbg_ctrl(struct controller *ctrl) if (!pciehp_debug) return; - dbg("Hotplug Controller:\n"); - dbg(" Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", pci_name(pdev), pdev->irq); - dbg(" Vendor ID : 0x%04x\n", pdev->vendor); - dbg(" Device ID : 0x%04x\n", pdev->device); - dbg(" Subsystem ID : 0x%04x\n", pdev->subsystem_device); - dbg(" Subsystem Vendor ID : 0x%04x\n", pdev->subsystem_vendor); - dbg(" PCIe Cap offset : 0x%02x\n", ctrl->cap_base); + ctrl_info(ctrl, "Hotplug Controller:\n"); + ctrl_info(ctrl, " Seg/Bus/Dev/Func/IRQ : %s IRQ %d\n", + pci_name(pdev), pdev->irq); + ctrl_info(ctrl, " Vendor ID : 0x%04x\n", pdev->vendor); + ctrl_info(ctrl, " Device ID : 0x%04x\n", pdev->device); + ctrl_info(ctrl, " Subsystem ID : 0x%04x\n", + pdev->subsystem_device); + ctrl_info(ctrl, " Subsystem Vendor ID : 0x%04x\n", + pdev->subsystem_vendor); + ctrl_info(ctrl, " PCIe Cap offset : 0x%02x\n", ctrl->cap_base); for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if (!pci_resource_len(pdev, i)) continue; - dbg(" PCI resource [%d] : 0x%llx@0x%llx\n", i, - (unsigned long long)pci_resource_len(pdev, i), - (unsigned long long)pci_resource_start(pdev, i)); + ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n", + i, (unsigned long long)pci_resource_len(pdev, i), + (unsigned long long)pci_resource_start(pdev, i)); } - dbg("Slot Capabilities : 0x%08x\n", ctrl->slot_cap); - dbg(" Physical Slot Number : %d\n", ctrl->first_slot); - dbg(" Attention Button : %3s\n", ATTN_BUTTN(ctrl) ? "yes" : "no"); - dbg(" Power Controller : %3s\n", POWER_CTRL(ctrl) ? "yes" : "no"); - dbg(" MRL Sensor : %3s\n", MRL_SENS(ctrl) ? "yes" : "no"); - dbg(" Attention Indicator : %3s\n", ATTN_LED(ctrl) ? "yes" : "no"); - dbg(" Power Indicator : %3s\n", PWR_LED(ctrl) ? "yes" : "no"); - dbg(" Hot-Plug Surprise : %3s\n", HP_SUPR_RM(ctrl) ? "yes" : "no"); - dbg(" EMI Present : %3s\n", EMI(ctrl) ? "yes" : "no"); - dbg(" Comamnd Completed : %3s\n", NO_CMD_CMPL(ctrl)? 
"no" : "yes"); + ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap); + ctrl_info(ctrl, " Physical Slot Number : %d\n", ctrl->first_slot); + ctrl_info(ctrl, " Attention Button : %3s\n", + ATTN_BUTTN(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Power Controller : %3s\n", + POWER_CTRL(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " MRL Sensor : %3s\n", + MRL_SENS(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Attention Indicator : %3s\n", + ATTN_LED(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Power Indicator : %3s\n", + PWR_LED(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Hot-Plug Surprise : %3s\n", + HP_SUPR_RM(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " EMI Present : %3s\n", + EMI(ctrl) ? "yes" : "no"); + ctrl_info(ctrl, " Command Completed : %3s\n", + NO_CMD_CMPL(ctrl) ? "no" : "yes"); pciehp_readw(ctrl, SLOTSTATUS, ®16); - dbg("Slot Status : 0x%04x\n", reg16); + ctrl_info(ctrl, "Slot Status : 0x%04x\n", reg16); pciehp_readw(ctrl, SLOTCTRL, ®16); - dbg("Slot Control : 0x%04x\n", reg16); + ctrl_info(ctrl, "Slot Control : 0x%04x\n", reg16); } struct controller *pcie_init(struct pcie_device *dev) { struct controller *ctrl; - u32 slot_cap; + u32 slot_cap, link_cap; struct pci_dev *pdev = dev->port; ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { - err("%s : out of memory\n", __func__); + dev_err(&dev->device, "%s: Out of memory\n", __func__); goto abort; } INIT_LIST_HEAD(&ctrl->slot_list); + ctrl->pcie = dev; ctrl->pci_dev = pdev; ctrl->cap_base = pci_find_capability(pdev, PCI_CAP_ID_EXP); if (!ctrl->cap_base) { - err("%s: Cannot find PCI Express capability\n", __func__); - goto abort; + ctrl_err(ctrl, "Cannot find PCI Express capability\n"); + goto abort_ctrl; } if (pciehp_readl(ctrl, SLOTCAP, &slot_cap)) { - err("%s: Cannot read SLOTCAP register\n", __func__); - goto abort; + ctrl_err(ctrl, "Cannot read SLOTCAP register\n"); + goto abort_ctrl; } ctrl->slot_cap = slot_cap; @@ -1153,6 +1207,16 @@ struct controller *pcie_init(struct pcie_device *dev) !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl))) ctrl->no_cmd_complete = 1; + /* Check if Data Link Layer Link Active Reporting is implemented */ + if (pciehp_readl(ctrl, LNKCAP, &link_cap)) { + ctrl_err(ctrl, "%s: Cannot read LNKCAP register\n", __func__); + goto abort_ctrl; + } + if (link_cap & LINK_ACTIVE_REPORTING) { + ctrl_dbg(ctrl, "Link Active Reporting supported\n"); + ctrl->link_active_reporting = 1; + } + /* Clear all remaining event bits in Slot Status register */ if (pciehp_writew(ctrl, SLOTSTATUS, 0x1f)) goto abort_ctrl; @@ -1170,9 +1234,9 @@ struct controller *pcie_init(struct pcie_device *dev) goto abort_ctrl; } - info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", - pdev->vendor, pdev->device, - pdev->subsystem_vendor, pdev->subsystem_device); + ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); if (pcie_init_slot(ctrl)) goto abort_ctrl; diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c index 6040dcceb25..10f9566ccee 100644 --- a/drivers/pci/hotplug/pciehp_pci.c +++ b/drivers/pci/hotplug/pciehp_pci.c @@ -39,8 +39,7 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp) u16 pci_cmd, pci_bctl; if (hpp->revision > 1) { - printk(KERN_WARNING "%s: Rev.%d type0 record not supported\n", - __func__, hpp->revision); + warn("Rev.%d type0 record not supported\n", hpp->revision); return; } @@ -81,8 +80,7 @@ static void program_hpp_type2(struct pci_dev *dev, 
struct hpp_type2 *hpp) u32 reg32; if (hpp->revision > 1) { - printk(KERN_WARNING "%s: Rev.%d type2 record not supported\n", - __func__, hpp->revision); + warn("Rev.%d type2 record not supported\n", hpp->revision); return; } @@ -149,8 +147,7 @@ static void program_fw_provided_values(struct pci_dev *dev) return; if (pciehp_get_hp_params_from_firmware(dev, &hpp)) { - printk(KERN_WARNING "%s: Could not get hotplug parameters\n", - __func__); + warn("Could not get hotplug parameters\n"); return; } @@ -198,18 +195,20 @@ int pciehp_configure_device(struct slot *p_slot) struct pci_dev *dev; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; int num, fn; + struct controller *ctrl = p_slot->ctrl; dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (dev) { - err("Device %s already exists at %x:%x, cannot hot-add\n", - pci_name(dev), p_slot->bus, p_slot->device); + ctrl_err(ctrl, "Device %s already exists " + "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), + pci_domain_nr(parent), p_slot->bus, p_slot->device); pci_dev_put(dev); return -EINVAL; } num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (num == 0) { - err("No new device found\n"); + ctrl_err(ctrl, "No new device found\n"); return -ENODEV; } @@ -218,8 +217,8 @@ int pciehp_configure_device(struct slot *p_slot) if (!dev) continue; if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { - err("Cannot hot-add display device %s\n", - pci_name(dev)); + ctrl_err(ctrl, "Cannot hot-add display device %s\n", + pci_name(dev)); pci_dev_put(dev); continue; } @@ -244,9 +243,10 @@ int pciehp_unconfigure_device(struct slot *p_slot) u8 presence = 0; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; u16 command; + struct controller *ctrl = p_slot->ctrl; - dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, - p_slot->device); + ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", + __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); ret = p_slot->hpc_ops->get_adapter_status(p_slot, &presence); if (ret) presence = 0; @@ -257,16 +257,17 @@ int pciehp_unconfigure_device(struct slot *p_slot) if (!temp) continue; if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { - err("Cannot remove display device %s\n", - pci_name(temp)); + ctrl_err(ctrl, "Cannot remove display device %s\n", + pci_name(temp)); pci_dev_put(temp); continue; } if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE && presence) { pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); if (bctl & PCI_BRIDGE_CTL_VGA) { - err("Cannot remove display device %s\n", - pci_name(temp)); + ctrl_err(ctrl, + "Cannot remove display device %s\n", + pci_name(temp)); pci_dev_put(temp); continue; } diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h index 7d5921b1ee7..419919a87b0 100644 --- a/drivers/pci/hotplug/rpaphp.h +++ b/drivers/pci/hotplug/rpaphp.h @@ -46,10 +46,10 @@ #define PRESENT 1 /* Card in slot */ #define MY_NAME "rpaphp" -extern int debug; +extern int rpaphp_debug; #define dbg(format, arg...) 
\ do { \ - if (debug) \ + if (rpaphp_debug) \ printk(KERN_DEBUG "%s: " format, \ MY_NAME , ## arg); \ } while (0) diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c index 1f84f402acd..95d02a08fdc 100644 --- a/drivers/pci/hotplug/rpaphp_core.c +++ b/drivers/pci/hotplug/rpaphp_core.c @@ -37,7 +37,7 @@ /* and pci_do_scan_bus */ #include "rpaphp.h" -int debug; +int rpaphp_debug; LIST_HEAD(rpaphp_slot_head); #define DRIVER_VERSION "0.1" @@ -50,7 +50,7 @@ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); -module_param(debug, bool, 0644); +module_param_named(debug, rpaphp_debug, bool, 0644); /** * set_attention_status - set attention LED diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c index 5acfd4f3d4c..513e1e28239 100644 --- a/drivers/pci/hotplug/rpaphp_pci.c +++ b/drivers/pci/hotplug/rpaphp_pci.c @@ -123,7 +123,7 @@ int rpaphp_enable_slot(struct slot *slot) slot->state = CONFIGURED; } - if (debug) { + if (rpaphp_debug) { struct pci_dev *dev; dbg("%s: pci_devs of slot[%s]\n", __func__, slot->dn->full_name); list_for_each_entry (dev, &bus->devices, bus_list) diff --git a/drivers/pci/hotplug/rpaphp_slot.c b/drivers/pci/hotplug/rpaphp_slot.c index 9b714ea93d2..2ea9cf1a8d0 100644 --- a/drivers/pci/hotplug/rpaphp_slot.c +++ b/drivers/pci/hotplug/rpaphp_slot.c @@ -43,7 +43,7 @@ static void rpaphp_release_slot(struct hotplug_slot *hotplug_slot) void dealloc_slot_struct(struct slot *slot) { kfree(slot->hotplug_slot->info); - kfree(slot->hotplug_slot->name); + kfree(slot->name); kfree(slot->hotplug_slot); kfree(slot); } @@ -63,11 +63,9 @@ struct slot *alloc_slot_struct(struct device_node *dn, GFP_KERNEL); if (!slot->hotplug_slot->info) goto error_hpslot; - slot->hotplug_slot->name = kmalloc(strlen(drc_name) + 1, GFP_KERNEL); - if (!slot->hotplug_slot->name) + slot->name = kstrdup(drc_name, GFP_KERNEL); + if (!slot->name) goto error_info; - slot->name = slot->hotplug_slot->name; - strcpy(slot->name, drc_name); slot->dn = dn; slot->index = drc_index; slot->power_domain = power_domain; @@ -137,7 +135,7 @@ int rpaphp_register_slot(struct slot *slot) slotno = PCI_SLOT(PCI_DN(slot->dn->child)->devfn); else slotno = -1; - retval = pci_hp_register(php_slot, slot->bus, slotno); + retval = pci_hp_register(php_slot, slot->bus, slotno, slot->name); if (retval) { err("pci_hp_register failed with error %d\n", retval); return retval; @@ -147,9 +145,5 @@ int rpaphp_register_slot(struct slot *slot) list_add(&slot->rpaphp_slot_list, &rpaphp_slot_head); info("Slot [%s] registered\n", slot->name); return 0; - -sysfs_fail: - pci_hp_deregister(php_slot); - return retval; } diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index 410fe0394a8..3eee70928d4 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c @@ -161,7 +161,8 @@ static int sn_pci_bus_valid(struct pci_bus *pci_bus) } static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, - struct pci_bus *pci_bus, int device) + struct pci_bus *pci_bus, int device, + char *name) { struct pcibus_info *pcibus_info; struct slot *slot; @@ -173,15 +174,9 @@ static int sn_hp_slot_private_alloc(struct hotplug_slot *bss_hotplug_slot, return -ENOMEM; bss_hotplug_slot->private = slot; - bss_hotplug_slot->name = kmalloc(SN_SLOT_NAME_SIZE, GFP_KERNEL); - if (!bss_hotplug_slot->name) { - kfree(bss_hotplug_slot->private); - return -ENOMEM; - } - slot->device_num = device; slot->pci_bus = pci_bus; - 
sprintf(bss_hotplug_slot->name, "%04x:%02x:%02x", + sprintf(name, "%04x:%02x:%02x", pci_domain_nr(pci_bus), ((u16)pcibus_info->pbi_buscommon.bs_persist_busnum), device + 1); @@ -418,7 +413,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) /* * Add the slot's devices to the ACPI infrastructure */ if (SN_ACPI_BASE_SUPPORT() && ssdt) { - unsigned long adr; + unsigned long long adr; struct acpi_device *pdevice; struct acpi_device *device; acpi_handle phandle; @@ -510,7 +505,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) /* free the ACPI resources for the slot */ if (SN_ACPI_BASE_SUPPORT() && PCI_CONTROLLER(slot->pci_bus)->acpi_handle) { - unsigned long adr; + unsigned long long adr; struct acpi_device *device; acpi_handle phandle; acpi_handle chandle = NULL; @@ -608,7 +603,6 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot, static void sn_release_slot(struct hotplug_slot *bss_hotplug_slot) { kfree(bss_hotplug_slot->info); - kfree(bss_hotplug_slot->name); kfree(bss_hotplug_slot->private); kfree(bss_hotplug_slot); } @@ -618,6 +612,7 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) int device; struct pci_slot *pci_slot; struct hotplug_slot *bss_hotplug_slot; + char name[SN_SLOT_NAME_SIZE]; int rc = 0; /* @@ -645,15 +640,14 @@ static int sn_hotplug_slot_register(struct pci_bus *pci_bus) } if (sn_hp_slot_private_alloc(bss_hotplug_slot, - pci_bus, device)) { + pci_bus, device, name)) { rc = -ENOMEM; goto alloc_err; } - bss_hotplug_slot->ops = &sn_hotplug_slot_ops; bss_hotplug_slot->release = &sn_release_slot; - rc = pci_hp_register(bss_hotplug_slot, pci_bus, device); + rc = pci_hp_register(bss_hotplug_slot, pci_bus, device, name); if (rc) goto register_err; diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h index 8a026f750de..6aba0b6cf2e 100644 --- a/drivers/pci/hotplug/shpchp.h +++ b/drivers/pci/hotplug/shpchp.h @@ -59,6 +59,20 @@ extern struct workqueue_struct *shpchp_wq; #define warn(format, arg...) \ printk(KERN_WARNING "%s: " format, MY_NAME , ## arg) +#define ctrl_dbg(ctrl, format, arg...) \ + do { \ + if (shpchp_debug) \ + dev_printk(, &ctrl->pci_dev->dev, \ + format, ## arg); \ + } while (0) +#define ctrl_err(ctrl, format, arg...) \ + dev_err(&ctrl->pci_dev->dev, format, ## arg) +#define ctrl_info(ctrl, format, arg...) \ + dev_info(&ctrl->pci_dev->dev, format, ## arg) +#define ctrl_warn(ctrl, format, arg...) 
\ + dev_warn(&ctrl->pci_dev->dev, format, ## arg) + + #define SLOT_NAME_SIZE 10 struct slot { u8 bus; @@ -69,15 +83,13 @@ struct slot { u8 state; u8 presence_save; u8 pwr_save; - struct timer_list task_event; - u8 hp_slot; struct controller *ctrl; struct hpc_ops *hpc_ops; struct hotplug_slot *hotplug_slot; struct list_head slot_list; - char name[SLOT_NAME_SIZE]; struct delayed_work work; /* work for button event */ struct mutex lock; + u8 hp_slot; }; struct event_info { @@ -169,6 +181,11 @@ extern void cleanup_slots(struct controller *ctrl); extern void shpchp_queue_pushbutton_work(struct work_struct *work); extern int shpc_init( struct controller *ctrl, struct pci_dev *pdev); +static inline const char *slot_name(struct slot *slot) +{ + return hotplug_slot_name(slot->hotplug_slot); +} + #ifdef CONFIG_ACPI #include <linux/pci-acpi.h> static inline int get_hp_params_from_firmware(struct pci_dev *dev, @@ -236,7 +253,7 @@ static inline struct slot *shpchp_find_slot(struct controller *ctrl, u8 device) return slot; } - err("%s: slot (device=0x%x) not found\n", __func__, device); + ctrl_err(ctrl, "Slot (device=0x%02x) not found\n", device); return NULL; } @@ -270,7 +287,9 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot) pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, &pcix_bridge_errors_reg); perr_set = pcix_bridge_errors_reg & PERR_OBSERVED_MASK; if (perr_set) { - dbg ("%s W1C: Bridge_Errors[ PERR_OBSERVED = %08X]\n",__func__ , perr_set); + ctrl_dbg(p_slot->ctrl, + "Bridge_Errors[ PERR_OBSERVED = %08X] (W1C)\n", + perr_set); pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MISC_BRIDGE_ERRORS_OFFSET, perr_set); } @@ -279,7 +298,7 @@ static inline void amd_pogo_errata_restore_misc_reg(struct slot *p_slot) pci_read_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, &pcix_mem_base_reg); rse_set = pcix_mem_base_reg & RSE_MASK; if (rse_set) { - dbg ("%s W1C: Memory_Base_Limit[ RSE ]\n",__func__ ); + ctrl_dbg(p_slot->ctrl, "Memory_Base_Limit[ RSE ] (W1C)\n"); pci_write_config_dword(p_slot->ctrl->pci_dev, PCIX_MEM_BASE_LIMIT_OFFSET, rse_set); } diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c index a8cbd039b85..fe8d149c229 100644 --- a/drivers/pci/hotplug/shpchp_core.c +++ b/drivers/pci/hotplug/shpchp_core.c @@ -39,7 +39,6 @@ int shpchp_debug; int shpchp_poll_mode; int shpchp_poll_time; -static int shpchp_slot_with_bus; struct workqueue_struct *shpchp_wq; #define DRIVER_VERSION "0.4" @@ -53,11 +52,9 @@ MODULE_LICENSE("GPL"); module_param(shpchp_debug, bool, 0644); module_param(shpchp_poll_mode, bool, 0644); module_param(shpchp_poll_time, int, 0644); -module_param(shpchp_slot_with_bus, bool, 0644); MODULE_PARM_DESC(shpchp_debug, "Debugging mode enabled or not"); MODULE_PARM_DESC(shpchp_poll_mode, "Using polling mechanism for hot-plug events or not"); MODULE_PARM_DESC(shpchp_poll_time, "Polling mechanism frequency, in seconds"); -MODULE_PARM_DESC(shpchp_slot_with_bus, "Use bus number in the slot name"); #define SHPC_MODULE_NAME "shpchp" @@ -92,28 +89,20 @@ static void release_slot(struct hotplug_slot *hotplug_slot) { struct slot *slot = hotplug_slot->private; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); kfree(slot->hotplug_slot->info); kfree(slot->hotplug_slot); kfree(slot); } -static void make_slot_name(struct slot *slot) -{ - if (shpchp_slot_with_bus) - snprintf(slot->hotplug_slot->name, 
SLOT_NAME_SIZE, "%04d_%04d", - slot->bus, slot->number); - else - snprintf(slot->hotplug_slot->name, SLOT_NAME_SIZE, "%d", - slot->number); -} - static int init_slots(struct controller *ctrl) { struct slot *slot; struct hotplug_slot *hotplug_slot; struct hotplug_slot_info *info; + char name[SLOT_NAME_SIZE]; int retval = -ENOMEM; int i; @@ -132,8 +121,6 @@ static int init_slots(struct controller *ctrl) goto error_hpslot; hotplug_slot->info = info; - hotplug_slot->name = slot->name; - slot->hp_slot = i; slot->ctrl = ctrl; slot->bus = ctrl->pci_dev->subordinate->number; @@ -146,28 +133,27 @@ static int init_slots(struct controller *ctrl) /* register this slot with the hotplug pci core */ hotplug_slot->private = slot; hotplug_slot->release = &release_slot; - make_slot_name(slot); + snprintf(name, SLOT_NAME_SIZE, "%d", slot->number); hotplug_slot->ops = &shpchp_hotplug_slot_ops; - get_power_status(hotplug_slot, &info->power_status); - get_attention_status(hotplug_slot, &info->attention_status); - get_latch_status(hotplug_slot, &info->latch_status); - get_adapter_status(hotplug_slot, &info->adapter_status); - - dbg("Registering bus=%x dev=%x hp_slot=%x sun=%x " - "slot_device_offset=%x\n", slot->bus, slot->device, - slot->hp_slot, slot->number, ctrl->slot_device_offset); + ctrl_dbg(ctrl, "Registering domain:bus:dev=%04x:%02x:%02x " + "hp_slot=%x sun=%x slot_device_offset=%x\n", + pci_domain_nr(ctrl->pci_dev->subordinate), + slot->bus, slot->device, slot->hp_slot, slot->number, + ctrl->slot_device_offset); retval = pci_hp_register(slot->hotplug_slot, - ctrl->pci_dev->subordinate, slot->device); + ctrl->pci_dev->subordinate, slot->device, name); if (retval) { - err("pci_hp_register failed with error %d\n", retval); - if (retval == -EEXIST) - err("Failed to register slot because of name " - "collision. 
Try \'shpchp_slot_with_bus\' " - "module option.\n"); + ctrl_err(ctrl, "pci_hp_register failed with error %d\n", + retval); goto error_info; } + get_power_status(hotplug_slot, &info->power_status); + get_attention_status(hotplug_slot, &info->attention_status); + get_latch_status(hotplug_slot, &info->latch_status); + get_adapter_status(hotplug_slot, &info->adapter_status); + list_add(&slot->slot_list, &ctrl->slot_list); } @@ -205,7 +191,8 @@ static int set_attention_status (struct hotplug_slot *hotplug_slot, u8 status) { struct slot *slot = get_slot(hotplug_slot); - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); hotplug_slot->info->attention_status = status; slot->hpc_ops->set_attention_status(slot, status); @@ -217,7 +204,8 @@ static int enable_slot (struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); return shpchp_sysfs_enable_slot(slot); } @@ -226,7 +214,8 @@ static int disable_slot (struct hotplug_slot *hotplug_slot) { struct slot *slot = get_slot(hotplug_slot); - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); return shpchp_sysfs_disable_slot(slot); } @@ -236,7 +225,8 @@ static int get_power_status (struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = get_slot(hotplug_slot); int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_power_status(slot, value); if (retval < 0) @@ -250,7 +240,8 @@ static int get_attention_status (struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = get_slot(hotplug_slot); int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_attention_status(slot, value); if (retval < 0) @@ -264,7 +255,8 @@ static int get_latch_status (struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = get_slot(hotplug_slot); int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_latch_status(slot, value); if (retval < 0) @@ -278,7 +270,8 @@ static int get_adapter_status (struct hotplug_slot *hotplug_slot, u8 *value) struct slot *slot = get_slot(hotplug_slot); int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_adapter_status(slot, value); if (retval < 0) @@ -293,7 +286,8 @@ static int get_max_bus_speed(struct hotplug_slot *hotplug_slot, struct slot *slot = get_slot(hotplug_slot); int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot = %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_max_bus_speed(slot, value); if (retval < 0) @@ -307,7 +301,8 @@ static int get_cur_bus_speed (struct hotplug_slot *hotplug_slot, enum pci_bus_sp struct slot *slot = get_slot(hotplug_slot); int retval; - dbg("%s - physical_slot = %s\n", __func__, hotplug_slot->name); + ctrl_dbg(slot->ctrl, "%s: physical_slot 
= %s\n", + __func__, slot_name(slot)); retval = slot->hpc_ops->get_cur_bus_speed(slot, value); if (retval < 0) @@ -338,15 +333,14 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); if (!ctrl) { - err("%s : out of memory\n", __func__); + dev_err(&pdev->dev, "%s: Out of memory\n", __func__); goto err_out_none; } INIT_LIST_HEAD(&ctrl->slot_list); rc = shpc_init(ctrl, pdev); if (rc) { - dbg("%s: controller initialization failed\n", - SHPC_MODULE_NAME); + ctrl_dbg(ctrl, "Controller initialization failed\n"); goto err_out_free_ctrl; } @@ -355,7 +349,7 @@ static int shpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* Setup the slot information structures */ rc = init_slots(ctrl); if (rc) { - err("%s: slot initialization failed\n", SHPC_MODULE_NAME); + ctrl_err(ctrl, "Slot initialization failed\n"); goto err_out_release_ctlr; } diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c index dfb53932dfb..b8ab2796e66 100644 --- a/drivers/pci/hotplug/shpchp_ctrl.c +++ b/drivers/pci/hotplug/shpchp_ctrl.c @@ -62,7 +62,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl) u32 event_type; /* Attention Button Change */ - dbg("shpchp: Attention button interrupt received.\n"); + ctrl_dbg(ctrl, "Attention button interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); @@ -70,7 +70,7 @@ u8 shpchp_handle_attention_button(u8 hp_slot, struct controller *ctrl) /* * Button pressed - See if need to TAKE ACTION!!! */ - info("Button pressed on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Button pressed on Slot(%s)\n", slot_name(p_slot)); event_type = INT_BUTTON_PRESS; queue_interrupt_event(p_slot, event_type); @@ -86,29 +86,29 @@ u8 shpchp_handle_switch_change(u8 hp_slot, struct controller *ctrl) u32 event_type; /* Switch Change */ - dbg("shpchp: Switch interrupt received.\n"); + ctrl_dbg(ctrl, "Switch interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); - dbg("%s: Card present %x Power status %x\n", __func__, - p_slot->presence_save, p_slot->pwr_save); + ctrl_dbg(ctrl, "Card present %x Power status %x\n", + p_slot->presence_save, p_slot->pwr_save); if (getstatus) { /* * Switch opened */ - info("Latch open on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch open on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_OPEN; if (p_slot->pwr_save && p_slot->presence_save) { event_type = INT_POWER_FAULT; - err("Surprise Removal of card\n"); + ctrl_err(ctrl, "Surprise Removal of card\n"); } } else { /* * Switch closed */ - info("Latch close on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch close on Slot(%s)\n", slot_name(p_slot)); event_type = INT_SWITCH_CLOSE; } @@ -123,7 +123,7 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl) u32 event_type; /* Presence Change */ - dbg("shpchp: Presence/Notify input change.\n"); + ctrl_dbg(ctrl, "Presence/Notify input change\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); @@ -135,13 +135,15 @@ u8 shpchp_handle_presence_change(u8 hp_slot, struct controller *ctrl) /* * Card Present */ - info("Card present on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Card present on Slot(%s)\n", + slot_name(p_slot)); event_type = 
INT_PRESENCE_ON; } else { /* * Not Present */ - info("Card not present on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Card not present on Slot(%s)\n", + slot_name(p_slot)); event_type = INT_PRESENCE_OFF; } @@ -156,7 +158,7 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl) u32 event_type; /* Power fault */ - dbg("shpchp: Power fault interrupt received.\n"); + ctrl_dbg(ctrl, "Power fault interrupt received\n"); p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); @@ -164,18 +166,19 @@ u8 shpchp_handle_power_fault(u8 hp_slot, struct controller *ctrl) /* * Power fault Cleared */ - info("Power fault cleared on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Power fault cleared on Slot(%s)\n", + slot_name(p_slot)); p_slot->status = 0x00; event_type = INT_POWER_FAULT_CLEAR; } else { /* * Power fault */ - info("Power fault on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Power fault on Slot(%s)\n", slot_name(p_slot)); event_type = INT_POWER_FAULT; /* set power fault status for this board */ p_slot->status = 0xFF; - info("power fault bit %x set\n", hp_slot); + ctrl_info(ctrl, "Power fault bit %x set\n", hp_slot); } queue_interrupt_event(p_slot, event_type); @@ -191,10 +194,10 @@ static int change_bus_speed(struct controller *ctrl, struct slot *p_slot, { int rc = 0; - dbg("%s: change to speed %d\n", __func__, speed); + ctrl_dbg(ctrl, "Change speed to %d\n", speed); if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) { - err("%s: Issue of set bus speed mode command failed\n", - __func__); + ctrl_err(ctrl, "%s: Issue of set bus speed mode command " + "failed\n", __func__); return WRONG_BUS_FREQUENCY; } return rc; @@ -212,8 +215,8 @@ static int fix_bus_speed(struct controller *ctrl, struct slot *pslot, */ if (flag) { if (asp < bsp) { - err("%s: speed of bus %x and adapter %x mismatch\n", - __func__, bsp, asp); + ctrl_err(ctrl, "Speed of bus %x and adapter %x " + "mismatch\n", bsp, asp); rc = WRONG_BUS_FREQUENCY; } return rc; @@ -243,17 +246,18 @@ static int board_added(struct slot *p_slot) int rc = 0; enum pci_bus_speed asp, bsp, msp; struct controller *ctrl = p_slot->ctrl; + struct pci_bus *parent = ctrl->pci_dev->subordinate; hp_slot = p_slot->device - ctrl->slot_device_offset; - dbg("%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n", - __func__, p_slot->device, - ctrl->slot_device_offset, hp_slot); + ctrl_dbg(ctrl, + "%s: p_slot->device, slot_offset, hp_slot = %d, %d ,%d\n", + __func__, p_slot->device, ctrl->slot_device_offset, hp_slot); /* Power on slot without connecting to bus */ rc = p_slot->hpc_ops->power_on_slot(p_slot); if (rc) { - err("%s: Failed to power on slot\n", __func__); + ctrl_err(ctrl, "Failed to power on slot\n"); return -1; } @@ -262,33 +266,34 @@ static int board_added(struct slot *p_slot) return WRONG_BUS_FREQUENCY; if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) { - err("%s: Issue of set bus speed mode command failed\n", __func__); + ctrl_err(ctrl, "%s: Issue of set bus speed mode command" + " failed\n", __func__); return WRONG_BUS_FREQUENCY; } /* turn on board, blink green LED, turn off Amber LED */ if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { - err("%s: Issue of Slot Enable command failed\n", __func__); + ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } } rc = p_slot->hpc_ops->get_adapter_speed(p_slot, &asp); if (rc) { - err("%s: Can't get adapter speed or bus mode mismatch\n", - __func__); + ctrl_err(ctrl, "Can't get adapter speed or " + "bus mode mismatch\n"); return 
WRONG_BUS_FREQUENCY; } rc = p_slot->hpc_ops->get_cur_bus_speed(p_slot, &bsp); if (rc) { - err("%s: Can't get bus operation speed\n", __func__); + ctrl_err(ctrl, "Can't get bus operation speed\n"); return WRONG_BUS_FREQUENCY; } rc = p_slot->hpc_ops->get_max_bus_speed(p_slot, &msp); if (rc) { - err("%s: Can't get max bus operation speed\n", __func__); + ctrl_err(ctrl, "Can't get max bus operation speed\n"); msp = bsp; } @@ -296,9 +301,9 @@ static int board_added(struct slot *p_slot) if (!list_empty(&ctrl->pci_dev->subordinate->devices)) slots_not_empty = 1; - dbg("%s: slots_not_empty %d, adapter_speed %d, bus_speed %d, " - "max_bus_speed %d\n", __func__, slots_not_empty, asp, - bsp, msp); + ctrl_dbg(ctrl, "%s: slots_not_empty %d, adapter_speed %d, bus_speed %d," + " max_bus_speed %d\n", __func__, slots_not_empty, asp, + bsp, msp); rc = fix_bus_speed(ctrl, p_slot, slots_not_empty, asp, bsp, msp); if (rc) @@ -306,26 +311,26 @@ static int board_added(struct slot *p_slot) /* turn on board, blink green LED, turn off Amber LED */ if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) { - err("%s: Issue of Slot Enable command failed\n", __func__); + ctrl_err(ctrl, "Issue of Slot Enable command failed\n"); return rc; } /* Wait for ~1 second */ msleep(1000); - dbg("%s: slot status = %x\n", __func__, p_slot->status); + ctrl_dbg(ctrl, "%s: slot status = %x\n", __func__, p_slot->status); /* Check for a power fault */ if (p_slot->status == 0xFF) { /* power fault occurred, but it was benign */ - dbg("%s: power fault\n", __func__); + ctrl_dbg(ctrl, "%s: Power fault\n", __func__); rc = POWER_FAILURE; p_slot->status = 0; goto err_exit; } if (shpchp_configure_device(p_slot)) { - err("Cannot add device at 0x%x:0x%x\n", p_slot->bus, - p_slot->device); + ctrl_err(ctrl, "Cannot add device at %04x:%02x:%02x\n", + pci_domain_nr(parent), p_slot->bus, p_slot->device); goto err_exit; } @@ -341,7 +346,8 @@ err_exit: /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { - err("%s: Issue of Slot Disable command failed\n", __func__); + ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", + __func__); return rc; } @@ -365,7 +371,7 @@ static int remove_board(struct slot *p_slot) hp_slot = p_slot->device - ctrl->slot_device_offset; p_slot = shpchp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset); - dbg("In %s, hp_slot = %d\n", __func__, hp_slot); + ctrl_dbg(ctrl, "%s: hp_slot = %d\n", __func__, hp_slot); /* Change status to shutdown */ if (p_slot->is_a_board) @@ -374,13 +380,14 @@ static int remove_board(struct slot *p_slot) /* turn off slot, turn on Amber LED, turn off Green LED */ rc = p_slot->hpc_ops->slot_disable(p_slot); if (rc) { - err("%s: Issue of Slot Disable command failed\n", __func__); + ctrl_err(ctrl, "%s: Issue of Slot Disable command failed\n", + __func__); return rc; } rc = p_slot->hpc_ops->set_attention_status(p_slot, 0); if (rc) { - err("%s: Issue of Set Attention command failed\n", __func__); + ctrl_err(ctrl, "Issue of Set Attention command failed\n"); return rc; } @@ -439,7 +446,8 @@ void shpchp_queue_pushbutton_work(struct work_struct *work) info = kmalloc(sizeof(*info), GFP_KERNEL); if (!info) { - err("%s: Cannot allocate memory\n", __func__); + ctrl_err(p_slot->ctrl, "%s: Cannot allocate memory\n", + __func__); return; } info->p_slot = p_slot; @@ -486,18 +494,19 @@ static int update_slot_info (struct slot *slot) static void handle_button_press_event(struct slot *p_slot) { u8 getstatus; + struct controller *ctrl = p_slot->ctrl; switch 
(p_slot->state) { case STATIC_STATE: p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (getstatus) { p_slot->state = BLINKINGOFF_STATE; - info("PCI slot #%s - powering off due to button " - "press.\n", p_slot->name); + ctrl_info(ctrl, "PCI slot #%s - powering off due to " + "button press.\n", slot_name(p_slot)); } else { p_slot->state = BLINKINGON_STATE; - info("PCI slot #%s - powering on due to button " - "press.\n", p_slot->name); + ctrl_info(ctrl, "PCI slot #%s - powering on due to " + "button press.\n", slot_name(p_slot)); } /* blink green LED and turn off amber */ p_slot->hpc_ops->green_led_blink(p_slot); @@ -512,16 +521,16 @@ static void handle_button_press_event(struct slot *p_slot) * press the attention again before the 5 sec. limit * expires to cancel hot-add or hot-remove */ - info("Button cancel on Slot(%s)\n", p_slot->name); - dbg("%s: button cancel\n", __func__); + ctrl_info(ctrl, "Button cancel on Slot(%s)\n", + slot_name(p_slot)); cancel_delayed_work(&p_slot->work); if (p_slot->state == BLINKINGOFF_STATE) p_slot->hpc_ops->green_led_on(p_slot); else p_slot->hpc_ops->green_led_off(p_slot); p_slot->hpc_ops->set_attention_status(p_slot, 0); - info("PCI slot #%s - action canceled due to button press\n", - p_slot->name); + ctrl_info(ctrl, "PCI slot #%s - action canceled due to " + "button press\n", slot_name(p_slot)); p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: @@ -531,11 +540,12 @@ static void handle_button_press_event(struct slot *p_slot) * this means that the previous attention button action * to hot-add or hot-remove is undergoing */ - info("Button ignore on Slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Button ignore on Slot(%s)\n", + slot_name(p_slot)); update_slot_info(p_slot); break; default: - warn("Not a valid state\n"); + ctrl_warn(ctrl, "Not a valid state\n"); break; } } @@ -551,7 +561,7 @@ static void interrupt_event_handler(struct work_struct *work) handle_button_press_event(p_slot); break; case INT_POWER_FAULT: - dbg("%s: power fault\n", __func__); + ctrl_dbg(p_slot->ctrl, "%s: Power fault\n", __func__); p_slot->hpc_ops->set_attention_status(p_slot, 1); p_slot->hpc_ops->green_led_off(p_slot); break; @@ -569,22 +579,24 @@ static int shpchp_enable_slot (struct slot *p_slot) { u8 getstatus = 0; int rc, retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; /* Check to see if (latch closed, card present, power off) */ mutex_lock(&p_slot->ctrl->crit_sect); rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { - info("No adapter on slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { - info("Latch open on slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || getstatus) { - info("Already enabled on slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Already enabled on slot(%s)\n", + slot_name(p_slot)); goto out; } @@ -593,7 +605,7 @@ static int shpchp_enable_slot (struct slot *p_slot) /* We have to save the presence info for these slots */ p_slot->hpc_ops->get_adapter_status(p_slot, &(p_slot->presence_save)); p_slot->hpc_ops->get_power_status(p_slot, &(p_slot->pwr_save)); - dbg("%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); + ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save); p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); 
if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) || @@ -624,6 +636,7 @@ static int shpchp_disable_slot (struct slot *p_slot) { u8 getstatus = 0; int rc, retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; if (!p_slot->ctrl) return -ENODEV; @@ -633,17 +646,18 @@ static int shpchp_disable_slot (struct slot *p_slot) rc = p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus); if (rc || !getstatus) { - info("No adapter on slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "No adapter on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_latch_status(p_slot, &getstatus); if (rc || getstatus) { - info("Latch open on slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Latch open on slot(%s)\n", slot_name(p_slot)); goto out; } rc = p_slot->hpc_ops->get_power_status(p_slot, &getstatus); if (rc || !getstatus) { - info("Already disabled slot(%s)\n", p_slot->name); + ctrl_info(ctrl, "Already disabled on slot(%s)\n", + slot_name(p_slot)); goto out; } @@ -657,6 +671,7 @@ static int shpchp_disable_slot (struct slot *p_slot) int shpchp_sysfs_enable_slot(struct slot *p_slot) { int retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -670,15 +685,17 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot) p_slot->state = STATIC_STATE; break; case POWERON_STATE: - info("Slot %s is already in powering on state\n", - p_slot->name); + ctrl_info(ctrl, "Slot %s is already in powering on state\n", + slot_name(p_slot)); break; case BLINKINGOFF_STATE: case POWEROFF_STATE: - info("Already enabled on slot %s\n", p_slot->name); + ctrl_info(ctrl, "Already enabled on slot %s\n", + slot_name(p_slot)); break; default: - err("Not a valid state on slot %s\n", p_slot->name); + ctrl_err(ctrl, "Not a valid state on slot %s\n", + slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); @@ -689,6 +706,7 @@ int shpchp_sysfs_enable_slot(struct slot *p_slot) int shpchp_sysfs_disable_slot(struct slot *p_slot) { int retval = -ENODEV; + struct controller *ctrl = p_slot->ctrl; mutex_lock(&p_slot->lock); switch (p_slot->state) { @@ -702,15 +720,17 @@ int shpchp_sysfs_disable_slot(struct slot *p_slot) p_slot->state = STATIC_STATE; break; case POWEROFF_STATE: - info("Slot %s is already in powering off state\n", - p_slot->name); + ctrl_info(ctrl, "Slot %s is already in powering off state\n", + slot_name(p_slot)); break; case BLINKINGON_STATE: case POWERON_STATE: - info("Already disabled on slot %s\n", p_slot->name); + ctrl_info(ctrl, "Already disabled on slot %s\n", + slot_name(p_slot)); break; default: - err("Not a valid state on slot %s\n", p_slot->name); + ctrl_err(ctrl, "Not a valid state on slot %s\n", + slot_name(p_slot)); break; } mutex_unlock(&p_slot->lock); diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c index 7a0bff364cd..86dc3984776 100644 --- a/drivers/pci/hotplug/shpchp_hpc.c +++ b/drivers/pci/hotplug/shpchp_hpc.c @@ -300,10 +300,10 @@ static inline int shpc_wait_cmd(struct controller *ctrl) !is_ctrl_busy(ctrl), timeout); if (!rc && is_ctrl_busy(ctrl)) { retval = -EIO; - err("Command not completed in 1000 msec\n"); + ctrl_err(ctrl, "Command not completed in 1000 msec\n"); } else if (rc < 0) { retval = -EINTR; - info("Command was interrupted by a signal\n"); + ctrl_info(ctrl, "Command was interrupted by a signal\n"); } return retval; @@ -320,15 +320,14 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) if (!shpc_poll_ctrl_busy(ctrl)) { /* After 1 sec and and the controller is still busy */ - 
err("%s : Controller is still busy after 1 sec.\n", - __func__); + ctrl_err(ctrl, "Controller is still busy after 1 sec\n"); retval = -EBUSY; goto out; } ++t_slot; temp_word = (t_slot << 8) | (cmd & 0xFF); - dbg("%s: t_slot %x cmd %x\n", __func__, t_slot, cmd); + ctrl_dbg(ctrl, "%s: t_slot %x cmd %x\n", __func__, t_slot, cmd); /* To make sure the Controller Busy bit is 0 before we send out the * command. @@ -344,8 +343,9 @@ static int shpc_write_cmd(struct slot *slot, u8 t_slot, u8 cmd) cmd_status = hpc_check_cmd_status(slot->ctrl); if (cmd_status) { - err("%s: Failed to issued command 0x%x (error code = %d)\n", - __func__, cmd, cmd_status); + ctrl_err(ctrl, + "Failed to issued command 0x%x (error code = %d)\n", + cmd, cmd_status); retval = -EIO; } out: @@ -364,15 +364,15 @@ static int hpc_check_cmd_status(struct controller *ctrl) break; case 1: retval = SWITCH_OPEN; - err("%s: Switch opened!\n", __func__); + ctrl_err(ctrl, "Switch opened!\n"); break; case 2: retval = INVALID_CMD; - err("%s: Invalid HPC command!\n", __func__); + ctrl_err(ctrl, "Invalid HPC command!\n"); break; case 4: retval = INVALID_SPEED_MODE; - err("%s: Invalid bus speed/mode!\n", __func__); + ctrl_err(ctrl, "Invalid bus speed/mode!\n"); break; default: retval = cmd_status; @@ -483,8 +483,8 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) return -ENODEV; } - dbg("%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n", - __func__, slot_reg, pcix_cap, m66_cap); + ctrl_dbg(ctrl, "%s: slot_reg = %x, pcix_cap = %x, m66_cap = %x\n", + __func__, slot_reg, pcix_cap, m66_cap); switch (pcix_cap) { case 0x0: @@ -509,7 +509,7 @@ static int hpc_get_adapter_speed(struct slot *slot, enum pci_bus_speed *value) break; } - dbg("Adapter speed = %d\n", *value); + ctrl_dbg(ctrl, "Adapter speed = %d\n", *value); return retval; } @@ -526,7 +526,7 @@ static int hpc_get_mode1_ECC_cap(struct slot *slot, u8 *mode) retval = -1; } - dbg("Mode 1 ECC cap = %d\n", *mode); + ctrl_dbg(ctrl, "Mode 1 ECC cap = %d\n", *mode); return retval; } @@ -629,7 +629,7 @@ static int hpc_power_on_slot(struct slot * slot) retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_PWR); if (retval) - err("%s: Write command failed!\n", __func__); + ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__); return retval; } @@ -642,7 +642,7 @@ static int hpc_slot_enable(struct slot * slot) retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_ENABLE | SET_PWR_BLINK | SET_ATTN_OFF); if (retval) - err("%s: Write command failed!\n", __func__); + ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__); return retval; } @@ -655,7 +655,7 @@ static int hpc_slot_disable(struct slot * slot) retval = shpc_write_cmd(slot, slot->hp_slot, SET_SLOT_DISABLE | SET_PWR_OFF | SET_ATTN_ON); if (retval) - err("%s: Write command failed!\n", __func__); + ctrl_err(slot->ctrl, "%s: Write command failed!\n", __func__); return retval; } @@ -719,7 +719,7 @@ static int hpc_set_bus_speed_mode(struct slot * slot, enum pci_bus_speed value) retval = shpc_write_cmd(slot, 0, cmd); if (retval) - err("%s: Write command failed!\n", __func__); + ctrl_err(ctrl, "%s: Write command failed!\n", __func__); return retval; } @@ -735,7 +735,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id) if (!intr_loc) return IRQ_NONE; - dbg("%s: intr_loc = %x\n",__func__, intr_loc); + ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc); if(!shpchp_poll_mode) { /* @@ -748,7 +748,7 @@ static irqreturn_t shpc_isr(int irq, void *dev_id) shpc_writel(ctrl, SERR_INTR_ENABLE, serr_int); 
intr_loc2 = shpc_readl(ctrl, INTR_LOC); - dbg("%s: intr_loc2 = %x\n",__func__, intr_loc2); + ctrl_dbg(ctrl, "%s: intr_loc2 = %x\n", __func__, intr_loc2); } if (intr_loc & CMD_INTR_PENDING) { @@ -773,8 +773,8 @@ static irqreturn_t shpc_isr(int irq, void *dev_id) continue; slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); - dbg("%s: Slot %x with intr, slot register = %x\n", - __func__, hp_slot, slot_reg); + ctrl_dbg(ctrl, "Slot %x with intr, slot register = %x\n", + hp_slot, slot_reg); if (slot_reg & MRL_CHANGE_DETECTED) shpchp_handle_switch_change(hp_slot, ctrl); @@ -843,7 +843,7 @@ static int hpc_get_max_bus_speed (struct slot *slot, enum pci_bus_speed *value) } *value = bus_speed; - dbg("Max bus speed = %d\n", bus_speed); + ctrl_dbg(ctrl, "Max bus speed = %d\n", bus_speed); return retval; } @@ -911,7 +911,7 @@ static int hpc_get_cur_bus_speed (struct slot *slot, enum pci_bus_speed *value) break; } - dbg("Current bus speed = %d\n", bus_speed); + ctrl_dbg(ctrl, "Current bus speed = %d\n", bus_speed); return retval; } @@ -949,6 +949,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) u8 i; ctrl->pci_dev = pdev; /* pci_dev of the P2P bridge */ + ctrl_dbg(ctrl, "Hotplug Controller:\n"); if ((pdev->vendor == PCI_VENDOR_ID_AMD) || (pdev->device == PCI_DEVICE_ID_AMD_GOLAM_7450)) { @@ -958,34 +959,33 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) } else { ctrl->cap_offset = pci_find_capability(pdev, PCI_CAP_ID_SHPC); if (!ctrl->cap_offset) { - err("%s : cap_offset == 0\n", __func__); + ctrl_err(ctrl, "Cannot find PCI capability\n"); goto abort; } - dbg("%s: cap_offset = %x\n", __func__, ctrl->cap_offset); + ctrl_dbg(ctrl, " cap_offset = %x\n", ctrl->cap_offset); rc = shpc_indirect_read(ctrl, 0, &shpc_base_offset); if (rc) { - err("%s: cannot read base_offset\n", __func__); + ctrl_err(ctrl, "Cannot read base_offset\n"); goto abort; } rc = shpc_indirect_read(ctrl, 3, &tempdword); if (rc) { - err("%s: cannot read slot config\n", __func__); + ctrl_err(ctrl, "Cannot read slot config\n"); goto abort; } num_slots = tempdword & SLOT_NUM; - dbg("%s: num_slots (indirect) %x\n", __func__, num_slots); + ctrl_dbg(ctrl, " num_slots (indirect) %x\n", num_slots); for (i = 0; i < 9 + num_slots; i++) { rc = shpc_indirect_read(ctrl, i, &tempdword); if (rc) { - err("%s: cannot read creg (index = %d)\n", - __func__, i); + ctrl_err(ctrl, + "Cannot read creg (index = %d)\n", i); goto abort; } - dbg("%s: offset %d: value %x\n", __func__,i, - tempdword); + ctrl_dbg(ctrl, " offset %d: value %x\n", i, tempdword); } ctrl->mmio_base = @@ -993,30 +993,31 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) ctrl->mmio_size = 0x24 + 0x4 * num_slots; } - info("HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", pdev->vendor, pdev->device, pdev->subsystem_vendor, - pdev->subsystem_device); + ctrl_info(ctrl, "HPC vendor_id %x device_id %x ss_vid %x ss_did %x\n", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); rc = pci_enable_device(pdev); if (rc) { - err("%s: pci_enable_device failed\n", __func__); + ctrl_err(ctrl, "pci_enable_device failed\n"); goto abort; } if (!request_mem_region(ctrl->mmio_base, ctrl->mmio_size, MY_NAME)) { - err("%s: cannot reserve MMIO region\n", __func__); + ctrl_err(ctrl, "Cannot reserve MMIO region\n"); rc = -1; goto abort; } ctrl->creg = ioremap(ctrl->mmio_base, ctrl->mmio_size); if (!ctrl->creg) { - err("%s: cannot remap MMIO region %lx @ %lx\n", __func__, - ctrl->mmio_size, ctrl->mmio_base); + ctrl_err(ctrl, "Cannot remap MMIO 
region %lx @ %lx\n", + ctrl->mmio_size, ctrl->mmio_base); release_mem_region(ctrl->mmio_base, ctrl->mmio_size); rc = -1; goto abort; } - dbg("%s: ctrl->creg %p\n", __func__, ctrl->creg); + ctrl_dbg(ctrl, "ctrl->creg %p\n", ctrl->creg); mutex_init(&ctrl->crit_sect); mutex_init(&ctrl->cmd_lock); @@ -1035,21 +1036,21 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) /* Mask Global Interrupt Mask & Command Complete Interrupt Mask */ tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); - dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); + ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword); tempdword |= (GLOBAL_INTR_MASK | GLOBAL_SERR_MASK | COMMAND_INTR_MASK | ARBITER_SERR_MASK); tempdword &= ~SERR_INTR_RSVDZ_MASK; shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); - dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); + ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword); /* Mask the MRL sensor SERR Mask of individual slot in * Slot SERR-INT Mask & clear all the existing event if any */ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); - dbg("%s: Default Logical Slot Register %d value %x\n", __func__, - hp_slot, slot_reg); + ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n", + hp_slot, slot_reg); slot_reg |= (PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | CON_PFAULT_INTR_MASK | MRL_CHANGE_SERR_MASK | @@ -1066,24 +1067,24 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) /* Installs the interrupt handler */ rc = pci_enable_msi(pdev); if (rc) { - info("Can't get msi for the hotplug controller\n"); - info("Use INTx for the hotplug controller\n"); + ctrl_info(ctrl, + "Can't get msi for the hotplug controller\n"); + ctrl_info(ctrl, + "Use INTx for the hotplug controller\n"); } rc = request_irq(ctrl->pci_dev->irq, shpc_isr, IRQF_SHARED, MY_NAME, (void *)ctrl); - dbg("%s: request_irq %d for hpc%d (returns %d)\n", - __func__, ctrl->pci_dev->irq, + ctrl_dbg(ctrl, "request_irq %d for hpc%d (returns %d)\n", + ctrl->pci_dev->irq, atomic_read(&shpchp_num_controllers), rc); if (rc) { - err("Can't get irq %d for the hotplug controller\n", - ctrl->pci_dev->irq); + ctrl_err(ctrl, "Can't get irq %d for the hotplug " + "controller\n", ctrl->pci_dev->irq); goto abort_iounmap; } } - dbg("%s: HPC at b:d:f:irq=0x%x:%x:%x:%x\n", __func__, - pdev->bus->number, PCI_SLOT(pdev->devfn), - PCI_FUNC(pdev->devfn), pdev->irq); + ctrl_dbg(ctrl, "HPC at %s irq=%x\n", pci_name(pdev), pdev->irq); /* * If this is the first controller to be initialized, @@ -1102,8 +1103,8 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) */ for (hp_slot = 0; hp_slot < ctrl->num_slots; hp_slot++) { slot_reg = shpc_readl(ctrl, SLOT_REG(hp_slot)); - dbg("%s: Default Logical Slot Register %d value %x\n", __func__, - hp_slot, slot_reg); + ctrl_dbg(ctrl, "Default Logical Slot Register %d value %x\n", + hp_slot, slot_reg); slot_reg &= ~(PRSNT_CHANGE_INTR_MASK | ISO_PFAULT_INTR_MASK | BUTTON_PRESS_INTR_MASK | MRL_CHANGE_INTR_MASK | CON_PFAULT_INTR_MASK | SLOT_REG_RSVDZ_MASK); @@ -1116,7 +1117,7 @@ int shpc_init(struct controller *ctrl, struct pci_dev *pdev) SERR_INTR_RSVDZ_MASK); shpc_writel(ctrl, SERR_INTR_ENABLE, tempdword); tempdword = shpc_readl(ctrl, SERR_INTR_ENABLE); - dbg("%s: SERR_INTR_ENABLE = %x\n", __func__, tempdword); + ctrl_dbg(ctrl, "SERR_INTR_ENABLE = %x\n", tempdword); } return 0; diff --git a/drivers/pci/hotplug/shpchp_pci.c 
b/drivers/pci/hotplug/shpchp_pci.c index 3fc4ec0eea0..138f161becc 100644 --- a/drivers/pci/hotplug/shpchp_pci.c +++ b/drivers/pci/hotplug/shpchp_pci.c @@ -49,9 +49,7 @@ static void program_fw_provided_values(struct pci_dev *dev) /* use default values if we can't get them from firmware */ if (get_hp_params_from_firmware(dev, &hpp) || !hpp.t0 || (hpp.t0->revision > 1)) { - printk(KERN_WARNING - "%s: Could not get hotplug parameters. Use defaults\n", - __func__); + warn("Could not get hotplug parameters. Use defaults\n"); hpp.t0 = &hpp.type0_data; hpp.t0->revision = 0; hpp.t0->cache_line_size = 8; @@ -101,18 +99,20 @@ int __ref shpchp_configure_device(struct slot *p_slot) struct pci_dev *dev; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; int num, fn; + struct controller *ctrl = p_slot->ctrl; dev = pci_get_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (dev) { - err("Device %s already exists at %x:%x, cannot hot-add\n", - pci_name(dev), p_slot->bus, p_slot->device); + ctrl_err(ctrl, "Device %s already exists " + "at %04x:%02x:%02x, cannot hot-add\n", pci_name(dev), + pci_domain_nr(parent), p_slot->bus, p_slot->device); pci_dev_put(dev); return -EINVAL; } num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0)); if (num == 0) { - err("No new device found\n"); + ctrl_err(ctrl, "No new device found\n"); return -ENODEV; } @@ -121,8 +121,8 @@ int __ref shpchp_configure_device(struct slot *p_slot) if (!dev) continue; if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) { - err("Cannot hot-add display device %s\n", - pci_name(dev)); + ctrl_err(ctrl, "Cannot hot-add display device %s\n", + pci_name(dev)); pci_dev_put(dev); continue; } @@ -138,14 +138,15 @@ int __ref shpchp_configure_device(struct slot *p_slot) break; } if (busnr >= end) { - err("No free bus for hot-added bridge\n"); + ctrl_err(ctrl, + "No free bus for hot-added bridge\n"); pci_dev_put(dev); continue; } child = pci_add_new_bus(parent, dev, busnr); if (!child) { - err("Cannot add new bus for %s\n", - pci_name(dev)); + ctrl_err(ctrl, "Cannot add new bus for %s\n", + pci_name(dev)); pci_dev_put(dev); continue; } @@ -168,8 +169,10 @@ int shpchp_unconfigure_device(struct slot *p_slot) int j; u8 bctl = 0; struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate; + struct controller *ctrl = p_slot->ctrl; - dbg("%s: bus/dev = %x/%x\n", __func__, p_slot->bus, p_slot->device); + ctrl_dbg(ctrl, "%s: domain:bus:dev = %04x:%02x:%02x\n", + __func__, pci_domain_nr(parent), p_slot->bus, p_slot->device); for (j=0; j<8 ; j++) { struct pci_dev* temp = pci_get_slot(parent, @@ -177,16 +180,17 @@ int shpchp_unconfigure_device(struct slot *p_slot) if (!temp) continue; if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) { - err("Cannot remove display device %s\n", - pci_name(temp)); + ctrl_err(ctrl, "Cannot remove display device %s\n", + pci_name(temp)); pci_dev_put(temp); continue; } if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) { pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl); if (bctl & PCI_BRIDGE_CTL_VGA) { - err("Cannot remove display device %s\n", - pci_name(temp)); + ctrl_err(ctrl, + "Cannot remove display device %s\n", + pci_name(temp)); pci_dev_put(temp); continue; } diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c index 279c940a003..bf7d6ce9bbb 100644 --- a/drivers/pci/htirq.c +++ b/drivers/pci/htirq.c @@ -126,7 +126,8 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update) cfg->msg.address_hi = 0xffffffff; irq = create_irq(); - if (irq < 0) { + + if (irq <= 0) { kfree(cfg); return -EBUSY; } diff 
--git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 3f7b81c065d..a2692724b68 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -18,6 +18,7 @@ * Author: Ashok Raj <ashok.raj@intel.com> * Author: Shaohua Li <shaohua.li@intel.com> * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> + * Author: Fenghua Yu <fenghua.yu@intel.com> */ #include <linux/init.h> @@ -33,13 +34,15 @@ #include <linux/dma-mapping.h> #include <linux/mempool.h> #include <linux/timer.h> -#include "iova.h" -#include "intel-iommu.h" -#include <asm/proto.h> /* force_iommu in this header in x86-64*/ +#include <linux/iova.h> +#include <linux/intel-iommu.h> #include <asm/cacheflush.h> -#include <asm/gart.h> +#include <asm/iommu.h> #include "pci.h" +#define ROOT_SIZE VTD_PAGE_SIZE +#define CONTEXT_SIZE VTD_PAGE_SIZE + #define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) #define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) @@ -49,8 +52,6 @@ #define DEFAULT_DOMAIN_ADDRESS_WIDTH 48 -#define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) /* 10sec */ - #define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1) @@ -58,8 +59,6 @@ static void flush_unmaps_timeout(unsigned long data); DEFINE_TIMER(unmap_timer, flush_unmaps_timeout, 0, 0); -static struct intel_iommu *g_iommus; - #define HIGH_WATER_MARK 250 struct deferred_flush_tables { int next; @@ -80,7 +79,7 @@ static long list_size; static void domain_remove_dev_info(struct dmar_domain *domain); -static int dmar_disabled; +int dmar_disabled; static int __initdata dmar_map_gfx = 1; static int dmar_forcedac; static int intel_iommu_strict; @@ -160,7 +159,7 @@ static inline void *alloc_domain_mem(void) return iommu_kmem_cache_alloc(iommu_domain_cache); } -static inline void free_domain_mem(void *vaddr) +static void free_domain_mem(void *vaddr) { kmem_cache_free(iommu_domain_cache, vaddr); } @@ -185,13 +184,6 @@ void free_iova_mem(struct iova *iova) kmem_cache_free(iommu_iova_cache, iova); } -static inline void __iommu_flush_cache( - struct intel_iommu *iommu, void *addr, int size) -{ - if (!ecap_coherent(iommu->ecap)) - clflush_cache_range(addr, size); -} - /* Gets context entry for a given bus and devfn */ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, u8 bus, u8 devfn) @@ -210,7 +202,7 @@ static struct context_entry * device_to_context_entry(struct intel_iommu *iommu, spin_unlock_irqrestore(&iommu->lock, flags); return NULL; } - __iommu_flush_cache(iommu, (void *)context, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, (void *)context, CONTEXT_SIZE); phy_addr = virt_to_phys((void *)context); set_root_value(root, phy_addr); set_root_present(root); @@ -356,7 +348,7 @@ static struct dma_pte * addr_to_dma_pte(struct dmar_domain *domain, u64 addr) return NULL; } __iommu_flush_cache(domain->iommu, tmp_page, - PAGE_SIZE_4K); + PAGE_SIZE); dma_set_pte_addr(*pte, virt_to_phys(tmp_page)); /* * high level table always sets r/w, last level page @@ -419,13 +411,13 @@ static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end) start &= (((u64)1) << addr_width) - 1; end &= (((u64)1) << addr_width) - 1; /* in case it's partial page */ - start = PAGE_ALIGN_4K(start); - end &= PAGE_MASK_4K; + start = PAGE_ALIGN(start); + end &= PAGE_MASK; /* we don't need lock here, nobody else touches the iova range */ while (start < end) { dma_pte_clear_one(domain, start); - start += PAGE_SIZE_4K; + start += VTD_PAGE_SIZE; } } @@ -479,7 +471,7 @@ static int 
iommu_alloc_root_entry(struct intel_iommu *iommu) if (!root) return -ENOMEM; - __iommu_flush_cache(iommu, root, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, root, ROOT_SIZE); spin_lock_irqsave(&iommu->lock, flags); iommu->root_entry = root; @@ -488,19 +480,6 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu) return 0; } -#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \ -{\ - cycles_t start_time = get_cycles();\ - while (1) {\ - sts = op (iommu->reg + offset);\ - if (cond)\ - break;\ - if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\ - panic("DMAR hardware is malfunctioning\n");\ - cpu_relax();\ - }\ -} - static void iommu_set_root_entry(struct intel_iommu *iommu) { void *addr; @@ -587,31 +566,10 @@ static int __iommu_flush_context(struct intel_iommu *iommu, spin_unlock_irqrestore(&iommu->register_lock, flag); - /* flush context entry will implictly flush write buffer */ + /* flush context entry will implicitly flush write buffer */ return 0; } -static int inline iommu_flush_context_global(struct intel_iommu *iommu, - int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, - non_present_entry_flush); -} - -static int inline iommu_flush_context_domain(struct intel_iommu *iommu, u16 did, - int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, did, 0, 0, DMA_CCMD_DOMAIN_INVL, - non_present_entry_flush); -} - -static int inline iommu_flush_context_device(struct intel_iommu *iommu, - u16 did, u16 source_id, u8 function_mask, int non_present_entry_flush) -{ - return __iommu_flush_context(iommu, did, source_id, function_mask, - DMA_CCMD_DEVICE_INVL, non_present_entry_flush); -} - /* return value determine if we need a write buffer flush */ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int size_order, u64 type, @@ -679,37 +637,25 @@ static int __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, printk(KERN_ERR"IOMMU: flush IOTLB failed\n"); if (DMA_TLB_IAIG(val) != DMA_TLB_IIRG(type)) pr_debug("IOMMU: tlb flush request %Lx, actual %Lx\n", - DMA_TLB_IIRG(type), DMA_TLB_IAIG(val)); - /* flush context entry will implictly flush write buffer */ + (unsigned long long)DMA_TLB_IIRG(type), + (unsigned long long)DMA_TLB_IAIG(val)); + /* flush iotlb entry will implicitly flush write buffer */ return 0; } -static int inline iommu_flush_iotlb_global(struct intel_iommu *iommu, - int non_present_entry_flush) -{ - return __iommu_flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, - non_present_entry_flush); -} - -static int inline iommu_flush_iotlb_dsi(struct intel_iommu *iommu, u16 did, - int non_present_entry_flush) -{ - return __iommu_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, - non_present_entry_flush); -} - static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, u64 addr, unsigned int pages, int non_present_entry_flush) { unsigned int mask; - BUG_ON(addr & (~PAGE_MASK_4K)); + BUG_ON(addr & (~VTD_PAGE_MASK)); BUG_ON(pages == 0); /* Fallback to domain selective flush if no PSI support */ if (!cap_pgsel_inv(iommu->cap)) - return iommu_flush_iotlb_dsi(iommu, did, - non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH, + non_present_entry_flush); /* * PSI requires page size to be 2 ^ x, and the base address is naturally @@ -718,11 +664,12 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, mask = ilog2(__roundup_pow_of_two(pages)); /* Fallback to domain selective flush if size is too big */ if (mask > 
cap_max_amask_val(iommu->cap)) - return iommu_flush_iotlb_dsi(iommu, did, - non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, 0, 0, + DMA_TLB_DSI_FLUSH, non_present_entry_flush); - return __iommu_flush_iotlb(iommu, did, addr, mask, - DMA_TLB_PSI_FLUSH, non_present_entry_flush); + return iommu->flush.flush_iotlb(iommu, did, addr, mask, + DMA_TLB_PSI_FLUSH, + non_present_entry_flush); } static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) @@ -855,7 +802,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) } static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, - u8 fault_reason, u16 source_id, u64 addr) + u8 fault_reason, u16 source_id, unsigned long long addr) { const char *reason; @@ -990,6 +937,8 @@ static int iommu_init_domains(struct intel_iommu *iommu) return -ENOMEM; } + spin_lock_init(&iommu->lock); + /* * if Caching mode is set, then invalid translations are tagged * with domainid 0. Hence we need to pre-allocate it. @@ -998,62 +947,15 @@ static int iommu_init_domains(struct intel_iommu *iommu) set_bit(0, iommu->domain_ids); return 0; } -static struct intel_iommu *alloc_iommu(struct intel_iommu *iommu, - struct dmar_drhd_unit *drhd) -{ - int ret; - int map_size; - u32 ver; - iommu->reg = ioremap(drhd->reg_base_addr, PAGE_SIZE_4K); - if (!iommu->reg) { - printk(KERN_ERR "IOMMU: can't map the region\n"); - goto error; - } - iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG); - iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG); - - /* the registers might be more than one page */ - map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap), - cap_max_fault_reg_offset(iommu->cap)); - map_size = PAGE_ALIGN_4K(map_size); - if (map_size > PAGE_SIZE_4K) { - iounmap(iommu->reg); - iommu->reg = ioremap(drhd->reg_base_addr, map_size); - if (!iommu->reg) { - printk(KERN_ERR "IOMMU: can't map the region\n"); - goto error; - } - } - - ver = readl(iommu->reg + DMAR_VER_REG); - pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n", - drhd->reg_base_addr, DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver), - iommu->cap, iommu->ecap); - ret = iommu_init_domains(iommu); - if (ret) - goto error_unmap; - spin_lock_init(&iommu->lock); - spin_lock_init(&iommu->register_lock); - - drhd->iommu = iommu; - return iommu; -error_unmap: - iounmap(iommu->reg); -error: - kfree(iommu); - return NULL; -} static void domain_exit(struct dmar_domain *domain); -static void free_iommu(struct intel_iommu *iommu) + +void free_dmar_iommu(struct intel_iommu *iommu) { struct dmar_domain *domain; int i; - if (!iommu) - return; - i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap)); for (; i < cap_ndoms(iommu->cap); ) { domain = iommu->domains[i]; @@ -1078,10 +980,6 @@ static void free_iommu(struct intel_iommu *iommu) /* free context mapping */ free_context_table(iommu); - - if (iommu->reg) - iounmap(iommu->reg); - kfree(iommu); } static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) @@ -1157,9 +1055,9 @@ static void dmar_init_reserved_ranges(void) if (!r->flags || !(r->flags & IORESOURCE_MEM)) continue; addr = r->start; - addr &= PAGE_MASK_4K; + addr &= PAGE_MASK; size = r->end - addr; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); iova = reserve_iova(&reserved_iova_list, IOVA_PFN(addr), IOVA_PFN(size + addr) - 1); if (!iova) @@ -1221,7 +1119,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width) domain->pgd = (struct dma_pte *)alloc_pgtable_page(); if (!domain->pgd) return -ENOMEM; - __iommu_flush_cache(iommu, 
domain->pgd, PAGE_SIZE_4K); + __iommu_flush_cache(iommu, domain->pgd, PAGE_SIZE); return 0; } @@ -1237,7 +1135,7 @@ static void domain_exit(struct dmar_domain *domain) /* destroy iovas */ put_iova_domain(&domain->iovad); end = DOMAIN_MAX_ADDR(domain->gaw); - end = end & (~PAGE_MASK_4K); + end = end & (~PAGE_MASK); /* clear ptes */ dma_pte_clear_range(domain, 0, end); @@ -1277,11 +1175,13 @@ static int domain_context_mapping_one(struct dmar_domain *domain, __iommu_flush_cache(iommu, context, sizeof(*context)); /* it's a non-present to present mapping */ - if (iommu_flush_context_device(iommu, domain->id, - (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, 1)) + if (iommu->flush.flush_context(iommu, domain->id, + (((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT, + DMA_CCMD_DEVICE_INVL, 1)) iommu_flush_write_buffer(iommu); else - iommu_flush_iotlb_dsi(iommu, 0, 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0); + spin_unlock_irqrestore(&iommu->lock, flags); return 0; } @@ -1356,22 +1256,25 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, u64 start_pfn, end_pfn; struct dma_pte *pte; int index; + int addr_width = agaw_to_width(domain->agaw); + + hpa &= (((u64)1) << addr_width) - 1; if ((prot & (DMA_PTE_READ|DMA_PTE_WRITE)) == 0) return -EINVAL; - iova &= PAGE_MASK_4K; - start_pfn = ((u64)hpa) >> PAGE_SHIFT_4K; - end_pfn = (PAGE_ALIGN_4K(((u64)hpa) + size)) >> PAGE_SHIFT_4K; + iova &= PAGE_MASK; + start_pfn = ((u64)hpa) >> VTD_PAGE_SHIFT; + end_pfn = (VTD_PAGE_ALIGN(((u64)hpa) + size)) >> VTD_PAGE_SHIFT; index = 0; while (start_pfn < end_pfn) { - pte = addr_to_dma_pte(domain, iova + PAGE_SIZE_4K * index); + pte = addr_to_dma_pte(domain, iova + VTD_PAGE_SIZE * index); if (!pte) return -ENOMEM; /* We don't need lock here, nobody else * touches the iova range */ BUG_ON(dma_pte_addr(*pte)); - dma_set_pte_addr(*pte, start_pfn << PAGE_SHIFT_4K); + dma_set_pte_addr(*pte, start_pfn << VTD_PAGE_SHIFT); dma_set_pte_prot(*pte, prot); __iommu_flush_cache(domain->iommu, pte, sizeof(*pte)); start_pfn++; @@ -1383,8 +1286,10 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova, static void detach_domain_for_dev(struct dmar_domain *domain, u8 bus, u8 devfn) { clear_context_table(domain->iommu, bus, devfn); - iommu_flush_context_global(domain->iommu, 0); - iommu_flush_iotlb_global(domain->iommu, 0); + domain->iommu->flush.flush_context(domain->iommu, 0, 0, 0, + DMA_CCMD_GLOBAL_INVL, 0); + domain->iommu->flush.flush_iotlb(domain->iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH, 0); } static void domain_remove_dev_info(struct dmar_domain *domain) @@ -1414,7 +1319,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain) * find_domain * Note: we use struct pci_dev->dev.archdata.iommu stores the info */ -struct dmar_domain * +static struct dmar_domain * find_domain(struct pci_dev *pdev) { struct device_domain_info *info; @@ -1426,37 +1331,6 @@ find_domain(struct pci_dev *pdev) return NULL; } -static int dmar_pci_device_match(struct pci_dev *devices[], int cnt, - struct pci_dev *dev) -{ - int index; - - while (dev) { - for (index = 0; index < cnt; index++) - if (dev == devices[index]) - return 1; - - /* Check our parent */ - dev = dev->bus->self; - } - - return 0; -} - -static struct dmar_drhd_unit * -dmar_find_matched_drhd_unit(struct pci_dev *dev) -{ - struct dmar_drhd_unit *drhd = NULL; - - list_for_each_entry(drhd, &dmar_drhd_units, list) { - if (drhd->include_all || dmar_pci_device_match(drhd->devices, - drhd->devices_cnt, dev)) - return drhd; - } - - return NULL; 
-} - /* domain is initialized */ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) { @@ -1578,11 +1452,13 @@ error: return find_domain(pdev); } -static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) +static int iommu_prepare_identity_map(struct pci_dev *pdev, + unsigned long long start, + unsigned long long end) { struct dmar_domain *domain; unsigned long size; - u64 base; + unsigned long long base; int ret; printk(KERN_INFO @@ -1594,9 +1470,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, u64 start, u64 end) return -ENOMEM; /* The address might not be aligned */ - base = start & PAGE_MASK_4K; + base = start & PAGE_MASK; size = end - base; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); if (!reserve_iova(&domain->iovad, IOVA_PFN(base), IOVA_PFN(base + size) - 1)) { printk(KERN_ERR "IOMMU: reserve iova failed\n"); @@ -1729,8 +1605,6 @@ int __init init_dmars(void) * endfor */ for_each_drhd_unit(drhd) { - if (drhd->ignored) - continue; g_num_of_iommus++; /* * lock not needed as this is only incremented in the single @@ -1739,12 +1613,6 @@ int __init init_dmars(void) */ } - g_iommus = kzalloc(g_num_of_iommus * sizeof(*iommu), GFP_KERNEL); - if (!g_iommus) { - ret = -ENOMEM; - goto error; - } - deferred_flush = kzalloc(g_num_of_iommus * sizeof(struct deferred_flush_tables), GFP_KERNEL); if (!deferred_flush) { @@ -1752,16 +1620,15 @@ int __init init_dmars(void) goto error; } - i = 0; for_each_drhd_unit(drhd) { if (drhd->ignored) continue; - iommu = alloc_iommu(&g_iommus[i], drhd); - i++; - if (!iommu) { - ret = -ENOMEM; + + iommu = drhd->iommu; + + ret = iommu_init_domains(iommu); + if (ret) goto error; - } /* * TBD: @@ -1775,6 +1642,28 @@ int __init init_dmars(void) } } + for_each_drhd_unit(drhd) { + if (drhd->ignored) + continue; + + iommu = drhd->iommu; + if (dmar_enable_qi(iommu)) { + /* + * Queued Invalidate not enabled, use Register Based + * Invalidate + */ + iommu->flush.flush_context = __iommu_flush_context; + iommu->flush.flush_iotlb = __iommu_flush_iotlb; + printk(KERN_INFO "IOMMU 0x%Lx: using Register based " + "invalidation\n", drhd->reg_base_addr); + } else { + iommu->flush.flush_context = qi_flush_context; + iommu->flush.flush_iotlb = qi_flush_iotlb; + printk(KERN_INFO "IOMMU 0x%Lx: using Queued " + "invalidation\n", drhd->reg_base_addr); + } + } + /* * For each rmrr * for each dev attached to rmrr @@ -1827,9 +1716,10 @@ int __init init_dmars(void) iommu_set_root_entry(iommu); - iommu_flush_context_global(iommu, 0); - iommu_flush_iotlb_global(iommu, 0); - + iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL, + 0); + iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH, + 0); iommu_disable_protect_mem_regions(iommu); ret = iommu_enable_translation(iommu); @@ -1845,15 +1735,14 @@ error: iommu = drhd->iommu; free_iommu(iommu); } - kfree(g_iommus); return ret; } static inline u64 aligned_size(u64 host_addr, size_t size) { u64 addr; - addr = (host_addr & (~PAGE_MASK_4K)) + size; - return PAGE_ALIGN_4K(addr); + addr = (host_addr & (~PAGE_MASK)) + size; + return PAGE_ALIGN(addr); } struct iova * @@ -1867,20 +1756,20 @@ iommu_alloc_iova(struct dmar_domain *domain, size_t size, u64 end) return NULL; piova = alloc_iova(&domain->iovad, - size >> PAGE_SHIFT_4K, IOVA_PFN(end), 1); + size >> PAGE_SHIFT, IOVA_PFN(end), 1); return piova; } static struct iova * __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, - size_t size) + size_t size, u64 dma_mask) { struct pci_dev *pdev = 
to_pci_dev(dev); struct iova *iova = NULL; - if ((pdev->dma_mask <= DMA_32BIT_MASK) || (dmar_forcedac)) { - iova = iommu_alloc_iova(domain, size, pdev->dma_mask); - } else { + if (dma_mask <= DMA_32BIT_MASK || dmar_forcedac) + iova = iommu_alloc_iova(domain, size, dma_mask); + else { /* * First try to allocate an io virtual address in * DMA_32BIT_MASK and if that fails then try allocating @@ -1888,7 +1777,7 @@ __intel_alloc_iova(struct device *dev, struct dmar_domain *domain, */ iova = iommu_alloc_iova(domain, size, DMA_32BIT_MASK); if (!iova) - iova = iommu_alloc_iova(domain, size, pdev->dma_mask); + iova = iommu_alloc_iova(domain, size, dma_mask); } if (!iova) { @@ -1927,12 +1816,12 @@ get_valid_domain_for_dev(struct pci_dev *pdev) return domain; } -static dma_addr_t -intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) +static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir, u64 dma_mask) { struct pci_dev *pdev = to_pci_dev(hwdev); struct dmar_domain *domain; - unsigned long start_paddr; + phys_addr_t start_paddr; struct iova *iova; int prot = 0; int ret; @@ -1947,11 +1836,11 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) size = aligned_size((u64)paddr, size); - iova = __intel_alloc_iova(hwdev, domain, size); + iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); if (!iova) goto error; - start_paddr = iova->pfn_lo << PAGE_SHIFT_4K; + start_paddr = (phys_addr_t)iova->pfn_lo << PAGE_SHIFT; /* * Check if DMAR supports zero-length reads on write only @@ -1969,30 +1858,33 @@ intel_map_single(struct device *hwdev, phys_addr_t paddr, size_t size, int dir) * is not a big problem */ ret = domain_page_mapping(domain, start_paddr, - ((u64)paddr) & PAGE_MASK_4K, size, prot); + ((u64)paddr) & PAGE_MASK, size, prot); if (ret) goto error; - pr_debug("Device %s request: %lx@%llx mapping: %lx@%llx, dir %d\n", - pci_name(pdev), size, (u64)paddr, - size, (u64)start_paddr, dir); - /* it's a non-present to present mapping */ ret = iommu_flush_iotlb_psi(domain->iommu, domain->id, - start_paddr, size >> PAGE_SHIFT_4K, 1); + start_paddr, size >> VTD_PAGE_SHIFT, 1); if (ret) iommu_flush_write_buffer(domain->iommu); - return (start_paddr + ((u64)paddr & (~PAGE_MASK_4K))); + return start_paddr + ((u64)paddr & (~PAGE_MASK)); error: if (iova) __free_iova(&domain->iovad, iova); printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n", - pci_name(pdev), size, (u64)paddr, dir); + pci_name(pdev), size, (unsigned long long)paddr, dir); return 0; } +dma_addr_t intel_map_single(struct device *hwdev, phys_addr_t paddr, + size_t size, int dir) +{ + return __intel_map_single(hwdev, paddr, size, dir, + to_pci_dev(hwdev)->dma_mask); +} + static void flush_unmaps(void) { int i, j; @@ -2002,7 +1894,11 @@ static void flush_unmaps(void) /* just flush them all */ for (i = 0; i < g_num_of_iommus; i++) { if (deferred_flush[i].next) { - iommu_flush_iotlb_global(&g_iommus[i], 0); + struct intel_iommu *iommu = + deferred_flush[i].domain[0]->iommu; + + iommu->flush.flush_iotlb(iommu, 0, 0, 0, + DMA_TLB_GLOBAL_FLUSH, 0); for (j = 0; j < deferred_flush[i].next; j++) { __free_iova(&deferred_flush[i].domain[j]->iovad, deferred_flush[i].iova[j]); @@ -2032,7 +1928,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) if (list_size == HIGH_WATER_MARK) flush_unmaps(); - iommu_id = dom->iommu - g_iommus; + iommu_id = dom->iommu->seq_id; + next = deferred_flush[iommu_id].next; 
deferred_flush[iommu_id].domain[next] = dom; deferred_flush[iommu_id].iova[next] = iova; @@ -2046,8 +1943,8 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova) spin_unlock_irqrestore(&async_umap_flush_lock, flags); } -static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, - size_t size, int dir) +void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size, + int dir) { struct pci_dev *pdev = to_pci_dev(dev); struct dmar_domain *domain; @@ -2063,11 +1960,11 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, if (!iova) return; - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; size = aligned_size((u64)dev_addr, size); pr_debug("Device %s unmapping: %lx@%llx\n", - pci_name(pdev), size, (u64)start_addr); + pci_name(pdev), size, (unsigned long long)start_addr); /* clear the whole page */ dma_pte_clear_range(domain, start_addr, start_addr + size); @@ -2075,7 +1972,7 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (intel_iommu_strict) { if (iommu_flush_iotlb_psi(domain->iommu, - domain->id, start_addr, size >> PAGE_SHIFT_4K, 0)) + domain->id, start_addr, size >> VTD_PAGE_SHIFT, 0)) iommu_flush_write_buffer(domain->iommu); /* free iova */ __free_iova(&domain->iovad, iova); @@ -2088,13 +1985,13 @@ static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, } } -static void * intel_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags) +void *intel_alloc_coherent(struct device *hwdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) { void *vaddr; int order; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); order = get_order(size); flags &= ~(GFP_DMA | GFP_DMA32); @@ -2103,19 +2000,21 @@ static void * intel_alloc_coherent(struct device *hwdev, size_t size, return NULL; memset(vaddr, 0, size); - *dma_handle = intel_map_single(hwdev, virt_to_bus(vaddr), size, DMA_BIDIRECTIONAL); + *dma_handle = __intel_map_single(hwdev, virt_to_bus(vaddr), size, + DMA_BIDIRECTIONAL, + hwdev->coherent_dma_mask); if (*dma_handle) return vaddr; free_pages((unsigned long)vaddr, order); return NULL; } -static void intel_free_coherent(struct device *hwdev, size_t size, - void *vaddr, dma_addr_t dma_handle) +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, + dma_addr_t dma_handle) { int order; - size = PAGE_ALIGN_4K(size); + size = PAGE_ALIGN(size); order = get_order(size); intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL); @@ -2123,8 +2022,9 @@ static void intel_free_coherent(struct device *hwdev, size_t size, } #define SG_ENT_VIRT_ADDRESS(sg) (sg_virt((sg))) -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) + +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, + int nelems, int dir) { int i; struct pci_dev *pdev = to_pci_dev(hwdev); @@ -2148,7 +2048,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, size += aligned_size((u64)addr, sg->length); } - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; /* clear the whole page */ dma_pte_clear_range(domain, start_addr, start_addr + size); @@ -2156,7 +2056,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, dma_pte_free_pagetable(domain, start_addr, start_addr + size); if (iommu_flush_iotlb_psi(domain->iommu, domain->id, 
start_addr, - size >> PAGE_SHIFT_4K, 0)) + size >> VTD_PAGE_SHIFT, 0)) iommu_flush_write_buffer(domain->iommu); /* free iova */ @@ -2177,8 +2077,8 @@ static int intel_nontranslate_map_sg(struct device *hddev, return nelems; } -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, - int nelems, int dir) +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, + int dir) { void *addr; int i; @@ -2206,7 +2106,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, size += aligned_size((u64)addr, sg->length); } - iova = __intel_alloc_iova(hwdev, domain, size); + iova = __intel_alloc_iova(hwdev, domain, size, pdev->dma_mask); if (!iova) { sglist->dma_length = 0; return 0; @@ -2222,14 +2122,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) prot |= DMA_PTE_WRITE; - start_addr = iova->pfn_lo << PAGE_SHIFT_4K; + start_addr = iova->pfn_lo << PAGE_SHIFT; offset = 0; for_each_sg(sglist, sg, nelems, i) { addr = SG_ENT_VIRT_ADDRESS(sg); addr = (void *)virt_to_phys(addr); size = aligned_size((u64)addr, sg->length); ret = domain_page_mapping(domain, start_addr + offset, - ((u64)addr) & PAGE_MASK_4K, + ((u64)addr) & PAGE_MASK, size, prot); if (ret) { /* clear the page */ @@ -2243,14 +2143,14 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, return 0; } sg->dma_address = start_addr + offset + - ((u64)addr & (~PAGE_MASK_4K)); + ((u64)addr & (~PAGE_MASK)); sg->dma_length = sg->length; offset += size; } /* it's a non-present to present mapping */ if (iommu_flush_iotlb_psi(domain->iommu, domain->id, - start_addr, offset >> PAGE_SHIFT_4K, 1)) + start_addr, offset >> VTD_PAGE_SHIFT, 1)) iommu_flush_write_buffer(domain->iommu); return nelems; } @@ -2290,7 +2190,6 @@ static inline int iommu_devinfo_cache_init(void) sizeof(struct device_domain_info), 0, SLAB_HWCACHE_ALIGN, - NULL); if (!iommu_devinfo_cache) { printk(KERN_ERR "Couldn't create devinfo cache\n"); @@ -2308,7 +2207,6 @@ static inline int iommu_iova_cache_init(void) sizeof(struct iova), 0, SLAB_HWCACHE_ALIGN, - NULL); if (!iommu_iova_cache) { printk(KERN_ERR "Couldn't create iova cache\n"); @@ -2348,15 +2246,6 @@ static void __init iommu_exit_mempool(void) } -void __init detect_intel_iommu(void) -{ - if (swiotlb || no_iommu || iommu_detected || dmar_disabled) - return; - if (early_dmar_detect()) { - iommu_detected = 1; - } -} - static void __init init_no_remapping_devices(void) { struct dmar_drhd_unit *drhd; @@ -2403,12 +2292,19 @@ int __init intel_iommu_init(void) { int ret = 0; - if (no_iommu || swiotlb || dmar_disabled) - return -ENODEV; - if (dmar_table_init()) return -ENODEV; + if (dmar_dev_scope_init()) + return -ENODEV; + + /* + * Check the need for DMA-remapping initialization now. + * Above initialization will also be used by Interrupt-remapping. 
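A recurring change in the intel-iommu.c hunks above is the removal of the fixed iommu_flush_context_*()/iommu_flush_iotlb_*() wrappers in favour of per-IOMMU callbacks installed by init_dmars(): register-based invalidation (__iommu_flush_context()/__iommu_flush_iotlb()) when queued invalidation cannot be enabled, qi_flush_context()/qi_flush_iotlb() otherwise. A condensed sketch of that pattern follows; the struct layout is assumed, since the real definition lives in include/linux/intel-iommu.h and is not part of this diff:

/* Sketch only -- names follow the calls visible in the hunks above */
struct iommu_flush {
        int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
                             u8 fm, u64 type, int non_present_entry_flush);
        int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
                           unsigned int size_order, u64 type,
                           int non_present_entry_flush);
};

static void pick_invalidation_method(struct intel_iommu *iommu)
{
        if (dmar_enable_qi(iommu)) {
                /* Queued invalidation unavailable: use the MMIO registers */
                iommu->flush.flush_context = __iommu_flush_context;
                iommu->flush.flush_iotlb = __iommu_flush_iotlb;
        } else {
                iommu->flush.flush_context = qi_flush_context;
                iommu->flush.flush_iotlb = qi_flush_iotlb;
        }
}

/* Callers then go through the indirection, e.g. a domain-selective flush: */
static void flush_domain(struct intel_iommu *iommu, u16 did)
{
        iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, 0);
}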
+ */ + if (no_iommu || swiotlb || dmar_disabled) + return -ENODEV; + iommu_init_mempool(); dmar_init_reserved_ranges(); @@ -2430,3 +2326,111 @@ int __init intel_iommu_init(void) return 0; } +void intel_iommu_domain_exit(struct dmar_domain *domain) +{ + u64 end; + + /* Domain 0 is reserved, so dont process it */ + if (!domain) + return; + + end = DOMAIN_MAX_ADDR(domain->gaw); + end = end & (~VTD_PAGE_MASK); + + /* clear ptes */ + dma_pte_clear_range(domain, 0, end); + + /* free page tables */ + dma_pte_free_pagetable(domain, 0, end); + + iommu_free_domain(domain); + free_domain_mem(domain); +} +EXPORT_SYMBOL_GPL(intel_iommu_domain_exit); + +struct dmar_domain *intel_iommu_domain_alloc(struct pci_dev *pdev) +{ + struct dmar_drhd_unit *drhd; + struct dmar_domain *domain; + struct intel_iommu *iommu; + + drhd = dmar_find_matched_drhd_unit(pdev); + if (!drhd) { + printk(KERN_ERR "intel_iommu_domain_alloc: drhd == NULL\n"); + return NULL; + } + + iommu = drhd->iommu; + if (!iommu) { + printk(KERN_ERR + "intel_iommu_domain_alloc: iommu == NULL\n"); + return NULL; + } + domain = iommu_alloc_domain(iommu); + if (!domain) { + printk(KERN_ERR + "intel_iommu_domain_alloc: domain == NULL\n"); + return NULL; + } + if (domain_init(domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { + printk(KERN_ERR + "intel_iommu_domain_alloc: domain_init() failed\n"); + intel_iommu_domain_exit(domain); + return NULL; + } + return domain; +} +EXPORT_SYMBOL_GPL(intel_iommu_domain_alloc); + +int intel_iommu_context_mapping( + struct dmar_domain *domain, struct pci_dev *pdev) +{ + int rc; + rc = domain_context_mapping(domain, pdev); + return rc; +} +EXPORT_SYMBOL_GPL(intel_iommu_context_mapping); + +int intel_iommu_page_mapping( + struct dmar_domain *domain, dma_addr_t iova, + u64 hpa, size_t size, int prot) +{ + int rc; + rc = domain_page_mapping(domain, iova, hpa, size, prot); + return rc; +} +EXPORT_SYMBOL_GPL(intel_iommu_page_mapping); + +void intel_iommu_detach_dev(struct dmar_domain *domain, u8 bus, u8 devfn) +{ + detach_domain_for_dev(domain, bus, devfn); +} +EXPORT_SYMBOL_GPL(intel_iommu_detach_dev); + +struct dmar_domain * +intel_iommu_find_domain(struct pci_dev *pdev) +{ + return find_domain(pdev); +} +EXPORT_SYMBOL_GPL(intel_iommu_find_domain); + +int intel_iommu_found(void) +{ + return g_num_of_iommus; +} +EXPORT_SYMBOL_GPL(intel_iommu_found); + +u64 intel_iommu_iova_to_pfn(struct dmar_domain *domain, u64 iova) +{ + struct dma_pte *pte; + u64 pfn; + + pfn = 0; + pte = addr_to_dma_pte(domain, iova); + + if (pte) + pfn = dma_pte_addr(*pte); + + return pfn >> VTD_PAGE_SHIFT; +} +EXPORT_SYMBOL_GPL(intel_iommu_iova_to_pfn); diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h deleted file mode 100644 index afc0ad96122..00000000000 --- a/drivers/pci/intel-iommu.h +++ /dev/null @@ -1,344 +0,0 @@ -/* - * Copyright (c) 2006, Intel Corporation. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 59 Temple - * Place - Suite 330, Boston, MA 02111-1307 USA. 
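The block of EXPORT_SYMBOL_GPL entry points added at the end of intel-iommu.c above (intel_iommu_domain_alloc(), intel_iommu_context_mapping(), intel_iommu_page_mapping(), intel_iommu_detach_dev(), intel_iommu_iova_to_pfn(), intel_iommu_found()) lets external code program a VT-d domain directly. A hypothetical caller, sketched only to show the intended call order; the mapped range is made up and error handling is minimal:

#include <linux/pci.h>
#include <linux/intel-iommu.h>  /* assumed home of the new prototypes */

static int assign_device_example(struct pci_dev *pdev)
{
        struct dmar_domain *domain;
        int ret;

        if (!intel_iommu_found())
                return -ENODEV;

        /* One translation domain per assigned device in this sketch */
        domain = intel_iommu_domain_alloc(pdev);
        if (!domain)
                return -ENOMEM;

        /* Point the device's context entry at the domain's page tables */
        ret = intel_iommu_context_mapping(domain, pdev);
        if (ret)
                goto out_exit;

        /* Identity-map 16 MiB of I/O address space (illustrative range) */
        ret = intel_iommu_page_mapping(domain, 0x0, 0x0, 16 << 20,
                                       DMA_PTE_READ | DMA_PTE_WRITE);
        if (ret)
                goto out_detach;

        return 0;

out_detach:
        intel_iommu_detach_dev(domain, pdev->bus->number, pdev->devfn);
out_exit:
        intel_iommu_domain_exit(domain);
        return ret;
}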
- * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Ashok Raj <ashok.raj@intel.com> - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> - */ - -#ifndef _INTEL_IOMMU_H_ -#define _INTEL_IOMMU_H_ - -#include <linux/types.h> -#include <linux/msi.h> -#include <linux/sysdev.h> -#include "iova.h" -#include <linux/io.h> - -/* - * We need a fixed PAGE_SIZE of 4K irrespective of - * arch PAGE_SIZE for IOMMU page tables. - */ -#define PAGE_SHIFT_4K (12) -#define PAGE_SIZE_4K (1UL << PAGE_SHIFT_4K) -#define PAGE_MASK_4K (((u64)-1) << PAGE_SHIFT_4K) -#define PAGE_ALIGN_4K(addr) (((addr) + PAGE_SIZE_4K - 1) & PAGE_MASK_4K) - -#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT_4K) -#define DMA_32BIT_PFN IOVA_PFN(DMA_32BIT_MASK) -#define DMA_64BIT_PFN IOVA_PFN(DMA_64BIT_MASK) - -/* - * Intel IOMMU register specification per version 1.0 public spec. - */ - -#define DMAR_VER_REG 0x0 /* Arch version supported by this IOMMU */ -#define DMAR_CAP_REG 0x8 /* Hardware supported capabilities */ -#define DMAR_ECAP_REG 0x10 /* Extended capabilities supported */ -#define DMAR_GCMD_REG 0x18 /* Global command register */ -#define DMAR_GSTS_REG 0x1c /* Global status register */ -#define DMAR_RTADDR_REG 0x20 /* Root entry table */ -#define DMAR_CCMD_REG 0x28 /* Context command reg */ -#define DMAR_FSTS_REG 0x34 /* Fault Status register */ -#define DMAR_FECTL_REG 0x38 /* Fault control register */ -#define DMAR_FEDATA_REG 0x3c /* Fault event interrupt data register */ -#define DMAR_FEADDR_REG 0x40 /* Fault event interrupt addr register */ -#define DMAR_FEUADDR_REG 0x44 /* Upper address register */ -#define DMAR_AFLOG_REG 0x58 /* Advanced Fault control */ -#define DMAR_PMEN_REG 0x64 /* Enable Protected Memory Region */ -#define DMAR_PLMBASE_REG 0x68 /* PMRR Low addr */ -#define DMAR_PLMLIMIT_REG 0x6c /* PMRR low limit */ -#define DMAR_PHMBASE_REG 0x70 /* pmrr high base addr */ -#define DMAR_PHMLIMIT_REG 0x78 /* pmrr high limit */ - -#define OFFSET_STRIDE (9) -/* -#define dmar_readl(dmar, reg) readl(dmar + reg) -#define dmar_readq(dmar, reg) ({ \ - u32 lo, hi; \ - lo = readl(dmar + reg); \ - hi = readl(dmar + reg + 4); \ - (((u64) hi) << 32) + lo; }) -*/ -static inline u64 dmar_readq(void __iomem *addr) -{ - u32 lo, hi; - lo = readl(addr); - hi = readl(addr + 4); - return (((u64) hi) << 32) + lo; -} - -static inline void dmar_writeq(void __iomem *addr, u64 val) -{ - writel((u32)val, addr); - writel((u32)(val >> 32), addr + 4); -} - -#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4) -#define DMAR_VER_MINOR(v) ((v) & 0x0f) - -/* - * Decoding Capability Register - */ -#define cap_read_drain(c) (((c) >> 55) & 1) -#define cap_write_drain(c) (((c) >> 54) & 1) -#define cap_max_amask_val(c) (((c) >> 48) & 0x3f) -#define cap_num_fault_regs(c) ((((c) >> 40) & 0xff) + 1) -#define cap_pgsel_inv(c) (((c) >> 39) & 1) - -#define cap_super_page_val(c) (((c) >> 34) & 0xf) -#define cap_super_offset(c) (((find_first_bit(&cap_super_page_val(c), 4)) \ - * OFFSET_STRIDE) + 21) - -#define cap_fault_reg_offset(c) ((((c) >> 24) & 0x3ff) * 16) -#define cap_max_fault_reg_offset(c) \ - (cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16) - -#define cap_zlr(c) (((c) >> 22) & 1) -#define cap_isoch(c) (((c) >> 23) & 1) -#define cap_mgaw(c) ((((c) >> 16) & 0x3f) + 1) -#define cap_sagaw(c) (((c) >> 8) & 0x1f) -#define cap_caching_mode(c) (((c) >> 7) & 1) -#define cap_phmr(c) (((c) >> 6) & 1) -#define cap_plmr(c) (((c) >> 5) & 1) -#define cap_rwbf(c) (((c) >> 4) & 1) -#define cap_afl(c) (((c) >> 3) & 1) -#define cap_ndoms(c) (((unsigned 
long)1) << (4 + 2 * ((c) & 0x7))) -/* - * Extended Capability Register - */ - -#define ecap_niotlb_iunits(e) ((((e) >> 24) & 0xff) + 1) -#define ecap_iotlb_offset(e) ((((e) >> 8) & 0x3ff) * 16) -#define ecap_max_iotlb_offset(e) \ - (ecap_iotlb_offset(e) + ecap_niotlb_iunits(e) * 16) -#define ecap_coherent(e) ((e) & 0x1) - - -/* IOTLB_REG */ -#define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60) -#define DMA_TLB_DSI_FLUSH (((u64)2) << 60) -#define DMA_TLB_PSI_FLUSH (((u64)3) << 60) -#define DMA_TLB_IIRG(type) ((type >> 60) & 7) -#define DMA_TLB_IAIG(val) (((val) >> 57) & 7) -#define DMA_TLB_READ_DRAIN (((u64)1) << 49) -#define DMA_TLB_WRITE_DRAIN (((u64)1) << 48) -#define DMA_TLB_DID(id) (((u64)((id) & 0xffff)) << 32) -#define DMA_TLB_IVT (((u64)1) << 63) -#define DMA_TLB_IH_NONLEAF (((u64)1) << 6) -#define DMA_TLB_MAX_SIZE (0x3f) - -/* PMEN_REG */ -#define DMA_PMEN_EPM (((u32)1)<<31) -#define DMA_PMEN_PRS (((u32)1)<<0) - -/* GCMD_REG */ -#define DMA_GCMD_TE (((u32)1) << 31) -#define DMA_GCMD_SRTP (((u32)1) << 30) -#define DMA_GCMD_SFL (((u32)1) << 29) -#define DMA_GCMD_EAFL (((u32)1) << 28) -#define DMA_GCMD_WBF (((u32)1) << 27) - -/* GSTS_REG */ -#define DMA_GSTS_TES (((u32)1) << 31) -#define DMA_GSTS_RTPS (((u32)1) << 30) -#define DMA_GSTS_FLS (((u32)1) << 29) -#define DMA_GSTS_AFLS (((u32)1) << 28) -#define DMA_GSTS_WBFS (((u32)1) << 27) - -/* CCMD_REG */ -#define DMA_CCMD_ICC (((u64)1) << 63) -#define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61) -#define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61) -#define DMA_CCMD_DEVICE_INVL (((u64)3) << 61) -#define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32) -#define DMA_CCMD_MASK_NOBIT 0 -#define DMA_CCMD_MASK_1BIT 1 -#define DMA_CCMD_MASK_2BIT 2 -#define DMA_CCMD_MASK_3BIT 3 -#define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16) -#define DMA_CCMD_DID(d) ((u64)((d) & 0xffff)) - -/* FECTL_REG */ -#define DMA_FECTL_IM (((u32)1) << 31) - -/* FSTS_REG */ -#define DMA_FSTS_PPF ((u32)2) -#define DMA_FSTS_PFO ((u32)1) -#define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff) - -/* FRCD_REG, 32 bits access */ -#define DMA_FRCD_F (((u32)1) << 31) -#define dma_frcd_type(d) ((d >> 30) & 1) -#define dma_frcd_fault_reason(c) (c & 0xff) -#define dma_frcd_source_id(c) (c & 0xffff) -#define dma_frcd_page_addr(d) (d & (((u64)-1) << 12)) /* low 64 bit */ - -/* - * 0: Present - * 1-11: Reserved - * 12-63: Context Ptr (12 - (haw-1)) - * 64-127: Reserved - */ -struct root_entry { - u64 val; - u64 rsvd1; -}; -#define ROOT_ENTRY_NR (PAGE_SIZE_4K/sizeof(struct root_entry)) -static inline bool root_present(struct root_entry *root) -{ - return (root->val & 1); -} -static inline void set_root_present(struct root_entry *root) -{ - root->val |= 1; -} -static inline void set_root_value(struct root_entry *root, unsigned long value) -{ - root->val |= value & PAGE_MASK_4K; -} - -struct context_entry; -static inline struct context_entry * -get_context_addr_from_root(struct root_entry *root) -{ - return (struct context_entry *) - (root_present(root)?phys_to_virt( - root->val & PAGE_MASK_4K): - NULL); -} - -/* - * low 64 bits: - * 0: present - * 1: fault processing disable - * 2-3: translation type - * 12-63: address space root - * high 64 bits: - * 0-2: address width - * 3-6: aval - * 8-23: domain id - */ -struct context_entry { - u64 lo; - u64 hi; -}; -#define context_present(c) ((c).lo & 1) -#define context_fault_disable(c) (((c).lo >> 1) & 1) -#define context_translation_type(c) (((c).lo >> 2) & 3) -#define context_address_root(c) ((c).lo & PAGE_MASK_4K) -#define context_address_width(c) ((c).hi & 
7) -#define context_domain_id(c) (((c).hi >> 8) & ((1 << 16) - 1)) - -#define context_set_present(c) do {(c).lo |= 1;} while (0) -#define context_set_fault_enable(c) \ - do {(c).lo &= (((u64)-1) << 2) | 1;} while (0) -#define context_set_translation_type(c, val) \ - do { \ - (c).lo &= (((u64)-1) << 4) | 3; \ - (c).lo |= ((val) & 3) << 2; \ - } while (0) -#define CONTEXT_TT_MULTI_LEVEL 0 -#define context_set_address_root(c, val) \ - do {(c).lo |= (val) & PAGE_MASK_4K;} while (0) -#define context_set_address_width(c, val) do {(c).hi |= (val) & 7;} while (0) -#define context_set_domain_id(c, val) \ - do {(c).hi |= ((val) & ((1 << 16) - 1)) << 8;} while (0) -#define context_clear_entry(c) do {(c).lo = 0; (c).hi = 0;} while (0) - -/* - * 0: readable - * 1: writable - * 2-6: reserved - * 7: super page - * 8-11: available - * 12-63: Host physcial address - */ -struct dma_pte { - u64 val; -}; -#define dma_clear_pte(p) do {(p).val = 0;} while (0) - -#define DMA_PTE_READ (1) -#define DMA_PTE_WRITE (2) - -#define dma_set_pte_readable(p) do {(p).val |= DMA_PTE_READ;} while (0) -#define dma_set_pte_writable(p) do {(p).val |= DMA_PTE_WRITE;} while (0) -#define dma_set_pte_prot(p, prot) \ - do {(p).val = ((p).val & ~3) | ((prot) & 3); } while (0) -#define dma_pte_addr(p) ((p).val & PAGE_MASK_4K) -#define dma_set_pte_addr(p, addr) do {\ - (p).val |= ((addr) & PAGE_MASK_4K); } while (0) -#define dma_pte_present(p) (((p).val & 3) != 0) - -struct intel_iommu; - -struct dmar_domain { - int id; /* domain id */ - struct intel_iommu *iommu; /* back pointer to owning iommu */ - - struct list_head devices; /* all devices' list */ - struct iova_domain iovad; /* iova's that belong to this domain */ - - struct dma_pte *pgd; /* virtual address */ - spinlock_t mapping_lock; /* page table lock */ - int gaw; /* max guest address width */ - - /* adjusted guest address width, 0 is level 2 30-bit */ - int agaw; - -#define DOMAIN_FLAG_MULTIPLE_DEVICES 1 - int flags; -}; - -/* PCI domain-device relationship */ -struct device_domain_info { - struct list_head link; /* link to domain siblings */ - struct list_head global; /* link to global list */ - u8 bus; /* PCI bus numer */ - u8 devfn; /* PCI devfn number */ - struct pci_dev *dev; /* it's NULL for PCIE-to-PCI bridge */ - struct dmar_domain *domain; /* pointer to domain */ -}; - -extern int init_dmars(void); - -struct intel_iommu { - void __iomem *reg; /* Pointer to hardware regs, virtual addr */ - u64 cap; - u64 ecap; - unsigned long *domain_ids; /* bitmap of domains */ - struct dmar_domain **domains; /* ptr to domains */ - int seg; - u32 gcmd; /* Holds TE, EAFL. 
Don't need SRTP, SFL, WBF */ - spinlock_t lock; /* protect context, domain ids */ - spinlock_t register_lock; /* protect register handling */ - struct root_entry *root_entry; /* virtual address */ - - unsigned int irq; - unsigned char name[7]; /* Device Name */ - struct msi_msg saved_msg; - struct sys_device sysdev; -}; - -#ifndef CONFIG_DMAR_GFX_WA -static inline void iommu_prepare_gfx_mapping(void) -{ - return; -} -#endif /* !CONFIG_DMAR_GFX_WA */ - -#endif diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c new file mode 100644 index 00000000000..2de5a3238c9 --- /dev/null +++ b/drivers/pci/intr_remapping.c @@ -0,0 +1,512 @@ +#include <linux/interrupt.h> +#include <linux/dmar.h> +#include <linux/spinlock.h> +#include <linux/jiffies.h> +#include <linux/pci.h> +#include <linux/irq.h> +#include <asm/io_apic.h> +#include <linux/intel-iommu.h> +#include "intr_remapping.h" + +static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; +static int ir_ioapic_num; +int intr_remapping_enabled; + +struct irq_2_iommu { + struct intel_iommu *iommu; + u16 irte_index; + u16 sub_handle; + u8 irte_mask; +}; + +static struct irq_2_iommu irq_2_iommuX[NR_IRQS]; + +static struct irq_2_iommu *irq_2_iommu(unsigned int irq) +{ + return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL; +} + +static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq) +{ + return irq_2_iommu(irq); +} + +static DEFINE_SPINLOCK(irq_2_ir_lock); + +static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq) +{ + struct irq_2_iommu *irq_iommu; + + irq_iommu = irq_2_iommu(irq); + + if (!irq_iommu) + return NULL; + + if (!irq_iommu->iommu) + return NULL; + + return irq_iommu; +} + +int irq_remapped(int irq) +{ + return valid_irq_2_iommu(irq) != NULL; +} + +int get_irte(int irq, struct irte *entry) +{ + int index; + struct irq_2_iommu *irq_iommu; + + if (!entry) + return -1; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + *entry = *(irq_iommu->iommu->ir_table->base + index); + + spin_unlock(&irq_2_ir_lock); + return 0; +} + +int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) +{ + struct ir_table *table = iommu->ir_table; + struct irq_2_iommu *irq_iommu; + u16 index, start_index; + unsigned int mask = 0; + int i; + + if (!count) + return -1; + + /* protect irq_2_iommu_alloc later */ + if (irq >= nr_irqs) + return -1; + + /* + * start the IRTE search from index 0. 
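/*
 * Illustrative note, not part of the patch: for multi-MSI the caller asks
 * alloc_irte() for a block of IRTEs, and the rounding below works out as,
 * for example:
 *
 *     count = 3  ->  __roundup_pow_of_two(3) = 4,  mask = ilog2(4) = 2
 *     count = 8  ->  __roundup_pow_of_two(8) = 8,  mask = ilog2(8) = 3
 *
 * Because the search index only ever advances in steps of the rounded
 * count, each allocation lands on a naturally aligned block of
 * (1 << mask) entries, and the same mask is later handed to
 * qi_flush_iec() so the whole block can be invalidated in one request.
 */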
+ */ + index = start_index = 0; + + if (count > 1) { + count = __roundup_pow_of_two(count); + mask = ilog2(count); + } + + if (mask > ecap_max_handle_mask(iommu->ecap)) { + printk(KERN_ERR + "Requested mask %x exceeds the max invalidation handle" + " mask value %Lx\n", mask, + ecap_max_handle_mask(iommu->ecap)); + return -1; + } + + spin_lock(&irq_2_ir_lock); + do { + for (i = index; i < index + count; i++) + if (table->base[i].present) + break; + /* empty index found */ + if (i == index + count) + break; + + index = (index + count) % INTR_REMAP_TABLE_ENTRIES; + + if (index == start_index) { + spin_unlock(&irq_2_ir_lock); + printk(KERN_ERR "can't allocate an IRTE\n"); + return -1; + } + } while (1); + + for (i = index; i < index + count; i++) + table->base[i].present = 1; + + irq_iommu = irq_2_iommu_alloc(irq); + irq_iommu->iommu = iommu; + irq_iommu->irte_index = index; + irq_iommu->sub_handle = 0; + irq_iommu->irte_mask = mask; + + spin_unlock(&irq_2_ir_lock); + + return index; +} + +static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask) +{ + struct qi_desc desc; + + desc.low = QI_IEC_IIDEX(index) | QI_IEC_TYPE | QI_IEC_IM(mask) + | QI_IEC_SELECTIVE; + desc.high = 0; + + qi_submit_sync(&desc, iommu); +} + +int map_irq_to_irte_handle(int irq, u16 *sub_handle) +{ + int index; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + *sub_handle = irq_iommu->sub_handle; + index = irq_iommu->irte_index; + spin_unlock(&irq_2_ir_lock); + return index; +} + +int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle) +{ + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + + irq_iommu = irq_2_iommu_alloc(irq); + + irq_iommu->iommu = iommu; + irq_iommu->irte_index = index; + irq_iommu->sub_handle = subhandle; + irq_iommu->irte_mask = 0; + + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index) +{ + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + irq_iommu->iommu = NULL; + irq_iommu->irte_index = 0; + irq_iommu->sub_handle = 0; + irq_2_iommu(irq)->irte_mask = 0; + + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +int modify_irte(int irq, struct irte *irte_modified) +{ + int index; + struct irte *irte; + struct intel_iommu *iommu; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + irte = &iommu->ir_table->base[index]; + + set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1)); + __iommu_flush_cache(iommu, irte, sizeof(*irte)); + + qi_flush_iec(iommu, index, 0); + + spin_unlock(&irq_2_ir_lock); + return 0; +} + +int flush_irte(int irq) +{ + int index; + struct intel_iommu *iommu; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + + qi_flush_iec(iommu, index, irq_iommu->irte_mask); + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +struct intel_iommu *map_ioapic_to_ir(int apic) +{ + int i; + + for (i = 0; i < MAX_IO_APICS; i++) + if 
(ir_ioapic[i].id == apic) + return ir_ioapic[i].iommu; + return NULL; +} + +struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) +{ + struct dmar_drhd_unit *drhd; + + drhd = dmar_find_matched_drhd_unit(dev); + if (!drhd) + return NULL; + + return drhd->iommu; +} + +int free_irte(int irq) +{ + int index, i; + struct irte *irte; + struct intel_iommu *iommu; + struct irq_2_iommu *irq_iommu; + + spin_lock(&irq_2_ir_lock); + irq_iommu = valid_irq_2_iommu(irq); + if (!irq_iommu) { + spin_unlock(&irq_2_ir_lock); + return -1; + } + + iommu = irq_iommu->iommu; + + index = irq_iommu->irte_index + irq_iommu->sub_handle; + irte = &iommu->ir_table->base[index]; + + if (!irq_iommu->sub_handle) { + for (i = 0; i < (1 << irq_iommu->irte_mask); i++) + set_64bit((unsigned long *)irte, 0); + qi_flush_iec(iommu, index, irq_iommu->irte_mask); + } + + irq_iommu->iommu = NULL; + irq_iommu->irte_index = 0; + irq_iommu->sub_handle = 0; + irq_iommu->irte_mask = 0; + + spin_unlock(&irq_2_ir_lock); + + return 0; +} + +static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) +{ + u64 addr; + u32 cmd, sts; + unsigned long flags; + + addr = virt_to_phys((void *)iommu->ir_table->base); + + spin_lock_irqsave(&iommu->register_lock, flags); + + dmar_writeq(iommu->reg + DMAR_IRTA_REG, + (addr) | IR_X2APIC_MODE(mode) | INTR_REMAP_TABLE_REG_SIZE); + + /* Set interrupt-remapping table pointer */ + cmd = iommu->gcmd | DMA_GCMD_SIRTP; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRTPS), sts); + spin_unlock_irqrestore(&iommu->register_lock, flags); + + /* + * global invalidation of interrupt entry cache before enabling + * interrupt-remapping. + */ + qi_global_iec(iommu); + + spin_lock_irqsave(&iommu->register_lock, flags); + + /* Enable interrupt-remapping */ + cmd = iommu->gcmd | DMA_GCMD_IRE; + iommu->gcmd |= DMA_GCMD_IRE; + writel(cmd, iommu->reg + DMAR_GCMD_REG); + + IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, + readl, (sts & DMA_GSTS_IRES), sts); + + spin_unlock_irqrestore(&iommu->register_lock, flags); +} + + +static int setup_intr_remapping(struct intel_iommu *iommu, int mode) +{ + struct ir_table *ir_table; + struct page *pages; + + ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), + GFP_KERNEL); + + if (!iommu->ir_table) + return -ENOMEM; + + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, INTR_REMAP_PAGE_ORDER); + + if (!pages) { + printk(KERN_ERR "failed to allocate pages of order %d\n", + INTR_REMAP_PAGE_ORDER); + kfree(iommu->ir_table); + return -ENOMEM; + } + + ir_table->base = page_address(pages); + + iommu_set_intr_remapping(iommu, mode); + return 0; +} + +int __init enable_intr_remapping(int eim) +{ + struct dmar_drhd_unit *drhd; + int setup = 0; + + /* + * check for the Interrupt-remapping support + */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + continue; + + if (eim && !ecap_eim_support(iommu->ecap)) { + printk(KERN_INFO "DRHD %Lx: EIM not supported by DRHD, " + " ecap %Lx\n", drhd->reg_base_addr, iommu->ecap); + return -1; + } + } + + /* + * Enable queued invalidation for all the DRHD's. + */ + for_each_drhd_unit(drhd) { + int ret; + struct intel_iommu *iommu = drhd->iommu; + ret = dmar_enable_qi(iommu); + + if (ret) { + printk(KERN_ERR "DRHD %Lx: failed to enable queued, " + " invalidation, ecap %Lx, ret %d\n", + drhd->reg_base_addr, iommu->ecap, ret); + return -1; + } + } + + /* + * Setup Interrupt-remapping for all the DRHD's now. 
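The per-IRQ routines added earlier in this file (alloc_irte(), set_irte_irq(), map_irq_to_irte_handle(), modify_irte(), free_irte()) are meant to be driven by the arch interrupt code when it programs a remappable MSI or IO-APIC entry. A rough sketch of the expected lifecycle for a single-vector MSI; the IRTE field programming is elided because struct irte is defined outside this diff, and the prototypes are assumed to come from the interrupt-remapping header:

static int route_msi_through_ir(struct pci_dev *dev, int irq)
{
        struct intel_iommu *iommu;
        int index;

        /* Which remapping unit covers this device? */
        iommu = map_dev_to_ir(dev);
        if (!iommu)
                return -ENODEV;

        /* Reserve one IRTE and record the (iommu, index) pair for irq */
        index = alloc_irte(iommu, irq, 1);
        if (index < 0)
                return -ENOSPC;

        /*
         * The caller would now fill a struct irte (vector, destination,
         * trigger mode, ...); modify_irte() copies it into the table and
         * flushes the interrupt entry cache via qi_flush_iec().
         */
        /* modify_irte(irq, &irte); */

        return 0;
}

/* Teardown releases the IRTE block and clears the irq mapping */
static void unroute_msi(int irq)
{
        free_irte(irq);
}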
+ */ + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (!ecap_ir_support(iommu->ecap)) + continue; + + if (setup_intr_remapping(iommu, eim)) + goto error; + + setup = 1; + } + + if (!setup) + goto error; + + intr_remapping_enabled = 1; + + return 0; + +error: + /* + * handle error condition gracefully here! + */ + return -1; +} + +static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, + struct intel_iommu *iommu) +{ + struct acpi_dmar_hardware_unit *drhd; + struct acpi_dmar_device_scope *scope; + void *start, *end; + + drhd = (struct acpi_dmar_hardware_unit *)header; + + start = (void *)(drhd + 1); + end = ((void *)drhd) + header->length; + + while (start < end) { + scope = start; + if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { + if (ir_ioapic_num == MAX_IO_APICS) { + printk(KERN_WARNING "Exceeded Max IO APICS\n"); + return -1; + } + + printk(KERN_INFO "IOAPIC id %d under DRHD base" + " 0x%Lx\n", scope->enumeration_id, + drhd->address); + + ir_ioapic[ir_ioapic_num].iommu = iommu; + ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; + ir_ioapic_num++; + } + start += scope->length; + } + + return 0; +} + +/* + * Finds the assocaition between IOAPIC's and its Interrupt-remapping + * hardware unit. + */ +int __init parse_ioapics_under_ir(void) +{ + struct dmar_drhd_unit *drhd; + int ir_supported = 0; + + for_each_drhd_unit(drhd) { + struct intel_iommu *iommu = drhd->iommu; + + if (ecap_ir_support(iommu->ecap)) { + if (ir_parse_ioapic_scope(drhd->hdr, iommu)) + return -1; + + ir_supported = 1; + } + } + + if (ir_supported && ir_ioapic_num != nr_ioapics) { + printk(KERN_WARNING + "Not all IO-APIC's listed under remapping hardware\n"); + return -1; + } + + return ir_supported; +} diff --git a/drivers/pci/intr_remapping.h b/drivers/pci/intr_remapping.h new file mode 100644 index 00000000000..ca48f0df8ac --- /dev/null +++ b/drivers/pci/intr_remapping.h @@ -0,0 +1,8 @@ +#include <linux/intel-iommu.h> + +struct ioapic_scope { + struct intel_iommu *iommu; + unsigned int id; +}; + +#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) diff --git a/drivers/pci/iova.c b/drivers/pci/iova.c index 3ef4ac06431..2287116e982 100644 --- a/drivers/pci/iova.c +++ b/drivers/pci/iova.c @@ -7,7 +7,7 @@ * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> */ -#include "iova.h" +#include <linux/iova.h> void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit) diff --git a/drivers/pci/iova.h b/drivers/pci/iova.h deleted file mode 100644 index 228f6c94b69..00000000000 --- a/drivers/pci/iova.h +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright (c) 2006, Intel Corporation. - * - * This file is released under the GPLv2. 
- * - * Copyright (C) 2006-2008 Intel Corporation - * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> - * - */ - -#ifndef _IOVA_H_ -#define _IOVA_H_ - -#include <linux/types.h> -#include <linux/kernel.h> -#include <linux/rbtree.h> -#include <linux/dma-mapping.h> - -/* IO virtual address start page frame number */ -#define IOVA_START_PFN (1) - -/* iova structure */ -struct iova { - struct rb_node node; - unsigned long pfn_hi; /* IOMMU dish out addr hi */ - unsigned long pfn_lo; /* IOMMU dish out addr lo */ -}; - -/* holds all the iova translations for a domain */ -struct iova_domain { - spinlock_t iova_alloc_lock;/* Lock to protect iova allocation */ - spinlock_t iova_rbtree_lock; /* Lock to protect update of rbtree */ - struct rb_root rbroot; /* iova domain rbtree root */ - struct rb_node *cached32_node; /* Save last alloced node */ - unsigned long dma_32bit_pfn; -}; - -struct iova *alloc_iova_mem(void); -void free_iova_mem(struct iova *iova); -void free_iova(struct iova_domain *iovad, unsigned long pfn); -void __free_iova(struct iova_domain *iovad, struct iova *iova); -struct iova *alloc_iova(struct iova_domain *iovad, unsigned long size, - unsigned long limit_pfn, - bool size_aligned); -struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo, - unsigned long pfn_hi); -void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to); -void init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit); -struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn); -void put_iova_domain(struct iova_domain *iovad); - -#endif diff --git a/drivers/pci/irq.c b/drivers/pci/irq.c new file mode 100644 index 00000000000..6441dfa969a --- /dev/null +++ b/drivers/pci/irq.c @@ -0,0 +1,60 @@ +/* + * PCI IRQ failure handing code + * + * Copyright (c) 2008 James Bottomley <James.Bottomley@HansenPartnership.com> + */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/kernel.h> +#include <linux/pci.h> + +static void pci_note_irq_problem(struct pci_dev *pdev, const char *reason) +{ + struct pci_dev *parent = to_pci_dev(pdev->dev.parent); + + dev_printk(KERN_ERR, &pdev->dev, + "Potentially misrouted IRQ (Bridge %s %04x:%04x)\n", + parent->dev.bus_id, parent->vendor, parent->device); + dev_printk(KERN_ERR, &pdev->dev, "%s\n", reason); + dev_printk(KERN_ERR, &pdev->dev, "Please report to linux-kernel@vger.kernel.org\n"); + WARN_ON(1); +} + +/** + * pci_lost_interrupt - reports a lost PCI interrupt + * @pdev: device whose interrupt is lost + * + * The primary function of this routine is to report a lost interrupt + * in a standard way which users can recognise (instead of blaming the + * driver). + * + * Returns: + * a suggestion for fixing it (although the driver is not required to + * act on this). 
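A driver that times out waiting for an interrupt is expected to call this helper and, at most, act on the returned hint. A hypothetical timeout path, using only the enum values added in this file (the foo_ name is illustrative):

#include <linux/pci.h>

static void foo_handle_irq_timeout(struct pci_dev *pdev)
{
        /* We expected an interrupt and never got one: report it */
        switch (pci_lost_interrupt(pdev)) {
        case PCI_LOST_IRQ_DISABLE_MSI:
                pci_disable_msi(pdev);  /* then fall back to legacy INTx */
                break;
        case PCI_LOST_IRQ_DISABLE_MSIX:
                pci_disable_msix(pdev);
                break;
        case PCI_LOST_IRQ_DISABLE_ACPI:
        case PCI_LOST_IRQ_NO_INFORMATION:
        default:
                /* nothing the driver can fix at run time */
                break;
        }
}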
+ */ +enum pci_lost_interrupt_reason pci_lost_interrupt(struct pci_dev *pdev) +{ + if (pdev->msi_enabled || pdev->msix_enabled) { + enum pci_lost_interrupt_reason ret; + + if (pdev->msix_enabled) { + pci_note_irq_problem(pdev, "MSIX routing failure"); + ret = PCI_LOST_IRQ_DISABLE_MSIX; + } else { + pci_note_irq_problem(pdev, "MSI routing failure"); + ret = PCI_LOST_IRQ_DISABLE_MSI; + } + return ret; + } +#ifdef CONFIG_ACPI + if (!(acpi_disabled || acpi_noirq)) { + pci_note_irq_problem(pdev, "Potential ACPI misrouting please reboot with acpi=noirq"); + /* currently no way to fix acpi on the fly */ + return PCI_LOST_IRQ_DISABLE_ACPI; + } +#endif + pci_note_irq_problem(pdev, "unknown cause (not MSI or ACPI)"); + return PCI_LOST_IRQ_NO_INFORMATION; +} +EXPORT_SYMBOL(pci_lost_interrupt); diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 15af618d36e..74801f7df9c 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -126,7 +126,16 @@ static void msix_flush_writes(unsigned int irq) } } -static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) +/* + * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to + * mask all MSI interrupts by clearing the MSI enable bit does not work + * reliably as devices without an INTx disable bit will then generate a + * level IRQ which will never be cleared. + * + * Returns 1 if it succeeded in masking the interrupt and 0 if the device + * doesn't support MSI masking. + */ +static int msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) { struct msi_desc *entry; @@ -144,8 +153,7 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) mask_bits |= flag & mask; pci_write_config_dword(entry->dev, pos, mask_bits); } else { - __msi_set_enable(entry->dev, entry->msi_attrib.pos, - !flag); + return 0; } break; case PCI_CAP_ID_MSIX: @@ -161,6 +169,7 @@ static void msi_set_mask_bits(unsigned int irq, u32 mask, u32 flag) break; } entry->msi_attrib.masked = !!flag; + return 1; } void read_msi_msg(unsigned int irq, struct msi_msg *msg) @@ -299,9 +308,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev) entry->msi_attrib.masked); pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control); - control &= ~(PCI_MSI_FLAGS_QSIZE | PCI_MSI_FLAGS_ENABLE); - if (entry->msi_attrib.maskbit || !entry->msi_attrib.masked) - control |= PCI_MSI_FLAGS_ENABLE; + control &= ~PCI_MSI_FLAGS_QSIZE; + control |= PCI_MSI_FLAGS_ENABLE; pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control); } @@ -370,23 +378,21 @@ static int msi_capability_init(struct pci_dev *dev) entry->msi_attrib.masked = 1; entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ entry->msi_attrib.pos = pos; - if (is_mask_bit_support(control)) { + if (entry->msi_attrib.maskbit) { entry->mask_base = (void __iomem *)(long)msi_mask_bits_reg(pos, - is_64bit_address(control)); + entry->msi_attrib.is_64); } entry->dev = dev; if (entry->msi_attrib.maskbit) { unsigned int maskbits, temp; /* All MSIs are unmasked by default, Mask them all */ pci_read_config_dword(dev, - msi_mask_bits_reg(pos, is_64bit_address(control)), + msi_mask_bits_reg(pos, entry->msi_attrib.is_64), &maskbits); temp = (1 << multi_msi_capable(control)); temp = ((temp - 1) & ~temp); maskbits |= temp; - pci_write_config_dword(dev, - msi_mask_bits_reg(pos, is_64bit_address(control)), - maskbits); + pci_write_config_dword(dev, entry->msi_attrib.is_64, maskbits); entry->msi_attrib.maskbits_mask = temp; } list_add_tail(&entry->list, &dev->msi_list); @@ -753,3 +759,24 @@ void pci_msi_init_pci_dev(struct 
pci_dev *dev) { INIT_LIST_HEAD(&dev->msi_list); } + +#ifdef CONFIG_ACPI +#include <linux/acpi.h> +#include <linux/pci-acpi.h> +static void __devinit msi_acpi_init(void) +{ + if (acpi_pci_disabled) + return; + pci_osc_support_set(OSC_MSI_SUPPORT); + pcie_osc_support_set(OSC_MSI_SUPPORT); +} +#else +static inline void msi_acpi_init(void) { } +#endif /* CONFIG_ACPI */ + +void __devinit msi_init(void) +{ + if (!pci_msi_enable) + return; + msi_acpi_init(); +} diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 7764768b6a0..dfe7c8e1b18 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -11,6 +11,7 @@ #include <linux/init.h> #include <linux/pci.h> #include <linux/module.h> +#include <linux/pci-aspm.h> #include <acpi/acpi.h> #include <acpi/acnamesp.h> #include <acpi/acresrc.h> @@ -23,17 +24,17 @@ struct acpi_osc_data { acpi_handle handle; u32 support_set; u32 control_set; - int is_queried; - u32 query_result; struct list_head sibiling; }; static LIST_HEAD(acpi_osc_data_list); struct acpi_osc_args { u32 capbuf[3]; - u32 query_result; + u32 ctrl_result; }; +static DEFINE_MUTEX(pci_acpi_lock); + static struct acpi_osc_data *acpi_get_osc_data(acpi_handle handle) { struct acpi_osc_data *data; @@ -107,9 +108,8 @@ static acpi_status acpi_run_osc(acpi_handle handle, goto out_kfree; } out_success: - if (flags & OSC_QUERY_ENABLE) - osc_args->query_result = - *((u32 *)(out_obj->buffer.pointer + 8)); + osc_args->ctrl_result = + *((u32 *)(out_obj->buffer.pointer + 8)); status = AE_OK; out_kfree: @@ -117,41 +117,53 @@ out_kfree: return status; } -static acpi_status acpi_query_osc(acpi_handle handle, - u32 level, void *context, void **retval) +static acpi_status __acpi_query_osc(u32 flags, struct acpi_osc_data *osc_data, + u32 *result) { acpi_status status; - struct acpi_osc_data *osc_data; - u32 flags = (unsigned long)context, support_set; - acpi_handle tmp; + u32 support_set; struct acpi_osc_args osc_args; - status = acpi_get_handle(handle, "_OSC", &tmp); - if (ACPI_FAILURE(status)) - return status; - - osc_data = acpi_get_osc_data(handle); - if (!osc_data) { - printk(KERN_ERR "acpi osc data array is full\n"); - return AE_ERROR; - } - /* do _OSC query for all possible controls */ support_set = osc_data->support_set | (flags & OSC_SUPPORT_MASKS); osc_args.capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE; osc_args.capbuf[OSC_SUPPORT_TYPE] = support_set; osc_args.capbuf[OSC_CONTROL_TYPE] = OSC_CONTROL_MASKS; - status = acpi_run_osc(handle, &osc_args); + status = acpi_run_osc(osc_data->handle, &osc_args); if (ACPI_SUCCESS(status)) { osc_data->support_set = support_set; - osc_data->query_result = osc_args.query_result; - osc_data->is_queried = 1; + *result = osc_args.ctrl_result; } return status; } +static acpi_status acpi_query_osc(acpi_handle handle, + u32 level, void *context, void **retval) +{ + acpi_status status; + struct acpi_osc_data *osc_data; + u32 flags = (unsigned long)context, dummy; + acpi_handle tmp; + + status = acpi_get_handle(handle, "_OSC", &tmp); + if (ACPI_FAILURE(status)) + return AE_OK; + + mutex_lock(&pci_acpi_lock); + osc_data = acpi_get_osc_data(handle); + if (!osc_data) { + printk(KERN_ERR "acpi osc data array is full\n"); + goto out; + } + + __acpi_query_osc(flags, osc_data, &dummy); +out: + mutex_unlock(&pci_acpi_lock); + return AE_OK; +} + /** * __pci_osc_support_set - register OS support to Firmware * @flags: OS support bits @@ -180,7 +192,7 @@ acpi_status __pci_osc_support_set(u32 flags, const char *hid) acpi_status pci_osc_control_set(acpi_handle handle, u32 
flags) { acpi_status status; - u32 ctrlset, control_set; + u32 ctrlset, control_set, result; acpi_handle tmp; struct acpi_osc_data *osc_data; struct acpi_osc_args osc_args; @@ -189,19 +201,28 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) if (ACPI_FAILURE(status)) return status; + mutex_lock(&pci_acpi_lock); osc_data = acpi_get_osc_data(handle); if (!osc_data) { printk(KERN_ERR "acpi osc data array is full\n"); - return AE_ERROR; + status = AE_ERROR; + goto out; } ctrlset = (flags & OSC_CONTROL_MASKS); - if (!ctrlset) - return AE_TYPE; + if (!ctrlset) { + status = AE_TYPE; + goto out; + } - if (osc_data->is_queried && - ((osc_data->query_result & ctrlset) != ctrlset)) - return AE_SUPPORT; + status = __acpi_query_osc(osc_data->support_set, osc_data, &result); + if (ACPI_FAILURE(status)) + goto out; + + if ((result & ctrlset) != ctrlset) { + status = AE_SUPPORT; + goto out; + } control_set = osc_data->control_set | ctrlset; osc_args.capbuf[OSC_QUERY_TYPE] = 0; @@ -210,7 +231,8 @@ acpi_status pci_osc_control_set(acpi_handle handle, u32 flags) status = acpi_run_osc(handle, &osc_args); if (ACPI_SUCCESS(status)) osc_data->control_set = control_set; - +out: + mutex_unlock(&pci_acpi_lock); return status; } EXPORT_SYMBOL(pci_osc_control_set); @@ -372,6 +394,12 @@ static int __init acpi_pci_init(void) printk(KERN_INFO"ACPI FADT declares the system doesn't support MSI, so disable it\n"); pci_no_msi(); } + + if (acpi_gbl_FADT.boot_flags & BAF_PCIE_ASPM_CONTROL) { + printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n"); + pcie_no_aspm(); + } + ret = register_acpi_bus_type(&acpi_pci_bus); if (ret) return 0; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index a13f5348611..b4cdd690ae7 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -43,18 +43,32 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) { struct pci_dynid *dynid; struct pci_driver *pdrv = to_pci_driver(driver); + const struct pci_device_id *ids = pdrv->id_table; __u32 vendor, device, subvendor=PCI_ANY_ID, subdevice=PCI_ANY_ID, class=0, class_mask=0; unsigned long driver_data=0; int fields=0; - int retval = 0; + int retval; - fields = sscanf(buf, "%x %x %x %x %x %x %lux", + fields = sscanf(buf, "%x %x %x %x %x %x %lx", &vendor, &device, &subvendor, &subdevice, &class, &class_mask, &driver_data); if (fields < 2) return -EINVAL; + /* Only accept driver_data values that match an existing id_table + entry */ + retval = -EINVAL; + while (ids->vendor || ids->subvendor || ids->class_mask) { + if (driver_data == ids->driver_data) { + retval = 0; + break; + } + ids++; + } + if (retval) /* No match */ + return retval; + dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); if (!dynid) return -ENOMEM; @@ -65,8 +79,7 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count) dynid->id.subdevice = subdevice; dynid->id.class = class; dynid->id.class_mask = class_mask; - dynid->id.driver_data = pdrv->dynids.use_driver_data ? 
- driver_data : 0UL; + dynid->id.driver_data = driver_data; spin_lock(&pdrv->dynids.lock); list_add_tail(&dynid->node, &pdrv->dynids.list); diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index 9c718583a23..110022d7868 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -16,6 +16,7 @@ #include <linux/kernel.h> +#include <linux/sched.h> #include <linux/pci.h> #include <linux/stat.h> #include <linux/topology.h> @@ -422,7 +423,7 @@ pci_write_vpd(struct kobject *kobj, struct bin_attribute *bin_attr, * Reads 1, 2, or 4 bytes from legacy I/O port space using an arch specific * callback routine (pci_legacy_read). */ -ssize_t +static ssize_t pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -447,7 +448,7 @@ pci_read_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, * Writes 1, 2, or 4 bytes from legacy I/O port space using an arch specific * callback routine (pci_legacy_write). */ -ssize_t +static ssize_t pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { @@ -467,11 +468,11 @@ pci_write_legacy_io(struct kobject *kobj, struct bin_attribute *bin_attr, * @attr: struct bin_attribute for this file * @vma: struct vm_area_struct passed to mmap * - * Uses an arch specific callback, pci_mmap_legacy_page_range, to mmap + * Uses an arch specific callback, pci_mmap_legacy_mem_page_range, to mmap * legacy memory space (first meg of bus space) into application virtual * memory space. */ -int +static int pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, struct vm_area_struct *vma) { @@ -479,11 +480,109 @@ pci_mmap_legacy_mem(struct kobject *kobj, struct bin_attribute *attr, struct device, kobj)); - return pci_mmap_legacy_page_range(bus, vma); + return pci_mmap_legacy_page_range(bus, vma, pci_mmap_mem); +} + +/** + * pci_mmap_legacy_io - map legacy PCI IO into user memory space + * @kobj: kobject corresponding to device to be mapped + * @attr: struct bin_attribute for this file + * @vma: struct vm_area_struct passed to mmap + * + * Uses an arch specific callback, pci_mmap_legacy_io_page_range, to mmap + * legacy IO space (first meg of bus space) into application virtual + * memory space. Returns -ENOSYS if the operation isn't supported + */ +static int +pci_mmap_legacy_io(struct kobject *kobj, struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + struct pci_bus *bus = to_pci_bus(container_of(kobj, + struct device, + kobj)); + + return pci_mmap_legacy_page_range(bus, vma, pci_mmap_io); +} + +/** + * pci_create_legacy_files - create legacy I/O port and memory files + * @b: bus to create files under + * + * Some platforms allow access to legacy I/O port and ISA memory space on + * a per-bus basis. This routine creates the files and ties them into + * their associated read, write and mmap files from pci-sysfs.c + * + * On error unwind, but don't propogate the error to the caller + * as it is ok to set up the PCI bus without these files. 
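On platforms that implement pci_mmap_legacy_page_range(), the two files created below appear under the bus object in sysfs (typically /sys/class/pci_bus/<domain:bus>/legacy_io and legacy_mem). A user-space sketch of how they are meant to be used; the path and the VGA text-buffer offset are chosen purely for illustration:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        /* Path is illustrative; the files hang off the pci_bus device */
        int io = open("/sys/class/pci_bus/0000:00/legacy_io", O_RDWR);
        int mem = open("/sys/class/pci_bus/0000:00/legacy_mem", O_RDWR);
        unsigned char port61;
        void *isa;

        if (io < 0 || mem < 0)
                return 1;

        /* 1-byte read at offset 0x61 behaves like inb(0x61) on this bus */
        if (pread(io, &port61, 1, 0x61) == 1)
                printf("port 0x61 = 0x%02x\n", port61);

        /* Map the first megabyte of bus space; 0xB8000 is VGA text memory */
        isa = mmap(NULL, 1 << 20, PROT_READ, MAP_SHARED, mem, 0);
        if (isa != MAP_FAILED)
                printf("first VGA char: %c\n", *((char *)isa + 0xB8000));

        close(io);
        close(mem);
        return 0;
}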
+ */ +void pci_create_legacy_files(struct pci_bus *b) +{ + int error; + + b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, + GFP_ATOMIC); + if (!b->legacy_io) + goto kzalloc_err; + + b->legacy_io->attr.name = "legacy_io"; + b->legacy_io->size = 0xffff; + b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_io->read = pci_read_legacy_io; + b->legacy_io->write = pci_write_legacy_io; + b->legacy_io->mmap = pci_mmap_legacy_io; + error = device_create_bin_file(&b->dev, b->legacy_io); + if (error) + goto legacy_io_err; + + /* Allocated above after the legacy_io struct */ + b->legacy_mem = b->legacy_io + 1; + b->legacy_mem->attr.name = "legacy_mem"; + b->legacy_mem->size = 1024*1024; + b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; + b->legacy_mem->mmap = pci_mmap_legacy_mem; + error = device_create_bin_file(&b->dev, b->legacy_mem); + if (error) + goto legacy_mem_err; + + return; + +legacy_mem_err: + device_remove_bin_file(&b->dev, b->legacy_io); +legacy_io_err: + kfree(b->legacy_io); + b->legacy_io = NULL; +kzalloc_err: + printk(KERN_WARNING "pci: warning: could not create legacy I/O port " + "and ISA memory resources to sysfs\n"); + return; +} + +void pci_remove_legacy_files(struct pci_bus *b) +{ + if (b->legacy_io) { + device_remove_bin_file(&b->dev, b->legacy_io); + device_remove_bin_file(&b->dev, b->legacy_mem); + kfree(b->legacy_io); /* both are allocated here */ + } } #endif /* HAVE_PCI_LEGACY */ #ifdef HAVE_PCI_MMAP + +static int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma) +{ + unsigned long nr, start, size; + + nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + start = vma->vm_pgoff; + size = pci_resource_len(pdev, resno) >> PAGE_SHIFT; + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, start, start+nr, pci_name(pdev), resno, size); + return 0; +} + /** * pci_mmap_resource - map a PCI resource into user memory space * @kobj: kobject for mapping @@ -510,6 +609,9 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr, if (i >= PCI_ROM_RESOURCE) return -ENODEV; + if (!pci_mmap_fits(pdev, i, vma)) + return -EINVAL; + /* pci_mmap_page_range() expects the same kind of entry as coming * from /proc/bus/pci/ which is a "user visible" value. If this is * different from the resource itself, arch will do necessary fixup. @@ -696,7 +798,7 @@ static struct bin_attribute pci_config_attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, - .size = 256, + .size = PCI_CFG_SPACE_SIZE, .read = pci_read_config, .write = pci_write_config, }; @@ -706,7 +808,7 @@ static struct bin_attribute pcie_config_attr = { .name = "config", .mode = S_IRUGO | S_IWUSR, }, - .size = 4096, + .size = PCI_CFG_SPACE_EXP_SIZE, .read = pci_read_config, .write = pci_write_config, }; @@ -716,86 +818,103 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev) return 0; } +static int pci_create_capabilities_sysfs(struct pci_dev *dev) +{ + int retval; + struct bin_attribute *attr; + + /* If the device has VPD, try to expose it in sysfs. 
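The pci_mmap_fits() hunk above rejects mmap requests that would run past the end of a BAR. Below is a minimal userspace sketch of the same page-unit arithmetic; mmap_fits() and the sample sizes are illustrative, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Same test as pci_mmap_fits(), in page units: the request must start
 * inside the BAR and must not extend past its last page. */
static int mmap_fits(uint64_t bar_pages, uint64_t start_page, uint64_t req_pages)
{
        return start_page < bar_pages && bar_pages - start_page >= req_pages;
}

int main(void)
{
        uint64_t bar_pages = 16;                       /* e.g. a 64 KiB BAR with 4 KiB pages */

        printf("%d\n", mmap_fits(bar_pages, 0, 16));   /* 1: exactly covers the BAR */
        printf("%d\n", mmap_fits(bar_pages, 8, 16));   /* 0: runs past the end */
        printf("%d\n", mmap_fits(bar_pages, 32, 1));   /* 0: starts beyond the BAR */
        return 0;
}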
*/ + if (dev->vpd) { + attr = kzalloc(sizeof(*attr), GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + attr->size = dev->vpd->len; + attr->attr.name = "vpd"; + attr->attr.mode = S_IRUSR | S_IWUSR; + attr->read = pci_read_vpd; + attr->write = pci_write_vpd; + retval = sysfs_create_bin_file(&dev->dev.kobj, attr); + if (retval) { + kfree(dev->vpd->attr); + return retval; + } + dev->vpd->attr = attr; + } + + /* Active State Power Management */ + pcie_aspm_create_sysfs_dev_files(dev); + + return 0; +} + int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev) { - struct bin_attribute *attr = NULL; int retval; + int rom_size = 0; + struct bin_attribute *attr; if (!sysfs_initialized) return -EACCES; - if (pdev->cfg_size < 4096) + if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) retval = sysfs_create_bin_file(&pdev->dev.kobj, &pci_config_attr); else retval = sysfs_create_bin_file(&pdev->dev.kobj, &pcie_config_attr); if (retval) goto err; - /* If the device has VPD, try to expose it in sysfs. */ - if (pdev->vpd) { - attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (attr) { - pdev->vpd->attr = attr; - attr->size = pdev->vpd->len; - attr->attr.name = "vpd"; - attr->attr.mode = S_IRUSR | S_IWUSR; - attr->read = pci_read_vpd; - attr->write = pci_write_vpd; - retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); - if (retval) - goto err_vpd; - } else { - retval = -ENOMEM; - goto err_config_file; - } - } - retval = pci_create_resource_files(pdev); if (retval) - goto err_vpd_file; + goto err_config_file; + + if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) + rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) + rom_size = 0x20000; /* If the device has a ROM, try to expose it in sysfs. */ - if (pci_resource_len(pdev, PCI_ROM_RESOURCE) || - (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)) { + if (rom_size) { attr = kzalloc(sizeof(*attr), GFP_ATOMIC); - if (attr) { - pdev->rom_attr = attr; - attr->size = pci_resource_len(pdev, PCI_ROM_RESOURCE); - attr->attr.name = "rom"; - attr->attr.mode = S_IRUSR; - attr->read = pci_read_rom; - attr->write = pci_write_rom; - retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); - if (retval) - goto err_rom; - } else { + if (!attr) { retval = -ENOMEM; goto err_resource_files; } + attr->size = rom_size; + attr->attr.name = "rom"; + attr->attr.mode = S_IRUSR; + attr->read = pci_read_rom; + attr->write = pci_write_rom; + retval = sysfs_create_bin_file(&pdev->dev.kobj, attr); + if (retval) { + kfree(attr); + goto err_resource_files; + } + pdev->rom_attr = attr; } + /* add platform-specific attributes */ - if (pcibios_add_platform_entries(pdev)) + retval = pcibios_add_platform_entries(pdev); + if (retval) goto err_rom_file; - pcie_aspm_create_sysfs_dev_files(pdev); + /* add sysfs entries for various capabilities */ + retval = pci_create_capabilities_sysfs(pdev); + if (retval) + goto err_rom_file; return 0; err_rom_file: - if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) + if (rom_size) { sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); -err_rom: - kfree(pdev->rom_attr); + kfree(pdev->rom_attr); + pdev->rom_attr = NULL; + } err_resource_files: pci_remove_resource_files(pdev); -err_vpd_file: - if (pdev->vpd) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr); -err_vpd: - kfree(pdev->vpd->attr); - } err_config_file: - if (pdev->cfg_size < 4096) + if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); else 
sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); @@ -803,6 +922,16 @@ err: return retval; } +static void pci_remove_capabilities_sysfs(struct pci_dev *dev) +{ + if (dev->vpd && dev->vpd->attr) { + sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr); + kfree(dev->vpd->attr); + } + + pcie_aspm_remove_sysfs_dev_files(dev); +} + /** * pci_remove_sysfs_dev_files - cleanup PCI specific sysfs files * @pdev: device whose entries we should free @@ -811,27 +940,28 @@ err: */ void pci_remove_sysfs_dev_files(struct pci_dev *pdev) { + int rom_size = 0; + if (!sysfs_initialized) return; - pcie_aspm_remove_sysfs_dev_files(pdev); + pci_remove_capabilities_sysfs(pdev); - if (pdev->vpd) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->vpd->attr); - kfree(pdev->vpd->attr); - } - if (pdev->cfg_size < 4096) + if (pdev->cfg_size < PCI_CFG_SPACE_EXP_SIZE) sysfs_remove_bin_file(&pdev->dev.kobj, &pci_config_attr); else sysfs_remove_bin_file(&pdev->dev.kobj, &pcie_config_attr); pci_remove_resource_files(pdev); - if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) { - if (pdev->rom_attr) { - sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); - kfree(pdev->rom_attr); - } + if (pci_resource_len(pdev, PCI_ROM_RESOURCE)) + rom_size = pci_resource_len(pdev, PCI_ROM_RESOURCE); + else if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW) + rom_size = 0x20000; + + if (rom_size && pdev->rom_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, pdev->rom_attr); + kfree(pdev->rom_attr); } } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 44a46c92b72..21f2ac639ca 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -18,6 +18,7 @@ #include <linux/log2.h> #include <linux/pci-aspm.h> #include <linux/pm_wakeup.h> +#include <linux/interrupt.h> #include <asm/dma.h> /* isa_dma_bridge_buggy */ #include "pci.h" @@ -213,10 +214,13 @@ int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) int pci_find_ext_capability(struct pci_dev *dev, int cap) { u32 header; - int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */ - int pos = 0x100; + int ttl; + int pos = PCI_CFG_SPACE_SIZE; - if (dev->cfg_size <= 256) + /* minimum 8 bytes per capability */ + ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; + + if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) return 0; if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) @@ -234,7 +238,7 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap) return pos; pos = PCI_EXT_CAP_NEXT(header); - if (pos < 0x100) + if (pos < PCI_CFG_SPACE_SIZE) break; if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) @@ -572,6 +576,10 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state) if (!ret) pci_update_current_state(dev); } + /* This device is quirked not to be put into D3, so + don't put it in D3 */ + if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3)) + return 0; error = pci_raw_set_power_state(dev, state); @@ -1040,7 +1048,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) * @dev: PCI device to handle. * @state: PCI state from which device will issue PME#. */ -static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) +bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) { if (!dev->pm_cap) return false; @@ -1056,7 +1064,7 @@ static bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) * The caller must verify that the device is capable of generating PME# before * calling this function with @enable equal to 'true'. 
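pci_find_ext_capability() above walks the PCIe extended capability list from offset 0x100, bounded by a ttl derived from the minimum 8 bytes per capability. A standalone sketch of that walk over a local buffer follows; find_ext_cap(), the CFG_* constants and the buffer contents are assumptions for illustration only.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CFG_SPACE_SIZE     256
#define CFG_SPACE_EXP_SIZE 4096

/* Extended capability header: bits 15:0 = capability ID,
 * bits 31:20 = offset of the next capability (0 ends the list). */
static int find_ext_cap(const uint8_t *cfg, uint16_t cap_id)
{
        int ttl = (CFG_SPACE_EXP_SIZE - CFG_SPACE_SIZE) / 8;
        int pos = CFG_SPACE_SIZE;

        while (ttl-- > 0) {
                uint32_t hdr;

                memcpy(&hdr, cfg + pos, sizeof(hdr));
                if (hdr == 0 || hdr == 0xffffffff)
                        return 0;                 /* empty or broken list */
                if ((hdr & 0xffff) == cap_id)
                        return pos;
                pos = hdr >> 20;
                if (pos < CFG_SPACE_SIZE)
                        return 0;                 /* next pointer must stay in extended space */
        }
        return 0;
}

int main(void)
{
        uint8_t cfg[CFG_SPACE_EXP_SIZE] = { 0 };
        uint32_t aer_hdr = 0x0001;                /* AER (ID 0x0001), no next capability */

        memcpy(cfg + CFG_SPACE_SIZE, &aer_hdr, sizeof(aer_hdr));
        printf("AER at 0x%x\n", find_ext_cap(cfg, 0x0001));   /* prints 0x100 */
        return 0;
}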
*/ -static void pci_pme_active(struct pci_dev *dev, bool enable) +void pci_pme_active(struct pci_dev *dev, bool enable) { u16 pmcsr; @@ -1123,18 +1131,37 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable) } /** - * pci_prepare_to_sleep - prepare PCI device for system-wide transition into - * a sleep state - * @dev: Device to handle. + * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold + * @dev: PCI device to prepare + * @enable: True to enable wake-up event generation; false to disable * - * Choose the power state appropriate for the device depending on whether - * it can wake up the system and/or is power manageable by the platform - * (PCI_D3hot is the default) and put the device into that state. + * Many drivers want the device to wake up the system from D3_hot or D3_cold + * and this function allows them to set that up cleanly - pci_enable_wake() + * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI + * ordering constraints. + * + * This function only returns error code if the device is not capable of + * generating PME# from both D3_hot and D3_cold, and the platform is unable to + * enable wake-up power for it. */ -int pci_prepare_to_sleep(struct pci_dev *dev) +int pci_wake_from_d3(struct pci_dev *dev, bool enable) +{ + return pci_pme_capable(dev, PCI_D3cold) ? + pci_enable_wake(dev, PCI_D3cold, enable) : + pci_enable_wake(dev, PCI_D3hot, enable); +} + +/** + * pci_target_state - find an appropriate low power state for a given PCI dev + * @dev: PCI device + * + * Use underlying platform code to find a supported low power state for @dev. + * If the platform can't manage @dev, return the deepest state from which it + * can generate wake events, based on any available PME info. + */ +pci_power_t pci_target_state(struct pci_dev *dev) { pci_power_t target_state = PCI_D3hot; - int error; if (platform_pci_power_manageable(dev)) { /* @@ -1161,7 +1188,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev) * to generate PME#. */ if (!dev->pm_cap) - return -EIO; + return PCI_POWER_ERROR; if (dev->pme_support) { while (target_state @@ -1170,6 +1197,25 @@ int pci_prepare_to_sleep(struct pci_dev *dev) } } + return target_state; +} + +/** + * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state + * @dev: Device to handle. + * + * Choose the power state appropriate for the device depending on whether + * it can wake up the system and/or is power manageable by the platform + * (PCI_D3hot is the default) and put the device into that state. + */ +int pci_prepare_to_sleep(struct pci_dev *dev) +{ + pci_power_t target_state = pci_target_state(dev); + int error; + + if (target_state == PCI_POWER_ERROR) + return -EIO; + pci_enable_wake(dev, target_state, true); error = pci_set_power_state(dev, target_state); @@ -1181,8 +1227,7 @@ int pci_prepare_to_sleep(struct pci_dev *dev) } /** - * pci_back_from_sleep - turn PCI device on during system-wide transition into - * the working state a sleep state + * pci_back_from_sleep - turn PCI device on during system-wide transition into working state * @dev: Device to handle. * * Disable device's sytem wake-up capability and put it into D0. 
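pci_wake_from_d3() and pci_target_state()/pci_prepare_to_sleep() above are meant to be called from a driver's power-management path. Below is a hedged sketch of a hypothetical legacy suspend hook using the new helper; the foo_* names are placeholders, not an existing driver.

#include <linux/pci.h>
#include <linux/pm_wakeup.h>

static void foo_stop_hw(struct pci_dev *pdev)
{
        /* driver-specific quiesce, omitted in this sketch */
}

static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
        foo_stop_hw(pdev);
        pci_save_state(pdev);
        /* arm PME# from D3hot/D3cold only if user space enabled wake-up */
        pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}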
@@ -1222,25 +1267,25 @@ void pci_pm_init(struct pci_dev *dev) dev->d1_support = false; dev->d2_support = false; if (!pci_no_d1d2(dev)) { - if (pmc & PCI_PM_CAP_D1) { - dev_printk(KERN_DEBUG, &dev->dev, "supports D1\n"); + if (pmc & PCI_PM_CAP_D1) dev->d1_support = true; - } - if (pmc & PCI_PM_CAP_D2) { - dev_printk(KERN_DEBUG, &dev->dev, "supports D2\n"); + if (pmc & PCI_PM_CAP_D2) dev->d2_support = true; - } + + if (dev->d1_support || dev->d2_support) + dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n", + dev->d1_support ? " D1" : "", + dev->d2_support ? " D2" : ""); } pmc &= PCI_PM_CAP_PME_MASK; if (pmc) { - dev_printk(KERN_INFO, &dev->dev, - "PME# supported from%s%s%s%s%s\n", - (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", - (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", - (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", - (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", - (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); + dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n", + (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "", + (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "", + (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "", + (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "", + (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : ""); dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT; /* * Make device's PM flags reflect the wake-up capability, but @@ -1255,6 +1300,43 @@ void pci_pm_init(struct pci_dev *dev) } } +/** + * pci_enable_ari - enable ARI forwarding if hardware support it + * @dev: the PCI device + */ +void pci_enable_ari(struct pci_dev *dev) +{ + int pos; + u32 cap; + u16 ctrl; + struct pci_dev *bridge; + + if (!dev->is_pcie || dev->devfn) + return; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); + if (!pos) + return; + + bridge = dev->bus->self; + if (!bridge || !bridge->is_pcie) + return; + + pos = pci_find_capability(bridge, PCI_CAP_ID_EXP); + if (!pos) + return; + + pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap); + if (!(cap & PCI_EXP_DEVCAP2_ARI)) + return; + + pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); + ctrl |= PCI_EXP_DEVCTL2_ARI; + pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl); + + bridge->ari_enabled = 1; +} + int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge) { @@ -1338,11 +1420,10 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) return 0; err_out: - dev_warn(&pdev->dev, "BAR %d: can't reserve %s region [%#llx-%#llx]\n", + dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n", bar, pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem", - (unsigned long long)pci_resource_start(pdev, bar), - (unsigned long long)pci_resource_end(pdev, bar)); + &pdev->resource[bar]); return -EBUSY; } @@ -1671,6 +1752,103 @@ EXPORT_SYMBOL(pci_set_dma_seg_boundary); #endif /** + * pci_execute_reset_function() - Reset a PCI device function + * @dev: Device function to reset + * + * Some devices allow an individual function to be reset without affecting + * other functions in the same device. The PCI device must be responsive + * to PCI config space in order to use this function. + * + * The device function is presumed to be unused when this function is called. + * Resetting the device will make the contents of PCI configuration space + * random, so any caller of this must be prepared to reinitialise the + * device including MSI, bus mastering, BARs, decoding IO and memory spaces, + * etc. + * + * Returns 0 if the device function was successfully reset or -ENOTTY if the + * device doesn't support resetting a single function. 
+ */ +int pci_execute_reset_function(struct pci_dev *dev) +{ + u16 status; + u32 cap; + int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); + + if (!exppos) + return -ENOTTY; + pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); + if (!(cap & PCI_EXP_DEVCAP_FLR)) + return -ENOTTY; + + pci_block_user_cfg_access(dev); + + /* Wait for Transaction Pending bit clean */ + msleep(100); + pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); + if (status & PCI_EXP_DEVSTA_TRPND) { + dev_info(&dev->dev, "Busy after 100ms while trying to reset; " + "sleeping for 1 second\n"); + ssleep(1); + pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status); + if (status & PCI_EXP_DEVSTA_TRPND) + dev_info(&dev->dev, "Still busy after 1s; " + "proceeding with reset anyway\n"); + } + + pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_BCR_FLR); + mdelay(100); + + pci_unblock_user_cfg_access(dev); + return 0; +} +EXPORT_SYMBOL_GPL(pci_execute_reset_function); + +/** + * pci_reset_function() - quiesce and reset a PCI device function + * @dev: Device function to reset + * + * Some devices allow an individual function to be reset without affecting + * other functions in the same device. The PCI device must be responsive + * to PCI config space in order to use this function. + * + * This function does not just reset the PCI portion of a device, but + * clears all the state associated with the device. This function differs + * from pci_execute_reset_function in that it saves and restores device state + * over the reset. + * + * Returns 0 if the device function was successfully reset or -ENOTTY if the + * device doesn't support resetting a single function. + */ +int pci_reset_function(struct pci_dev *dev) +{ + u32 cap; + int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP); + int r; + + if (!exppos) + return -ENOTTY; + pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap); + if (!(cap & PCI_EXP_DEVCAP_FLR)) + return -ENOTTY; + + if (!dev->msi_enabled && !dev->msix_enabled) + disable_irq(dev->irq); + pci_save_state(dev); + + pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); + + r = pci_execute_reset_function(dev); + + pci_restore_state(dev); + if (!dev->msi_enabled && !dev->msix_enabled) + enable_irq(dev->irq); + + return r; +} +EXPORT_SYMBOL_GPL(pci_reset_function); + +/** * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count * @dev: PCI device to query * @@ -1858,6 +2036,9 @@ static int __devinit pci_init(void) while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { pci_fixup_device(pci_fixup_final, dev); } + + msi_init(); + return 0; } @@ -1920,7 +2101,11 @@ EXPORT_SYMBOL(pci_select_bars); EXPORT_SYMBOL(pci_set_power_state); EXPORT_SYMBOL(pci_save_state); EXPORT_SYMBOL(pci_restore_state); +EXPORT_SYMBOL(pci_pme_capable); +EXPORT_SYMBOL(pci_pme_active); EXPORT_SYMBOL(pci_enable_wake); +EXPORT_SYMBOL(pci_wake_from_d3); +EXPORT_SYMBOL(pci_target_state); EXPORT_SYMBOL(pci_prepare_to_sleep); EXPORT_SYMBOL(pci_back_from_sleep); EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state); diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index d807cd786f2..9de87e9f98f 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -1,3 +1,9 @@ +#ifndef DRIVERS_PCI_H +#define DRIVERS_PCI_H + +#define PCI_CFG_SPACE_SIZE 256 +#define PCI_CFG_SPACE_EXP_SIZE 4096 + /* Functions internal to the PCI core code */ extern int pci_uevent(struct device *dev, struct kobj_uevent_env *env); @@ -76,7 +82,13 @@ static inline int pci_proc_detach_bus(struct pci_bus *bus) 
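pci_reset_function() above quiesces the interrupt and saves/restores config space around the FLR, but the device's internal state is still lost, so the caller has to reprogram it. A hedged usage sketch; foo_recover() and foo_reinit_hw() are hypothetical driver code.

#include <linux/pci.h>

static int foo_reinit_hw(struct pci_dev *pdev)
{
        return 0;       /* driver-specific reprogramming, omitted in this sketch */
}

static int foo_recover(struct pci_dev *pdev)
{
        int rc;

        rc = pci_reset_function(pdev);
        if (rc)
                return rc;              /* -ENOTTY: this function has no FLR capability */

        return foo_reinit_hw(pdev);     /* device registers must be set up again */
}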
{ return 0; } /* Functions for PCI Hotplug drivers to use */ extern unsigned int pci_do_scan_bus(struct pci_bus *bus); +#ifdef HAVE_PCI_LEGACY +extern void pci_create_legacy_files(struct pci_bus *bus); extern void pci_remove_legacy_files(struct pci_bus *bus); +#else +static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } +static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; } +#endif /* Lock for read/write access to pci device and bus lists */ extern struct rw_semaphore pci_bus_sem; @@ -86,9 +98,11 @@ extern unsigned int pci_pm_d3_delay; #ifdef CONFIG_PCI_MSI void pci_no_msi(void); extern void pci_msi_init_pci_dev(struct pci_dev *dev); +extern void __devinit msi_init(void); #else static inline void pci_no_msi(void) { } static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } +static inline void msi_init(void) { } #endif #ifdef CONFIG_PCIEAER @@ -109,6 +123,7 @@ static inline int pci_no_d1d2(struct pci_dev *dev) extern int pcie_mch_quirk; extern struct device_attribute pci_dev_attrs[]; extern struct device_attribute dev_attr_cpuaffinity; +extern struct device_attribute dev_attr_cpulistaffinity; /** * pci_match_one_device - Tell if a PCI device structure has a matching @@ -144,3 +159,16 @@ struct pci_slot_attribute { }; #define to_pci_slot_attr(s) container_of(s, struct pci_slot_attribute, attr) +extern void pci_enable_ari(struct pci_dev *dev); +/** + * pci_ari_enabled - query ARI forwarding status + * @dev: the PCI device + * + * Returns 1 if ARI forwarding is enabled, or 0 if not enabled; + */ +static inline int pci_ari_enabled(struct pci_dev *dev) +{ + return dev->ari_enabled; +} + +#endif /* DRIVERS_PCI_H */ diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c index 77036f46acf..e390707661d 100644 --- a/drivers/pci/pcie/aer/aerdrv.c +++ b/drivers/pci/pcie/aer/aerdrv.c @@ -105,7 +105,7 @@ static irqreturn_t aer_irq(int irq, void *context) unsigned long flags; int pos; - pos = pci_find_aer_capability(pdev->port); + pos = pci_find_ext_capability(pdev->port, PCI_EXT_CAP_ID_ERR); /* * Must lock access to Root Error Status Reg, Root Error ID Reg, * and Root error producer/consumer index @@ -252,7 +252,7 @@ static pci_ers_result_t aer_root_reset(struct pci_dev *dev) u32 status; int pos; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); /* Disable Root's interrupt in response to error messages */ pci_write_config_dword(dev, pos + PCI_ERR_ROOT_COMMAND, 0); @@ -316,7 +316,7 @@ static void aer_error_resume(struct pci_dev *dev) pci_write_config_word(dev, pos + PCI_EXP_DEVSTA, reg16); /* Clean AER Root Error Status */ - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status); pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask); if (dev->error_state == pci_channel_io_normal) diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index 30f581b8791..6dd7b13e980 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -36,12 +36,7 @@ int aer_osc_setup(struct pcie_device *pciedev) if (acpi_pci_disabled) return -1; - /* Find root host bridge */ - while (pdev->bus->self) - pdev = pdev->bus->self; - handle = acpi_get_pci_rootbridge_handle( - pci_domain_nr(pdev->bus), pdev->bus->number); - + handle = acpi_find_root_bridge_handle(pdev); if (handle) { pcie_osc_support_set(OSC_EXT_PCI_CONFIG_SUPPORT); status = 
pci_osc_control_set(handle, diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c index ee5e7b5176d..dfc63d01f20 100644 --- a/drivers/pci/pcie/aer/aerdrv_core.c +++ b/drivers/pci/pcie/aer/aerdrv_core.c @@ -28,41 +28,15 @@ static int forceload; module_param(forceload, bool, 0); -#define PCI_CFG_SPACE_SIZE (0x100) -int pci_find_aer_capability(struct pci_dev *dev) -{ - int pos; - u32 reg32 = 0; - - /* Check if it's a pci-express device */ - pos = pci_find_capability(dev, PCI_CAP_ID_EXP); - if (!pos) - return 0; - - /* Check if it supports pci-express AER */ - pos = PCI_CFG_SPACE_SIZE; - while (pos) { - if (pci_read_config_dword(dev, pos, ®32)) - return 0; - - /* some broken boards return ~0 */ - if (reg32 == 0xffffffff) - return 0; - - if (PCI_EXT_CAP_ID(reg32) == PCI_EXT_CAP_ID_ERR) - break; - - pos = reg32 >> 20; - } - - return pos; -} - int pci_enable_pcie_error_reporting(struct pci_dev *dev) { u16 reg16 = 0; int pos; + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); + if (!pos) + return -EIO; + pos = pci_find_capability(dev, PCI_CAP_ID_EXP); if (!pos) return -EIO; @@ -102,7 +76,7 @@ int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev) int pos; u32 status, mask; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; @@ -123,7 +97,7 @@ int pci_cleanup_aer_correct_error_status(struct pci_dev *dev) int pos; u32 status; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (!pos) return -EIO; @@ -502,7 +476,7 @@ static void handle_error_source(struct pcie_device * aerdev, * Correctable error does not need software intevention. * No need to go through error recovery process. */ - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); if (pos) pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, info.status); @@ -542,7 +516,7 @@ void aer_enable_rootport(struct aer_rpc *rpc) reg16 &= ~(SYSTEM_ERROR_INTR_ON_MESG_MASK); pci_write_config_word(pdev, pos + PCI_EXP_RTCTL, reg16); - aer_pos = pci_find_aer_capability(pdev); + aer_pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Clear error status */ pci_read_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, ®32); pci_write_config_dword(pdev, aer_pos + PCI_ERR_ROOT_STATUS, reg32); @@ -579,7 +553,7 @@ static void disable_root_aer(struct aer_rpc *rpc) u32 reg32; int pos; - pos = pci_find_aer_capability(pdev); + pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR); /* Disable Root's interrupt in response to error messages */ pci_write_config_dword(pdev, pos + PCI_ERR_ROOT_COMMAND, 0); @@ -618,7 +592,7 @@ static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info) { int pos; - pos = pci_find_aer_capability(dev); + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); /* The device might not support AER */ if (!pos) @@ -755,7 +729,6 @@ int aer_init(struct pcie_device *dev) return AER_SUCCESS; } -EXPORT_SYMBOL_GPL(pci_find_aer_capability); EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting); EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting); EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status); diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index f82495583e6..8f63f4c6b85 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -55,7 +55,7 @@ struct pcie_link_state { struct endpoint_state endpoints[8]; }; -static int aspm_disabled; +static int aspm_disabled, aspm_force; static 
DEFINE_MUTEX(aspm_lock); static LIST_HEAD(link_list); @@ -510,6 +510,7 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) { struct pci_dev *child_dev; int child_pos; + u32 reg32; /* * Some functions in a slot might not all be PCIE functions, very @@ -519,6 +520,19 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev) child_pos = pci_find_capability(child_dev, PCI_CAP_ID_EXP); if (!child_pos) return -EINVAL; + + /* + * Disable ASPM for pre-1.1 PCIe device, we follow MS to use + * RBER bit to determine if a function is 1.1 version device + */ + pci_read_config_dword(child_dev, child_pos + PCI_EXP_DEVCAP, + ®32); + if (!(reg32 & PCI_EXP_DEVCAP_RBER) && !aspm_force) { + dev_printk(KERN_INFO, &child_dev->dev, "disabling ASPM" + " on pre-1.1 PCIe device. You can enable it" + " with 'pcie_aspm=force'\n"); + return -EINVAL; + } } return 0; } @@ -802,11 +816,23 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) static int __init pcie_aspm_disable(char *str) { - aspm_disabled = 1; + if (!strcmp(str, "off")) { + aspm_disabled = 1; + printk(KERN_INFO "PCIe ASPM is disabled\n"); + } else if (!strcmp(str, "force")) { + aspm_force = 1; + printk(KERN_INFO "PCIe ASPM is forcedly enabled\n"); + } return 1; } -__setup("pcie_noaspm", pcie_aspm_disable); +__setup("pcie_aspm=", pcie_aspm_disable); + +void pcie_no_aspm(void) +{ + if (!aspm_force) + aspm_disabled = 1; +} #ifdef CONFIG_ACPI #include <acpi/acpi_bus.h> diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h index 3656e0349dd..2529f3f2ea5 100644 --- a/drivers/pci/pcie/portdrv.h +++ b/drivers/pci/pcie/portdrv.h @@ -25,7 +25,6 @@ #define PCIE_CAPABILITIES_REG 0x2 #define PCIE_SLOT_CAPABILITIES_REG 0x14 #define PCIE_PORT_DEVICE_MAXSERVICES 4 -#define PCI_CFG_SPACE_SIZE 256 #define get_descriptor_id(type, service) (((type - 4) << 4) | service) diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 890f0d2b370..2e091e01482 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -195,24 +195,11 @@ static int get_port_device_capability(struct pci_dev *dev) /* PME Capable - root port capability */ if (((reg16 >> 4) & PORT_TYPE_MASK) == PCIE_RC_PORT) services |= PCIE_PORT_SERVICE_PME; - - pos = PCI_CFG_SPACE_SIZE; - while (pos) { - pci_read_config_dword(dev, pos, ®32); - switch (reg32 & 0xffff) { - case PCI_EXT_CAP_ID_ERR: - services |= PCIE_PORT_SERVICE_AER; - pos = reg32 >> 20; - break; - case PCI_EXT_CAP_ID_VC: - services |= PCIE_PORT_SERVICE_VC; - pos = reg32 >> 20; - break; - default: - pos = 0; - break; - } - } + + if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) + services |= PCIE_PORT_SERVICE_AER; + if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) + services |= PCIE_PORT_SERVICE_VC; return services; } diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index 367c9c20000..584422da8d8 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -91,7 +91,7 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev, pci_set_master(dev); if (!dev->irq && dev->pin) { - dev_warn(&dev->dev, "device [%04x/%04x] has invalid IRQ; " + dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; " "check vendor BIOS\n", dev->vendor, dev->device); } if (pcie_port_device_register(dev)) { diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index b1724cf31b6..003a9b3c293 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -14,8 +14,6 @@ #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 
#define CARDBUS_RESERVE_BUSNR 3 -#define PCI_CFG_SPACE_SIZE 256 -#define PCI_CFG_SPACE_EXP_SIZE 4096 /* Ugh. Need to stop exporting this to modules. */ LIST_HEAD(pci_root_buses); @@ -44,50 +42,6 @@ int no_pci_devices(void) } EXPORT_SYMBOL(no_pci_devices); -#ifdef HAVE_PCI_LEGACY -/** - * pci_create_legacy_files - create legacy I/O port and memory files - * @b: bus to create files under - * - * Some platforms allow access to legacy I/O port and ISA memory space on - * a per-bus basis. This routine creates the files and ties them into - * their associated read, write and mmap files from pci-sysfs.c - */ -static void pci_create_legacy_files(struct pci_bus *b) -{ - b->legacy_io = kzalloc(sizeof(struct bin_attribute) * 2, - GFP_ATOMIC); - if (b->legacy_io) { - b->legacy_io->attr.name = "legacy_io"; - b->legacy_io->size = 0xffff; - b->legacy_io->attr.mode = S_IRUSR | S_IWUSR; - b->legacy_io->read = pci_read_legacy_io; - b->legacy_io->write = pci_write_legacy_io; - device_create_bin_file(&b->dev, b->legacy_io); - - /* Allocated above after the legacy_io struct */ - b->legacy_mem = b->legacy_io + 1; - b->legacy_mem->attr.name = "legacy_mem"; - b->legacy_mem->size = 1024*1024; - b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR; - b->legacy_mem->mmap = pci_mmap_legacy_mem; - device_create_bin_file(&b->dev, b->legacy_mem); - } -} - -void pci_remove_legacy_files(struct pci_bus *b) -{ - if (b->legacy_io) { - device_remove_bin_file(&b->dev, b->legacy_io); - device_remove_bin_file(&b->dev, b->legacy_mem); - kfree(b->legacy_io); /* both are allocated here */ - } -} -#else /* !HAVE_PCI_LEGACY */ -static inline void pci_create_legacy_files(struct pci_bus *bus) { return; } -void pci_remove_legacy_files(struct pci_bus *bus) { return; } -#endif /* HAVE_PCI_LEGACY */ - /* * PCI Bus Class Devices */ @@ -163,12 +117,9 @@ static inline unsigned int pci_calc_resource_flags(unsigned int flags) return IORESOURCE_MEM; } -/* - * Find the extent of a PCI decode.. - */ -static u32 pci_size(u32 base, u32 maxbase, u32 mask) +static u64 pci_size(u64 base, u64 maxbase, u64 mask) { - u32 size = mask & maxbase; /* Find the significant bits */ + u64 size = mask & maxbase; /* Find the significant bits */ if (!size) return 0; @@ -184,135 +135,148 @@ static u32 pci_size(u32 base, u32 maxbase, u32 mask) return size; } -static u64 pci_size64(u64 base, u64 maxbase, u64 mask) -{ - u64 size = mask & maxbase; /* Find the significant bits */ - if (!size) - return 0; +enum pci_bar_type { + pci_bar_unknown, /* Standard PCI BAR probe */ + pci_bar_io, /* An io port BAR */ + pci_bar_mem32, /* A 32-bit memory BAR */ + pci_bar_mem64, /* A 64-bit memory BAR */ +}; - /* Get the lowest of them to find the decode size, and - from that the extent. */ - size = (size & ~(size-1)) - 1; +static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar) +{ + if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) { + res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK; + return pci_bar_io; + } - /* base == maxbase can be valid only if the BAR has - already been programmed with all 1s. */ - if (base == maxbase && ((base | size) & mask) != mask) - return 0; + res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; - return size; + if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) + return pci_bar_mem64; + return pci_bar_mem32; } -static inline int is_64bit_memory(u32 mask) +/* + * If the type is not unknown, we assume that the lowest bit is 'enable'. + * Returns 1 if the BAR was 64-bit and 0 if it was 32-bit. 
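The rewritten pci_size()/__pci_read_base() above size a BAR by writing all ones, reading the register back and isolating the lowest programmable address bit. A standalone userspace sketch of that arithmetic; bar_length() and the sample read-back value are illustrative, and unlike the kernel helper it returns the full length rather than length - 1.

#include <stdio.h>
#include <stdint.h>

static uint64_t bar_length(uint64_t readback, uint64_t mask)
{
        uint64_t size = readback & mask;   /* keep only the programmable address bits */

        if (!size)
                return 0;                  /* BAR not implemented */
        return size & ~(size - 1);         /* lowest set bit = decode granularity */
}

int main(void)
{
        /* a 1 MiB 32-bit memory BAR reads back 0xfff00000 after the all-ones write */
        printf("len = 0x%llx\n",
               (unsigned long long)bar_length(0xfff00000ULL, ~0xfULL));   /* 0x100000 */
        return 0;
}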
+ */ +static int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, + struct resource *res, unsigned int pos) { - if ((mask & (PCI_BASE_ADDRESS_SPACE|PCI_BASE_ADDRESS_MEM_TYPE_MASK)) == - (PCI_BASE_ADDRESS_SPACE_MEMORY|PCI_BASE_ADDRESS_MEM_TYPE_64)) - return 1; - return 0; -} + u32 l, sz, mask; -static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) -{ - unsigned int pos, reg, next; - u32 l, sz; - struct resource *res; + mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0; - for(pos=0; pos<howmany; pos = next) { - u64 l64; - u64 sz64; - u32 raw_sz; + res->name = pci_name(dev); - next = pos+1; - res = &dev->resource[pos]; - res->name = pci_name(dev); - reg = PCI_BASE_ADDRESS_0 + (pos << 2); - pci_read_config_dword(dev, reg, &l); - pci_write_config_dword(dev, reg, ~0); - pci_read_config_dword(dev, reg, &sz); - pci_write_config_dword(dev, reg, l); - if (!sz || sz == 0xffffffff) - continue; - if (l == 0xffffffff) - l = 0; - raw_sz = sz; - if ((l & PCI_BASE_ADDRESS_SPACE) == - PCI_BASE_ADDRESS_SPACE_MEMORY) { - sz = pci_size(l, sz, (u32)PCI_BASE_ADDRESS_MEM_MASK); - /* - * For 64bit prefetchable memory sz could be 0, if the - * real size is bigger than 4G, so we need to check - * szhi for that. - */ - if (!is_64bit_memory(l) && !sz) - continue; - res->start = l & PCI_BASE_ADDRESS_MEM_MASK; - res->flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK; + pci_read_config_dword(dev, pos, &l); + pci_write_config_dword(dev, pos, mask); + pci_read_config_dword(dev, pos, &sz); + pci_write_config_dword(dev, pos, l); + + /* + * All bits set in sz means the device isn't working properly. + * If the BAR isn't implemented, all bits must be 0. If it's a + * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit + * 1 must be clear. + */ + if (!sz || sz == 0xffffffff) + goto fail; + + /* + * I don't know how l can have all bits set. Copied from old code. + * Maybe it fixes a bug on some ancient platform. 
+ */ + if (l == 0xffffffff) + l = 0; + + if (type == pci_bar_unknown) { + type = decode_bar(res, l); + res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; + if (type == pci_bar_io) { + l &= PCI_BASE_ADDRESS_IO_MASK; + mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff; } else { - sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff); - if (!sz) - continue; - res->start = l & PCI_BASE_ADDRESS_IO_MASK; - res->flags |= l & ~PCI_BASE_ADDRESS_IO_MASK; + l &= PCI_BASE_ADDRESS_MEM_MASK; + mask = (u32)PCI_BASE_ADDRESS_MEM_MASK; } - res->end = res->start + (unsigned long) sz; - res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN; - if (is_64bit_memory(l)) { - u32 szhi, lhi; - - pci_read_config_dword(dev, reg+4, &lhi); - pci_write_config_dword(dev, reg+4, ~0); - pci_read_config_dword(dev, reg+4, &szhi); - pci_write_config_dword(dev, reg+4, lhi); - sz64 = ((u64)szhi << 32) | raw_sz; - l64 = ((u64)lhi << 32) | l; - sz64 = pci_size64(l64, sz64, PCI_BASE_ADDRESS_MEM_MASK); - next++; -#if BITS_PER_LONG == 64 - if (!sz64) { - res->start = 0; - res->end = 0; - res->flags = 0; - continue; - } - res->start = l64 & PCI_BASE_ADDRESS_MEM_MASK; - res->end = res->start + sz64; -#else - if (sz64 > 0x100000000ULL) { - dev_err(&dev->dev, "BAR %d: can't handle 64-bit" - " BAR\n", pos); - res->start = 0; - res->flags = 0; - } else if (lhi) { - /* 64-bit wide address, treat as disabled */ - pci_write_config_dword(dev, reg, - l & ~(u32)PCI_BASE_ADDRESS_MEM_MASK); - pci_write_config_dword(dev, reg+4, 0); - res->start = 0; - res->end = sz; - } -#endif + } else { + res->flags |= (l & IORESOURCE_ROM_ENABLE); + l &= PCI_ROM_ADDRESS_MASK; + mask = (u32)PCI_ROM_ADDRESS_MASK; + } + + if (type == pci_bar_mem64) { + u64 l64 = l; + u64 sz64 = sz; + u64 mask64 = mask | (u64)~0 << 32; + + pci_read_config_dword(dev, pos + 4, &l); + pci_write_config_dword(dev, pos + 4, ~0); + pci_read_config_dword(dev, pos + 4, &sz); + pci_write_config_dword(dev, pos + 4, l); + + l64 |= ((u64)l << 32); + sz64 |= ((u64)sz << 32); + + sz64 = pci_size(l64, sz64, mask64); + + if (!sz64) + goto fail; + + if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { + dev_err(&dev->dev, "can't handle 64-bit BAR\n"); + goto fail; + } else if ((sizeof(resource_size_t) < 8) && l) { + /* Address above 32-bit boundary; disable the BAR */ + pci_write_config_dword(dev, pos, 0); + pci_write_config_dword(dev, pos + 4, 0); + res->start = 0; + res->end = sz64; + } else { + res->start = l64; + res->end = l64 + sz64; + dev_printk(KERN_DEBUG, &dev->dev, + "reg %x 64bit mmio: %pR\n", pos, res); } + } else { + sz = pci_size(l, sz, mask); + + if (!sz) + goto fail; + + res->start = l; + res->end = l + sz; + + dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos, + (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio", + res); } + + out: + return (type == pci_bar_mem64) ? 
1 : 0; + fail: + res->flags = 0; + goto out; +} + +static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) +{ + unsigned int pos, reg; + + for (pos = 0; pos < howmany; pos++) { + struct resource *res = &dev->resource[pos]; + reg = PCI_BASE_ADDRESS_0 + (pos << 2); + pos += __pci_read_base(dev, pci_bar_unknown, res, reg); + } + if (rom) { + struct resource *res = &dev->resource[PCI_ROM_RESOURCE]; dev->rom_base_reg = rom; - res = &dev->resource[PCI_ROM_RESOURCE]; - res->name = pci_name(dev); - pci_read_config_dword(dev, rom, &l); - pci_write_config_dword(dev, rom, ~PCI_ROM_ADDRESS_ENABLE); - pci_read_config_dword(dev, rom, &sz); - pci_write_config_dword(dev, rom, l); - if (l == 0xffffffff) - l = 0; - if (sz && sz != 0xffffffff) { - sz = pci_size(l, sz, (u32)PCI_ROM_ADDRESS_MASK); - if (sz) { - res->flags = (l & IORESOURCE_ROM_ENABLE) | - IORESOURCE_MEM | IORESOURCE_PREFETCH | - IORESOURCE_READONLY | IORESOURCE_CACHEABLE | - IORESOURCE_SIZEALIGN; - res->start = l & PCI_ROM_ADDRESS_MASK; - res->end = res->start + (unsigned long) sz; - } - } + res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | + IORESOURCE_READONLY | IORESOURCE_CACHEABLE | + IORESOURCE_SIZEALIGN; + __pci_read_base(dev, pci_bar_mem32, res, rom); } } @@ -334,9 +298,6 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) child->resource[i] = child->parent->resource[i - 3]; } - for(i=0; i<3; i++) - child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i]; - res = child->resource[0]; pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); @@ -357,6 +318,7 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) res->start = base; if (!res->end) res->end = limit + 0xfff; + dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res); } res = child->resource[1]; @@ -368,6 +330,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM; res->start = base; res->end = limit + 0xfffff; + dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n", + res); } res = child->resource[2]; @@ -403,6 +367,9 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child) res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH; res->start = base; res->end = limit + 0xfffff; + dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n", + (res->flags & PCI_PREF_RANGE_TYPE_64) ? 
"64" : "32", + res); } } @@ -510,19 +477,27 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); u32 buses, i, j = 0; u16 bctl; + int broken = 0; pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n", buses & 0xffffff, pass); + /* Check if setup is sensible at all */ + if (!pass && + ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) { + dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n"); + broken = 1; + } + /* Disable MasterAbortMode during probing to avoid reporting of bus errors (in some architectures) */ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl); pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT); - if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus) { + if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) { unsigned int cmax, busnr; /* * Bus already configured by firmware, process it in the first @@ -560,7 +535,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, * do in the second pass. */ if (!pass) { - if (pcibios_assign_all_busses()) + if (pcibios_assign_all_busses() || broken) /* Temporarily disable forwarding of the configuration cycles on all bridges in this bus segment to avoid possible @@ -723,7 +698,7 @@ static int pci_setup_device(struct pci_dev * dev) dev->class = class; class >>= 8; - dev_dbg(&dev->dev, "found [%04x/%04x] class %06x header type %02x\n", + dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n", dev->vendor, dev->device, class, dev->hdr_type); /* "Unknown power state" */ @@ -805,6 +780,11 @@ static int pci_setup_device(struct pci_dev * dev) return 0; } +static void pci_release_capabilities(struct pci_dev *dev) +{ + pci_vpd_release(dev); +} + /** * pci_release_dev - free a pci device structure when all users of it are finished. * @dev: device that's been disconnected @@ -817,7 +797,7 @@ static void pci_release_dev(struct device *dev) struct pci_dev *pci_dev; pci_dev = to_pci_dev(dev); - pci_vpd_release(pci_dev); + pci_release_capabilities(pci_dev); kfree(pci_dev); } @@ -848,8 +828,9 @@ static void set_pcie_port_type(struct pci_dev *pdev) int pci_cfg_space_size_ext(struct pci_dev *dev) { u32 status; + int pos = PCI_CFG_SPACE_SIZE; - if (pci_read_config_dword(dev, 256, &status) != PCIBIOS_SUCCESSFUL) + if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL) goto fail; if (status == 0xffffffff) goto fail; @@ -897,8 +878,6 @@ struct pci_dev *alloc_pci_dev(void) INIT_LIST_HEAD(&dev->bus_list); - pci_msi_init_pci_dev(dev); - return dev; } EXPORT_SYMBOL(alloc_pci_dev); @@ -910,6 +889,7 @@ EXPORT_SYMBOL(alloc_pci_dev); static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) { struct pci_dev *dev; + struct pci_slot *slot; u32 l; u8 hdr_type; int delay = 1; @@ -958,6 +938,10 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) dev->error_state = pci_channel_io_normal; set_pcie_port_type(dev); + list_for_each_entry(slot, &bus->slots, list) + if (PCI_SLOT(devfn) == slot->number) + dev->slot = slot; + /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer) set this higher, assuming the system even supports it. 
*/ dev->dma_mask = 0xffffffff; @@ -966,9 +950,22 @@ static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn) return NULL; } + return dev; +} + +static void pci_init_capabilities(struct pci_dev *dev) +{ + /* MSI/MSI-X list */ + pci_msi_init_pci_dev(dev); + + /* Power Management */ + pci_pm_init(dev); + + /* Vital Product Data */ pci_vpd_pci22_init(dev); - return dev; + /* Alternative Routing-ID Forwarding */ + pci_enable_ari(dev); } void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) @@ -987,8 +984,8 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) /* Fix up broken headers */ pci_fixup_device(pci_fixup_header, dev); - /* Initialize power management of the device */ - pci_pm_init(dev); + /* Initialize various capabilities */ + pci_init_capabilities(dev); /* * Add the device to our list of discovered devices @@ -1053,7 +1050,8 @@ int pci_scan_slot(struct pci_bus *bus, int devfn) } } - if (bus->self) + /* only one slot has pcie device */ + if (bus->self && nr) pcie_aspm_init_link_state(bus->self); return nr; @@ -1195,8 +1193,11 @@ EXPORT_SYMBOL(pci_scan_bridge); EXPORT_SYMBOL_GPL(pci_scan_child_bus); #endif -static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev *b) +static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b) { + const struct pci_dev *a = to_pci_dev(d_a); + const struct pci_dev *b = to_pci_dev(d_b); + if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1; else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1; @@ -1209,50 +1210,7 @@ static int __init pci_sort_bf_cmp(const struct pci_dev *a, const struct pci_dev return 0; } -/* - * Yes, this forcably breaks the klist abstraction temporarily. It - * just wants to sort the klist, not change reference counts and - * take/drop locks rapidly in the process. It does all this while - * holding the lock for the list, so objects can't otherwise be - * added/removed while we're swizzling. 
- */ -static void __init pci_insertion_sort_klist(struct pci_dev *a, struct list_head *list) -{ - struct list_head *pos; - struct klist_node *n; - struct device *dev; - struct pci_dev *b; - - list_for_each(pos, list) { - n = container_of(pos, struct klist_node, n_node); - dev = container_of(n, struct device, knode_bus); - b = to_pci_dev(dev); - if (pci_sort_bf_cmp(a, b) <= 0) { - list_move_tail(&a->dev.knode_bus.n_node, &b->dev.knode_bus.n_node); - return; - } - } - list_move_tail(&a->dev.knode_bus.n_node, list); -} - void __init pci_sort_breadthfirst(void) { - LIST_HEAD(sorted_devices); - struct list_head *pos, *tmp; - struct klist_node *n; - struct device *dev; - struct pci_dev *pdev; - struct klist *device_klist; - - device_klist = bus_get_device_klist(&pci_bus_type); - - spin_lock(&device_klist->k_lock); - list_for_each_safe(pos, tmp, &device_klist->k_list) { - n = container_of(pos, struct klist_node, n_node); - dev = container_of(n, struct device, knode_bus); - pdev = to_pci_dev(dev); - pci_insertion_sort_klist(pdev, &sorted_devices); - } - list_splice(&sorted_devices, &device_klist->k_list); - spin_unlock(&device_klist->k_lock); + bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp); } diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 4400dffbd93..e1098c302c4 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -88,7 +88,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp if ((pos & 3) && cnt > 2) { unsigned short val; pci_user_read_config_word(dev, pos, &val); - __put_user(cpu_to_le16(val), (unsigned short __user *) buf); + __put_user(cpu_to_le16(val), (__le16 __user *) buf); buf += 2; pos += 2; cnt -= 2; @@ -97,7 +97,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp while (cnt >= 4) { unsigned int val; pci_user_read_config_dword(dev, pos, &val); - __put_user(cpu_to_le32(val), (unsigned int __user *) buf); + __put_user(cpu_to_le32(val), (__le32 __user *) buf); buf += 4; pos += 4; cnt -= 4; @@ -106,7 +106,7 @@ proc_bus_pci_read(struct file *file, char __user *buf, size_t nbytes, loff_t *pp if (cnt >= 2) { unsigned short val; pci_user_read_config_word(dev, pos, &val); - __put_user(cpu_to_le16(val), (unsigned short __user *) buf); + __put_user(cpu_to_le16(val), (__le16 __user *) buf); buf += 2; pos += 2; cnt -= 2; @@ -156,8 +156,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof } if ((pos & 3) && cnt > 2) { - unsigned short val; - __get_user(val, (unsigned short __user *) buf); + __le16 val; + __get_user(val, (__le16 __user *) buf); pci_user_write_config_word(dev, pos, le16_to_cpu(val)); buf += 2; pos += 2; @@ -165,8 +165,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof } while (cnt >= 4) { - unsigned int val; - __get_user(val, (unsigned int __user *) buf); + __le32 val; + __get_user(val, (__le32 __user *) buf); pci_user_write_config_dword(dev, pos, le32_to_cpu(val)); buf += 4; pos += 4; @@ -174,8 +174,8 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof } if (cnt >= 2) { - unsigned short val; - __get_user(val, (unsigned short __user *) buf); + __le16 val; + __get_user(val, (__le16 __user *) buf); pci_user_write_config_word(dev, pos, le16_to_cpu(val)); buf += 2; pos += 2; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index c880dd0bbfb..0b60ed884d9 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -24,6 +24,14 @@ #include <linux/kallsyms.h> #include "pci.h" +int 
isa_dma_bridge_buggy; +EXPORT_SYMBOL(isa_dma_bridge_buggy); +int pci_pci_problems; +EXPORT_SYMBOL(pci_pci_problems); +int pcie_mch_quirk; +EXPORT_SYMBOL(pcie_mch_quirk); + +#ifdef CONFIG_PCI_QUIRKS /* The Mellanox Tavor device gives false positive parity errors * Mark this device with a broken_parity_status, to allow * PCI scanning code to "skip" this now blacklisted device. @@ -35,6 +43,20 @@ static void __devinit quirk_mellanox_tavor(struct pci_dev *dev) DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR,quirk_mellanox_tavor); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_MELLANOX,PCI_DEVICE_ID_MELLANOX_TAVOR_BRIDGE,quirk_mellanox_tavor); +/* Many VIA bridges seem to corrupt data for DAC. Disable it here */ +int forbid_dac __read_mostly; +EXPORT_SYMBOL(forbid_dac); + +static __devinit void via_no_dac(struct pci_dev *dev) +{ + if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { + dev_info(&dev->dev, + "VIA PCI bridge detected. Disabling DAC.\n"); + forbid_dac = 1; + } +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); + /* Deal with broken BIOS'es that neglect to enable passive release, which can cause problems in combination with the 82441FX/PPro MTRRs */ static void quirk_passive_release(struct pci_dev *dev) @@ -62,8 +84,6 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441, quirk_p This appears to be BIOS not version dependent. So presumably there is a chipset level fix */ -int isa_dma_bridge_buggy; -EXPORT_SYMBOL(isa_dma_bridge_buggy); static void __devinit quirk_isa_dma_hangs(struct pci_dev *dev) { @@ -84,9 +104,6 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_1, quirk_isa_d DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_2, quirk_isa_dma_hangs); DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NEC, PCI_DEVICE_ID_NEC_CBUS_3, quirk_isa_dma_hangs); -int pci_pci_problems; -EXPORT_SYMBOL(pci_pci_problems); - /* * Chipsets where PCI->PCI transfers vanish or hang */ @@ -902,6 +919,19 @@ static void __init quirk_ide_samemode(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_10, quirk_ide_samemode); +/* + * Some ATA devices break if put into D3 + */ + +static void __devinit quirk_no_ata_d3(struct pci_dev *pdev) +{ + /* Quirk the legacy ATA devices only. The AHCI ones are ok */ + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; +} +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_ANY_ID, quirk_no_ata_d3); +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_ATI, PCI_ANY_ID, quirk_no_ata_d3); + /* This was originally an Alpha specific thing, but it really fits here. * The i82375 PCI/EISA bridge appears as non-classified. Fix that. 
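The quirks above all follow one pattern: a small hook plus a DECLARE_PCI_FIXUP_* line that registers it for a (vendor, device) pair and a fixup pass. Below is a hedged sketch of that pattern; the IDs and the no-MSI behaviour are placeholders, not a real erratum.

#include <linux/pci.h>

static void __devinit quirk_example_no_msi(struct pci_dev *dev)
{
        dev->no_msi = 1;        /* hypothetical workaround for a hypothetical device */
        dev_info(&dev->dev, "example quirk: MSI disabled\n");
}
DECLARE_PCI_FIXUP_FINAL(0x1234, 0x5678, quirk_example_no_msi);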
*/ @@ -1328,9 +1358,6 @@ static void __init quirk_alder_ioapic(struct pci_dev *pdev) DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_EESSC, quirk_alder_ioapic); #endif -int pcie_mch_quirk; -EXPORT_SYMBOL(pcie_mch_quirk); - static void __devinit quirk_pcie_mch(struct pci_dev *pdev) { pcie_mch_quirk = 1; @@ -1670,85 +1697,6 @@ static void __devinit fixup_rev1_53c810(struct pci_dev* dev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810); -static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) -{ - while (f < end) { - if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && - (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { -#ifdef DEBUG - dev_dbg(&dev->dev, "calling "); - print_fn_descriptor_symbol("%s\n", f->hook); -#endif - f->hook(dev); - } - f++; - } -} - -extern struct pci_fixup __start_pci_fixups_early[]; -extern struct pci_fixup __end_pci_fixups_early[]; -extern struct pci_fixup __start_pci_fixups_header[]; -extern struct pci_fixup __end_pci_fixups_header[]; -extern struct pci_fixup __start_pci_fixups_final[]; -extern struct pci_fixup __end_pci_fixups_final[]; -extern struct pci_fixup __start_pci_fixups_enable[]; -extern struct pci_fixup __end_pci_fixups_enable[]; -extern struct pci_fixup __start_pci_fixups_resume[]; -extern struct pci_fixup __end_pci_fixups_resume[]; -extern struct pci_fixup __start_pci_fixups_resume_early[]; -extern struct pci_fixup __end_pci_fixups_resume_early[]; -extern struct pci_fixup __start_pci_fixups_suspend[]; -extern struct pci_fixup __end_pci_fixups_suspend[]; - - -void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) -{ - struct pci_fixup *start, *end; - - switch(pass) { - case pci_fixup_early: - start = __start_pci_fixups_early; - end = __end_pci_fixups_early; - break; - - case pci_fixup_header: - start = __start_pci_fixups_header; - end = __end_pci_fixups_header; - break; - - case pci_fixup_final: - start = __start_pci_fixups_final; - end = __end_pci_fixups_final; - break; - - case pci_fixup_enable: - start = __start_pci_fixups_enable; - end = __end_pci_fixups_enable; - break; - - case pci_fixup_resume: - start = __start_pci_fixups_resume; - end = __end_pci_fixups_resume; - break; - - case pci_fixup_resume_early: - start = __start_pci_fixups_resume_early; - end = __end_pci_fixups_resume_early; - break; - - case pci_fixup_suspend: - start = __start_pci_fixups_suspend; - end = __end_pci_fixups_suspend; - break; - - default: - /* stupid compiler warning, you would think with an enum... */ - return; - } - pci_do_fixups(dev, start, end); -} -EXPORT_SYMBOL(pci_fixup_device); - /* Enable 1k I/O space granularity on the Intel P64H2 */ static void __devinit quirk_p64h2_1k_io(struct pci_dev *dev) { @@ -1871,9 +1819,14 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_c */ static void __devinit quirk_brcm_570x_limit_vpd(struct pci_dev *dev) { - /* Only disable the VPD capability for 5706, 5708, and 5709 rev. A */ + /* + * Only disable the VPD capability for 5706, 5706S, 5708, + * 5708S and 5709 rev. 
A + */ if ((dev->device == PCI_DEVICE_ID_NX2_5706) || + (dev->device == PCI_DEVICE_ID_NX2_5706S) || (dev->device == PCI_DEVICE_ID_NX2_5708) || + (dev->device == PCI_DEVICE_ID_NX2_5708S) || ((dev->device == PCI_DEVICE_ID_NX2_5709) && (dev->revision & 0xf0) == 0x0)) { if (dev->vpd) @@ -2117,3 +2070,82 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x4375, quirk_msi_intx_disable_bug); #endif /* CONFIG_PCI_MSI */ + +static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end) +{ + while (f < end) { + if ((f->vendor == dev->vendor || f->vendor == (u16) PCI_ANY_ID) && + (f->device == dev->device || f->device == (u16) PCI_ANY_ID)) { + dev_dbg(&dev->dev, "calling %pF\n", f->hook); + f->hook(dev); + } + f++; + } +} + +extern struct pci_fixup __start_pci_fixups_early[]; +extern struct pci_fixup __end_pci_fixups_early[]; +extern struct pci_fixup __start_pci_fixups_header[]; +extern struct pci_fixup __end_pci_fixups_header[]; +extern struct pci_fixup __start_pci_fixups_final[]; +extern struct pci_fixup __end_pci_fixups_final[]; +extern struct pci_fixup __start_pci_fixups_enable[]; +extern struct pci_fixup __end_pci_fixups_enable[]; +extern struct pci_fixup __start_pci_fixups_resume[]; +extern struct pci_fixup __end_pci_fixups_resume[]; +extern struct pci_fixup __start_pci_fixups_resume_early[]; +extern struct pci_fixup __end_pci_fixups_resume_early[]; +extern struct pci_fixup __start_pci_fixups_suspend[]; +extern struct pci_fixup __end_pci_fixups_suspend[]; + + +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) +{ + struct pci_fixup *start, *end; + + switch(pass) { + case pci_fixup_early: + start = __start_pci_fixups_early; + end = __end_pci_fixups_early; + break; + + case pci_fixup_header: + start = __start_pci_fixups_header; + end = __end_pci_fixups_header; + break; + + case pci_fixup_final: + start = __start_pci_fixups_final; + end = __end_pci_fixups_final; + break; + + case pci_fixup_enable: + start = __start_pci_fixups_enable; + end = __end_pci_fixups_enable; + break; + + case pci_fixup_resume: + start = __start_pci_fixups_resume; + end = __end_pci_fixups_resume; + break; + + case pci_fixup_resume_early: + start = __start_pci_fixups_resume_early; + end = __end_pci_fixups_resume_early; + break; + + case pci_fixup_suspend: + start = __start_pci_fixups_suspend; + end = __end_pci_fixups_suspend; + break; + + default: + /* stupid compiler warning, you would think with an enum... 
*/ + return; + } + pci_do_fixups(dev, start, end); +} +#else +void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev) {} +#endif +EXPORT_SYMBOL(pci_fixup_device); diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c index bdc2a44d68e..042e0892442 100644 --- a/drivers/pci/remove.c +++ b/drivers/pci/remove.c @@ -73,6 +73,7 @@ void pci_remove_bus(struct pci_bus *pci_bus) up_write(&pci_bus_sem); pci_remove_legacy_files(pci_bus); device_remove_file(&pci_bus->dev, &dev_attr_cpuaffinity); + device_remove_file(&pci_bus->dev, &dev_attr_cpulistaffinity); device_unregister(&pci_bus->dev); } EXPORT_SYMBOL(pci_remove_bus); @@ -114,13 +115,9 @@ void pci_remove_behind_bridge(struct pci_dev *dev) { struct list_head *l, *n; - if (dev->subordinate) { - list_for_each_safe(l, n, &dev->subordinate->devices) { - struct pci_dev *dev = pci_dev_b(l); - - pci_remove_bus_device(dev); - } - } + if (dev->subordinate) + list_for_each_safe(l, n, &dev->subordinate->devices) + pci_remove_bus_device(pci_dev_b(l)); } static void pci_stop_bus_devices(struct pci_bus *bus) diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c index bd5c0e03139..1f5f6143f35 100644 --- a/drivers/pci/rom.c +++ b/drivers/pci/rom.c @@ -21,7 +21,7 @@ * between the ROM and other resources, so enabling it may disable access * to MMIO registers or other card memory. */ -static int pci_enable_rom(struct pci_dev *pdev) +int pci_enable_rom(struct pci_dev *pdev) { struct resource *res = pdev->resource + PCI_ROM_RESOURCE; struct pci_bus_region region; @@ -45,7 +45,7 @@ static int pci_enable_rom(struct pci_dev *pdev) * Disable ROM decoding on a PCI device by turning off the last bit in the * ROM BAR. */ -static void pci_disable_rom(struct pci_dev *pdev) +void pci_disable_rom(struct pci_dev *pdev) { u32 rom_addr; pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr); @@ -260,3 +260,5 @@ void pci_cleanup_rom(struct pci_dev *pdev) EXPORT_SYMBOL(pci_map_rom); EXPORT_SYMBOL(pci_unmap_rom); +EXPORT_SYMBOL_GPL(pci_enable_rom); +EXPORT_SYMBOL_GPL(pci_disable_rom); diff --git a/drivers/pci/search.c b/drivers/pci/search.c index 217814fef4e..5af8bd53814 100644 --- a/drivers/pci/search.c +++ b/drivers/pci/search.c @@ -162,10 +162,11 @@ EXPORT_SYMBOL(pci_find_slot); * time. */ struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device, - const struct pci_dev *from) + struct pci_dev *from) { struct pci_dev *pdev; + pci_dev_get(from); pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from); pci_dev_put(pdev); return pdev; @@ -263,23 +264,21 @@ static int match_pci_dev_by_id(struct device *dev, void *data) * this file. */ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, - const struct pci_dev *from) + struct pci_dev *from) { struct device *dev; struct device *dev_start = NULL; struct pci_dev *pdev = NULL; WARN_ON(in_interrupt()); - if (from) { - /* FIXME - * take the cast off, when bus_find_device is made const. 
- */ - dev_start = (struct device *)&from->dev; - } + if (from) + dev_start = &from->dev; dev = bus_find_device(&pci_bus_type, dev_start, (void *)id, match_pci_dev_by_id); if (dev) pdev = to_pci_dev(dev); + if (from) + pci_dev_put(from); return pdev; } @@ -301,7 +300,7 @@ static struct pci_dev *pci_get_dev_by_id(const struct pci_device_id *id, */ struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, unsigned int ss_vendor, unsigned int ss_device, - const struct pci_dev *from) + struct pci_dev *from) { struct pci_dev *pdev; struct pci_device_id *id; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 827c0a520e2..ea979f2bc6d 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -299,7 +299,7 @@ static void pbus_size_io(struct pci_bus *bus) if (r->parent || !(r->flags & IORESOURCE_IO)) continue; - r_size = r->end - r->start + 1; + r_size = resource_size(r); if (r_size < 0x400) /* Might be re-aligned for ISA */ @@ -350,15 +350,13 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long if (r->parent || (r->flags & mask) != type) continue; - r_size = r->end - r->start + 1; + r_size = resource_size(r); /* For bridges size != alignment */ - align = (i < PCI_BRIDGE_RESOURCES) ? r_size : r->start; + align = resource_alignment(r); order = __ffs(align) - 20; if (order > 11) { - dev_warn(&dev->dev, "BAR %d too large: " - "%#016llx-%#016llx\n", i, - (unsigned long long)r->start, - (unsigned long long)r->end); + dev_warn(&dev->dev, "BAR %d bad alignment %llx: " + "%pR\n", i, (unsigned long long)align, r); r->flags = 0; continue; } @@ -377,11 +375,10 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, unsigned long align = 0; min_align = 0; for (order = 0; order <= max_order; order++) { -#ifdef CONFIG_RESOURCES_64BIT - resource_size_t align1 = 1ULL << (order + 20); -#else - resource_size_t align1 = 1U << (order + 20); -#endif + resource_size_t align1 = 1; + + align1 <<= (order + 20); + if (!align) min_align = align1; else if (ALIGN(align + min_align, min_align) < align1) @@ -530,6 +527,38 @@ void __ref pci_bus_assign_resources(struct pci_bus *bus) } EXPORT_SYMBOL(pci_bus_assign_resources); +static void pci_bus_dump_res(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { + struct resource *res = bus->resource[i]; + if (!res) + continue; + + printk(KERN_INFO "bus: %02x index %x %s: %pR\n", + bus->number, i, + (res->flags & IORESOURCE_IO) ? 
"io port" : "mmio", res); + } +} + +static void pci_bus_dump_resources(struct pci_bus *bus) +{ + struct pci_bus *b; + struct pci_dev *dev; + + + pci_bus_dump_res(bus); + + list_for_each_entry(dev, &bus->devices, bus_list) { + b = dev->subordinate; + if (!b) + continue; + + pci_bus_dump_resources(b); + } +} + void __init pci_assign_unassigned_resources(void) { @@ -545,4 +574,9 @@ pci_assign_unassigned_resources(void) pci_bus_assign_resources(bus); pci_enable_bridges(bus); } + + /* dump the resource on buses */ + list_for_each_entry(bus, &pci_root_buses, node) { + pci_bus_dump_resources(bus); + } } diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c index 1a5fc83c71b..2dbd96cce2d 100644 --- a/drivers/pci/setup-res.c +++ b/drivers/pci/setup-res.c @@ -49,10 +49,8 @@ void pci_update_resource(struct pci_dev *dev, struct resource *res, int resno) pcibios_resource_to_bus(dev, ®ion, res); - dev_dbg(&dev->dev, "BAR %d: got res [%#llx-%#llx] bus [%#llx-%#llx] " - "flags %#lx\n", resno, - (unsigned long long)res->start, - (unsigned long long)res->end, + dev_dbg(&dev->dev, "BAR %d: got res %pR bus [%#llx-%#llx] " + "flags %#lx\n", resno, res, (unsigned long long)region.start, (unsigned long long)region.end, (unsigned long)res->flags); @@ -114,13 +112,11 @@ int pci_claim_resource(struct pci_dev *dev, int resource) err = insert_resource(root, res); if (err) { - dev_err(&dev->dev, "BAR %d: %s of %s [%#llx-%#llx]\n", + dev_err(&dev->dev, "BAR %d: %s of %s %pR\n", resource, root ? "address space collision on" : "no parent found for", - dtype, - (unsigned long long)res->start, - (unsigned long long)res->end); + dtype, res); } return err; @@ -133,15 +129,14 @@ int pci_assign_resource(struct pci_dev *dev, int resno) resource_size_t size, min, align; int ret; - size = res->end - res->start + 1; + size = resource_size(res); min = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM; align = resource_alignment(res); if (!align) { dev_err(&dev->dev, "BAR %d: can't allocate resource (bogus " - "alignment) [%#llx-%#llx] flags %#lx\n", - resno, (unsigned long long)res->start, - (unsigned long long)res->end, res->flags); + "alignment) %pR flags %#lx\n", + resno, res, res->flags); return -EINVAL; } @@ -162,11 +157,8 @@ int pci_assign_resource(struct pci_dev *dev, int resno) } if (ret) { - dev_err(&dev->dev, "BAR %d: can't allocate %s resource " - "[%#llx-%#llx]\n", resno, - res->flags & IORESOURCE_IO ? "I/O" : "mem", - (unsigned long long)res->start, - (unsigned long long)res->end); + dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", + resno, res->flags & IORESOURCE_IO ? "I/O" : "mem", res); } else { res->flags &= ~IORESOURCE_STARTALIGN; if (resno < PCI_BRIDGE_RESOURCES) @@ -202,11 +194,8 @@ int pci_assign_resource_fixed(struct pci_dev *dev, int resno) } if (ret) { - dev_err(&dev->dev, "BAR %d: can't allocate %s resource " - "[%#llx-%#llx\n]", resno, - res->flags & IORESOURCE_IO ? "I/O" : "mem", - (unsigned long long)res->start, - (unsigned long long)res->end); + dev_err(&dev->dev, "BAR %d: can't allocate %s resource %pR\n", + resno, res->flags & IORESOURCE_IO ? 
"I/O" : "mem", res); } else if (resno < PCI_BRIDGE_RESOURCES) { pci_update_resource(dev, res, resno); } @@ -237,9 +226,8 @@ void pdev_sort_resources(struct pci_dev *dev, struct resource_list *head) r_align = resource_alignment(r); if (!r_align) { dev_warn(&dev->dev, "BAR %d: bogus alignment " - "[%#llx-%#llx] flags %#lx\n", - i, (unsigned long long)r->start, - (unsigned long long)r->end, r->flags); + "%pR flags %#lx\n", + i, r, r->flags); continue; } for (list = head; ; list = list->next) { @@ -287,9 +275,7 @@ int pci_enable_resources(struct pci_dev *dev, int mask) if (!r->parent) { dev_err(&dev->dev, "device not available because of " - "BAR %d [%#llx-%#llx] collisions\n", i, - (unsigned long long) r->start, - (unsigned long long) r->end); + "BAR %d %pR collisions\n", i, r); return -EINVAL; } diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c index 7e5b85cbd94..4dd1c3e157a 100644 --- a/drivers/pci/slot.c +++ b/drivers/pci/slot.c @@ -49,11 +49,16 @@ static ssize_t address_read_file(struct pci_slot *slot, char *buf) static void pci_slot_release(struct kobject *kobj) { + struct pci_dev *dev; struct pci_slot *slot = to_pci_slot(kobj); pr_debug("%s: releasing pci_slot on %x:%d\n", __func__, slot->bus->number, slot->number); + list_for_each_entry(dev, &slot->bus->devices, bus_list) + if (PCI_SLOT(dev->devfn) == slot->number) + dev->slot = NULL; + list_del(&slot->list); kfree(slot); @@ -73,18 +78,100 @@ static struct kobj_type pci_slot_ktype = { .default_attrs = pci_slot_default_attrs, }; +static char *make_slot_name(const char *name) +{ + char *new_name; + int len, max, dup; + + new_name = kstrdup(name, GFP_KERNEL); + if (!new_name) + return NULL; + + /* + * Make sure we hit the realloc case the first time through the + * loop. 'len' will be strlen(name) + 3 at that point which is + * enough space for "name-X" and the trailing NUL. + */ + len = strlen(name) + 2; + max = 1; + dup = 1; + + for (;;) { + struct kobject *dup_slot; + dup_slot = kset_find_obj(pci_slots_kset, new_name); + if (!dup_slot) + break; + kobject_put(dup_slot); + if (dup == max) { + len++; + max *= 10; + kfree(new_name); + new_name = kmalloc(len, GFP_KERNEL); + if (!new_name) + break; + } + sprintf(new_name, "%s-%d", name, dup++); + } + + return new_name; +} + +static int rename_slot(struct pci_slot *slot, const char *name) +{ + int result = 0; + char *slot_name; + + if (strcmp(pci_slot_name(slot), name) == 0) + return result; + + slot_name = make_slot_name(name); + if (!slot_name) + return -ENOMEM; + + result = kobject_rename(&slot->kobj, slot_name); + kfree(slot_name); + + return result; +} + +static struct pci_slot *get_slot(struct pci_bus *parent, int slot_nr) +{ + struct pci_slot *slot; + /* + * We already hold pci_bus_sem so don't worry + */ + list_for_each_entry(slot, &parent->slots, list) + if (slot->number == slot_nr) { + kobject_get(&slot->kobj); + return slot; + } + + return NULL; +} + /** * pci_create_slot - create or increment refcount for physical PCI slot * @parent: struct pci_bus of parent bridge * @slot_nr: PCI_SLOT(pci_dev->devfn) or -1 for placeholder * @name: user visible string presented in /sys/bus/pci/slots/<name> + * @hotplug: set if caller is hotplug driver, NULL otherwise * * PCI slots have first class attributes such as address, speed, width, * and a &struct pci_slot is used to manage them. This interface will * either return a new &struct pci_slot to the caller, or if the pci_slot * already exists, its refcount will be incremented. 
* - * Slots are uniquely identified by a @pci_bus, @slot_nr, @name tuple. + * Slots are uniquely identified by a @pci_bus, @slot_nr tuple. + * + * There are known platforms with broken firmware that assign the same + * name to multiple slots. Workaround these broken platforms by renaming + * the slots on behalf of the caller. If firmware assigns name N to + * multiple slots: + * + * The first slot is assigned N + * The second slot is assigned N-1 + * The third slot is assigned N-2 + * etc. * * Placeholder slots: * In most cases, @pci_bus, @slot_nr will be sufficient to uniquely identify @@ -93,71 +180,82 @@ static struct kobj_type pci_slot_ktype = { * the slot. In this scenario, the caller may pass -1 for @slot_nr. * * The following semantics are imposed when the caller passes @slot_nr == - * -1. First, the check for existing %struct pci_slot is skipped, as the - * caller may know about several unpopulated slots on a given %struct - * pci_bus, and each slot would have a @slot_nr of -1. Uniqueness for - * these slots is then determined by the @name parameter. We expect - * kobject_init_and_add() to warn us if the caller attempts to create - * multiple slots with the same name. The other change in semantics is + * -1. First, we no longer check for an existing %struct pci_slot, as there + * may be many slots with @slot_nr of -1. The other change in semantics is * user-visible, which is the 'address' parameter presented in sysfs will * consist solely of a dddd:bb tuple, where dddd is the PCI domain of the * %struct pci_bus and bb is the bus number. In other words, the devfn of * the 'placeholder' slot will not be displayed. */ - struct pci_slot *pci_create_slot(struct pci_bus *parent, int slot_nr, - const char *name) + const char *name, + struct hotplug_slot *hotplug) { + struct pci_dev *dev; struct pci_slot *slot; - int err; + int err = 0; + char *slot_name = NULL; down_write(&pci_bus_sem); if (slot_nr == -1) goto placeholder; - /* If we've already created this slot, bump refcount and return. */ - list_for_each_entry(slot, &parent->slots, list) { - if (slot->number == slot_nr) { - kobject_get(&slot->kobj); - pr_debug("%s: inc refcount to %d on %04x:%02x:%02x\n", - __func__, - atomic_read(&slot->kobj.kref.refcount), - pci_domain_nr(parent), parent->number, - slot_nr); - goto out; + /* + * Hotplug drivers are allowed to rename an existing slot, + * but only if not already claimed. + */ + slot = get_slot(parent, slot_nr); + if (slot) { + if (hotplug) { + if ((err = slot->hotplug ? 
-EBUSY : 0) + || (err = rename_slot(slot, name))) { + kobject_put(&slot->kobj); + slot = NULL; + goto err; + } } + goto out; } placeholder: slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { - slot = ERR_PTR(-ENOMEM); - goto out; + err = -ENOMEM; + goto err; } slot->bus = parent; slot->number = slot_nr; slot->kobj.kset = pci_slots_kset; - err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, - "%s", name); - if (err) { - printk(KERN_ERR "Unable to register kobject %s\n", name); + + slot_name = make_slot_name(name); + if (!slot_name) { + err = -ENOMEM; goto err; } + err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL, + "%s", slot_name); + if (err) + goto err; + INIT_LIST_HEAD(&slot->list); list_add(&slot->list, &parent->slots); + list_for_each_entry(dev, &parent->devices, bus_list) + if (PCI_SLOT(dev->devfn) == slot_nr) + dev->slot = slot; + /* Don't care if debug printk has a -1 for slot_nr */ pr_debug("%s: created pci_slot on %04x:%02x:%02x\n", __func__, pci_domain_nr(parent), parent->number, slot_nr); - out: +out: up_write(&pci_bus_sem); return slot; - err: +err: kfree(slot); slot = ERR_PTR(err); goto out; @@ -165,7 +263,7 @@ placeholder: EXPORT_SYMBOL_GPL(pci_create_slot); /** - * pci_update_slot_number - update %struct pci_slot -> number + * pci_renumber_slot - update %struct pci_slot -> number * @slot - %struct pci_slot to update * @slot_nr - new number for slot * @@ -173,27 +271,22 @@ EXPORT_SYMBOL_GPL(pci_create_slot); * created a placeholder slot in pci_create_slot() by passing a -1 as * slot_nr, to update their %struct pci_slot with the correct @slot_nr. */ - -void pci_update_slot_number(struct pci_slot *slot, int slot_nr) +void pci_renumber_slot(struct pci_slot *slot, int slot_nr) { - int name_count = 0; struct pci_slot *tmp; down_write(&pci_bus_sem); list_for_each_entry(tmp, &slot->bus->slots, list) { WARN_ON(tmp->number == slot_nr); - if (!strcmp(kobject_name(&tmp->kobj), kobject_name(&slot->kobj))) - name_count++; + goto out; } - if (name_count > 1) - printk(KERN_WARNING "pci_update_slot_number found %d slots with the same name: %s\n", name_count, kobject_name(&slot->kobj)); - slot->number = slot_nr; +out: up_write(&pci_bus_sem); } -EXPORT_SYMBOL_GPL(pci_update_slot_number); +EXPORT_SYMBOL_GPL(pci_renumber_slot); /** * pci_destroy_slot - decrement refcount for physical PCI slot @@ -203,7 +296,6 @@ EXPORT_SYMBOL_GPL(pci_update_slot_number); * just call kobject_put on its kobj and let our release methods do the * rest. */ - void pci_destroy_slot(struct pci_slot *slot) { pr_debug("%s: dec refcount to %d on %04x:%02x:%02x\n", __func__, |
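
Editor's note: the pci_create_slot() kernel-doc above describes how duplicate firmware-assigned slot names are disambiguated (the first slot keeps name N, later ones become "N-1", "N-2", and so on). The following is a minimal userspace sketch of that naming rule only, not the kernel code: make_unique_slot_name(), name_taken() and the sample name table are hypothetical stand-ins for the real make_slot_name(), which probes the pci_slots_kset with kset_find_obj() instead.

/*
 * Illustrative sketch only -- mimics the documented rule: if "name" is
 * already in use, try "name-1", "name-2", ... until a free string is found.
 * name_taken() stands in for the kset_find_obj() lookup done by the kernel.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const char *existing[] = { "PCIE-1", "PCIE-1-1", NULL };

static int name_taken(const char *name)
{
	for (int i = 0; existing[i]; i++)
		if (strcmp(existing[i], name) == 0)
			return 1;
	return 0;
}

static char *make_unique_slot_name(const char *name)
{
	size_t len = strlen(name) + 16;	/* room for a "-<n>" suffix */
	char *buf = malloc(len);
	int dup = 1;

	if (!buf)
		return NULL;
	strcpy(buf, name);
	while (name_taken(buf))
		snprintf(buf, len, "%s-%d", name, dup++);
	return buf;
}

int main(void)
{
	char *n = make_unique_slot_name("PCIE-1");

	if (n) {
		printf("%s\n", n);	/* prints "PCIE-1-2" given the table above */
		free(n);
	}
	return 0;
}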