Diffstat (limited to 'drivers/pci/pci.c')
-rw-r--r--  drivers/pci/pci.c  468
1 file changed, 355 insertions(+), 113 deletions(-)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6edecff0b41..cb1dd5f4988 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -19,8 +19,8 @@
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
-#include <asm/dma.h> /* isa_dma_bridge_buggy */
#include <linux/device.h>
+#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"
@@ -29,7 +29,23 @@ const char *pci_power_names[] = {
};
EXPORT_SYMBOL_GPL(pci_power_names);
-unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+int isa_dma_bridge_buggy;
+EXPORT_SYMBOL(isa_dma_bridge_buggy);
+
+int pci_pci_problems;
+EXPORT_SYMBOL(pci_pci_problems);
+
+unsigned int pci_pm_d3_delay;
+
+static void pci_dev_d3_sleep(struct pci_dev *dev)
+{
+ unsigned int delay = dev->d3_delay;
+
+ if (delay < pci_pm_d3_delay)
+ delay = pci_pm_d3_delay;
+
+ msleep(delay);
+}
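
The new per-device d3_delay gives quirks a way to stretch the D3 transition delay for hardware that needs more than the PCI_PM_D3_WAIT minimum, while pci_dev_d3_sleep() still enforces the global pci_pm_d3_delay as a floor. A minimal sketch of such a quirk, with a hypothetical device ID and an illustrative 120 ms value:

#include <linux/pci.h>

/* Hypothetical final fixup: give a slow device extra settle time in D3hot. */
static void quirk_slow_d3_transition(struct pci_dev *dev)
{
	/* pci_dev_d3_sleep() waits for max(dev->d3_delay, pci_pm_d3_delay) */
	dev->d3_delay = 120;	/* milliseconds; illustrative value */
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1234, quirk_slow_d3_transition);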
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
@@ -47,6 +63,15 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
+/*
+ * The default CLS is used if the arch didn't set CLS explicitly and not
+ * all PCI devices agree on the same value. The arch can override either
+ * the default or the actual value as it sees fit. Note that this is
+ * measured in 32-bit words, not bytes.
+ */
+u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
+u8 pci_cache_line_size;
+
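
Since pci_dfl_cache_line_size is now an ordinary variable, an architecture can adjust the default at boot instead of redefining PCI_CACHE_LINE_BYTES at compile time. A rough sketch, assuming a hypothetical arch init hook and a 64-byte cache line:

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical arch setup: the CLS register counts 32-bit words, so a
 * 64-byte cache line is programmed as 64 >> 2 = 16. */
static int __init example_arch_pci_cls_init(void)
{
	pci_dfl_cache_line_size = 64 >> 2;
	return 0;
}
arch_initcall(example_arch_pci_cls_init);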
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -278,6 +303,49 @@ int pci_find_ext_capability(struct pci_dev *dev, int cap)
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
+/**
+ * pci_bus_find_ext_capability - find an extended capability
+ * @bus: the PCI bus to query
+ * @devfn: PCI device to query
+ * @cap: capability code
+ *
+ * Like pci_find_ext_capability() but works for PCI devices that do not have a
+ * pci_dev structure set up yet.
+ *
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it.
+ */
+int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
+ int cap)
+{
+ u32 header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (pci_bus_read_config_dword(bus, devfn, pos, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+ if (header == 0xffffffff || header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ if (pci_bus_read_config_dword(bus, devfn, pos, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
+
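
pci_bus_find_ext_capability() is aimed at callers that must probe extended config space before a pci_dev exists, for example early platform code working from a bare bus/devfn pair. A sketch under that assumption (the AER capability and the helper name are purely illustrative, and since the function is not exported here the caller is assumed to be built-in):

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_regs.h>

/* Hypothetical early probe: look for the AER extended capability on a
 * function that has not been enumerated into a pci_dev yet. */
static int early_check_aer(struct pci_bus *bus, unsigned int devfn)
{
	int pos = pci_bus_find_ext_capability(bus, devfn, PCI_EXT_CAP_ID_ERR);

	if (!pos)
		return -ENODEV;		/* capability not present */

	pr_info("AER capability found at config offset %#x\n", pos);
	return pos;
}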
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
int rc, ttl = PCI_FIND_CAP_TTL;
@@ -361,10 +429,9 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
const struct pci_bus *bus = dev->bus;
int i;
- struct resource *best = NULL;
+ struct resource *best = NULL, *r;
- for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
- struct resource *r = bus->resource[i];
+ pci_bus_for_each_resource(bus, r, i) {
if (!r)
continue;
if (res->start && !(res->start >= r->start && res->end <= r->end))
@@ -373,8 +440,12 @@ pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
continue; /* Wrong type */
if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
return r; /* Exact match */
- if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
- best = r; /* Approximating prefetchable by non-prefetchable */
+ /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
+ if (r->flags & IORESOURCE_PREFETCH)
+ continue;
+ /* .. but we can put a prefetchable resource inside a non-prefetchable one */
+ if (!best)
+ best = r;
}
return best;
}
@@ -434,6 +505,12 @@ static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}
+static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->run_wake(dev, enable) : -ENODEV;
+}
+
/**
* pci_raw_set_power_state - Use PCI PM registers to set the power state of
* given PCI device
@@ -509,11 +586,15 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(PCI_PM_D2_DELAY);
- dev->current_state = state;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ if (dev->current_state != state && printk_ratelimit())
+ dev_info(&dev->dev, "Refused to change power state, "
+ "currently in D%d\n", dev->current_state);
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
@@ -724,8 +805,8 @@ static int pci_save_pcie_state(struct pci_dev *dev)
u16 *cap;
u16 flags;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
- if (pos <= 0)
+ pos = pci_pcie_cap(dev);
+ if (!pos)
return 0;
save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
@@ -833,7 +914,7 @@ pci_save_state(struct pci_dev *dev)
int i;
/* XXX: 100% dword access ok here? */
for (i = 0; i < 16; i++)
- pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+ pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
dev->state_saved = true;
if ((i = pci_save_pcie_state(dev)) != 0)
return i;
@@ -1136,11 +1217,11 @@ pci_disable_device(struct pci_dev *dev)
/**
* pcibios_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
- * Sets the PCI-E reset state for the device. This is the default
+ * Sets the PCIe reset state for the device. This is the default
* implementation. Architecture implementations can override this.
*/
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
@@ -1151,7 +1232,7 @@ int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
/**
* pci_set_pcie_reset_state - set reset state for device dev
- * @dev: the PCI-E device reset
+ * @dev: the PCIe device reset
* @state: Reset state to enter into
*
*
@@ -1163,6 +1244,66 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
}
/**
+ * pci_check_pme_status - Check if given device has generated PME.
+ * @dev: Device to check.
+ *
+ * Check the PME status of the device and if set, clear it and clear PME enable
+ * (if set). Return 'true' if PME status and PME enable were both set or
+ * 'false' otherwise.
+ */
+bool pci_check_pme_status(struct pci_dev *dev)
+{
+ int pmcsr_pos;
+ u16 pmcsr;
+ bool ret = false;
+
+ if (!dev->pm_cap)
+ return false;
+
+ pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
+ pci_read_config_word(dev, pmcsr_pos, &pmcsr);
+ if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
+ return false;
+
+ /* Clear PME status. */
+ pmcsr |= PCI_PM_CTRL_PME_STATUS;
+ if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
+ /* Disable PME to avoid interrupt flood. */
+ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+ ret = true;
+ }
+
+ pci_write_config_word(dev, pmcsr_pos, pmcsr);
+
+ return ret;
+}
+
+/**
+ * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
+ * @dev: Device to handle.
+ * @ign: Ignored.
+ *
+ * Check if @dev has generated PME and queue a resume request for it in that
+ * case.
+ */
+static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
+{
+ if (pci_check_pme_status(dev))
+ pm_request_resume(&dev->dev);
+ return 0;
+}
+
+/**
+ * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
+ * @bus: Top bus of the subtree to walk.
+ */
+void pci_pme_wakeup_bus(struct pci_bus *bus)
+{
+ if (bus)
+ pci_walk_bus(bus, pci_pme_wakeup, NULL);
+}
+
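
Taken together, pci_check_pme_status(), pci_pme_wakeup() and pci_pme_wakeup_bus() let PME handling code turn a PME# signal into runtime-resume requests. None of them is exported here, so the following is only a sketch of how built-in code (a root-port PME handler, say) might use them; the function name is hypothetical:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

/* Hypothetical handler: if the port itself asserted PME#, resume it;
 * otherwise wake everything below it and let each device's PME Status
 * bit identify the actual requester. */
static void example_handle_port_pme(struct pci_dev *port)
{
	if (pci_check_pme_status(port)) {
		pm_request_resume(&port->dev);
		return;
	}
	pci_pme_wakeup_bus(port->subordinate);
}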
+/**
* pci_pme_capable - check the capability of PCI device to generate PME#
* @dev: PCI device to handle.
* @state: PCI state from which device will issue PME#.
@@ -1198,14 +1339,15 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
- dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+ dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
enable ? "enabled" : "disabled");
}
/**
- * pci_enable_wake - enable PCI device as wakeup event source
+ * __pci_enable_wake - enable PCI device as wakeup event source
* @dev: PCI device affected
* @state: PCI state from which device will issue wakeup events
+ * @runtime: True if the events are to be generated at run time
* @enable: True to enable event generation; false to disable
*
* This enables the device as a wakeup event source, or disables it.
@@ -1221,11 +1363,12 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
* Error code depending on the platform is returned if both the platform and
* the native mechanism fail to enable the generation of wake-up events
*/
-int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
+int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
+ bool runtime, bool enable)
{
int ret = 0;
- if (enable && !device_may_wakeup(&dev->dev))
+ if (enable && !runtime && !device_may_wakeup(&dev->dev))
return -EINVAL;
/* Don't do the same thing twice in a row for one device. */
@@ -1245,19 +1388,24 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
pci_pme_active(dev, true);
else
ret = 1;
- error = platform_pci_sleep_wake(dev, true);
+ error = runtime ? platform_pci_run_wake(dev, true) :
+ platform_pci_sleep_wake(dev, true);
if (ret)
ret = error;
if (!ret)
dev->wakeup_prepared = true;
} else {
- platform_pci_sleep_wake(dev, false);
+ if (runtime)
+ platform_pci_run_wake(dev, false);
+ else
+ platform_pci_sleep_wake(dev, false);
pci_pme_active(dev, false);
dev->wakeup_prepared = false;
}
return ret;
}
+EXPORT_SYMBOL(__pci_enable_wake);
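
The historical pci_enable_wake() interface does not go away; with the runtime flag folded into __pci_enable_wake(), the sleep-time variant can be kept as a thin wrapper. The companion header change is not part of this file, but it presumably reduces to roughly:

static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
				  bool enable)
{
	return __pci_enable_wake(dev, state, false, enable);
}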
/**
* pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
@@ -1367,6 +1515,66 @@ int pci_back_from_sleep(struct pci_dev *dev)
}
/**
+ * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
+ * @dev: PCI device being suspended.
+ *
+ * Prepare @dev to generate wake-up events at run time and put it into a low
+ * power state.
+ */
+int pci_finish_runtime_suspend(struct pci_dev *dev)
+{
+ pci_power_t target_state = pci_target_state(dev);
+ int error;
+
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+ __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ __pci_enable_wake(dev, target_state, true, false);
+
+ return error;
+}
+
+/**
+ * pci_dev_run_wake - Check if device can generate run-time wake-up events.
+ * @dev: Device to check.
+ *
+ * Return true if the device itself is capable of generating wake-up events
+ * (through the platform or using the native PCIe PME) or if the device supports
+ * PME and one of its upstream bridges can generate wake-up events.
+ */
+bool pci_dev_run_wake(struct pci_dev *dev)
+{
+ struct pci_bus *bus = dev->bus;
+
+ if (device_run_wake(&dev->dev))
+ return true;
+
+ if (!dev->pme_support)
+ return false;
+
+ while (bus->parent) {
+ struct pci_dev *bridge = bus->self;
+
+ if (device_run_wake(&bridge->dev))
+ return true;
+
+ bus = bus->parent;
+ }
+
+ /* We have reached the root bus. */
+ if (bus->bridge)
+ return device_run_wake(bus->bridge);
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(pci_dev_run_wake);
+
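
pci_finish_runtime_suspend() is meant to be the tail end of the PCI bus type's runtime-suspend path: the driver callback runs first, then the core saves config space, arms run-time wake-up and drops to the target power state. A simplified sketch of that flow (error handling trimmed, function name illustrative; the real sequencing lives in drivers/pci/pci-driver.c):

#include <linux/pci.h>
#include <linux/pm.h>

static int example_pci_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
	int error;

	if (pm && pm->runtime_suspend) {
		error = pm->runtime_suspend(dev);	/* driver quiesces itself */
		if (error)
			return error;
	}

	pci_save_state(pci_dev);			/* snapshot config space */
	return pci_finish_runtime_suspend(pci_dev);	/* arm wake-up, enter low power */
}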
+/**
* pci_pm_init - Initialize PM functions of given PCI device
* @dev: PCI device to handle.
*/
@@ -1375,7 +1583,10 @@ void pci_pm_init(struct pci_dev *dev)
int pm;
u16 pmc;
+ pm_runtime_forbid(&dev->dev);
+ device_enable_async_suspend(&dev->dev);
dev->wakeup_prepared = false;
+
dev->pm_cap = 0;
/* find PCI PM capability in list */
@@ -1392,6 +1603,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
+ dev->d3_delay = PCI_PM_D3_WAIT;
dev->d1_support = false;
dev->d2_support = false;
@@ -1409,7 +1621,8 @@ void pci_pm_init(struct pci_dev *dev)
pmc &= PCI_PM_CAP_PME_MASK;
if (pmc) {
- dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "PME# supported from%s%s%s%s%s\n",
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
@@ -1506,7 +1719,7 @@ void pci_enable_ari(struct pci_dev *dev)
u16 ctrl;
struct pci_dev *bridge;
- if (!dev->is_pcie || dev->devfn)
+ if (!pci_is_pcie(dev) || dev->devfn)
return;
pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
@@ -1514,10 +1727,10 @@ void pci_enable_ari(struct pci_dev *dev)
return;
bridge = dev->bus->self;
- if (!bridge || !bridge->is_pcie)
+ if (!bridge || !pci_is_pcie(bridge))
return;
- pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(bridge);
if (!pos)
return;
@@ -1532,6 +1745,54 @@ void pci_enable_ari(struct pci_dev *dev)
bridge->ari_enabled = 1;
}
+static int pci_acs_enable;
+
+/**
+ * pci_request_acs - ask for ACS to be enabled if supported
+ */
+void pci_request_acs(void)
+{
+ pci_acs_enable = 1;
+}
+
+/**
+ * pci_enable_acs - enable ACS if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_acs(struct pci_dev *dev)
+{
+ int pos;
+ u16 cap;
+ u16 ctrl;
+
+ if (!pci_acs_enable)
+ return;
+
+ if (!pci_is_pcie(dev))
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
+ if (!pos)
+ return;
+
+ pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
+ pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
+
+ /* Source Validation */
+ ctrl |= (cap & PCI_ACS_SV);
+
+ /* P2P Request Redirect */
+ ctrl |= (cap & PCI_ACS_RR);
+
+ /* P2P Completion Redirect */
+ ctrl |= (cap & PCI_ACS_CR);
+
+ /* Upstream Forwarding */
+ ctrl |= (cap & PCI_ACS_UF);
+
+ pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
+}
+
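
pci_request_acs() only latches a flag; the actual programming happens in pci_enable_acs(), which the enumeration code is expected to call for each newly added device. A subsystem that depends on peer-to-peer isolation therefore just has to request ACS before the buses are scanned. A hedged sketch (the IOMMU init function and its initcall level are assumptions):

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical IOMMU init: ask for ACS before PCI enumeration so that
 * Source Validation, Request/Completion Redirect and Upstream Forwarding
 * get enabled on every capable PCIe device. */
static int __init example_iommu_early_init(void)
{
	pci_request_acs();
	/* ... detect and program the IOMMU hardware here ... */
	return 0;
}
postcore_initcall(example_iommu_early_init);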
/**
* pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
* @dev: the PCI device
@@ -1665,9 +1926,7 @@ static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_n
return 0;
err_out:
- dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
- bar,
- pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+ dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
&pdev->resource[bar]);
return -EBUSY;
}
@@ -1862,31 +2121,6 @@ void pci_clear_master(struct pci_dev *dev)
__pci_set_master(dev, false);
}
-#ifdef PCI_DISABLE_MWI
-int pci_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-int pci_try_set_mwi(struct pci_dev *dev)
-{
- return 0;
-}
-
-void pci_clear_mwi(struct pci_dev *dev)
-{
-}
-
-#else
-
-#ifndef PCI_CACHE_LINE_BYTES
-#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
-#endif
-
-/* This can be overridden by arch code. */
-/* Don't forget this is measured in 32-bit words, not bytes */
-u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
-
/**
* pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
* @dev: the PCI device for which MWI is to be enabled
@@ -1897,13 +2131,12 @@ u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
*
* RETURNS: An appropriate -ERRNO error value on error, or zero for success.
*/
-static int
-pci_set_cacheline_size(struct pci_dev *dev)
+int pci_set_cacheline_size(struct pci_dev *dev)
{
u8 cacheline_size;
if (!pci_cache_line_size)
- return -EINVAL; /* The system doesn't support MWI. */
+ return -EINVAL;
/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
equal to or multiple of the right value. */
@@ -1924,6 +2157,24 @@ pci_set_cacheline_size(struct pci_dev *dev)
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
+
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
/**
* pci_set_mwi - enables memory-write-invalidate PCI transaction
@@ -2047,33 +2298,6 @@ void pci_msi_off(struct pci_dev *dev)
}
}
-#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
-/*
- * These can be overridden by arch-specific implementations
- */
-int
-pci_set_dma_mask(struct pci_dev *dev, u64 mask)
-{
- if (!pci_dma_supported(dev, mask))
- return -EIO;
-
- dev->dma_mask = mask;
-
- return 0;
-}
-
-int
-pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
-{
- if (!pci_dma_supported(dev, mask))
- return -EIO;
-
- dev->dev.coherent_dma_mask = mask;
-
- return 0;
-}
-#endif
-
#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
@@ -2095,9 +2319,9 @@ static int pcie_flr(struct pci_dev *dev, int probe)
int i;
int pos;
u32 cap;
- u16 status;
+ u16 status, control;
- pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ pos = pci_pcie_cap(dev);
if (!pos)
return -ENOTTY;
@@ -2122,8 +2346,10 @@ static int pcie_flr(struct pci_dev *dev, int probe)
"proceeding with reset anyway\n");
clear:
- pci_write_config_word(dev, pos + PCI_EXP_DEVCTL,
- PCI_EXP_DEVCTL_BCR_FLR);
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
+ control |= PCI_EXP_DEVCTL_BCR_FLR;
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
+
msleep(100);
return 0;
@@ -2187,12 +2413,12 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D3hot;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
csr &= ~PCI_PM_CTRL_STATE_MASK;
csr |= PCI_D0;
pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
- msleep(pci_pm_d3_delay);
+ pci_dev_d3_sleep(dev);
return 0;
}
@@ -2233,9 +2459,13 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
if (!probe) {
pci_block_user_cfg_access(dev);
/* block PM suspend, driver probe, etc. */
- down(&dev->dev.sem);
+ device_lock(&dev->dev);
}
+ rc = pci_dev_specific_reset(dev, probe);
+ if (rc != -ENOTTY)
+ goto done;
+
rc = pcie_flr(dev, probe);
if (rc != -ENOTTY)
goto done;
@@ -2251,7 +2481,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
rc = pci_parent_bus_reset(dev, probe);
done:
if (!probe) {
- up(&dev->dev.sem);
+ device_unlock(&dev->dev);
pci_unblock_user_cfg_access(dev);
}
@@ -2446,7 +2676,7 @@ int pcie_get_readrq(struct pci_dev *dev)
int ret, cap;
u16 ctl;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
return -EINVAL;
@@ -2476,7 +2706,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq)
v = (ffs(rq) - 8) << 12;
- cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ cap = pci_pcie_cap(dev);
if (!cap)
goto out;
@@ -2536,16 +2766,33 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return reg;
}
- dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+ dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
+ return 0;
+}
+
+/* Some architectures require additional programming to enable VGA */
+static arch_set_vga_state_t arch_set_vga_state;
+
+void __init pci_register_set_vga_state(arch_set_vga_state_t func)
+{
+ arch_set_vga_state = func; /* NULL disables */
+}
+
+static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ if (arch_set_vga_state)
+ return arch_set_vga_state(dev, decode, command_bits,
+ change_bridge);
return 0;
}
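
pci_register_set_vga_state() lets a platform hang chipset-specific VGA routing code off pci_set_vga_state(), which calls the hook before touching the PCI_COMMAND bits. A sketch of registering such a hook, with every name hypothetical:

#include <linux/init.h>
#include <linux/pci.h>

/* Hypothetical platform hook matching arch_set_vga_state_t. */
static int example_set_vga_state(struct pci_dev *pdev, bool decode,
				 unsigned int command_bits, bool change_bridge)
{
	/* program platform-specific VGA enable/decode bits here */
	return 0;
}

static int __init example_vga_hook_init(void)
{
	pci_register_set_vga_state(example_set_vga_state);
	return 0;
}
arch_initcall(example_vga_hook_init);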
/**
* pci_set_vga_state - set VGA decode state on device and parents if requested
- * @dev the PCI device
- * @decode - true = enable decoding, false = disable decoding
- * @command_bits PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
- * @change_bridge - traverse ancestors and change bridges
+ * @dev: the PCI device
+ * @decode: true = enable decoding, false = disable decoding
+ * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
+ * @change_bridge: traverse ancestors and change bridges
*/
int pci_set_vga_state(struct pci_dev *dev, bool decode,
unsigned int command_bits, bool change_bridge)
@@ -2553,9 +2800,15 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
struct pci_bus *bus;
struct pci_dev *bridge;
u16 cmd;
+ int rc;
WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+ /* ARCH specific VGA enables */
+ rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
+ if (rc)
+ return rc;
+
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (decode == true)
cmd |= command_bits;
@@ -2586,7 +2839,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
-spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(resource_alignment_lock);
/**
* pci_specified_resource_alignment - get resource alignment specified by user.
@@ -2719,16 +2972,10 @@ int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
return 1;
}
-static int __devinit pci_init(void)
+void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
- struct pci_dev *dev = NULL;
-
- while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
- pci_fixup_device(pci_fixup_final, dev);
- }
-
- return 0;
}
+EXPORT_SYMBOL(pci_fixup_cardbus);
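
pci_fixup_cardbus() is an empty weak stub that an architecture can override when hot-added CardBus devices need extra setup that boot-time fixups cannot provide. One plausible override, sketched purely as an illustration:

#include <linux/pci.h>

/* Hypothetical strong override in platform code: apply final fixups to
 * the freshly enumerated devices on a CardBus bus. */
void pci_fixup_cardbus(struct pci_bus *bus)
{
	struct pci_dev *dev;

	list_for_each_entry(dev, &bus->devices, bus_list)
		pci_fixup_device(pci_fixup_final, dev);
}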
static int __init pci_setup(char *str)
{
@@ -2767,8 +3014,6 @@ static int __init pci_setup(char *str)
}
early_param("pci", pci_setup);
-device_initcall(pci_init);
-
EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
@@ -2778,6 +3023,7 @@ EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
+EXPORT_SYMBOL(pci_register_set_vga_state);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
@@ -2793,8 +3039,6 @@ EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
-EXPORT_SYMBOL(pci_set_dma_mask);
-EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);
@@ -2804,10 +3048,8 @@ EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
-EXPORT_SYMBOL(pci_enable_wake);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
-